# --- Evaluation-script setup: CLI args, model loading, data loading -------
# NOTE(review): this fragment is truncated at the end (the final `if` has no
# visible body); code kept byte-identical, comments only.
parser.add_argument("--model_file", required=True, help='model file to use (e.g. model_best.pt)')
parser.add_argument("--bert_model_file", required=True, help='bert model file to use (e.g. model_bert_best.pt)')
parser.add_argument("--bert_path", required=True, help='path to bert files (bert_config*.json etc)')
parser.add_argument("--data_path", required=True, help='path to *.jsonl and *.db files')
parser.add_argument("--split", required=True, help='prefix of jsonl and db files (e.g. dev)')
parser.add_argument("--result_path", required=True, help='directory in which to place results')
args = construct_hyper_param(parser)

BERT_PT_PATH = args.bert_path
path_save_for_evaluation = args.result_path

# Load pre-trained models
path_model_bert = args.bert_model_file
path_model = args.model_file
args.no_pretraining = True  # counterintuitive, but avoids loading unused models
model, model_bert, tokenizer, bert_config = get_models(args, BERT_PT_PATH,
                                                       trained=True,
                                                       path_model_bert=path_model_bert,
                                                       path_model=path_model)

# Load data
dev_data, dev_table = load_wikisql_data(args.data_path, mode=args.split,
                                        toy_model=False, toy_size=args.toy_size,
                                        no_hs_tok=True)
dev_loader = torch.utils.data.DataLoader(
    batch_size=args.bS,
    dataset=dev_data,
    shuffle=False,
    num_workers=1,
    collate_fn=lambda x: x  # now dictionary values are not merged!
)
if(args.split=='test'):
    # Drops three specific test examples, highest index first so earlier
    # removals don't shift positions. NOTE(review): list.remove deletes the
    # first *equal* element, which matches the indexed one only if examples
    # are unique -- `del dev_data[i]` would be unambiguous; confirm intent.
    dev_data.remove(dev_data[1884])
    dev_data.remove(dev_data[1883])
    dev_data.remove(dev_data[1882])
# NOTE(review): chunk ends here mid-statement; the body of this `if`
# (presumably creating the results directory) continues beyond this view.
if not os.path.exists(path_save_for_evaluation) :
# --- Data loading ---------------------------------------------------------
# please provide the path to your training and testing datasets
train_df = pd.read_csv("...")
test_df = pd.read_csv("....")

# Target complications: one classifier is fitted per outcome.
outcomes = ['SBI', 'AKI', 'ARDS']

# Apply the shared stratification framework to both splits before modelling.
stratified_train = apply_stratified_framework(train_df, outcomes)
stratified_test = apply_stratified_framework(test_df, outcomes)

# Feature columns: vital-sign summary statistics followed by demographics,
# presenting symptoms, and comorbidities (order preserved from the original).
feature_columns = [
    'Diastolic Blood Pressure_mean', 'Diastolic Blood Pressure_min',
    'Oxygen Saturation_max', 'Oxygen Saturation_mean', 'Oxygen Saturation_min',
    'Peripheral Pulse Rate_max', 'Peripheral Pulse Rate_mean', 'Peripheral Pulse Rate_min',
    'Respiratory Rate_max', 'Respiratory Rate_mean', 'Respiratory Rate_min',
    'Systolic Blood Pressure_max', 'Systolic Blood Pressure_mean', 'Systolic Blood Pressure_min',
    'Temperature Axillary_max', 'Temperature Axillary_mean', 'Temperature Axillary_min',
    'GCS_mean', 'GCS_min', 'GCS_max',
    'GENDER', 'AGE',
    'COUGH', 'FEVER', 'SOB', 'SORE_THROAT', 'RASH',
    'BMI', 'DIABETES', 'HYPERTENSION', 'CKD', 'CANCER',
]

# Fit the per-outcome models on the training split, then score the test split.
fitted_models, training_sets, fitted_classifiers = get_models(outcomes, stratified_train, feature_columns)
actual_outcomes, predicted_outcomes = get_results(stratified_test, outcomes, fitted_models, feature_columns)

# Evaluation curves (ROC and precision-recall) on the held-out test set.
plot_roc(outcomes, actual_outcomes, predicted_outcomes, "testset")
plot_PRC(outcomes, actual_outcomes, predicted_outcomes, "testset")
# NOTE(review): this chunk begins mid-call -- the keyword arguments below
# belong to a 'train' HandHygiene dataset whose constructor starts above
# this view. Code kept byte-identical, comments only.
        temporal_transform=temporal_transform['train'],
        openpose_transform=openpose_transform['train'],
        spatial_transform=spatial_transform['train'],
        arguments=args),
    # Validation split: same dataset class pointed at the 'val' directory,
    # using the val-specific transform pipelines.
    'val': HandHygiene(os.path.join(VIDEO_DIR, 'val'),
                       temporal_transform=temporal_transform['val'],
                       openpose_transform=openpose_transform['val'],
                       spatial_transform=spatial_transform['val'],
                       arguments=args),
}

# create model
# Two-stream I3D: one RGB network and one optical-flow network, loaded from
# pre-trained weight files given in args.model_path.
# NOTE(review): the positional `True, 170` arguments are opaque here --
# check get_models' signature for their meaning before changing them.
i3d_rgb, i3d_flow = get_models(len(args.label), True, 170,
                               load_pt_weights=True,
                               rgb_weights_path=args.model_path.rgb,
                               flow_weights_path=args.model_path.flow)

# Replicate both streams across GPUs when more than one is available.
if torch.cuda.device_count() > 1:
    i3d_rgb = torch.nn.DataParallel(i3d_rgb).cuda()
    i3d_flow = torch.nn.DataParallel(i3d_flow).cuda()

# hyperparameters / trainable parameters
# Placeholders: per-stream optimisers and LR schedulers are filled in later.
optims = {'rgb': None, 'flow': None}
schedulers = {'rgb': None, 'flow': None}
# NOTE(review): presumably toggles freezing the backbone for feature
# extraction -- confirm where this flag is consumed below.
feature_extract = True

def trainable_params(model, mode='rgb'):
    # Collect the parameters to optimise for the given stream.
    # NOTE(review): definition continues past the end of this chunk.
    params_to_update = model.parameters()
    print("Params to learn:")