def evaluate(split, model_path, diagnosis, use_gpu):
    """Load an MRNet checkpoint and report loss/AUC on one data split.

    Args:
        split: one of 'train', 'valid', 'test' — selects which loader to use.
        model_path: path to a saved state_dict for MRNet.
        diagnosis: forwarded to load_data to pick the target condition.
        use_gpu: when true, load tensors onto the GPU and move the model there.

    Returns:
        (predictions, labels) as produced by run_model.

    Raises:
        ValueError: if split is not one of the three known names.
    """
    # load_data yields loaders in (train, valid, test) order; key them by name.
    loaders = dict(zip(('train', 'valid', 'test'), load_data(diagnosis, use_gpu)))

    net = MRNet()
    # Map storage to CPU unless a GPU run was requested.
    weights = torch.load(model_path, map_location=(None if use_gpu else 'cpu'))
    net.load_state_dict(weights)
    if use_gpu:
        net = net.cuda()

    # Validate the split only after model setup, mirroring the original order.
    if split not in loaders:
        raise ValueError("split must be 'train', 'valid', or 'test'")

    loss, auc, preds, labels = run_model(net, loaders[split])
    print(f'{split} loss: {loss:0.4f}')
    print(f'{split} AUC: {auc:0.4f}')
    return preds, labels
def get_model(weights_path="/mnt/g/Grad Projects/MRNet-interface/model weights/f1score_val0.7155_train0.8096_epoch14"):
    """Build an MRNet and load pretrained weights onto the CPU.

    The checkpoint location was previously hard-coded; it is now a parameter
    whose default preserves the original behavior, so existing callers are
    unaffected while other environments can supply their own path.

    Args:
        weights_path: file containing the model state_dict.

    Returns:
        An MRNet instance with the weights loaded.
    """
    model = MRNet()
    # Always map to CPU — this helper is used for CPU-side inference.
    state_dict = torch.load(weights_path, map_location=torch.device('cpu'))
    model.load_state_dict(state_dict)
    return model
def main(data_dir, models_dir, choose_16):
    """Train per-condition logistic-regression fusion models.

    Loads the best CNN checkpoint per (condition, plane), collects the three
    planes' predictions on the training set, and fits a LogisticRegressionCV
    per condition on those 3-feature vectors. The fitted models are pickled
    into models_dir.

    Args:
        data_dir: root of the dataset fed to make_data_loader.
        models_dir: directory holding *.pt CNN checkpoints; also the output
            directory for the lr_<condition>.pkl files.
        choose_16: forwarded to make_data_loader (sampling option).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    planes = ['axial', 'coronal', 'sagittal']
    conditions = ['abnormal', 'acl', 'meniscus']

    models = []
    print(f'Loading best CNN models from {models_dir}...')
    for condition in conditions:
        models_per_condition = []
        for plane in planes:
            # Lexicographically last matching checkpoint is taken as "best";
            # assumes the naming convention sorts best-last — TODO confirm.
            checkpoint_pattern = glob(f'{models_dir}/*{plane}*{condition}*.pt')
            checkpoint_path = sorted(checkpoint_pattern)[-1]
            checkpoint = torch.load(checkpoint_path, map_location=device)
            model = MRNet().to(device)
            model.load_state_dict(checkpoint['state_dict'])
            # NOTE(review): models are left in their constructed mode; if MRNet
            # contains dropout/batchnorm, model.eval() may be needed — verify.
            models_per_condition.append(model)
        models.append(models_per_condition)

    print(f'Creating data loaders...')
    axial_loader = make_data_loader(data_dir, 'train', 'axial', choose_16)
    coronal_loader = make_data_loader(data_dir, 'train', 'coronal', choose_16)
    sagittal_loader = make_data_loader(data_dir, 'train', 'sagittal', choose_16)

    print(f'Collecting predictions on train dataset from the models...')
    ys = []
    Xs = [[], [], []]  # Abnormal, ACL, Meniscus
    with tqdm(total=len(axial_loader)) as pbar:
        for (axial_inputs, labels), (coronal_inputs, _), (sagittal_inputs, _) in \
                zip(axial_loader, coronal_loader, sagittal_loader):
            axial_inputs, coronal_inputs, sagittal_inputs = \
                axial_inputs.to(device), coronal_inputs.to(device), sagittal_inputs.to(device)
            ys.append(labels[0].cpu().tolist())
            for i, model in enumerate(models):
                axial_pred = model[0](axial_inputs).detach().cpu().item()
                coronal_pred = model[1](coronal_inputs).detach().cpu().item()
                sagittal_pred = model[2](sagittal_inputs).detach().cpu().item()
                X = [axial_pred, coronal_pred, sagittal_pred]
                Xs[i].append(X)
            pbar.update(1)

    # Transpose so ys[i] is the label vector for condition i.
    ys = np.asarray(ys).transpose()
    Xs = np.asarray(Xs)

    print(f'Training logistic regression models for each condition...')
    clfs = []
    for X, y in zip(Xs, ys):
        clfs.append(LogisticRegressionCV(cv=5, random_state=0).fit(X, y))

    for i, clf in enumerate(clfs):
        # BUG FIX: the original printed clf.score(X, y) with X, y leaked from
        # the previous loop, so every classifier was scored on the LAST
        # condition's data. Score each classifier on its own features/labels.
        print(
            f'Cross validation score for {conditions[i]}: {clf.score(Xs[i], ys[i]):.3f}'
        )
        clf_path = f'{models_dir}/lr_{conditions[i]}.pkl'
        joblib.dump(clf, clf_path)

    print(f'Logistic regression models saved to {models_dir}')
def main(valid_paths_csv, output_dir):
    """Generate per-case predictions by fusing per-plane CNN outputs.

    Reads .npy paths (three rows per case: one per plane) from valid_paths_csv,
    runs the nine CNNs (3 conditions x 3 planes) listed in
    src/cnn_models_paths.txt, combines each condition's three plane scores with
    the logistic-regression models from src/lr_models_paths.txt, and appends
    one row of three probabilities per case to output_dir/predictions.csv.

    Args:
        valid_paths_csv: headerless CSV whose first column is the .npy path.
        output_dir: directory for predictions.csv (created if missing).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    input_files_df = pd.read_csv(valid_paths_csv, header=None)
    cnn_models_paths = 'src/cnn_models_paths.txt'
    lr_models_paths = 'src/lr_models_paths.txt'

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_file = f'{output_dir}/predictions.csv'
    if os.path.exists(output_file):
        # BUG FIX: the file was renamed to '<file>.back' while the message
        # claimed '<file>.bak' — use '.bak' consistently.
        os.rename(output_file, f'{output_file}.bak')
        print(f'!! {output_file} already exists, renamed to {output_file}.bak')

    # Load MRNet models. Expected order in the list file: 3 abnormal,
    # 3 acl, 3 meniscus checkpoints — TODO confirm against the file.
    print(f'Loading CNN models listed in {cnn_models_paths}...')
    with open(cnn_models_paths, 'r') as paths_file:  # close handle (was leaked)
        cnn_models_paths = [line.rstrip('\n') for line in paths_file]

    abnormal_mrnets = []
    acl_mrnets = []
    meniscus_mrnets = []
    for idx, mrnet_path in enumerate(cnn_models_paths):
        model = MRNet().to(device)
        checkpoint = torch.load(mrnet_path, map_location=device)
        model.load_state_dict(checkpoint['state_dict'])
        if idx < 3:
            abnormal_mrnets.append(model)
        elif idx < 6:
            acl_mrnets.append(model)
        else:
            meniscus_mrnets.append(model)
    mrnets = [abnormal_mrnets, acl_mrnets, meniscus_mrnets]

    # Load logistic regression models
    print(f'Loading logistic regression models listed in {lr_models_paths}...')
    with open(lr_models_paths, 'r') as paths_file:  # close handle (was leaked)
        lr_models_paths = [line.rstrip('\n') for line in paths_file]
    lrs = [joblib.load(lr_path) for lr_path in lr_models_paths]

    # Parse input, 3 rows at a time (i.e. per case)
    npy_paths = [row.values[0] for _, row in input_files_df.iterrows()]
    transform = transforms.Compose(
        [transforms.ToPILImage(), transforms.ToTensor()])

    print(f'Generating predictions per case...')
    print(f'Predictions will be saved as {output_file}')
    # Renamed loop variables: the original reused `i` for both the case offset
    # and the condition index, shadowing the outer loop variable.
    for case_start in tqdm(range(0, len(npy_paths), 3)):
        case_paths = [npy_paths[case_start],
                      npy_paths[case_start + 1],
                      npy_paths[case_start + 2]]

        data = []
        for case_path in case_paths:
            series = preprocess_data(case_path, transform)
            data.append(series.unsqueeze(0).to(device))

        # Make predictions per case
        case_preds = []
        for cond_idx, mrnet in enumerate(mrnets):  # For each condition (mrnet)
            # Based on each plane (data)
            sagittal_pred = mrnet[0](data[0]).detach().cpu().item()
            coronal_pred = mrnet[1](data[1]).detach().cpu().item()
            axial_pred = mrnet[2](data[2]).detach().cpu().item()

            # Combine predictions to make a final prediction
            X = [[axial_pred, coronal_pred, sagittal_pred]]
            case_preds.append(np.float64(lrs[cond_idx].predict_proba(X)[:, 1]))

        # Write to output csv - append if it exists already
        with open(output_file, 'a+') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(case_preds)
# NOTE(review): this chunk begins mid-function — the enclosing `def` (a helper
# that dispatches a captum attribution algorithm over `algo`, `inputs`,
# `**kwargs`) lies outside the visible source.
    ''' A common function for computing captum attributions '''
    return algo.attribute(inputs, **kwargs)


# load model
# Dead experiment kept as a string literal: a pretrained SqueezeNet baseline
# that was previously used as the Grad-CAM target model.
"""
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
    param.requires_grad = True
"""
# Build the multi-head MRNet and load its weights onto the CPU.
gc_model = MRNet(useMultiHead=True)
weights = torch.load("weights", map_location=torch.device('cpu'))
gc_model.load_state_dict(weights)
#print(gc_model)

# Grad cam code
# Target layer whose forward activations and backward gradients are captured
# for Grad-CAM; index 10 of the backbone's feature stack — the exact layer
# depends on MRNet's internal `model.features` structure (TODO confirm).
conv_module = gc_model.model.features[10]
#conv_module = gc_model.features[12]

gradient_value = None  # Stores gradient of the module you chose above during a backwards pass.
activation_value = None  # Stores the activation of the module you chose above during a forwards pass.


def gradient_hook(a, b, gradient):
    # Backward hook for conv_module: stash the first grad-output tensor in the
    # module-level `gradient_value` so Grad-CAM can read it after backward().
    # Parameter order matches PyTorch's (module, grad_input, grad_output).
    global gradient_value
    gradient_value = gradient[0]
def evaluate(split, model_path, diagnosis, dataset, use_gpu, attention):
    """Evaluate trained MRNet model(s) on an external or MRNet-style dataset.

    dataset == 0: load a whole pickled model from model_path and report
        loss/AUC on the chosen split of the external dataset.
    dataset == 1: pick the highest-epoch checkpoint per plane under
        model_path/{sagittal,axial,coronal}, evaluate each plane net, fit a
        logistic-regression fusion on the train predictions, and report
        AUC plus a classification report on the validation predictions.

    Args:
        split: 'train', 'valid', or 'test' (only used by the dataset == 0 path
            to select a loader; dataset == 1 always uses train + valid).
        model_path: model file (dataset == 0) or checkpoint root (dataset == 1).
        diagnosis: forwarded to the data-loading helpers.
        dataset: 0 for the external dataset, 1 for the per-plane MRNet data.
        use_gpu: move models to CUDA and keep checkpoint storage on GPU.
        attention: forwarded to MRNet as useMultiHead (dataset == 1 only).

    Returns:
        (preds, labels) from the dataset == 0 path; (None, None) otherwise.
    """
    preds = None
    labels = None

    if dataset == 0:
        train_loader, valid_loader, test_loader = external_load_data(diagnosis, use_gpu)
        # BUG FIX: torch.load without map_location fails on CPU-only hosts for
        # checkpoints saved on GPU; map to CPU unless a GPU run was requested,
        # matching every other torch.load call in this function.
        model = torch.load(model_path, map_location=(None if use_gpu else 'cpu'))
        if use_gpu:
            model = model.cuda()
        if split == 'train':
            loader = train_loader
        elif split == 'valid':
            loader = valid_loader
        elif split == 'test':
            loader = test_loader
        else:
            raise ValueError("split must be 'train', 'valid', or 'test'")
        loss, auc, preds, labels = run_model(model, loader)
        print(f'{split} loss: {loss:0.4f}')
        print(f'{split} AUC: {auc:0.4f}')

    if dataset == 1:
        train_loaders, valid_loaders = mr_load_data(diagnosis, use_gpu, train_shuffle=True)

        # Choose the checkpoint with the largest epoch number per plane; file
        # names are assumed to end in "...epoch<N>" — TODO confirm convention.
        path_s = os.listdir(model_path + '/sagittal')
        path_a = os.listdir(model_path + '/axial')
        path_c = os.listdir(model_path + '/coronal')
        ps = [int(x.split("epoch")[1]) for x in path_s]
        pa = [int(x.split("epoch")[1]) for x in path_a]
        pc = [int(x.split("epoch")[1]) for x in path_c]
        model_path_sag = path_s[ps.index(max(ps))]
        model_path_ax = path_a[pa.index(max(pa))]
        model_path_cor = path_c[pc.index(max(pc))]
        print("{} {} {}".format(model_path_sag, model_path_ax, model_path_cor))

        map_loc = None if use_gpu else 'cpu'
        state_dict_sag = torch.load(model_path + '/sagittal/' + model_path_sag, map_location=map_loc)
        state_dict_ax = torch.load(model_path + '/axial/' + model_path_ax, map_location=map_loc)
        state_dict_cor = torch.load(model_path + '/coronal/' + model_path_cor, map_location=map_loc)

        # max_layers values match the slice counts each plane net was built
        # with at training time — TODO confirm against the training config.
        model_sag = MRNet(useMultiHead=attention, max_layers=51)
        model_sag.load_state_dict(state_dict_sag)
        model_ax = MRNet(useMultiHead=attention, max_layers=61)
        model_ax.load_state_dict(state_dict_ax)
        model_cor = MRNet(useMultiHead=attention, max_layers=58)
        model_cor.load_state_dict(state_dict_cor)

        if use_gpu:
            model_sag = model_sag.cuda()
            model_ax = model_ax.cuda()
            model_cor = model_cor.cuda()

        # Per-plane evaluation: train predictions feed the fusion model,
        # validation predictions are held out for the final report.
        loss_sag, auc_sag, t_preds_sag, labels_sag = run_model(model_sag, train_loaders[0])
        _, _, preds_sag, _ = run_model(model_sag, valid_loaders[0])
        print(f'sagittal {split} loss: {loss_sag:0.4f}')
        print(f'sagittal {split} AUC: {auc_sag:0.4f}')

        loss_ax, auc_ax, t_preds_ax, labels_ax = run_model(model_ax, train_loaders[1])
        _, _, preds_ax, _ = run_model(model_ax, valid_loaders[1])
        print(f'axial {split} loss: {loss_ax:0.4f}')
        print(f'axial {split} AUC: {auc_ax:0.4f}')

        loss_cor, auc_cor, t_preds_cor, labels_cor = run_model(model_cor, train_loaders[2])
        _, _, preds_cor, valid_labels = run_model(model_cor, valid_loaders[2])
        print(f'coronal {split} loss: {loss_cor:0.4f}')
        print(f'coronal {split} AUC: {auc_cor:0.4f}')

        # Fusion: one feature per plane, fit on the train-set predictions.
        X = np.zeros((len(t_preds_cor), 3))
        X[:, 0] = t_preds_sag
        X[:, 1] = t_preds_ax
        X[:, 2] = t_preds_cor
        y = np.array(labels_cor)
        lgr = LogisticRegression(solver='lbfgs')
        lgr.fit(X, y)

        X_valid = np.zeros((len(preds_cor), 3))
        X_valid[:, 0] = preds_sag
        X_valid[:, 1] = preds_ax
        X_valid[:, 2] = preds_cor
        y_preds = lgr.predict(X_valid)
        y_true = np.array(valid_labels)
        # BUG FIX: ROC AUC must be computed from continuous scores, not the
        # thresholded 0/1 predictions (which collapse the ROC curve to a
        # single operating point).
        y_scores = lgr.predict_proba(X_valid)[:, 1]
        print(metrics.roc_auc_score(y_true, y_scores))
        print(metrics.classification_report(y_true, y_preds, target_names=['class 0', 'class 1']))

    return preds, labels