def __init__(self):
    """Run an evaluation experiment: load a trained net, score epochs 1-10
    against every validation annotation set, then reload a hand-picked best
    epoch and visualise its thresholded prediction.

    NOTE(review): depends on ``self.data()``, ``self.img_x``, ``self.img_y``
    and ``self.val`` being provided by the enclosing class — not visible here.
    """
    # data: presumably populates self.img_x / self.img_y / self.val — TODO confirm
    self.data()

    # Load model(s)
    model_name = 'unet'  # ['simple', 'ti-unet', 'unet']:
    folder = f'C:/Users/admin/Data/ghent_altar/net_weight/{model_name}_d1_k9_n80'
    epoch = 1
    path = f'C:/Users/admin/Data/ghent_altar/net_weight/{model_name}_d1_k9_n80/w_{epoch}.h5'
    from scripts.scripts_performance.main_performance import load_model_quick
    model = load_model_quick(path)
    neural_net = NeuralNet(model, w_ext=10, norm_x=True)
    model.summary()

    # Score every saved epoch on each validation set.
    for epoch in range(1, 10 + 1):
        print('epoch', epoch)
        neural_net.load(folder, epoch)

        # Predict
        y_pred = neural_net.predict(self.img_x)

        if 0:  # debug: show raw prediction map
            plt.imshow(y_pred[..., 0])
            plt.show()

        for val_name in self.val:
            print(val_name)
            y_true_val = self.val[val_name]
            data_i = _eval_func_single(y_true_val, y_pred)
            print(data_i)

    # TODO best performing (ti-unet: 4)
    # Reload the hand-picked best epoch and visualise.
    neural_net.load(folder, 4)
    y_pred = neural_net.predict(self.img_x)

    from performance.testing import get_y_pred_thresh
    # NOTE(review): data_i['thresh'] leaks out of the LAST loop iteration;
    # raises NameError if self.val is empty — confirm intent.
    y_pred_thresh = get_y_pred_thresh(y_pred, data_i['thresh'])

    concurrent([
        self.img_x[..., :3], self.img_y[..., 0], y_pred[..., 0],
        y_pred_thresh[..., 0]
    ])
    y_pred  # no-op expression statement (leftover); has no effect
def average_out_pred(r=2):
    """Average the paint-loss channel of predictions over a window of epochs.

    Loads the ti-unet weights for epochs ``[epoch - r, epoch + r]`` and
    returns the mean of the per-epoch prediction maps. Missing weight files
    are skipped (best-effort).

    NOTE(review): ``folder_base``, ``data``, ``k``, ``epoch`` and ``img_x``
    are free variables taken from the enclosing scope — not visible here.

    :param r: half-width of the epoch window to average over (default 2).
    :return: array, mean of ``y_pred[..., 1]`` over the loaded epochs.
    """
    model_name = 'ti-unet'
    path = os.path.join(
        folder_base, f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{1}.h5')
    model_i = load_model_quick(path)
    neural_net_i = NeuralNet(model_i, w_ext=10, norm_x=True)
    y_pred_lst = []
    # BUGFIX: the original re-assigned `r = 2` here, silently discarding the
    # caller's argument; removed so the parameter is honoured (default unchanged).
    for epoch_i in range(epoch - r, epoch + r + 1):  # epochs
        neural_net_i.load(path.rsplit('/', 1)[0], epoch_i)  # Load
        try:
            y_pred_i = neural_net_i.predict(img_x)
        except Exception as e:
            # Weight file for this epoch may not exist; skip it.
            print(e)
            continue
        y_pred_lst.append(y_pred_i[..., 1])
    y_pred_avg = np.mean(y_pred_lst, axis=0)
    return y_pred_avg
def eval_3outputs():
    """Evaluate a 3-output ti-unet on the 19botright panel.

    Collapses the 3-channel prediction to a 2-class (background / paint-loss)
    map by keeping channel 1 as the positive class, then scores it.

    NOTE(review): ``data`` is a free variable from the enclosing scope and is
    asserted to be '19botrightcrack3'.

    :return: None (prints the evaluation dict).
    """
    folder_base = 'C:/Users/admin/Data/ghent_altar/' if os.name == 'nt' else '/scratch/lameeus/data/ghent_altar/'
    assert data == '19botrightcrack3'
    k = 9
    epoch = 25
    model_name = 'ti-unet'
    # (removed a second, byte-identical assignment of folder_base that
    #  appeared here in the original — it was pure duplication)
    path = os.path.join(
        folder_base,
        f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{epoch}.h5')
    model = load_model_quick(path)
    neural_net = NeuralNet(model, w_ext=10, norm_x=True)

    from scripts.journal_paper.comparison_sh.shared import load_data
    a = load_data("19botright", 80)
    img_x, y_eval = a.get_x_train(), a.get_y_test()

    y_pred = neural_net.predict(img_x)
    assert y_pred.shape[-1] == 3

    # Fold the 3 outputs into a binary (1 - p, p) pair on channel 1.
    y_pred2 = np.stack([1 - y_pred[..., 1], y_pred[..., 1]], axis=-1)
    data_i = _eval_func_single(y_eval, y_pred2)
    print(data_i)
    return
def pred_epochs():
    """Predict the Lamb panel with ti-unet checkpoints saved every 10 epochs
    and display the paint-loss maps side by side.

    :return: 1
    """
    img_x, img_y_val = data_lamb()

    d = 2
    k = 10
    model_name = 'ti-unet'
    train_data = '1319_10nat'
    w_ext = 10 if d == 1 else 26

    predictions = []
    epochs_done = []
    for epoch in range(10, 101, 10):
        print(epoch)
        # Transfer-learned runs ('1319_*') were saved with a 50-epoch offset.
        epoch_start = 50
        epoch_corr = epoch + epoch_start if train_data[:5] == '1319_' else epoch
        path = f'C:/Users/admin/Data/ghent_altar/net_weight/{train_data}/{model_name}_d{d}_k{k}/w_{epoch_corr}.h5'
        try:
            model = load_model_quick(path)
        except Exception as e:
            # Checkpoint may not exist for this epoch; skip it.
            print(e)
            continue

        net = NeuralNet(model, w_ext=w_ext, norm_x=True)
        y_pred = net.predict(img_x)

        if 0:  # optional per-epoch scoring (disabled)
            data_i = _eval_func_single(img_y_val, y_pred, metric='kappa')
            print(data_i)
            data_i = _eval_func_single(img_y_val, y_pred, metric='jaccard')
            print(data_i)

        predictions.append(y_pred)
        epochs_done.append(epoch)

    concurrent([p[..., 1] for p in predictions], epochs_done)
    plt.show()
    return 1
def main():
    """Grid-evaluate k-fold ti-unet checkpoints on the 10lamb panel.

    For every capacity ``k`` and every fold, scores each epoch's prediction
    and the running average of predictions (accumulated from the highest
    epoch downwards), and appends both score tables to CSV files.

    :return: None
    """
    ### Settings
    k_range = range(2, 30 + 1)
    # k_range = [10,11]
    fold_range = range(6)
    # fold_range = [0, 1]
    epoch_range = range(1, 40 + 1)
    # epoch_range = [39, 40]

    filename_single = f'tiunet_10lamb_kfold_single'
    filename_avg_pred = f'tiunet_10lamb_kfold_avgpred'

    if os.name == 'nt':  # windows laptop
        folder_weights = 'C:/Users/Laurens_laptop_w/data'
        folder_save = 'C:/Users/Laurens_laptop_w/data/ghent_altar/dataframes'
    else:
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
        folder_save = '/home/lameeus/data/ghent_altar/dataframes'

    ### Init
    # Epochs are visited in DESCENDING order so the running average starts
    # from the most-trained checkpoint.
    epoch_range_desc = np.sort(epoch_range)[::-1]
    k_fold_train_data = get_10lamb_6patches(5)  # 5 is the number of modalities
    train_data_all = k_fold_train_data.get_train_data_all()
    img_x = train_data_all.get_x_train()
    img_x = rescale0to1(img_x)
    img_y_all = train_data_all.get_y_train()

    for k in k_range:
        for i_fold in fold_range:
            ### Reinit to make sure
            model = None
            list_y_pred = []
            train_data_i = k_fold_train_data.k_split_i(i_fold)
            img_y_tr = train_data_i.get_y_train()
            img_y_te = train_data_i.get_y_test()
            ###
            lst_data_single = []
            lst_data_avg_pred = []
            for epoch in epoch_range_desc:
                """ Load model """
                filepath_model = os.path.join(
                    folder_weights,
                    f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5')
                if epoch == epoch_range_desc[0]:
                    assert model is None
                    assert len(list_y_pred) == 0
                # presumably rebuilds only on the first call and afterwards
                # just loads weights into `model` — TODO confirm
                # load_model_quick semantics
                model = load_model_quick(filepath_model, model)
                """ Inference """
                n = NeuralNet(model, w_ext=10)
                y_pred = n.predict(img_x)
                """ Average out predictions """
                list_y_pred.append(y_pred)
                y_avg_pred = np.mean(list_y_pred, axis=0)
                """ thresh based on GT """
                # Thresholds are derived from ALL annotations, not the fold's
                # test set.
                thresh_single = optimal_test_thresh_equal_distribution(img_y_all, y_pred)
                thresh_avg_pred = optimal_test_thresh_equal_distribution(img_y_all, y_avg_pred)
                """ Get scores """
                data_single_i = {'k': k, 'i_fold': i_fold, 'epoch': epoch}
                data_avg_pred_i = {
                    'k': k,
                    'i_fold': i_fold,
                    'epoch_start': epoch,
                    'epoch_end': epoch_range_desc[0]
                }
                data_single_i.update(foo_performance(img_y_te, y_pred, thresh_single))
                data_avg_pred_i.update(foo_performance(img_y_te, y_avg_pred, thresh_avg_pred))
                if 1:
                    print('single', data_single_i)
                    print('avg pred', data_avg_pred_i)
                lst_data_single.append(data_single_i)
                lst_data_avg_pred.append(data_avg_pred_i)

            """ Save data """
            # Append to the CSVs if they already exist (write header only once).
            df_single = pd.DataFrame(lst_data_single)
            df_avg_pred = pd.DataFrame(lst_data_avg_pred)
            path_single = os.path.join(folder_save, filename_single + '.csv')
            path_avg_pred = os.path.join(folder_save, filename_avg_pred + '.csv')
            if os.path.exists(path_single):
                df_single.to_csv(path_single, mode='a', header=False, index=False)
            else:
                df_single.to_csv(path_single, index=False)
            if os.path.exists(path_avg_pred):
                df_avg_pred.to_csv(path_avg_pred, mode='a', header=False, index=False)
            else:
                df_avg_pred.to_csv(path_avg_pred, index=False)
    return
def main():
    """Evaluate pretrained-encoder U-Net k-fold checkpoints on 10lamb.

    Mirrors the ti-unet k-fold evaluation: per fold, scores each epoch and
    the running average of predictions (highest epoch first), then appends
    results to CSVs via ``pandas_save``.

    :return: None
    """
    b_encoder_fixed = False
    info_enc_fixed = '_enc_fixed'
    folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold_pretrained'
    folder_save = '/home/lameeus/data/ghent_altar/dataframes'
    filename_single = f'pretrained_unet_10lamb_kfold_single'
    filename_avg_pred = f'pretrained_unet_10lamb_kfold_avgpred'
    # Tag paths/filenames when the encoder was kept frozen during training.
    folder_weights += info_enc_fixed if b_encoder_fixed else ''
    filename_single += info_enc_fixed if b_encoder_fixed else ''
    filename_avg_pred += info_enc_fixed if b_encoder_fixed else ''

    fold_range = range(6)
    # fold_range = [0, 1]
    k = 10
    epoch_range = range(1, 40 + 1)
    w_ext_in = 28

    k_fold_train_data = get_10lamb_6patches(5)  # 5 is the number of modalities
    train_data_all = k_fold_train_data.get_train_data_all()
    img_x = train_data_all.get_x_train()
    img_x = rescale0to1(img_x)
    img_clean = img_x[..., :3]
    img_y_all = train_data_all.get_y_train()

    b_plot = False

    for i_fold in fold_range:
        print(i_fold)
        img_y_te = k_fold_train_data.k_split_i(i_fold).get_y_test()

        # Init for range epochs
        lst_data_single = []
        lst_data_avg_pred = []
        list_y_pred = []
        model = None

        # Descending epoch order: the running average starts at the
        # most-trained checkpoint.
        for epoch in np.sort(epoch_range)[::-1]:
            filepath_model = os.path.join(
                folder_weights, f'unet_enc_k{k}_ifold{i_fold}/w_{epoch}.h5')
            model = load_model_quick(filepath_model, model=model)
            n = NeuralNet(model, w_ext=w_ext_in)
            y_pred = n.predict(img_x)
            """ Average out predictions """
            list_y_pred.append(y_pred)
            y_avg_pred = np.mean(list_y_pred, axis=0)
            # Thresholds derived from ALL annotations.
            thresh_single = optimal_test_thresh_equal_distribution(
                img_y_all, y_pred)
            thresh_avg_pred = optimal_test_thresh_equal_distribution(
                img_y_all, y_avg_pred)
            y_pred_bin = np.greater_equal(y_pred[..., 1], thresh_single)
            dict_perf = foo_performance(img_y_te, y_pred, thresh_single)
            print(dict_perf)
            if b_plot:
                concurrent([
                    y_pred_bin, img_clean,
                    semi_transparant(img_clean, y_pred_bin),
                    semi_transparant(img_clean, img_y_te[..., 1].astype(bool))
                ])
            data_single_i = {'k': k, 'i_fold': i_fold, 'epoch': epoch}
            data_avg_pred_i = {
                'k': k,
                'i_fold': i_fold,
                'epoch_start': epoch,
                'epoch_end': max(epoch_range)
            }
            data_single_i.update(dict_perf)
            data_avg_pred_i.update(
                foo_performance(img_y_te, y_avg_pred, thresh_avg_pred))
            lst_data_single.append(data_single_i)
            lst_data_avg_pred.append(data_avg_pred_i)

        # Persist per-fold results (pandas_save presumably appends).
        df_single = pd.DataFrame(lst_data_single)
        df_avg_pred = pd.DataFrame(lst_data_avg_pred)
        path_single = os.path.join(folder_save, filename_single + '.csv')
        path_avg_pred = os.path.join(folder_save, filename_avg_pred + '.csv')
        pandas_save(path_single, df_single, append=True)
        pandas_save(path_avg_pred, df_avg_pred, append=True)
    return
def main():
    """Train pretrained-encoder U-Nets on the 10lamb k-fold splits, then run
    a classification/threshold study on averaged ti-unet predictions.

    Two phases, each gated by an always-true ``if 1:``:
      1) Train: per fold, build a U-Net on top of a pretrained encoder,
         train it and visualise the prediction.
      2) Classification: average ti-unet predictions of epochs 31..40 for one
         fold and compare several threshold-selection strategies.

    :return: None
    """
    ### Settings
    mod = 5
    w_patch = 16 * 2
    """
    Data (all important modalities)
    """
    # folder_windows = r'C:\Users\Laurens_laptop_w\OneDrive - UGent\data\10lamb'
    train_data = get_10lamb_old(mod)
    img_x, img_y_tr, _, _ = get_training_data(train_data)
    # Normalise the input!
    img_x = rescale0to1(img_x)
    """
    Train segmentation
        1) reuse everything
        2) fix encoder
    """
    if 1:
        if 1:
            b_encoder_fixed = False
            info_enc_fixed = '_enc_fixed' if b_encoder_fixed else ''
            get_info = lambda: f'10lamb_kfold_pretrained{info_enc_fixed}/unet_enc_k{k}_ifold{i_fold}'
            n_epochs = 40
            k = 10
            if k == 10:
                epoch_w = 100  # NOTE(review): appears unused below — confirm
            else:
                raise NotImplementedError()
            ### Settings you don't have to change:
            w_patch = 50
            w_ext_in = 28
            b_double = False  # NOTE(review): unused here
            padding = 'valid'  # NOTE(review): unused here
        # TODO flag for converting encoder to dilated conv

        def get_unet_pretrained_encoder():
            # Build a U-Net whose encoder comes pre-trained; decoder outputs
            # 2 classes. get_model_encoder()/decoder() come from elsewhere.
            model_encoder = get_model_encoder()
            encoder_inputs = model_encoder.input
            decoder_outputs = decoder(model_encoder, f_out=2)
            model_pretrained_unet = Model(encoder_inputs, decoder_outputs)
            from methods.examples import compile_segm
            compile_segm(model_pretrained_unet, lr=1e-4)
            model_pretrained_unet.summary()
            return model_pretrained_unet

        """
        Train
        """
        k_fold_train_data = get_10lamb_6patches(5)
        for i_fold in range(6):
            """
            Get a new network (not trained yet for segmentation)
            """
            model_pretrained_unet = get_unet_pretrained_encoder()
            n_pretrained_unet = NeuralNet(model_pretrained_unet)
            """
            The data
            """
            train_data_i = k_fold_train_data.k_split_i(i_fold)
            info = get_info()
            img_y_tr = train_data_i.get_y_train()
            img_y_te = train_data_i.get_y_test()
            flow_tr = get_flow(
                img_x,
                img_y_tr,
                w_patch=w_patch,  # Comes from 10
                w_ext_in=w_ext_in)
            flow_te = get_flow(
                img_x,
                img_y_te,
                w_patch=w_patch,  # Comes from 10
                w_ext_in=w_ext_in)
            n_pretrained_unet.train(flow_tr, flow_te, epochs=n_epochs, verbose=1, info=info)
            """
            Prediction
            """
            n_pretrained_unet.w_ext = w_ext_in
            y_pred = n_pretrained_unet.predict(img_x)
            concurrent([y_pred[..., 1]])
    """
    Classification
    """
    if 1:
        im_clean = img_x[..., :3]
        k = 8
        i_fold = 3
        epoch_last = 40
        from methods.examples import kappa_loss, weighted_categorical_crossentropy
        from performance.metrics import accuracy_with0, jaccard_with0
        loss = weighted_categorical_crossentropy((1, 1))
        list_y_pred = []
        ### K fold validation
        k_fold_train_data = get_10lamb_6patches(5)
        train_data_i = k_fold_train_data.k_split_i(i_fold)
        img_y_tr = train_data_i.get_y_train()
        img_y_te = train_data_i.get_y_test()
        # Average predictions of the last 10 checkpoints (epochs 31..40).
        for epoch in np.arange(31, epoch_last + 1):
            filepath_model = f'/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5'
            model = load_model(filepath_model, custom_objects={
                'loss': loss,
                'accuracy_with0': accuracy_with0,
                'jaccard_with0': jaccard_with0,
                'kappa_loss': kappa_loss
            })
            n = NeuralNet(model, w_ext=10)
            y_pred = n.predict(img_x)
            list_y_pred.append(y_pred)
        y_pred_mean = np.mean(list_y_pred, axis=0)
        q1 = y_pred_mean[..., 1]
        concurrent([q1, q1.round(), im_clean])
        """
        Optimal threshold (making conf matrix symmetric, not based on maximising kappa)
        """
        y_gt = np.any([img_y_tr, img_y_te], axis=0)
        from performance.testing import _get_scores, filter_non_zero

        def foo_performance(y_true, y_pred, thresh):
            # is basically argmax
            y_pred_thresh_arg = np.greater_equal(y_pred[..., 1], thresh)
            y_true_flat, y_pred_thresh_arg_flat = filter_non_zero(
                y_true, y_pred_thresh_arg)
            y_te_argmax = np.argmax(y_true_flat, axis=-1)
            # Kappa
            return _get_scores(y_te_argmax, y_pred_thresh_arg_flat)[-1]

        """
        1. BEST? PERFORMANCE based on test set
        """
        print('1. Test distribution optimization')
        thresh = optimal_test_thresh_equal_distribution(img_y_te, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])
        print(f'thresh: {thresh}')
        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))
        print('\nIncremental optimization on test set')
        test_thresh2 = test_thresh_incremental(y_pred_mean, img_y_tr, img_y_te, n=5, verbose=0)
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, test_thresh2))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, test_thresh2))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, test_thresh2))
        """
        2. based on train
        """
        print('\n2. Training distribution optimization')
        thresh = optimal_test_thresh_equal_distribution(img_y_tr, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])
        print(f'thresh: {thresh}')
        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))
        """
        3. CONSISTENT: based on train+set
        """
        print('\n3. all GT distribution optimization')
        thresh = optimal_test_thresh_equal_distribution(y_gt, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])
        print(f'thresh: {thresh}')
        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))
        if 0:
            """
            4. DUMB/Not needed: Based on prediction of whole panel
            """
            thresh = optimal_test_thresh_equal_distribution(y_gt, y_pred_mean, mask_true=False)
            q1_thresh = np.greater_equal(q1, thresh)
            concurrent([q1, q1_thresh, im_clean])
    print('Done')
def train_segm(self):
    """Train the (ti-)U-Net segmentation head per k-fold split, scoring and
    saving the performance after every epoch.

    NOTE(review): relies on attributes of the enclosing class (batch_norm,
    fixed_enc, ti, depth, k, lr_opt, epochs, w_patch, w_ext_in_ti,
    w_ext_in_ae, img_x, k_fold_train_data, flow_segm, get_tiunet_preenc,
    get_unet_preenc) — not all visible here.
    """
    folder_save = '/home/lameeus/data/ghent_altar/dataframes'
    # Encode the configuration into folder/file names.
    info_batchnorm = '_batchnorm' if self.batch_norm else ''
    # fixed_enc: 1 = encoder frozen, 2 = warm-start from the frozen-encoder run.
    info_fixed = '_encfixed' if self.fixed_enc == 1 else '_prefixed' if self.fixed_enc == 2 else ''
    info_model = 'tiunet' if self.ti else 'unet'
    filename_single = f'pretrained/{info_model}_10lamb_kfold{info_fixed}{info_batchnorm}/d{self.depth}_single'
    path_single = os.path.join(folder_save, filename_single + '.csv')
    # Deliberately lazy: i_fold is bound at call time inside the fold loop.
    get_info = lambda: f'10lamb_kfold_pretrained{info_fixed}{info_batchnorm}/{info_model}_d{self.depth}_k{self.k}_ifold{i_fold}'
    img_y_all = self.k_fold_train_data.get_train_data_all().get_y_train()

    def get_model():
        # Build a fresh segmentation model with a pretrained encoder.
        if self.ti:
            model = self.get_tiunet_preenc(k=self.k, lr=self.lr_opt)
        else:
            model = self.get_unet_preenc(k=self.k, lr=self.lr_opt)
        if self.fixed_enc == 2:
            # Warm-start: load epoch-100 weights of the frozen-encoder run.
            n_temp = NeuralNet(model)
            folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
            folder1 = f'10lamb_kfold_pretrained{"_encfixed"}{info_batchnorm}'
            folder2 = f'{info_model}_d{self.depth}_k{self.k}_ifold{i_fold}'
            n_temp.load(os.path.join(folder_weights, folder1, folder2), 100)
            del (n_temp)
        return model

    w_ext = self.w_ext_in_ti if self.ti else self.w_ext_in_ae

    if not self.lr_opt:
        # No learning rate chosen yet: run the LR finder instead of training.
        model_segm = get_model()
        find_learning_rate(model_segm, self.flow_segm, lr1=1e0)

    for i_fold in range(6):
        print(f'i_fold = {i_fold}')
        model_segm = get_model()
        n_segm = NeuralNet(model_segm, w_ext=w_ext)

        train_data_i = self.k_fold_train_data.k_split_i(i_fold)
        img_y_tr = train_data_i.get_y_train()
        img_y_te = train_data_i.get_y_test()
        flow_tr = get_flow(self.img_x, img_y_tr, w_patch=self.w_patch, w_ext_in=w_ext)
        flow_te = get_flow(self.img_x, img_y_te, w_patch=self.w_patch, w_ext_in=w_ext)
        info = get_info()

        # Train one epoch at a time so we can score after each epoch.
        for epoch in range(self.epochs):
            n_segm.train(flow_tr, flow_te, epochs=1, verbose=2, info=info)
            y_pred = n_segm.predict(self.img_x)
            thresh_single = optimal_test_thresh_equal_distribution(
                img_y_all, y_pred)
            # NOTE(review): 'epoch' here is 0-based while saved weight files
            # elsewhere are 1-based — confirm when joining dataframes.
            data_single_i = {'k': self.k, 'i_fold': i_fold, 'epoch': epoch}
            data_single_i.update(
                foo_performance(img_y_te, y_pred, thresh_single))
            lst_data_single = [data_single_i]
            df_single = pd.DataFrame(lst_data_single)
            pandas_save(path_single, df_single, append=True)
    return
class Main(object):
    """Train-and-evaluate driver for a segmentation net on the Zachary panel.

    Instantiating the class runs the whole experiment: build the net from the
    module-level ``set_net`` config, load data from ``set_data``, train for
    ``epochs`` epochs while scoring after each, and append all score tables
    to CSV files.

    NOTE(review): ``set_net``, ``set_data`` and ``mod`` are module-level
    globals — not visible here.
    """

    # Default hyper-parameters (class-level; k / n_per_class can be
    # overridden per instance via __init__).
    k = 8              # network capacity (filters)
    n_per_class = 80   # training samples per class
    d = 1              # network depth
    epochs = 100
    # Input extension depends on depth (receptive-field border).
    if d == 1:
        w_ext_in = 10
    elif d == 2:
        w_ext_in = 26
    w_patch = 10
    if 0:
        lr = 1e-3
        steps_per_epoch = 100
    else:
        lr = 1e-4
        steps_per_epoch = 1000

    def __init__(self, k=None, n_per_class=None):
        """Run the experiment.

        :param k: optional override of the class-level capacity.
        :param n_per_class: optional override of samples per class.
        """
        if k is not None:
            self.k = k
        if n_per_class is not None:
            self.n_per_class = n_per_class

        # Get net
        self.model_train = self.main_net(set_net)
        from methods.basic import NeuralNet
        self.neural_net = NeuralNet(self.model_train, w_ext=self.w_ext_in)

        # Get data
        train_data = self.main_data(set_data)

        # One result list for the validation set plus one per extra val set.
        n_val_datas = len(self.val_datas)
        lst_data = [[] for _ in range(n_val_datas + 1)]
        for _ in range(self.epochs):
            # Train
            self.main_train(train_data)
            # Evaluate
            data_lst = self.main_eval(train_data)
            for i, data_i in enumerate(data_lst):
                data_i.update({'epoch': self.neural_net.epoch})
                lst_data[i].append(data_i)

        # Persist one CSV per evaluation set.
        for i in range(n_val_datas + 1):
            df = pd.DataFrame(lst_data[i])
            print(df)
            if i == 0:
                data_name = 'val'
            else:
                data_name = self.val_datas[i - 1]['name']
            model_name = f'{set_net["name"]}_data{data_name}_d{self.d}_k{self.k}_n{self.n_per_class}'
            pandas_save(
                f'C:/Users/admin/OneDrive - ugentbe/data/dataframes/{model_name}.csv',
                df, append=True)

        if 0:  # debug: reload a specific checkpoint and plot
            if 0:
                self.neural_net.load(
                    'C:/Users/admin/Data/ghent_altar/net_weight/tiunet_d1_k10_n80', 4)
            self.main_eval(train_data, b_plot=True)
        print("Finished init")

    def main_net(self, set_n):
        """Build and compile the network described by ``set_n['name']``.

        :param set_n: dict with at least a 'name' key
                      ('ti-unet' | 'simple' | 'unet').
        :return: compiled Keras model.
        :raises ValueError: on an unknown network name.
        """
        from methods.examples import compile_segm
        from neuralNetwork.architectures import ti_unet, convNet, unet

        n_name = set_n['name'].lower()
        if n_name == 'ti-unet':
            model = ti_unet(9, filters=self.k, w=self.w_patch,
                            ext_in=self.w_ext_in // 2, batch_norm=True,
                            max_depth=self.d)
        elif n_name == 'simple':
            model = convNet(9, self.k, w_in=self.w_patch + self.w_ext_in,
                            n_convs=5, batch_norm=False, padding='valid')
            assert model.output_shape[-3:] == (self.w_patch, self.w_patch, 2)
        elif n_name == 'unet':
            print('NO BATCH NORM? (not implemented)')
            model = unet(9, filters=self.k, w=self.w_patch,
                         ext_in=self.w_ext_in // 2, max_depth=self.d,
                         n_per_block=1)
        else:
            raise ValueError(n_name)
            # raise NotImplementedError('Unet is not well implemented: * Double, batchnorm? f per layer etc?')
        model.summary()
        compile_segm(
            model, lr=self.lr)  # instead of 10e-3, 10e-4 is probs more stable.
        return model

    def main_data(self, set_data):
        """Load the training set and build the extra validation sets.

        Side effect: sets ``self.val_datas`` (list of {'name', 'y'} dicts).

        :param set_data: dict with a 'name' key; only 'zach_sh' is supported.
        :return: the (rescaled) training data object.
        :raises NotImplementedError: for any other dataset name.
        """
        if set_data['name'] == 'zach_sh':
            from datasets.default_trainingsets import get_13botleftshuang
            train_data = get_13botleftshuang(mod, n_per_class=self.n_per_class)
        else:
            raise NotImplementedError()

        from data.preprocessing import rescale0to1
        train_data.x = rescale0to1(train_data.x)

        from datasets.default_trainingsets import xy_from_df, panel13withoutRightBot
        from datasets.examples import get_13zach
        _, img_y = xy_from_df(get_13zach(), mod)
        img_y_top2, _ = panel13withoutRightBot(img_y)
        # Union of the top-2 annotations and the held-out test annotations.
        img_y_test = np.logical_or(img_y_top2, train_data.get_y_test())
        self.val_datas = [{
            'name': '13_top2',
            'y': img_y_top2
        }, {
            'name': '13_test',
            'y': img_y_test
        }]
        return train_data

    def main_train(self, train_data, steps_per_epoch=None):
        """Train for one epoch on patch generators built from ``train_data``.

        :param steps_per_epoch: defaults to the class-level setting.
        """
        if steps_per_epoch is None:
            steps_per_epoch = self.steps_per_epoch
        from main_general import get_training_data
        from preprocessing.image import get_flow
        # TODO train
        x_train, y_train, x_val, y_val = get_training_data(train_data)
        # Generator
        flow_tr = get_flow(x_train, y_train, w_patch=self.w_patch,
                           w_ext_in=self.w_ext_in)
        flow_va = get_flow(x_val, y_val, w_patch=self.w_patch,
                           w_ext_in=self.w_ext_in)
        epochs = 1
        self.neural_net.train(
            flow_tr,
            validation=flow_va,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            info=f'{set_net["name"]}_d{self.d}_k{self.k}_n{self.n_per_class}')

    def main_eval(
            self,
            train_data,
            b_plot=False,
    ):
        """Predict on the test image and score against every validation set.

        :param b_plot: also show input/prediction images when True.
        :return: list of per-set score dicts (from ``_eval_func``).
        """
        x = train_data.get_x_test()
        y_pred = self.neural_net.predict(x)
        if b_plot:
            concurrent([x[..., :3], y_pred[..., 1]], ['input', 'prediction'])
        val_datas = [{'y': train_data.get_y_test()}] + self.val_datas
        return _eval_func(y_pred, val_datas, b_plot=b_plot)
def transfer_learning(
        epoch=25,  # Could check a few
        b_plot=False):
    """Compare ti-unet models trained on different source data (transfer
    learning variants) on the Lamb panel.

    :param epoch: checkpoint epoch to evaluate (offset for transfer runs).
    :param b_plot: show the predictions side by side when True.
    :return: dict mapping train_data name -> kappa evaluation dict.
    """
    d = 2  # 1, 2
    img_x, img_y_val = data_lamb()

    k = 10
    model_name = 'ti-unet'
    w_ext = 10 if d == 1 else 26
    # train_data:
    y_pred_lst = []
    n = ['clean']  # panel titles for the plot; first entry is the clean image
    # train_data_lst = ['1319_10', '10', '1319', '1319_101319']
    train_data_lst = ['10nat', '1319_10nat', '1319_10nat1319', '1319']
    data_i_lst = {}
    for train_data in train_data_lst:
        print(train_data)
        # Transfer runs ('1319_*') were saved with a 50-epoch offset;
        # the pure '1319' baseline only has its final epoch 50.
        epoch_start = 50
        epoch_corr = epoch + epoch_start if train_data[:5] == '1319_' else epoch
        if train_data == '1319':
            epoch_corr = 50
        path = f'C:/Users/admin/Data/ghent_altar/net_weight/{train_data}/{model_name}_d{d}_k{k}/w_{epoch_corr}.h5'
        try:
            model = load_model_quick(path)
        except Exception as e:
            # Checkpoint may not exist; skip this variant.
            print(e)
            continue
        neural_net = NeuralNet(model, w_ext=w_ext, norm_x=True)
        y_pred = neural_net.predict(img_x)

        # baseline
        data_i = _eval_func_single(img_y_val, y_pred, metric='kappa')
        print(data_i)

        if 0:
            """
            Checking which baseline ~ .22
            i = 0: .268, Remove huge improvement ( a lot of "green" background annotated as paint loss)
            i = 1: .228 Keep!
            i = 2: .179 keep! Drop (keep!!
            i = 3: .159 keep! Even more important
            i = 4: .252 Remove (huge problem right top)
            i = 5: .233 Keep, quit relevant
            """
            from datasets.default_trainingsets import get_10lamb_6patches
            kFoldTrainData = get_10lamb_6patches(5)
            _eval_func_single(
                kFoldTrainData.k_split_i(0).get_y_train(), y_pred,
                metric='kappa')
            # Check what is influence without!

        data_i_lst[train_data] = data_i
        data_i = _eval_func_single(img_y_val, y_pred, metric='jaccard')
        print(data_i)

        y_pred_lst.append(y_pred)
        n.append(train_data)
        # plt.imshow(neural_net.predict(img_x[::2,::2,:])[..., 1])

    if b_plot:
        concurrent([img_x[..., :3]] + [a[..., 1] for a in y_pred_lst], n)

    if 0:  # export thresholded overlays as figures for the paper
        from figures_paper.overlay import semi_transparant
        from data.datatools import imread, imsave
        t = [data_i_lst[n_i]['thresh'] for n_i in train_data_lst]
        p = []
        for i, train_data in enumerate(train_data_lst):
            b = np.greater_equal(y_pred_lst[i][..., 1], t[i])
            k = semi_transparant(img_x[..., :3], b, 0)
            p.append(k)
            imsave(
                os.path.join(
                    "C:/Users/admin/OneDrive - ugentbe/data/images_paper",
                    train_data + '.png'), k)
        concurrent(p)

    return data_i_lst
f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{epoch}.h5') model = load_model_quick(path) neural_net = NeuralNet(model, w_ext=10, norm_x=True) # Image from scripts.journal_paper.comparison_sh.shared import load_data if data == '1319botright': a = load_data("19botright", n_per_class=80) else: a = load_data(data, n_per_class=80) img_x, img_y = a.get_x_train(), a.get_y_test() y_pred = neural_net.predict(img_x) if 1: Evaluater(img_y, y_pred).summary() print("For the tables!") def average_out_pred(r=2): model_name = 'ti-unet' path = os.path.join( folder_base, f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{1}.h5') model_i = load_model_quick(path) neural_net_i = NeuralNet(model_i, w_ext=10, norm_x=True)
def main():
    """Evaluate imbalanced ti-unet checkpoints per capacity k on panel 13/19.

    For each k, loads every epoch (descending), scores the single-epoch
    prediction with an incrementally optimised threshold, then scores the
    across-epoch averaged prediction; both tables are written to CSV.

    NOTE(review): ``epochs_tot``, ``loss``, ``accuracy_with0``,
    ``jaccard_with0`` and ``kappa_loss`` are module-level globals — not
    visible here.

    :return: None
    """
    ### Settings
    mod = 5
    panel_nr = 19
    i_start, i_end = 1, epochs_tot
    # i_start ,i_end = 1, 2
    k_lst = np.arange(1, 21)
    # k_lst = [1, 2]
    verbose = 0
    b_plot = False
    ###
    if panel_nr == 13:
        train_data = get_13botleftshuang(mod=mod)
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/lamb_segmentation'
    elif panel_nr == 19:
        train_data = get_19SE_shuang(mod=mod)
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/19_hand_SE'
    else:
        raise ValueError(panel_nr)
    x, y_tr, _, y_te = get_training_data(train_data)
    (y_tr, y_te) = map(batch2img, (y_tr, y_te))
    assert i_end >= i_start
    if b_plot:
        # plotting
        pred_lst = []
        info_lst = []
    lst_data = []
    lst_data_avg_pred = []
    for k in k_lst:
        model = None
        pred_lst = []
        # Descending epoch order: full model is loaded once at i_end, then
        # only the weights are swapped for earlier epochs.
        for epoch in np.arange(i_start, i_end + 1)[::-1]:
            info = f'settings: k {k}; epoch {epoch}'
            print('\n\t' + info)
            filepath_model = os.path.join(folder_weights,
                                          f'ti_unet_k{k}_imbalanced/w_{epoch}.h5')
            if epoch == i_end:
                model = load_model(filepath_model,
                                   custom_objects={'loss': loss,
                                                   'accuracy_with0': accuracy_with0,
                                                   'jaccard_with0': jaccard_with0,
                                                   'kappa_loss': kappa_loss
                                                   })
            else:
                model.load_weights(filepath_model)
            n = NeuralNet(model, w_ext=10)
            y_pred = n.predict(x)
            o = y_pred[..., 1]  # paint-loss probability map
            pred_lst.append(o)

            def print_conf(y_true, y_pred):
                # Print the confusion matrix on annotated pixels only.
                y_true = batch2img(y_true)
                y_pred = batch2img(y_pred)
                b_annot = np.sum(y_true, axis=-1).astype(bool)
                y_true_annot = y_true[b_annot, :].argmax(axis=-1)
                y_pred_annot = y_pred[b_annot, :].argmax(axis=-1)
                """
                T0; predicted 1, but is 0
                predicted 0, but is 1; T1
                """
                conf_mat = confusion_matrix(y_true_annot, y_pred_annot)
                print(conf_mat)

            if 1:
                # Single prediction
                if verbose == 1:
                    print_conf(y_tr, y_pred)
                    print_conf(y_te, y_pred)
                if b_plot:
                    pred_lst.append(o)
                    info_lst.append(info)
                # Threshold tuned incrementally on train/test annotations.
                test_thresh = test_thresh_incremental(y_pred, y_tr, y_te, n=5,
                                                      verbose=0)
                pred_thresh = np.greater_equal(o, test_thresh)
                pred_thresh_bin = np.stack([1 - pred_thresh, pred_thresh],
                                           axis=-1)
                y_te_flat, y_pred_flat = filter_non_zero(y_te, pred_thresh_bin)
                y_te_argmax = np.argmax(y_te_flat, axis=-1)
                y_pred_argmax = np.argmax(y_pred_flat, axis=-1)
                acc, jacc, kappa = _get_scores(y_te_argmax, y_pred_argmax)
                if verbose == 1:
                    print_conf(y_tr, pred_thresh_bin)
                    print_conf(y_te, pred_thresh_bin)
                if 0:
                    concurrent([pred_thresh])
                data_i = {'k': k,
                          'epoch': epoch,
                          'test_thresh': test_thresh,
                          'kappa': kappa,
                          'accuracy': acc,
                          'jaccard': jacc
                          }
                lst_data.append(data_i)
        if 1:
            # avg prediction
            pred_i_average = np.mean(pred_lst, axis=0)
            # optimizing threshold prediction
            test_thresh = test_thresh_incremental(
                np.stack([1 - pred_i_average, pred_i_average], axis=-1),
                y_tr, y_te, n=5, verbose=0)
            pred_thresh = np.greater_equal(pred_i_average, test_thresh)
            pred_thresh_bin = np.stack([1 - pred_thresh, pred_thresh], axis=-1)
            y_te_flat, y_pred_flat = filter_non_zero(y_te, pred_thresh_bin)
            y_te_argmax = np.argmax(y_te_flat, axis=-1)
            y_pred_argmax = np.argmax(y_pred_flat, axis=-1)
            acc, jacc, kappa = _get_scores(y_te_argmax, y_pred_argmax)
            # NOTE(review): 'epoch_start' records the loop variable after the
            # descending loop, i.e. the LOWEST epoch averaged — confirm intent.
            data_i = {'k': k,
                      'epoch_start': epoch,
                      'test_thresh': test_thresh,
                      'kappa': kappa,
                      'accuracy': acc,
                      'jaccard': jacc
                      }
            lst_data_avg_pred.append(data_i)
    b = True
    if b:
        df = pd.DataFrame(lst_data)
        filename_save = f'tiunet_1pool_shaoguang{panel_nr}_imbalanced'
        filename_path = f'/scratch/lameeus/data/ghent_altar/dataframes/{filename_save}.csv'
        df.to_csv(filename_path, sep=';')
        df = pd.DataFrame(lst_data_avg_pred)
        filename_save = f'tiunet_1pool_shaoguang{panel_nr}_imbalanced_averaging'
        df.to_csv(f'/scratch/lameeus/data/ghent_altar/dataframes/{filename_save}.csv', sep=';')
    if b_plot:
        concurrent(pred_lst, info_lst)
        plt.show()
    return