def set_data(self):
    if self.set == 10:
        train_data = get_10lamb_old(5)
        img_x, _, _, _ = get_training_data(train_data)
    elif self.set == 13:
        img_x, _, _, _ = get_training_data(get_13(5))
    else:
        raise ValueError(self.set)  # Only sets 10 and 13 are supported

    # Normalise the input!
    img_x = rescale0to1(img_x)
    self.img_x = img_x

    self.k_fold_train_data = get_10lamb_6patches(5)
def main_train(self, train_data, steps_per_epoch=None):
    if steps_per_epoch is None:
        steps_per_epoch = self.steps_per_epoch

    from main_general import get_training_data
    from preprocessing.image import get_flow

    # TODO train
    x_train, y_train, x_val, y_val = get_training_data(train_data)

    # Generators for training and validation patches
    flow_tr = get_flow(x_train, y_train, w_patch=self.w_patch, w_ext_in=self.w_ext_in)
    flow_va = get_flow(x_val, y_val, w_patch=self.w_patch, w_ext_in=self.w_ext_in)

    epochs = 1
    self.neural_net.train(flow_tr,
                          validation=flow_va,
                          epochs=epochs,
                          steps_per_epoch=steps_per_epoch,
                          info=f'{set_net["name"]}_d{self.d}_k{self.k}_n{self.n_per_class}')
def folds_annot():
    train_data = get_10lamb_old(5)
    img_x, _, _, _ = get_training_data(train_data)
    img_clean = img_x[..., :3]

    lst_get = [get_borders1, get_borders2, get_borders3,
               get_borders4, get_borders5, get_borders6]

    for i_fold in range(6):
        img_annot = imread(f'/home/lameeus/data/ghent_altar/input/hierachy/10_lamb/annotations/kfold/annot_{i_fold+1}.png')
        y1 = annotations2y(img_annot, thresh=.8)[..., 1]

        a = semi_transparant(img_clean, y1.astype(bool))

        w0, w1, h0, h1 = lst_get[i_fold]()
        clean_annot_crop = a[h0:h1, w0:w1, :]
        img_clean_crop = img_clean[h0:h1, w0:w1, :]

        if 0:   # Debug: show the crops side by side
            concurrent([img_clean_crop, clean_annot_crop])

        folder_save = '/scratch/lameeus/data/ghent_altar/input/hierarchy/10lamb/ifolds'
        imsave(os.path.join(folder_save, f'clean_crop_ifold{i_fold}.png'), img_clean_crop)
        imsave(os.path.join(folder_save, f'clean_annot_crop_ifold{i_fold}.png'), clean_annot_crop)
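
# For reference, a minimal sketch of what `semi_transparant` is assumed to do
# (the actual implementation lives elsewhere in this repo): blend a highlight
# colour into the clean image wherever the annotation mask is True. The helper
# name and the `alpha`/`colour` defaults are illustrative only.
def _semi_transparant_sketch(img_rgb, mask, alpha=.5, colour=(1., 0., 0.)):
    """Overlay `colour` semi-transparently on img_rgb where mask is True."""
    import numpy as np
    out = img_rgb.astype(float).copy()
    out[mask] = (1 - alpha) * out[mask] + alpha * np.asarray(colour)
    return out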
def set_img_x(self):
    if self.set_nr == 13:
        train_data = get_13(self.mod)

        from data.datatools import imread
        from data.conversion_tools import annotations2y

        # Keep the original annotations as test set; train on the "practical" annotations
        train_data.y_te = np.copy(train_data.y_tr)
        train_data.y_tr = annotations2y(imread('/home/lameeus/data/ghent_altar/input/hierarchy/13_small/clean_annot_practical.png'),
                                        thresh=.9)
    else:
        raise NotImplementedError(self.set_nr)  # Only panel 13 is set up here

    img_x, img_y, _, img_y_te = get_training_data(train_data)
    # Normalise the input!
    img_x = rescale0to1(img_x)

    self.img_x = img_x
    self.img_y_tr = img_y
    self.img_y_te = img_y_te

    train_data_10 = get_10lamb_6patches(self.mod).get_train_data_all()
    img_x_10, img_y_10, _, _ = get_training_data(train_data_10)
    # Normalise the input!
    img_x_10 = rescale0to1(img_x_10)

    self.flow_tr_set = get_flow(self.img_x, self.img_y_tr,
                                w_patch=self.w_patch,
                                w_ext_in=self.w_ext_in_ti)
    self.flow_tr_10 = get_flow(img_x_10, img_y_10,
                               w_patch=self.w_patch,
                               w_ext_in=self.w_ext_in_ti)

    # Repeat the current panel 10x so it is not drowned out by the 10-lamb data
    n_multiply = 10
    self.flow_tr_set_10 = get_flow([self.img_x] * n_multiply + [img_x_10],
                                   [self.img_y_tr] * n_multiply + [img_y_10],
                                   w_patch=self.w_patch,
                                   w_ext_in=self.w_ext_in_ti)

    # Autoencoder flow: the input image is also the target
    self.flow_ae_tr = get_flow(self.img_x, self.img_x,
                               w_patch=self.w_patch,
                               w_ext_in=self.w_ext_in_ae)
def set_flow(train_data):
    x_train, y_train, _, _ = get_training_data(train_data)

    global flow_tr
    flow_tr = get_flow(x_train, y_train, w_patch=w_patch, w_ext_in=w_ext_in)
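
# `get_flow` (from preprocessing.image) is used throughout with a patch width
# `w_patch` and an extended input width `w_ext_in`. A minimal sketch of the
# assumed behaviour, for readers without access to that module: an endless
# generator of (input, target) patch batches, where the input patch carries an
# extra border of w_ext_in // 2 pixels on each side so that a 'valid'-padded
# network shrinks it back down to the w_patch-wide target. The helper name and
# batch size are illustrative, not the repo implementation.
def _flow_sketch(x, y, w_patch, w_ext_in, batch_size=32):
    import numpy as np
    h, w = y.shape[:2]
    ext = w_ext_in // 2
    while True:
        xb, yb = [], []
        for _ in range(batch_size):
            # Top-left corner of the target patch, kept away from the border
            i = np.random.randint(ext, h - w_patch - ext)
            j = np.random.randint(ext, w - w_patch - ext)
            xb.append(x[i - ext:i + w_patch + ext, j - ext:j + w_patch + ext])
            yb.append(y[i:i + w_patch, j:j + w_patch])
        yield np.stack(xb), np.stack(yb)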
def set_img_x(self):
    train_data = get_10lamb_old(self.mod)
    img_x, _, _, _ = get_training_data(train_data)
    # Normalise the input!
    img_x = rescale0to1(img_x)
    self.img_x = img_x
def main():
    """
    Train k-fold segmentation networks with a pre-trained encoder on panel 10
    (the Lamb) and evaluate epoch-averaged predictions.
    """

    ### Settings
    mod = 5
    w_patch = 16 * 2

    """
    Data (all important modalities)
    """
    # folder_windows = r'C:\Users\Laurens_laptop_w\OneDrive - UGent\data\10lamb'
    train_data = get_10lamb_old(mod)
    img_x, img_y_tr, _, _ = get_training_data(train_data)
    # Normalise the input!
    img_x = rescale0to1(img_x)

    """
    Train segmentation
        1) reuse everything
        2) fix encoder
    """
    if 1:   # Toggle: train the segmentation networks
        if 1:
            b_encoder_fixed = False

            info_enc_fixed = '_enc_fixed' if b_encoder_fixed else ''
            get_info = lambda: f'10lamb_kfold_pretrained{info_enc_fixed}/unet_enc_k{k}_ifold{i_fold}'

            n_epochs = 40

            k = 10
            if k == 10:
                epoch_w = 100
            else:
                raise NotImplementedError()

            ### Settings you don't have to change:
            w_patch = 50
            w_ext_in = 28
            b_double = False
            padding = 'valid'

        # TODO flag for converting encoder to dilated conv

        def get_unet_pretrained_encoder():
            model_encoder = get_model_encoder()

            encoder_inputs = model_encoder.input
            decoder_outputs = decoder(model_encoder, f_out=2)

            model_pretrained_unet = Model(encoder_inputs, decoder_outputs)
            from methods.examples import compile_segm
            compile_segm(model_pretrained_unet, lr=1e-4)

            model_pretrained_unet.summary()
            return model_pretrained_unet

        """
        Train
        """
        k_fold_train_data = get_10lamb_6patches(5)
        for i_fold in range(6):
            """
            Get a new network (not trained yet for segmentation)
            """
            model_pretrained_unet = get_unet_pretrained_encoder()
            n_pretrained_unet = NeuralNet(model_pretrained_unet)

            """
            The data
            """
            train_data_i = k_fold_train_data.k_split_i(i_fold)

            info = get_info()

            img_y_tr = train_data_i.get_y_train()
            img_y_te = train_data_i.get_y_test()

            flow_tr = get_flow(img_x, img_y_tr,
                               w_patch=w_patch,  # Comes from 10
                               w_ext_in=w_ext_in)
            flow_te = get_flow(img_x, img_y_te,
                               w_patch=w_patch,  # Comes from 10
                               w_ext_in=w_ext_in)

            n_pretrained_unet.train(flow_tr, flow_te, epochs=n_epochs, verbose=1, info=info)

            """
            Prediction
            """
            n_pretrained_unet.w_ext = w_ext_in
            y_pred = n_pretrained_unet.predict(img_x)

            concurrent([y_pred[..., 1]])

    """
    Classification
    """
    if 1:   # Toggle: evaluate epoch-averaged predictions for one fold
        im_clean = img_x[..., :3]

        k = 8
        i_fold = 3
        epoch_last = 40

        from methods.examples import kappa_loss, weighted_categorical_crossentropy
        from performance.metrics import accuracy_with0, jaccard_with0
        loss = weighted_categorical_crossentropy((1, 1))

        list_y_pred = []

        ### K-fold validation
        k_fold_train_data = get_10lamb_6patches(5)
        train_data_i = k_fold_train_data.k_split_i(i_fold)
        img_y_tr = train_data_i.get_y_train()
        img_y_te = train_data_i.get_y_test()

        for epoch in np.arange(31, epoch_last + 1):
            filepath_model = f'/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5'

            model = load_model(filepath_model, custom_objects={'loss': loss,
                                                               'accuracy_with0': accuracy_with0,
                                                               'jaccard_with0': jaccard_with0,
                                                               'kappa_loss': kappa_loss})

            n = NeuralNet(model, w_ext=10)
            y_pred = n.predict(img_x)

            list_y_pred.append(y_pred)

        # Average the per-epoch predictions
        y_pred_mean = np.mean(list_y_pred, axis=0)
        q1 = y_pred_mean[..., 1]
        concurrent([q1, q1.round(), im_clean])

        """
        Optimal threshold (making conf matrix symmetric, not based on maximising kappa)
        """
        y_gt = np.any([img_y_tr, img_y_te], axis=0)

        from performance.testing import _get_scores, filter_non_zero

        def foo_performance(y_true, y_pred, thresh):
            # Threshold the positive-class probability (at thresh=.5 this is basically argmax)
            y_pred_thresh_arg = np.greater_equal(y_pred[..., 1], thresh)

            y_true_flat, y_pred_thresh_arg_flat = filter_non_zero(y_true, y_pred_thresh_arg)
            y_te_argmax = np.argmax(y_true_flat, axis=-1)

            # Kappa
            return _get_scores(y_te_argmax, y_pred_thresh_arg_flat)[-1]

        """
        1. BEST? PERFORMANCE based on test set
        """
        print('1. Test distribution optimization')
        thresh = optimal_test_thresh_equal_distribution(img_y_te, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        print('\nIncremental optimization on test set')
        test_thresh2 = test_thresh_incremental(y_pred_mean, img_y_tr, img_y_te, n=5, verbose=0)

        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, test_thresh2))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, test_thresh2))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, test_thresh2))

        """
        2. based on train
        """
        print('\n2. Training distribution optimization')
        thresh = optimal_test_thresh_equal_distribution(img_y_tr, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        """
        3. CONSISTENT: based on train+test
        """
        print('\n3. All-GT distribution optimization')
        thresh = optimal_test_thresh_equal_distribution(y_gt, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        if 0:   # Disabled: not needed
            """
            4. DUMB/Not needed: based on prediction of whole panel
            """
            thresh = optimal_test_thresh_equal_distribution(y_gt, y_pred_mean, mask_true=False)
            q1_thresh = np.greater_equal(q1, thresh)
            concurrent([q1, q1_thresh, im_clean])

    print('Done')
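
# A minimal sketch of the thresholding idea used above. The repo's
# `optimal_test_thresh_equal_distribution` is assumed to pick the threshold at
# which the fraction of predicted positives matches the positive fraction in
# the annotated ground truth, which makes the confusion matrix symmetric. This
# helper and its masking convention are illustrative assumptions, not the
# actual implementation.
def _equal_distribution_thresh_sketch(y_true, y_pred):
    """y_true: one-hot (..., 2) annotations (all-zero = unannotated);
    y_pred: (..., 2) class probabilities."""
    import numpy as np
    mask = np.sum(y_true, axis=-1) > 0        # annotated pixels only
    p_pos = y_pred[..., 1][mask]              # positive-class probability
    pos_rate = y_true[..., 1][mask].mean()    # ground-truth positive fraction
    # The (1 - pos_rate) quantile leaves ~pos_rate of the pixels above threshold
    return np.quantile(p_pos, 1. - pos_rate)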
def main():
    ### Settings
    mod = 5
    panel_nr = 19

    i_start, i_end = 1, epochs_tot
    # i_start, i_end = 1, 2
    k_lst = np.arange(1, 21)
    # k_lst = [1, 2]

    verbose = 0
    b_plot = False
    ###

    if panel_nr == 13:
        train_data = get_13botleftshuang(mod=mod)
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/lamb_segmentation'
    elif panel_nr == 19:
        train_data = get_19SE_shuang(mod=mod)
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/19_hand_SE'
    else:
        raise ValueError(panel_nr)

    x, y_tr, _, y_te = get_training_data(train_data)
    y_tr, y_te = map(batch2img, (y_tr, y_te))

    assert i_end >= i_start

    if b_plot:
        # plotting
        pred_lst = []
        info_lst = []

    lst_data = []
    lst_data_avg_pred = []

    for k in k_lst:
        model = None
        pred_lst = []

        for epoch in np.arange(i_start, i_end + 1)[::-1]:
            info = f'settings: k {k}; epoch {epoch}'
            print('\n\t' + info)

            filepath_model = os.path.join(folder_weights, f'ti_unet_k{k}_imbalanced/w_{epoch}.h5')

            if epoch == i_end:
                model = load_model(filepath_model, custom_objects={'loss': loss,
                                                                   'accuracy_with0': accuracy_with0,
                                                                   'jaccard_with0': jaccard_with0,
                                                                   'kappa_loss': kappa_loss})
            else:
                # Reuse the architecture; only swap in this epoch's weights
                model.load_weights(filepath_model)

            n = NeuralNet(model, w_ext=10)
            y_pred = n.predict(x)

            o = y_pred[..., 1]
            pred_lst.append(o)

            def print_conf(y_true, y_pred):
                y_true = batch2img(y_true)
                y_pred = batch2img(y_pred)

                b_annot = np.sum(y_true, axis=-1).astype(bool)
                y_true_annot = y_true[b_annot, :].argmax(axis=-1)
                y_pred_annot = y_pred[b_annot, :].argmax(axis=-1)

                """
                T0; predicted 1, but is 0
                predicted 0, but is 1; T1
                """
                conf_mat = confusion_matrix(y_true_annot, y_pred_annot)
                print(conf_mat)

            if 1:   # Single prediction
                if verbose == 1:
                    print_conf(y_tr, y_pred)
                    print_conf(y_te, y_pred)

                if b_plot:
                    pred_lst.append(o)
                    info_lst.append(info)

                test_thresh = test_thresh_incremental(y_pred, y_tr, y_te, n=5, verbose=0)

                pred_thresh = np.greater_equal(o, test_thresh)
                pred_thresh_bin = np.stack([1 - pred_thresh, pred_thresh], axis=-1)

                y_te_flat, y_pred_flat = filter_non_zero(y_te, pred_thresh_bin)
                y_te_argmax = np.argmax(y_te_flat, axis=-1)
                y_pred_argmax = np.argmax(y_pred_flat, axis=-1)

                acc, jacc, kappa = _get_scores(y_te_argmax, y_pred_argmax)

                if verbose == 1:
                    print_conf(y_tr, pred_thresh_bin)
                    print_conf(y_te, pred_thresh_bin)

                if 0:
                    concurrent([pred_thresh])

                data_i = {'k': k,
                          'epoch': epoch,
                          'test_thresh': test_thresh,
                          'kappa': kappa,
                          'accuracy': acc,
                          'jaccard': jacc}
                lst_data.append(data_i)

        if 1:   # Average prediction over the evaluated epochs
            pred_i_average = np.mean(pred_lst, axis=0)

            # Optimise the threshold on the averaged prediction
            test_thresh = test_thresh_incremental(np.stack([1 - pred_i_average, pred_i_average], axis=-1),
                                                  y_tr, y_te, n=5, verbose=0)

            pred_thresh = np.greater_equal(pred_i_average, test_thresh)
            pred_thresh_bin = np.stack([1 - pred_thresh, pred_thresh], axis=-1)

            y_te_flat, y_pred_flat = filter_non_zero(y_te, pred_thresh_bin)
            y_te_argmax = np.argmax(y_te_flat, axis=-1)
            y_pred_argmax = np.argmax(y_pred_flat, axis=-1)

            acc, jacc, kappa = _get_scores(y_te_argmax, y_pred_argmax)

            data_i = {'k': k,
                      'epoch_start': epoch,
                      'test_thresh': test_thresh,
                      'kappa': kappa,
                      'accuracy': acc,
                      'jaccard': jacc}
            lst_data_avg_pred.append(data_i)

    b = True
    if b:
        df = pd.DataFrame(lst_data)
        filename_save = f'tiunet_1pool_shaoguang{panel_nr}_imbalanced'
        filename_path = f'/scratch/lameeus/data/ghent_altar/dataframes/{filename_save}.csv'
        df.to_csv(filename_path, sep=';')

        df = pd.DataFrame(lst_data_avg_pred)
        filename_save = f'tiunet_1pool_shaoguang{panel_nr}_imbalanced_averaging'
        df.to_csv(f'/scratch/lameeus/data/ghent_altar/dataframes/{filename_save}.csv', sep=';')
    if b_plot:
        concurrent(pred_lst, info_lst)
        plt.show()

    return
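
# Both scripts rely on `test_thresh_incremental` to pick a decision threshold.
# A minimal sketch of the assumed coarse-to-fine search (illustrative; the
# actual repo function takes the prediction and the train/test annotations):
# scan a grid of thresholds, keep the kappa-maximising one, then refine the
# grid around it for n rounds.
def _thresh_search_sketch(p_pos_flat, y_true_flat, n=5):
    """p_pos_flat: flat positive-class probabilities; y_true_flat: flat binary labels."""
    import numpy as np
    from sklearn.metrics import cohen_kappa_score

    lo, hi = 0., 1.
    best_t = .5
    for _ in range(n):
        candidates = np.linspace(lo, hi, 11)
        scores = [cohen_kappa_score(y_true_flat, p_pos_flat >= t) for t in candidates]
        best_t = candidates[int(np.argmax(scores))]
        # Narrow the search window around the current best threshold
        step = (hi - lo) / 10.
        lo, hi = max(0., best_t - step), min(1., best_t + step)
    return best_t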