Exemplo n.º 1
0
    def average_out_pred(r=2):
        """Average the class-1 prediction over the epoch window [epoch-r, epoch+r].

        :param r: half-width of the epoch window to average over.
        :return: pixel-wise mean of the class-1 prediction maps over the
            epochs that loaded successfully.
        """
        model_name = 'ti-unet'

        # Path of an arbitrary weight file (w_1), used only to instantiate the
        # architecture; per-epoch weights are loaded inside the loop below.
        path = os.path.join(
            folder_base,
            f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{1}.h5')
        model_i = load_model_quick(path)

        neural_net_i = NeuralNet(model_i, w_ext=10, norm_x=True)

        y_pred_lst = []
        # NOTE(review): the original re-assigned `r = 2` here, silently
        # overriding the `r` parameter; removed so the parameter takes effect.
        for epoch_i in range(epoch - r, epoch + r + 1):  # epochs

            neural_net_i.load(path.rsplit('/', 1)[0], epoch_i)  # Load

            try:
                y_pred_i = neural_net_i.predict(img_x)
            except Exception as e:
                # Missing/corrupt weight file for this epoch: skip it.
                print(e)
                continue

            y_pred_lst.append(y_pred_i[..., 1])
        y_pred_avg = np.mean(y_pred_lst, axis=0)

        return y_pred_avg
Exemplo n.º 2
0
def eval_3outputs():
    """Evaluate the 3-output ti-unet on the '19botrightcrack3' set.

    Loads the epoch-25 weights, predicts on the training image, collapses the
    3-class output to a binary (not-class-1 / class-1) prediction and prints
    the evaluation result.
    """

    folder_base = 'C:/Users/admin/Data/ghent_altar/' if os.name == 'nt' else '/scratch/lameeus/data/ghent_altar/'

    # Only defined for the 3-class crack-annotated set.
    assert data == '19botrightcrack3'

    k = 9
    epoch = 25
    model_name = 'ti-unet'

    # NOTE(review): the original assigned folder_base a second time with the
    # identical value; the duplicate assignment was removed.
    path = os.path.join(
        folder_base,
        f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{epoch}.h5')

    model = load_model_quick(path)
    neural_net = NeuralNet(model, w_ext=10, norm_x=True)

    from scripts.journal_paper.comparison_sh.shared import load_data
    a = load_data("19botright", 80)
    img_x, y_eval = a.get_x_train(), a.get_y_test()

    y_pred = neural_net.predict(img_x)

    assert y_pred.shape[-1] == 3
    # Collapse to binary: class 1 vs everything else.
    y_pred2 = np.stack([1 - y_pred[..., 1], y_pred[..., 1]], axis=-1)

    data_i = _eval_func_single(y_eval, y_pred2)

    print(data_i)

    return
Exemplo n.º 3
0
    def __init__(self):
        """Load data, evaluate unet weights for epochs 1..10 on every
        validation set, then visualise the hard-coded best epoch (4)."""

        # data
        self.data()

        # Load model(s)

        model_name = 'unet'  # ['simple', 'ti-unet', 'unet']:

        folder = f'C:/Users/admin/Data/ghent_altar/net_weight/{model_name}_d1_k9_n80'
        epoch = 1
        path = f'C:/Users/admin/Data/ghent_altar/net_weight/{model_name}_d1_k9_n80/w_{epoch}.h5'

        from scripts.scripts_performance.main_performance import load_model_quick
        model = load_model_quick(path)
        neural_net = NeuralNet(model, w_ext=10, norm_x=True)

        model.summary()

        for epoch in range(1, 10 + 1):
            print('epoch', epoch)
            neural_net.load(folder, epoch)

            # Predict
            y_pred = neural_net.predict(self.img_x)

            if 0:
                plt.imshow(y_pred[..., 0])
                plt.show()

            for val_name in self.val:
                print(val_name)

                y_true_val = self.val[val_name]

                data_i = _eval_func_single(y_true_val, y_pred)

                print(data_i)

        # TODO best performing (ti-unet: 4)
        neural_net.load(folder, 4)

        y_pred = neural_net.predict(self.img_x)

        from performance.testing import get_y_pred_thresh
        # NOTE(review): data_i carries over from the LAST val set of the LAST
        # epoch of the loop above -- confirm that threshold is the intended one.
        y_pred_thresh = get_y_pred_thresh(y_pred, data_i['thresh'])

        concurrent([
            self.img_x[..., :3], self.img_y[..., 0], y_pred[..., 0],
            y_pred_thresh[..., 0]
        ])
        # NOTE(review): removed the trailing bare `y_pred` expression (no-op).
Exemplo n.º 4
0
    def __init__(self, k=None, n_per_class=None):
        """Build the net, train for self.epochs epochs, evaluate after each
        epoch, and append the per-dataset score tables to CSV.

        :param k: optional override of the filter count self.k.
        :param n_per_class: optional override of samples per class.
        """

        if k is not None:
            self.k = k

        if n_per_class is not None:
            self.n_per_class = n_per_class

        # Get net
        self.model_train = self.main_net(set_net)
        from methods.basic import NeuralNet

        self.neural_net = NeuralNet(self.model_train, w_ext=self.w_ext_in)

        # Get data
        train_data = self.main_data(set_data)

        # One score list per validation dataset, plus slot 0 for 'val'.
        n_val_datas = len(self.val_datas)
        lst_data = [[] for _ in range(n_val_datas + 1)]
        for _ in range(self.epochs):
            # Train
            self.main_train(train_data)

            # Evaluate
            data_lst = self.main_eval(train_data)

            for i, data_i in enumerate(data_lst):
                # Tag each score dict with the epoch it was produced at.
                data_i.update({'epoch': self.neural_net.epoch})

                lst_data[i].append(data_i)

        for i in range(n_val_datas + 1):
            df = pd.DataFrame(lst_data[i])
            print(df)

            # Slot 0 is the default validation set; the rest are named sets.
            if i == 0:
                data_name = 'val'
            else:
                data_name = self.val_datas[i - 1]['name']
            model_name = f'{set_net["name"]}_data{data_name}_d{self.d}_k{self.k}_n{self.n_per_class}'
            pandas_save(
                f'C:/Users/admin/OneDrive - ugentbe/data/dataframes/{model_name}.csv',
                df,
                append=True)

        # Disabled debug path: reload specific weights and plot evaluation.
        if 0:
            if 0:
                self.neural_net.load(
                    'C:/Users/admin/Data/ghent_altar/net_weight/tiunet_d1_k10_n80',
                    4)
            self.main_eval(train_data, b_plot=True)

        print("Finished init")
Exemplo n.º 5
0
    def model(self, b_optimal_lr=False):
        """Build and compile the segmentation model, wrapping it in self.neural_net.

        :param b_optimal_lr: if True, run a learning-rate sweep on the global
            flow_tr instead of trusting the module-level lr.
        :raises ValueError: if the module-level model_name is unknown.
        """

        # 3 output classes only for the crack-annotated set, otherwise binary.
        features_out = 3 if data_name == '19botrightcrack3' else 2

        if model_name == 'ti-unet':
            model = ti_unet(
                9,
                filters=self.k,
                w=w_patch,
                ext_in=w_ext_in // 2,
                batch_norm=True,
                max_depth=d,
                features_out=features_out,
            )

        elif model_name == 'unet':
            # model = ti_unet(9, filters=self.k, w=w_patch, ext_in=w_ext_in // 2, batch_norm=True,
            #                 max_depth=d)
            print('NO BATCH NORM? (not implemented)')
            model = unet(9,
                         filters=self.k,
                         w=w_patch,
                         ext_in=w_ext_in // 2,
                         max_depth=d,
                         n_per_block=1)

        else:
            raise ValueError(model_name)

        model.summary()

        compile_segm(model, lr=lr)

        if b_optimal_lr:
            from neuralNetwork.optimization import find_learning_rate

            global flow_tr
            find_learning_rate(model, flow_tr)

        self.neural_net = NeuralNet(model, w_ext=w_ext_in)

        if data_name[:5] == '1319_':  # pre Load model!
            # TODO which epoch to start from, I guess 10 should have an impact
            epoch_start = 50  # probably better (learned something)
            self.neural_net.load(
                f'C:/Users/admin/Data/ghent_altar/net_weight/1319/{model_name}_d{d}_k{self.k}',
                epoch=epoch_start)
Exemplo n.º 6
0
def pred_epochs():
    """Predict the lamb image with ti-unet weights every 10th epoch and show
    the class-1 maps side by side.

    :return: 1 (legacy success flag).
    """
    img_x, img_y_val = data_lamb()

    d = 2
    k = 10
    model_name = 'ti-unet'

    train_data = '1319_10nat'

    # Deeper models need a wider input extension.
    w_ext = 10 if d == 1 else 26

    predictions, loaded_epochs = [], []
    for epoch in range(10, 101, 10):
        print(epoch)

        # Transfer-learned runs ('1319_*') continued counting from epoch 50.
        epoch_start = 50
        epoch_corr = epoch + epoch_start if train_data[:5] == '1319_' else epoch
        path = f'C:/Users/admin/Data/ghent_altar/net_weight/{train_data}/{model_name}_d{d}_k{k}/w_{epoch_corr}.h5'

        try:
            model = load_model_quick(path)
        except Exception as e:
            print(e)
            continue

        neural_net = NeuralNet(model, w_ext=w_ext, norm_x=True)

        y_pred = neural_net.predict(img_x)

        if 0:
            data_i = _eval_func_single(img_y_val, y_pred, metric='kappa')
            print(data_i)
            data_i = _eval_func_single(img_y_val, y_pred, metric='jaccard')
            print(data_i)

        predictions.append(y_pred)
        loaded_epochs.append(epoch)

    concurrent([p[..., 1] for p in predictions], loaded_epochs)
    plt.show()

    return 1
        def get_model():
            """Build the pre-encoded segmentation model (ti-unet or unet),
            optionally warm-started from saved fold weights."""
            if self.ti:
                model = self.get_tiunet_preenc(k=self.k, lr=self.lr_opt)

            else:
                model = self.get_unet_preenc(k=self.k, lr=self.lr_opt)

            if self.fixed_enc == 2:
                # Warm start: load epoch-100 weights into the freshly built
                # model via a throwaway NeuralNet wrapper.
                n_temp = NeuralNet(model)

                folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
                folder1 = f'10lamb_kfold_pretrained{"_encfixed"}{info_batchnorm}'
                folder2 = f'{info_model}_d{self.depth}_k{self.k}_ifold{i_fold}'

                n_temp.load(os.path.join(folder_weights, folder1, folder2),
                            100)

                # Wrapper only existed to load weights into `model`.
                del (n_temp)

            return model
Exemplo n.º 8
0
    def get_n_model_regular(self, i_fold=None, k=None, epoch=None):
        """Restore the legacy (wrong batch-norm) ti-unet for a given fold/epoch.

        :param i_fold: k-fold index encoded in the weight-folder name.
        :param k: filter count of the model.
        :param epoch: epoch whose weights are loaded.
        :return: NeuralNet wrapping the restored model.
        """
        # TODO wrong tiunet: wrong_batch_norm=True reproduces the legacy model.
        model = ti_unet(9,
                        filters=k,
                        w=self.w_patch,
                        ext_in=10 // 2,
                        batch_norm=True,
                        wrong_batch_norm=True)

        net = NeuralNet(model, w_ext=10)

        weight_dir = os.path.join(
            '/scratch/lameeus/data/ghent_altar/net_weight',
            f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}')
        net.load(folder=weight_dir, epoch=epoch)

        return net
Exemplo n.º 9
0
    def get_n_model(self, i_fold=None, k=None, epoch=None):
        """Restore a pre-trained ti-unet for a given fold/epoch.

        :param i_fold: k-fold index encoded in the weight-folder name.
        :param k: filter count of the model.
        :param epoch: epoch whose weights are loaded.
        :return: NeuralNet wrapping the restored model.
        """
        model = ti_unet(9,
                        filters=k,
                        w=self.w_patch,
                        ext_in=10 // 2,
                        batch_norm=True)

        net = NeuralNet(model, w_ext=10)

        # The folder name encodes how the encoder was pre-trained/frozen.
        info_batchnorm = '_batchnorm'
        info_fixed = '_encfixed' if self.fixed_enc == 1 else '_prefixed' if self.fixed_enc == 2 else ''
        info_model = 'tiunet'
        info = f'10lamb_kfold_pretrained{info_fixed}{info_batchnorm}/{info_model}_d{1}_k{k}_ifold{i_fold}'
        weight_dir = os.path.join(
            '/scratch/lameeus/data/ghent_altar/net_weight', info)
        net.load(folder=weight_dir, epoch=epoch)

        return net
Exemplo n.º 10
0
def main():
    """Grid-evaluate ti-unet k-fold weights: single-epoch and epoch-averaged.

    For every (k, fold, epoch) combination: load the epoch's weights, predict
    the full panel, derive thresholds from the ground-truth class
    distribution, and append both the single-epoch scores and the running
    epoch-averaged scores to CSV.

    :return: None
    """

    ### Settings

    k_range = range(2, 30 + 1)
    # k_range = [10,11]

    fold_range = range(6)
    # fold_range = [0, 1]

    epoch_range = range(1, 40 + 1)
    # epoch_range = [39, 40]

    # NOTE(review): dropped pointless f-prefixes (strings had no placeholders).
    filename_single = 'tiunet_10lamb_kfold_single'
    filename_avg_pred = 'tiunet_10lamb_kfold_avgpred'

    if os.name == 'nt':     # windows laptop
        folder_weights = 'C:/Users/Laurens_laptop_w/data'
        folder_save = 'C:/Users/Laurens_laptop_w/data/ghent_altar/dataframes'
    else:
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
        folder_save = '/home/lameeus/data/ghent_altar/dataframes'

    def append_csv(path, df):
        # Append without header when the CSV already exists, else create it.
        if os.path.exists(path):
            df.to_csv(path, mode='a', header=False, index=False)
        else:
            df.to_csv(path, index=False)

    ### Init
    # Descending epochs: the running average then covers [epoch, max_epoch].
    epoch_range_desc = np.sort(epoch_range)[::-1]

    k_fold_train_data = get_10lamb_6patches(5)  # 5 is the number of modalities
    train_data_all = k_fold_train_data.get_train_data_all()
    img_x = train_data_all.get_x_train()
    img_x = rescale0to1(img_x)
    img_y_all = train_data_all.get_y_train()

    for k in k_range:
        for i_fold in fold_range:

            ### Reinit to make sure
            model = None
            list_y_pred = []

            train_data_i = k_fold_train_data.k_split_i(i_fold)
            img_y_tr = train_data_i.get_y_train()
            img_y_te = train_data_i.get_y_test()

            ###
            lst_data_single = []
            lst_data_avg_pred = []

            for epoch in epoch_range_desc:

                # Load this epoch's weights (architecture reused via `model`).
                filepath_model = os.path.join(folder_weights, f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5')

                if epoch == epoch_range_desc[0]:
                    assert model is None
                    assert len(list_y_pred) == 0

                model = load_model_quick(filepath_model, model)

                # Inference
                n = NeuralNet(model, w_ext=10)
                y_pred = n.predict(img_x)

                # Average out predictions over the epochs seen so far.
                list_y_pred.append(y_pred)

                y_avg_pred = np.mean(list_y_pred, axis=0)

                # Thresholds based on the GT class distribution.
                thresh_single = optimal_test_thresh_equal_distribution(img_y_all, y_pred)
                thresh_avg_pred = optimal_test_thresh_equal_distribution(img_y_all, y_avg_pred)

                # Get scores
                data_single_i = {'k': k,
                                 'i_fold': i_fold,
                                 'epoch': epoch}
                data_avg_pred_i = {'k': k,
                                   'i_fold': i_fold,
                                   'epoch_start': epoch,
                                   'epoch_end': epoch_range_desc[0]}

                data_single_i.update(foo_performance(img_y_te, y_pred, thresh_single))
                data_avg_pred_i.update(foo_performance(img_y_te, y_avg_pred, thresh_avg_pred))

                if 1:
                    print('single', data_single_i)
                    print('avg pred', data_avg_pred_i)

                lst_data_single.append(data_single_i)
                lst_data_avg_pred.append(data_avg_pred_i)

            # Save data (the duplicated if/else save logic now lives in
            # append_csv above).
            df_single = pd.DataFrame(lst_data_single)
            df_avg_pred = pd.DataFrame(lst_data_avg_pred)

            append_csv(os.path.join(folder_save, filename_single + '.csv'),
                       df_single)
            append_csv(os.path.join(folder_save, filename_avg_pred + '.csv'),
                       df_avg_pred)

    return
Exemplo n.º 11
0
def main():
    """Evaluate pre-trained-encoder unet k-fold weights per epoch.

    For every fold and epoch (descending): load the weights, predict the full
    panel, derive thresholds from the ground truth, and append the scores of
    the single-epoch and running epoch-averaged predictions to CSV.

    :return: None
    """

    b_encoder_fixed = False
    info_enc_fixed = '_enc_fixed'

    folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold_pretrained'
    folder_save = '/home/lameeus/data/ghent_altar/dataframes'
    # NOTE(review): dropped pointless f-prefixes (strings had no placeholders).
    filename_single = 'pretrained_unet_10lamb_kfold_single'
    filename_avg_pred = 'pretrained_unet_10lamb_kfold_avgpred'
    folder_weights += info_enc_fixed if b_encoder_fixed else ''
    filename_single += info_enc_fixed if b_encoder_fixed else ''
    filename_avg_pred += info_enc_fixed if b_encoder_fixed else ''

    fold_range = range(6)
    # fold_range = [0, 1]

    k = 10
    epoch_range = range(1, 40 + 1)

    w_ext_in = 28

    k_fold_train_data = get_10lamb_6patches(5)  # 5 is the number of modalities
    train_data_all = k_fold_train_data.get_train_data_all()
    img_x = train_data_all.get_x_train()
    img_x = rescale0to1(img_x)
    img_clean = img_x[..., :3]
    img_y_all = train_data_all.get_y_train()

    b_plot = False

    for i_fold in fold_range:

        print(i_fold)

        img_y_te = k_fold_train_data.k_split_i(i_fold).get_y_test()

        # Init for range epochs
        lst_data_single = []
        lst_data_avg_pred = []
        list_y_pred = []
        model = None

        # Descending epochs: the running average covers [epoch, max_epoch].
        for epoch in np.sort(epoch_range)[::-1]:

            filepath_model = os.path.join(
                folder_weights, f'unet_enc_k{k}_ifold{i_fold}/w_{epoch}.h5')

            # Reuse the architecture; only weights change between epochs.
            model = load_model_quick(filepath_model, model=model)
            n = NeuralNet(model, w_ext=w_ext_in)
            y_pred = n.predict(img_x)
            """
            Average out predictions
            """
            list_y_pred.append(y_pred)
            y_avg_pred = np.mean(list_y_pred, axis=0)

            thresh_single = optimal_test_thresh_equal_distribution(
                img_y_all, y_pred)
            thresh_avg_pred = optimal_test_thresh_equal_distribution(
                img_y_all, y_avg_pred)

            y_pred_bin = np.greater_equal(y_pred[..., 1], thresh_single)

            dict_perf = foo_performance(img_y_te, y_pred, thresh_single)
            print(dict_perf)

            if b_plot:
                concurrent([
                    y_pred_bin, img_clean,
                    semi_transparant(img_clean, y_pred_bin),
                    semi_transparant(img_clean, img_y_te[..., 1].astype(bool))
                ])

            data_single_i = {'k': k, 'i_fold': i_fold, 'epoch': epoch}
            data_avg_pred_i = {
                'k': k,
                'i_fold': i_fold,
                'epoch_start': epoch,
                'epoch_end': max(epoch_range)
            }

            data_single_i.update(dict_perf)
            data_avg_pred_i.update(
                foo_performance(img_y_te, y_avg_pred, thresh_avg_pred))

            lst_data_single.append(data_single_i)
            lst_data_avg_pred.append(data_avg_pred_i)

        df_single = pd.DataFrame(lst_data_single)
        df_avg_pred = pd.DataFrame(lst_data_avg_pred)

        path_single = os.path.join(folder_save, filename_single + '.csv')
        path_avg_pred = os.path.join(folder_save, filename_avg_pred + '.csv')

        pandas_save(path_single, df_single, append=True)
        pandas_save(path_avg_pred, df_avg_pred, append=True)

    return
def main():
    """Two-stage script: (1) fine-tune a unet with a pre-trained encoder per
    k-fold split and predict; (2) average ti-unet predictions over epochs
    31..40 for one fold and compare several threshold-optimization schemes.

    :return: None
    """

    ### Settings
    mod = 5

    w_patch = 16 * 2
    """
    Data (all important modalities)
    """

    # folder_windows = r'C:\Users\Laurens_laptop_w\OneDrive - UGent\data\10lamb'
    train_data = get_10lamb_old(mod)
    img_x, img_y_tr, _, _ = get_training_data(train_data)
    # Normalise the input!
    img_x = rescale0to1(img_x)
    """
    Train segmentation
        1) reuse everything
        2) fix encoder
    """

    if 1:

        if 1:
            b_encoder_fixed = False

            info_enc_fixed = '_enc_fixed' if b_encoder_fixed else ''
            get_info = lambda: f'10lamb_kfold_pretrained{info_enc_fixed}/unet_enc_k{k}_ifold{i_fold}'

            n_epochs = 40

            k = 10

            if k == 10:
                epoch_w = 100
            else:
                raise NotImplementedError()

            ### Settings you don't have to change:

            w_patch = 50
            w_ext_in = 28
            b_double = False
            padding = 'valid'

            # TODO flag for converting encoder to dilated conv

            def get_unet_pretrained_encoder():
                """Assemble a unet from the pre-trained encoder plus a fresh
                decoder and compile it for segmentation."""

                model_encoder = get_model_encoder()

                encoder_inputs = model_encoder.input

                decoder_outputs = decoder(model_encoder, f_out=2)

                model_pretrained_unet = Model(encoder_inputs, decoder_outputs)
                from methods.examples import compile_segm
                compile_segm(model_pretrained_unet, lr=1e-4)

                model_pretrained_unet.summary()

                return model_pretrained_unet

            """
            Train
            """

            k_fold_train_data = get_10lamb_6patches(5)
            for i_fold in range(6):
                """
                Get a new network (not trained yet for segmentation)
                """

                model_pretrained_unet = get_unet_pretrained_encoder()
                n_pretrained_unet = NeuralNet(model_pretrained_unet)
                """
                The data
                """

                train_data_i = k_fold_train_data.k_split_i(i_fold)

                info = get_info()

                img_y_tr = train_data_i.get_y_train()
                img_y_te = train_data_i.get_y_test()

                flow_tr = get_flow(
                    img_x,
                    img_y_tr,
                    w_patch=w_patch,  # Comes from 10
                    w_ext_in=w_ext_in)

                flow_te = get_flow(
                    img_x,
                    img_y_te,
                    w_patch=w_patch,  # Comes from 10
                    w_ext_in=w_ext_in)

                n_pretrained_unet.train(flow_tr,
                                        flow_te,
                                        epochs=n_epochs,
                                        verbose=1,
                                        info=info)
                """
                Prediction
                """

                # Set the input extension before full-image prediction.
                n_pretrained_unet.w_ext = w_ext_in
                y_pred = n_pretrained_unet.predict(img_x)

                concurrent([y_pred[..., 1]])
    """
    Classification
    """

    if 1:
        im_clean = img_x[..., :3]

        k = 8
        i_fold = 3
        epoch_last = 40

        from methods.examples import kappa_loss, weighted_categorical_crossentropy
        from performance.metrics import accuracy_with0, jaccard_with0
        loss = weighted_categorical_crossentropy((1, 1))

        list_y_pred = []

        ### K fold validation
        k_fold_train_data = get_10lamb_6patches(5)
        train_data_i = k_fold_train_data.k_split_i(i_fold)
        img_y_tr = train_data_i.get_y_train()
        img_y_te = train_data_i.get_y_test()

        # Average predictions of the last 10 epochs (31..epoch_last).
        for epoch in np.arange(31, epoch_last + 1):
            filepath_model = f'/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5'

            model = load_model(filepath_model,
                               custom_objects={
                                   'loss': loss,
                                   'accuracy_with0': accuracy_with0,
                                   'jaccard_with0': jaccard_with0,
                                   'kappa_loss': kappa_loss
                               })

            n = NeuralNet(model, w_ext=10)
            y_pred = n.predict(img_x)

            list_y_pred.append(y_pred)

        y_pred_mean = np.mean(list_y_pred, axis=0)
        q1 = y_pred_mean[..., 1]
        concurrent([q1, q1.round(), im_clean])
        """
        Optimal threshold (making conf matrix symmetric, not based on maximising kappa)
        """
        y_gt = np.any([img_y_tr, img_y_te], axis=0)

        from performance.testing import _get_scores, filter_non_zero

        def foo_performance(y_true, y_pred, thresh):
            """Return the kappa score of y_pred thresholded at `thresh`,
            evaluated only on annotated (non-zero) pixels."""
            # is basically argmax
            y_pred_thresh_arg = np.greater_equal(y_pred[..., 1], thresh)

            y_true_flat, y_pred_thresh_arg_flat = filter_non_zero(
                y_true, y_pred_thresh_arg)
            y_te_argmax = np.argmax(y_true_flat, axis=-1)

            # Kappa
            return _get_scores(y_te_argmax, y_pred_thresh_arg_flat)[-1]

        """
        1. BEST? PERFORMANCE based on test set
        """

        print('1. Test distribution optimization')

        thresh = optimal_test_thresh_equal_distribution(img_y_te, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        print('\nIncremental optimization on test set')

        test_thresh2 = test_thresh_incremental(y_pred_mean,
                                               img_y_tr,
                                               img_y_te,
                                               n=5,
                                               verbose=0)

        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, test_thresh2))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean,
                                            test_thresh2))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, test_thresh2))
        """
        2. based on train
        """

        print('\n2. Training distribution optimization')

        thresh = optimal_test_thresh_equal_distribution(img_y_tr, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))
        """
        3. CONSISTENT: based on train+set
        """

        print('\n3. all GT distribution optimization')

        thresh = optimal_test_thresh_equal_distribution(y_gt, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        if 0:
            """
            4. DUMB/Not needed: Based on prediction of whole panel
            """

            thresh = optimal_test_thresh_equal_distribution(y_gt,
                                                            y_pred_mean,
                                                            mask_true=False)
            q1_thresh = np.greater_equal(q1, thresh)
            concurrent([q1, q1_thresh, im_clean])

    print('Done')
    def train_segm(self):
        """Build/restore the segmentation net for the configured fixed_enc
        variant, then fine-tune in stages (frozen encoder first, then fully
        trainable) with an evaluation/plot after each stage.

        :raises NotImplementedError: for an unknown self.fixed_enc value.
        """
        from figures_paper.overlay import semi_transparant

        if self.fixed_enc == -2:

            def get_n_model_regular(i_fold=None, k=None, epoch=None):
                # Legacy baseline: ti-unet with the (wrong) batch-norm layout.
                n_in = 9

                model_tiunet = ti_unet(n_in,
                                       filters=k,
                                       w=self.w_patch,
                                       ext_in=10 // 2,
                                       batch_norm=True,
                                       wrong_batch_norm=True)
                compile_segm(model_tiunet, 1e-4)
                """ TODO wrong tiunet """

                n = NeuralNet(model_tiunet, w_ext=10)

                info = f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}'
                folder = os.path.join(
                    '/scratch/lameeus/data/ghent_altar/net_weight', info)
                n.load(folder=folder, epoch=epoch)

                return n

            n_segm = get_n_model_regular(i_fold=self.i_fold,
                                         k=self.k,
                                         epoch=40)

        elif self.fixed_enc == -1:
            # No init
            model_segm = self.get_tiunet_preenc(k=self.k, lr=self.lr_opt)
            n_segm = NeuralNet(model_segm, w_ext=self.w_ext_in_ti)

        elif self.fixed_enc in [0, 1, 2]:

            # Load model
            model_segm = self.get_tiunet_preenc(k=self.k, lr=self.lr_opt)
            n_segm = NeuralNet(model_segm, w_ext=self.w_ext_in_ti)

            # Train on set
            folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
            if self.fixed_enc == 0:
                folder1 = '10lamb_kfold_pretrained_batchnorm'
            elif self.fixed_enc == 1:
                folder1 = '10lamb_kfold_pretrained_encfixed_batchnorm'
            elif self.fixed_enc == 2:
                folder1 = '10lamb_kfold_pretrained_prefixed_batchnorm'

            folder2 = f'{"tiunet"}_d{self.depth}_k{self.k}_ifold{self.i_fold}'

            n_segm.load(os.path.join(folder_weights, folder1, folder2), 100)

        elif self.fixed_enc == 3:

            model_segm = self.get_tiunet_preenc(k=self.k,
                                                lr=self.lr_opt,
                                                set_info=f'_{self.set_nr}',
                                                epoch_start=100)
            n_segm = NeuralNet(model_segm, w_ext=self.w_ext_in_ti)

        else:
            # NOTE(review): the original evaluated NotImplementedError()
            # without raising it, silently falling through with n_segm
            # unbound; now raised explicitly.
            raise NotImplementedError()

        def foo(n_segm, b=0):
            # Evaluate, print performance, and plot the prediction overlay;
            # optionally (b=1) save the prediction image to disk.
            y_pred = n_segm.predict(self.img_x)

            thresh_single = optimal_test_thresh_equal_distribution(
                self.img_y_te, y_pred)
            # data_single_i = {'k': self.k,
            #                  'i_fold': i_fold,
            #                  'epoch': epoch}
            print(foo_performance(self.img_y_te, y_pred, thresh_single))

            img_clean = self.img_x[..., :3]
            concurrent([
                img_clean, y_pred[..., 1], y_pred[..., 1] >= thresh_single,
                semi_transparant(img_clean, y_pred[..., 1] >= thresh_single)
            ])

            if b:
                from data.datatools import imsave

                folder = '/home/lameeus/data/ghent_altar/output/hierarchy/'
                info_epoch = f'_epoch{n_segm.epoch}' if n_segm.epoch > 0 else ''
                filename = folder + f'13_small/pred_transfer_kfoldenc{self.fixed_enc}_ifold{self.i_fold}_avg{info_epoch}.png'
                imsave(filename, y_pred[..., 1])

        def set_encoder_state(model, trainable=False):
            # Freeze/unfreeze the first 7 layers (the encoder) and recompile.
            assert len(model.layers) == 14
            for layer in model.layers[:7]:
                layer.trainable = trainable
            compile_segm(model)

        n_segm.epoch = 0

        # Without pretraining
        foo(n_segm)

        # Stage 1: train with the encoder frozen.
        set_encoder_state(n_segm.model, trainable=False)

        for _ in range(10):
            n_segm.train(self.flow_tr_10, epochs=1, verbose=2)

        foo(n_segm, 0)

        if 0:
            set_encoder_state(n_segm.model, trainable=True)

            for _ in range(1):
                n_segm.train(self.flow_tr_set_10, epochs=10, verbose=2)
                foo(n_segm, 0)

        # Stage 2: train with the whole network trainable.
        set_encoder_state(n_segm.model, trainable=True)

        for _ in range(10):
            n_segm.train(self.flow_tr_set, epochs=1, verbose=2)

        foo(n_segm, 0)
Exemplo n.º 14
0
                     delimiter=';')

    i_max = df['kappa'].idxmax()

    k, epoch = map(int, df.iloc[i_max][['k', 'epoch']])

    model_name = 'ti-unet'

    folder_base = 'C:/Users/admin/Data/ghent_altar/' if os.name == 'nt' else '/scratch/lameeus/data/ghent_altar/'

    path = os.path.join(
        folder_base,
        f'net_weight/{data}/{model_name}_d1_k{k}_n80/w_{epoch}.h5')

    model = load_model_quick(path)
    neural_net = NeuralNet(model, w_ext=10, norm_x=True)

    # Image

    from scripts.journal_paper.comparison_sh.shared import load_data

    if data == '1319botright':
        a = load_data("19botright", n_per_class=80)
    else:
        a = load_data(data, n_per_class=80)
    img_x, img_y = a.get_x_train(), a.get_y_test()

    y_pred = neural_net.predict(img_x)

    if 1:
        Evaluater(img_y, y_pred).summary()
Exemplo n.º 15
0
def transfer_learning(
        epoch=25,  # Could check a few
        b_plot=False):
    """Evaluate transfer-learning variants of the ti-unet on the lamb panel.

    For each training-data configuration in ``train_data_lst``, loads the
    corresponding weight file, predicts the paint-loss map on the lamb image
    and evaluates it (kappa and jaccard) against the validation annotations.

    :param epoch: finetuning epoch of the weight file to evaluate.
    :param b_plot: if True, show the input image and all predictions side by side.
    :return: dict mapping each training-data name to its kappa evaluation result.
    """

    d = 2  # network depth: 1 or 2 (determines the required border extension)

    img_x, img_y_val = data_lamb()

    k = 10  # number of kernels of the trained models

    model_name = 'ti-unet'

    w_ext = 10 if d == 1 else 26  # input border extension depends on depth

    # Collected predictions and their legend names ('clean' = the input image).
    y_pred_lst = []
    n = ['clean']

    # train_data_lst = ['1319_10', '10', '1319', '1319_101319']
    train_data_lst = ['10nat', '1319_10nat', '1319_10nat1319', '1319']

    data_i_lst = {}

    for train_data in train_data_lst:
        print(train_data)

        # Models pretrained on panels 13+19 ('1319_' prefix) were finetuned
        # starting from epoch 50, so shift the requested epoch accordingly.
        epoch_start = 50
        epoch_corr = epoch + epoch_start if train_data[:5] == '1319_' else epoch
        if train_data == '1319':
            epoch_corr = 50  # pretraining-only model: always evaluate epoch 50
        path = f'C:/Users/admin/Data/ghent_altar/net_weight/{train_data}/{model_name}_d{d}_k{k}/w_{epoch_corr}.h5'

        try:
            model = load_model_quick(path)
        except Exception as e:
            # Best effort: skip configurations whose weight file is missing.
            print(e)
            continue

        neural_net = NeuralNet(model, w_ext=w_ext, norm_x=True)

        y_pred = neural_net.predict(img_x)

        # Baseline evaluation on the validation annotations.
        data_i = _eval_func_single(img_y_val, y_pred, metric='kappa')
        print(data_i)

        # NOTE(review): a disabled (`if 0:`) experiment here evaluated against
        # the individual k-fold patches of get_10lamb_6patches(5) to measure
        # the influence of each annotated patch. Recorded findings (kappa,
        # baseline ~.22): patch 0 -> .268 remove (much "green" background
        # annotated as paint loss); 1 -> .228 keep; 2 -> .179 keep;
        # 3 -> .159 keep (most important); 4 -> .252 remove (problem right
        # top); 5 -> .233 keep.

        data_i_lst[train_data] = data_i

        data_i = _eval_func_single(img_y_val, y_pred, metric='jaccard')
        print(data_i)

        y_pred_lst.append(y_pred)
        n.append(train_data)

    # plt.imshow(neural_net.predict(img_x[::2,::2,:])[..., 1])

    if b_plot:
        # First RGB channels of the input, then channel 1 of each prediction.
        concurrent([img_x[..., :3]] + [a[..., 1] for a in y_pred_lst], n)

    # NOTE(review): a second disabled (`if 0:`) branch thresholded each
    # prediction at its optimal threshold and saved semi-transparent overlays
    # to OneDrive for the paper figures; removed as unreachable dead code.

    return data_i_lst
    def train_segm(self):
        """Train the segmentation network on each of the 6 k-folds and append
        per-epoch test performance rows to a shared CSV.

        The architecture (ti-unet vs unet) and encoder initialisation depend
        on ``self.ti`` and ``self.fixed_enc`` (1 = encoder fixed,
        2 = warm-start from the fixed-encoder run).
        """
        folder_save = '/home/lameeus/data/ghent_altar/dataframes'

        # Tags encoding the configuration; reused in file/folder names below.
        info_batchnorm = '_batchnorm' if self.batch_norm else ''
        info_fixed = '_encfixed' if self.fixed_enc == 1 else '_prefixed' if self.fixed_enc == 2 else ''
        info_model = 'tiunet' if self.ti else 'unet'

        filename_single = f'pretrained/{info_model}_10lamb_kfold{info_fixed}{info_batchnorm}/d{self.depth}_single'
        path_single = os.path.join(folder_save, filename_single + '.csv')

        # NOTE: late-binding closure — i_fold is resolved at call time from the
        # fold loop below, so get_info() may only be called inside that loop.
        get_info = lambda: f'10lamb_kfold_pretrained{info_fixed}{info_batchnorm}/{info_model}_d{self.depth}_k{self.k}_ifold{i_fold}'

        # All annotations combined; used for the equal-distribution threshold.
        img_y_all = self.k_fold_train_data.get_train_data_all().get_y_train()

        def get_model():
            # Build the (ti-)unet with a pretrained encoder. Also closes over
            # i_fold (late binding) when fixed_enc == 2.
            if self.ti:
                model = self.get_tiunet_preenc(k=self.k, lr=self.lr_opt)

            else:
                model = self.get_unet_preenc(k=self.k, lr=self.lr_opt)

            if self.fixed_enc == 2:
                # 'prefixed': warm-start from epoch 100 of the corresponding
                # fixed-encoder ('_encfixed') run for the same fold.
                n_temp = NeuralNet(model)

                folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
                folder1 = f'10lamb_kfold_pretrained{"_encfixed"}{info_batchnorm}'
                folder2 = f'{info_model}_d{self.depth}_k{self.k}_ifold{i_fold}'

                n_temp.load(os.path.join(folder_weights, folder1, folder2),
                            100)

                del (n_temp)

            return model

        # Input border extension depends on the architecture.
        w_ext = self.w_ext_in_ti if self.ti else self.w_ext_in_ae

        if not self.lr_opt:
            # No learning rate configured: run the LR finder instead.
            model_segm = get_model()
            find_learning_rate(model_segm, self.flow_segm, lr1=1e0)

        for i_fold in range(6):
            print(f'i_fold = {i_fold}')

            # Fresh model per fold.
            model_segm = get_model()
            n_segm = NeuralNet(model_segm, w_ext=w_ext)

            # Train/test annotation split for fold i; the x image is shared.
            train_data_i = self.k_fold_train_data.k_split_i(i_fold)
            img_y_tr = train_data_i.get_y_train()
            img_y_te = train_data_i.get_y_test()
            flow_tr = get_flow(self.img_x,
                               img_y_tr,
                               w_patch=self.w_patch,
                               w_ext_in=w_ext)
            flow_te = get_flow(self.img_x,
                               img_y_te,
                               w_patch=self.w_patch,
                               w_ext_in=w_ext)

            info = get_info()

            for epoch in range(self.epochs):
                n_segm.train(flow_tr, flow_te, epochs=1, verbose=2, info=info)

                # Evaluate after every epoch and append one row to the CSV.
                y_pred = n_segm.predict(self.img_x)
                thresh_single = optimal_test_thresh_equal_distribution(
                    img_y_all, y_pred)
                data_single_i = {'k': self.k, 'i_fold': i_fold, 'epoch': epoch}
                data_single_i.update(
                    foo_performance(img_y_te, y_pred, thresh_single))
                lst_data_single = [data_single_i]
                df_single = pd.DataFrame(lst_data_single)
                pandas_save(path_single, df_single, append=True)

        return
# ---- Exemplo n.º 17 (0) ----
def main():
    """Sweep kernel counts k=1..20 and training epochs for the ti-unet,
    scoring both the per-epoch prediction and the running average of the
    predictions of epochs [epoch, i_end], and save both result tables as CSV.

    Fixes over the previous version: the plotting list is now separate from
    the averaging list (predictions were appended twice per epoch when
    b_plot was True, corrupting the running average, and the plot
    accumulator was clobbered at the start of every k iteration).
    """
    ### Settings

    mod = 5
    panel_nr = 19  # 13 or 19

    i_start, i_end = 1, epochs_tot
    # i_start, i_end = 1, 2

    k_lst = np.arange(1, 21)
    # k_lst = [1, 2]

    verbose = 0
    b_plot = False

    ###

    if panel_nr == 13:
        train_data = get_13botleftshuang(mod=mod)
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/lamb_segmentation'
    elif panel_nr == 19:
        train_data = get_19SE_shuang(mod=mod)
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/19_hand_SE'
    else:
        raise ValueError(panel_nr)

    x, y_tr, _, y_te = get_training_data(train_data)

    (y_tr, y_te) = map(batch2img, (y_tr, y_te))

    assert i_end >= i_start

    def print_conf(y_true, y_pred):
        # Confusion matrix restricted to annotated pixels.
        y_true = batch2img(y_true)
        y_pred = batch2img(y_pred)

        b_annot = np.sum(y_true, axis=-1).astype(bool)

        y_true_annot = y_true[b_annot, :].argmax(axis=-1)
        y_pred_annot = y_pred[b_annot, :].argmax(axis=-1)

        """
        T0; predicted 1, but is 0
        predicted 0, but is 1; T1
        """
        conf_mat = confusion_matrix(y_true_annot, y_pred_annot)
        print(conf_mat)

    def score_binary(pred_thresh_bin):
        # Accuracy / jaccard / kappa on the annotated test pixels.
        y_te_flat, y_pred_flat = filter_non_zero(y_te, pred_thresh_bin)
        y_te_argmax = np.argmax(y_te_flat, axis=-1)
        y_pred_argmax = np.argmax(y_pred_flat, axis=-1)
        return _get_scores(y_te_argmax, y_pred_argmax)

    if b_plot:
        # Accumulated across ALL k and epochs, for the final overview plot.
        plot_pred_lst = []
        plot_info_lst = []

    lst_data = []
    lst_data_avg_pred = []

    for k in k_lst:

        model = None

        # Predictions of epochs seen so far (highest first) for the average.
        avg_pred_lst = []

        for epoch in np.arange(i_start, i_end + 1)[::-1]:

            info = f'settings: k {k}; epoch {epoch}'
            print('\n\t' + info)

            filepath_model = os.path.join(folder_weights, f'ti_unet_k{k}_imbalanced/w_{epoch}.h5')

            if epoch == i_end:
                # Only the first (highest) epoch needs the full model; later
                # epochs just reload weights into the same architecture.
                model = load_model(filepath_model, custom_objects={'loss': loss,
                                                                   'accuracy_with0': accuracy_with0,
                                                                   'jaccard_with0': jaccard_with0,
                                                                   'kappa_loss': kappa_loss
                                                                   })
            else:
                model.load_weights(filepath_model)

            n = NeuralNet(model, w_ext=10)

            y_pred = n.predict(x)
            o = y_pred[..., 1]  # paint-loss probability map

            avg_pred_lst.append(o)

            # --- Single-epoch prediction ---

            if verbose == 1:
                print_conf(y_tr, y_pred)
                print_conf(y_te, y_pred)

            if b_plot:
                plot_pred_lst.append(o)
                plot_info_lst.append(info)

            test_thresh = test_thresh_incremental(y_pred, y_tr, y_te, n=5, verbose=0)

            pred_thresh = np.greater_equal(o, test_thresh)
            pred_thresh_bin = np.stack([1 - pred_thresh, pred_thresh], axis=-1)

            acc, jacc, kappa = score_binary(pred_thresh_bin)

            if verbose == 1:
                print_conf(y_tr, pred_thresh_bin)
                print_conf(y_te, pred_thresh_bin)

            lst_data.append({'k': k,
                             'epoch': epoch,
                             'test_thresh': test_thresh,
                             'kappa': kappa,
                             'accuracy': acc,
                             'jaccard': jacc
                             })

            # --- Average of predictions over epochs [epoch, i_end] ---

            pred_i_average = np.mean(avg_pred_lst, axis=0)

            # Optimize the threshold for the averaged prediction.
            test_thresh = test_thresh_incremental(np.stack([1 - pred_i_average, pred_i_average], axis=-1), y_tr, y_te, n=5,
                                                  verbose=0)
            pred_thresh = np.greater_equal(pred_i_average, test_thresh)
            pred_thresh_bin = np.stack([1 - pred_thresh, pred_thresh], axis=-1)

            acc, jacc, kappa = score_binary(pred_thresh_bin)

            lst_data_avg_pred.append({'k': k,
                                      'epoch_start': epoch,
                                      'test_thresh': test_thresh,
                                      'kappa': kappa,
                                      'accuracy': acc,
                                      'jaccard': jacc
                                      })

    # Save both result tables.
    df = pd.DataFrame(lst_data)
    filename_save = f'tiunet_1pool_shaoguang{panel_nr}_imbalanced'
    filename_path = f'/scratch/lameeus/data/ghent_altar/dataframes/{filename_save}.csv'
    df.to_csv(filename_path, sep=';')

    df = pd.DataFrame(lst_data_avg_pred)
    filename_save = f'tiunet_1pool_shaoguang{panel_nr}_imbalanced_averaging'
    df.to_csv(f'/scratch/lameeus/data/ghent_altar/dataframes/{filename_save}.csv', sep=';')

    if b_plot:
        concurrent(plot_pred_lst, plot_info_lst)

    plt.show()

    return