def main_data(self, set_data):
        """Build the training data for the dataset named in ``set_data``.

        Side effect: sets ``self.val_datas`` to two validation annotation maps
        ('13_top2' and '13_test').

        :param set_data: dict with at least a 'name' key selecting the dataset.
        :return: train_data object with inputs rescaled to [0, 1].
        :raises NotImplementedError: for unknown dataset names.
        """

        if set_data['name'] == 'zach_sh':
            from datasets.default_trainingsets import get_13botleftshuang
            # BUG FIX: `mod` was an undefined name here (NameError at runtime);
            # use the instance's modality setting like the sibling methods do.
            train_data = get_13botleftshuang(self.mod, n_per_class=self.n_per_class)
        else:
            raise NotImplementedError()

        # Normalise the input to [0, 1].
        from data.preprocessing import rescale0to1
        train_data.x = rescale0to1(train_data.x)

        from datasets.default_trainingsets import xy_from_df, panel13withoutRightBot
        from datasets.examples import get_13zach

        # BUG FIX: `mod` was also undefined here; use self.mod.
        _, img_y = xy_from_df(get_13zach(), self.mod)
        img_y_top2, _ = panel13withoutRightBot(img_y)

        # Validation maps: the top-2 panel region, and that region merged with
        # the held-out test annotations.
        img_y_test = np.logical_or(img_y_top2, train_data.get_y_test())

        self.val_datas = [{
            'name': '13_top2',
            'y': img_y_top2
        }, {
            'name': '13_test',
            'y': img_y_test
        }]

        return train_data
    def set_img_x(self):
        """Load, relabel and normalise the panel-13 data and build training flows.

        Side effects: sets ``self.img_x``, ``self.img_y_tr``, ``self.img_y_te``
        and the generators ``self.flow_tr_set``, ``self.flow_tr_10``,
        ``self.flow_tr_set_10`` and ``self.flow_ae_tr``.

        :raises NotImplementedError: when ``self.set_nr`` is not 13.
        """
        if self.set_nr == 13:
            train_data = get_13(self.mod)

            from data.datatools import imread
            from data.conversion_tools import annotations2y
            # Keep the original sparse annotations as the test labels and
            # replace the training labels with the cleaned annotation map.
            train_data.y_te = np.copy(train_data.y_tr)
            train_data.y_tr = annotations2y(imread(
                '/home/lameeus/data/ghent_altar/input/hierarchy/13_small/clean_annot_practical.png'
            ),
                                            thresh=.9)

            img_x, img_y, _, img_y_te = get_training_data(train_data)
        else:
            # BUG FIX: previously fell through with img_x/img_y/img_y_te
            # undefined, causing a NameError below. Fail fast instead.
            raise NotImplementedError()

        # Normalise the input!
        img_x = rescale0to1(img_x)
        self.img_x = img_x
        self.img_y_tr = img_y
        self.img_y_te = img_y_te

        train_data_10 = get_10lamb_6patches(self.mod).get_train_data_all()
        img_x_10, img_y_10, _, _ = get_training_data(train_data_10)
        # Normalise the input!
        img_x_10 = rescale0to1(img_x_10)

        self.flow_tr_set = get_flow(self.img_x,
                                    self.img_y_tr,
                                    w_patch=self.w_patch,
                                    w_ext_in=self.w_ext_in_ti)
        self.flow_tr_10 = get_flow(img_x_10,
                                   img_y_10,
                                   w_patch=self.w_patch,
                                   w_ext_in=self.w_ext_in_ti)
        # Oversample the panel-13 set relative to the 10lamb set.
        n_multiply = 10
        self.flow_tr_set_10 = get_flow([self.img_x] * n_multiply + [img_x_10],
                                       [self.img_y_tr] * n_multiply +
                                       [img_y_10],
                                       w_patch=self.w_patch,
                                       w_ext_in=self.w_ext_in_ti)

        # Autoencoder flow: input reconstructs itself.
        self.flow_ae_tr = get_flow(
            self.img_x,
            self.img_x,
            w_patch=self.w_patch,
            w_ext_in=self.w_ext_in_ae,
        )
# Exemple #3
# 0
    def method(self, x_img, w=None):
        """Run inference on ``x_img``, rescaling it to [0, 1] first when
        ``self.norm_x`` is set."""
        x = x_img
        if self.norm_x:
            print('normalising input')
            x = rescale0to1(x_img)

        return inference(self.model, x, w=w, w_ext=self.w_ext)
# Exemple #4
# 0
    def predict(self, x_img, w=None):
        """Delegate prediction to ``self.method``, normalising the input to
        [0, 1] first when ``self.norm_x`` is set."""
        if not self.norm_x:
            return self.method(x_img, w=w)

        print('normalising input')
        return self.method(rescale0to1(x_img), w=w)
    def set_data(self):
        """Load the image data for the configured panel set and normalise it.

        Side effects: sets ``self.img_x`` (inputs rescaled to [0, 1]) and
        ``self.k_fold_train_data`` (the 6-patch k-fold split of the 10lamb set).

        :raises NotImplementedError: when ``self.set`` is neither 10 nor 13.
        """

        if self.set == 10:
            train_data = get_10lamb_old(5)
            img_x, _, _, _ = get_training_data(train_data)
        elif self.set == 13:
            img_x, _, _, _ = get_training_data(get_13(5))
        else:
            # BUG FIX: previously fell through with img_x undefined,
            # causing a NameError below. Fail fast instead.
            raise NotImplementedError()

        # Normalise the input!
        img_x = rescale0to1(img_x)
        self.img_x = img_x

        self.k_fold_train_data = get_10lamb_6patches(5)
# Exemple #6
# 0
def load_data(data_name, n_per_class, seed=None):
    """Load a named training set and rescale its inputs to [0, 1].

    :param data_name: dataset identifier. Some cases match only the part
        after the last underscore (e.g. any '*_10' name selects the sparse
        10lamb annotations).
    :param n_per_class: number of annotated samples per class (forwarded to
        the loaders that support it).
    :param seed: optional RNG seed; only forwarded for '19botright'.
    :return: a train_data object whose ``.x`` has been normalised.
    :raises ValueError: for an unrecognised data_name.
    """
    def _data10nat():
        # Sparse annotations
        train_data = get_10lamb_old(5)

        from datasets.examples import get_10lamb
        from data.conversion_tools import annotations2y
        # Replace the training labels with the 'annot_tflearning' annotation set.
        train_data.y_tr = annotations2y(get_10lamb().get("annot_tflearning"))

        return train_data

    if data_name == '13botright':
        train_data = get_13botleftshuang(5, n_per_class=n_per_class)

    elif data_name == '19botright':
        train_data = get_19SE_shuang(5, n_per_class=n_per_class, seed=seed)

    elif data_name == '19botrightcrack':
        train_data = get_19SE_shuang_crack(5, n_per_class=n_per_class)

    elif data_name == '19botrightcrack3':
        train_data = get_19SE_shuang_crack(5,
                                           n_per_class=n_per_class,
                                           n_outputs=3)

    elif data_name == '1319':
        train_data = get_1319(5)

    elif data_name == '1319botright':

        from datasets.training import TrainData

        # Combine panels 13 and 19: train on both train splits, validate on
        # both test splits.
        a13 = load_data("13botright", n_per_class)
        a19 = load_data("19botright", n_per_class)

        img_x = [a13.get_x_train(), a19.get_x_train()]
        img_y_train = [a13.get_y_train(), a19.get_y_train()]
        img_y_val = [a13.get_y_test(), a19.get_y_test()]

        train_data = TrainData(img_x, img_y_train, img_y_val)

    elif data_name.split('_')[-1] == '10':

        # Sparse annotations
        train_data = get_10lamb_old(5)

    elif data_name.split('_')[-1] == '101319':
        from datasets.default_trainingsets import xy_from_df, get_13zach, get_19hand, get_10lamb, TrainData

        img_x10, img_y10 = xy_from_df(get_10lamb(), 5)
        img_x13, img_y13 = xy_from_df(get_13zach(), 5)
        img_x19, img_y19 = xy_from_df(get_19hand(), 5)

        img_x = [img_x10, img_x13, img_x19]
        img_y = [img_y10, img_y13, img_y19]

        # No test data
        train_data = TrainData(
            img_x, img_y, [np.zeros(shape=img_y_i.shape) for img_y_i in img_y])

    elif data_name.split('_')[-1] == '10nat':
        train_data = _data10nat()

    elif data_name.split('_')[-1] == '10nat1319':
        from datasets.default_trainingsets import TrainData

        train_data10 = _data10nat()
        train_data1319 = get_1319(5)

        img_x = [train_data10.get_x_train()] + train_data1319.get_x_train()
        img_y = [train_data10.get_y_train()] + train_data1319.get_y_train()

        # No test data (zero-filled maps of the same shape).
        train_data = TrainData(
            img_x, img_y, [np.zeros(shape=img_y_i.shape) for img_y_i in img_y])

    else:
        raise ValueError(data_name)

    from data.preprocessing import rescale0to1
    # Normalise all inputs to [0, 1] regardless of which branch was taken.
    train_data.x = rescale0to1(train_data.x)

    return train_data
# Exemple #7
# 0
def main():
    """Evaluate k-fold ti-unet checkpoints over a grid of (k, fold, epoch).

    For each (k, fold) the epoch checkpoints are loaded (newest first, reusing
    the model object), predictions are made per epoch and also averaged over
    the epochs seen so far, scores are computed with GT-based thresholds, and
    the results are appended to two CSV files.

    :return: None
    """
    
    ### Settings
    
    k_range = range(2, 30 + 1)
    # k_range = [10,11]

    fold_range = range(6)
    # fold_range = [0, 1]

    epoch_range = range(1, 40 + 1)
    # epoch_range = [39, 40]

    filename_single = f'tiunet_10lamb_kfold_single'
    filename_avg_pred =f'tiunet_10lamb_kfold_avgpred'

    if os.name == 'nt':     # windows laptop
        folder_weights = 'C:/Users/Laurens_laptop_w/data'
        folder_save = 'C:/Users/Laurens_laptop_w/data/ghent_altar/dataframes'
    else:
        folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight'
        folder_save = '/home/lameeus/data/ghent_altar/dataframes'

    ### Init
    # Iterate epochs from high to low so the running average starts at the
    # last (best-trained) checkpoint.
    epoch_range_desc = np.sort(epoch_range)[::-1]

    k_fold_train_data = get_10lamb_6patches(5)  # 5 is the number of modalities
    train_data_all = k_fold_train_data.get_train_data_all()
    img_x = train_data_all.get_x_train()
    img_x = rescale0to1(img_x)
    img_y_all = train_data_all.get_y_train()

    for k in k_range:
        for i_fold in fold_range:
            
            ### Reinit to make sure
            model = None
            list_y_pred = []

            train_data_i = k_fold_train_data.k_split_i(i_fold)
            img_y_tr = train_data_i.get_y_train()
            img_y_te = train_data_i.get_y_test()

            ###
            lst_data_single = []
            lst_data_avg_pred = []
            
            for epoch in epoch_range_desc:
    
                """
                Load model
                """
                filepath_model = os.path.join(folder_weights, f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5')

                if epoch == epoch_range_desc[0]:
                    assert model is None
                    assert len(list_y_pred) == 0

                # Reuse the model object across epochs: only the weights change.
                model = load_model_quick(filepath_model, model)

                """
                Inference
                """
        
                n = NeuralNet(model, w_ext=10)
                y_pred = n.predict(img_x)
                
                """
                Average out predictions
                """
                list_y_pred.append(y_pred)

                y_avg_pred = np.mean(list_y_pred, axis=0)
                
                """
                thresh based on GT
                """

                thresh_single = optimal_test_thresh_equal_distribution(img_y_all, y_pred)
                thresh_avg_pred = optimal_test_thresh_equal_distribution(img_y_all, y_avg_pred)
                
                """
                Get scores
                """
                
                data_single_i = {'k': k,
                                 'i_fold': i_fold,
                                 'epoch': epoch}
                data_avg_pred_i = {'k': k,
                                   'i_fold': i_fold,
                                   'epoch_start': epoch,
                                   'epoch_end': epoch_range_desc[0]}
                
                data_single_i.update(foo_performance(img_y_te, y_pred, thresh_single))
                data_avg_pred_i.update(foo_performance(img_y_te, y_avg_pred, thresh_avg_pred))
                
                if 1:
                    print('single', data_single_i)
                    print('avg pred', data_avg_pred_i)

                lst_data_single.append(data_single_i)
                lst_data_avg_pred.append(data_avg_pred_i)
        
            """
            Save data
            """
            
            df_single = pd.DataFrame(lst_data_single)
            df_avg_pred = pd.DataFrame(lst_data_avg_pred)
            
            # Append (without header) if the CSV already exists, else create it.
            path_single = os.path.join(folder_save, filename_single + '.csv')
            path_avg_pred = os.path.join(folder_save, filename_avg_pred + '.csv')
            if os.path.exists(path_single):
                df_single.to_csv(path_single, mode='a', header=False, index=False)
            else:
                df_single.to_csv(path_single, index=False)

            if os.path.exists(path_avg_pred):
                df_avg_pred.to_csv(path_avg_pred, mode='a', header=False, index=False)
            else:
                df_avg_pred.to_csv(path_avg_pred, index=False)
    
    return
# Exemple #8
# 0
def main():
    """Evaluate pretrained-encoder U-Net k-fold checkpoints on the 10lamb set.

    For each fold, walk the epoch checkpoints from newest to oldest (reusing
    the model object), score both the per-epoch prediction and the running
    average of predictions, then append the scores to two CSV files.
    """

    b_encoder_fixed = False
    info_enc_fixed = '_enc_fixed'

    folder_weights = '/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold_pretrained'
    folder_save = '/home/lameeus/data/ghent_altar/dataframes'
    filename_single = f'pretrained_unet_10lamb_kfold_single'
    filename_avg_pred = f'pretrained_unet_10lamb_kfold_avgpred'
    # Suffix paths/filenames when evaluating the fixed-encoder variant.
    folder_weights += info_enc_fixed if b_encoder_fixed else ''
    filename_single += info_enc_fixed if b_encoder_fixed else ''
    filename_avg_pred += info_enc_fixed if b_encoder_fixed else ''

    fold_range = range(6)
    # fold_range = [0, 1]

    k = 10
    epoch_range = range(1, 40 + 1)

    w_ext_in = 28

    k_fold_train_data = get_10lamb_6patches(5)  # 5 is the number of modalities
    train_data_all = k_fold_train_data.get_train_data_all()
    img_x = train_data_all.get_x_train()
    img_x = rescale0to1(img_x)
    # First 3 channels are assumed to be the clean RGB image — TODO confirm.
    img_clean = img_x[..., :3]
    img_y_all = train_data_all.get_y_train()

    b_plot = False

    for i_fold in fold_range:

        print(i_fold)

        img_y_te = k_fold_train_data.k_split_i(i_fold).get_y_test()

        # Init for range epochs
        lst_data_single = []
        lst_data_avg_pred = []
        list_y_pred = []
        model = None

        # Newest checkpoint first, so the running average starts at the end.
        for epoch in np.sort(epoch_range)[::-1]:

            filepath_model = os.path.join(
                folder_weights, f'unet_enc_k{k}_ifold{i_fold}/w_{epoch}.h5')

            model = load_model_quick(filepath_model, model=model)
            n = NeuralNet(model, w_ext=w_ext_in)
            y_pred = n.predict(img_x)
            """
            Average out predictions
            """
            list_y_pred.append(y_pred)
            y_avg_pred = np.mean(list_y_pred, axis=0)

            thresh_single = optimal_test_thresh_equal_distribution(
                img_y_all, y_pred)
            thresh_avg_pred = optimal_test_thresh_equal_distribution(
                img_y_all, y_avg_pred)

            y_pred_bin = np.greater_equal(y_pred[..., 1], thresh_single)

            dict_perf = foo_performance(img_y_te, y_pred, thresh_single)
            print(dict_perf)

            if b_plot:
                concurrent([
                    y_pred_bin, img_clean,
                    semi_transparant(img_clean, y_pred_bin),
                    semi_transparant(img_clean, img_y_te[..., 1].astype(bool))
                ])

            data_single_i = {'k': k, 'i_fold': i_fold, 'epoch': epoch}
            data_avg_pred_i = {
                'k': k,
                'i_fold': i_fold,
                'epoch_start': epoch,
                'epoch_end': max(epoch_range)
            }

            data_single_i.update(dict_perf)
            data_avg_pred_i.update(
                foo_performance(img_y_te, y_avg_pred, thresh_avg_pred))

            lst_data_single.append(data_single_i)
            lst_data_avg_pred.append(data_avg_pred_i)

        df_single = pd.DataFrame(lst_data_single)
        df_avg_pred = pd.DataFrame(lst_data_avg_pred)

        path_single = os.path.join(folder_save, filename_single + '.csv')
        path_avg_pred = os.path.join(folder_save, filename_avg_pred + '.csv')

        pandas_save(path_single, df_single, append=True)
        pandas_save(path_avg_pred, df_avg_pred, append=True)

    return
 def set_img_x(self):
     """Load the 10lamb training image and store its [0, 1]-normalised
     input on ``self.img_x``."""
     data = get_10lamb_old(self.mod)
     x_raw, _, _, _ = get_training_data(data)
     # Normalise the input before storing it.
     self.img_x = rescale0to1(x_raw)
def main():
    """Train pretrained-encoder U-Nets per fold, then evaluate averaged
    ti-unet predictions under several threshold-selection strategies.

    Part 1 ("Train segmentation"): for each of the 6 folds, build a U-Net on
    top of a pretrained encoder, train it and show a prediction.
    Part 2 ("Classification"): average predictions of several epochs of an
    already-trained ti-unet and compare kappa scores for thresholds derived
    from test, train, and combined ground truth.

    :return: None
    """

    ### Settings
    mod = 5

    w_patch = 16 * 2
    """
    Data (all important modalities)
    """

    # folder_windows = r'C:\Users\Laurens_laptop_w\OneDrive - UGent\data\10lamb'
    train_data = get_10lamb_old(mod)
    img_x, img_y_tr, _, _ = get_training_data(train_data)
    # Normalise the input!
    img_x = rescale0to1(img_x)
    """
    Train segmentation
        1) reuse everything
        2) fix encoder
    """

    if 1:

        if 1:
            b_encoder_fixed = False

            info_enc_fixed = '_enc_fixed' if b_encoder_fixed else ''
            # NOTE: reads k and i_fold late-bound from the enclosing scope.
            get_info = lambda: f'10lamb_kfold_pretrained{info_enc_fixed}/unet_enc_k{k}_ifold{i_fold}'

            n_epochs = 40

            k = 10

            if k == 10:
                epoch_w = 100
            else:
                raise NotImplementedError()

            ### Settings you don't have to change:

            w_patch = 50
            w_ext_in = 28
            b_double = False
            padding = 'valid'

            # TODO flag for converting encoder to dilated conv

            def get_unet_pretrained_encoder():
                # Builds a U-Net: pretrained encoder + fresh 2-class decoder,
                # compiled for segmentation.

                model_encoder = get_model_encoder()

                encoder_inputs = model_encoder.input

                decoder_outputs = decoder(model_encoder, f_out=2)

                model_pretrained_unet = Model(encoder_inputs, decoder_outputs)
                from methods.examples import compile_segm
                compile_segm(model_pretrained_unet, lr=1e-4)

                model_pretrained_unet.summary()

                return model_pretrained_unet

            """
            Train
            """

            k_fold_train_data = get_10lamb_6patches(5)
            for i_fold in range(6):
                """
                Get a new network (not trained yet for segmentation)
                """

                model_pretrained_unet = get_unet_pretrained_encoder()
                n_pretrained_unet = NeuralNet(model_pretrained_unet)
                """
                The data
                """

                train_data_i = k_fold_train_data.k_split_i(i_fold)

                info = get_info()

                img_y_tr = train_data_i.get_y_train()
                img_y_te = train_data_i.get_y_test()

                flow_tr = get_flow(
                    img_x,
                    img_y_tr,
                    w_patch=w_patch,  # Comes from 10
                    w_ext_in=w_ext_in)

                flow_te = get_flow(
                    img_x,
                    img_y_te,
                    w_patch=w_patch,  # Comes from 10
                    w_ext_in=w_ext_in)

                n_pretrained_unet.train(flow_tr,
                                        flow_te,
                                        epochs=n_epochs,
                                        verbose=1,
                                        info=info)
                """
                Prediction
                """

                n_pretrained_unet.w_ext = w_ext_in
                y_pred = n_pretrained_unet.predict(img_x)

                concurrent([y_pred[..., 1]])
    """
    Classification
    """

    if 1:
        im_clean = img_x[..., :3]

        k = 8
        i_fold = 3
        epoch_last = 40

        from methods.examples import kappa_loss, weighted_categorical_crossentropy
        from performance.metrics import accuracy_with0, jaccard_with0
        loss = weighted_categorical_crossentropy((1, 1))

        list_y_pred = []

        ### K fold validation
        k_fold_train_data = get_10lamb_6patches(5)
        train_data_i = k_fold_train_data.k_split_i(i_fold)
        img_y_tr = train_data_i.get_y_train()
        img_y_te = train_data_i.get_y_test()

        # Average predictions over the last 10 epochs (31..40).
        for epoch in np.arange(31, epoch_last + 1):
            filepath_model = f'/scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold/ti_unet_k{k}_kfold{i_fold}/w_{epoch}.h5'

            model = load_model(filepath_model,
                               custom_objects={
                                   'loss': loss,
                                   'accuracy_with0': accuracy_with0,
                                   'jaccard_with0': jaccard_with0,
                                   'kappa_loss': kappa_loss
                               })

            n = NeuralNet(model, w_ext=10)
            y_pred = n.predict(img_x)

            list_y_pred.append(y_pred)

        y_pred_mean = np.mean(list_y_pred, axis=0)
        q1 = y_pred_mean[..., 1]
        concurrent([q1, q1.round(), im_clean])
        """
        Optimal threshold (making conf matrix symmetric, not based on maximising kappa)
        """
        y_gt = np.any([img_y_tr, img_y_te], axis=0)

        from performance.testing import _get_scores, filter_non_zero

        def foo_performance(y_true, y_pred, thresh):
            # is basically argmax
            y_pred_thresh_arg = np.greater_equal(y_pred[..., 1], thresh)

            y_true_flat, y_pred_thresh_arg_flat = filter_non_zero(
                y_true, y_pred_thresh_arg)
            y_te_argmax = np.argmax(y_true_flat, axis=-1)

            # Kappa
            return _get_scores(y_te_argmax, y_pred_thresh_arg_flat)[-1]

        """
        1. BEST? PERFORMANCE based on test set
        """

        print('1. Test distribution optimization')

        thresh = optimal_test_thresh_equal_distribution(img_y_te, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        print('\nIncremental optimization on test set')

        test_thresh2 = test_thresh_incremental(y_pred_mean,
                                               img_y_tr,
                                               img_y_te,
                                               n=5,
                                               verbose=0)

        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, test_thresh2))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean,
                                            test_thresh2))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, test_thresh2))
        """
        2. based on train
        """

        print('\n2. Training distribution optimization')

        thresh = optimal_test_thresh_equal_distribution(img_y_tr, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))
        """
        3. CONSISTENT: based on train+set
        """

        print('\n3. all GT distribution optimization')

        thresh = optimal_test_thresh_equal_distribution(y_gt, y_pred_mean)
        q1_thresh = np.greater_equal(q1, thresh)
        concurrent([q1, q1_thresh, im_clean])

        print(f'thresh: {thresh}')

        # Test, train, both
        print('Kappa performance:')
        print('\ttrain:', foo_performance(img_y_tr, y_pred_mean, thresh))
        print('\ttestset:', foo_performance(img_y_te, y_pred_mean, thresh))
        print('\tboth:', foo_performance(y_gt, y_pred_mean, thresh))

        if 0:
            """
            4. DUMB/Not needed: Based on prediction of whole panel
            """

            thresh = optimal_test_thresh_equal_distribution(y_gt,
                                                            y_pred_mean,
                                                            mask_true=False)
            q1_thresh = np.greater_equal(q1, thresh)
            concurrent([q1, q1_thresh, im_clean])

    print('Done')
# Exemple #11
# 0
    def train(self, xy, validation=None, epochs=1, verbose=1, info='scratch',
              steps_per_epoch=100
              ):
        """Train the wrapped Keras model, checkpointing weights every epoch.

        Side effects: writes weight files under ``<base>/net_weight/<info>``,
        TensorBoard logs under ``<base>/logs/<info>``, and advances
        ``self.epoch`` by ``epochs``.

        :param xy: Can be either tuple of (x, y) or Keras Generator
        :param validation: optional validation data, same accepted forms as xy
        :param epochs: number of additional epochs to train
        :param verbose: Keras verbosity level
        :param info: subfolder name for checkpoints and logs
        :param steps_per_epoch: training steps per epoch (validation uses 1/10)
        :return: None
        """

        if self.norm_x:
            print('normalising input')

            # Normalisation is only supported for raw (x, y) tuples, not
            # for pre-built generators.
            assert isinstance(xy, tuple)

            x, y = xy

            x = rescale0to1(x)

            xy = (x, y)
        
        def get_flow_xy(xy):
            # Coerce (x, y) tuples into a Keras data flow; pass iterators through.
            if isinstance(xy, tuple):
                x, y = map(batch2img, xy)
                
                flow = get_flow(batch2img(x), batch2img(y))
                return flow
            
            elif isinstance(xy, (NumpyArrayIterator, )):
                return xy
                
            else:
                raise TypeError(f'Unkown type for xy: {type(xy)}')

        flow_tr = get_flow_xy(xy)
        
        flow_va = get_flow_xy(validation) if (validation is not None) else None

        folder_base = 'C:/Users/admin/Data/ghent_altar/' if os.name == 'nt' else '/scratch/lameeus/data/ghent_altar/'

        folder_checkpoint = os.path.join(folder_base, 'net_weight', info)
        filepath_checkpoint = os.path.join(folder_checkpoint, 'w_{epoch}.h5')
        folder_tensorboard = os.path.join(folder_base, 'logs', info)
        
        if not os.path.exists(folder_checkpoint):
            os.makedirs(folder_checkpoint)
            
        checkpoint = ModelCheckpoint(filepath_checkpoint, save_weights_only=False)
        # Only save the graph if it's first epoch training the model
        tensorboard = TensorBoard(folder_tensorboard, write_graph=self.epoch==0)
        callbacks = [checkpoint, tensorboard]
        
        # Resume from self.epoch so checkpoint filenames keep incrementing
        # across successive train() calls.
        self.get_model().fit_generator(flow_tr,
                                       initial_epoch=self.epoch, epochs=self.epoch+epochs,
                                       steps_per_epoch=steps_per_epoch,
                                       validation_data=flow_va, validation_steps=steps_per_epoch//10,
                                       verbose=verbose, callbacks=callbacks)
        self.epoch += epochs