    def get_tiunet_preenc(self, k=10, lr=1e-4):
        """
        Build a TI-U-Net whose encoder weights are initialised from a
        pre-trained encoder (and frozen when self.fixed_enc == 1).

        :param k: number of filters per layer
        :param lr: learning rate used to compile the model
        :return: compiled TI-U-Net model
        """

        model_encoder = self.get_encoder(k)
        n_in = get_mod_n_in(self.mod)

        model_tiunet = ti_unet(n_in,
                               filters=k,
                               w=self.w_patch,
                               ext_in=10 // 2,
                               batch_norm=self.batch_norm)

        if self.batch_norm:
            o = model_tiunet.get_layer(f'batchnorm_left{self.depth}_0').output
        else:
            o = model_tiunet.get_layer(f'left{self.depth}_0').output
        model_tiunet_encoder = Model(model_tiunet.input, o)

        model_tiunet_encoder.set_weights(model_encoder.get_weights())

        if self.fixed_enc == 1:
            for layer in model_tiunet_encoder.layers:
                layer.trainable = False

        model_tiunet.summary()

        compile_segm(model_tiunet, lr=lr)

        return model_tiunet
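
# Sketch (assumption, not project code): the encoder weight-transfer pattern
# used above, in miniature. Build a pre-trained encoder, copy its weights into
# the matching truncated prefix of a bigger model, then freeze those layers.
# Layer names and sizes are illustrative only.
from keras.layers import Conv2D, Input
from keras.models import Model


def make_encoder_graph():
    inp = Input((None, None, 3))
    return inp, Conv2D(8, 3, padding='same', name='left1_0')(inp)


# Pre-trained encoder (stand-in for self.get_encoder(k)).
enc_in, enc_out = make_encoder_graph()
encoder = Model(enc_in, enc_out)

# Full model: the same encoder architecture plus a segmentation head.
full_in, full_out = make_encoder_graph()
full = Model(full_in, Conv2D(2, 1, name='head')(full_out))

# Truncate the full model at the encoder output and copy the weights across.
full_encoder = Model(full.input, full.get_layer('left1_0').output)
full_encoder.set_weights(encoder.get_weights())

# Freeze the transferred layers (the fixed_enc == 1 case above).
for layer in full_encoder.layers:
    layer.trainable = False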
    def get_tiunet_y(self):
        """WIP: TI-U-Net with an extra autoencoder-decoder branch (Y-shaped)."""

        from keras.layers import AveragePooling2D

        n_in = get_mod_n_in(self.mod)
        model_tiunet = ti_unet(n_in,
                               filters=self.k,
                               w=self.w_patch,
                               ext_in=self.w_ext_in_ti // 2,
                               batch_norm=self.batch_norm)

        model_ae_example = get_neural_net_ae(
            self.mod,
            k=self.k,
            w_in=self.w_patch,
            b_double=False,
            batch_norm=self.batch_norm).get_model()

        # The name below supersedes the encoder-output lookup that was here:
        # name_dec_in = 'batchnorm_enc_output' if self.batch_norm else 'encoder_output'
        name_dec_in = f'dec{self.depth}'
        dec_in = model_ae_example.get_layer(name_dec_in).input
        ae_decoder = Model(dec_in, model_ae_example.layers[-1].output)

        # TODO use pretrained AE?

        name_enc = 'batchnorm_left1_0' if self.batch_norm else 'left1_0'
        layer_enc = model_tiunet.get_layer(name_enc)

        # Pure subsampling: pool size (1, 1) with stride (2, 2), so nothing is averaged.
        x = AveragePooling2D((1, 1), (2, 2))(layer_enc.output)

        # TODO use crop!

        # TODO: unfinished; the AE decoder is built but not attached yet.
        return
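
# Sketch (assumption, not project code): AveragePooling2D with pool size
# (1, 1) and stride (2, 2) is pure subsampling; every second row and column
# is kept and nothing is averaged.
import numpy as np
from keras.layers import AveragePooling2D, Input
from keras.models import Model

inp = Input((4, 4, 1))
m = Model(inp, AveragePooling2D((1, 1), (2, 2))(inp))

x = np.arange(16, dtype='float32').reshape(1, 4, 4, 1)
y = m.predict(x, verbose=0)
print(y.shape)       # (1, 2, 2, 1)
print(y[0, ..., 0])  # [[ 0.  2.] [ 8. 10.]]: a strided copy of the input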
Example #3
    def model(self, b_optimal_lr=False):

        features_out = 3 if data_name == '19botrightcrack3' else 2

        if model_name == 'ti-unet':
            model = ti_unet(
                9,
                filters=self.k,
                w=w_patch,
                ext_in=w_ext_in // 2,
                batch_norm=True,
                max_depth=d,
                features_out=features_out,
            )

        elif model_name == 'unet':
            # model = ti_unet(9, filters=self.k, w=w_patch, ext_in=w_ext_in // 2, batch_norm=True,
            #                 max_depth=d)
            print('WARNING: batch norm is not implemented for unet')
            model = unet(9,
                         filters=self.k,
                         w=w_patch,
                         ext_in=w_ext_in // 2,
                         max_depth=d,
                         n_per_block=1)

        else:
            raise ValueError(model_name)

        model.summary()

        compile_segm(model, lr=lr)

        if b_optimal_lr:
            from neuralNetwork.optimization import find_learning_rate

            global flow_tr  # training data flow defined at module level
            find_learning_rate(model, flow_tr)

        self.neural_net = NeuralNet(model, w_ext=w_ext_in)

        if data_name[:5] == '1319_':  # pre-load model weights
            # TODO: which epoch to start from? Even epoch 10 should have an impact.
            epoch_start = 50  # probably better: the net has already learned something
            self.neural_net.load(
                f'C:/Users/admin/Data/ghent_altar/net_weight/1319/{model_name}_d{d}_k{self.k}',
                epoch=epoch_start)
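
# The project's find_learning_rate is not shown in this excerpt; below is a
# minimal, generic sketch of the standard LR range test such a helper usually
# implements: sweep the learning rate geometrically and watch where the loss
# starts to diverge. Model and data here are illustrative placeholders.
import numpy as np
from keras.callbacks import LearningRateScheduler
from keras.layers import Dense, Input
from keras.models import Model

inp = Input((4,))
toy = Model(inp, Dense(1)(inp))
toy.compile('sgd', 'mse')

lrs = np.geomspace(1e-6, 1e-1, num=20)
sweep = LearningRateScheduler(lambda epoch: float(lrs[epoch]))

x = np.random.rand(256, 4).astype('float32')
y = np.random.rand(256, 1).astype('float32')
hist = toy.fit(x, y, epochs=len(lrs), batch_size=32,
               callbacks=[sweep], verbose=0)

# Pick a learning rate roughly an order of magnitude below the divergence point.
for lr_i, loss_i in zip(lrs, hist.history['loss']):
    print(f'lr={lr_i:.1e}  loss={loss_i:.4f}')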
    def get_n_model_regular(self, i_fold=None, k=None, epoch=None):

        n_in = 9

        model_tiunet = ti_unet(n_in,
                               filters=k,
                               w=self.w_patch,
                               ext_in=10 // 2,
                               batch_norm=True,
                               wrong_batch_norm=True)
        """ TODO wrong tiunet """

        n = NeuralNet(model_tiunet, w_ext=10)

        info = f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}'
        folder = os.path.join('/scratch/lameeus/data/ghent_altar/net_weight',
                              info)
        n.load(folder=folder, epoch=epoch)

        return n

    def get_n_model(self, i_fold=None, k=None, epoch=None):

        n_in = 9

        model_tiunet = ti_unet(n_in,
                               filters=k,
                               w=self.w_patch,
                               ext_in=10 // 2,
                               batch_norm=True)

        n = NeuralNet(model_tiunet, w_ext=10)

        info_batchnorm = '_batchnorm'
        if self.fixed_enc == 1:
            info_fixed = '_encfixed'
        elif self.fixed_enc == 2:
            info_fixed = '_prefixed'
        else:
            info_fixed = ''
        info_model = 'tiunet'
        info = f'10lamb_kfold_pretrained{info_fixed}{info_batchnorm}/{info_model}_d{1}_k{k}_ifold{i_fold}'
        folder = os.path.join('/scratch/lameeus/data/ghent_altar/net_weight',
                              info)
        n.load(folder=folder, epoch=epoch)

        return n
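
# Runnable sketch of the weight-folder naming convention used above; the
# parameter values are illustrative, the base path comes from the source.
import os

k, i_fold, fixed_enc = 10, 0, 1
info_fixed = {1: '_encfixed', 2: '_prefixed'}.get(fixed_enc, '')
info = f'10lamb_kfold_pretrained{info_fixed}_batchnorm/tiunet_d1_k{k}_ifold{i_fold}'
print(os.path.join('/scratch/lameeus/data/ghent_altar/net_weight', info))
# -> /scratch/lameeus/data/ghent_altar/net_weight/10lamb_kfold_pretrained_encfixed_batchnorm/tiunet_d1_k10_ifold0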
    def main_net(self, set_n):
        from methods.examples import compile_segm
        from neuralNetwork.architectures import ti_unet, convNet, unet

        n_name = set_n['name'].lower()
        if n_name == 'ti-unet':
            model = ti_unet(9,
                            filters=self.k,
                            w=self.w_patch,
                            ext_in=self.w_ext_in // 2,
                            batch_norm=True,
                            max_depth=self.d)
        elif n_name == 'simple':
            model = convNet(9,
                            self.k,
                            w_in=self.w_patch + self.w_ext_in,
                            n_convs=5,
                            batch_norm=False,
                            padding='valid')

            assert model.output_shape[-3:] == (self.w_patch, self.w_patch, 2)

        elif n_name == 'unet':
            # NOTE: the U-Net variant is not fully implemented (no batch norm;
            # doubling and filters per block are unresolved).
            print('WARNING: batch norm is not implemented for unet')
            model = unet(9,
                         filters=self.k,
                         w=self.w_patch,
                         ext_in=self.w_ext_in // 2,
                         max_depth=self.d,
                         n_per_block=1)
        else:
            raise ValueError(n_name)

        model.summary()
        compile_segm(model, lr=self.lr)  # 10e-4 is probably more stable than 10e-3

        return model
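
# Sketch (assumption, not the project's convNet): the width bookkeeping behind
# the 'simple' branch's assert. Five 'valid' 3x3 convolutions each trim 2
# pixels, so feeding w_patch + w_ext_in with w_ext_in = 10 brings the output
# back to w_patch.
from keras.layers import Conv2D, Input
from keras.models import Model

w_patch, w_ext_in = 32, 10
inp = Input((w_patch + w_ext_in, w_patch + w_ext_in, 9))
x = inp
for _ in range(5):  # n_convs=5, padding='valid'
    x = Conv2D(4, 3, padding='valid')(x)
x = Conv2D(2, 1, activation='softmax')(x)  # two-class output, as in the assert
assert Model(inp, x).output_shape[-3:] == (w_patch, w_patch, 2)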
Example #7
def neuralNet0(mod,
               lr=None,
               k=20,
               verbose=1,
               class_weights=None,
               batch_norm=True):

    n_in = get_mod_n_in(mod)

    # Manually toggled architecture switch: only the final (TI-U-Net) branch is active.
    if 0:
        model = fullyConnected1x1(n_in, k=k, batch_norm=batch_norm)
        w_ext = 0
    elif 0:
        model = convNet(n_in, k=k, batch_norm=batch_norm)
        w_ext = 2
    elif 0:
        model = unet(n_in, filters=k, batch_norm=batch_norm)
        w_ext = 2
    else:
        w_ext = 10
        model = ti_unet(n_in,
                        filters=k,
                        w=10,
                        ext_in=w_ext // 2,
                        batch_norm=batch_norm)

    if verbose:
        model.summary()

    args = {}
    if lr is not None:
        args['lr'] = lr
    if class_weights is not None:
        args['class_weights'] = class_weights

    compile_segm(model, **args)

    n = NeuralNet(model, w_ext=w_ext)

    return n
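
# Hedged usage sketch: `mod` is whatever modality descriptor get_mod_n_in
# expects (not shown in this excerpt), so no concrete value is given here.
#
#   n = neuralNet0(mod, lr=1e-4, k=20, class_weights=(1., 5.))
#   # -> a NeuralNet wrapping a TI-U-Net with w_ext = 10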