    def get_tiunet_preenc(self, k=10, lr=1e-4):
        """Build a TI-UNet whose encoder is initialised with pre-trained encoder weights.

        :param k: number of filters per convolutional block
        :param lr: learning rate for compiling the segmentation model
        :return: compiled TI-UNet, with the encoder optionally frozen (self.fixed_enc == 1)
        """

        model_encoder = self.get_encoder(k)
        n_in = get_mod_n_in(self.mod)  # number of input channels for the given modalities

        model_tiunet = ti_unet(n_in,
                               filters=k,
                               w=self.w_patch,
                               ext_in=10 // 2,
                               batch_norm=self.batch_norm)

        # Cut the TI-UNet at the deepest encoder layer to obtain its encoder sub-model.
        if self.batch_norm:
            o = model_tiunet.get_layer(f'batchnorm_left{self.depth}_0').output
        else:
            o = model_tiunet.get_layer(f'left{self.depth}_0').output
        model_tiunet_encoder = Model(model_tiunet.input, o)

        # Copy the pre-trained encoder weights into the TI-UNet encoder sub-model.
        model_tiunet_encoder.set_weights(model_encoder.get_weights())

        if self.fixed_enc == 1:
            # Freeze the encoder so that only the decoder layers are trained.
            for layer in model_tiunet_encoder.layers:
                layer.trainable = False

        model_tiunet.summary()

        compile_segm(model_tiunet, lr=lr)

        return model_tiunet
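    # Usage sketch (a minimal example, not from this file: `exp` stands for a
    # configured instance exposing get_encoder(), mod, w_patch, depth,
    # batch_norm and fixed_enc; `flow_tr` is a training generator assumed to
    # exist elsewhere):
    #
    #   model = exp.get_tiunet_preenc(k=10, lr=1e-4)
    #   model.fit(flow_tr, epochs=10)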
    def get_unet_pretrained_encoder(self):

        model_encoder = get_model_encoder()

        encoder_inputs = model_encoder.input

        decoder_outputs = decoder(model_encoder, f_out=2)

        model_pretrained_unet = Model(encoder_inputs, decoder_outputs)
        from methods.examples import compile_segm
        compile_segm(model_pretrained_unet, lr=1e-4)

        model_pretrained_unet.summary()

        return model_pretrained_unet
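    # Note: `get_model_encoder` and `decoder` are helpers defined elsewhere.
    # `decoder` is expected to append a decoding path onto the encoder output,
    # analogous to the inline decoder built in get_unet_preenc below; a minimal
    # sketch under that assumption:
    #
    #   def decoder(model_encoder, f_out=2):
    #       from keras.layers import Conv2D
    #       l = Conv2D(10, (3, 3), activation='elu', padding='valid')(model_encoder.output)
    #       return Conv2D(f_out, (1, 1), activation='softmax')(l)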
    def model(self, b_optimal_lr=False):
        # data_name, model_name, w_patch, w_ext_in, d and lr are module-level settings.

        # '19botrightcrack3' is segmented into 3 classes, all other datasets into 2.
        features_out = 3 if data_name == '19botrightcrack3' else 2

        if model_name == 'ti-unet':
            model = ti_unet(
                9,
                filters=self.k,
                w=w_patch,
                ext_in=w_ext_in // 2,
                batch_norm=True,
                max_depth=d,
                features_out=features_out,
            )

        elif model_name == 'unet':
            print('Warning: batch norm is not implemented for unet')
            model = unet(9,
                         filters=self.k,
                         w=w_patch,
                         ext_in=w_ext_in // 2,
                         max_depth=d,
                         n_per_block=1)

        else:
            raise ValueError(model_name)

        model.summary()

        compile_segm(model, lr=lr)

        if b_optimal_lr:
            from neuralNetwork.optimization import find_learning_rate

            global flow_tr  # training data generator, defined at module level
            find_learning_rate(model, flow_tr)

        self.neural_net = NeuralNet(model, w_ext=w_ext_in)

        if data_name[:5] == '1319_':  # pre-load weights trained on '1319'
            # TODO: decide which epoch to start from; 10 should already have an impact.
            epoch_start = 50  # probably better (has already learned something)
            self.neural_net.load(
                f'C:/Users/admin/Data/ghent_altar/net_weight/1319/{model_name}_d{d}_k{self.k}',
                epoch=epoch_start)
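    # Usage sketch (assumes the module-level settings data_name, model_name,
    # w_patch, w_ext_in, d, lr and flow_tr are configured; `train` is an
    # instance of this class):
    #
    #   train.model(b_optimal_lr=False)
    #   print(train.neural_net)  # wraps the compiled Keras model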
    def get_n_model_regular(self, i_fold=None, k=None, epoch=None):
        import os

        n_in = 9

        # TODO: this TI-UNet is deliberately built with the wrong batch norm
        # (wrong_batch_norm=True) to stay compatible with the saved weights.
        model_tiunet = ti_unet(n_in,
                               filters=k,
                               w=self.w_patch,
                               ext_in=10 // 2,
                               batch_norm=True,
                               wrong_batch_norm=True)
        compile_segm(model_tiunet, 1e-4)

        n = NeuralNet(model_tiunet, w_ext=10)

        info = f'10lamb_kfold/ti_unet_k{k}_kfold{i_fold}'
        folder = os.path.join(
            '/scratch/lameeus/data/ghent_altar/net_weight', info)
        n.load(folder=folder, epoch=epoch)

        return n
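    # Usage sketch (hypothetical call; the hard-coded /scratch/lameeus/...
    # weight folder for the requested fold and epoch must exist):
    #
    #   n = train.get_n_model_regular(i_fold=0, k=10, epoch=40)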
    def main_net(self, set_n):
        from methods.examples import compile_segm
        from neuralNetwork.architectures import ti_unet, convNet, unet

        n_name = set_n['name'].lower()
        if n_name == 'ti-unet':
            model = ti_unet(9,
                            filters=self.k,
                            w=self.w_patch,
                            ext_in=self.w_ext_in // 2,
                            batch_norm=True,
                            max_depth=self.d)
        elif n_name == 'simple':
            model = convNet(9,
                            self.k,
                            w_in=self.w_patch + self.w_ext_in,
                            n_convs=5,
                            batch_norm=False,
                            padding='valid')

            # With valid padding the output must match the requested patch size and 2 classes.
            assert model.output_shape[-3:] == (self.w_patch, self.w_patch, 2)

        elif n_name == 'unet':
            print('Warning: batch norm is not implemented for unet')
            model = unet(9,
                         filters=self.k,
                         w=self.w_patch,
                         ext_in=self.w_ext_in // 2,
                         max_depth=self.d,
                         n_per_block=1)
        else:
            raise ValueError(n_name)

        model.summary()
        compile_segm(
            model, lr=self.lr)  # 10e-4 is probably more stable than 10e-3

        return model
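    # Usage sketch (hypothetical `set_n` dict; only set_n['name'] is read):
    #
    #   model = train.main_net({'name': 'ti-unet'})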
    @staticmethod
    def set_encoder_state(model, trainable=False):
        # The first 7 of the 14 layers form the encoder; toggle their
        # trainability and recompile so the change takes effect.
        assert len(model.layers) == 14
        for layer in model.layers[:7]:
            layer.trainable = trainable
        compile_segm(model)
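    # Usage sketch: freeze the encoder half before fine-tuning the decoder,
    # then unfreeze for end-to-end training (recompiled on each call; `flow_tr`
    # is the assumed training generator, `train` an instance of this class):
    #
    #   train.set_encoder_state(model, trainable=False)
    #   model.fit(flow_tr, epochs=5)
    #   train.set_encoder_state(model, trainable=True)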
    def get_unet_preenc(
        self,
        k=10,
        lr=1e-4,
        f_out=2,
    ):
        """Build a U-Net on top of the pre-trained encoder by adding a decoder path.

        :param k: number of filters per convolutional block
        :param lr: learning rate for compiling the segmentation model
        :param f_out: number of output classes (softmax channels)
        :return: compiled U-Net with pre-trained encoder weights
        """

        from keras.layers import Conv2D, UpSampling2D, Concatenate, Cropping2D, Conv2DTranspose, BatchNormalization
        from methods.examples import compile_segm

        model_encoder = self.get_encoder(k)

        b_double = False  # if True, the filter count would double per depth level
        padding = 'valid'

        # Start the decoder from the deepest encoder feature map.
        l = model_encoder.output

        # Crop widths of the skip connections per depth level (valid padding
        # shrinks the decoder feature maps relative to the encoder outputs).
        if self.depth == 2:
            list_w_crop = [12, 4]
        elif self.depth == 1:
            list_w_crop = [4]
        else:
            raise NotImplementedError(f'depth {self.depth}: crop widths not defined')

        # Build the decoder from the deepest level up to the output resolution.
        for i_d in reversed(range(self.depth)):
            f = 2**i_d * k if b_double else k
            l = Conv2D(f, (3, 3),
                       activation='elu',
                       padding=padding,
                       name=f'dec{i_d+1}')(l)

            if self.batch_norm:
                l = BatchNormalization(name=f'batchnorm_dec{i_d+1}')(l)

            # Learned upsampling; a plain UpSampling2D(2) would be the non-learned alternative.
            l = Conv2DTranspose(f, (2, 2), strides=(2, 2))(l)
            if self.batch_norm:
                l = BatchNormalization(name=f'batchnorm_up{i_d}')(l)

            # Skip connection: crop the encoder feature map to match, then concatenate.
            l_left_crop = Cropping2D(list_w_crop[i_d], name=f'crop_enc{i_d}')(
                model_encoder.get_layer(f'enc{i_d}').output)
            l = Concatenate(name=f'conc_dec{i_d}')([l, l_left_crop])

        l = Conv2D(k, (3, 3),
                   activation='elu',
                   padding=padding,
                   name=f'dec{0}')(l)
        if self.batch_norm:
            l = BatchNormalization(name=f'batchnorm_dec{0}')(l)
        # Per-pixel softmax over the f_out output classes.
        decoder_outputs = Conv2D(f_out, (1, 1),
                                 activation='softmax',
                                 padding=padding)(l)

        model_pretrained_unet = Model(model_encoder.input, decoder_outputs)
        compile_segm(model_pretrained_unet, lr=lr)

        model_pretrained_unet.summary()

        return model_pretrained_unet
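    # The crop widths ([12, 4] for depth 2, [4] for depth 1) compensate for the
    # border pixels lost by the valid-padded decoder convolutions relative to
    # the encoder skip outputs. Quick sanity check (hypothetical instance
    # `exp`): Concatenate raises a shape mismatch if the crops were wrong.
    #
    #   unet = exp.get_unet_preenc(k=10, lr=1e-4, f_out=2)
    #   print(unet.output_shape)  # expected: (None, exp.w_patch, exp.w_patch, 2)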