Example #1
def build_basic_AE_encoder(inp,
                           filters,
                           res_layers,
                           relu_param=0.3,
                           use_batch_norm=True,
                           use_dropout=False):
    """
        Helper Function to build an encoder really fast
    :param inp:
    :param filters:
    :param res_layers:
    :param relu_param:
    :param use_batch_norm:
    :param use_dropout:
    :return:
    """

    feat = build_basic_encoder(inp, filters, relu_param, use_batch_norm,
                               use_dropout)
    if res_layers > 0:
        tfeat = build_basic_transformation_layers(feat, res_layers,
                                                  filters[-1], relu_param,
                                                  use_batch_norm, use_dropout)
    else:
        tfeat = feat
    return tfeat
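
A minimal usage sketch for the helper above, assuming the module's builder functions (build_basic_encoder, build_basic_transformation_layers) are importable alongside it; the input shape and filter list are made-up illustration values, not taken from the original project:

from keras.layers import Input

# hypothetical example values for illustration only
inp = Input(shape=(1024, 1))            # placeholder 1-D input (length, channels)
filters = [32, 64, 128]                 # one entry per downsampling stage
encoded = build_basic_AE_encoder(inp, filters, res_layers=2,
                                 relu_param=0.3,
                                 use_batch_norm=True,
                                 use_dropout=False)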
Example #2
    def __init__(self, ae_name, img_shape, cfg, AE_eval):
        self.name = ae_name
        self.optimizers = possible_optimizers           # {'adam':keras.optimizers.adam ......}
        self.loss_functions = possible_loss_functions   # {'mae':keras.losses.mae, 'mse':keras.losses.mse ......}
        self.ae_eval = AE_eval
        self.seed = cfg['SEED']

        lr              = cfg['VAE']['LEARNING_RATE']
        lr_decay        = cfg['VAE']['LR_DEF']
        relu_param      = cfg['VAE']['RELU_PARAM']
        optimizer       = self.optimizers[cfg['VAE']['OPTIMIZER']]
        use_drop_out    = cfg['VAE']['USE_DROP_OUT']
        use_batch_normalisation = cfg['VAE']['USE_BATCH_NORM']
        trafo_layers            = cfg['VAE']['TRAFO_LAYERS']
        filters                 = cfg['VAE']['FILTERS']
        metric_2_be_used        = cfg['METRIC']


        inp = Input(img_shape)
        feat = build_basic_encoder(inp, filters, relu_param, use_batch_normalisation, use_drop_out)
        if trafo_layers > 0:
            tfeat = build_basic_transformation_layers(feat, trafo_layers, filters[-1], relu_param, use_batch_normalisation, use_drop_out)
        else:
            tfeat = feat
        out = build_basic_decoder(tfeat, list(reversed(filters)), relu_param, use_batch_normalisation, use_drop_out)

        self.auto_encoder = Model(inp, out, name=ae_name)

        print('Basic nRes AE: ')
        self.auto_encoder.summary()

        # compile model
        # learning rate and decay passed by keyword to the Keras optimizer
        ae_optimizer = optimizer(lr=lr, decay=lr_decay)

        if metric_2_be_used == 1:
            print('using trim metric')
            def ae_loss_function(xx, yy):
                # trimmed MAE: absolute errors below 0.005 are zeroed out
                # before averaging, so tiny deviations do not contribute
                yy_xx_abs = keras.backend.abs(yy[:, 0] - xx[:, 0])
                c_cond = keras.backend.less(yy_xx_abs, 0.005)
                a_new = keras.backend.switch(c_cond, keras.backend.zeros_like(yy_xx_abs), yy_xx_abs)
                return keras.backend.mean(a_new)

            autoencoder_loss = ae_loss_function
        else:
            autoencoder_loss = cfg['VAE']['IMAGE_LOSS']
        self.auto_encoder.compile(optimizer=ae_optimizer, loss=autoencoder_loss)


        # plot model to file
        keras.utils.plot_model(self.auto_encoder, self.ae_eval.model_saves_dir + '/' + self.auto_encoder.name + '.png')
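
The constructor above only reads a handful of configuration keys; below is a hedged sketch of a matching cfg dictionary with placeholder values (the real project's values and any additional keys are unknown):

# hypothetical configuration covering exactly the keys read above
cfg = {
    'SEED': 42,
    'METRIC': 1,                     # 1 selects the trim loss, anything else uses cfg['VAE']['IMAGE_LOSS']
    'VAE': {
        'LEARNING_RATE': 1e-4,
        'LR_DEF': 1e-6,              # used as the learning-rate decay
        'RELU_PARAM': 0.3,
        'OPTIMIZER': 'adam',         # key into possible_optimizers
        'USE_DROP_OUT': False,
        'USE_BATCH_NORM': True,
        'TRAFO_LAYERS': 2,
        'FILTERS': [32, 64, 128],
        'IMAGE_LOSS': 'mae',
    },
}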
Example #3
    def __init__(self, vae_name, img_shape, cfg, id='', path=None):
        self.name = vae_name
        self.optimizers = {'adam': keras.optimizers.adam}

        if path is None:
            vae_input_id = Input(shape=img_shape)
            vae_input_no = Input(shape=img_shape)

            lr = cfg['VAE' + id]['LEARNING_RATE']
            lr_decay = cfg['VAE' + id]['LR_DEF']
            relu_param = cfg['VAE' + id]['RELU_PARAM']
            optimizer = self.optimizers[cfg['VAE' + id]['OPTIMIZER']]
            filters = cfg['VAE' + id]['FILTERS']
            use_drop_out = cfg['VAE' + id]['USE_DROP_OUT']
            use_batch_normalisation = cfg['VAE' + id]['USE_BATCH_NORM']
            trafo_layers = cfg['VAE' + id]['TRAFO_LAYERS']
            filters_rev = list(reversed(filters))
            # spatial length after len(filters) downsampling stages (integer division)
            feature_inp = Input(shape=(img_shape[0] // (2**len(filters)),
                                       filters[-1]))

            # -- VAE Model ID ---
            encoder_ID_features = build_basic_encoder(vae_input_id, filters,
                                                      relu_param,
                                                      use_batch_normalisation,
                                                      use_drop_out)
            decoder_ID_out = build_basic_decoder(feature_inp, filters_rev,
                                                 relu_param,
                                                 use_batch_normalisation,
                                                 use_drop_out)
            self.decoder_optimizer = optimizer(lr=lr, decay=lr_decay)
            self.Decoder = Model(inputs=[feature_inp],
                                 outputs=[decoder_ID_out],
                                 name=vae_name + '_decoder')

            print('Decoder Layout: ')
            self.Decoder.summary()
            self.Decoder.compile(optimizer=self.decoder_optimizer,
                                 loss=cfg['VAE' + id]['DECODER_LOSS'])

            # build vae_id encoder:
            vae__out_id = self.Decoder(encoder_ID_features)
            self.vae_id_optimizer = optimizer(lr=lr, decay=lr_decay)
            self.VAE_ID = Model(inputs=[vae_input_id],
                                outputs=[vae__out_id],
                                name=vae_name + '_vaeID')
            self.VAE_ID_MultiOut = Model(
                inputs=[vae_input_id],
                outputs=[vae__out_id, encoder_ID_features],
                name=vae_name + '_vaeIDMOU')

            print('VAE-ID Layout: ')
            self.VAE_ID.summary()
            self.VAE_ID_MultiOut.compile(
                optimizer=self.vae_id_optimizer,
                loss=[cfg['VAE' + id]['IMAGE_LOSS'], None],
                loss_weights=[1, None])
            self.VAE_ID.compile(optimizer=self.vae_id_optimizer,
                                loss=cfg['VAE' + id]['IMAGE_LOSS'])

            # freeze the decoder for the domain A->B path so that the NO encoder
            # and the transformation layers learn to reproduce features the
            # fixed decoder can already decode
            self.Decoder.trainable = False

            # transformation Model
            encoder_NO_features = build_basic_encoder(vae_input_no, filters,
                                                      relu_param,
                                                      use_batch_normalisation,
                                                      use_drop_out)
            transfo_NO_features = build_basic_transformation_layers(
                encoder_NO_features, trafo_layers, filters[-1], relu_param,
                use_batch_normalisation, use_drop_out)
            vae__out_no = self.Decoder(transfo_NO_features)
            self.vae_no_optimizer = optimizer(lr=lr, decay=lr_decay)
            self.VAE_NO = Model(inputs=[vae_input_no],
                                outputs=[vae__out_no, transfo_NO_features],
                                name=vae_name + '_vaeNO')
            self.VAE_NO_MulitOut = Model(inputs=[vae_input_no],
                                         outputs=[
                                             vae__out_no, transfo_NO_features,
                                             encoder_NO_features
                                         ],
                                         name=vae_name + '_vaeNOMOU')

            print('VAE-NO Layout: ')
            self.VAE_NO.summary()
            self.VAE_NO.compile(optimizer=self.vae_no_optimizer,
                                loss=[
                                    cfg['VAE' + id]['IMAGE_LOSS'],
                                    cfg['VAE' + id]['FEATURE_LOSS']
                                ],
                                loss_weights=cfg['VAE' + id]['LOSS_WEIGHTS'])
            self.VAE_NO_MulitOut.compile(optimizer=self.vae_no_optimizer,
                                         loss=[
                                             cfg['VAE' + id]['IMAGE_LOSS'],
                                             cfg['VAE' + id]['FEATURE_LOSS'],
                                             None
                                         ],
                                         loss_weights=[1, 1, None])
            # loss weights are irrelevant here since this model won't be trained directly
        else:
            self.load_Model(path)
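
For orientation, a hedged construction sketch for this class; the class name SomeResVAE, img_shape, and all config values are placeholders (the feature Input of shape (img_shape[0] // 2**len(filters), filters[-1]) suggests 1-D inputs whose length is halved by each encoder stage):

# hypothetical instantiation; names and values are illustrative only
img_shape = (1024, 1)                     # (length, channels) of a 1-D signal
cfg = {
    'VAE': {                              # section key is 'VAE' + id, here id=''
        'LEARNING_RATE': 1e-4, 'LR_DEF': 1e-6, 'RELU_PARAM': 0.3,
        'OPTIMIZER': 'adam', 'FILTERS': [32, 64, 128],
        'USE_DROP_OUT': False, 'USE_BATCH_NORM': True, 'TRAFO_LAYERS': 2,
        'DECODER_LOSS': 'mae', 'IMAGE_LOSS': 'mae', 'FEATURE_LOSS': 'mse',
        'LOSS_WEIGHTS': [1.0, 0.5],
    },
}
vae = SomeResVAE('vae', img_shape, cfg, id='')   # placeholder class name
# vae.VAE_ID trains encoder_ID together with the shared Decoder, while
# vae.VAE_NO updates only the NO encoder and the transformation layers,
# because the Decoder was frozen before VAE_NO was built and compiled.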