Example #1
def test_gan_custom_layer_graph():
    z_shape = (1, 8, 8)
    z = Input(shape=z_shape, name='z')
    gen_cond = Input(shape=(1, 8, 8), name='gen_cond')

    inputs = [z, gen_cond]
    gen_input = merge(inputs, mode='concat', concat_axis=1)
    gen_output = Convolution2D(1, 2, 2, activation='relu',
                               name='g1',
                               border_mode='same')(gen_input)
    generator = Container(inputs, gen_output)

    f, r = Input(z_shape, name='fake'), Input(z_shape, name='real')
    inputs = [f, r]
    dis_input = merge(inputs, mode='concat', concat_axis=0)
    dis_conv = Convolution2D(5, 2, 2, name='d1', activation='relu')(dis_input)
    dis_flatten = Flatten()(dis_conv)
    dis = Dense(1, activation='sigmoid')(dis_flatten)
    discriminator = Container(inputs, gan_outputs(dis))

    gan = GAN(generator, discriminator, z_shape=z_shape, real_shape=z_shape)
    gan.build('adam', 'adam', gan_binary_crossentropy)
    fn = gan.compile_custom_layers(['g1', 'd1'])
    z = np.random.uniform(-1, 1, (64,) + z_shape)
    real = np.random.uniform(-1, 1, (64,) + z_shape)
    cond = np.random.uniform(-1, 1, (64,) + z_shape)
    print(z.shape)
    print(real.shape)
    print(cond.shape)
    fn({'z': z, 'gen_cond': cond, 'real': real})
Example #3
 def obsModule(self, kpm=True, neighEnc=True, visEnc=False, vStt=False):
     if (neighEnc): self.neighStateEncoder(vStt=vStt)
     if (visEnc): self.neighVisEncoder()
     if (kpm):
         self.kpmGate()
         mod_inp = []
         mod_out = []
         if (neighEnc):
             mod_inp += [self.pose, self.gaze, self.modeN]
             mod_out = Multiply()([
                 self.kpm_mod([self.pose, self.gaze]),
                 self.nstate_mod([self.modeN])
             ])
         if (visEnc):
             mult_out = Multiply()([
                 self.kpm_mod([self.pose, self.gaze]),
                 self.nvis_mod([self.visP, self.visG])
             ])
             # mod_out is a tensor if the neighEnc branch ran, else still [].
             if not isinstance(mod_out, list): mod_out = Cat()([mod_out, mult_out])
             else: mod_out = mult_out
             mod_inp += [self.visP, self.visG]
         self.obs_mod = Container(mod_inp, mod_out)
     else:
         if (neighEnc and not visEnc): self.obs_mod = self.nstate_mod
         elif (not neighEnc and visEnc): self.obs_mod = self.nvis_mod
         elif (neighEnc and visEnc):
             mod_out = Cat()([
                 self.nstate_mod([self.modeN]),
                 self.nvis_mod([self.visP, self.visG])
             ])
             self.obs_mod = Container([self.modeN, self.visP, self.visG],
                                      mod_out)
     self.obs_mod.name = 'ObsMod'
Example #4
def test_orderings():
    masked_input_layer, input_layer, mask_layer = create_input_layers(6)
    mog = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                    outputs=mog_layer(input_layer, Activation("relu")
                    (Add()([Dense(16)(Lambda(lambda x: x[:, :2], output_shape=(2,))(masked_input_layer)),
                            Dense(16, use_bias=False)(mask_layer)])), 1))
    inner_model = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                            outputs=mog([masked_input_layer, input_layer, mask_layer]))
    model = training_model(inner_model)
    model.compile(loss=utils.maximize_prediction, optimizer=optimizers.SGD(lr=0.1),)
    model.fit(np.random.normal(0, size=(1000, 6)),
              np.zeros(shape=(1000, 1)),
              batch_size=10,
              verbose=0,
              epochs=1)

    test_set = np.random.normal(0, size=(5, 6))
    real_ld = np.log(np.sum(np.exp(-0.5 * test_set ** 2 - 0.5 * np.log(2 * np.pi))))

    test_1_model = logdensity_model(inner_model)
    test_1_model.compile(optimizer="sgd", loss=utils.maximize_prediction)
    test_8_model = logdensity_model(inner_model, num_of_orderings=8)
    test_8_model.compile(optimizer="sgd", loss=utils.maximize_prediction)

    ld_1 = test_1_model.evaluate(test_set, np.zeros(shape=(5, 1)), verbose=0)
    ld_8 = test_8_model.evaluate(test_set, np.zeros(shape=(5, 1)), verbose=0)
    assert abs(-real_ld - ld_8) < abs(-real_ld - ld_1)
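As a reminder of where real_ld comes from: for x ~ N(0, 1) the log-density is log p(x) = -x**2/2 - log(2*pi)/2, which is the expression used inside the test above. A quick self-contained check (scipy assumed available):

import numpy as np
from scipy.stats import norm

x = np.random.normal(size=10)
# Closed form used in the test above:
assert np.allclose(norm.logpdf(x), -0.5 * x ** 2 - 0.5 * np.log(2 * np.pi))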
Example #5
def build_autoencoder(config: BEGANConfig):
    n_filters = config.n_filters
    hidden_size = config.hidden_size

    dx = image_input = Input((config.image_height, config.image_width, 3))

    dx = convolution_image_for_encoding(
        dx, n_filters, strides=(2, 2))  # output: (N, 32, 32, n_filters)
    dx = convolution_image_for_encoding(
        dx, n_filters * 2, strides=(2, 2))  # output: (N, 16, 16, n_filters*2)
    dx = convolution_image_for_encoding(
        dx, n_filters * 3, strides=(2, 2))  # output: (N, 8, 8, n_filters*3)
    dx = convolution_image_for_encoding(
        dx, n_filters * 4, strides=(1, 1))  # output: (N, 8, 8, n_filters*4)
    dx = Flatten()(dx)
    hidden = Dense(hidden_size, activation='linear')(dx)
    image_output = build_decoder_layer(config, hidden)

    autoencoder = Container(image_input, image_output, name="autoencoder")
    autoencoder_not_trainable = Container(image_input,
                                          image_output,
                                          name="autoencoder_not_trainable")
    autoencoder_not_trainable.trainable = False

    return autoencoder, autoencoder_not_trainable
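The trainable/non-trainable twin returned above is the usual freeze-by-Container trick: both Containers wrap the same tensors (so they share weights), and the frozen twin is the one wired into any graph whose optimizer must not update those weights. A minimal self-contained sketch (hypothetical layers; Container lives in keras.engine.topology in the Keras versions these examples target):

from keras.engine.topology import Container
from keras.layers import Input, Dense
from keras.models import Model

x = Input((4,))
y = Dense(1, name='disc')(x)
disc = Container(x, y, name='disc_trainable')
disc_frozen = Container(x, y, name='disc_frozen')
disc_frozen.trainable = False

# Gradients still flow through disc_frozen, but its weights are
# excluded from the updates of any model that contains it.
z = Input((3,))
combined = Model(z, disc_frozen(Dense(4, name='gen')(z)))
combined.compile('sgd', 'mse')
assert all('disc' not in w.name for w in combined.trainable_weights)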
Example #6
File: gan.py Project: nebw/beras
 def from_config(cls, config):
     return cls(
         Container.from_config(config['generator']),
         Container.from_config(config['discriminator']),
         config['z_shape'],
         config['real_shape'],
     )
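For reference, a rough sketch of the get_config counterpart that this from_config reverses (hypothetical: the actual beras implementation may store more keys, and this assumes the constructor keeps z_shape/real_shape as attributes):

 def get_config(self):
     return {
         'generator': self.generator.get_config(),
         'discriminator': self.discriminator.get_config(),
         'z_shape': self.z_shape,
         'real_shape': self.real_shape,
     }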
Example #7
def test_gan_get_config(tmpdir):
    z_shape = (1, 8, 8)

    z = Input(z_shape, name='z')
    g_out = Convolution2D(10, 2, 2, activation='relu', border_mode='same')(z)
    generator = Container(z, g_out)
    f, r = Input(z_shape, name='f'), Input(z_shape, name='r')

    dis_input = merge([f, r], mode='concat', concat_axis=1)
    dis_conv = Convolution2D(5, 2, 2, activation='relu')(dis_input)
    dis_flatten = Flatten()(dis_conv)
    dis = Dense(1, activation='sigmoid')(dis_flatten)
    discriminator = Container([f, r], gan_outputs(dis))

    gan = GAN(generator, discriminator, z_shape, z_shape)
    weights_fname = str(tmpdir.mkdir("weights").join("{}.hdf5"))
    gan.save_weights(weights_fname)
    true_config = gan.get_config()

    import json
    with open(os.path.join(TEST_OUTPUT_DIR, "true_config.json"), 'w+') as f:
        json.dump(true_config, f, indent=2)

    gan_from_config = layer_from_config(true_config, custom_objects={
        'GAN': GAN,
        'Split': Split,
    })

    with open(os.path.join(TEST_OUTPUT_DIR, "loaded_config.json"), 'w+') as f:
        json.dump(gan_from_config.get_config(), f, indent=2)
    gan_from_config.load_weights(weights_fname)
Example #8
 def neighStateEncoder(self, vStt=False):
     self.modeN = Input(shape=(self.n_enc, self.n_states), name='mode')
     if (vStt):
         self.pose = Input(shape=(self.n_enc, 2), name='pose')
         self.gaze = Input(shape=(self.n_enc, 2), name='gaze')
         gru_out = GRU(64, name='StateEnc-gru')(
             Cat()([self.pose, self.gaze, self.modeN]))
         elu_out = Dense(64, activation='elu', name='StateEnc-elu')(gru_out)
         self.nstate_mod = Container([self.pose, self.gaze, self.modeN],
                                     elu_out,
                                     name='StateEnc')
     else:
         gru_out = GRU(64, name='StateEnc-gru')(self.modeN)
         elu_out = Dense(64, activation='elu', name='StateEnc-elu')(gru_out)
         self.nstate_mod = Container([self.modeN], elu_out, name='StateEnc')
Example #9
File: hred.py Project: if001/makeSentens2
    def build_decoder(self):
        K.set_learning_phase(1)
        decoder_input = Input(shape=(None, NNVAL.input_dim))
        state_h = Input(shape=(NNVAL.encoder_latent_dim, ))
        state_c = Input(shape=(NNVAL.encoder_latent_dim, ))

        decoder_dense_outputs = Dense(NNVAL.decoder_latent_dim,
                                      activation='sigmoid')(decoder_input)
        decoder_bi_lstm = LSTM(NNVAL.decoder_latent_dim,
                               return_sequences=True,
                               dropout=0.6,
                               recurrent_dropout=0.6)
        decoder_bi_outputs = Bi(decoder_bi_lstm)(decoder_dense_outputs)
        decoder_lstm = LSTM(NNVAL.decoder_latent_dim,
                            return_sequences=True,
                            return_state=True,
                            dropout=0.4,
                            recurrent_dropout=0.4)

        encoder_states = [state_h, state_c]
        decoder_output, output_h, output_c = decoder_lstm(
            decoder_bi_outputs, initial_state=encoder_states)

        decoder_output = Dense(NNVAL.decoder_latent_dim,
                               activation='tanh')(decoder_output)
        decoder_output = Dropout(0.2)(decoder_output)
        decoder_output = Dense(NNVAL.output_dim,
                               activation='linear')(decoder_output)

        return Container([decoder_input, state_h, state_c],
                         [decoder_output, output_h, output_c])
Example #10
File: models.py Project: mylabteam/planet
def build_decoder(config: BEGANConfig, name):
    """
    generator and decoder( of discriminator) have same network structure, but don't share weights.
    This function takes different input layer, flow another network, and return different output layer.
    """
    n_filters = config.n_filters
    n_layer = config.n_layer_in_conv

    dx = input_z = Input((64, ))
    dx = Dense((8*8*n_filters), activation='linear', name="%s/Dense" % name)(dx)
    dx = Reshape((8, 8, n_filters))(dx)

    # output: (N, 16, 16, n_filters)
    dx = convolution_image_for_decoding(dx, n_filters, upsample=True, name="%s/L1" % name, n_layer=n_layer)

    # output: (N, 32, 32, n_filters)
    dx = convolution_image_for_decoding(dx, n_filters, upsample=True, name="%s/L2" % name, n_layer=n_layer)

    # output: (N, 64, 64, n_filters)
    dx = convolution_image_for_decoding(dx, n_filters, upsample=True, name="%s/L3" % name, n_layer=n_layer)

    # output: (N, 64, 64, n_filters)
    dx = convolution_image_for_decoding(dx, n_filters, upsample=False, name="%s/L4" % name, n_layer=n_layer)

    # output: (N, 64, 64, 3)
    image_output = Convolution2D(3, (3, 3), padding="same", activation="linear", name="%s/FinalConv" % name)(dx)
    decoder = Container(input_z, image_output, name=name)
    return decoder
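A standalone illustration of the docstring's point that the two networks share structure but not weights: calling a builder twice creates fresh weight variables, whereas two Containers over one graph would share them (hypothetical names, same Keras era as the examples above):

from keras.engine.topology import Container
from keras.layers import Input, Dense

def build_block(name):
    inp = Input((4,), name='%s_in' % name)
    return Container(inp, Dense(8, name='%s_dense' % name)(inp), name=name)

a, b = build_block('a'), build_block('b')
# Same architecture, two independent weight variables:
assert a.get_layer('a_dense').trainable_weights[0] is not \
       b.get_layer('b_dense').trainable_weights[0]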
Example #11
 def from_config(cls, config, custom_objects=None):
     step_model = Container.from_config(config, custom_objects)
     config.pop('name')
     config.pop('layers')
     config.pop('input_layers')
     config.pop('output_layers')
     return cls(_step_model=step_model, **config)
Example #12
def build_autoencoder(config, name="autoencoder"):
    encoder = build_encoder(config, name="%s/encoder" % name)
    decoder = build_decoder(config, name="%s/decoder" % name)
    autoencoder = Container(encoder.inputs,
                            decoder(encoder.outputs),
                            name=name)
    return autoencoder
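Note the composition idiom here: calling decoder(encoder.outputs) splices the decoder's existing layers (and weights) onto the encoder's output tensors, and the outer Container wraps the combined graph. In miniature (hypothetical dimensions):

from keras.engine.topology import Container
from keras.layers import Input, Dense

x = Input((8,))
encoder = Container(x, Dense(2)(x), name='enc')
z = Input((2,))
decoder = Container(z, Dense(8)(z), name='dec')
# One graph, reusing both sub-networks' weights:
autoencoder = Container(encoder.inputs, decoder(encoder.outputs), name='auto')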
Example #13
def test_compare_original_nade():
    """ Compare output computation with github.com/MarcCote/NADE

    This test uses weights learned with the reference implementation.
    The following parameters were used:
    orderlessNADE.py --theano --form MoG --dataset simple.hdf5 --samples_name 0 --hlayers 1
    --n_components 1 --epoch_size 10000 --momentum 0.0 --units 16 --training_route training
    --no_validation --batch_size 5

    Training data consisted of 10000 samples drawn from normal(mean=0, sigma=1).
    The architecture here is the same as would be created by the reference implementation.
    """
    import h5py
    masked_input_layer, input_layer, mask_layer = create_input_layers(2)

    mog = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                    outputs=mog_layer(input_layer, Activation("relu")
                    (add([Dense(16)(Lambda(lambda x: x[:, :2])(masked_input_layer)),
                          Dense(16, use_bias=False)(mask_layer)])), 1))
    inner_model = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                            outputs=mog([masked_input_layer, input_layer, mask_layer]))

    model = training_model(inner_model, mask_seed=1)
    model.compile(loss=utils.maximize_prediction, optimizer="sgd")
    with h5py.File("tests/original_nade_weights.hdf5") as h:
        model.set_weights([
            h["final_model/parameters/W1"][()].astype(np.float32),
            h["final_model/parameters/b1"][()].astype(np.float32),
            h["final_model/parameters/Wflags"][()].astype(np.float32),
            h["final_model/parameters/V_alpha"][()].T.reshape((16, 2)).astype(np.float32),
            h["final_model/parameters/b_alpha"][()].reshape(2).astype(np.float32),
            h["final_model/parameters/V_sigma"][()].T.reshape((16, 2)).astype(np.float32),
            h["final_model/parameters/b_sigma"][()].reshape(2).astype(np.float32),
            h["final_model/parameters/V_mu"][()].T.reshape((16, 2)).astype(np.float32),
            h["final_model/parameters/b_mu"][()].reshape(2).astype(np.float32)
        ])

    np.random.seed(1)
    output = model.predict(np.random.normal(size=(5, 2)))
    # Different random generation leads to different masks
    if K.backend() == "tensorflow":
        assert np.allclose(np.array([-2.20870864, -2.12633744, -4.85813326, -3.63397837, -1.89778014]), output)
    elif K.backend() == "theano":
        assert np.allclose(np.array([-3.33089394, -2.55555928, -4.85813281, -4.85442475, -1.92244674]), output)
    else:
        raise NotImplementedError()
Example #14
    def get_generator():
        z = Input(shape=z_shape, name='z')

        inputs = [z, gen_cond]
        gen_input = merge(inputs, mode='concat', concat_axis=1)
        gen_output = Convolution2D(10, 2, 2, activation='relu',
                                   border_mode='same')(gen_input)
        return Container(inputs, gen_output)
Example #15
 def neighVisEncoder(self):
     self.visP = Input(shape=(self.n_vis, 2), name='visP')
     self.visG = Input(shape=(self.n_vis, 2), name='visG')
     gru_out = GRU(64, name='VisEnc-gru')(Cat()([self.visP, self.visG]))
     elu_out = Dense(64, activation='elu', name='VisEnc-elu')(gru_out)
     self.nvis_mod = Container([self.visP, self.visG],
                               elu_out,
                               name='VisEnc')
Example #16
 def get_discriminator():
     f, r = Input(z_shape, name='f'), Input(z_shape, name='r')
     inputs = [f, r]
     dis_input = merge(inputs, mode='concat', concat_axis=1)
     dis_conv = Convolution2D(5, 2, 2, activation='relu')(dis_input)
     dis_flatten = Flatten()(dis_conv)
     dis = Dense(1, activation='sigmoid')(dis_flatten)
     return Container(inputs, gan_outputs(dis))
Example #17
def simple_gan():
    z = Input(batch_shape=simple_gan_z_shape, name='z')
    generator = sequential([
        Dense(simple_gan_nb_z, activation='relu', name='g1'),
        Dense(simple_gan_nb_z, activation='relu', name='g2'),
        Dense(simple_gan_nb_out, activation='sigmoid', name='g3'),
    ])(z)

    fake = Input(batch_shape=simple_gan_real_shape, name='fake')
    real = Input(batch_shape=simple_gan_real_shape, name='real')

    discriminator = sequential([
        Dense(20, activation='relu', input_dim=2, name='d1'),
        Dense(1, activation='sigmoid', name='d2')
    ])(concat([fake, real], axis=0))
    return GAN(Container(z, generator),
               Container([fake, real],  gan_outputs(discriminator)),
               simple_gan_z_shape[1:], simple_gan_real_shape[1:])
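The concat([fake, real], axis=0) call stacks fake and real samples along the batch axis so a single discriminator forward pass scores both; gan_outputs then slices the scores back apart by row. In plain numpy terms (shapes illustrative, not beras's actual code):

import numpy as np

nb_fake, nb_real = 96, 32
scores = np.random.rand(nb_fake + nb_real, 1)  # fake rows first, then real
d_on_fake, d_on_real = scores[:nb_fake], scores[nb_fake:]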
Example #18
  def Discriminator(self, input_shape):

      # Assuming model is not recurrent
      model = Sequential()

      # Architecture based on the EEG classification model at https://arxiv.org/pdf/1703.05051.pdf
      model.add(layers.Conv2D(25, kernel_size=(1,11), strides=(1,1),
                               padding='same', input_shape=input_shape, 
                               kernel_initializer='he_normal'))
                               #kernel_regularizer=regularizers.l2(.001)))

      model.add(layers.Conv2D(25, kernel_size=(22,1), strides=(1,1),
                              padding='valid',
                              kernel_initializer='he_normal'))
                              #kernel_regularizer=regularizers.l2(.001)))
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))


      model.add(layers.Conv2D(50, kernel_size=(1,11),
                              padding='same',
                              kernel_initializer='he_normal'))
                              #kernel_regularizer=regularizers.l2(.001)))
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))

      model.add(layers.Conv2D(100, kernel_size=(1,11), strides=(1,1),
                        padding='same',
                        kernel_initializer='he_normal'))
                        #kernel_regularizer=regularizers.l2(.001)))
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))

      model.add(layers.Conv2D(200, kernel_size=(1,11), strides=(1,1),
                        padding='same',
                        kernel_initializer='he_normal'))
                        #kernel_regularizer=regularizers.l2(.001)))                          
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))

      model.add(layers.Flatten())
      model.add(layers.Dense(1, activation='sigmoid'))
      #model.summary()

      eeg = layers.Input(shape=input_shape)
      validity = model(eeg)

      fixed = Container(eeg, validity)

      return Model(eeg, validity), fixed
Example #19
    def Discriminator(self, input_shape):

        model = Sequential()
        model.add(LSTM(100, input_shape=input_shape))
        model.add(Dense(1, activation='sigmoid'))

        inp = Input(shape=input_shape)
        validity = model(inp)

        return Model(inp, validity), Container(inp, validity)
Example #20
def test_train_logdensity():
    masked_input_layer, input_layer, mask_layer = create_input_layers(2)
    mog = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                    outputs=mog_layer(input_layer, Activation("relu")
                    (Add()([Dense(16)(Lambda(lambda x: x[:, :2], output_shape=(2,))(masked_input_layer)),
                            Dense(16, use_bias=False)(mask_layer)])), 1))
    inner_model = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                            outputs=mog([masked_input_layer, input_layer, mask_layer]))
    model = training_model(inner_model)
    model.compile(loss=utils.maximize_prediction, optimizer=optimizers.SGD(lr=0.1),)
    model.fit(np.random.normal(0, size=(200, 2)),
              np.zeros(shape=(200, 1)),
              batch_size=10,
              verbose=0,
              epochs=5)
    test_model = logdensity_model(inner_model)
    test_model.compile(optimizer="sgd", loss=utils.maximize_prediction)
    real = test_model.evaluate(np.random.normal(0, size=(50, 2)), np.zeros(shape=(50, 1)), verbose=0)
    wrong = test_model.evaluate(np.random.normal(1, size=(50, 2)), np.zeros(shape=(50, 1)), verbose=0)
    assert real < wrong
Example #21
def init_models(Cnet_path=None, Dnet_path=None):
    # Cnet_path and Dnet_path must be given together or both omitted.
    assert (Cnet_path is None) == (Dnet_path is None)
    #----------------------------- completion_model ----------------------------
    holed_origins_inp = Input(shape=IMG_SHAPE, name='holed_origins_inp')
    complnet_inp = Input(shape=IMG_SHAPE, name='complnet_inp')
    masks_inp = Input(shape=MASK_SHAPE, name='masks_inp')

    complnet_out = completion_net(VAR_IMG_SHAPE)(complnet_inp)
    merged_out = Add()([holed_origins_inp, 
                         Multiply()([complnet_out, 
                                     masks_inp])])
    compl_model = Model([holed_origins_inp, complnet_inp, masks_inp], 
                        merged_out)
    if Cnet_path: 
        compl_model.load_weights(Cnet_path, by_name=True)
    compl_model.compile(loss='mse', optimizer=Adadelta())

    #compl_model.summary()
    #plot_model(compl_model, to_file='C_model.png', show_shapes=True)

    #--------------------------- discrimination_model --------------------------
    origins_inp = Input(shape=IMG_SHAPE, name='origins_inp')
    crop_yxhw_inp = Input(shape=(4,), dtype=np.int32, name='yxhw_inp')

    local_cropped = merge([origins_inp,crop_yxhw_inp], mode=cropping, 
                          output_shape=MASK_SHAPE, name='local_crop')
    discrim_out = discrimination_net(IMG_SHAPE, LD_CROP_SHAPE)([origins_inp, 
                                                                local_cropped])
    discrim_model = Model([origins_inp,crop_yxhw_inp], discrim_out)
    if Dnet_path: 
        discrim_model.load_weights(Dnet_path, by_name=True)
    discrim_model.compile(loss='binary_crossentropy', 
                          optimizer=Adadelta(lr=D_MODEL_LR)) # good? lol
                          #optimizer=Adam(lr=0.000001))

    #discrim_model.summary()
    #plot_model(discrim_model, to_file='D_model.png', show_shapes=True)

    #------------------------------- joint_model -------------------------------
    d_container = Container([origins_inp,crop_yxhw_inp], 
                            discrim_out, name='D_container')
    d_container.trainable = False
    joint_model = Model([holed_origins_inp, complnet_inp, masks_inp, 
                         crop_yxhw_inp],
                        [merged_out, d_container([merged_out,crop_yxhw_inp])])

    joint_model.compile(loss=['mse', 'binary_crossentropy'],
                        loss_weights=[1.0, ALPHA], optimizer=Adadelta())

    #joint_model.summary()
    #plot_model(joint_model, to_file='joint_model.png', show_shapes=True)
    return compl_model, discrim_model, joint_model
Example #22
def test_compare_original_nade_reg():
    """ Same as test_compare_original_nade,
     but with regularization of activities in MOG layer enabled. Should not influence the output
    """
    import h5py
    masked_input_layer, input_layer, mask_layer = create_input_layers(2)

    mog = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                    outputs=mog_layer(input_layer, Activation("relu")
                    (add([Dense(16)(Lambda(lambda x: x[:, :2])(masked_input_layer)),
                          Dense(16, use_bias=False)(mask_layer)])), 1, True))
    inner_model = Container(inputs=[masked_input_layer, input_layer, mask_layer],
                            outputs=mog([masked_input_layer, input_layer, mask_layer]))

    model = training_model(inner_model, mask_seed=1)
    model.compile(loss=utils.maximize_prediction, optimizer="sgd")
    with h5py.File("tests/original_nade_weights.hdf5") as h:
        model.set_weights([
            h["final_model/parameters/W1"][()].astype(np.float32),
            h["final_model/parameters/b1"][()].astype(np.float32),
            h["final_model/parameters/Wflags"][()].astype(np.float32),
            h["final_model/parameters/V_alpha"][()].T.reshape((16, 2)).astype(np.float32),
            h["final_model/parameters/b_alpha"][()].reshape(2).astype(np.float32),
            h["final_model/parameters/V_sigma"][()].T.reshape((16, 2)).astype(np.float32),
            h["final_model/parameters/b_sigma"][()].reshape(2).astype(np.float32),
            h["final_model/parameters/V_mu"][()].T.reshape((16, 2)).astype(np.float32),
            h["final_model/parameters/b_mu"][()].reshape(2).astype(np.float32)
        ])

    np.random.seed(1)
    output = model.predict(np.random.normal(size=(5, 2)))
    # Different random generation leads to different masks
    if K.backend() == "tensorflow":
        assert np.allclose(np.array([-2.20870864, -2.12633744, -4.85813326, -3.63397837, -1.89778014]), output)
    elif K.backend() == "theano":
        assert np.allclose(np.array([-3.33089394, -2.55555928, -4.85813281, -4.85442475, -1.92244674]), output)
    else:
        raise NotImplementedError()
Example #23
def test_gan_graph():
    z_shape = (1, 8, 8)
    z = Input(shape=z_shape, name='z')
    gen_cond = Input(shape=(1, 8, 8), name='gen_cond')

    inputs = [z, gen_cond]
    gen_input = merge(inputs, mode='concat', concat_axis=1)
    gen_output = Convolution2D(10, 2, 2, activation='relu',
                               border_mode='same')(gen_input)
    generator = Container(inputs, gen_output)

    f, r = Input(z_shape, name='f'), Input(z_shape, name='r')
    inputs = [f, r]
    dis_input = merge(inputs, mode='concat', concat_axis=1)
    dis_conv = Convolution2D(5, 2, 2, activation='relu')(dis_input)
    dis_flatten = Flatten()(dis_conv)
    dis = Dense(1, activation='sigmoid')(dis_flatten)
    discriminator = Container(inputs, gan_outputs(dis))

    gan = GAN(generator, discriminator, z_shape=z_shape, real_shape=z_shape)
    gan.build('adam', 'adam', gan_binary_crossentropy)
    gan.compile()
    gan.generate({'gen_cond': np.zeros((64,) + z_shape)}, nb_samples=64)
Example #24
File: hred.py Project: if001/makeSentens2
 def build_encoder(self):
     K.set_learning_phase(1)
     encoder_input = Input(shape=(None, NNVAL.input_dim))
     encoder_dense_outputs = Dense(NNVAL.encoder_latent_dim,
                                   activation='sigmoid')(encoder_input)
     encoder_bi_lstm = LSTM(NNVAL.encoder_latent_dim,
                            return_sequences=True,
                            dropout=0.4,
                            recurrent_dropout=0.4)
     encoder_bi_outputs = Bi(encoder_bi_lstm)(encoder_dense_outputs)
     _, state_h, state_c = LSTM(NNVAL.encoder_latent_dim,
                                return_state=True,
                                dropout=0.2,
                                recurrent_dropout=0.2)(encoder_bi_outputs)
     return Container(encoder_input, [state_h, state_c])
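Examples #9 and #24 form an encoder/decoder pair: the encoder's returned [state_h, state_c] become the decoder LSTM's initial_state. A minimal self-contained sketch of that handoff (illustrative dimensions, plain Keras layers):

from keras.layers import Input, LSTM, Dense
from keras.models import Model

enc_in = Input((None, 8))
_, h, c = LSTM(16, return_state=True)(enc_in)

dec_in = Input((None, 8))
dec_seq, _, _ = LSTM(16, return_sequences=True, return_state=True)(
    dec_in, initial_state=[h, c])
model = Model([enc_in, dec_in], Dense(8)(dec_seq))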
Example #25
 def kpmGate(self):
     if (self.n_kpm > 1):
         self.pose = Input(shape=(self.n_kpm, 2), name='pose')
         self.gaze = Input(shape=(self.n_kpm, 2), name='gaze')
         vis_out = GRU(64, name='KPM-gru')(Cat()([self.pose, self.gaze]))
     else:
         self.pose = Input(shape=(2, ), name='pose')
         self.gaze = Input(shape=(2, ), name='gaze')
         vis_out = Dense(64, activation='elu',
                         name='KPM-elu')(Cat()([self.pose, self.gaze]))
     sig_out = Dense(1,
                     activation='hard_sigmoid',
                     name='KPM-sig',
                     kernel_initializer='zero')(vis_out)
     rep_out = Flatten()(RepeatVector(64)(sig_out))
     self.kpm_gate = Model([self.pose, self.gaze], sig_out)
     self.kpm_mod = Container([self.pose, self.gaze],
                              rep_out,
                              name='KPM-Gate')
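The RepeatVector/Flatten pair at the end is a broadcasting trick: the single sigmoid unit in sig_out is tiled to width 64 so it can elementwise-scale a 64-d encoding via Multiply. A shape check (illustrative input size):

from keras.layers import Input, Dense, RepeatVector, Flatten
from keras.models import Model

x = Input((2,))
gate = Dense(1, activation='hard_sigmoid')(x)   # (None, 1)
tiled = Flatten()(RepeatVector(64)(gate))       # (None, 64, 1) -> (None, 64)
assert Model(x, tiled).output_shape == (None, 64)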
Example #26
def make_decoder(input_size, fixed=False):
    
    # Reveal network
    reveal_input = Input(shape=(input_size))
    
    # Adding Gaussian noise with 0.01 standard deviation.
    input_with_noise = GaussianNoise(0.01, name='output_C_noise')(reveal_input)
    
    x3 = Conv2D(50, (3, 3), strides = (1, 1), padding='same', activation='relu', name='conv_rev0_3x3')(input_with_noise)
    x4 = Conv2D(10, (4, 4), strides = (1, 1), padding='same', activation='relu', name='conv_rev0_4x4')(input_with_noise)
    x5 = Conv2D(5, (5, 5), strides = (1, 1), padding='same', activation='relu', name='conv_rev0_5x5')(input_with_noise)
    x = concatenate([x3, x4, x5])
    
    x3 = Conv2D(50, (3, 3), strides = (1, 1), padding='same', activation='relu', name='conv_rev1_3x3')(x)
    x4 = Conv2D(10, (4, 4), strides = (1, 1), padding='same', activation='relu', name='conv_rev1_4x4')(x)
    x5 = Conv2D(5, (5, 5), strides = (1, 1), padding='same', activation='relu', name='conv_rev1_5x5')(x)
    x = concatenate([x3, x4, x5])
    
    x3 = Conv2D(50, (3, 3), strides = (1, 1), padding='same', activation='relu', name='conv_rev2_3x3')(x)
    x4 = Conv2D(10, (4, 4), strides = (1, 1), padding='same', activation='relu', name='conv_rev2_4x4')(x)
    x5 = Conv2D(5, (5, 5), strides = (1, 1), padding='same', activation='relu', name='conv_rev2_5x5')(x)
    x = concatenate([x3, x4, x5])
    
    x3 = Conv2D(50, (3, 3), strides = (1, 1), padding='same', activation='relu', name='conv_rev3_3x3')(x)
    x4 = Conv2D(10, (4, 4), strides = (1, 1), padding='same', activation='relu', name='conv_rev3_4x4')(x)
    x5 = Conv2D(5, (5, 5), strides = (1, 1), padding='same', activation='relu', name='conv_rev3_5x5')(x)
    x = concatenate([x3, x4, x5])
    
    x3 = Conv2D(50, (3, 3), strides = (1, 1), padding='same', activation='relu', name='conv_rev4_3x3')(x)
    x4 = Conv2D(10, (4, 4), strides = (1, 1), padding='same', activation='relu', name='conv_rev4_4x4')(x)
    x5 = Conv2D(5, (5, 5), strides = (1, 1), padding='same', activation='relu', name='conv_rev4_5x5')(x)
    x = concatenate([x3, x4, x5])
    
    output_Sprime = Conv2D(3, (3, 3), strides = (1, 1), padding='same', activation='relu', name='output_S')(x)
    
    if not fixed:
        return Model(inputs=reveal_input,
                     outputs=output_Sprime,
                     name = 'Decoder')
    else:
        return Container(inputs=reveal_input,
                         outputs=output_Sprime,
                         name = 'DecoderFixed')
Example #27
def build_encoder(config, name="encoder"):
    n_filters = config.n_filters
    hidden_size = config.hidden_size
    n_layer = config.n_layer_in_conv

    dx = image_input = Input((config.image_height, config.image_width, 3))

    # output: (N, 32, 32, n_filters)
    dx = convolution_image_for_encoding(dx,
                                        n_filters,
                                        strides=(2, 2),
                                        name="%s/L1" % name,
                                        n_layer=n_layer)

    # output: (N, 16, 16, n_filters*2)
    dx = convolution_image_for_encoding(dx,
                                        n_filters * 2,
                                        strides=(2, 2),
                                        name="%s/L2" % name,
                                        n_layer=n_layer)

    # output: (N, 8, 8, n_filters*3)
    dx = convolution_image_for_encoding(dx,
                                        n_filters * 3,
                                        strides=(2, 2),
                                        name="%s/L3" % name,
                                        n_layer=n_layer)

    # output: (N, 8, 8, n_filters*4)
    dx = convolution_image_for_encoding(dx,
                                        n_filters * 4,
                                        strides=(1, 1),
                                        name="%s/L4" % name,
                                        n_layer=n_layer)

    dx = Flatten()(dx)
    hidden = Dense(hidden_size, activation='linear',
                   name="%s/Dense" % name)(dx)

    encoder = Container(image_input, hidden, name=name)
    return encoder
Example #28
def sequential_to_gan(generator: Sequential,
                      discriminator: Sequential,
                      nb_real=32,
                      nb_fake=96):

    fake = Input(shape=discriminator.input_shape[1:], name='fake')
    real = Input(shape=discriminator.input_shape[1:], name='real')

    dis_in = merge([fake, real],
                   concat_axis=0,
                   mode='concat',
                   name='concat_fake_real')
    dis = discriminator(dis_in)
    dis_outputs = gan_outputs(dis,
                              fake_for_gen=(0, nb_fake),
                              fake_for_dis=(nb_fake - nb_real, nb_real),
                              real=(nb_fake, nb_fake + nb_real))
    dis_container = Container([fake, real], dis_outputs)
    return GAN(generator,
               dis_container,
               z_shape=generator.input_shape[1:],
               real_shape=discriminator.input_shape[1:])
Example #29
# Note: this fragment assumes model_list (a list of dicts with an 'autoencoder'
# model per stacked layer), input_encode and input_decode were defined earlier.
model2 = model_list[1]['autoencoder']
model3 = model_list[2]['autoencoder']

encode_layer = list()
decode_layer = list()

for i in range(len(model_list)):
    encode_layer.append(model_list[i]['autoencoder'].get_layer('encode_layer' +
                                                               str(i + 1)))
    decode_layer.append(model_list[i]['autoencoder'].get_layer('decode_layer' +
                                                               str(i + 1)))

o = encode_layer[0](input_encode)
o = encode_layer[1](o)
o = encode_layer[2](o)
encode_layers = Container(input_encode, o, name='encode_container')

o = decode_layer[2](input_decode)
o = decode_layer[1](o)
o = decode_layer[0](o)
decode_layers = Container(input_decode, o, name='decode_container')

o = GaussianNoise(0.01)(input_encode)
o = encode_layers(o)
o = decode_layers(o)

stacked_autoencoder = Model(input_encode, o)
encoder_sa = Model(input_encode, encode_layers(input_encode))
decoder_sa = Model(input_decode, decode_layers(input_decode))

stacked_autoencoder.summary()
Example #30
    def __init__(self, lr_D=2e-4, lr_G=2e-4, image_shape=(140, 168, 144, 1),
                 date_time_string_addition='', image_folder='MR_crop'):
        self.img_shape = image_shape
        self.channels = self.img_shape[-1]
        self.normalization = InstanceNormalization
        # Hyper parameters
        self.lambda_1 = 10.0  # Cyclic loss weight A_2_B
        self.lambda_2 = 10.0  # Cyclic loss weight B_2_A
        self.lambda_D = 1.0  # Weight for loss from discriminator guess on synthetic images
        self.learning_rate_D = lr_D
        self.learning_rate_G = lr_G
        self.generator_iterations = 2  # Number of generator training iterations in each training loop
        self.discriminator_iterations = 1  # Number of discriminator training iterations in each training loop
        self.beta_1 = 0.5
        self.beta_2 = 0.999
        self.batch_size = 1
        self.epochs = 200  # choose multiples of 25 since the models are saved every 25th epoch
        self.save_interval = 1
        self.synthetic_pool_size = 50

        # Linear decay of learning rate, for both discriminators and generators
        self.use_linear_decay = True
        self.decay_epoch = 75  # The epoch where the linear decay of the learning rates starts

        # Identity loss - occasionally feed images from B to G_A2B (and vice versa) to teach identity mappings
        self.use_identity_learning = True
        self.identity_mapping_modulus = 10  # Identity mapping is done whenever the iteration number is divisible by this number

        # PatchGAN - if false the discriminator learning rate should be decreased
        self.use_patchgan = True

        # Multi-scale discriminator - if True the generator has an extra encoding/decoding step to match the discriminator's information access
        self.use_multiscale_discriminator = False

        # Resize convolution - instead of transposed convolution in the deconvolution layers (uk) - can reduce checkerboard artifacts, but the blurring might affect cycle-consistency
        self.use_resize_convolution = False

        # Supervised learning part - for MR images - comparison
        self.use_supervised_learning = True
        self.supervised_weight = 40.0

        # Fetch data during training instead of pre-caching all images - might be necessary for large datasets
        self.use_data_generator = True

        # Tweaks
        self.REAL_LABEL = 1.0  # Use e.g. 0.9 to avoid training the discriminators to zero loss

        # Used as storage folder name
        self.date_time = time.strftime('%Y%m%d-%H%M%S', time.localtime()) + date_time_string_addition

        # optimizer
        self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2)
        self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2)


        # ======= Data ==========
        # Use 'None' to fetch all available images
        nr_A_train_imgs = None
        nr_B_train_imgs = None
        nr_A_test_imgs = None
        nr_B_test_imgs = None


        if self.use_data_generator:
            print('--- Using dataloader during training ---')

            #self.data_generator = load_data.load_data(
            #    nr_of_channels=self.batch_size, generator=True, subfolder=image_folder)
            
            self.data_generator = load_crop_data.load_data(self.img_shape,
                                       nr_of_channels=self.channels,
                                       batch_size=self.batch_size,
                                       nr_A_train_imgs=nr_A_train_imgs,
                                       nr_B_train_imgs=nr_B_train_imgs,
                                       nr_A_test_imgs=nr_A_test_imgs,
                                       nr_B_test_imgs=nr_B_test_imgs,
                                       generator=True,
                                       subfolder=image_folder)

            # Only store test images
            nr_A_train_imgs = 0
            nr_B_train_imgs = 0

        else:
            print('--- Caching data ---')

        data = load_crop_data.load_data(img_shape=self.img_shape,
                                   nr_of_channels=self.channels,
                                   batch_size=self.batch_size,
                                   nr_A_train_imgs=nr_A_train_imgs,
                                   nr_B_train_imgs=nr_B_train_imgs,
                                   nr_A_test_imgs=nr_A_test_imgs,
                                   nr_B_test_imgs=nr_B_test_imgs,
                                   subfolder=image_folder)
        X = data["train_A_images"]
        print("A_train shape: ", X.shape)
        Y = data["train_B_images"]
        print("B_train shape: ", Y.shape)
        Z = data["test_A_images"]
        print("A_test shape: ", Z.shape)
        W = data["test_B_images"]
        print("B_train shape: ", W.shape)

        self.A_train = data["train_A_images"]
        self.B_train = data["train_B_images"]
        self.A_test = data["test_A_images"]
        self.B_test = data["test_B_images"]
        self.testA_image_names = data["test_A_image_names"]
        self.testB_image_names = data["test_B_image_names"]

        print('Data has been loaded')
        sys.stdout.flush() 


        # Set up parallel processing
        strategy = tf.contrib.distribute.MirroredStrategy()
        with strategy.scope():

            # ======= Discriminator model ==========
            if self.use_multiscale_discriminator:
                D_A = self.modelMultiScaleDiscriminator()
                D_B = self.modelMultiScaleDiscriminator()
                loss_weights_D = [0.5, 0.5] # 0.5 since we train on real and synthetic images
            else:
                D_A = self.modelDiscriminator()
                D_B = self.modelDiscriminator()
                loss_weights_D = [0.5]  # 0.5 since we train on real and synthetic images
            D_A.summary()

            # Discriminator builds
            image_A = Input(shape=self.img_shape)
            image_B = Input(shape=self.img_shape)
            guess_A = D_A(image_A)
            guess_B = D_B(image_B)
            self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model')
            self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model')

            #self.D_A.summary()
            #self.D_B.summary()
            self.D_A.compile(optimizer=self.opt_D,
                             loss=self.lse,
                             loss_weights=loss_weights_D)
            self.D_B.compile(optimizer=self.opt_D,
                             loss=self.lse,
                             loss_weights=loss_weights_D)

            # Use containers to avoid a false Keras error about weight discrepancies
            self.D_A_static = Container(inputs=image_A, outputs=guess_A, name='D_A_static_model')
            self.D_B_static = Container(inputs=image_B, outputs=guess_B, name='D_B_static_model')

            # ======= Generator model ==========
            # Do not update discriminator weights during generator training
            self.D_A_static.trainable = False
            self.D_B_static.trainable = False

            # Generators
            self.G_A2B = self.modelGenerator(name='G_A2B_model')
            self.G_B2A = self.modelGenerator(name='G_B2A_model')
            self.G_A2B.summary()
            self.G_B2A.summary()

            if self.use_identity_learning:
                self.G_A2B.compile(optimizer=self.opt_G, loss=self.cycle_loss)
                self.G_B2A.compile(optimizer=self.opt_G, loss=self.cycle_loss)

            # Generator builds
            real_A = Input(shape=self.img_shape, name='real_A')
            real_B = Input(shape=self.img_shape, name='real_B')
            synthetic_B = self.G_A2B(real_A)
            synthetic_A = self.G_B2A(real_B)
            dA_guess_synthetic = self.D_A_static(synthetic_A)
            dB_guess_synthetic = self.D_B_static(synthetic_B)
            reconstructed_A = self.G_B2A(synthetic_B)
            reconstructed_B = self.G_A2B(synthetic_A)

            model_outputs = [reconstructed_A, reconstructed_B]
            compile_losses = [self.cycle_loss, self.cycle_loss,
                              self.lse, self.lse]
            compile_weights = [self.lambda_1, self.lambda_2,
                               self.lambda_D, self.lambda_D]

            if self.use_multiscale_discriminator:
                for _ in range(2):
                    compile_losses.append(self.lse)
                    compile_weights.append(self.lambda_D)  # * 1e-3)  # Lower weight to regularize the model
                for i in range(2):
                    model_outputs.append(dA_guess_synthetic[i])
                    model_outputs.append(dB_guess_synthetic[i])
            else:
                model_outputs.append(dA_guess_synthetic)
                model_outputs.append(dB_guess_synthetic)

            if self.use_supervised_learning:
                model_outputs.append(synthetic_A)
                model_outputs.append(synthetic_B)
                compile_losses.append('MAE')
                compile_losses.append('MAE')
                compile_weights.append(self.supervised_weight)
                compile_weights.append(self.supervised_weight)

            self.G_model = Model(inputs=[real_A, real_B],
                                 outputs=model_outputs,
                                 name='G_model')

            self.G_model.compile(optimizer=self.opt_G,
                                 loss=compile_losses,
                                 loss_weights=compile_weights)
            self.G_A2B.summary()



        # ======= Create designated run folder and store meta data ==========
        directory = os.path.join('/outdata', 'images', self.date_time)
        if not os.path.exists(directory):
            os.makedirs(directory)
        self.writeMetaDataToJSON()

        # ======= Avoid pre-allocating GPU memory ==========
        # TensorFlow wizardry
        config = tf.ConfigProto()

        # Don't pre-allocate memory; allocate as-needed
        config.gpu_options.allow_growth = True

        # Create a session with the above options specified.
        K.tensorflow_backend.set_session(tf.Session(config=config))

        # ===== Tests ======
        # Simple Model
#         self.G_A2B = self.modelSimple('simple_T1_2_T2_model')
#         self.G_B2A = self.modelSimple('simple_T2_2_T1_model')
#         self.G_A2B.compile(optimizer=Adam(), loss='MAE')
#         self.G_B2A.compile(optimizer=Adam(), loss='MAE')
#         # self.trainSimpleModel()
#         self.load_model_and_generate_synthetic_images()

        # ======= Initialize training ==========
        sys.stdout.flush()
        #plot_model(self.G_A2B, to_file='GA2B_expanded_model_new.png', show_shapes=True)
        self.train(epochs=self.epochs, batch_size=self.batch_size, save_interval=self.save_interval)
Example #31
def example_gan(result_dir="output", data_dir="data"):
    input_shape = (128, 128, 3)
    local_shape = (64, 64, 3)
    batch_size = 32
    n_epoch = 10

    #tc = int(n_epoch * 0.18)
    #td = int(n_epoch * 0.02)
    tc = 2
    td = 2
    alpha = 0.0004

    train_datagen = DataGenerator(input_shape[:2], local_shape[:2])

    generator = model_generator(input_shape)
    discriminator = model_discriminator(input_shape, local_shape)
    optimizer = Adadelta()

    # build model
    org_img = Input(shape=input_shape)
    mask = Input(shape=(input_shape[0], input_shape[1], 1))

    in_img = merge([org_img, mask],
                   mode=lambda x: x[0] * (1 - x[1]),
                   output_shape=input_shape)
    imitation = generator(in_img)
    completion = merge([imitation, org_img, mask],
                       mode=lambda x: x[0] * x[2] + x[1] * (1 - x[2]),
                       output_shape=input_shape)
    cmp_container = Container([org_img, mask], completion, name='g_container')
    cmp_out = cmp_container([org_img, mask])

    cmp_model = Model([org_img, mask], cmp_out)
    cmp_model.compile(loss='mse',
                      optimizer=optimizer)

    local_img = Input(shape=local_shape)
    d_container = Container([org_img, local_img], discriminator([org_img, local_img]),
                                                            name='d_container')
    d_model = Model([org_img, local_img], d_container([org_img, local_img]))
    d_model.compile(loss='binary_crossentropy', 
                    optimizer=optimizer)

    cmp_model.summary()
    d_model.summary()
    from keras.utils import plot_model
    plot_model(cmp_model, to_file='cmp_model.png', show_shapes=True)
    plot_model(d_model, to_file='d_model.png', show_shapes=True)
    def random_cropping(x, x1, y1, x2, y2):
        out = []
        for idx in range(batch_size):
            out.append(x[idx, y1[idx]:y2[idx], x1[idx]:x2[idx], :])
        return K.stack(out, axis=0)
    cropping = Lambda(random_cropping, output_shape=local_shape)

    for n in range(n_epoch):
        # The duplicated model-building block below stays commented out;
        # the models were already built before the loop.
        '''
        org_img = Input(shape=input_shape)
        mask = Input(shape=(input_shape[0], input_shape[1], 1))

        in_img = merge([org_img, mask],
                       mode=lambda x: x[0] * (1 - x[1]),
                       output_shape=input_shape)
        imitation = generator(in_img)
        completion = merge([imitation, org_img, mask],
                           mode=lambda x: x[0] * x[2] + x[1] * (1 - x[2]),
                           output_shape=input_shape)
        cmp_container = Container([org_img, mask], completion, name='g_container')
        cmp_out = cmp_container([org_img, mask])

        cmp_model = Model([org_img, mask], cmp_out)
        cmp_model.compile(loss='mse',
                          optimizer=optimizer)

        local_img = Input(shape=local_shape)
        d_container = Container([org_img, local_img], discriminator([org_img, local_img]),
                                                                name='d_container')
        d_model = Model([org_img, local_img], d_container([org_img, local_img]))
        d_model.compile(loss='binary_crossentropy', 
                        optimizer=optimizer)

        cmp_model = Model([org_img, mask], cmp_out)
        local_img = Input(shape=local_shape)
        d_container = Container([org_img, local_img], discriminator([org_img, local_img]),
                                                                name='d_container')
        d_model = Model([org_img, local_img], d_container([org_img, local_img]))
        '''
        for inputs, points, masks in train_datagen.flow_from_directory(data_dir, batch_size,
                                                                       hole_min=48, hole_max=64):
            cmp_image = cmp_model.predict([inputs, masks])
            local = []
            local_cmp = []
            for i in range(batch_size):
                x1, y1, x2, y2 = points[i]
                local.append(inputs[i][y1:y2, x1:x2, :])
                local_cmp.append(cmp_image[i][y1:y2, x1:x2, :])

            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))

            g_loss = 0.0
            d_loss = 0.0
            if n < tc:
                g_loss = cmp_model.train_on_batch([inputs, masks], inputs)
                print("epoch: %d < %d [D loss: %e] [G mse: %e]" % (n,tc, d_loss, g_loss))
                
            else:
                #d_model.trainable = True
                d_loss_real = d_model.train_on_batch([inputs, np.array(local)], valid)
                d_loss_fake = d_model.train_on_batch([cmp_image, np.array(local_cmp)], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                print('train D',n,(tc+td),'|',d_loss,'|',g_loss)
                if n >= tc + td:
                    d_container.trainable = False
                    cropping.arguments = {'x1': points[:, 0], 'y1': points[:, 1],
                                          'x2': points[:, 2], 'y2': points[:, 3]}
                    all_model = Model([org_img, mask],
                                      [cmp_out, d_container([cmp_out, cropping(cmp_out)])])
                    all_model.compile(loss=['mse', 'binary_crossentropy'],
                                      loss_weights=[1.0, alpha], optimizer=optimizer)
                    g_loss = all_model.train_on_batch([inputs, masks],
                                                      [inputs, valid])
                #print("epoch: %d [D loss: %e] [G all: %e]" % (n, d_loss, g_loss))
                    print(all_model.metrics_names)
                    print('train ALL',n,'|',d_loss,'|',g_loss)

        '''
        if n < tc:
            print("epoch: %d < %d [D loss: %e] [G mse: %e]" % (n,tc, d_loss, g_loss))
        else:
            print('train D',n,(tc+td),'|',d_loss,'|',g_loss)
            if n >= tc + td:
                print(all_model.metrics_names)
                print('train ALL',n,'|',d_loss,'|',g_loss)
        '''


        num_img = min(5, batch_size)
        fig, axs = plt.subplots(num_img, 3)
        for i in range(num_img):
            axs[i, 0].imshow(inputs[i] * (1 - masks[i]))
            axs[i, 0].axis('off')
            axs[i, 0].set_title('Input')
            axs[i, 1].imshow(cmp_image[i])
            axs[i, 1].axis('off')
            axs[i, 1].set_title('Output')
            axs[i, 2].imshow(inputs[i])
            axs[i, 2].axis('off')
            axs[i, 2].set_title('Ground Truth')
        fig.savefig(os.path.join(result_dir, "result_%d.png" % n))
        plt.close()
        # save model
        generator.save(os.path.join(result_dir, "generator_%d.h5" % n))
        discriminator.save(os.path.join(result_dir, "discriminator_%d.h5" % n))

        K.clear_session()