Example #1
    def build_actor(self, discrete=True, action_space=None, activation=None):
        print(action_space)
        input_image = Input(shape=(84, 84, self.frames))
        actual_value = Input(shape=(1, ))
        predicted_value = Input(shape=(1, ))
        old_prediction = Input(shape=(action_space, ))

        x = Conv2D(32, (8, 8), (2, 2), 'same', activation=relu)(input_image)
        x = Conv2D(64, (4, 4), (2, 2), 'same', activation=relu)(x)
        x = Conv2D(128, (2, 2), (2, 2), 'same', activation=relu)(x)
        x = Conv2D(256, (1, 1), (2, 2), 'same', activation=relu)(x)
        x = Flatten()(x)

        x = Dense(512, activation=relu)(x)

        out_actions = Dense(action_space, activation=softmax, name='output')(x)
        #out_actions = NoisyDense(action_space, activation=softmax, sigma_init=0.02, name='output')(x)

        model = Model(inputs=[
            input_image, actual_value, predicted_value, old_prediction
        ],
                      outputs=[out_actions])
        model.compile(optimizer=Adam(lr=10e-4),
                      loss=[
                          proximal_policy_optimization_loss(
                              actual_value=actual_value,
                              old_prediction=old_prediction,
                              predicted_value=predicted_value)
                      ])
        model.summary()
        return model
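The compile call above relies on a proximal_policy_optimization_loss factory that is not shown. A minimal sketch of the clipped-surrogate form such Keras PPO implementations commonly use; the clipping epsilon and entropy weight are illustrative assumptions, not values from the source:

import keras.backend as K

LOSS_CLIPPING = 0.2  # assumed PPO clipping epsilon
ENTROPY_LOSS = 1e-3  # assumed entropy bonus weight

def proximal_policy_optimization_loss(actual_value, predicted_value, old_prediction):
    # Advantage estimated from the extra inputs wired into the actor model.
    advantage = actual_value - predicted_value

    def loss(y_true, y_pred):
        prob = y_true * y_pred
        old_prob = y_true * old_prediction
        ratio = prob / (old_prob + 1e-10)
        clipped = K.clip(ratio, 1.0 - LOSS_CLIPPING, 1.0 + LOSS_CLIPPING)
        entropy = -prob * K.log(prob + 1e-10)
        return -K.mean(K.minimum(ratio * advantage, clipped * advantage)
                       + ENTROPY_LOSS * entropy)

    return loss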
Example #2
def FCN32(input_shape, dropout_rate=0.0, classes=2):
    """
    Implementation of the FCN32 network based on the VGG16 from
    keras.applications.

    :param input_shape: Model input shape.
    :type input_shape: (int, int, int)
    :param dropout_rate: dropout rate to be used in the model.
    :type dropout_rate: float
    :param classes: Number of classes for the segmentation.
    :type classes: int
    :return: FCN 32 Keras model.
    :rtype: `tensorflow.python.keras.Model`
    """
    net = VGG16(include_top=False, input_shape=input_shape, weights=None)
    inputs = net.input
    base_output = net.output

    net = Conv2D(4096, (7, 7), padding='same', activation='relu')(base_output)
    net = Dropout(dropout_rate)(net)
    net = Conv2D(4096, (1, 1), padding='same', activation='relu')(net)
    net = Dropout(dropout_rate)(net)
    net = Conv2D(classes, (1, 1))(net)
    net = Conv2DTranspose(classes, (64, 64),
                          strides=32,
                          use_bias=False,
                          padding='same',
                          activation='softmax')(net)
    model = Model(inputs, net, name='fcn32')
    # check the model using the summary
    model.summary()
    return model
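A minimal usage sketch; the input shape and class count are illustrative:

model = FCN32(input_shape=(224, 224, 3), dropout_rate=0.5, classes=21)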
Example #3
def get_hegemax_model(seq_length, print_summary=True):
    forward_image_input = Input(shape=(seq_length, 160, 350, 3),
                                name="forward_image_input")
    info_input = Input(shape=(seq_length, 3), name="info_input")
    hlc_input = Input(shape=(seq_length, 6), name="hlc_input")

    x = TimeDistributed(Cropping2D(cropping=((50, 0),
                                             (0, 0))))(forward_image_input)
    x = TimeDistributed(Lambda(lambda x: ((x / 255.0) - 0.5)))(x)
    x = TimeDistributed(Conv2D(24, (5, 5), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(36, (5, 5), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(48, (5, 5), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(64, (3, 3), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(64, (3, 3), activation="relu"))(x)
    x = TimeDistributed(Conv2D(64, (3, 3), activation="relu"))(x)
    conv_output = TimeDistributed(Flatten())(x)

    x = concatenate([conv_output, info_input, hlc_input])

    x = TimeDistributed(Dense(100, activation="relu"))(x)
    x = CuDNNLSTM(10, return_sequences=False)(x)
    steer_pred = Dense(10, activation="tanh", name="steer_pred")(x)

    x = TimeDistributed(Cropping2D(cropping=((50, 0),
                                             (0, 0))))(forward_image_input)
    x = TimeDistributed(Lambda(lambda x: ((x / 255.0) - 0.5)))(x)
    x = TimeDistributed(Conv2D(24, (5, 5), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(36, (5, 5), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(48, (5, 5), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(64, (3, 3), strides=(2, 2),
                               activation="relu"))(x)
    x = TimeDistributed(Conv2D(64, (3, 3), activation="relu"))(x)
    x = TimeDistributed(Conv2D(64, (3, 3), activation="relu"))(x)
    conv_output = TimeDistributed(Flatten())(x)

    x = concatenate([conv_output, info_input, hlc_input])

    x = TimeDistributed(Dense(100, activation="relu"))(x)
    x = CuDNNLSTM(10, return_sequences=False)(x)
    throttle_pred = Dense(1, name="throttle_pred")(x)
    brake_pred = Dense(1, name="brake_pred")(x)

    model = Model(inputs=[forward_image_input, info_input, hlc_input],
                  outputs=[steer_pred, throttle_pred, brake_pred])

    if print_summary:
        model.summary()

    return model
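A compile sketch for the three-headed model above; the optimizer and per-head losses are illustrative assumptions:

model = get_hegemax_model(seq_length=10)
model.compile(optimizer="adam",
              loss={"steer_pred": "mse",
                    "throttle_pred": "mse",
                    "brake_pred": "mse"})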
Example #4
def create_neural_network(hidden_neurons: int) -> Model:
    if hidden_neurons < 1:
        hidden_neurons = 1
    if globals()['USE_NORMED']:
        inputs = layers.Input(shape=(654,))
    else:
        inputs = layers.Input(shape=(77,))
    hidden = layers.Dense(units=hidden_neurons, activation=activations.sigmoid, use_bias=True)(inputs)
    output = layers.Dense(10, use_bias=True, activation="softmax")(hidden)
    model = Model(inputs=inputs, outputs=output)

    model.summary()

    return model
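The function reads a module-level USE_NORMED flag through globals(); a usage sketch with assumed values:

USE_NORMED = True  # module-level flag the function expects
model = create_neural_network(hidden_neurons=64)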
Example #5
def generate_resnet_model_advance_and_density(classes_len: int):
    """
    Function to create a ResNet50 model pre-trained with custom FC Layers.
    If the "advanced" command line argument is selected, adds an extra convolutional layer with extra filters to support
    larger images.
    :param classes_len: The number of classes (labels).
    :return: The ResNet50 model.
    """
    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.VGG_IMG_SIZE['HEIGHT'],
                             config.VGG_IMG_SIZE['WIDTH'], 1))

    # Add convolution and pooling layers
    model = Sequential()
    model.add(img_input)
    for i in range(0, config.CONV_CNT):
        model.add(Conv2D(3, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Generate a ResNet50 model with pre-trained ImageNet weights, input as given above, excluded fully connected layers.
    model_base = ResNet50(include_top=False, weights='imagenet')

    # Start with base model consisting of convolutional layers
    model.add(model_base)

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())

    # Possible dropout for regularisation can be added later and experimented with:
    if config.DROPOUT != 0:
        model.add(Dropout(config.DROPOUT, name='Dropout_Regularization_1'))

    # Add fully connected hidden layers.
    model.add(Dense(units=512, activation='relu', name='Dense_Intermediate_1'))
    model.add(Dense(units=32, activation='relu', name='Dense_Intermediate_2'))

    model_density = Sequential()
    model_density.add(
        Dense(int(config.model.split('-')[1]),
              input_shape=(int(config.model.split('-')[1]), ),
              activation='relu'))

    model_concat = concatenate([model.output, model_density.output], axis=-1)

    # Final output layer that uses softmax activation function (because the classes are exclusive).
    if classes_len == 2:
        model_concat = Dense(1, activation='sigmoid',
                             name='Output')(model_concat)
    else:
        model_concat = Dense(classes_len, activation='softmax',
                             name='Output')(model_concat)

    model_combine = Model(inputs=[model.input, model_density.input],
                          outputs=model_concat)

    # Print model details if running in debug mode.
    if config.verbose_mode:
        print(model_combine.summary())

    return model_combine
Example #6
    def build_critic(self):
        input_image = Input(shape=(84, 84, self.frames))
        x = Conv2D(32, (8, 8), (2, 2), 'same', activation=relu)(input_image)
        x = Conv2D(64, (4, 4), (2, 2), 'same', activation=relu)(x)
        x = Conv2D(128, (2, 2), (2, 2), 'same', activation=relu)(x)
        x = Conv2D(256, (1, 1), (2, 2), 'same', activation=relu)(x)
        x = Flatten()(x)

        x = Dense(512, activation=relu)(x)

        out_value = Dense(1, activation=linear)(x)

        model = Model(inputs=[input_image], outputs=[out_value])
        model.compile(optimizer=Adam(lr=10e-4), loss='mse')
        model.summary()
        return model
Example #7
    def model(self):
        # Don't train the discriminator model weights
        self.discriminator.trainable = False

        # Send the image to the generator model
        generator_output = self.generator(self.input_image)

        # Send the actual input and generator output to discriminator model
        discriminator_out = self.discriminator(
            [self.input_image, generator_output])

        #  Final Model
        model = Model(self.input_image, [discriminator_out, generator_output])
        optimizer = Adam(lr=0.0002, beta_1=0.5)
        model.compile(loss=['binary_crossentropy', 'mae'],
                      optimizer=optimizer,
                      loss_weights=[1, 100])
        print(
            "\n******************************************* GAN Model ********************************************"
        )
        print(model.summary())
        plot_model(model,
                   "modelplots/pix2pix/gan.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model
Example #8
def yolo_v3(input_shape=(416, 416, 3), obj_c=3 * (4 + 5)):
    inputs = Input(shape=input_shape, name='img_input')
    x = Lambda(padding)(inputs)
    x = Conv2D(filters=32, kernel_size=3, padding='VALID', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = resdual_net(x, 64, 1)
    x = resdual_net(x, 128, 2)
    x_8 = resdual_net(x, 128, 4, name='shortcut_8')
    x_16 = resdual_net(x_8, 256, 4, name='shortcut_16')
    x_32 = resdual_net(x_16, 512, 2, name='shortcut_32')

    x = DarknetConv2D_BN_Leaky(x_32, filters=512, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=512 * 2, kernel=3)
    x = DarknetConv2D_BN_Leaky(x, filters=512, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=512 * 2, kernel=3)
    x1 = DarknetConv2D_BN_Leaky(x, filters=512, kernel=1)
    x = DarknetConv2D_BN_Leaky(x1, filters=512 * 2, kernel=3)
    y1 = Conv2D(filters=obj_c, kernel_size=1)(x)

    x = DarknetConv2D_BN_Leaky(x1, filters=256, kernel=1)
    x = UpSampling2D(2)(x)
    x = tf.keras.layers.concatenate([x, x_16])

    x = DarknetConv2D_BN_Leaky(x, filters=256, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=256 * 2, kernel=3)
    x = DarknetConv2D_BN_Leaky(x, filters=256, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=256 * 2, kernel=3)
    x2 = DarknetConv2D_BN_Leaky(x, filters=256, kernel=1)
    x = DarknetConv2D_BN_Leaky(x2, filters=256 * 2, kernel=3)
    y2 = Conv2D(filters=obj_c, kernel_size=1)(x)

    x = DarknetConv2D_BN_Leaky(x2, filters=128, kernel=1)
    x = UpSampling2D(2)(x)
    x = tf.keras.layers.concatenate([x, x_8])

    x = DarknetConv2D_BN_Leaky(x, filters=128, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=128 * 2, kernel=3)
    x = DarknetConv2D_BN_Leaky(x, filters=128, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=128 * 2, kernel=3)
    x = DarknetConv2D_BN_Leaky(x, filters=128, kernel=1)
    x = DarknetConv2D_BN_Leaky(x, filters=128 * 2, kernel=3)
    y3 = Conv2D(filters=obj_c, kernel_size=1)(x)
    model = Model(inputs, [y3, y2, y1])
    model.summary()
    return model
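yolo_v3 depends on DarknetConv2D_BN_Leaky, resdual_net, and padding helpers that are defined elsewhere. A plausible sketch of the conv block, inferred from its call sites (Conv-BN-LeakyReLU is the standard Darknet building block):

def DarknetConv2D_BN_Leaky(x, filters, kernel):
    # Convolution without bias, followed by batch norm and leaky ReLU.
    x = Conv2D(filters=filters, kernel_size=kernel, padding='same',
               use_bias=False)(x)
    x = BatchNormalization()(x)
    return LeakyReLU(alpha=0.1)(x)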
Example #9
def create_deep_autoencoder():
    input_shape = Input(shape=(INPUT_LENGTH, ))
    model = Model(input_shape, create_deep_decoder(create_deep_encoder(input_shape)))

    model.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy')

    print(model.summary())

    return model
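create_deep_encoder and create_deep_decoder are defined elsewhere; illustrative sketches with hypothetical layer widths:

def create_deep_encoder(x):
    x = Dense(128, activation='relu')(x)
    return Dense(32, activation='relu')(x)

def create_deep_decoder(x):
    x = Dense(128, activation='relu')(x)
    return Dense(INPUT_LENGTH, activation='sigmoid')(x)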
Example #10
def get_model(input_shape, intermediate_dim, latent_dim):
    # VAE model = encoder + decoder
    # build encoder model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = Conv2D(64, (2, 2), activation=relu)(inputs)
    x = Conv2D(64, (3, 3), activation=relu)(x)
    x = Conv2D(64, (3, 3), activation=relu)(x)
    x = Flatten()(x)
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim, ),
               name='z')([z_mean, z_log_var])

    # instantiate encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()
    # plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)

    # build decoder model
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    x = Dense(intermediate_dim, activation=relu)(latent_inputs)
    x = Dense(64 * 28 * 28, activation=relu)(x)
    x = Reshape((28, 28, 64))(x)
    x = Conv2DTranspose(64, (3, 3), activation=sigmoid, padding='same')(x)
    x = Conv2DTranspose(64, (3, 3), activation=sigmoid, padding='same')(x)
    outputs = Conv2D(image_channels, (2, 2), padding='same')(x)

    # instantiate decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()
    # plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)

    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae_mlp')
    return vae, encoder, decoder, inputs, z_mean, z_log_var, latent_inputs, outputs
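The Lambda layer assumes a sampling helper implementing the reparameterization trick; the canonical Keras form is:

from tensorflow.keras import backend as K

def sampling(args):
    # z = mean + sigma * epsilon, with epsilon ~ N(0, I)
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon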
Example #11
def simple_cnn(input_shape, classes=2):

    inputs = Input(shape=input_shape)
    # Conv -> BatchNorm -> ReLU blocks; the activation is applied once, after
    # batch normalization.
    net = Conv2D(128, (3, 3), padding='same')(inputs)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = MaxPool2D((2, 2), padding='same')(net)
    net = Conv2D(256, (3, 3), padding='same')(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = MaxPool2D((2, 2), padding='same')(net)
    net = Conv2D(512, (3, 3), padding='same')(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = Conv2DTranspose(classes, (3, 3),
                          strides=4,
                          use_bias=False,
                          padding='same',
                          activation='softmax')(net)
    model = Model(inputs, net, name='simple_cnn')
    # check the model using the summary
    model.summary()
    return model
Example #12
def get_densenet121_model(classes=2):
    def preprocess_input(img):
        img[:, :, 0] = (img[:, :, 0] - 103.94) * 0.017
        img[:, :, 1] = (img[:, :, 1] - 116.78) * 0.017
        img[:, :, 2] = (img[:, :, 2] - 123.68) * 0.017
        return img.astype(np.float32)

    def decode_img(img):
        img[:, :, 0] = (img[:, :, 0] / 0.017) + 103.94
        img[:, :, 1] = (img[:, :, 1] / 0.017) + 116.78
        img[:, :, 2] = (img[:, :, 2] / 0.017) + 123.68
        return img.astype(np.uint8)

    # classes is ignored when include_top=False, so it is not passed here.
    base_model = tf.keras.applications.DenseNet121(include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    pre = Dense(classes, activation='softmax', name='fc1000')(x)
    model = Model(inputs=base_model.input, outputs=pre)
    model.summary()
    for layer in base_model.layers:
        layer.trainable = False

    ckpt = './ckpt/densenet121.h5'
    checkpoint = ModelCheckpoint(filepath=ckpt)
    tensorboard = './log/densenet121'
    tensorboard = TensorBoard(log_dir=tensorboard)
    if os.path.exists(ckpt):
        model.load_weights(ckpt, by_name=True)
        print("load done")
    else:
        plot_model(model, to_file='densenet121.png')

    model.compile(optimizer=tf.train.AdamOptimizer(0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model, checkpoint, tensorboard, preprocess_input, decode_img
Example #13
def channel_estimation():
    rate = 0.1
    y = Input(shape=(90, ), dtype=tf.float32)
    index = Input(shape=(45, ), dtype=tf.float32)
    tmp = concatenate([y, index], axis=1)
    tmp = dense_unit_dropout(tmp, 256, rate)
    tmp1 = dense_unit_dropout(tmp, 32, rate)
    tmp2 = dense_unit_dropout(tmp, 32, rate)
    tmp3 = dense_unit_dropout(tmp, 32, rate)
    tmp1 = dense_unit_dropout(tmp1, 8, rate)
    tmp2 = dense_unit_dropout(tmp2, 8, rate)
    tmp3 = dense_unit_dropout(tmp3, 8, rate)
    tmp1 = Dense(1, activation='sigmoid')(tmp1)
    tmp2 = Dense(1, activation='sigmoid')(tmp2)
    tmp3 = Dense(1, activation='sigmoid')(tmp3)
    tmp1 = Lambda(To_rad)(tmp1)
    tmp2 = Lambda(To_rad)(tmp2)
    tmp3 = Lambda(To_rad)(tmp3)
    aod = concatenate([tmp1, tmp2, tmp3], axis=1)
    model = Model([y, index], aod)
    model.compile(optimizer=tf.train.AdamOptimizer(), loss=RankMse)
    model.summary()
    return model
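dense_unit_dropout and To_rad are helpers defined elsewhere; plausible sketches inferred from the call sites (the relu activation and the pi scaling are assumptions):

def dense_unit_dropout(x, units, rate):
    x = Dense(units, activation='relu')(x)
    return Dropout(rate)(x)

def To_rad(x):
    # Map the sigmoid output in [0, 1] to an angle in radians.
    return x * np.pi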
Example #14
def _test_batch_mode(layer, **kwargs):
    print('Batch mode')
    A_batch = np.stack([A] * batch_size)
    X_batch = np.stack([X] * batch_size)

    A_in = Input(shape=(N, N))
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]
    input_data = [X_batch, A_batch]
    if kwargs.pop('edges', None):
        E_batch = np.stack([E] * batch_size)
        E_in = Input(shape=(N, N, S))
        inputs.append(E_in)
        input_data.append(E_batch)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)
    model.summary()

    assert output.shape == (batch_size, N, kwargs['channels'])
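The test assumes module-level fixtures whose shapes follow from the Input declarations above; an illustrative setup:

N, F, S, batch_size = 5, 4, 3, 2
A = np.ones((N, N))       # adjacency matrix
X = np.ones((N, F))       # node features
E = np.ones((N, N, S))    # edge features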
Example #15
def get_mobilev2_model(classes=2):
    def preprocess_input(img):
        img = img / 128.
        img = img - 1.
        return img.astype(np.float32)

    def decode_img(img):
        img = img + 1.
        img = img * 128.
        return img.astype(np.uint8)

    base_model = MobileNetV2(include_top=False, input_shape=(224, 224, 3))
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    pre = Dense(classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=pre)
    model.summary()
    # Freezing these layers means they cannot be trained.
    # Transfer learning: reuse the pre-trained weights and retrain only the new fully connected layer.
    for layer in base_model.layers:
        layer.trainable = False

    ckpt = './ckpt/mobilev2.h5'
    checkpoint = ModelCheckpoint(filepath=ckpt)
    tensorboard = './log/mobilev2'
    tensorboard = TensorBoard(log_dir=tensorboard)
    if os.path.exists(ckpt):
        model.load_weights(ckpt)
        print('load done')
    else:
        plot_model(model, to_file='mobilev2.png')

    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model, checkpoint, tensorboard, preprocess_input, decode_img
Example #16
def bfnn_model():
    # Construct the BFNN proposed in this letter; the input consists of the
    # estimated CSI and the SNR value.
    input_tensor = Input(shape=(2 * Nt + 1, ))
    temp = dense_unit_dropout(input_tensor, 2048, 0)  # first dense unit, 2048 neurons
    temp = dense_unit_dropout(temp, 1024, 0)  # second dense unit, 1024 neurons
    temp = dense_unit_dropout(temp, 256, 0)  # third dense unit, 256 neurons
    out_phases = dense_unit_dropout(temp, Nt, 0)  # output the phases of v_RF
    model = Model(input_tensor, out_phases)
    model.compile(optimizer=tf.train.AdamOptimizer(), loss=loss_function_rate)
    print(model.summary())
    return model
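Nt (the number of transmit antennas) is a module-level constant; a usage sketch with an illustrative value:

Nt = 64  # assumed antenna count
model = bfnn_model()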
Example #17
def build_model(num_lags,
                model="MLP",
                summary=True,
                date_time=False,
                combined_model=False,
                event_info=False):
    assert model in models
    input_lags = Input(shape=(num_lags, ), name="pickup_lags")
    inputs = []
    concat = []
    x = input_lags
    if event_info:
        input_events = Input(shape=(max_count, ), name="events")
        events_feat = Embedding(max_count, 8)(input_events)
        events_feat = LSTM(16)(events_feat)
        inputs.append(input_events)
        concat.append(events_feat)
    if date_time:
        input_date_time = Input(shape=(75, ), name="date_time")
        inputs.append(input_date_time)
        concat.append(input_date_time)
    if combined_model:
        pickup_zone = Input(shape=(4, ), name="pickup_zone")
        inputs.append(pickup_zone)
        concat.append(pickup_zone)
    if model.upper() == "MLP":
        # x = concatenate([x] + concat)
        x = MLP(x)
    elif model.upper() == "LSTM":
        x = Reshape((1, 48))(x)  # note: this hard-codes num_lags == 48
        x = SLSTM(x)

    if len(inputs) > 0:
        x = concatenate([x] + concat)
        # x = Dense(units=10, activation="relu")(x)
        inputs.append(input_lags)
    else:
        inputs = input_lags
    preds = Dense(units=1, name="predicted")(x)
    model = Model(inputs, preds)
    model.compile(loss="mean_squared_error", optimizer="adam")
    if summary:
        print(model.summary())
    return model
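build_model also depends on module-level helpers (MLP, SLSTM) and constants (models, max_count) not shown here. A usage sketch for the LSTM variant; num_lags must be 48 because of the hard-coded Reshape:

model = build_model(num_lags=48, model="LSTM", date_time=True)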
Example #18
    def __init__(self, game):

        self.game = game
        self.shape = (game._base_board.height, game._base_board.width)
        self.input_board = Input(self.shape, dtype=float)
        self.possible_moves_size = self.shape[1]
        self.checkpoint_dir = "checkpoints"
        create_dir(self.checkpoint_dir)

        X = Reshape((self.shape[0], self.shape[1], 1))(self.input_board)
        h_conv1 = ReLU()(BatchNormalization(axis=3)(Conv2D(config.num_channels,
                                                           3,
                                                           padding='same',
                                                           use_bias=False)(X)))
        h_conv2 = ReLU()(BatchNormalization(axis=3)(Conv2D(
            config.num_channels, 3, padding='same', use_bias=False)(h_conv1)))
        h_conv3 = ReLU()(BatchNormalization(axis=3)(Conv2D(
            config.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))
        h_conv4 = ReLU()(BatchNormalization(axis=3)(Conv2D(
            config.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))
        h_conv4_flat = Reshape((config.num_channels * (self.shape[0] - 4) *
                                (self.shape[1] - 4), ))(h_conv4)
        s_fc1 = Dropout(config.dropout)(ReLU()(BatchNormalization(axis=1)(
            Dense(1024, use_bias=False)(h_conv4_flat))))
        s_fc2 = Dropout(config.dropout)(ReLU()(BatchNormalization(axis=1)(
            Dense(512, use_bias=False)(s_fc1))))

        self.pi = Dense(self.possible_moves_size,
                        activation='softmax',
                        name='pi')(s_fc2)
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)

        self.target_pi = Input([None, self.possible_moves_size], dtype=float)
        self.target_v = Input([None], dtype=float)

        model = Model(inputs=self.input_board, outputs=(self.pi, self.v))
        model.compile(loss=total_loss, optimizer=Adam(config.lr))
        print(model.summary())
        plot_model(model,
                   "modelplots/model.png",
                   show_shapes=True,
                   show_layer_names=True)
        self.model = model
Example #19
    def model(self):
        down1 = self.encoder(self.input_image, 64, batch_norm=False)
        down2 = self.encoder(down1, 128)
        down3 = self.encoder(down2, 256)
        down4 = self.encoder(down3, 512)
        down5 = self.encoder(down4, 512)
        down6 = self.encoder(down5, 512)
        down7 = self.encoder(down6, 512)
        # No batch normalization on the bottleneck layer (a plain ReLU is applied)
        bottleneck = Conv2D(512, (4, 4),
                            strides=(2, 2),
                            padding='same',
                            kernel_initializer=self.init)(down7)
        bottleneck = Activation('relu')(bottleneck)
        # decoder model
        up1 = self.decoder(bottleneck, down7, 512)
        up2 = self.decoder(up1, down6, 512)
        up3 = self.decoder(up2, down5, 512)
        up4 = self.decoder(up3, down4, 512, dropout=False)
        up5 = self.decoder(up4, down3, 256, dropout=False)
        up6 = self.decoder(up5, down2, 128, dropout=False)
        up7 = self.decoder(up6, down1, 64, dropout=False)
        # output
        out = Conv2DTranspose(3, (4, 4),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=self.init)(up7)
        out_image = Activation('tanh')(out)
        # define model
        model = Model(self.input_image, out_image)
        print(
            "\n**************************************** Generator Model *****************************************"
        )
        print(model.summary())
        plot_model(model,
                   "modelplots/pix2pix/generator_model.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model
Example #20
    def model(self):
        out = LeakyReLU(0.2)(self.layer_1(self.merged_image))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_2(out)))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_3(out)))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_4(out)))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_5(out)))

        patch_out = Activation('sigmoid')(self.layer_6(out))
        model = Model([self.input_image, self.target_image], patch_out)

        # Using Adam optimizer
        optimizer = Adam(lr=0.0002, beta_1=0.5)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      loss_weights=[0.5])
        print(
            "\n************************************* Discriminator Model ****************************************"
        )
        print(model.summary())
        plot_model(model,
                   "modelplots/pix2pix/discriminator_model.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model