Esempio n. 1
0
    def build_actor(self, discrete=True, action_space=None, activation=None):
        """Build and compile the PPO actor network.

        :param discrete: unused in this implementation — TODO confirm whether
            a continuous-action branch was intended
        :param action_space: number of discrete actions; sets the width of the
            softmax output and of the old-prediction auxiliary input
        :param activation: unused in this implementation — TODO confirm intent
        :return: compiled Keras Model mapping a frame stack to action
            probabilities
        """
        print(action_space)
        # Main observation input plus three auxiliary inputs that are consumed
        # only by the custom PPO loss closure passed to compile() below.
        input_image = Input(shape=(84, 84, self.frames))
        actual_value = Input(shape=(1, ))
        predicted_value = Input(shape=(1, ))
        old_prediction = Input(shape=(action_space, ))

        # Convolutional trunk: every stage downsamples spatially by stride 2.
        x = Conv2D(32, (8, 8), (2, 2), 'same', activation=relu)(input_image)
        x = Conv2D(64, (4, 4), (2, 2), 'same', activation=relu)(x)
        x = Conv2D(128, (2, 2), (2, 2), 'same', activation=relu)(x)
        x = Conv2D(256, (1, 1), (2, 2), 'same', activation=relu)(x)
        x = Flatten()(x)

        x = Dense(512, activation=relu)(x)

        # Policy head: one probability per action.
        out_actions = Dense(action_space, activation=softmax, name='output')(x)
        #out_actions = NoisyDense(action_space, activation=softmax, sigma_init=0.02, name='output')(x)

        model = Model(inputs=[
            input_image, actual_value, predicted_value, old_prediction
        ],
                      outputs=[out_actions])
        # NOTE(review): lr=10e-4 equals 1e-3, not 1e-4 — confirm which rate
        # was actually intended.
        model.compile(optimizer=Adam(lr=10e-4),
                      loss=[
                          proximal_policy_optimization_loss(
                              actual_value=actual_value,
                              old_prediction=old_prediction,
                              predicted_value=predicted_value)
                      ])
        model.summary()
        return model
    def model(self):
        """Assemble the combined pix2pix GAN: generator + frozen discriminator."""
        # Freeze the discriminator while the combined model trains the generator.
        self.discriminator.trainable = False

        # Generator translates the source image.
        fake_image = self.generator(self.input_image)

        # Discriminator judges the (source, generated) pair.
        validity = self.discriminator([self.input_image, fake_image])

        # Combined model: adversarial output plus direct L1 reconstruction,
        # with the reconstruction term weighted 100x.
        gan = Model(self.input_image, [validity, fake_image])
        gan.compile(loss=['binary_crossentropy', 'mae'],
                    optimizer=Adam(lr=0.0002, beta_1=0.5),
                    loss_weights=[1, 100])

        print(
            "\n******************************************* GAN Model ********************************************"
        )
        print(gan.summary())
        plot_model(gan,
                   "modelplots/pix2pix/gan.png",
                   show_shapes=True,
                   show_layer_names=True)
        return gan
Esempio n. 3
0
class KerasPolicy:
    """GRU-based policy: past + future measurement sequences -> action distribution."""

    def __init__(self,
                 past_measurement_dimensions,
                 future_measurements_dimensions,
                 hidden_dim,
                 action_dimension,
                 drop_prob=0.2):
        """
        Build model, predict only next quality
        :param past_measurement_dimensions: feature count per past timestep
        :param future_measurements_dimensions: feature count per future timestep
        :param hidden_dim: GRU and hidden dense width
        :param action_dimension: number of actions in the softmax output
        :param drop_prob: GRU dropout probability
        """
        # Two variable-length sequence inputs, each summarised by its own GRU.
        self.policy_past_input = Input(shape=(None, past_measurement_dimensions))
        self.policy_past_GRU = GRU(units=hidden_dim,
                                   return_sequences=False,
                                   dropout=drop_prob)(self.policy_past_input)
        self.policy_future_input = Input(shape=(None, future_measurements_dimensions))
        self.policy_future_GRU = GRU(units=hidden_dim,
                                     return_sequences=False,
                                     dropout=drop_prob)(self.policy_future_input)
        self.policy_dense1 = Dense(units=hidden_dim, activation="relu")
        self.policy_dense2 = Dense(activation="softmax", units=action_dimension)

        # Merge both sequence summaries and map them to action probabilities.
        merged = concatenate([self.policy_past_GRU, self.policy_future_GRU])
        merged = self.policy_dense1(merged)
        self.policy_action_output = self.policy_dense2(merged)

        self.model = Model(inputs=[self.policy_past_input, self.policy_future_input],
                           outputs=self.policy_action_output)
        self.model.compile(loss="categorical_crossentropy", optimizer='adam')
Esempio n. 4
0
 def build_model(self):
     """Build a small CNN mapping a stacked frame pair to 54 action logits."""
     # Two frames stacked on the channel axis -> double the channel count.
     frame_shape = list(self.results[0].frame0.shape)
     frame_shape[-1] *= 2
     input_f01 = Input(shape=frame_shape)

     # Two valid-padded conv stages, then a small dense head.
     features = Conv2D(32,
                       kernel_size=8,
                       strides=4,
                       padding="valid",
                       activation='relu')(input_f01)
     features = Conv2D(32,
                       kernel_size=3,
                       strides=1,
                       padding="valid",
                       activation='relu')(features)
     features = Flatten()(features)
     features = Dense(24, activation='relu')(features)
     actions = Dense(54, activation='softmax', name="actions")(features)

     model = Model(input_f01, actions)
     model.compile(Adam(lr=0.001), loss=categorical_crossentropy)
     return model
    def test_loss_with_sample_weight_in_layer_call(self):
        # Regression test: losses registered via Layer.add_loss() inside
        # call() must honour per-sample weights, whether the weight is passed
        # to the loss object or applied manually.
        class MyLayer(layers.Layer):
            def __init__(self):
                super(MyLayer, self).__init__()
                self.bias = testing_utils.Bias()

            def call(self, inputs):
                # inputs = [x, targets, sample_weights]
                out = self.bias(inputs[0])
                # Weighted MAE via the loss object's sample_weight argument...
                self.add_loss(MAE()(inputs[1], out, inputs[2]))
                # ...and the same quantity computed by hand; both are added.
                self.add_loss(
                    math_ops.reduce_mean(inputs[2] * mae(inputs[1], out)))
                return out

        inputs = Input(shape=(1, ))
        targets = Input(shape=(1, ))
        sw = Input(shape=(1, ))

        outputs = MyLayer()([inputs, targets, sw])
        model = Model([inputs, targets, sw], outputs)
        # NOTE(review): predict() is called before compile() — presumably to
        # force a build without a compiled loss; confirm this is intentional.
        model.predict([self.x, self.y, self.w])
        model.compile(optimizer_v2.gradient_descent.SGD(0.05),
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.
                      should_run_tf_function())

        # Loss should decrease linearly over 5 epochs as the bias learns.
        history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
        self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2],
                            1e-3)

        output = model.evaluate([self.x, self.y, self.w])
        self.assertAlmostEqual(output, 1.0, 3)

        output = model.test_on_batch([self.x, self.y, self.w])
        self.assertAlmostEqual(output, 1.0, 3)
Esempio n. 6
0
def get_model(use_model) -> Model:
    """Build a two-image comparison regressor on a frozen ImageNet backbone.

    :param use_model: 'MobileNetV2' selects MobileNetV2; anything else ResNet50.
    :return: compiled model taking two images and predicting a single scalar.
    """
    if use_model == 'MobileNetV2':
        conv_model: Model = MobileNetV2(weights='imagenet',
                                        include_top=False,
                                        input_shape=IMG_SHAPE)
    else:
        conv_model: Model = ResNet50(weights='imagenet',
                                     include_top=False,
                                     input_shape=IMG_SHAPE)

    # Freeze the whole pretrained backbone.
    layer: Layer
    for layer in conv_model.layers:
        layer.trainable = False

    image_a = Input(IMG_SHAPE)
    image_b = Input(IMG_SHAPE)

    # Both branches share the same backbone weights.
    branch_a = conv_model(image_a)
    branch_b = conv_model(image_b)

    x = concatenate([branch_a, branch_b])
    x = GlobalAveragePooling2D()(x)

    x = Dense(256, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.0)(x)  # zero-rate dropout: a no-op, kept to preserve behaviour

    output = Dense(1, kernel_initializer='normal', activation='linear')(x)
    model = Model(inputs=[image_a, image_b], outputs=output)

    model.compile(optimizer=tf.keras.optimizers.Adam(0.00100),
                  loss='mse',
                  metrics=[loss_in_fact])
    return model
class KerasValue:
    """GRU-based state-value estimator V(s) over past/future measurement sequences."""

    def __init__(self,
                 past_measurement_dimensions,
                 future_measurements_dimensions,
                 hidden_dim,
                 drop_prob=0.2):
        """
        V(S) function
        :param past_measurement_dimensions: feature count per past timestep
        :param future_measurements_dimensions: feature count per future timestep
        :param hidden_dim: base hidden width, tapered through the dense stack
        :param drop_prob: GRU dropout probability
        """
        past_input = Input(shape=(None, past_measurement_dimensions))
        future_input = Input(shape=(None, future_measurements_dimensions))

        self.discriminator_past_GRU = GRU(units=hidden_dim,
                                          return_sequences=False,
                                          dropout=drop_prob)
        self.discriminator_future_GRU = GRU(units=hidden_dim,
                                            return_sequences=False,
                                            dropout=drop_prob)
        self.discriminator_dense_1 = Dense(units=hidden_dim // 2, activation="relu")
        self.discriminator_dense_2 = Dense(units=hidden_dim // 2, activation="relu")
        self.discriminator_dense_3 = Dense(units=hidden_dim // 4, activation="relu")
        self.discriminator_dense_final = Dense(units=1)

        # Summarise both sequences, then taper down to a single linear value.
        x = concatenate([self.discriminator_past_GRU(past_input),
                         self.discriminator_future_GRU(future_input)])
        x = self.discriminator_dense_1(x)
        x = self.discriminator_dense_2(x)
        x = self.discriminator_dense_3(x)
        value = self.discriminator_dense_final(x)

        self.model = Model(inputs=[past_input, future_input], outputs=value)
        self.model.compile(loss="mse", optimizer='adam')
class Agent(object):
    """A learning agent backed by a small fully-connected Keras network."""

    def __init__(self, name='model', input_num=None, output_num=None):
        """A learning agent that uses tensorflow to create a neural network"""
        assert input_num is not None
        assert output_num is not None
        self.input_num = input_num
        self.output_num = output_num
        self._build_net()

    def _build_net(self):
        """Construct the neural network"""
        # Change the network structure here
        inputs = Input(shape=[self.input_num])
        hidden = Dense(300, activation="sigmoid")(inputs)
        hidden = Dense(600, activation="sigmoid")(hidden)
        hidden = Dense(29, activation="sigmoid")(hidden)
        outputs = Dense(self.output_num, activation="sigmoid")(hidden)
        self.model = Model(inputs=inputs, outputs=outputs)
        self.model.compile(optimizer="adam", loss='mse')

    def train(self, x, y, n_epoch=100, batch=32):
        """Train the network"""
        self.model.fit(x=x, y=y, epochs=n_epoch, batch_size=batch)

    def predict(self, x):
        """Input values to the neural network and return the result"""
        return self.model.predict(x)
class KerasGAIL:
    """Wraps a KerasPolicy with a PPO-style training model for GAIL."""

    def __init__(self,
                 past_measurement_dimensions,
                 future_measurements_dimensions,
                 hidden_dim,
                 action_dimension,
                 drop_prob=0.2):
        """
        Keras model for GAIL approach
        :param past_measurement_dimensions: feature count per past timestep
        :param future_measurements_dimensions: feature count per future timestep
        :param hidden_dim: GRU / dense hidden width for the policy
        :param action_dimension: number of actions
        :param drop_prob: GRU dropout probability
        """
        self.policy_model = KerasPolicy(past_measurement_dimensions,
                                        future_measurements_dimensions,
                                        hidden_dim,
                                        action_dimension,
                                        drop_prob=drop_prob)

        # Extra inputs consumed only by the PPO loss: the advantage estimate
        # and the old policy's action distribution.
        advantage_input = Input(shape=(1,))
        old_action_probs = Input(shape=(action_dimension,))

        training_inputs = [
            self.policy_model.policy_past_input,
            self.policy_model.policy_future_input,
            advantage_input,
            old_action_probs,
        ]
        self.gail_training_model = Model(
            inputs=training_inputs,
            outputs=self.policy_model.policy_action_output)
        self.gail_training_model.compile(
            loss=proximal_policy_optimization_loss(
                advantage=advantage_input,
                old_prediction=old_action_probs),
            optimizer=Adam(lr=1e-4))
class KerasDiscriminator:
    """GRU-based real/fake classifier over (state sequences, action) triples."""

    def __init__(self,
                 past_measurement_dimensions,
                 future_measurements_dimensions,
                 hidden_dim,
                 action_dimension,
                 drop_prob=0.2):
        """
        General purpose keras GRU classifer
        :param past_measurement_dimensions: feature count per past timestep
        :param future_measurements_dimensions: feature count per future timestep
        :param hidden_dim: GRU / dense hidden width
        :param action_dimension: width of the action input vector
        :param drop_prob: GRU dropout probability
        """
        past_input = Input(shape=(None, past_measurement_dimensions))
        future_input = Input(shape=(None, future_measurements_dimensions))
        action_input = Input(shape=(action_dimension,))

        self.discriminator_past_GRU = GRU(units=hidden_dim,
                                          return_sequences=False,
                                          dropout=drop_prob)
        self.discriminator_future_GRU = GRU(units=hidden_dim,
                                            return_sequences=False,
                                            dropout=drop_prob)
        self.discriminator_dense_1 = Dense(units=hidden_dim, activation="relu")
        self.discriminator_dense_2 = Dense(units=hidden_dim, activation="relu")
        # We predict two -> Is it real or not
        self.discriminator_dense_final = Dense(activation="softmax", units=2)

        # Summarise both sequences, fold in the action, then classify.
        x = concatenate([self.discriminator_past_GRU(past_input),
                         self.discriminator_future_GRU(future_input)])
        x = self.discriminator_dense_1(x)
        x = concatenate([x, action_input])
        x = self.discriminator_dense_2(x)
        likelihood = self.discriminator_dense_final(x)

        self.model = Model(inputs=[past_input, future_input, action_input],
                           outputs=likelihood)
        self.model.compile(loss="categorical_crossentropy", optimizer='adam')
Esempio n. 11
0
def create_keras_model(inputShape, learning_rate):
    """Build a small MNIST-style CNN compiled with RMSprop.

    :param inputShape: shape of a single input sample (without batch axis)
    :param learning_rate: RMSprop learning rate
    :return: compiled Keras Model named 'myModel'
    """
    inputs = Input(shape=inputShape)

    # Two 3x3 conv layers, then pooling with dropout regularisation.
    x = Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs)
    x = Conv2D(32, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)

    # Dense classifier head with 10-way softmax.
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    outputs = Dense(10, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs, name='myModel')

    # Custom Optimizer:
    # https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
    optimizer = tf.keras.optimizers.RMSprop(lr=learning_rate)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Esempio n. 12
0
def autoencoder():
    """Build a symmetric dense autoencoder for flattened 784-dim images.

    :return: (encoder_model, autoencoder_model) sharing the same weights.
    """
    input_image = Input(shape=(784, ))

    # Encoder: taper 784 -> 32.
    x = input_image
    for units in (784, 512, 256, 128, 64, 32):
        x = Dense(units=units, activation='relu')(x)
    encoded = x

    # Decoder: widen 32 -> 784; sigmoid output for pixel intensities.
    for units in (64, 128, 256, 512):
        x = Dense(units=units, activation='relu')(x)
    decoded = Dense(units=784, activation='sigmoid')(x)

    enc = Model(input_image, encoded)
    autoenc = Model(input_image, decoded)

    autoenc.compile(optimizer='adadelta',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])

    return enc, autoenc
Esempio n. 13
0
def VGG16Trunk(image_shape,
               input_name,
               optimizer,
               loss,
               metrics,
               fine_tuning=False):
    """Frozen VGG16 backbone with a dense softmax classification head.

    :param image_shape: input image shape
    :param input_name: name given to the input tensor
    :param optimizer: forwarded to compile()
    :param loss: forwarded to compile()
    :param metrics: forwarded to compile()
    :param fine_tuning: unused here — the backbone is always frozen
    :return: compiled model with NUM_CLASSES softmax outputs
    """
    x = Input(shape=image_shape, name=input_name)
    base_model = VGG16(weights='imagenet', include_top=False, input_tensor=x)

    # Freeze every pretrained layer.
    for layer in base_model.layers:
        layer.trainable = False

    head = Flatten()(base_model.output)
    head = Dense(1024, activation='relu')(head)
    head = Dropout(0.5)(head)
    y = Dense(NUM_CLASSES, activation='softmax')(head)

    model = Model(inputs=x, outputs=y)
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return model
Esempio n. 14
0
def build_model():
    """ Builds word2vec model and return training and validation models."""
    # Shared embedding table used by both the target and the context word.
    embedding = Embedding(VOCAB_SIZE,
                          EMBEDDING_DIM,
                          input_length=1,
                          name='embedding')

    target_inputs = Input((1, ))
    context_inputs = Input((1, ))

    target = Reshape((EMBEDDING_DIM, 1))(embedding(target_inputs))
    context = Reshape((EMBEDDING_DIM, 1))(embedding(context_inputs))

    # Cosine similarity output for the secondary validation model.
    # NOTE(review): axes=0 is the batch axis here — confirm axes=1 was not
    # the intended axis for the normalized dot product.
    similarity = Dot(axes=0, normalize=True)([target, context])

    # Plain dot product -> sigmoid "in context?" prediction for training.
    dot_product = Dot(axes=1)([target, context])
    dot_product = Reshape((1, ))(dot_product)
    output = Dense(1, activation='sigmoid')(dot_product)

    # Primary training model.
    model = Model([target_inputs, context_inputs], output)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')

    # Secondary model shares weights and exposes the similarity head.
    val_model = Model([target_inputs, context_inputs], similarity)

    return model, val_model
Esempio n. 15
0
def compile_bidding_model():
    """Build and compile the bidding neural net.

    Three inputs (8x32 hand matrix, 4-vector context, 4x10 bid history) flow
    through parallel elu Dense branches that are merged and mapped to a
    9-way softmax.

    Fix: the Adam optimizer configured below (lr=0.02, decay=0.01) was
    previously created but never used — compile() received the string
    'adam', i.e. a default-configured optimizer. The configured instance is
    now actually passed to compile().

    :return: compiled Keras Model.
    """
    # bidding neural net
    input1 = Input(shape=(8, 32), dtype=tf.float64, name='x1')
    input2 = Input(shape=(4, ), dtype=tf.float64, name='x2')
    input3 = Input(shape=(4, 10), dtype=tf.float64, name='x3')
    y1 = tf.keras.layers.Dense(units=128,
                               activation='elu',
                               kernel_initializer='he_uniform')(input1)
    y1 = tf.keras.layers.Flatten()(y1)
    # Fold the context vector into the hand branch.
    y2 = tf.keras.layers.Concatenate(axis=1)([y1, input2])
    y2 = tf.keras.layers.Dense(units=128,
                               activation='elu',
                               kernel_initializer='he_uniform')(y2)
    y2 = tf.keras.layers.Flatten()(y2)
    # Separate branch over the bid history.
    y3 = tf.keras.layers.Dense(units=128,
                               activation='elu',
                               kernel_initializer='he_uniform')(input3)
    y3 = tf.keras.layers.Flatten()(y3)
    y4 = tf.keras.layers.Concatenate(axis=1)([y2, y3])
    y4 = tf.keras.layers.Dense(units=128,
                               activation='elu',
                               kernel_initializer='he_uniform')(y4)
    y = tf.keras.layers.Dense(units=9,
                              activation='softmax',
                              kernel_initializer='he_uniform')(y4)
    model = Model(inputs=[input1, input2, input3], outputs=y)

    # compile and build NN with the configured optimizer (previously unused)
    adam = Adam(lr=0.02, decay=0.01)
    model.compile(optimizer=adam,
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=['accuracy'])

    return model
Esempio n. 16
0
def make_deep_autoencoder(dims,
                          act='relu',
                          init='glorot_uniform',
                          layer_decorator: typing.Optional[typing.Callable[
                              [Layer, bool], Layer]] = None,
                          compile=True) -> typing.Tuple[Model, Model]:
    r"""Build a fully-connected symmetric autoencoder model.

    :param dims: Per-layer neuron counts for the entire AE.
    :param act: Activation function for all neurons (except input and output layers)
    :param init: Initialization for the weights of every neuron.
    :param layer_decorator: Callable that can modify every layer of the network.
    :param compile: Compile the model now or leave it up to the caller.
    :return: (ae_model, encoder_model), Model of autoencoder and model of encoder
    """
    n_stacks = len(dims) - 1
    assert n_stacks > 0

    def decorate(tensor, is_output):
        # Apply the optional decorator; identity when none was supplied.
        if layer_decorator:
            return layer_decorator(tensor, is_output=is_output)
        return tensor

    input_img = Input(shape=(dims[0], ), name='input')
    x = input_img

    # Encoder hidden layers.
    for idx, width in enumerate(dims[1:-1]):
        x = Dense(width,
                  activation=act,
                  kernel_initializer=init,
                  name='encoder_%d' % idx)(x)
        x = decorate(x, is_output=False)

    # Bottleneck (no activation): features are extracted from here.
    x = Dense(dims[-1],
              kernel_initializer=init,
              name='encoder_%d' % (n_stacks - 1))(x)
    encoded = decorate(x, is_output=False)

    # Decoder hidden layers, mirroring the encoder.
    x = encoded
    for idx in range(n_stacks - 1, 0, -1):
        x = Dense(dims[idx],
                  activation=act,
                  kernel_initializer=init,
                  name='decoder_%d' % idx)(x)
        x = decorate(x, is_output=False)

    # Linear reconstruction layer.
    x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x)
    decoded = decorate(x, is_output=True)

    encoder = Model(inputs=input_img, outputs=encoded, name='encoder')
    autoencoder = Model(inputs=input_img, outputs=decoded, name='AE')
    if compile:
        autoencoder.compile(optimizer='adadelta', loss='mse')
    return autoencoder, encoder
Esempio n. 17
0
 def build_model(self):
     """Q-network: shared base network plus a linear action-value head."""
     shared_model = self.base_network_build_fn()
     features = shared_model.output
     q_values = layers.Dense(self.action_num)(features)
     model = Model(shared_model.input, q_values, name=self.model_type)
     # Per the original note, this optimizer and loss are not actually used.
     model.compile(optimizer=Adam(lr=self.lr), loss=DDPG_loss)
     return model
Esempio n. 18
0
def define_com_cnn_model(com_cnn):
    """Build and compile the compact-representation (ComCNN) model."""
    # Prepare the compact encoding before wiring the model output.
    com_cnn.encode_the_compact_representation_of_the_original_image()
    com_model = Model(com_cnn.input_layer, com_cnn.set_third_block())
    com_model.compile(loss=[com_cnn_cost_function], optimizer=the_optimizer)
    return com_model
Esempio n. 19
0
def define_rec_model(rec_cnn):
    """Build and compile the reconstruction (RecCNN) model."""
    residual_outputs = rec_cnn.set_residual_block()
    rec_model = Model(rec_cnn.upscaled_image, residual_outputs)
    rec_model.compile(loss=[rec_cnn_cost_function], optimizer=the_optimizer)
    return rec_model
Esempio n. 20
0
class RTSNNet:
    def __init__(self, game, encoder):
        """
        NNet model, copied from Othello NNet, with reduced fully connected layers fc1 and fc2 and reduced nnet_args.num_channels
        :param game: game configuration
        :param encoder: Encoder, used to encode game boards
        """
        from rts.src.config_class import CONFIG

        # game params
        self.board_x, self.board_y, num_encoders = game.getBoardSize()
        self.action_size = game.getActionSize()
        """
        num_encoders = CONFIG.nnet_args.encoder.num_encoders
        """
        # NOTE(review): this overwrites the num_encoders value unpacked from
        # game.getBoardSize() above — the board-derived value is discarded.
        num_encoders = encoder.num_encoders

        # Neural Net
        self.input_boards = Input(shape=(
            self.board_x, self.board_y,
            num_encoders))  # s: batch_size x board_x x board_y x num_encoders

        x_image = Reshape(
            (self.board_x, self.board_y, num_encoders)
        )(self.input_boards)  # batch_size  x board_x x board_y x num_encoders
        # Two 'same'-padded conv blocks preserve the board's spatial size...
        h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(
            CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(
                x_image)))  # batch_size  x board_x x board_y x num_channels
        h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(
            CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(
                h_conv1)))  # batch_size  x board_x x board_y x num_channels
        # ...then two 'valid'-padded blocks shrink each dimension by 2.
        h_conv3 = Activation('relu')(
            BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels,
                                              3,
                                              padding='valid',
                                              use_bias=False)(h_conv2))
        )  # batch_size  x (board_x-2) x (board_y-2) x num_channels
        h_conv4 = Activation('relu')(
            BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels,
                                              3,
                                              padding='valid',
                                              use_bias=False)(h_conv3))
        )  # batch_size  x (board_x-4) x (board_y-4) x num_channels
        h_conv4_flat = Flatten()(h_conv4)
        # Two batch-normalised dense blocks with dropout.
        s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(
            BatchNormalization(axis=1)(Dense(
                256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
        s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(
            BatchNormalization(axis=1)(Dense(
                128, use_bias=False)(s_fc1))))  # batch_size x 128
        # Two heads: policy distribution pi and scalar value v in [-1, 1].
        self.pi = Dense(self.action_size, activation='softmax',
                        name='pi')(s_fc2)  # batch_size x self.action_size
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        self.model.compile(
            loss=['categorical_crossentropy', 'mean_squared_error'],
            optimizer=Adam(CONFIG.nnet_args.lr))
Esempio n. 21
0
 def build_model(self):
     """Policy network: shared base network plus a softmax head over actions."""
     shared_model = self.base_network_build_fn()
     features = shared_model.output
     action_probs = layers.Dense(self.action_num,
                                 activation='softmax')(features)
     model = Model(shared_model.input, action_probs, name=self.model_type)
     model.compile(optimizer=Adam(lr=self.lr),
                   loss=losses.categorical_crossentropy)
     return model
Esempio n. 22
0
def create_deep_autoencoder():
    """Chain the deep encoder and decoder into one compiled autoencoder."""
    inputs = Input(shape=(INPUT_LENGTH, ))
    encoded = create_deep_encoder(inputs)
    model = Model(inputs, create_deep_decoder(encoded))

    model.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy')

    print(model.summary())

    return model
Esempio n. 23
0
    def _add_discriminator_block(old_model, config):
        """Grow the discriminator to handle twice the input resolution.

        Returns [straight_through_model, fade_in_model]; the fade-in variant
        blends the old input pathway with the new one via WeightedSum.
        """
        # New input is double the previous spatial size, same channel count.
        old_shape = list(old_model.input.shape)
        doubled_shape = (old_shape[-2] * 2, old_shape[-2] * 2, old_shape[-1])
        model_input = Input(shape=doubled_shape, name="doubled_dis_input")

        # weights init
        w_init = RandomNormal(stddev=0.02)
        w_const = max_norm(1.0)

        # Three conv layers; the second positional Conv2D argument is the
        # kernel size (the original local was misleadingly named 'strides').
        x = model_input
        for kernel in [1, 3, 3]:
            x = Conv2D(config['filters'],
                       kernel,
                       padding='same',
                       kernel_initializer=w_init,
                       kernel_constraint=w_const)(x)
            x = LeakyReLU()(x)

        x = AveragePooling2D()(x)
        new_block = x

        # skip the input, 1x1 and activation for the old model
        for idx in range(config['num_input_layers'], len(old_model.layers)):
            x = old_model.layers[idx](x)

        # Straight-through model: new input path only.
        model1 = Model(model_input, x)
        model1.compile(loss=wasserstein_loss,
                       optimizer=ProGan.get_optimizer(config))

        # Fade-in path: downsample the new larger image and run it through
        # the old model's input-processing layers.
        downsample = AveragePooling2D()(model_input)
        old_block = old_model.layers[1](downsample)
        old_block = old_model.layers[2](old_block)

        # Blend old and new input pathways, then reuse the old body.
        x = WeightedSum()([old_block, new_block])
        for idx in range(config['num_input_layers'], len(old_model.layers)):
            x = old_model.layers[idx](x)

        model2 = Model(model_input, x)
        model2.compile(loss=wasserstein_loss,
                       optimizer=ProGan.get_optimizer(config))

        return [model1, model2]
Esempio n. 24
0
    def model_description(self):
        """Multi-kernel-size CNN (Kim-style) over embedded token sequences."""
        filter_sizes = [3, 4, 5]
        num_filters = 10
        drop = 0.1
        embedding_dim = self.EMBEDDING_DIMENTION

        inputs = Input(shape=(self.MAX_SEQ_LEN, ), dtype='int32')
        embedded = self.embedding_layer()(inputs)

        # Add a channels axis so Conv2D slides over (tokens x embedding).
        reshaped = Reshape((self.MAX_SEQ_LEN, embedding_dim, 1))(embedded)

        # One conv + max-pool branch per filter size; each pool collapses the
        # remaining sequence axis to a single position.
        pooled = []
        for size in filter_sizes:
            conv = Conv2D(num_filters,
                          kernel_size=(size, embedding_dim),
                          padding='valid',
                          kernel_initializer='normal',
                          activation='relu')(reshaped)
            pooled.append(
                MaxPool2D(pool_size=(self.MAX_SEQ_LEN - size + 1, 1),
                          strides=(1, 1),
                          padding='valid')(conv))

        merged = Concatenate(axis=1)(pooled)
        flat = Flatten()(merged)
        regularized = Dropout(drop)(flat)
        output = Dense(self.num_labels(), activation=self.ACTIVATION)(regularized)

        # this creates a model that includes
        model = Model(inputs=inputs, outputs=output)

        # adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(optimizer='adam',
                      loss=self.LOSS_FUNCTION,
                      metrics=['accuracy'])

        return model
Esempio n. 25
0
 def build_model(self):
     """Actor network: shared base network plus a softmax head over actions."""
     shared_model = self.base_network_build_fn()
     features = shared_model.output
     action_probs = layers.Dense(self.action_num,
                                 activation='softmax')(features)
     model = Model(shared_model.input, action_probs, name=self.model_type)
     # Per the original note, this optimizer and loss are not actually used.
     model.compile(optimizer=Adam(lr=self.lr), loss='mse')
     return model
Esempio n. 26
0
def attn_many_to_one(dataset_object: LSTM_data):
    """Train an attention-based stacked-LSTM regressor on sentiment-free features.

    Loads train/test splits from *dataset_object*, drops the last 12 feature
    columns (the sentiment features), fits the model, saves the weights and
    plots/evaluates the result.
    """
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    # Strip the trailing 12 feature columns from every timestep.
    X_train, X_test = X_train[:, :, :-12], X_test[:, :, :-12]

    inp = Input(shape=(X_train.shape[1], X_train.shape[2]))

    # Three identical stacked LSTM layers; all return full sequences so the
    # attention layer can weight every timestep.
    x = inp
    for _ in range(3):
        x = LSTM(NEURONS,
                 return_sequences=True,
                 activation=ACTIVATION,
                 recurrent_activation="sigmoid",
                 activity_regularizer=regularizers.l2(L2),
                 bias_regularizer=regularizers.l2(BIAIS_REG),
                 )(x)

    x = attention()(x)
    x = Dropout(DROPOUT)(x)
    preds = Dense(1,
                  activation='relu',
                  trainable=True,
                  bias_regularizer=regularizers.l2(BIAIS_REG),
                  activity_regularizer=regularizers.l2(L2)
                  )(x)

    model = Model(inputs=[inp], outputs=[preds])
    model.compile(optimizer=Adam(),
                  loss=['mean_squared_error']
                  )

    # Fit on the training split, validating against the held-out test split.
    history = model.fit(X_train, Y_train,
                        epochs=EPOCHS,
                        batch_size=BATCH_SIZE,
                        validation_data=(X_test, Y_test),
                        callbacks=[EARLY_STOP, REDUCE_LR]
                        )
    model.save("data/weights/attn_based_lstm_no_senti")
    plot_train_loss(history)
    evaluate(model, X_test, Y_test, dataset_object, name="attn_evaluate", senti="no")
Esempio n. 27
0
    def build_model(self):
        """Build the Q-network: shared base network plus a linear value head.

        Returns:
            A compiled Keras Model producing one Q-value per action
            (``self.action_num`` outputs, no activation).
        """
        backbone = self.base_network_build_fn()
        q_values = layers.Dense(self.action_num)(backbone.output)
        model = Model(backbone.input, q_values, name=self.model_type)
        # MSE stands in for the DQN loss here.
        model.compile(optimizer=Adam(lr=self.lr), loss='mse')
        return model
Esempio n. 28
0
def make_model_no_reg(classes, points_per_sample, channel_mode='channels_last'):
    """Build a dual-stream TimeDistributed-CNN + LSTM classifier for mmWave data.

    One CNN stream consumes range-Doppler heatmap sequences, the other
    range-azimuth heatmap sequences; their flattened per-timestep features are
    concatenated and fed through two LSTM layers into a softmax classifier.

    Args:
        classes: sized collection of class labels; only ``len(classes)`` is used.
        points_per_sample: number of timesteps per sample.
        channel_mode: Keras image data format, 'channels_last' or 'channels_first'.

    Returns:
        A compiled Keras Model with two inputs (range-Doppler, range-azimuth)
        and a softmax output over the classes.
    """
    # Range-Doppler stream. rd_shape is a module-level constant — assumed to be
    # the (H, W) of the heatmap; TODO confirm.
    mmw_rdpl_input = (int(points_per_sample),) + rd_shape + (1,) if channel_mode == 'channels_last' else (points_per_sample, 1) + rd_shape
    mmw_rdpl_TDCNN = Sequential()
    mmw_rdpl_TDCNN.add(
        TimeDistributed(
            Conv2D(filters=8, kernel_size=(3, 3), data_format=channel_mode,
                   kernel_initializer='random_uniform'),
            input_shape=mmw_rdpl_input))
    mmw_rdpl_TDCNN.add(TimeDistributed(tf.keras.layers.LeakyReLU(alpha=0.1)))
    mmw_rdpl_TDCNN.add(TimeDistributed(BatchNormalization()))
    # FIX: data_format was missing here (the razi stream's second Conv2D had
    # it) — without it a 'channels_first' caller got mismatched layouts.
    mmw_rdpl_TDCNN.add(TimeDistributed(
        Conv2D(filters=16, kernel_size=(3, 3), data_format=channel_mode,
               )))
    mmw_rdpl_TDCNN.add(TimeDistributed(tf.keras.layers.LeakyReLU(alpha=0.1)))
    mmw_rdpl_TDCNN.add(TimeDistributed(BatchNormalization()))
    mmw_rdpl_TDCNN.add(TimeDistributed(MaxPooling2D(pool_size=2)))
    mmw_rdpl_TDCNN.add(TimeDistributed(Flatten()))  # this should be where layers meets

    # Range-azimuth stream: mirrors the range-Doppler stream over ra_shape.
    mmw_razi_input = (int(points_per_sample),) + ra_shape + (1,) if channel_mode == 'channels_last' else (points_per_sample, 1) + ra_shape
    mmw_razi_TDCNN = Sequential()
    # FIX: data_format was missing here (the rdpl stream's first Conv2D had it).
    mmw_razi_TDCNN.add(
        TimeDistributed(
            Conv2D(filters=8, kernel_size=(3, 3), data_format=channel_mode,
                   kernel_initializer='random_uniform'),
            input_shape=mmw_razi_input))
    mmw_razi_TDCNN.add(TimeDistributed(tf.keras.layers.LeakyReLU(alpha=0.1)))
    mmw_razi_TDCNN.add(TimeDistributed(BatchNormalization()))
    mmw_razi_TDCNN.add(TimeDistributed(
        Conv2D(filters=16, kernel_size=(3, 3), data_format=channel_mode,
               )))
    mmw_razi_TDCNN.add(TimeDistributed(tf.keras.layers.LeakyReLU(alpha=0.1)))
    mmw_razi_TDCNN.add(TimeDistributed(BatchNormalization()))
    mmw_razi_TDCNN.add(TimeDistributed(MaxPooling2D(pool_size=2)))
    mmw_razi_TDCNN.add(TimeDistributed(Flatten()))  # this should be where layers meets

    # Fuse the two per-timestep feature vectors and model temporal dynamics.
    merged = concatenate([mmw_rdpl_TDCNN.output, mmw_razi_TDCNN.output])  # concatenate two feature extractors
    regressive_tensor = LSTM(units=32, return_sequences=True, kernel_initializer='random_uniform',
                             )(merged)
    regressive_tensor = Dropout(rate=0.5)(regressive_tensor)
    regressive_tensor = LSTM(units=32, return_sequences=False, kernel_initializer='random_uniform',
                             )(regressive_tensor)
    regressive_tensor = Dropout(rate=0.5)(regressive_tensor)

    regressive_tensor = Dense(units=256,
                              )(regressive_tensor)
    regressive_tensor = Dropout(rate=0.5)(regressive_tensor)
    regressive_tensor = Dense(len(classes), activation='softmax', kernel_initializer='random_uniform')(regressive_tensor)

    model = Model(inputs=[mmw_rdpl_TDCNN.input, mmw_razi_TDCNN.input], outputs=regressive_tensor)
    adam = tf.keras.optimizers.Adam(lr=5e-5, decay=1e-7)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Esempio n. 29
0
 def test_add_entropy_loss_on_functional_model(self):
     """add_loss on a functional model must not trigger missing-gradient warnings."""
     inputs = Input(shape=(1, ))
     targets = Input(shape=(1, ))
     outputs = testing_utils.Bias()(inputs)
     model = Model([inputs, targets], outputs)
     # Attach the cross-entropy between the target input and the output as a loss.
     entropy_loss = losses.binary_crossentropy(targets, outputs)
     model.add_loss(entropy_loss)
     model.compile('sgd', run_eagerly=testing_utils.should_run_eagerly())
     # Capture warnings emitted during fit and assert none mention gradients.
     with test.mock.patch.object(logging, 'warning') as warn_mock:
         model.fit([self.x, self.y], batch_size=3, epochs=5)
         self.assertNotIn('Gradients do not exist for variables',
                          str(warn_mock.call_args))
Esempio n. 30
0
def FCN1(input_shape, input_name, optimizer, loss, metrics, regularizer):
    """Build and compile a simple two-layer fully connected classifier.

    Args:
        input_shape: shape tuple of the input tensor (excluding batch dim).
        input_name: name given to the input layer.
        optimizer: Keras optimizer instance or identifier string.
        loss: loss function or identifier string.
        metrics: list of metrics to track during training.
        regularizer: kernel regularizer applied to both Dense layers.

    Returns:
        The compiled Keras Model (named 'FCN1').
    """
    # BUG FIX: the original ignored the input_shape/input_name parameters and
    # used the module-level INPUT_SHAPE/INPUT_NAME globals instead.
    x = Input(shape=input_shape, name=input_name)
    a = Dense(1024, activation='relu', kernel_regularizer=regularizer)(x)
    # NUM_CLASSES stays a module-level constant — no parameter exists for it.
    y = Dense(NUM_CLASSES, activation='softmax', kernel_regularizer=regularizer, name='Softmax')(a)

    model = Model(x, y, name='FCN1')
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=metrics)
    return model