def define_model(self):
        input_img = Input(shape=(
            self.model_parameters.img_height,
            self.model_parameters.img_width,
            self.model_parameters.num_channels
        ))

        x = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(input_img)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(
            filters=512,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
        )(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(filters=512, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Flatten()(x)
        x = layers.Dense(units=1)(x)

        model = Model(name=self.model_name, inputs=input_img, outputs=x)

        return model
Example #2
    def create_model(self):
        z = Input(shape=[self.hidden_size])

        x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
        x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)

        x = layers.Reshape((8, 8, 256))(x)
        x = layers.Conv2DTranspose(128, (5, 5),
                                   strides=(1, 1),
                                   padding='same',
                                   use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2DTranspose(128, (5, 5),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2DTranspose(64, (5, 5),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2DTranspose(3, (5, 5),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False,
                                   activation='tanh')(x)

        model = Model(name='Generator', inputs=z, outputs=x)
        return model
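A quick smoke test for this generator; a sketch that assumes it is called from the owning object and that hidden_size is the latent width (e.g. 128):

import tensorflow as tf

# Hypothetical check from inside the owning class: sample latents, generate, inspect shape.
generator = self.create_model()
z = tf.random.normal([16, self.hidden_size])
fake_images = generator(z, training=False)
print(fake_images.shape)  # (16, 64, 64, 3): 8 -> 8 -> 16 -> 32 -> 64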
Example #3
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

    model.add(
        layers.Conv2DTranspose(128, (5, 5),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(
        layers.Conv2DTranspose(64, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(
        layers.Conv2DTranspose(1, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False,
                               activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model
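The shape asserts above double as documentation; a minimal sanity check in the same spirit (the 100-dim noise matches input_shape):

import tensorflow as tf

generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
print(generated_image.shape)  # (1, 28, 28, 1), values in [-1, 1] from the tanh output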
Example #4
def create_disc(Xt, Ct,
                img_shape=(32, 32, 3),
                filter_size=5,
                strides=[2, 2, 2, 2],
                filters=[64, 128, 256, 512]):

    with tf.name_scope("Disc"):
        X = kl.Input(img_shape, tensor=Xt, name="X")
        C = kl.Input(img_shape, tensor=Ct, name="C")

        layer = kl.concatenate([X, C], axis=1)
        layer = kl.GaussianNoise(stddev=0.1)(layer)
        # Discriminator

        layer = kl.Conv2D(
            filters=filters[0],
            kernel_size=filter_size,
            padding="same",
            strides=strides[0])(layer)  # use the configured stride, not a hard-coded 2
        layer = kl.LeakyReLU()(layer)

        for l in range(1, len(filters)):
            conv = kl.Conv2D(
                filters=filters[l],
                kernel_size=filter_size,
                padding="same",
                strides=strides[l])(layer)
            layer = kl.LeakyReLU()(conv)
            layer = kl.Dropout(0.2)(layer)
            layer = kl.BatchNormalization()(layer)

        layer = kl.Flatten()(layer)
        D_out = kl.Dense(1, activation="sigmoid")(layer)

        model = k.Model(inputs=[X, C], outputs=D_out)
    return model
Example #5
def make_discriminator_model():
    """ Discriminator network structure.

  Returns:
    Sequential model.

  """
    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(64, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      input_shape=[32, 32, 3]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(rate=0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(rate=0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model
Example #6
def make_discriminator_model():
    """ implements discriminate.

  Returns:
    model.

  """
    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(64, (5, 5),
                      strides=(2, 2),
                      padding='same',
                      input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(rate=0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(rate=0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model
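This discriminator matches the 28x28 generator from Example #3; a short sketch wiring the two together (the final Dense emits a raw logit, so a from_logits loss is assumed):

import tensorflow as tf

discriminator = make_discriminator_model()
fake = make_generator_model()(tf.random.normal([1, 100]), training=False)
decision = discriminator(fake, training=False)
print(decision.shape)  # (1, 1) raw logit; pair with
# tf.keras.losses.BinaryCrossentropy(from_logits=True)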
Example #7
def make_discriminator_model(num_classes, color_ch=3):
    cnn = tf.keras.Sequential()
    cnn.add(
        layers.Conv2D(128,
                      4,
                      padding='same',
                      activity_regularizer=l2(1e-4),
                      input_shape=(32, 32, color_ch)))
    cnn.add(layers.BatchNormalization())
    cnn.add(layers.LeakyReLU())
    cnn.add(layers.Dropout(0.15))

    cnn.add(
        layers.Conv2D(256,
                      5,
                      strides=2,
                      padding='same',
                      activity_regularizer=l2(1e-4)))
    cnn.add(layers.BatchNormalization())
    cnn.add(layers.LeakyReLU())
    cnn.add(layers.Dropout(0.15))

    cnn.add(
        layers.Conv2D(128, 4, padding='same', activity_regularizer=l2(1e-4)))
    cnn.add(layers.BatchNormalization())
    cnn.add(layers.LeakyReLU())
    cnn.add(layers.Dropout(0.15))

    cnn.add(
        layers.Conv2D(256,
                      5,
                      strides=2,
                      padding='same',
                      activity_regularizer=l2(1e-4)))
    cnn.add(layers.BatchNormalization())
    cnn.add(layers.LeakyReLU())
    cnn.add(layers.Dropout(0.15))

    cnn.add(layers.Flatten())
    cnn.add(layers.Dense(256, activity_regularizer=l2(1e-4)))
    cnn.add(layers.BatchNormalization())
    cnn.add(layers.LeakyReLU())
    cnn.add(layers.Dropout(0.4))

    cnn.add(layers.Dense(128, activity_regularizer=l2(1e-4)))
    cnn.add(layers.BatchNormalization())
    cnn.add(layers.LeakyReLU())
    cnn.add(layers.Dropout(0.3))

    cnn.add(layers.Dense(num_classes, activation='softmax', name='auxiliary'))

    cnn.compile(optimizer=Adam(decay=1e-5),
                loss=cross_entropy,
                metrics=[sparse_categorical_accuracy])

    return cnn
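cross_entropy and sparse_categorical_accuracy are not defined in this snippet; a plausible setup, assuming integer class labels to match the sparse metric and the softmax output:

from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import sparse_categorical_accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

# Assumed definition: the model ends in softmax, so from_logits=False.
cross_entropy = SparseCategoricalCrossentropy(from_logits=False)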
Example #8
        def encoder_block(a, n_filters):
            a = layers.Conv2D(filters=n_filters,
                              kernel_size=(4, 4),
                              padding='same',
                              kernel_regularizer=regularizers.l1_l2(
                                  l1=Config.l1_kernel_regularization,
                                  l2=Config.l2_kernel_regularization))(a)
            a = layers.BatchNormalization()(a)
            a = layers.LeakyReLU()(a)
            a = layers.MaxPool2D(pool_size=(2, 2))(a)

            if Config.use_spatial_dropout:
                a = layers.SpatialDropout2D(
                    rate=Config.spatial_dropout_rate)(a)
            return a
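A usage sketch chaining the block into an encoder; it assumes the same Input, Model, and Config names the snippet itself relies on:

# Hypothetical encoder built from the block; each stage halves the spatial dims.
inputs = Input(shape=(64, 64, 3))
a = encoder_block(inputs, n_filters=64)   # -> (32, 32, 64)
a = encoder_block(a, n_filters=128)       # -> (16, 16, 128)
a = encoder_block(a, n_filters=256)       # -> (8, 8, 256)
encoder = Model(inputs=inputs, outputs=a)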
Example #9
def CNN5(input_tensor, kr=0.01):
    """
    CNN-5
    :param input_tensor:
    :param kr: kernel regularizer rate
    :return:
    """
    filters = [32, 64, 128, 128, 256]
    x = input_tensor
    for i in range(5):
        x = layers.Conv2D(filters[i], (3, 3), padding='same', kernel_regularizer=l2(kr))(x)
        x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU(alpha=0.2)(x)
        if i == 0:
            x = layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1))(x)
        else:
            x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    return x
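CNN5 returns a tensor rather than a model; a minimal wrapper sketch (the input size and the keras import names are assumptions):

# Hypothetical wrapper: build an input, run the backbone, wrap as a Model.
inputs = layers.Input(shape=(64, 128, 1))
features = CNN5(inputs, kr=0.01)
model = keras.Model(inputs=inputs, outputs=features)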
Example #10
def discriminator_and_classifier_model(c_dim):

    inputs = keras.Input(shape=(99,))
    x = layers.Dense(1024)(inputs)  # input_shape is ignored in the functional API
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(512)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(256)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    d_out = layers.Dense(1)(x)

    x = layers.Dense(256)(x)
    x = layers.LeakyReLU()(x)
    q_out = layers.Dense(c_dim)(x)

    return keras.Model(inputs=inputs, outputs=d_out), keras.Model(inputs=inputs, outputs=q_out)
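Because d_out and q_out branch from the same trunk, the two returned models share weights, as in InfoGAN-style setups; a usage sketch:

import tensorflow as tf

d_model, q_model = discriminator_and_classifier_model(c_dim=10)
features = tf.random.normal([4, 99])
d_logits = d_model(features)  # (4, 1) real/fake logit
q_logits = q_model(features)  # (4, 10) latent-code prediction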
Example #11
def discriminator_2d():
    initializer = random_normal_initializer(0., 0.02)
    inputs = layers.Input(shape=[20, 49, 1])

    x = downsample(32, input_shape=[20, 49, 1], layer_type='conv')(inputs)
    x = downsample(64, input_shape=[10, 25, 32], layer_type='conv')(x)
    x = downsample(128, input_shape=[5, 13, 64], layer_type='conv')(x)
    x = layers.Conv2D(256,
                      kernel_size=3,
                      strides=1,
                      kernel_initializer=initializer,
                      use_bias=False,
                      padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(1,
                      kernel_size=3,
                      strides=1,
                      kernel_initializer=initializer,
                      padding='same')(x)

    return Model(inputs, x)
Example #12
def create_classifier():

    with tf.name_scope("Disc"):
        X = kl.Input((32, 32, 3), name="X")
        layer = X

        for l in range(4):
            layer = kl.Conv2D(
                filters=32 * (2**l),
                kernel_size=3,
                padding="same",
                use_bias=False,
                activation="relu",
                kernel_regularizer=kr.l2())(layer)
            layer = kl.BatchNormalization()(layer)
            layer = kl.Conv2D(
                filters=32 * (2**l),
                kernel_size=3,
                padding="same",
                use_bias=False,
                activation="relu",
                kernel_regularizer=kr.l2())(layer)
            layer = kl.BatchNormalization()(layer)
            layer = kl.MaxPool2D()(layer)
            layer = kl.Dropout(0.5)(layer)

        layer = kl.Flatten()(layer)
        layer = kl.Dense(600, kernel_regularizer=kr.l2())(layer)
        layer = kl.LeakyReLU()(layer)
        layer = kl.Dropout(0.5)(layer)
        D_out = kl.Dense(10, activation="softmax",
                         kernel_regularizer=kr.l2())(layer)

        model = k.Model(inputs=X, outputs=D_out)
        fidmodel = k.Model(inputs=X, outputs=layer)
    return model, fidmodel
Example #13
    def create_model(self):
        # input_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
        input_text = Input(shape=self.max_sequence_length)
        input_image = Input(shape=(self.img_height, self.img_width,
                                   self.num_channels))

        embedded_id = layers.Embedding(self.vocab_size,
                                       self.embedding_size)(input_text)
        embedded_id = layers.Flatten()(embedded_id)
        embedded_id = layers.Dense(units=input_image.shape[1] *
                                   input_image.shape[2])(embedded_id)
        embedded_id = layers.Reshape(target_shape=(input_image.shape[1],
                                                   input_image.shape[2],
                                                   1))(embedded_id)

        x = layers.Concatenate(axis=3)([input_image, embedded_id])
        x = layers.Conv2D(
            filters=64,
            kernel_size=(4, 4),
            strides=(2, 2),
            padding='same',
        )(x)  # apply to the concatenated tensor so the text input is actually used
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(
            filters=128,
            kernel_size=(4, 4),
            strides=(2, 2),
            padding='same',
        )(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(
            filters=256,
            kernel_size=(4, 4),
            strides=(2, 2),
            padding='same',
        )(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.ZeroPadding2D()(x)

        x = layers.Conv2D(
            filters=512,
            kernel_size=(4, 4),
            strides=(1, 1),
            padding='valid',
        )(x)
        x = tfa.layers.InstanceNormalization(axis=-1)(x)
        x = layers.LeakyReLU()(x)

        x = layers.ZeroPadding2D()(x)

        x = layers.Conv2D(
            filters=1,
            kernel_size=(4, 4),
            strides=(1, 1),
            padding='valid',
        )(x)

        model = Model(name='discriminator',
                      inputs=[input_text, input_image],
                      outputs=x)

        return model
Example #14
        if not os.path.isfile(model_dir):  # model_dir points to an .h5 file, not a directory
            continue

        model = load_model(model_dir)
    else:

        #input_tensor = Input(shape=(30,4,1))
        input_tensor = Input(shape=(input_size, 4, 1))

        layer_x = layers.Conv2D(
            16, (1, 4),
            kernel_regularizer=regularizers.l1(l=regularizer),
            kernel_initializer="zeros",
            bias_initializer="zeros")(input_tensor)
        layer_x = layers.BatchNormalization()(layer_x)
        layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

        layer_x = layers.Conv2D(
            16, (4, 1),
            padding='same',
            kernel_regularizer=regularizers.l1(l=regularizer),
            kernel_initializer="zeros",
            bias_initializer="zeros")(layer_x)
        layer_x = layers.BatchNormalization()(layer_x)
        layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

        layer_x = layers.Conv2D(
            16, (4, 1),
            padding='same',
            kernel_regularizer=regularizers.l1(l=regularizer),
            kernel_initializer="zeros",
Example #15
    def define_model(self):
        z = Input(shape=[self.model_parameters.latent_size])
        class_id = Input(shape=[1])

        embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
        embedded_id = layers.Dense(units=8 * 8)(embedded_id)
        embedded_id = layers.Reshape(target_shape=(8, 8, 1))(embedded_id)

        x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)
        x = layers.Reshape((8, 8, 256))(x)

        inputs = layers.Concatenate(axis=3)([x, embedded_id])

        x = layers.Conv2DTranspose(128,
                                   kernel_size=(4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False)(inputs)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2D(128,
                          kernel_size=(5, 5),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False)(x)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2DTranspose(128,
                                   kernel_size=(4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   use_bias=False)(x)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2D(128,
                          kernel_size=(5, 5),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False)(x)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2D(128,
                          kernel_size=(5, 5),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False)(x)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2D(3,
                          kernel_size=(5, 5),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False,
                          activation='tanh')(x)

        model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
        return model
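A usage sketch for this class-conditional generator; it assumes the method is called on the owning object and that class ids are integers 0-9, matching Embedding(input_dim=10):

import tensorflow as tf

# Hypothetical call from inside the owning class.
generator = self.define_model()
z = tf.random.normal([8, self.model_parameters.latent_size])
class_id = tf.random.uniform([8, 1], minval=0, maxval=10, dtype=tf.int32)
fake = generator([z, class_id], training=False)
print(fake.shape)  # (8, 32, 32, 3): 8 -> 16 -> 32 via the two stride-2 deconvs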
Example #16
#temporal input branch
temporal_input_layer = Input(shape=(sequence_length, 7))
main_rnn_layer = layers.LSTM(64, return_sequences=True,
                             recurrent_dropout=0.15)(temporal_input_layer)

#demographic input branch
demographic_input_layer = Input(shape=(18,))
demographic_dense = layers.Dense(16)(demographic_input_layer)
demographic_dropout = layers.Dropout(0.15)(demographic_dense)

rnn_c = layers.LSTM(32)(main_rnn_layer)
merge_c = layers.Concatenate(axis=-1)([rnn_c, demographic_dropout])
dense_c = layers.Dense(128)(merge_c)
dropout_c = layers.Dropout(0.3)(dense_c)
precases = layers.Dense(1,
                        activation=layers.LeakyReLU(alpha=0.25),
                        name="precases")(dropout_c)

rnn_f = layers.LSTM(32)(main_rnn_layer)
merge_f = layers.Concatenate(axis=-1)([rnn_f, demographic_dropout])
dense_f = layers.Dense(128)(merge_f)
dropout_f = layers.Dropout(0.3)(dense_f)
prefatalities = layers.Dense(1,
                             activation=layers.LeakyReLU(alpha=0.25),
                             name="prefatalities")(dropout_f)

model = Model([temporal_input_layer, demographic_input_layer],
              [precases, prefatalities])

model.summary()
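A hedged compile sketch for this two-headed model; the loss choice is an assumption, keyed to the output names precases and prefatalities defined above:

# Assumed training setup: one regression loss per named output head.
model.compile(optimizer='adam',
              loss={'precases': 'mse', 'prefatalities': 'mse'},
              loss_weights={'precases': 1.0, 'prefatalities': 1.0})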
Example #17
    def define_model(self) -> keras.Model:
        x = Input(shape=[
            self.model_parameters.img_height, self.model_parameters.img_width,
            self.model_parameters.num_channels
        ])
        z = Input(shape=[
            self.model_parameters.img_height, self.model_parameters.img_width,
            self.model_parameters.num_channels
        ])

        xz = z
        if self.model_parameters.has_input_images:
            xz += x

        xz = layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            padding='same',
            use_bias=False,
        )(xz)
        xz = layers.BatchNormalization()(xz)
        xz = layers.LeakyReLU(alpha=0.2)(xz)

        xz = layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            padding='same',
            use_bias=False,
        )(xz)
        xz = layers.BatchNormalization()(xz)
        xz = layers.LeakyReLU(alpha=0.2)(xz)

        xz = layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            padding='same',
            use_bias=False,
        )(xz)
        xz = layers.BatchNormalization()(xz)
        xz = layers.LeakyReLU(alpha=0.2)(xz)

        xz = layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            padding='same',
            use_bias=False,
        )(xz)
        xz = layers.BatchNormalization()(xz)
        xz = layers.LeakyReLU(alpha=0.2)(xz)

        xz = layers.Conv2D(
            filters=3,
            kernel_size=(3, 3),
            padding='same',
            activation='tanh',
            use_bias=False,
        )(xz)

        if self.model_parameters.has_input_images:
            xz += x

        model = Model(name=self.model_name, inputs=[x, z], outputs=xz)
        return model
Example #18
def main(thread_num):
    # causing allocating 2 gpu devices
    # if tf.test.is_gpu_available():

    # select gpu device 0,1
    if device_type == 'gpu':
        os.environ["CUDA_VISIBLE_DEVICES"] = thread_num

    task = pd.read_csv("task.csv")
    #print(task)

    if os.path.isfile("output.csv"):
        output_csv = pd.read_csv("output.csv")
    else:
        output_csv = task
        output_csv = output_csv.drop(columns=['data_set'])

        #output_csv['train_acc'] = 0.0
        output_csv['final_train_loss'] = 100.0
        #output_csv['valid_acc'] = 0.0
        output_csv['final_valid_loss'] = 100.0
        output_csv['best_trade_acc'] = 0.0
        output_csv['best_trade_acc_epoch'] = 0
        output_csv['best_trade_f1'] = 0.0
        output_csv['best_trade_f1_epoch'] = 0
        output_csv['best_trade_precision'] = 0.0
        output_csv['best_trade_precision_epoch'] = 0
        output_csv['best_trade_recall'] = 0.0
        output_csv['best_trade_recall_epoch'] = 0
        # output_csv['best_trade_loss'] = 100.0
        # output_csv['best_trade_loss_epoch'] = 0

        output_csv['completed'] = 0

    for index, row in task.iterrows():

        #if tf.test.is_gpu_available():
        if device_type == 'gpu':
            if index % 2 != int(thread_num):
                continue

        completed = output_csv['completed'][index]
        if completed == 1:
            continue

        data_set = int(task['data_set'][index])

        load_dir = os.path.join(os.getcwd(), 'data_set/' + str(data_set))
        if not os.path.isdir(load_dir):
            continue

        task_id = int(task['task_id'][index])
        input_size = int(task['input'][index])
        pred_k = int(task['k'][index])
        feature_num = int(task['feature_num'][index])
        label_threshold = float(task['label_threshold'][index])
        lstm_units = int(task['lstm_units'][index])
        lr = float(task['learning_rate'][index])
        epsilon = float(task['epsilon'][index])
        regularizer = float(task['regularizer'][index])

        train_x = np.load(os.path.join(load_dir, 'train_x.npy'))
        train_y = np.load(os.path.join(load_dir, 'train_y_onehot.npy'))
        valid_x = np.load(os.path.join(load_dir, 'valid_x.npy'))
        valid_y = np.load(os.path.join(load_dir, 'valid_y_onehot.npy'))
        trade_y = np.load(os.path.join(load_dir, 'trading_valid_y_onehot.npy'))

        print('Running experiment {}'.format(task_id))

        #clear previous models
        clear_session()

        model_dir = os.path.join(os.getcwd(), 'load_model')

        if os.path.isdir(model_dir):
            model_dir = os.path.join(
                model_dir,
                str(task_id) + '/model/model_epoch_500.h5')
            if not os.path.isfile(model_dir):  # the path points to an .h5 file, not a directory
                continue

            model = load_model(model_dir)
        else:

            #input_tensor = Input(shape=(30,4,1))
            input_tensor = Input(shape=(input_size, 4, 1))

            layer_x = layers.Conv2D(16, (1, 4),
                                    kernel_regularizer=regularizers.l1(
                                        l=regularizer))(input_tensor)
            layer_x = layers.BatchNormalization()(layer_x)
            layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

            layer_x = layers.Conv2D(
                16, (4, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(layer_x)
            layer_x = layers.BatchNormalization()(layer_x)
            layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

            layer_x = layers.Conv2D(
                16, (4, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(layer_x)
            layer_x = layers.BatchNormalization()(layer_x)
            layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

            #dual input for ohlc+volume
            if feature_num == 5:
                train_x_ohlc = train_x[:, :, :4, :]
                train_x_volume = train_x[:, :, -1:, :]
                train_x = [train_x_ohlc, train_x_volume]

                valid_x_ohlc = valid_x[:, :, :4, :]
                valid_x_volume = valid_x[:, :, -1:, :]
                valid_x = [valid_x_ohlc, valid_x_volume]

                input_tensor2 = Input(shape=(input_size, 1, 1))

                layer_x2 = layers.Conv2D(16, (1, 1),
                                         kernel_regularizer=regularizers.l1(
                                             l=regularizer))(input_tensor2)
                layer_x2 = layers.BatchNormalization()(layer_x2)
                layer_x2 = layers.LeakyReLU(alpha=0.01)(layer_x2)

                layer_x2 = layers.Conv2D(16, (4, 1),
                                         padding='same',
                                         kernel_regularizer=regularizers.l1(
                                             l=regularizer))(layer_x2)
                layer_x2 = layers.BatchNormalization()(layer_x2)
                layer_x2 = layers.LeakyReLU(alpha=0.01)(layer_x2)

                layer_x2 = layers.Conv2D(16, (4, 1),
                                         padding='same',
                                         kernel_regularizer=regularizers.l1(
                                             l=regularizer))(layer_x2)
                layer_x2 = layers.BatchNormalization()(layer_x2)
                layer_x2 = layers.LeakyReLU(alpha=0.01)(layer_x2)

                layer_x = layers.concatenate([layer_x, layer_x2], axis=-1)

            # Inception Module
            tower_1 = layers.Conv2D(
                32, (1, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(layer_x)
            tower_1 = layers.BatchNormalization()(tower_1)
            tower_1 = layers.LeakyReLU(alpha=0.01)(tower_1)
            tower_1 = layers.Conv2D(
                32, (3, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(tower_1)
            tower_1 = layers.BatchNormalization()(tower_1)
            tower_1 = layers.LeakyReLU(alpha=0.01)(tower_1)

            tower_2 = layers.Conv2D(
                32, (1, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(layer_x)
            tower_2 = layers.BatchNormalization()(tower_2)
            tower_2 = layers.LeakyReLU(alpha=0.01)(tower_2)
            tower_2 = layers.Conv2D(
                32, (5, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(tower_2)
            tower_2 = layers.BatchNormalization()(tower_2)
            tower_2 = layers.LeakyReLU(alpha=0.01)(tower_2)

            tower_3 = layers.MaxPooling2D((3, 1),
                                          padding='same',
                                          strides=(1, 1))(layer_x)
            tower_3 = layers.Conv2D(
                32, (1, 1),
                padding='same',
                kernel_regularizer=regularizers.l1(l=regularizer))(tower_3)
            tower_3 = layers.BatchNormalization()(tower_3)
            tower_3 = layers.LeakyReLU(alpha=0.01)(tower_3)

            layer_x = layers.concatenate([tower_1, tower_2, tower_3], axis=-1)

            # concatenate features of tower_1, tower_2, tower_3
            layer_x = layers.Reshape((input_size, 96))(layer_x)
            #layer_x = layers.Reshape((input_size,feature_num))(input_tensor)

            # # 64 LSTM units
            #layer_x = layers.LSTM(64)(layer_x)

            # if using GPU
            if device_type == 'gpu':
                print('using GPU')
                layer_x = layers.CuDNNLSTM(
                    lstm_units,
                    kernel_regularizer=regularizers.l1(l=regularizer))(layer_x)
            # if using CPU
            elif device_type == 'cpu':
                print('using CPU')
                layer_x = layers.LSTM(
                    lstm_units,
                    kernel_regularizer=regularizers.l1(l=regularizer))(layer_x)
            else:
                sys.exit("wrong device type")

            # # The last output layer uses a softmax activation function
            output = layers.Dense(3, activation='softmax')(layer_x)

            if feature_num == 4:
                model = Model(input_tensor, output)

            elif feature_num == 5:
                model = Model([input_tensor, input_tensor2], output)

            opt = Adam(lr=lr, epsilon=epsilon)
            model.compile(loss='categorical_crossentropy',
                          optimizer=opt,
                          metrics=['accuracy'])
            #model.summary()

        save_dir = os.path.join(os.getcwd(), 'result/' + str(task_id))
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)

        (final_train_loss,
         final_valid_loss,
         best_trade_acc,
         best_trade_acc_epoch,
         best_trade_f1,
         best_trade_f1_epoch,
         best_trade_precision,
         best_trade_precision_epoch,
         best_trade_recall,
         best_trade_recall_epoch) = train_model(model,
                                                save_dir,
                                                task_id,
                                                train_x,
                                                train_y,
                                                valid_x,
                                                valid_y,
                                                trade_y,
                                                batch_size=512,
                                                epochs=3)

        with open(os.path.join(save_dir, 'readme.txt'), 'w') as f:
            f.write("""'task id = {}\n
                        input size = {}\n
                        prediction k = {}\n
                        feature = {}\n
                        label threshold = {}\n
                        lstm units = {}\n
                        learning rate = {}\n
                        epsilon = {}\n
                        regularizer = {}\n
                        data set = {}'""".format(task_id, input_size, pred_k,
                                                 feature_num, label_threshold,
                                                 lstm_units, lr, epsilon,
                                                 regularizer, data_set))

        # Use .loc for assignment to avoid pandas chained-indexing pitfalls.
        #output_csv['train_acc'][index] = train_acc
        output_csv.loc[index, 'final_train_loss'] = final_train_loss
        #output_csv['valid_acc'][index] = valid_acc
        output_csv.loc[index, 'final_valid_loss'] = final_valid_loss
        output_csv.loc[index, 'best_trade_acc'] = best_trade_acc
        output_csv.loc[index, 'best_trade_acc_epoch'] = best_trade_acc_epoch
        output_csv.loc[index, 'best_trade_f1'] = best_trade_f1
        output_csv.loc[index, 'best_trade_f1_epoch'] = best_trade_f1_epoch
        output_csv.loc[index, 'best_trade_precision'] = best_trade_precision
        output_csv.loc[index, 'best_trade_precision_epoch'] = best_trade_precision_epoch
        output_csv.loc[index, 'best_trade_recall'] = best_trade_recall
        output_csv.loc[index, 'best_trade_recall_epoch'] = best_trade_recall_epoch
        #output_csv['best_trade_loss'] = best_trade_loss
        #output_csv['best_trade_loss_epoch'] = best_trade_loss_epoch

        output_csv.loc[index, 'completed'] = 1

        output_csv.to_csv('output.csv', index=False)  # avoid accumulating index columns on re-read
Example #19
    def create_model(self):
        input_images = Input(shape=[self.img_height, self.img_width, self.num_channels])

        x1 = layers.Conv2D(
            filters=64,
            kernel_size=(7, 7),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(input_images)
        x1 = tfa.layers.InstanceNormalization()(x1)
        x1 = layers.ReLU()(x1)

        x2 = layers.Conv2D(
            filters=128,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(x1)
        x2 = tfa.layers.InstanceNormalization()(x2)
        x2 = layers.ReLU()(x2)

        x3 = layers.Conv2D(
            filters=256,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(x2)
        x3 = tfa.layers.InstanceNormalization()(x3)
        x3 = layers.ReLU()(x3)

        x4 = layers.Conv2D(
            filters=512,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(x3)
        x4 = tfa.layers.InstanceNormalization()(x4)
        x4 = layers.ReLU()(x4)

        x5 = layers.UpSampling2D()(x4)
        x5 = layers.Concatenate()([x5, x3])

        x5 = layers.Conv2D(
            filters=256,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x5)
        x5 = tfa.layers.InstanceNormalization()(x5)
        x5 = layers.LeakyReLU(alpha=0.2)(x5)

        x6 = layers.UpSampling2D()(x5)
        x6 = layers.Concatenate()([x6, x2])

        x6 = layers.Conv2D(
            filters=128,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x6)
        x6 = tfa.layers.InstanceNormalization()(x6)
        x6 = layers.LeakyReLU(alpha=0.2)(x6)

        x7 = layers.UpSampling2D()(x6)
        x7 = layers.Concatenate()([x7, x1])
        x7 = layers.Conv2D(
            filters=64,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x7)
        x7 = tfa.layers.InstanceNormalization()(x7)
        x7 = layers.LeakyReLU(alpha=0.2)(x7)

        x8 = layers.UpSampling2D()(x7)
        x8 = layers.Concatenate()([x8, input_images])
        x8 = layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x8)
        x8 = tfa.layers.InstanceNormalization()(x8)
        x8 = layers.LeakyReLU(alpha=0.2)(x8)

        x9 = layers.Conv2D(
            filters=3,
            kernel_size=(5, 5),
            strides=(1, 1),
            padding='same',
            use_bias=False,
            activation='tanh',
        )(x8)

        model = Model(name='Generator', inputs=input_images, outputs=x9)
        return model
Example #20
    def define_model(self):
        input_images = Input(shape=[
            self.model_parameters.img_height, self.model_parameters.img_width,
            self.model_parameters.num_channels
        ])

        x1 = layers.Conv2D(
            filters=64,
            kernel_size=(7, 7),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(input_images)
        x1 = tfa.layers.InstanceNormalization()(x1)
        x1 = layers.ReLU()(x1)

        x2 = layers.Conv2D(
            filters=128,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(x1)
        x2 = tfa.layers.InstanceNormalization()(x2)
        x2 = layers.ReLU()(x2)

        x3 = layers.Conv2D(
            filters=256,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
            use_bias=False,
        )(x2)
        x3 = tfa.layers.InstanceNormalization()(x3)
        x3 = layers.ReLU()(x3)

        x4 = advanced_layers.densely_connected_residual_block(x3)

        x5 = layers.Concatenate()([x4, x3])

        x5 = layers.Conv2D(
            filters=256,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x5)
        x5 = tfa.layers.InstanceNormalization()(x5)
        x5 = layers.LeakyReLU(alpha=0.2)(x5)

        x6 = layers.UpSampling2D()(x5)
        x6 = layers.Concatenate()([x6, x2])

        x6 = layers.Conv2D(
            filters=128,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x6)
        x6 = tfa.layers.InstanceNormalization()(x6)
        x6 = layers.LeakyReLU(alpha=0.2)(x6)

        x7 = layers.UpSampling2D()(x6)
        x7 = layers.Concatenate()([x7, x1])
        x7 = layers.Conv2D(
            filters=64,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x7)
        x7 = tfa.layers.InstanceNormalization()(x7)
        x7 = layers.LeakyReLU(alpha=0.2)(x7)

        x8 = layers.UpSampling2D()(x7)
        x8 = layers.Concatenate()([x8, input_images])
        x8 = layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x8)
        x8 = tfa.layers.InstanceNormalization()(x8)
        x8 = layers.LeakyReLU(alpha=0.2)(x8)

        x9 = layers.Conv2D(
            filters=3,
            kernel_size=(5, 5),
            strides=(1, 1),
            padding='same',
            use_bias=False,
            activation='tanh',
        )(x8)

        model = Model(name=self.model_name, inputs=input_images, outputs=x9)
        return model
Example #21
    def bn_act(x, act=True):
        x = layers.BatchNormalization()(x)
        if act:
            x = layers.LeakyReLU(alpha=0.3)(x)
        return x
Example #22
    def create_model(self):
        # inputs = Input(shape=[self.max_sequence_length, self.embedding_size])
        z = Input(shape=[self.hidden_size])
        captions = Input(shape=self.max_sequence_length)

        embeddings = layers.Embedding(self.vocab_size,
                                      self.embedding_size)(captions)

        embeddings = attention.multihead_attention_model(embeddings)
        embeddings = layers.Flatten()(embeddings)

        embeddings = layers.Dense(units=8 * 8 * 32, use_bias=False)(embeddings)
        embeddings = layers.BatchNormalization()(embeddings)
        embeddings = layers.LeakyReLU()(embeddings)
        embeddings = layers.Reshape((8, 8, 32))(embeddings)

        x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)
        x = layers.Reshape((8, 8, 256))(x)

        x = layers.Concatenate(axis=3)([x, embeddings])

        x = layers.UpSampling2D()(x)
        x = layers.Conv2D(
            filters=512,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        x = layers.UpSampling2D()(x)
        x = layers.Conv2D(
            filters=256,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        n_resnet = 6
        for _ in range(n_resnet):
            x = resnet_block(256, x)

        x = layers.UpSampling2D()(x)
        x = layers.Conv2D(
            filters=256,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        x = layers.UpSampling2D()(x)
        x = layers.Conv2D(
            filters=128,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        x = layers.UpSampling2D()(x)
        x = layers.Conv2D(
            filters=64,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)

        x = layers.Conv2D(
            filters=3,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
            activation='tanh',
        )(x)
        # model = Model(name='Generator', inputs=z, outputs=x)
        model = Model(name='Generator', inputs=[z, captions], outputs=x)
        return model
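resnet_block is not defined in this snippet; a minimal sketch consistent with the resnet_block(256, x) calls above (the exact layout is an assumption):

def resnet_block(n_filters, input_layer):
    # Two 3x3 convs plus a skip connection back to the block input.
    g = layers.Conv2D(n_filters, (3, 3), padding='same', use_bias=False)(input_layer)
    g = layers.BatchNormalization()(g)
    g = layers.ReLU()(g)
    g = layers.Conv2D(n_filters, (3, 3), padding='same', use_bias=False)(g)
    g = layers.BatchNormalization()(g)
    g = layers.Add()([g, input_layer])
    return layers.ReLU()(g)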
Example #23
def general_input(self):
    role1_actions = Input(shape=(self.input_steps, ), name='role1_actions')
    role2_actions = Input(shape=(self.input_steps, ), name='role2_actions')
    # An embedding is effectively one-hot + a dense layer, so use a larger embedding size here
    role1_actions_embedding = layers.Embedding(
        512, 32, name='role1_actions_embedding')(role1_actions)
    role2_actions_embedding = layers.Embedding(
        512, 32, name='role2_actions_embedding')(role2_actions)

    role1_energy = Input(shape=(self.input_steps, ), name='role1_energy')
    role1_energy_embedding = layers.Embedding(
        5, 4, name='role1_energy_embedding')(role1_energy)
    role2_energy = Input(shape=(self.input_steps, ), name='role2_energy')
    role2_energy_embedding = layers.Embedding(
        5, 4, name='role2_energy_embedding')(role2_energy)

    role1_baoqi = Input(shape=(self.input_steps, ), name='role1_baoqi')
    role1_baoqi_embedding = layers.Embedding(
        2, 8, name='role1_baoqi_embedding')(role1_baoqi)
    role2_baoqi = Input(shape=(self.input_steps, ), name='role2_baoqi')
    role2_baoqi_embedding = layers.Embedding(
        2, 8, name='role2_baoqi_embedding')(role2_baoqi)

    role_position = Input(shape=(self.input_steps, 4), name='role_x_y')

    # This environment varies from episode to episode, so BatchNorm on small batches may be unstable; needs testing
    # kernel size 1 captures distance; kernel size 2 captures velocity
    role_position_1 = layers.Conv1D(filters=32,
                                    kernel_size=1,
                                    strides=1,
                                    padding='same')(role_position)
    role_position_2 = layers.Conv1D(filters=32,
                                    kernel_size=2,
                                    strides=1,
                                    padding='same')(role_position)
    # role_position_1 = BatchNormalization(name='bn_1')(role_position_1)
    # role_position_2 = BatchNormalization(name='bn_2')(role_position_2)
    role_position_1 = layers.LeakyReLU(0.05)(role_position_1)
    role_position_2 = layers.LeakyReLU(0.05)(role_position_2)
    actions_input = Input(shape=(self.action_steps, ), name='last_action')
    actions_embedding = layers.Embedding(
        self.action_num, 64, name='last_action_embedding')(actions_input)

    model_input = [
        role1_actions, role2_actions, role1_energy, role2_energy,
        role_position, role1_baoqi, role2_baoqi, actions_input
    ]

    encoder_input = [
        role1_actions_embedding,
        role2_actions_embedding,
        # normal_role_position, normal_role_distance, normal_role_abs_distance,
        role_position,
        role_position_1,
        role_position_2,
        role1_energy_embedding,
        role2_energy_embedding,
        role1_baoqi_embedding,
        role2_baoqi_embedding,
    ]
    decoder_output = actions_embedding

    return model_input, encoder_input, decoder_output
Example #24
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1):
    """Adds a depthwise convolution block.

    A depthwise convolution block consists of a depthwise conv,
    batch normalization, ReLU, pointwise convolution,
    batch normalization and LeakyReLU activation.

    # Arguments
        inputs: Input tensor of shape `(rows, cols, channels)`
            (with `channels_last` data format) or
            (channels, rows, cols) (with `channels_first` data format).
        pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel.
            The total number of depthwise convolution output
            channels will be equal to `filters_in * depth_multiplier`.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution
            along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        block_id: Integer, a unique identification designating
            the block number.

    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(batch, filters, new_rows, new_cols)`
        if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, new_rows, new_cols, filters)`
        if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.

    # Returns
        Output tensor of block.
    """
    # channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    if strides == (1, 1):
        x = inputs
    else:
        x = layers.ZeroPadding2D(((1, 1), (1, 1)), name='conv_pad_%d' % block_id)(inputs)

    x = layers.DepthwiseConv2D((3, 3),
                               padding='same' if strides == (1, 1) else 'valid',
                               depth_multiplier=depth_multiplier,
                               strides=strides,
                               use_bias=False,
                               name='conv_dw_%d' % block_id)(x)

    x = layers.BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(name='conv_dw_%d_relu' % block_id)(x)

    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same',
                      use_bias=False,
                      strides=(1, 1),
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
    return layers.LeakyReLU(name='conv_pw_%d_relu' % block_id)(x)
Example #25
    def __init__(self, alpha=0.2):
        super(LeakyRelu, self).__init__()
        self.leaky_relu = layers.LeakyReLU(alpha=alpha)
Example #26
def MobilenetConv2D(kernel, alpha, filters):
    last_block_filters = _make_divisible(filters * alpha, 8)
    return compose(
        kl.Conv2D(last_block_filters, kernel, padding='same', use_bias=False),
        kl.BatchNormalization(), kl.LeakyReLU())
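_make_divisible is the usual MobileNet helper for rounding channel counts to a multiple of 8; a sketch of the standard definition (assumed, since it is not shown here):

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping below 90% of v.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v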
Example #27
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Adds an initial convolution layer (with batch normalization and relu6).

    # Arguments
        inputs: Input tensor of shape `(rows, cols, 3)`
            (with `channels_last` data format) or
            (3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
            Can be a single integer to specify the same value for
            all spatial dimensions.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution
            along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, filters, new_rows, new_cols)`
        if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, new_rows, new_cols, filters)`
        if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.

    # Returns
        Output tensor of block.
    """
    # channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    filters = int(filters * alpha)
    if tuple(strides) == (2, 2):
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv1_pad')(inputs)
        x = layers.Conv2D(filters, kernel,
                          padding='valid',
                          use_bias=False,
                          strides=strides,
                          name='conv1')(x)
    else:
        x = layers.Conv2D(filters, kernel,
                          padding='same',
                          use_bias=False,
                          strides=strides,
                          name='conv1')(inputs)
    x = layers.BatchNormalization(name='conv1_bn')(x)
    return layers.LeakyReLU(name='conv1_relu')(x)
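A usage sketch chaining this stem with the depthwise block from Example #24 to start a MobileNet-style backbone (the 224x224 input is an assumption):

inputs = layers.Input(shape=(224, 224, 3))
x = _conv_block(inputs, filters=32, alpha=1.0, strides=(2, 2))      # -> (112, 112, 32)
x = _depthwise_conv_block(x, pointwise_conv_filters=64, alpha=1.0,
                          block_id=1)                               # -> (112, 112, 64)
x = _depthwise_conv_block(x, pointwise_conv_filters=128, alpha=1.0,
                          strides=(2, 2), block_id=2)               # -> (56, 56, 128)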
Example #28
def conv_block(input_tensor, num_filters):
    encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
    encoder = layers.BatchNormalization()(encoder)
    encoder = layers.LeakyReLU(alpha=0.3)(encoder)
    return encoder
Example #29
dataset = mnist.MnistDataset(model_parameters)


def validation_dataset():
    return tf.random.normal(
        [model_parameters.batch_size, model_parameters.latent_size])


validation_dataset = validation_dataset()

generator = sequential.SequentialModel(layers=[
    keras.Input(shape=[model_parameters.latent_size]),
    layers.Dense(units=7 * 7 * 256, use_bias=False),
    layers.BatchNormalization(),
    layers.LeakyReLU(),
    layers.Reshape((7, 7, 256)),
    layers.Conv2DTranspose(
        128, (5, 5), strides=(1, 1), padding='same', use_bias=False),
    layers.BatchNormalization(),
    layers.LeakyReLU(),
    layers.Conv2DTranspose(
        64, (5, 5), strides=(2, 2), padding='same', use_bias=False),
    layers.BatchNormalization(),
    layers.LeakyReLU(),
    layers.Conv2DTranspose(1, (5, 5),
                           strides=(2, 2),
                           padding='same',
                           use_bias=False,
                           activation='tanh')
])
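A usage sketch feeding the validation batch through this generator, assuming SequentialModel is callable like a standard Keras model:

fake_images = generator(validation_dataset)
print(fake_images.shape)  # (batch_size, 28, 28, 1) after the 7 -> 14 -> 28 upsampling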
Example #30
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(DarknetConv2D(*args, **no_bias_kwargs),
                   kl.BatchNormalization(), kl.LeakyReLU(alpha=0.1))
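Both this snippet and Example #26 rely on a compose helper; a common definition matching the left-to-right call order used here (assumed, not shown in the snippet):

from functools import reduce

def compose(*funcs):
    """Compose functions left-to-right: compose(f, g)(x) == g(f(x))."""
    if not funcs:
        raise ValueError('compose requires at least one function.')
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)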