def build_discriminator(self):
        """Build the conditional discriminator.

        Three 64-dim conditioning vectors are concatenated, projected to a
        128x128 map, and stacked onto the image as one extra channel; four
        stride-2 conv blocks then score the result.

        Returns
        -------
        A compiled Keras Model mapping [img, l1, l2, l3] to a single
        sigmoid validity probability.
        """
        img = Input(shape=self.img_shape)

        # Merge the three conditioning vectors into one 192-dim label.
        l1 = Input(shape=(64,))
        l2 = Input(shape=(64,))
        l3 = Input(shape=(64,))
        label = Concatenate()([l1, l2, l3])

        # Project the label onto one 128x128 plane so it can be stacked
        # onto the image as an additional channel.
        n_nodes = 128 * 128
        label = Dense(n_nodes)(label)
        label = Reshape((128, 128, 1))(label)
        merge = Concatenate()([img, label])

        # Four stride-2 conv blocks: 128 -> 64 -> 32 -> 16 -> 8 spatially.
        dis = merge
        for filters in (16, 32, 64, 128):
            dis = Conv2D(filters, kernel_size=3, strides=2, padding="same")(dis)
            dis = LeakyReLU(alpha=0.2)(dis)

        # Extract feature representation.
        features = Flatten()(dis)

        # Determine validity of the image.
        validity = Dense(1, activation="sigmoid")(features)

        model = Model([img, l1, l2, l3], [validity])
        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=['accuracy'])
        return model
# pd.DataFrame(x_test).to_csv("C:\\Users\\nurizdau\\Desktop\\transformed_set.csv")

# splitting the dataset into train and test -- train_test_split is imported for this purpose
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

# print("train inputs are here: ", X_train)
# print("test inputs are here: ", X_test)
# print("train values are here: ", y_train)
# print("test values are here: ", y_test)

# Building the neural network
#---------------------------------------------------------------------------------------------
# Sub-network 1 - Layer1 (for all currents)
# Sub-network 1, layer 1: 11 input features -> Dense(100); every layer
# uses a steep LeakyReLU slope (0.7) and heavy dropout (0.4).
left_branch_input = Input(shape=(11,), name='Left_input')
left_branch_output_1 = Dense(100)(left_branch_input)
left_branch_output_2 = LeakyReLU(alpha=0.7)(left_branch_output_1)
left_branch_output_3 = Dropout(0.4)(left_branch_output_2)

# Sub-network 1 - Layer2
left_branch_output_4 = Dense(100)(left_branch_output_3)
left_branch_output_5 = LeakyReLU(alpha=0.7)(left_branch_output_4)
left_branch_output_6 = Dropout(0.4)(left_branch_output_5)

# Sub-network 1 - Layer3
left_branch_output_7 = Dense(100)(left_branch_output_6)
left_branch_output_8 = LeakyReLU(alpha=0.7)(left_branch_output_7)
left_branch_output_9 = Dropout(0.4)(left_branch_output_8)

# Sub-network 1 - Layer4 (no dropout here — presumably the branch's
# output layer; TODO confirm against the continuation of this script)
left_branch_output_10 = Dense(100)(left_branch_output_9)
left_branch_output_11 = LeakyReLU(alpha=0.7)(left_branch_output_10)
def cnn_model(embedding_matrix_init, ntargets, seq_max, nb_meta, loss):
    """Pre-defined architecture of a CNN model.

    A Conv1D branch over pretrained word embeddings is optionally merged
    with a dense branch over meta-data features before a softmax head.
    (All parameters are required here; the "default" values below are the
    ones callers typically pass.)

    Parameters
    ----------
    embedding_matrix_init : np.array
        Pretrained embedding matrix; its shape fixes the Embedding
        layer's input and output dimensions.

    ntargets : int
        Dimension of model output (number of classes). Typically 18.

    seq_max : int
        Maximum input length in tokens. Typically 100.

    nb_meta : int
        Dimension of meta data input. Typically 252. When 0, the meta
        branch is skipped and the model takes only the text input.

    loss : str
        Loss function for training, e.g. 'categorical_crossentropy'.

    Returns
    -------
    Model instance, already compiled with Adam and an accuracy metric.
    """

    text_input = Input(shape=(seq_max, ), dtype='int32')

    # Text branch: trainable embeddings initialised from the pretrained
    # matrix, two Conv1D blocks, global max pooling, then dense layers.
    x = keras.layers.Embedding(input_dim=embedding_matrix_init.shape[0],
                               output_dim=embedding_matrix_init.shape[1],
                               input_length=seq_max,
                               weights=[embedding_matrix_init],
                               trainable=True)(text_input)
    x = Conv1D(200, 2, padding='same', activation='linear', strides=1)(x)
    x = SpatialDropout1D(0.15)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.05)(x)
    x = Conv1D(250, 2, padding='same', activation='linear', strides=1)(x)
    x = SpatialDropout1D(0.15)(x)
    x = LeakyReLU(alpha=0.05)(x)
    x = Dropout(0.15)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(250, activation="linear")(x)
    x = LeakyReLU(alpha=0.05)(x)
    x = Dense(150, activation="linear")(x)
    x = Dropout(0.15)(x)
    x = LeakyReLU(alpha=0.05)(x)

    if nb_meta == 0:
        # No meta features: single-input model, text branch only.
        inputs = text_input
        concatenate_2 = x
    else:
        # Meta branch: three dense blocks, then concatenated with text.
        Meta_input = Input(shape=(nb_meta, ), dtype='float32')
        inputs = [text_input, Meta_input]

        concatenate_1 = Meta_input
        y = Dense(150, activation="linear")(concatenate_1)
        y = Dropout(0.2)(y)
        y = LeakyReLU(alpha=0.05)(y)
        y = Dense(100, activation="linear")(y)
        y = Dropout(0.2)(y)
        y = LeakyReLU(alpha=0.05)(y)
        y = Dense(80, activation="linear")(y)
        y = Dropout(0.2)(y)
        y = LeakyReLU(alpha=0.05)(y)
        concatenate_2 = concatenate([x, y])

    # Shared classification head.
    z = Dense(200, activation="linear")(concatenate_2)
    z = Dropout(0.2)(z)
    z = LeakyReLU(alpha=0.05)(z)
    z = Dense(100, activation="linear")(z)
    z = Dropout(0.2)(z)
    z = LeakyReLU(alpha=0.05)(z)
    outputs = Dense(ntargets, activation='softmax')(z)

    model = Model(inputs=inputs, outputs=outputs)

    model.compile(optimizer=Adam(), loss=loss, metrics=['accuracy'])

    return model
示例#4
0
                     "w+")  # Log episode result

########################################################

# Import Pretrained model into new Environment
# Load the previously trained next-state model (AE) from disk.
AE_trial_no = '4h1'
AE = keras.models.load_model('Log_files/' + AE_trial_no + '/AE')

# This model maps an input & action to its next state
# Input state
curr_state = keras.Input(shape=(state_dim, ), name="curr_state")
curr_action = keras.Input(shape=(action_dim, ), name="curr_action")
# FDM (forward dynamics model): two Dense+LeakyReLU blocks over the
# concatenated (state, action) pair, then a linear state prediction.
curr_state_action = concatenate([curr_state, curr_action])
fdm_h1 = Dense(16, name="dense1_FDM")(curr_state_action)
fdm_h1 = LeakyReLU(alpha=0.2, name="LeakyRelu1_FDM")(fdm_h1)
fdm_h2 = Dense(16, name="dense2_FDM")(fdm_h1)
fdm_h2 = LeakyReLU(alpha=0.2, name="LeakyRelu2_FDM")(fdm_h2)
fdm_pred_state = layers.Dense(state_dim, name="dense3_FDM")(fdm_h2)
FDM = keras.Model(inputs=[curr_state, curr_action],
                  outputs=fdm_pred_state,
                  name="FDM")

#print(FDM.summary())
#tf.keras.utils.plot_model(FDM, to_file='FDM_model_plot.png', show_shapes=True, show_layer_names=True)

# Next-state prediction is a regression problem: train with MSE.
opt_FDM = tf.keras.optimizers.RMSprop(learning_rate=0.00015)
FDM.compile(loss='mean_squared_error', optimizer=opt_FDM, metrics=['mse'])

# Scratch buffer for a single state row.
p_state = np.zeros((1, state_dim))
# Action for FDM to sample from
示例#5
0
def discriminator(architecture_size='small',
                  phaseshuffle_samples=0,
                  n_classes=5):
    """Build a conditional 1-D convolutional audio discriminator.

    Parameters
    ----------
    architecture_size : str
        One of 'small' (16384-sample input), 'medium' (32768) or
        'large' (65536); controls input length and network depth.
    phaseshuffle_samples : int
        When > 0, apply phase shuffle after each of the first four
        conv layers.
    n_classes : int
        Number of label classes for the conditioning embedding.

    Returns
    -------
    Keras Model mapping [audio, label] to an unbounded realness score
    (no sigmoid on the final Dense).

    Raises
    ------
    ValueError
        If architecture_size is not one of the supported names.
    """
    discriminator_filters = [64, 128, 256, 512, 1024, 2048]

    audio_input_dims = {'large': 65536, 'medium': 32768, 'small': 16384}
    if architecture_size not in audio_input_dims:
        # Fail fast: the original if/elif chain would otherwise hit a
        # NameError on audio_input_dim below.
        raise ValueError(f'unknown architecture_size: {architecture_size!r}')
    audio_input_dim = audio_input_dims[architecture_size]

    # Embed the class label and reshape it to one extra input channel.
    label_input = Input(shape=(1, ),
                        dtype='int32',
                        name='discriminator_label_input')
    label_em = Embedding(n_classes, n_classes * 20)(label_input)
    label_em = Dense(audio_input_dim)(label_em)
    label_em = Reshape((audio_input_dim, 1))(label_em)

    discriminator_input = Input(shape=(audio_input_dim, 1),
                                name='discriminator_input')
    x = Concatenate()([discriminator_input, label_em])

    # Layers 0-3 (shared by all sizes): stride-4 convs with LeakyReLU
    # and optional phase shuffle.
    for i in range(4):
        x = Conv1D(filters=discriminator_filters[i],
                   kernel_size=25,
                   strides=4,
                   padding='same',
                   name=f'discriminator_conv_{i}')(x)
        x = LeakyReLU(alpha=0.2)(x)
        if phaseshuffle_samples > 0:
            x = Lambda(apply_phaseshuffle)([x, phaseshuffle_samples])

    if architecture_size == 'small':
        # Layer 4, no phase shuffle (and, as in the original, no
        # activation before flattening).
        x = Conv1D(filters=discriminator_filters[4],
                   kernel_size=25,
                   strides=4,
                   padding='same',
                   name='discriminator_conv_4')(x)
    else:
        # Layers 4 and 5, no phase shuffle; only conv_5's stride differs
        # between 'medium' (2) and 'large' (4).
        x = Conv1D(filters=discriminator_filters[4],
                   kernel_size=25,
                   strides=4,
                   padding='same',
                   name='discriminator_conv_4')(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv1D(filters=discriminator_filters[5],
                   kernel_size=25,
                   strides=2 if architecture_size == 'medium' else 4,
                   padding='same',
                   name='discriminator_conv_5')(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Flatten()(x)

    discriminator_output = Dense(1)(x)
    discriminator = Model([discriminator_input, label_input],
                          discriminator_output,
                          name='Discriminator')
    return discriminator
示例#6
0
# Environment dimensions drive all layer sizes below.
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n

########################################################
# RL Agent for Oracle: load pre-trained DQN weights from disk.
weights_file = 'rldqn_cartpole.h5'
oracle = DeepQNetwork(state_dim, action_dim, 0.001, 0.95, 1, 0.001, 0.995)
oracle.load_weights(weights_file)
########################################################

# This model maps an input to its next state
# Input state
AE_state = keras.Input(shape=(state_dim, ), name="AE_state")
# Two Dense(32)+LeakyReLU blocks, then a linear projection back to the
# state dimension.
encoded = Dense(32, name="dense1_NS")(AE_state)
encoded = LeakyReLU(alpha=0.2, name="LeakyRelu1_NS")(encoded)
encoded = Dense(32, name="dense2_NS")(encoded)
encoded = LeakyReLU(alpha=0.2, name="LeakyRelu2_NS")(encoded)
n_state = layers.Dense(state_dim, name="dense3_NS")(encoded)
AE = keras.Model(inputs=AE_state, outputs=n_state, name="AE")

#print(AE.summary())
#tf.keras.utils.plot_model(AE, to_file='AE_model_plot.png', show_shapes=True, show_layer_names=True)

# Next-state prediction is a regression problem: train with MSE.
opt_AE = tf.keras.optimizers.RMSprop(learning_rate=0.00015)
AE.compile(loss='mean_squared_error', optimizer=opt_AE, metrics=['mse'])

# This model maps an input & action to its next state
# Input state
curr_state = keras.Input(shape=(state_dim, ), name="curr_state")
curr_action = keras.Input(shape=(action_dim, ), name="curr_action")
示例#7
0
    def __init__(self, *args, **kwargs):
        """Forward all construction arguments to the base layer, then set
        the attention sub-layer hyper-parameters."""
        super().__init__(*args, **kwargs)

        # TODO: How can we expose these options to the user?
        # hidden_dim mirrors output_dim (presumably set by the base-class
        # __init__ — confirm); the attention activation slope is fixed.
        self.hidden_dim = self.output_dim
        self.attn_act = LeakyReLU(0.2)
def build_cifar10_generator(ngf=64, z_dim=128):
    """ Builds CIFAR10 DCGAN Generator Model
    PARAMS
    ------
    ngf: number of generator filters
    z_dim: number of dimensions in latent vector

    RETURN
    ------
    G: keras sequential
    """
    init = initializers.RandomNormal(stddev=0.02)

    G = Sequential()

    # Project the latent vector and reshape to a 2x2 map with ngf*8 channels.
    G.add(
        Dense(2 * 2 * ngf * 8,
              input_shape=(z_dim, ),
              use_bias=True,
              kernel_initializer=init))
    G.add(Reshape((2, 2, ngf * 8)))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Three transposed-conv blocks, each doubling the resolution:
    # 4x4x(ngf*4) -> 8x8x(ngf*2) -> 16x16x(ngf).
    for n_filters in (ngf * 4, ngf * 2, ngf):
        G.add(
            Conv2DTranspose(n_filters,
                            kernel_size=5,
                            strides=2,
                            padding='same',
                            use_bias=True,
                            kernel_initializer=init))
        G.add(BatchNormalization())
        G.add(LeakyReLU(0.2))

    # Final upsampling to 32x32x3 RGB in [-1, 1].
    G.add(
        Conv2DTranspose(3,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=init))
    G.add(Activation('tanh'))

    print("\nGenerator")
    G.summary()

    return G
示例#9
0
文件: WGAN.py 项目: Zachdr1/WGAN
    def critic(self):
        """Build the WGAN critic for 128x128x3 images.

        Eight Conv2D/BatchNorm/LeakyReLU stages reduce the image to
        4x4x1024, followed by a single linear score (no sigmoid, per WGAN).
        Removes the unused dropout_prob and corrects the shape comments
        of the original.
        """
        inputs = Input(shape=(128, 128, 3))

        # (filters, strides, batch_norm) per stage; shapes noted on the right.
        stages = [
            (128, (2, 2), False),   # 128x128x3 -> 64x64x128 (no BN on input stage)
            (128, (2, 2), True),    # -> 32x32x128
            (256, (1, 1), True),    # -> 32x32x256
            (256, (1, 1), True),    # -> 32x32x256
            (256, (2, 2), True),    # -> 16x16x256
            (256, (2, 2), True),    # -> 8x8x256
            (512, (2, 2), True),    # -> 4x4x512
            (1024, (1, 1), True),   # -> 4x4x1024
        ]

        x = inputs
        for n_filters, strides, use_bn in stages:
            x = Conv2D(filters=n_filters,
                       kernel_size=5,
                       padding='same',
                       strides=strides,
                       use_bias=False)(x)
            if use_bn:
                x = BatchNormalization()(x)
            x = LeakyReLU(0.02)(x)

        # Linear critic score.
        x = Flatten()(x)
        out = Dense(1)(x)

        net = Model(inputs=inputs, outputs=out)

        return net
示例#10
0
# Adam hyper-parameters. NOTE(review): Keras spells these beta_1/beta_2 —
# confirm how this dict is consumed before passing it through.
ADAMPARAM = {
    'learning_rate': 0.001,
    'beta1': 0.9,
    'beta2': 0.999,
    'epsilon': 1e-08
}

###MODEL###
# Small MNIST-style CNN; the spatial size after each layer is noted inline.
model1 = Sequential()
model1.add(
    Conv2D(filters=16,
           kernel_size=(3, 3),
           strides=(1, 1),
           padding='valid',
           input_shape=(28, 28, 1)))  #26*26
model1.add(LeakyReLU())
model1.add(
    Conv2D(filters=16, kernel_size=(3, 3), strides=(1, 1),
           padding='valid'))  #24*24
model1.add(LeakyReLU())
model1.add(MaxPooling2D(pool_size=(2, 2)))  #12*12
model1.add(
    # NOTE(review): input_shape on a non-first layer is ignored by Keras;
    # harmless but misleading.
    Conv2D(filters=32,
           kernel_size=(3, 3),
           strides=(1, 1),
           padding='valid',
           input_shape=(28, 28, 1)))  #10*10
model1.add(LeakyReLU())
model1.add(
    Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
           padding='valid'))  #8*8
def build_cifar10_discriminator(ndf=64, image_shape=(32, 32, 3)):
    """ Builds CIFAR10 DCGAN Discriminator Model
    PARAMS
    ------
    ndf: number of discriminator filters
    image_shape: 32x32x3

    RETURN
    ------
    D: keras sequential
    """
    init = initializers.RandomNormal(stddev=0.02)

    D = Sequential()

    # Conv 1: 16x16x64 — the first layer carries input_shape and, per
    # DCGAN convention, skips BatchNormalization.
    D.add(
        Conv2D(ndf,
               kernel_size=5,
               strides=2,
               padding='same',
               use_bias=True,
               kernel_initializer=init,
               input_shape=image_shape))
    D.add(LeakyReLU(0.2))

    # Conv 2-4: halve the resolution and multiply the filters each stage
    # (8x8x(ndf*2) -> 4x4x(ndf*4) -> 2x2x(ndf*8)).
    for mult in (2, 4, 8):
        D.add(
            Conv2D(ndf * mult,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   use_bias=True,
                   kernel_initializer=init))
        D.add(BatchNormalization())
        D.add(LeakyReLU(0.2))

    # Flatten 2x2x(ndf*8) and score real/fake with a sigmoid.
    D.add(Flatten())
    D.add(Dense(1, kernel_initializer=init))
    D.add(Activation('sigmoid'))

    print("\nDiscriminator")
    D.summary()

    return D
示例#12
0
def build_stage2_discriminator():
    """
    Create Stage-II discriminator network.

    Takes a 256x256x3 image plus a 4x4x128 conditioning tensor and
    outputs a single real/fake probability.
    """
    def _conv_bn(tensor, n_filters, kernel, activate=True):
        # Conv2D (no bias) -> BatchNorm, optionally followed by LeakyReLU.
        tensor = Conv2D(n_filters, kernel, padding='same', strides=1,
                        use_bias=False)(tensor)
        tensor = BatchNormalization()(tensor)
        if activate:
            tensor = LeakyReLU(alpha=0.2)(tensor)
        return tensor

    input_layer = Input(shape=(256, 256, 3))

    # Downsampling stack: 256 -> 4 spatially, 64 -> 2048 channels.
    h = Conv2D(64, (4, 4),
               padding='same',
               strides=2,
               input_shape=(256, 256, 3),
               use_bias=False)(input_layer)
    h = LeakyReLU(alpha=0.2)(h)
    for n_filters in (128, 256, 512, 1024, 2048):
        h = Conv2D(n_filters, (4, 4), padding='same', strides=2,
                   use_bias=False)(h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.2)(h)

    # 1x1 bottleneck back down to 512 channels; the last block has no
    # activation because it feeds the residual add below.
    h = _conv_bn(h, 1024, (1, 1))
    h = _conv_bn(h, 512, (1, 1), activate=False)

    # Residual branch.
    r = _conv_bn(h, 128, (1, 1))
    r = _conv_bn(r, 128, (3, 3))
    r = _conv_bn(r, 512, (3, 3), activate=False)

    joined = LeakyReLU(alpha=0.2)(add([h, r]))

    # Concatenate the conditioning tensor and classify.
    input_layer2 = Input(shape=(4, 4, 128))
    merged_input = concatenate([joined, input_layer2])

    out = Conv2D(64 * 8, kernel_size=1, padding="same", strides=1)(merged_input)
    out = BatchNormalization()(out)
    out = LeakyReLU(alpha=0.2)(out)
    out = Flatten()(out)
    out = Dense(1)(out)
    out = Activation('sigmoid')(out)

    stage2_dis = Model(inputs=[input_layer, input_layer2], outputs=[out])
    return stage2_dis
示例#13
0
def build_stage2_generator():
    """
    Create Stage-II generator containing the CA Augmentation Network,
    the image encoder and the generator network.
    """
    def _up_block(tensor, n_filters):
        # 2x upsample followed by conv (no bias) + BatchNorm + ReLU.
        tensor = UpSampling2D(size=(2, 2))(tensor)
        tensor = Conv2D(n_filters, kernel_size=3, padding="same", strides=1,
                        use_bias=False)(tensor)
        tensor = BatchNormalization()(tensor)
        return ReLU()(tensor)

    # 1. CA Augmentation Network
    input_layer = Input(shape=(1024, ))
    input_lr_images = Input(shape=(64, 64, 3))

    mean_logsigma = LeakyReLU(alpha=0.2)(Dense(256)(input_layer))
    c = Lambda(generate_c)(mean_logsigma)

    # 2. Image Encoder (first block has no BatchNorm).
    enc = ZeroPadding2D(padding=(1, 1))(input_lr_images)
    enc = Conv2D(128, kernel_size=(3, 3), strides=1, use_bias=False)(enc)
    enc = ReLU()(enc)

    enc = ZeroPadding2D(padding=(1, 1))(enc)
    enc = Conv2D(256, kernel_size=(4, 4), strides=2, use_bias=False)(enc)
    enc = BatchNormalization()(enc)
    enc = ReLU()(enc)

    enc = ZeroPadding2D(padding=(1, 1))(enc)
    enc = Conv2D(512, kernel_size=(4, 4), strides=2, use_bias=False)(enc)
    enc = BatchNormalization()(enc)
    enc = ReLU()(enc)

    # 3. Joint: fuse the conditioning code with the encoded image.
    c_code = Lambda(joint_block)([c, enc])

    h = ZeroPadding2D(padding=(1, 1))(c_code)
    h = Conv2D(512, kernel_size=(3, 3), strides=1, use_bias=False)(h)
    h = BatchNormalization()(h)
    h = ReLU()(h)

    # 4. Residual blocks
    for _ in range(4):
        h = residual_block(h)

    # 5. Upsampling blocks: four 2x stages with decreasing filter counts.
    for n_filters in (512, 256, 128, 64):
        h = _up_block(h, n_filters)

    h = Conv2D(3, kernel_size=3, padding="same", strides=1, use_bias=False)(h)
    h = Activation('tanh')(h)

    model = Model(inputs=[input_layer, input_lr_images],
                  outputs=[h, mean_logsigma])
    return model
示例#14
0
 def discriminator(self):
     """Build (and cache on self.D) a 256x256x3 image discriminator.

     Six Conv2D/AveragePooling2D stages halve the resolution down to
     4x4x256, followed by a dense head with a sigmoid output.
     """
     # Reuse the already-built model on repeat calls.
     if self.D:
         return self.D

     self.D = Sequential()

     #add Gaussian noise to prevent Discriminator overfitting
     self.D.add(GaussianNoise(0.2, input_shape = [256, 256, 3]))

     #256x256x3 Image (first conv stage has no BatchNormalization)
     self.D.add(Conv2D(filters = 8, kernel_size = 3, padding = 'same'))
     self.D.add(LeakyReLU(0.2))
     self.D.add(Dropout(0.25))
     self.D.add(AveragePooling2D())

     #128x128x8
     self.D.add(Conv2D(filters = 16, kernel_size = 3, padding = 'same'))
     self.D.add(BatchNormalization(momentum = 0.7))
     self.D.add(LeakyReLU(0.2))
     self.D.add(Dropout(0.25))
     self.D.add(AveragePooling2D())

     #64x64x16
     self.D.add(Conv2D(filters = 32, kernel_size = 3, padding = 'same'))
     self.D.add(BatchNormalization(momentum = 0.7))
     self.D.add(LeakyReLU(0.2))
     self.D.add(Dropout(0.25))
     self.D.add(AveragePooling2D())

     #32x32x32
     self.D.add(Conv2D(filters = 64, kernel_size = 3, padding = 'same'))
     self.D.add(BatchNormalization(momentum = 0.7))
     self.D.add(LeakyReLU(0.2))
     self.D.add(Dropout(0.25))
     self.D.add(AveragePooling2D())

     #16x16x64
     self.D.add(Conv2D(filters = 128, kernel_size = 3, padding = 'same'))
     self.D.add(BatchNormalization(momentum = 0.7))
     self.D.add(LeakyReLU(0.2))
     self.D.add(Dropout(0.25))
     self.D.add(AveragePooling2D())

     #8x8x128
     self.D.add(Conv2D(filters = 256, kernel_size = 3, padding = 'same'))
     self.D.add(BatchNormalization(momentum = 0.7))
     self.D.add(LeakyReLU(0.2))
     self.D.add(Dropout(0.25))
     self.D.add(AveragePooling2D())

     #4x4x256
     self.D.add(Flatten())

     #4x4x256 flattens to 4096 features, then a 128-unit dense layer
     self.D.add(Dense(128))
     self.D.add(LeakyReLU(0.2))

     self.D.add(Dense(1, activation = 'sigmoid'))

     return self.D
from compose import *
from tensorflow.keras.layers import Activation, Conv2D, Dense, Input, LeakyReLU, Multiply
from tensorflow.keras.models import Model

__all__ = ['build_discriminative_net']

def conv_stage(filters, kernel_size, strides, name):
    """Return a composed stage: Conv2D (no bias, 'same' padding) -> LeakyReLU.

    Replaces the original lambda assignment (PEP 8 E731) with a def and
    fixes the 'kernal_size' parameter-name typo; visible call sites pass
    arguments positionally, so the rename is backward compatible.
    """
    return compose(
        Conv2D(filters, kernel_size=kernel_size, strides=strides,
               padding='same', use_bias=False, name=name + '/conv'),
        LeakyReLU(name=name + '/lrelu')
    )

def build_discriminative_net(img_shape):
    
    img_input = Input(img_shape)
    
    conv_stage_6 = compose(
        conv_stage(8, 5, 1, 'conv_stage_1'),
        conv_stage(16, 5, 1, 'conv_stage_2'),
        conv_stage(32, 5, 1, 'conv_stage_3'),
        conv_stage(64, 5, 1, 'conv_stage_4'),
        conv_stage(128, 5, 1, 'conv_stage_5'),
        conv_stage(128, 5, 1, 'conv_stage_6')
    )(img_input)
    
    attention_map = Conv2D(1, kernel_size=5, strides=1, padding='same', use_bias=False, 
                           name='attention_map')(conv_stage_6)

    fc_2 = compose(
        Multiply(),
        conv_stage(64, 5, 4, 'conv_stage_7'),
示例#16
0
文件: WGAN.py 项目: Zachdr1/WGAN
    def generator(self):
        """Build the generator: 100-dim latent vector -> 128x128x3 image.

        Conv + BatchNorm + LeakyReLU blocks interleaved with four
        UpSampling2D stages (4 -> 8 -> 16 -> 32 -> 64 -> 128 spatially),
        finished with a tanh RGB output. Several of the original shape
        comments were wrong and have been corrected.
        """
        # Input size = 100
        inputs = Input(shape=(100, ))
        # NOTE(review): input_shape here is redundant in the functional API.
        x = Dense(4 * 4 * 1024, input_shape=(100, ))(inputs)
        x = Reshape(target_shape=(4, 4, 1024))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        # Output size = 4x4x1024

        # Input size = 4x4x1024
        x = Conv2D(filters=512, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 8x8x512

        # Input size = 8x8x512
        x = Conv2D(filters=256, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 16x16x256

        # Input size = 16x16x256
        x = Conv2D(filters=256, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        # Output size = 16x16x256

        # Input size = 16x16x256
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)

        # Output size = 32x32x128

        # Input size = 32x32x128
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 64x64x128

        # Input size = 64x64x128
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 128x128x128

        # Input size = 128x128x128
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        # Output size = 128x128x128

        # Input size = 128x128x128
        x = Conv2D(filters=3, kernel_size=5, padding='same', use_bias=False)(x)
        out = Activation('tanh')(x)
        # Output size = 128x128x3

        net = Model(inputs=inputs, outputs=out)

        return net
def get_test_model_exhaustive():
    """Return an exhaustive test model.

    Builds a single functional model whose outputs exercise a wide range of
    Keras layers (conv/pooling in 1D and 2D, normalization, reshaping,
    merging, activations, nested sub-models), then fits it briefly on dummy
    data.  Intended as a conversion/serialization test fixture, not as a
    useful predictor.
    """
    # 33 inputs of assorted ranks (5D down to 1D); specific shapes are chosen
    # so particular layers below receive the rank they require.
    input_shapes = [(2, 3, 4, 5, 6), (2, 3, 4, 5, 6), (7, 8, 9, 10),
                    (7, 8, 9, 10), (11, 12, 13), (11, 12, 13), (14, 15),
                    (14, 15), (16, ),
                    (16, ), (2, ), (1, ), (2, ), (1, ), (1, 3), (1, 4),
                    (1, 1, 3), (1, 1, 4), (1, 1, 1, 3), (1, 1, 1, 4),
                    (1, 1, 1, 1, 3), (1, 1, 1, 1, 4), (26, 28, 3), (4, 4, 3),
                    (4, 4, 3), (4, ), (2, 3), (1, ), (1, ), (1, ), (2, 3),
                    (9, 16, 1), (1, 9, 16)]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    # --- 1D convolution, padding/cropping, and pooling variants ---
    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(MaxPooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(
        AveragePooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(
        GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    # --- Normalization over every axis of the 5D input, plus scalar stats ---
    for axis in range(1, 6):
        shape = input_shapes[0][axis - 1]
        outputs.append(
            Normalization(axis=axis,
                          mean=np.random.rand(shape),
                          variance=np.random.rand(shape))(inputs[0]))
    outputs.append(Normalization(axis=None, mean=2.1, variance=2.2)(inputs[4]))
    outputs.append(Normalization(axis=-1, mean=2.1, variance=2.2)(inputs[6]))

    # --- 2D convolutions: plain, strided, dilated, separable, depthwise ---
    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    # --- 2D pooling (local and global) ---
    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.8 supports this
    # outputs.append(MaxPooling2D((2, 2), data_format="channels_first")(inputs[4]))
    outputs.append(
        MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.8 supports this
    # outputs.append(AveragePooling2D((2, 2), data_format="channels_first")(inputs[4]))
    outputs.append(
        AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(
        GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    # --- Permute over every supported rank ---
    outputs.append(Permute((3, 4, 1, 5, 2))(inputs[0]))
    outputs.append(Permute((1, 5, 3, 2, 4))(inputs[0]))
    outputs.append(Permute((3, 4, 1, 2))(inputs[2]))
    outputs.append(Permute((2, 1, 3))(inputs[4]))
    outputs.append(Permute((2, 1))(inputs[6]))
    outputs.append(Permute((1, ))(inputs[8]))

    outputs.append(Permute((3, 1, 2))(inputs[31]))
    outputs.append(Permute((3, 1, 2))(inputs[32]))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[31])))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[32])))

    # --- BatchNormalization over every (supported) axis of every rank ---
    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(axis=1)(inputs[0]))
    outputs.append(BatchNormalization(axis=2)(inputs[0]))
    outputs.append(BatchNormalization(axis=3)(inputs[0]))
    outputs.append(BatchNormalization(axis=4)(inputs[0]))
    outputs.append(BatchNormalization(axis=5)(inputs[0]))
    outputs.append(BatchNormalization()(inputs[2]))
    outputs.append(BatchNormalization(axis=1)(inputs[2]))
    outputs.append(BatchNormalization(axis=2)(inputs[2]))
    outputs.append(BatchNormalization(axis=3)(inputs[2]))
    outputs.append(BatchNormalization(axis=4)(inputs[2]))
    outputs.append(BatchNormalization()(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(BatchNormalization(axis=1)(inputs[4])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[4]))
    outputs.append(BatchNormalization(axis=3)(inputs[4]))
    outputs.append(BatchNormalization()(inputs[6]))
    outputs.append(BatchNormalization(axis=1)(inputs[6]))
    outputs.append(BatchNormalization(axis=2)(inputs[6]))
    outputs.append(BatchNormalization()(inputs[8]))
    outputs.append(BatchNormalization(axis=1)(inputs[8]))
    outputs.append(BatchNormalization()(inputs[27]))
    outputs.append(BatchNormalization(axis=1)(inputs[27]))
    outputs.append(BatchNormalization()(inputs[14]))
    outputs.append(BatchNormalization(axis=1)(inputs[14]))
    outputs.append(BatchNormalization(axis=2)(inputs[14]))
    outputs.append(BatchNormalization()(inputs[16]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(BatchNormalization(axis=1)(inputs[16])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[16]))
    outputs.append(BatchNormalization(axis=3)(inputs[16]))
    outputs.append(BatchNormalization()(inputs[18]))
    outputs.append(BatchNormalization(axis=1)(inputs[18]))
    outputs.append(BatchNormalization(axis=2)(inputs[18]))
    outputs.append(BatchNormalization(axis=3)(inputs[18]))
    outputs.append(BatchNormalization(axis=4)(inputs[18]))
    outputs.append(BatchNormalization()(inputs[20]))
    outputs.append(BatchNormalization(axis=1)(inputs[20]))
    outputs.append(BatchNormalization(axis=2)(inputs[20]))
    outputs.append(BatchNormalization(axis=3)(inputs[20]))
    outputs.append(BatchNormalization(axis=4)(inputs[20]))
    outputs.append(BatchNormalization(axis=5)(inputs[20]))

    outputs.append(Dropout(0.5)(inputs[4]))

    # --- 2D padding / cropping with scalar, pair, and per-side arguments ---
    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    # --- Dense applied to inputs of increasing rank ---
    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    # --- Reshape: progressively re-fold the same element count ---
    outputs.append(Reshape(((2 * 3 * 4 * 5 * 6), ))(inputs[0]))
    outputs.append(Reshape((2, 3 * 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5, 6))(inputs[0]))

    outputs.append(Reshape((16, ))(inputs[8]))
    outputs.append(Reshape((2, 8))(inputs[8]))
    outputs.append(Reshape((2, 2, 4))(inputs[8]))
    outputs.append(Reshape((2, 2, 2, 2))(inputs[8]))
    outputs.append(Reshape((2, 2, 1, 2, 2))(inputs[8]))

    outputs.append(RepeatVector(3)(inputs[8]))

    # --- UpSampling2D with both interpolation modes ---
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    outputs.append(ReLU()(inputs[0]))

    # --- Concatenate along every valid positive and negative axis per rank ---
    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))
    # outputs.append(UpSampling1D(size=2)(inputs[8])) # ValueError: Input 0 of layer up_sampling1d_1 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 16]

    # --- Multiply with broadcasting between scalar-ish inputs ---
    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    # --- Weight sharing: the same conv layer applied to two tensors ---
    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1),
                padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    # --- Element-wise merge layers, including repeated operands ---
    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    # --- Nested sub-models (functional, Sequential, Sequential-in-Sequential),
    # including deliberately duplicated layer names across models ---
    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5, name='duplicate_layer_name')(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5, name='duplicate_layer_name'))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    intermediate_model_3_nested = Sequential()
    intermediate_model_3_nested.add(Dense(7, input_shape=(6, )))
    intermediate_model_3_nested.compile(optimizer='rmsprop',
                                        loss='categorical_crossentropy')

    intermediate_model_3 = Sequential()
    intermediate_model_3.add(Dense(6, input_shape=(5, )))
    intermediate_model_3.add(intermediate_model_3_nested)
    intermediate_model_3.add(Dense(8))
    intermediate_model_3.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_3(x)  # (1, 1, 8)

    x = Dense(3)(x)  # (1, 1, 3)

    # --- Activation layers, including a shared Activation instance ---
    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        Activation('relu6')(inputs[25]),
        Activation('swish')(inputs[25]),
        Activation('exponential')(inputs[25]),
        Activation('gelu')(inputs[25]),
        Activation('softsign')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ReLU()(inputs[25]),
        ReLU(max_value=0.4, negative_slope=1.1, threshold=0.3)(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 2
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
示例#18
0
# BUG FIX: the title read "Future Importance"; this axis shows feature
# importances, so the intended label is "Feature Importance".
ax.set_title("Feature Importance",
             fontdict={"fontsize": 12, "fontweight": "bold"})

plt.show()

# NEURAL NETWORK

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LeakyReLU


# Binary classifier over 32 input features: two 16-unit hidden layers with
# LeakyReLU activations and a single sigmoid output unit.
model = Sequential()

model.add(Dense(16, kernel_initializer="uniform", activation=LeakyReLU(), input_dim=32))

model.add(Dense(16, kernel_initializer="uniform", activation=LeakyReLU()))

model.add(Dense(1, kernel_initializer="uniform", activation="sigmoid"))

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])


model.summary()

# X_train / y_train are prepared earlier in the notebook this was taken from.
history = model.fit(X_train, y_train, batch_size=16, epochs=10)

示例#19
0
    model = Model(placeholder, x, name=f'simple_model_unet_{str(model_depth).zfill(3)}')
    model_list.append(model)

# U-Net-style encoder/decoder assembled from plain Conv2D + LeakyReLU blocks.
filter_count_list = [12, 16, 32, 64, 128, 128 + 64, 256]
kernel_size_list = [(3, 3)] * (len(filter_count_list) - 1) + [(4, 4)]
iter_count_list = [2] * 7
in_layer = c_layer = Input((256, 256, 3), name='data')
upsamplig_type = 'nearest'  # alternative: 'bilinear'

# Encoder: conv stacks whose outputs are kept for the skip connections,
# each followed by 2x2 max-pooling.
join_layer_list = []
for stage, (filters, kernel) in enumerate(
        zip(filter_count_list[:-1], kernel_size_list[:-1])):
    for _ in range(iter_count_list[stage]):
        c_layer = Conv2D(filters, kernel, padding='same')(c_layer)
        c_layer = LeakyReLU(0.1)(c_layer)
    join_layer_list.append(c_layer)
    c_layer = MaxPooling2D((2, 2))(c_layer)

# Walk back up in reverse order; the bottleneck entry is dropped.
join_layer_list = list(reversed(join_layer_list))
filter_count_list = list(reversed(filter_count_list[:-1]))
kernel_size_list = list(reversed(kernel_size_list[:-1]))

# Decoder: upsample, concatenate the matching encoder skip, then conv stack.
for stage, (filters, kernel, skip) in enumerate(
        zip(filter_count_list, kernel_size_list, join_layer_list)):
    c_layer = UpSampling2D((2, 2), interpolation=upsamplig_type)(c_layer)
    c_layer = Concatenate(axis=-1)([c_layer, skip])
    for _ in range(iter_count_list[stage]):
        c_layer = Conv2D(filters, kernel, padding='same')(c_layer)
        c_layer = LeakyReLU(0.1)(c_layer)
示例#20
0
def define_generator(image_shape, probe_light_shape, latent_dim):
    """Build the encoder/decoder generator with a probe-light side branch.

    The input image and the probe-light target are encoded by two parallel
    convolutional towers, fused through dense layers into `latent_dim`
    features, then decoded back to an RGB image (tanh output) with skip
    connections taken from the image-encoder activations.

    Returns an uncompiled Keras Model taking [in_image, probe_image_target].
    """
    init = RandomNormal(stddev=0.02)
    in_image = Input(shape=image_shape)
    probe_image_target = Input(shape=probe_light_shape)

    # Image encoder.
    conv1 = Conv2D(64, (7, 7), padding='same',
                   kernel_initializer=init)(in_image)
    conv1 = BatchNormalization(axis=-1)(conv1)
    # BUG FIX: this line read `conv1 - LeakyReLU(alpha=0.2)(conv1)` — the
    # `-` discarded the activation, so conv1 stayed un-activated downstream
    # (including in the merge2 skip connection).
    conv1 = LeakyReLU(alpha=0.2)(conv1)
    pool1 = AveragePooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, (3, 3),
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer=init)(pool1)
    conv2 = BatchNormalization(axis=-1)(conv2)
    conv2 = LeakyReLU(alpha=0.2)(conv2)
    pool2 = AveragePooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, (3, 3),
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer=init)(pool2)
    conv3 = BatchNormalization(axis=-1)(conv3)
    conv3 = LeakyReLU(alpha=0.2)(conv3)

    # Probe-light encoder (same architecture, separate weights).
    pn = Conv2D(64, (7, 7), padding='same',
                kernel_initializer=init)(probe_image_target)
    pn = BatchNormalization(axis=-1)(pn)
    pn = LeakyReLU(alpha=0.2)(pn)
    pn = AveragePooling2D(pool_size=(2, 2))(pn)

    pn = Conv2D(128, (3, 3),
                strides=(2, 2),
                padding='same',
                kernel_initializer=init)(pn)
    pn = BatchNormalization(axis=-1)(pn)
    pn = LeakyReLU(alpha=0.2)(pn)
    pn = AveragePooling2D(pool_size=(2, 2))(pn)

    pn = Conv2D(256, (3, 3),
                strides=(2, 2),
                padding='same',
                kernel_initializer=init)(pn)
    pn = BatchNormalization(axis=-1)(pn)
    pn = LeakyReLU(alpha=0.2)(pn)

    # Fuse both encodings through the latent bottleneck.
    g = Flatten()(conv3)
    pn = Flatten()(pn)

    g = Concatenate()([g, pn])

    g = Dense(latent_dim, activation='relu', kernel_initializer=init)(g)

    g = Dense(16 * 16 * 256, activation='relu', kernel_initializer=init)(g)

    # Decoder: pixel-shuffle (depth_to_space) upsampling + transposed convs,
    # with skip connections from the image encoder.
    g = Reshape((16, 16, 256))(g)
    sub_layer1 = Lambda(lambda x: tf.nn.depth_to_space(x, 2))
    sub_layer2 = Lambda(lambda x: tf.nn.depth_to_space(x, 2))
    up1 = Conv2DTranspose(128, (3, 3),
                          strides=(2, 2),
                          padding='same',
                          kernel_initializer=init)(sub_layer1(inputs=g))
    up1 = BatchNormalization(axis=-1)(up1)
    up1 = LeakyReLU(alpha=0.2)(up1)

    merge1 = Concatenate()([up1, conv2])

    up2 = Conv2DTranspose(64, (3, 3),
                          strides=(2, 2),
                          padding='same',
                          kernel_initializer=init)(sub_layer2(inputs=merge1))
    up2 = BatchNormalization(axis=-1)(up2)
    up2 = LeakyReLU(alpha=0.2)(up2)
    merge2 = Concatenate()([up2, conv1])

    final = Conv2D(3, (7, 7), padding='same', kernel_initializer=init)(merge2)
    final = BatchNormalization(axis=-1)(final)
    out_image = Activation('tanh')(final)
    model = Model([in_image, probe_image_target], out_image)
    return model
示例#21
0
# (Commented-out prediction-visualization snippet removed; it only plotted
# the first nine correctly classified test images.)

# CNN classifier for 256x256 RGB inputs: three Conv/LeakyReLU/MaxPool/Dropout
# stages, then a dense head ending in a 21-way softmax.
fashion_model = Sequential([
    Conv2D(32,
           kernel_size=(3, 3),
           activation='linear',
           padding='same',
           input_shape=(256, 256, 3)),
    LeakyReLU(alpha=0.1),
    MaxPooling2D((2, 2), padding='same'),
    Dropout(0.25),
    Conv2D(64, (3, 3), activation='linear', padding='same'),
    LeakyReLU(alpha=0.1),
    MaxPooling2D(pool_size=(2, 2), padding='same'),
    Dropout(0.25),
    Conv2D(128, (3, 3), activation='linear', padding='same'),
    LeakyReLU(alpha=0.1),
    MaxPooling2D(pool_size=(2, 2), padding='same'),
    Dropout(0.4),
    Flatten(),
    Dense(128, activation='linear'),
    LeakyReLU(alpha=0.1),
    Dropout(0.3),
    Dense(21, activation='softmax'),
])
示例#22
0
def define_discriminator(image_shape, probe_shape, latent_dim):
    """Build the two-branch discriminator.

    The rendered image and the probe image are encoded by two identical
    (but independently weighted) convolutional towers, embedded to
    `latent_dim` features each, concatenated, and reduced to a single
    unbounded score.  Compiled with 'mse' loss (LSGAN-style objective).
    """
    init = RandomNormal(stddev=0.02)
    in_image = Input(shape=image_shape)
    probe_image = Input(shape=probe_shape)

    def conv_tower(t):
        # 64 (no BN) -> 128 -> 256 -> 512 strided 5x5 convs, then one
        # stride-1 512 conv; every post-first conv is BN + LeakyReLU(0.2).
        t = Conv2D(64, (5, 5),
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer=init)(t)
        t = LeakyReLU(alpha=0.2)(t)
        for filters, strides in ((128, (2, 2)), (256, (2, 2)),
                                 (512, (2, 2)), (512, (1, 1))):
            t = Conv2D(filters, (5, 5),
                       strides=strides,
                       padding='same',
                       kernel_initializer=init)(t)
            t = BatchNormalization(axis=-1)(t)
            t = LeakyReLU(alpha=0.2)(t)
        return t

    d = conv_tower(in_image)
    d1 = conv_tower(probe_image)

    # Embed each branch to latent_dim features.
    d = Flatten()(d)
    d = Dense(latent_dim, kernel_initializer=init)(d)
    d = LeakyReLU(alpha=0.2)(d)

    d1 = Flatten()(d1)
    d1 = Dense(latent_dim, kernel_initializer=init)(d1)
    d1 = LeakyReLU(alpha=0.2)(d1)

    # Fuse branches and score.
    d = Concatenate()([d, d1])

    d = Dense(latent_dim, kernel_initializer=init)(d)
    d = LeakyReLU(alpha=0.2)(d)

    out = Dense(1)(d)
    # define model
    model = Model([in_image, probe_image], out)
    # compile model
    model.compile(loss='mse', optimizer=Adam(lr=0.0003))
    return model
示例#23
0
def cbr(x, out_layer, kernel, stride):
    """Conv2D -> BatchNormalization -> LeakyReLU(0.1) building block."""
    conv = Conv2D(out_layer, kernel_size=kernel, strides=stride,
                  padding="same")
    out = conv(x)
    out = BatchNormalization()(out)
    return LeakyReLU(alpha=0.1)(out)
示例#24
0
# Hyperparameters

# Dimension of the noise vector fed into the GAN.
NOISE_DIM = 500

# Adam optimizer with learning_rate=0.0002 and beta_1=0.5 — the settings
# used by vanilla GAN and DCGAN, which train noticeably better.
adam = Adam(lr=0.0002, beta_1=0.5)

# ------------------------------------------------------------------------
# Generator definition: three widening Dense/LeakyReLU stages, then a
# tanh layer matching the flattened data shape (22144 values).

generator = Sequential()
generator.add(Dense(256, input_dim=NOISE_DIM))
generator.add(LeakyReLU(0.2))
generator.add(Dense(512))
generator.add(LeakyReLU(0.2))
generator.add(Dense(1024))
generator.add(LeakyReLU(0.2))
generator.add(Dense(22144, activation='tanh'))

generator.summary()
# Model: "sequential"
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# dense (Dense)                (None, 256)               128256
# _________________________________________________________________
# leaky_re_lu (LeakyReLU)      (None, 256)               0
示例#25
0
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    # Default to use_bias=False (BatchNorm provides the bias), but let the
    # caller's kwargs override it, as the original update() semantics did.
    conv_kwargs = {'use_bias': False, **kwargs}
    return compose(
        DarknetConv2D(*args, **conv_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1),
    )
示例#26
0
def unet(input_shape=(512, 512, 3), num_classes=1):
    """Build a symmetric 2D U-Net.

    Encoder: six stages of two Conv-BN-LeakyReLU blocks each (16 -> 512
    filters), each followed by 2x2 max-pooling; a 1024-filter bottleneck;
    then six decoder stages of three Conv-BN-LeakyReLU blocks each, with
    skip connections from the matching encoder stage.  The head is a 1x1
    convolution with per-pixel sigmoid over `num_classes` channels.

    Resolution comments assume the default 512x512 input.
    """
    use_bias = False  # BatchNormalization supplies the shift term

    def conv_stack(x, filters, n_convs=2):
        # `n_convs` repetitions of Conv2D(3x3, same) -> BatchNorm -> LeakyReLU(0.1).
        for _ in range(n_convs):
            x = Conv2D(filters, (3, 3), padding='same', use_bias=use_bias)(x)
            x = BatchNormalization()(x)
            x = LeakyReLU(alpha=0.1)(x)
        return x

    def up_stack(x, skip, filters):
        # Upsample 2x, concatenate the encoder skip tensor, then convolve.
        x = UpSampling2D((2, 2))(x)
        x = concatenate([skip, x], axis=3)
        return conv_stack(x, filters, n_convs=3)

    inputs = Input(shape=input_shape)

    # Encoder.
    down0a = conv_stack(inputs, 16)        # 512
    down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
    down0 = conv_stack(down0a_pool, 32)    # 256
    down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    down1 = conv_stack(down0_pool, 64)     # 128
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    down2 = conv_stack(down1_pool, 128)    # 64
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    down3 = conv_stack(down2_pool, 256)    # 32
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
    down4 = conv_stack(down3_pool, 512)    # 16
    down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)

    center = conv_stack(down4_pool, 1024)  # 8 (bottleneck)

    # Decoder with skip connections.
    up4 = up_stack(center, down4, 512)     # 16
    up3 = up_stack(up4, down3, 256)        # 32
    up2 = up_stack(up3, down2, 128)        # 64
    up1 = up_stack(up2, down1, 64)         # 128
    up0 = up_stack(up1, down0, 32)         # 256
    up0a = up_stack(up0, down0a, 16)       # 512

    classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0a)

    model = Model(inputs=inputs, outputs=classify)

    return model
示例#27
0
File: gat.py  Project: poows/GNNGallery
    def call(self, inputs):
        '''
        Run multi-head graph attention (GAT) over the node features.

        inputs: (x, adj), x is node attribute matrix with shape [N, F], 
        adj is adjacency matrix with shape [N, N].

        Note:
        N: number of nodes
        F: input dim
        F': output dim

        adj is combined with tf.sparse ops below (tf.sparse.add,
        tf.sparse.softmax), so it is expected to be a tf.sparse.SparseTensor.
        Returns the activated aggregate of all attention heads:
        [N, K*F'] when self.reduction == 'concat', else [N, F'].
        '''
        x, adj = inputs

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper
            attn_kernel_self, attn_kernel_neighs = self.attn_kernels[
                head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            h = x @ kernel  # [N, F']

            # Compute attentions for self and neighbors
            attn_for_self = h @ attn_kernel_self  # [N, 1]
            attn_for_neighs = h @ attn_kernel_neighs  # [N, 1]

            # combine the attention with adjacency matrix via broadcast,
            # restricting attention to existing edges
            attn_for_self = adj * attn_for_self
            attn_for_neighs = adj * tf.transpose(attn_for_neighs)

            # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
            attentions = tf.sparse.add(attn_for_self, attn_for_neighs)

            # Add nonlinearty by LeakyReLU, applied only to the stored
            # (edge) values so sparsity is preserved
            attentions = tf.sparse.SparseTensor(
                indices=attentions.indices,
                values=LeakyReLU(alpha=0.2)(attentions.values),
                dense_shape=attentions.dense_shape)
            # Apply softmax to get attention coefficients
            attentions = tf.sparse.softmax(attentions)  # (N x N)

            # Apply dropout to attributes and attention coefficients
            if self.dropout:
                attentions = tf.sparse.SparseTensor(
                    indices=attentions.indices,
                    values=Dropout(rate=self.dropout)(attentions.values),
                    dense_shape=attentions.dense_shape)  # (N x N)
                h = Dropout(self.dropout)(h)  # (N x F')

            # Linear combination with neighbors' attributes
            h = tf.sparse.sparse_dense_matmul(attentions, h)  # (N x F')

            if self.use_bias:
                h += self.biases[head]

            # Add output of attention head to final output
            outputs.append(h)

        # Aggregate the heads' output according to the reduction method
        if self.reduction == 'concat':
            output = tf.concat(outputs, axis=1)  # (N x KF')
        else:
            output = tf.reduce_mean(tf.stack(outputs), axis=0)  # (N x F')

        return self.activation(output)
示例#28
0
		def d_layer(pre_lyr, filters, kernel_size=(3,3), name=''):
			"""Discriminator block: strided conv -> batch-norm -> LeakyReLU -> dropout.

			Uses relu_alpha and dropout_rate from the enclosing scope; `name`
			suffixes each layer so the blocks stay distinguishable.
			"""
			h = Conv2D(filters, kernel_size=kernel_size, strides=(2,2), padding='same', name='disc_conv_' + name)(pre_lyr)
			h = BatchNormalization(name='disc_norm_' + name)(h)
			h = LeakyReLU(alpha=relu_alpha, name='disc_relu_' + name)(h)
			h = Dropout(dropout_rate, name='disc_dropout_' + name)(h)
			return h
示例#29
0
# Image geometry — presumably MNIST-sized 28x28 grayscale images; confirm
# against the data pipeline these constants are used with.
num_rows = 28
num_cols = 28
num_channels = 1

latent_dim = 100    # size of the generator's input noise vector
NUM_EPOCHS = 10000
BATCH_SIZE = 32
##################################################################################################
# GENERATOR
# DCGAN-style generator: dense projection to a 7x7x256 feature map, then
# transposed convolutions to upsample.
g = Sequential()

g.add(
    Dense(
        units=7 * 7 * 256,  # 7x7 256-channel fmap (comment previously said 64)
        input_shape=(latent_dim, ),
        activation=LeakyReLU(0.3)))

g.add(Reshape(target_shape=(7, 7, 256)))

# 7x7x256 -> 14x14x128 (stride-2 transposed conv).
g.add(
    Conv2DTranspose(filters=128,
                    kernel_size=[5, 5],
                    strides=2,
                    padding="same",
                    activation=LeakyReLU(0.3)))

g.add(BatchNormalization())
g.add(
    Conv2DTranspose(filters=64,
                    kernel_size=[5, 5],
示例#30
0
def unet3D(
    x_in,
    img_shape,
    out_im_chans,
    nf_enc=(64, 64, 128, 128, 256, 256, 512),
    nf_dec=None,
    layer_prefix='unet',
    n_convs_per_stage=1,
):
    """Build a 3D U-Net over the Keras tensor `x_in`.

    Args:
        x_in: input Keras tensor, shape (batch, D, H, W, C) — assumed
            channels-last; TODO confirm against callers.
        img_shape: target spatial shape used to decide when to stop upsampling.
        out_im_chans: number of channels in the final 1x1x1 conv output.
        nf_enc: filter counts per encoder stage.
        nf_dec: filter counts per decoder stage; defaults to the reversed
            encoder widths (minus the first stage).
        layer_prefix: prefix for all layer names.
        n_convs_per_stage: convolutions per encoder/decoder stage.

    Returns:
        Output Keras tensor with `out_im_chans` channels and NO activation —
        add your own activation after this model.
    """
    ks = 3  # kernel size shared by all encoder/decoder convs
    x = x_in

    # ---- Encoder: conv stages with LeakyReLU, max-pool between stages ----
    encodings = []           # per-stage outputs, reused as skip connections
    encoding_vol_sizes = []  # spatial sizes, used to align skips in the decoder
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv3D(nf_enc[i],
                       kernel_size=ks,
                       strides=(1, 1, 1),
                       padding='same',
                       name='{}_enc_conv3D_{}_{}'.format(
                           layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        # Downsample after every stage except the last (bottleneck).
        if i < len(nf_enc) - 1:
            x = MaxPooling3D(pool_size=(2, 2, 2),
                             padding='same',
                             name='{}_enc_maxpool_{}'.format(layer_prefix,
                                                             i))(x)

    if nf_dec is None:
        # Mirror the encoder widths (minus the first stage) by default.
        nf_dec = list(reversed(nf_enc[1:]))

    # ---- Decoder: upsample, concatenate matching skip, conv stages ----
    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # Only upsample while some dimension is below the target resolution.
        # BUG FIX: the original wrote `curr_shape < list(...)`, which compares
        # two Python lists LEXICOGRAPHICALLY (a single bool), not element-wise.
        # Convert to arrays so `<` broadcasts per-dimension as intended.
        if np.any(np.asarray(curr_shape) <
                  np.asarray(img_shape[:len(curr_shape)])):
            us = (2, 2, 2)
            x = UpSampling3D(size=us,
                             name='{}_dec_upsamp_{}'.format(layer_prefix,
                                                            i))(x)

        # Concatenate the matching encoder skip, except at the final stage.
        if i <= len(encodings) - 2:
            # Pooling with 'same' padding can leave off-by-one spatial sizes;
            # pad/crop the upsampled tensor to the stored skip size first.
            x = _pad_or_crop_to_shape_3D(
                x, np.asarray(x.get_shape().as_list()[1:-1]),
                encoding_vol_sizes[-i - 2])
            x = Concatenate(axis=-1)([x, encodings[-i - 2]])

        for j in range(n_convs_per_stage):
            x = Conv3D(nf_dec[i],
                       kernel_size=ks,
                       strides=(1, 1, 1),
                       padding='same',
                       name='{}_dec_conv3D_{}_{}'.format(layer_prefix, i,
                                                         j))(x)
            x = LeakyReLU(0.2)(x)

    # Final 1x1x1 conv to the requested channel count, no activation.
    y = Conv3D(out_im_chans,
               kernel_size=1,
               padding='same',
               name='{}_dec_conv3D_final'.format(layer_prefix))(
                   x)  # add your own activation after this model

    # add your own activation after this model
    return y