def generator(self):
        if self.G:
            return self.G
        self.G = Sequential()
        dropout = 0.4
        depth = 8
        dim = self.patch[0]
        # In: 1024 x 1024 x 2
        # Out (after the strided convolutions below): 128 x 128 x (depth * 8)
        self.G.add(
            Conv2D(depth,
                   kernel_size=self.kernel_size,
                   strides=2,
                   input_shape=(1024, 1024, 2),
                   data_format='channels_last',
                   padding='same'))
        self.G.add(LeakyReLU(alpha=0.2))
        self.G.add(
            Conv2D(depth * 2,
                   kernel_size=self.kernel_size,
                   strides=2,
                   padding='same'))
        self.G.add(LeakyReLU(alpha=0.2))
        self.G.add(BatchNormalization(momentum=0.8))
        self.G.add(
            Conv2D(depth * 4,
                   kernel_size=self.kernel_size,
                   strides=2,
                   padding='same'))
        self.G.add(LeakyReLU(alpha=0.2))
        self.G.add(BatchNormalization(momentum=0.8))
        self.G.add(
            Conv2D(depth * 8,
                   kernel_size=self.kernel_size,
                   strides=1,
                   padding='same'))
        self.G.add(LeakyReLU(alpha=0.2))
        self.G.add(BatchNormalization(momentum=0.8))
        # self.G.add(Flatten())
        # self.G.add(Dense(dim*dim*depth, input_dim=dim*dim))
        # self.G.add(BatchNormalization(momentum=0.9))
        # self.G.add(Activation('relu'))
        # self.G.add(Reshape((dim, dim, depth)))
        self.G.add(Dropout(dropout))

        # In: 128 x 128 x (depth * 8)
        # Out: 1024 x 1024 x 1
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth * 8, self.kernel_size,
                                   padding='same'))
        self.G.add(BatchNormalization(momentum=0.8))
        self.G.add(Activation('relu'))

        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth * 4, self.kernel_size,
                                   padding='same'))
        self.G.add(BatchNormalization(momentum=0.8))
        self.G.add(Activation('relu'))

        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth * 2, self.kernel_size,
                                   padding='same'))
        self.G.add(BatchNormalization(momentum=0.8))
        self.G.add(Activation('relu'))

        # self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(depth, self.kernel_size, padding='same'))
        self.G.add(BatchNormalization(momentum=0.8))
        self.G.add(Activation('relu'))

        # Out: 1024 x 1024 x 1 grayscale image, values in [-1.0, 1.0] (tanh)
        self.G.add(Conv2DTranspose(1, self.kernel_size, padding='same'))
        self.G.add(Activation('tanh'))
        self.G.summary()
        return self.G

    def leaky_dropout(self, alpha1, alpha2, rate):
        mashup = Input(shape=(None, None, self.channels), name='input')
        dropout = Dropout(rate)(mashup)

        conv_a = Conv2D(64, 3, padding='same')(dropout)
        conv_a = LeakyReLU(alpha=alpha1)(conv_a)
        conv_a = Dropout(rate)(conv_a)

        conv = Conv2D(64, 4, strides=2, padding='same', use_bias=False)(conv_a)
        conv = LeakyReLU(alpha=alpha1)(conv)
        conv = Dropout(rate)(conv)

        conv = BatchNormalization()(conv)

        conv_b = Conv2D(64, 3, padding='same')(conv)
        conv_b = LeakyReLU(alpha=alpha1)(conv_b)
        conv_b = Dropout(rate)(conv_b)

        conv = Conv2D(64, 4, strides=2, padding='same', use_bias=False)(conv_b)
        conv = LeakyReLU(alpha=alpha1)(conv)
        conv = Dropout(rate)(conv)

        conv = BatchNormalization()(conv)

        conv = Conv2D(128, 3, padding='same')(conv)
        conv = LeakyReLU(alpha=alpha1)(conv)
        conv = Dropout(rate)(conv)

        conv = Conv2D(128, 3, padding='same', use_bias=False)(conv)
        conv = LeakyReLU(alpha=alpha1)(conv)
        conv = Dropout(rate)(conv)

        conv = BatchNormalization()(conv)
        conv = UpSampling2D((2, 2))(conv)

        conv = Concatenate()([conv, conv_b])

        conv = Conv2D(64, 3, padding='same')(conv)
        conv = LeakyReLU(alpha=alpha1)(conv)
        conv = Dropout(rate)(conv)

        conv = Conv2D(64, 3, padding='same', use_bias=False)(conv)
        conv = LeakyReLU(alpha=alpha1)(conv)
        conv = Dropout(rate)(conv)

        conv = BatchNormalization()(conv)
        conv = UpSampling2D((2, 2))(conv)

        conv = Concatenate()([conv, conv_a])

        conv = Conv2D(64, 3, padding='same')(conv)
        conv = LeakyReLU(alpha=alpha2)(conv)
        conv = Dropout(rate)(conv)

        conv = Conv2D(64, 3, padding='same')(conv)
        conv = LeakyReLU(alpha=alpha2)(conv)
        conv = Dropout(rate)(conv)

        conv = Conv2D(32, 3, padding='same')(conv)
        conv = LeakyReLU(alpha=alpha2)(conv)
        conv = Dropout(rate)(conv)

        conv = Conv2D(self.channels, 3, padding='same')(conv)
        if not self.config.learn_phase:
            conv = LeakyReLU(alpha=alpha2)(conv)

        vocal = conv
        return Model(inputs=mashup, outputs=vocal)
Example #3
# the generator and discriminator are lumped into one model;
# depending on which one we are working on, different
# layers are trainable

latent_dim = 100
full_input_dim = latent_dim
h_dim = 1024
img_shape = (28, 28)
img_len = 28 * 28

g_h1 = Dense(h_dim // 4,
             input_dim=full_input_dim,
             name="gen_h1",
             kernel_initializer=initializer,
             kernel_regularizer=regularizer)
g_h1a = LeakyReLU(0.2)

g_h2 = Dense(h_dim // 2,
             name="gen_h2",
             kernel_initializer=initializer,
             kernel_regularizer=regularizer)
g_h2a = LeakyReLU(0.2)

g_h3 = Dense(h_dim,
             name="gen_h3",
             kernel_initializer=initializer,
             kernel_regularizer=regularizer)
g_h3a = LeakyReLU(0.2)

g_y = Dense(img_len,
            activation="sigmoid",
            name="gen_y",
            kernel_initializer=initializer,
            kernel_regularizer=regularizer)
Example #4
                 0.01)))  # activity_regularizer=regularizers.l2(0.01)
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(
        LSTM(40,
             activation='tanh',
             recurrent_activation='sigmoid',
             activity_regularizer=regularizers.l2(
                 0.01)))  # activity_regularizer=regularizers.l2(0.01)
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(10))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.5))

    model.add(Dense(1))
    opt = Nadam(lr=0.005)  # 0.005, 0.002, 0.001, 0.01
    model.compile(loss='mse', optimizer=opt)

    model.load_weights(MODEL_FILE_PATH)
    predictions = model.predict(test_X)
    score = model.evaluate(test_X, test_Y, verbose=0)
    print('Test loss:', score)  # this is mean_squared_error

    ###########################################################

    df_result = pd.DataFrame(test_Y, columns=['real'])
    df_result['pred'] = pd.Series(predictions.reshape(-1))
    def call(self, inputs, **kwargs):
        """
        Creates the layer as a Keras graph

        Notes:
            This does not add self loops to the adjacency matrix.
            The output indices are only used when `final_layer=True`

        Args:
            inputs (list): list of inputs with 4 items:
            node features (size b x N x F),
            output indices (size b x M),
            sparse graph adjacency matrix (size N x N),
            where N is the number of nodes in the graph,
                  F is the dimensionality of node features
                  M is the number of output nodes
        """
        X = inputs[0]  # Node features (1 x N x F)
        out_indices = inputs[1]  # output indices (1 x M)
        A_sparse = inputs[2]  # Adjacency matrix (N x N)

        if not isinstance(A_sparse, K.tf.SparseTensor):
            raise TypeError("A is not sparse")

        # Get undirected graph edges (E x 2)
        A_indices = A_sparse.indices

        batch_dim, n_nodes, _ = K.int_shape(X)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )
        else:
            # Remove singleton batch dimension
            out_indices = K.squeeze(out_indices, 0)
            X = K.squeeze(X, 0)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(
                features, attention_kernel[0])  # (N x 1), [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(
                features, attention_kernel[1])  # (N x 1), [a_2]^T [Wh_j]

            # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
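            # (this dense (N x N) tensor is not used by the sparse path below,
            # which gathers attention values only at the graph's edge positions)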
            dense = attn_for_self + K.transpose(
                attn_for_neighs)  # (N x N) via broadcasting

            # Create sparse attention vector (All non-zero values of the matrix)
            sparse_attn_self = K.tf.gather(K.reshape(attn_for_self, [-1]),
                                           A_indices[:, 0],
                                           axis=0)
            sparse_attn_neighs = K.tf.gather(K.reshape(attn_for_neighs, [-1]),
                                             A_indices[:, 1],
                                             axis=0)
            attn_values = sparse_attn_self + sparse_attn_neighs

            # Add nonlinearity
            attn_values = LeakyReLU(alpha=0.2)(attn_values)

            # Apply dropout to features and attention coefficients
            dropout_feat = Dropout(self.in_dropout_rate)(features)  # (N x F')
            dropout_attn = Dropout(self.attn_dropout_rate)(
                attn_values)  # (E,) one attention value per edge

            # Convert to sparse matrix
            sparse_attn = K.tf.sparse.SparseTensor(
                A_indices, values=dropout_attn, dense_shape=[n_nodes, n_nodes])

            # Apply softmax to get attention coefficients
            sparse_attn = K.tf.sparse.softmax(
                sparse_attn)  # (N x N), Eq. 3 of the paper

            # Linear combination with neighbors' features [YT: see Eq. 4]
            node_features = K.tf.sparse.matmul(sparse_attn,
                                               dropout_feat)  # (N x F')

            if self.use_bias:
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == "concat":
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # (N x F')

        output = self.activation(output)

        # On the final layer we gather the nodes referenced by the indices
        if self.final_layer:
            output = K.gather(output, out_indices)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)
        return output
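
A hedged sketch of assembling the three inputs described in the docstring above (all names and shapes below are illustrative assumptions; the layer class itself is not part of this snippet):

import numpy as np
import tensorflow as tf

N, F, M = 4, 8, 2  # nodes, feature dimensionality, output nodes
X = np.random.rand(1, N, F).astype('float32')        # node features (1 x N x F)
out_indices = np.array([[0, 2]], dtype='int32')      # output indices (1 x M)
edges = np.array([[0, 1], [1, 0], [1, 2], [2, 1]],
                 dtype='int64')                      # undirected edges (E x 2)
A_sparse = tf.SparseTensor(indices=edges,
                           values=np.ones(len(edges), dtype='float32'),
                           dense_shape=[N, N])       # sparse adjacency (N x N)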
Example #6
'''
model = Sequential([
    Dense(32, activation='relu', input_shape=(7,)), #CHANGE THIS VARIABLE IF YOU DROP THINGS
    Dense(32, activation='relu'),
    Dense(32, activation='relu'),
    Dense(32, activation='relu'),
    Dense(32, activation='relu'),
    Dense(32, activation='relu'),
    Dense(32, activation='relu'),
    Dense(1, activation='sigmoid')
])
'''

model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(7, )))
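# Note: the four LeakyReLU layers below act directly on the ReLU output of the
# Dense layer above; LeakyReLU is the identity for non-negative inputs, so they
# leave the activations unchanged.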
model.add(LeakyReLU(alpha=0.05))
model.add(LeakyReLU(alpha=0.05))
model.add(LeakyReLU(alpha=0.05))
model.add(LeakyReLU(alpha=0.05))
model.add(Dense(1, activation='sigmoid'))

# Training network ------------------------------------------------------------
print("Training network...")

model.compile(optimizer='sgd',
              loss='binary_crossentropy',
              metrics=['accuracy'])

train = model.fit(X_train,
                  Y_train,
                  batch_size=32,
Example #7
    def decoder_b(self):
        """ Decoder for side B """
        kwargs = dict(kernel_size=5,
                      kernel_initializer=self.kernel_initializer)
        dense_dim = 384 if self.low_mem else self.config["complexity_decoder_b"]
        decoder_complexity = 384 if self.low_mem else 512
        decoder_shape = self.input_shape[0] // 16
        input_ = Input(shape=(decoder_shape, decoder_shape, dense_dim))

        var_x = input_
        if self.low_mem:
            var_x = UpscaleBlock(decoder_complexity,
                                 activation="leakyrelu",
                                 **kwargs)(var_x)
            var_x = UpscaleBlock(decoder_complexity // 2,
                                 activation="leakyrelu",
                                 **kwargs)(var_x)
            var_x = UpscaleBlock(decoder_complexity // 4,
                                 activation="leakyrelu",
                                 **kwargs)(var_x)
            var_x = UpscaleBlock(decoder_complexity // 8,
                                 activation="leakyrelu",
                                 **kwargs)(var_x)
        else:
            var_x = UpscaleBlock(decoder_complexity, activation=None,
                                 **kwargs)(var_x)
            var_x = LeakyReLU(alpha=0.2)(var_x)
            var_x = ResidualBlock(
                decoder_complexity,
                kernel_initializer=self.kernel_initializer)(var_x)
            var_x = UpscaleBlock(decoder_complexity, activation=None,
                                 **kwargs)(var_x)
            var_x = LeakyReLU(alpha=0.2)(var_x)
            var_x = ResidualBlock(
                decoder_complexity,
                kernel_initializer=self.kernel_initializer)(var_x)
            var_x = UpscaleBlock(decoder_complexity // 2,
                                 activation=None,
                                 **kwargs)(var_x)
            var_x = LeakyReLU(alpha=0.2)(var_x)
            var_x = ResidualBlock(
                decoder_complexity // 2,
                kernel_initializer=self.kernel_initializer)(var_x)
            var_x = UpscaleBlock(decoder_complexity // 4,
                                 activation="leakyrelu",
                                 **kwargs)(var_x)
        var_x = Conv2DOutput(3, 5, name="face_out_b")(var_x)
        outputs = [var_x]

        if self.config.get("learn_mask", False):
            var_y = input_
            var_y = UpscaleBlock(decoder_complexity,
                                 activation="leakyrelu")(var_y)
            if not self.low_mem:
                var_y = UpscaleBlock(decoder_complexity,
                                     activation="leakyrelu")(var_y)
            var_y = UpscaleBlock(decoder_complexity // 2,
                                 activation="leakyrelu")(var_y)
            var_y = UpscaleBlock(decoder_complexity // 4,
                                 activation="leakyrelu")(var_y)
            if self.low_mem:
                var_y = UpscaleBlock(decoder_complexity // 8,
                                     activation="leakyrelu")(var_y)
            var_y = Conv2DOutput(1, 5, name="mask_out_b")(var_y)
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs, name="decoder_b")
Example #8
 def build_discriminator(self):
     # Source image input
     source_img_in = Input(shape=self.image_shape)
     # Target image input
     target_img_in = Input(shape=self.image_shape)
     # concatenate images
     merged = Concatenate()([source_img_in, target_img_in])
     # C64
     model = Conv2D(64, (4, 4), strides=(2, 2), padding="same",
                    kernel_initializer=RandomNormal(stddev=0.02))(merged)
     model = LeakyReLU(alpha=0.2)(model)
     # C128
     model = Conv2D(128, (4, 4), strides=(2, 2), padding="same",
                    kernel_initializer=RandomNormal(stddev=0.02))(model)
     model = BatchNormalization()(model)
     model = LeakyReLU(alpha=0.2)(model)
     # C256
     model = Conv2D(256, (4, 4), strides=(2, 2), padding="same",
                    kernel_initializer=RandomNormal(stddev=0.02))(model)
     model = BatchNormalization()(model)
     model = LeakyReLU(alpha=0.2)(model)
     # C512
     model = Conv2D(512, (4, 4), strides=(2, 2), padding="same",
                    kernel_initializer=RandomNormal(stddev=0.02))(model)
     model = BatchNormalization()(model)
     model = LeakyReLU(alpha=0.2)(model)
     # Last layer
     model = Conv2D(256, (4, 4), padding="same",
                    kernel_initializer=RandomNormal(stddev=0.02))(model)
     model = BatchNormalization()(model)
     model = LeakyReLU(alpha=0.2)(model)
     # Patch output
     model = Conv2D(1, (4, 4), padding="same",
                    kernel_initializer=RandomNormal(stddev=0.02))(model)
     patch_out = Activation("sigmoid")(model)
     # Define model
     model = Model([source_img_in, target_img_in], patch_out)
     # Compile model
     opt = Adam(lr=0.0002, beta_1=0.5)  # beta_1=0.5 per the pix2pix setup
     model.compile(loss="binary_crossentropy", optimizer=opt, loss_weights=[0.5])

     return model
Example #9
    def get_segmentor_discriminator_adversarial(self):
        dropout = 0.75
        channel_depth = 32

        s_inputs = Input((self.img_rows, self.img_cols, 1), name='raw_image_input')
        conv1 = Conv2D(32, (3,3), padding='same')(s_inputs)
        conv1 = BatchNormalization(momentum=0.9)(conv1)
        conv1 = LeakyReLU(alpha=0.2)(conv1)
        conv1 = Dropout(dropout)(conv1)
        pool1 = MaxPooling2D(pool_size=(2,2))(conv1)

        conv2 = Conv2D(64, (3, 3), padding='same')(pool1)
        conv2 = BatchNormalization(momentum=0.9)(conv2)
        conv2 = LeakyReLU(alpha=0.2)(conv2)
        conv2 = Dropout(dropout)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(128, (3, 3), padding='same')(pool2)
        conv3 = BatchNormalization(momentum=0.9)(conv3)
        conv3 = LeakyReLU(alpha=0.2)(conv3)
        conv3 = Dropout(dropout)(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(256, (3, 3), padding='same')(pool3)
        conv4 = BatchNormalization(momentum=0.9)(conv4)
        conv4 = LeakyReLU(alpha=0.2)(conv4)
        conv4 = Dropout(dropout)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = Conv2D(512, (3,3), padding='same')(pool4)
        conv5 = LeakyReLU(alpha=0.2)(conv5)

        up6 = Conv2DTranspose(256, (2,2), strides=(2,2), padding='same')(conv5)
        up6 = concatenate([up6, conv4], axis=3)
        conv6 = Conv2D(256, (3,3), padding='same')(up6)
        conv6 = BatchNormalization(momentum=0.9)(conv6)
        conv6 = LeakyReLU(alpha=0.2)(conv6)
        conv6 = Dropout(dropout)(conv6)

        up7 = Conv2DTranspose(128, (2,2), strides=(2,2), padding='same')(conv6)
        up7 = concatenate([up7, conv3], axis=3)
        conv7 = Conv2D(128, (3,3), padding='same')(up7)
        conv7 = BatchNormalization(momentum=0.9)(conv7)
        conv7 = LeakyReLU(alpha=0.2)(conv7)
        conv7 = Dropout(dropout)(conv7)

        up8 = Conv2DTranspose(64, (2,2), strides=(2,2), padding='same')(conv7)
        up8 = concatenate([up8, conv2], axis=3)
        conv8 = Conv2D(64, (3,3), padding='same')(up8)
        conv8 = BatchNormalization(momentum=0.9)(conv8)
        conv8 = LeakyReLU(alpha=0.2)(conv8)
        conv8 = Dropout(dropout)(conv8)

        up9 = Conv2DTranspose(32, (2,2), strides=(2,2), padding='same')(conv8)
        up9 = concatenate([up9, conv1], axis=3)
        conv9 = Conv2D(32, (3,3), padding='same')(up9)
        conv9 = BatchNormalization(momentum=0.9)(conv9)
        conv9 = LeakyReLU(alpha=0.2)(conv9)
        conv9 = Dropout(dropout)(conv9)

        conv10 = Conv2D(1, (1,1), name='segmentor_output')(conv9)

        d_conv1 = Conv2D(channel_depth * 1, 5, strides=2, padding='same')
        d_conv1_relu = LeakyReLU(alpha=0.2)
        d_conv1_dropout = Dropout(dropout)

        d_conv2 = Conv2D(channel_depth * 2, 5, strides=2, padding='same')
        d_conv2_relu = LeakyReLU(alpha=0.2)
        d_conv2_dropout = Dropout(dropout)

        d_conv3 = Conv2D(channel_depth * 4, 5, strides=2, padding='same')
        d_conv3_relu = LeakyReLU(alpha=0.2)
        d_conv3_dropout = Dropout(dropout)

        d_conv4 = Conv2D(channel_depth * 8, 5, strides=1, padding='same')
        d_conv4_relu = LeakyReLU(alpha=0.2)
        d_conv4_dropout = Dropout(dropout)

        d_flatten = Flatten()
        d_logit = Dense(1)
        d_pred = Activation('sigmoid', name='discriminator_output')

        discriminator_input = Input(shape=(self.img_rows, self.img_cols, 2))
        discriminator_conv1 = d_conv1(discriminator_input)
        discriminator_conv1 = d_conv1_relu(discriminator_conv1)
        discriminator_conv1 = d_conv1_dropout(discriminator_conv1)

        discriminator_conv2 = d_conv2(discriminator_conv1)
        discriminator_conv2 = d_conv2_relu(discriminator_conv2)
        discriminator_conv2 = d_conv2_dropout(discriminator_conv2)

        discriminator_conv3 = d_conv3(discriminator_conv2)
        discriminator_conv3 = d_conv3_relu(discriminator_conv3)
        discriminator_conv3 = d_conv3_dropout(discriminator_conv3)

        discriminator_conv4 = d_conv4(discriminator_conv3)
        discriminator_conv4 = d_conv4_relu(discriminator_conv4)
        discriminator_conv4 = d_conv4_dropout(discriminator_conv4)

        discriminator_flatten = d_flatten(discriminator_conv4)
        discriminator_logit = d_logit(discriminator_flatten)
        discriminator_pred = d_pred(discriminator_logit)

        adversarial_d_input = concatenate([s_inputs, conv10], axis=3)
        adversarial_d_conv1 = d_conv1(adversarial_d_input)
        adversarial_d_conv1 = d_conv1_relu(adversarial_d_conv1)
        adversarial_d_conv1 = d_conv1_dropout(adversarial_d_conv1)

        adversarial_d_conv2 = d_conv2(adversarial_d_conv1)
        adversarial_d_conv2 = d_conv2_relu(adversarial_d_conv2)
        adversarial_d_conv2 = d_conv2_dropout(adversarial_d_conv2)

        adversarial_d_conv3 = d_conv3(adversarial_d_conv2)
        adversarial_d_conv3 = d_conv3_relu(adversarial_d_conv3)
        adversarial_d_conv3 = d_conv3_dropout(adversarial_d_conv3)

        adversarial_d_conv4 = d_conv4(adversarial_d_conv3)
        adversarial_d_conv4 = d_conv4_relu(adversarial_d_conv4)
        adversarial_d_conv4 = d_conv4_dropout(adversarial_d_conv4)

        adversarial_d_flatten = d_flatten(adversarial_d_conv4)
        adversarial_d_logit = d_logit(adversarial_d_flatten)
        adversarial_d_pred = d_pred(adversarial_d_logit)

        segmentor = Model(inputs=[s_inputs], outputs=[conv10])
        discriminator = Model(inputs=[discriminator_input], outputs=[discriminator_pred])
        adversarial_model = Model(inputs=[s_inputs], outputs=[adversarial_d_pred])

        discriminator_optimizer = RMSprop(lr=0.0002, decay=6e-8)
        discriminator.compile(loss='binary_crossentropy', optimizer=discriminator_optimizer, metrics=['accuracy'])

        adversarial_optimizer = RMSprop(lr=0.0002, decay=6e-8)
        adversarial_model.compile(loss='binary_crossentropy', optimizer=adversarial_optimizer, metrics=['accuracy'])

        return segmentor, discriminator, adversarial_model
test_speech_component = np.reshape(test_speech_component, (test_speech_component.shape[0],test_speech_component.shape[1],1))
print('     test_speech_component data shape: %s' % str(test_speech_component.shape))

test_noisy_speech = np.reshape(test_noisy_speech, (test_noisy_speech.shape[0],test_noisy_speech.shape[1],1))
print('     test_noisy_speech data shape: %s' % str(test_noisy_speech.shape))
#####################################################################################
# 3 define model
#####################################################################################

input_noisy = Input(shape=INPUT_SHAPE)
input_noise_component = Input(shape=INPUT_SHAPE2)
input_speech_component = Input(shape=INPUT_SHAPE2)

c1 = Conv1D(n1, N_cnn, padding='same')(input_noisy)
c1 = LeakyReLU(0.2)(c1)
c1 = Conv1D(n1, N_cnn, padding='same')(c1)
c1 = LeakyReLU(0.2)(c1)
x = MaxPooling1D(2)(c1)

c2 = Conv1D(n2, N_cnn, padding='same')(x)
c2 = LeakyReLU(0.2)(c2)
c2 = Conv1D(n2, N_cnn, padding='same')(c2)
c2 = LeakyReLU(0.2)(c2)
x = MaxPooling1D(2)(c2)

c3 = Conv1D(n3, N_cnn, padding='same')(x)
c3 = LeakyReLU(0.2)(c3)
x = UpSampling1D(2)(c3)

c2_2 = Conv1D(n2, N_cnn, padding='same')(x)
def final_model(input_shape):

    X_input = Input(input_shape, name='input')
    
    X = Conv2D(32,(3,3),strides=(1,1),padding='same',name='conv2d_1')(X_input)
    X = BatchNormalization(name='bn_1')(X)
    X = LeakyReLU(name='leaky_relu_1')(X)
    X = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',name='max_pooling_1')(X)
    
    X = Conv2D(64,(3,3),strides=(1,1),padding='same',name='conv2d_2')(X)
    X = BatchNormalization(name='bn_2')(X)
    X = LeakyReLU(name='leaky_relu_2')(X)
    X = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',name='max_pooling_2')(X)
    
    X = Conv2D(128,(3,3),strides=(1,1),padding='same',name='conv2d_3')(X)
    X = BatchNormalization(name='bn_3')(X)
    X = LeakyReLU(name='leaky_relu_3')(X)
    
    X = Conv2D(64,(1,1),strides=(1,1),padding='same',name='conv2d_4')(X)
    X = BatchNormalization(name='bn_4')(X)
    X = LeakyReLU(name='leaky_relu_4')(X)
    
    X = Conv2D(128,(3,3),strides=(1,1),padding='same',name='conv2d_5')(X)
    X = BatchNormalization(name='bn_5')(X)
    X = LeakyReLU(name='leaky_relu_5')(X)
    X = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',name='max_pooling_3')(X)
    
    X = Conv2D(256,(3,3),strides=(1,1),padding='same',name='conv2d_6')(X)
    X = BatchNormalization(name='bn_6')(X)
    X = LeakyReLU(name='leaky_relu_6')(X)
    
    X = Conv2D(128,(1,1),strides=(1,1),padding='same',name='conv2d_7')(X)
    X = BatchNormalization(name='bn_7')(X)
    X = LeakyReLU(name='leaky_relu_7')(X)
    
    X = Conv2D(256,(3,3),strides=(1,1),padding='same',name='conv2d_8')(X)
    X = BatchNormalization(name='bn_8')(X)
    X = LeakyReLU(name='leaky_relu_8')(X)
    X = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',name='max_pooling_4')(X)
    
    X = Conv2D(512,(3,3),strides=(1,1),padding='same',name='conv2d_9')(X)
    X = BatchNormalization(name='bn_9')(X)
    X = LeakyReLU(name='leaky_relu_9')(X)
    
    X = Conv2D(256,(1,1),strides=(1,1),padding='same',name='conv2d_10')(X)
    X = BatchNormalization(name='bn_10')(X)
    X = LeakyReLU(name='leaky_relu_10')(X)
    
    X = Conv2D(512,(3,3),strides=(1,1),padding='same',name='conv2d_11')(X)
    X = BatchNormalization(name='bn_11')(X)
    X = LeakyReLU(name='leaky_relu_11')(X)
    
    X = Conv2D(256,(1,1),strides=(1,1),padding='same',name='conv2d_12')(X)
    X = BatchNormalization(name='bn_12')(X)
    X = LeakyReLU(name='leaky_relu_12')(X)
    
    X = Conv2D(512,(3,3),strides=(1,1),padding='same',name='conv2d_13')(X)
    X = BatchNormalization(name='bn_13')(X)
    X = LeakyReLU(name='leaky_relu_13')(X)
    X = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',name='max_pooling_5')(X)
    
    X = Conv2D(1024,(3,3),strides=(1,1),padding='same',name='conv2d_14')(X)
    X = BatchNormalization(name='bn_14')(X)
    X = LeakyReLU(name='leaky_relu_14')(X)
    
    X = Conv2D(512,(1,1),strides=(1,1),padding='same',name='conv2d_15')(X)
    X = BatchNormalization(name='bn_15')(X)
    X = LeakyReLU(name='leaky_relu_15')(X)
    
    X = Conv2D(1024,(3,3),strides=(1,1),padding='same',name='conv2d_16')(X)
    X = BatchNormalization(name='bn_16')(X)
    X = LeakyReLU(name='leaky_relu_16')(X)
    
    X = Conv2D(512,(1,1),strides=(1,1),padding='same',name='conv2d_17')(X)
    X = BatchNormalization(name='bn_17')(X)
    X = LeakyReLU(name='leaky_relu_17')(X)
    
    X = Conv2D(1024,(3,3),strides=(1,1),padding='same',name='conv2d_18')(X)
    X = BatchNormalization(name='bn_18')(X)
    X = LeakyReLU(name='leaky_relu_18')(X)
    
    X = Conv2D(1024,(3,3),strides=(1,1),padding='same',name='conv2d_19')(X)
    X = BatchNormalization(name='bn_19')(X)
    X = LeakyReLU(name='leaky_relu_19')(X)
    
    X = Conv2D(1024,(3,3),strides=(1,1),padding='same',name='conv2d_20')(X)
    X = BatchNormalization(name='bn_20')(X)
    X = LeakyReLU(name='leaky_relu_20')(X)
    
    X = Conv2D(1024,(3,3),strides=(1,1),padding='same',name='conv2d_21')(X)
    X = BatchNormalization(name='bn_21')(X)
    X = LeakyReLU(name='leaky_relu_21')(X)

    X = Conv2D(425,(1,1),strides=(1,1),padding='same',name='conv2d_22')(X)
    
    X = Flatten(name='flatten_1')(X)
    
    X = Dense(64,activation='relu',name='fc_1')(X)
    
    X = Dense(64,activation='relu',name='fc_2')(X)
    
    X = Dense(4,name='fc_3')(X)
    
    model = Model(inputs=X_input,outputs=X)
    
    return model
onehotencoder_h_prev_test = OneHotEncoder(categorical_features=[-2])
x_test_prev = onehotencoder_h_prev_test.fit_transform(x_test_prev).toarray()
x_test_prev = x_test_prev[:, 1:]

scaled_x_train_prev = scalar.fit_transform(x_train_prev)
scaled_x_test_prev = scalar.transform(x_test_prev)

from keras.layers import LeakyReLU
ANN_leaky_relu = Sequential()
# The Input Layer :
ANN_leaky_relu.add(
    Dense(128,
          kernel_initializer='normal',
          input_dim=scaled_x_train_prev.shape[1]))
ANN_leaky_relu.add(LeakyReLU(alpha=0.1))
# The Hidden Layers :
ANN_leaky_relu.add(Dropout(0.2))
ANN_leaky_relu.add(Dense(256, kernel_initializer='normal'))
ANN_leaky_relu.add(LeakyReLU(alpha=0.1))

ANN_leaky_relu.add(Dense(256, kernel_initializer='normal'))
ANN_leaky_relu.add(LeakyReLU(alpha=0.1))
ANN_leaky_relu.add(Dropout(0.2))

ANN_leaky_relu.add(Dense(256, kernel_initializer='normal'))
ANN_leaky_relu.add(LeakyReLU(alpha=0.1))
ANN_leaky_relu.add(Dropout(0.2))

# The Output Layer :
ANN_leaky_relu.add(Dense(1, kernel_initializer='normal', activation='relu'))
Example #13
def denselayer(input_layer, units):
    CL_1 = Dense(units, activation='sigmoid')(input_layer)
    CL_2 = LeakyReLU(alpha=0.3)(CL_1)
    return CL_2
Example #14
    def call(self, inputs):
        X = inputs[0]  # Node features (N x F)
        A = inputs[1]  # Adjacency matrix (N x N)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # One simplified version of the attention (good for large-scale data)
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            # attn_for_self = K.dot(features, attention_kernel[0])    # (N x 1), [a_1]^T [Wh_i]
            # attn_for_neighs = K.dot(features, attention_kernel[1])  # (N x 1), [a_2]^T [Wh_j]

            # # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
            # dense = attn_for_self + K.transpose(attn_for_neighs)  # (N x N) via broadcasting

            # Implementation based on the original paper (good for small-scale data)
            center_embeddings = K.tile(K.expand_dims(features, 1),
                                       [1, shape(X, 0), 1])  # [N, N, F']
            neighbor_embedding = K.tile(K.expand_dims(features, 0),
                                        [shape(X, 0), 1, 1])  # [N, N, F']

            embedding_pairs = K.concatenate(
                [center_embeddings, neighbor_embedding], 2)  # [N, N, F'*2]

            dense = K.squeeze(K.dot(embedding_pairs, attention_kernel[2]),
                              2)  # [N, N]

            # Add nonlinearity
            dense = LeakyReLU(alpha=0.2)(dense)

            # Mask values before activation (Vaswani et al., 2017)
            mask = -10e9 * (1.0 - A)
            dense += mask

            # Apply softmax to get attention coefficients
            dense = K.softmax(dense)  # (N x N)

            # Apply dropout to features and attention coefficients
            dropout_attn = Dropout(self.dropout_rate)(dense)  # (N x N)
            dropout_feat = Dropout(self.dropout_rate)(features)  # (N x F')

            # Linear combination with neighbors' features
            node_features = K.dot(dropout_attn, dropout_feat)  # (N x F')

            if self.use_bias:
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == 'concat':
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # (N x F')

        output = self.activation(output)
        return output
Example #15
y_ = pd.DataFrame(y_)
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)

# NN (Skip Connections) Model
input_ = Input(shape=(len(X.columns), 1))
x = Conv1D(128, (3),
           padding='same',
           kernel_initializer='glorot_uniform',
           kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
           bias_regularizer=regularizers.l2(1e-4),
           activity_regularizer=regularizers.l2(1e-5))(input_)
x = LeakyReLU(alpha=0.05)(x)
x = Conv1D(64, (3),
           padding='same',
           kernel_initializer='glorot_uniform',
           kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
           bias_regularizer=regularizers.l2(1e-4),
           activity_regularizer=regularizers.l2(1e-5))(x)
x = LeakyReLU(alpha=0.05)(x)
x = Conv1D(32, (3),
           padding='same',
           kernel_initializer='glorot_uniform',
           kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
           bias_regularizer=regularizers.l2(1e-4),
           activity_regularizer=regularizers.l2(1e-5))(x)
x = LeakyReLU(alpha=0.05)(x)
Example #16
def conv(x, **kwargs):
    x = my_conv(**kwargs)(x)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(alpha=0.05)(x)
    return x
Example #17
    def __init__(self, input_shape):  #input_shape = (512,128,1)
        # adapt this if using `channels_first` image data format

        input_spec = Input(shape=input_shape)

        #encode part:
        encoded_spec = Conv2D(filters=16,
                              kernel_size=(5, 5),
                              strides=2,
                              padding='same')(input_spec)
        encoded_spec = BatchNormalization()(encoded_spec)
        encoded_1st = LeakyReLU(alpha=0.2)(encoded_spec)

        encoded_spec = Conv2D(filters=32,
                              kernel_size=(5, 5),
                              strides=2,
                              padding='same')(encoded_1st)
        encoded_spec = BatchNormalization()(encoded_spec)
        encoded_2nd = LeakyReLU(alpha=0.2)(encoded_spec)

        encoded_spec = Conv2D(filters=64,
                              kernel_size=(5, 5),
                              strides=2,
                              padding='same')(encoded_2nd)
        encoded_spec = BatchNormalization()(encoded_spec)
        encoded_3rd = LeakyReLU(alpha=0.2)(encoded_spec)

        encoded_spec = Conv2D(filters=128,
                              kernel_size=(5, 5),
                              strides=2,
                              padding='same')(encoded_3rd)
        encoded_spec = BatchNormalization()(encoded_spec)
        encoded_4th = LeakyReLU(alpha=0.2)(encoded_spec)

        encoded_spec = Conv2D(filters=256,
                              kernel_size=(5, 5),
                              strides=2,
                              padding='same')(encoded_4th)
        encoded_spec = BatchNormalization()(encoded_spec)
        encoded_5th = LeakyReLU(alpha=0.2)(encoded_spec)

        encoded_spec = Conv2D(filters=512,
                              kernel_size=(5, 5),
                              strides=2,
                              padding='same')(encoded_5th)
        encoded_spec = BatchNormalization()(encoded_spec)
        encoded_result = LeakyReLU(alpha=0.2)(encoded_spec)
        #decode part:
        decoded_spec = Conv2DTranspose(filters=256,
                                       kernel_size=(5, 5),
                                       strides=2,
                                       padding='same')(encoded_result)
        decoded_spec = BatchNormalization()(decoded_spec)
        decoded_spec = Activation('relu')(decoded_spec)
        decoded_spec = Dropout(0.5)(decoded_spec)
        decoded_spec = Concatenate()([decoded_spec, encoded_5th])

        decoded_spec = Conv2DTranspose(filters=128,
                                       kernel_size=(5, 5),
                                       strides=2,
                                       padding='same')(decoded_spec)
        decoded_spec = BatchNormalization()(decoded_spec)
        decoded_spec = Activation('relu')(decoded_spec)
        decoded_spec = Dropout(0.5)(decoded_spec)
        decoded_spec = Concatenate()([decoded_spec, encoded_4th])

        decoded_spec = Conv2DTranspose(filters=64,
                                       kernel_size=(5, 5),
                                       strides=2,
                                       padding='same')(decoded_spec)
        decoded_spec = BatchNormalization()(decoded_spec)
        decoded_spec = Activation('relu')(decoded_spec)
        decoded_spec = Dropout(0.5)(decoded_spec)
        decoded_spec = Concatenate()([decoded_spec, encoded_3rd])

        decoded_spec = Conv2DTranspose(filters=32,
                                       kernel_size=(5, 5),
                                       strides=2,
                                       padding='same')(decoded_spec)
        decoded_spec = BatchNormalization()(decoded_spec)
        decoded_spec = Activation('relu')(decoded_spec)
        #decoded_spec = Dropout(0.5)(decoded_spec)
        decoded_spec = Concatenate()([decoded_spec, encoded_2nd])

        decoded_spec = Conv2DTranspose(filters=16,
                                       kernel_size=(5, 5),
                                       strides=2,
                                       padding='same')(decoded_spec)
        decoded_spec = BatchNormalization()(decoded_spec)
        decoded_spec = Activation('relu')(decoded_spec)
        #decoded_spec = Dropout(0.5)(decoded_spec)
        decoded_spec = Concatenate()([decoded_spec, encoded_1st])

        decoded_spec = Conv2DTranspose(filters=1,
                                       kernel_size=(5, 5),
                                       strides=2,
                                       padding='same')(decoded_spec)
        decoded_spec = BatchNormalization()(decoded_spec)
        decoded_mask = Activation('sigmoid')(decoded_spec)
        #decoded_spec = Dropout(0.5)(decoded_spec)
        #decoded_spec = Concatenate()([decoded_spec,encoded_5th])
        output_spec = Multiply()([input_spec, decoded_mask])

        self.model = Model(inputs=input_spec, outputs=output_spec)
        nadam = Nadam(lr=0.002,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-08,
                      schedule_decay=0.004)
        #self.model = Model(input_img, decoded)
        self.model.summary()
        self.model.compile(optimizer=nadam,
                           loss='mae',
                           metrics=['mse', 'mae', 'acc'])
        print(self.model.get_config())
Example #18
def unet_cell(inputs,
              transpose_conv=False,
              num_filters=16,
              kernel_size=3,
              strides=1,
              kernel_initializer='glorot_uniform',
              kernel_regularizer=None,
              activation='leaky_relu',
              batch_normalization=True,
              conv_first=True):
    """2D Convolution -> Batch Normalization -> Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer

        ## Conv2D features:
        transpose_conv (bool): use Conv2DTranspose instead of Conv2D
        num_filters (int): number of filters used by Conv2D
        kernel_size (int): square kernel dimension
        strides (int): square stride dimension
        kernel_initializer (string): method used to initialize kernel
        kernel_regularizer (keras.regularizers.Regularizer): 
            method used to constrain (regularize) kernel values or None
        
        ## Other cell features
        activation (string): name of activation function to be used or None
        batch_normalization (bool): whether to use batch normalization
        conv_first (bool): conv -> bn         -> activation, if True; 
                           bn   -> activation -> conv,       if False

    # Returns
        x (tensor): tensor as input to the next layer
    """

    # Validate arguments
    if kernel_regularizer is not None:
        if not isinstance(kernel_regularizer, Regularizer):
            raise TypeError("Argument `kernel_regularizer` must be "
                            "type %s or None." % repr(Regularizer))

    # Determine which convolutional layer to use:
    if transpose_conv:
        conv = Conv2DTranspose(
            num_filters,
            kernel_size=kernel_size,  # TODO: Check what kernel_size, strides to use
            strides=strides,
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)
    else:
        conv = Conv2D(
            num_filters,
            kernel_size=kernel_size,
            strides=strides,
            padding='same',  # This is not optional for our Unet.
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)

    # Determine which activation function to use:
    if isinstance(activation, str):
        if activation.lower() == 'leaky_relu':
            activation_fn = LeakyReLU(alpha=LEAKY_RELU_ALPHA)
        else:
            activation_fn = Activation(activation)
    else:
        activation_fn = None

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization(momentum=BATCH_NORM_MOMENTUM)(x)
        if activation_fn is not None:
            x = activation_fn(x)
    else:
        if batch_normalization:
            x = BatchNormalization(momentum=BATCH_NORM_MOMENTUM)(x)
        if activation_fn is not None:
            x = activation_fn(x)
        x = conv(x)
    return x
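
A hedged usage sketch of stacking these cells (assumes unet_cell above, its Keras imports, and the module constants LEAKY_RELU_ALPHA and BATCH_NORM_MOMENTUM are all in scope):

inputs = Input(shape=(128, 128, 1))
x = unet_cell(inputs, num_filters=16)               # conv -> BN -> LeakyReLU
x = unet_cell(x, num_filters=32, strides=2)         # strided conv downsamples 2x
x = unet_cell(x, transpose_conv=True, num_filters=16,
              strides=2)                            # transpose conv upsamples back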
Example #19
        image_type_eem = os.environ['EEM_OCC']

    #get input image shape
    (_, image_height, image_width) = get_data(file_list[0], image_type).shape

    #load or build the model

    input_img = Input(shape=(1, image_height, image_width))

    #encoder
    x = Conv2D(8, (5, 5),
               padding='same',
               data_format='channels_first',
               use_bias=False)(input_img)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.2)(x)
    x = MaxPooling2D((5, 5), padding='same', data_format='channels_first')(x)
    logging.debug(str(x.shape))

    x = Conv2D(8, (4, 4),
               padding='same',
               data_format='channels_first',
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.2)(x)

    x = MaxPooling2D((2, 2), padding='same', data_format='channels_first')(x)
    logging.debug(str(x.shape))
def get_denet(input_shape,
              n_classes,
              sr=16000,
              before_pooling=True,
              dropout=0.3):

    inp = Input(input_shape)

    # SincNet Layer
    x = TimeDistributed(SincConv1D(80, 251, sr))(inp)

    attention_layer = DELayer(sum_channels=True, dropout=dropout)

    # Attention before pooling
    if before_pooling:
        x = TimeDistributed(attention_layer)(x)

    x = TimeDistributed(MaxPooling1D(pool_size=3))(x)
    x = TimeDistributed(LeakyReLU())(x)
    if dropout != 0:
        x = TimeDistributed(SpatialDropout1D(dropout))(x)

    # Attention after the full layer
    if not before_pooling:
        x = TimeDistributed(attention_layer)(x)

    # First Conv Layer
    x = TimeDistributed(Conv1D(60, 5, padding='valid'))(x)
    x = TimeDistributed(MaxPooling1D(pool_size=3))(x)
    x = TimeDistributed(LayerNorm())(x)
    x = TimeDistributed(LeakyReLU())(x)
    if dropout != 0:
        x = TimeDistributed(SpatialDropout1D(dropout))(x)

    # Second Conv Layer
    x = TimeDistributed(Conv1D(60, 5, padding='valid'))(x)
    x = TimeDistributed(MaxPooling1D(pool_size=3))(x)
    x = TimeDistributed(LayerNorm())(x)
    x = TimeDistributed(LeakyReLU())(x)
    if dropout != 0:
        x = TimeDistributed(SpatialDropout1D(dropout))(x)

    # Flatten
    x = TimeDistributed(Flatten())(x)

    # BGRU
    gru_layer = GRU(2048,
                    activation="tanh",
                    recurrent_activation="hard_sigmoid",
                    dropout=dropout,
                    return_sequences=True)
    x = Bidirectional(gru_layer, merge_mode="sum")(x)

    # MLP
    x = TimeDistributed(Dense(1024))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(LeakyReLU())(x)
    if dropout != 0:
        x = TimeDistributed(Dropout(dropout))(x)
    x = TimeDistributed(Dense(512))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(LeakyReLU())(x)
    if dropout != 0:
        x = TimeDistributed(Dropout(dropout))(x)

    # classes
    x = TimeDistributed(Dense(n_classes, activation='softmax'))(x)

    return Model(inputs=inp, outputs=x)
def __combine(layer, filters, strides):
    x = Conv2D(filters=filters, kernel_size=3, strides=strides, padding='same')(layer)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    return x
Example #22
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((-1, img_dim, img_dim, 1))
x_test = x_test.reshape((-1, img_dim, img_dim, 1))
num_classes = len(np.unique(y_train))

x = Input(shape=(img_dim, img_dim, 1))
h = x

for i in range(2):
    filters *= 2
    h = Conv2D(filters=filters,
               kernel_size=3,
               strides=2,
               padding='same')(h)
    h = LeakyReLU(0.2)(h)
    h = Conv2D(filters=filters,
               kernel_size=3,
               strides=1,
               padding='same')(h)
    h = LeakyReLU(0.2)(h)


h_shape = K.int_shape(h)[1:]
h = Flatten()(h)
z_mean = Dense(latent_dim)(h) 
z_log_var = Dense(latent_dim)(h) 

clvae_encoder = Model(x, z_mean) 

Example #23
    def call(self, inputs):
        """
        Creates the layer as a Keras graph.

        Note that the inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

        There are three inputs required, the node features, the output
        indices (the nodes that are to be selected in the final layer)
        and the graph adjacency matrix

        Notes:
            This does not add self loops to the adjacency matrix.
            The output indices are only used when ``final_layer=True``

        Args:
            inputs (list): list of inputs with 3 items:
            node features (size 1 x N x F),
            output indices (size 1 x M),
            graph adjacency matrix (size N x N),
            where N is the number of nodes in the graph,
                  F is the dimensionality of node features
                  M is the number of output nodes
        """
        X = inputs[0]  # Node features (1 x N x F)
        out_indices = inputs[1]  # output indices (1 x M)
        A = inputs[2]  # Adjacency matrix (N x N)

        batch_dim, n_nodes, _ = K.int_shape(X)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        else:
            # Remove singleton batch dimension
            X = K.squeeze(X, 0)
            out_indices = K.squeeze(out_indices, 0)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(
                features, attention_kernel[0])  # (N x 1), [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(
                features, attention_kernel[1])  # (N x 1), [a_2]^T [Wh_j]

            # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
            dense = attn_for_self + K.transpose(
                attn_for_neighs)  # (N x N) via broadcasting

            # Add nonlinearity
            dense = LeakyReLU(alpha=0.2)(dense)

            # Mask values before activation (Vaswani et al., 2017)
            # YT: this only works for 'binary' A, not for 'weighted' A!
            # YT: if A does not have self-loops, the node itself will be masked, so A should have self-loops
            # YT: this is ensured by setting the diagonal elements of A tensor to 1 above
            mask = -10e9 * (1.0 - A)
            dense += mask

            # Apply softmax to get attention coefficients
            dense = K.softmax(dense, axis=1)  # (N x N), Eq. 3 of the paper

            # Apply dropout to features and attention coefficients
            dropout_feat = Dropout(self.in_dropout_rate)(features)  # (N x F')
            dropout_attn = Dropout(self.attn_dropout_rate)(dense)  # (N x N)

            # Linear combination with neighbors' features [YT: see Eq. 4]
            node_features = K.dot(dropout_attn, dropout_feat)  # (N x F')

            if self.use_bias:
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == "concat":
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # (N x F')

        # Nonlinear activation function
        output = self.activation(output)

        # On the final layer we gather the nodes referenced by the indices
        if self.final_layer:
            output = K.gather(output, out_indices)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)

        return output
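
Since the comments above note that the mask only works when A has self-loops, here is a minimal sketch of preparing such a dense adjacency matrix (the 3-node adjacency below is an illustrative assumption):

import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]], dtype=np.float32)  # hypothetical binary adjacency (N x N)
adj = np.clip(adj + np.eye(3, dtype=np.float32), 0.0, 1.0)  # set diagonal to 1 (self-loops)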
Example #24
def activation_layer(use_leaky_relu=True, leaky_alpha=0.1):
    if use_leaky_relu:
        return LeakyReLU(alpha=leaky_alpha)
    else:
        return Activation('relu')
Example #25
 def conv_layer(layer_input, filters, kernel_size=5, strides=2):
     d = Conv1D(filters, kernel_size, strides=strides,
                padding='same')(layer_input)
     d = LeakyReLU(alpha=0.20)(d)
     d = InstanceNormalization()(d)
     return d
Example #26
x_train = x_train.reshape((x_train.shape[0], ) +
                          (32, 32, 3)).astype('float32') / 255.0

#training params
num_iter = 10000
batch_size = 20

#model params
latent_dim = 32
height, width, channels = 32, 32, 3

#generator architecture
generator_input = Input(shape=(latent_dim, ))

x = Dense(128 * 16 * 16)(generator_input)
x = LeakyReLU()(x)
x = Reshape((16, 16, 128))(x)

x = Conv2D(256, 5, padding='same')(x)
x = LeakyReLU()(x)

x = Conv2DTranspose(256, 4, strides=2, padding='same')(x)
x = LeakyReLU()(x)

x = Conv2D(256, 5, padding='same')(x)
x = LeakyReLU()(x)
x = Conv2D(256, 5, padding='same')(x)
x = LeakyReLU()(x)

x = Conv2D(channels, 7, activation='tanh', padding='same')(x)  #NOTE: tanh
generator = Model(generator_input, x)
 def add_common_layers(y):
     y = BatchNormalization()(y)
     y = LeakyReLU()(y)
     return y
Example #28
    def build_discriminator(self):

        img_shape = (self.img_size[0], self.img_size[1], self.channels)

        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=self.kernel_size,
                   strides=2,
                   input_shape=img_shape,
                   padding="same"))  # 256x256 -> 128x128
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))

        model.add(
            Conv2D(64, kernel_size=self.kernel_size, strides=2,
                   padding="same"))  # 128x128-> 64x64
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))
        model.add(BatchNormalization())

        model.add(
            Conv2D(128,
                   kernel_size=self.kernel_size,
                   strides=2,
                   padding="same"))  # 64x64 -> 32x32
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))
        model.add(BatchNormalization())

        model.add(
            Conv2D(256,
                   kernel_size=self.kernel_size,
                   strides=2,
                   padding="same"))  # 32x32 -> 16x16
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))
        model.add(BatchNormalization())

        model.add(
            Conv2D(256,
                   kernel_size=self.kernel_size,
                   strides=2,
                   padding="same"))  # 16x16 -> 8x8
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))
        model.add(BatchNormalization())

        model.add(Flatten())
        # model.add(Dense(1024))
        # model.add(LeakyReLU(alpha=0.2))
        # model.add(BatchNormalization())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=img_shape)
        validity = model(img)

        return Model(img, validity)
# adapted from https://github.com/MSC-BUAA/Keras-progressive_growing_of_gans

import keras.backend as K
from keras.layers import Layer
from keras.layers.merge import _Merge
from keras import initializers
from keras.layers import Dense, Conv2D, LeakyReLU, Reshape
import tensorflow as tf
import numpy as np

w_init = initializers.random_normal(0, 1)
activation = LeakyReLU(alpha=0.2)


class Bias(Layer):
    def __init__(self, initializer='zeros', **kwargs):
        super(Bias, self).__init__(**kwargs)
        self.initializer = initializers.get(initializer)

    def build(self, input_shape):
        self.bias = self.add_weight(name='{}_bias'.format(self.name),
                                    shape=(input_shape[-1], ),
                                    initializer=self.initializer,
                                    trainable=True)

    def call(self, x):
        return K.bias_add(x, self.bias, data_format="channels_last")


def DenseBlock(x,
               size,
Example #30
def make_latent_samples(n_samples, sample_size):
    return np.random.normal(loc=0, scale=1, size=(n_samples, sample_size))


# The sample size is a hyperparameter.  Below, we use a vector of 100 randomly generated numbers as a sample.

# In[7]:

make_latent_samples(1, 100)  # generates one sample

# The generator is a simple fully connected neural network with one hidden layer using the leaky ReLU activation.  It takes one latent sample (100 values) and produces 784 (= 28x28) data points which represent a digit image.

# In[8]:

generator = Sequential([
    Dense(128, input_shape=(100, )),
    LeakyReLU(alpha=0.01),
    Dense(784),
    Activation('tanh')
], name='generator')

generator.summary()

# The last activation is **tanh**.  According to [[4]](#ref4), this works the best.  It also means that we need to rescale the MNIST images to be between -1 and 1.
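#
# For instance, a minimal sketch of that rescaling (an illustrative assumption;
# x_train here stands for the raw 0-255 MNIST pixel array, which this snippet
# does not show being loaded):

x_train = (x_train.astype('float32') - 127.5) / 127.5  # scale [0, 255] to [-1, 1]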

# Initially, the generator can only produce garbages.
#
# <img src='../images/gan_mnist/generator_bad.png' width='65%'/>
#
# As such, the generator needs to learn how to generate realistic hand-written images from the latent sample (randomly generated numbers).
#