def create_discriminator(img_shape=(28, 28, 1)):
    """Build a DCGAN-style discriminator for ``img_shape`` images.

    Four stride-2 Conv2D blocks (32 -> 64 -> 128 -> 256 filters), each with
    LeakyReLU and Dropout (BatchNorm from the second block on), followed by
    a flatten and a single sigmoid unit scoring the image as real (1) or
    fake (0).

    Args:
        img_shape: Channels-last input image shape. Defaults to 28x28x1
            (e.g. MNIST).

    Returns:
        An uncompiled Keras ``Model`` mapping an image to a real/fake
        probability. Prints a layer summary as a side effect.
    """
    image = Input(shape=img_shape)

    x = Conv2D(32, kernel_size=3, strides=2, padding='same')(image)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)

    x = Conv2D(64, kernel_size=3, strides=2, padding='same')(x)
    # Pad bottom/right by one so the odd spatial size divides cleanly
    # under the next stride-2 convolution.
    x = ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)

    x = Conv2D(128, kernel_size=3, strides=2, padding='same')(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)

    x = Conv2D(256, kernel_size=3, strides=2, padding='same')(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)

    x = Flatten()(x)
    output = Dense(1, activation='sigmoid')(x)

    # Fixes vs. original: removed the commented-out Dense-based variant
    # (dead code), and stopped reusing one name for both the intermediate
    # tensor and the final Model object.
    model = Model(inputs=image, outputs=output)
    model.summary()
    return model
def create_discriminator():
    """Build and compile a convolutional discriminator for 28x28x1 images.

    Three stride-2 Conv2D/LeakyReLU/Dropout blocks, then a flatten and a
    single sigmoid unit. Compiled with Adam, binary cross-entropy, and an
    accuracy metric; prints a layer summary as a side effect.

    NOTE(review): this redefines ``create_discriminator`` declared earlier
    in the file — at import time this later definition wins. Consider
    renaming one of them (kept as-is here to preserve the interface).

    Returns:
        A compiled Keras ``Model`` mapping an image to a real/fake
        probability.
    """
    inp = Input(shape=(28, 28, 1))

    # NOTE(review): 65 filters is an unusual width — possibly a typo for
    # 64. Kept byte-identical to preserve behavior; confirm with author.
    x = inp
    for width in (65, 256, 512):
        x = Conv2D(width, 5, strides=(2, 2), padding='same')(x)
        x = LeakyReLU()(x)
        x = Dropout(0.4)(x)

    score = Dense(1, activation='sigmoid')(Flatten()(x))

    disc = Model(inp, score)
    disc.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    disc.summary()
    return disc
def build_critic(self):
    """Build the conditional critic network.

    Four Conv1D blocks (16 -> 32 -> 64 -> 128 filters, kernel 2, 'same'
    padding) with LeakyReLU and Dropout (BatchNorm from the second block
    on) process the sequence input; the flattened features are then
    concatenated with the condition vector and scored by a single linear
    unit (no activation — a critic score, not a probability, as used in
    WGAN-style training).

    Reads ``self.inp_cols`` for the sequence length and the module-level
    ``NUM_CONDS`` for the condition-vector width.

    Returns:
        An uncompiled Keras ``Model`` mapping ``[sequence, condition]``
        to a scalar score. Prints a layer summary as a side effect.
    """
    num_feat = 1
    max_len = self.inp_cols

    mus = Input(shape=(max_len, num_feat))
    condition_tensor = Input(shape=(NUM_CONDS,))

    x = Conv1D(16, kernel_size=2, padding="same")(mus)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)

    for width in (32, 64, 128):
        x = Conv1D(width, kernel_size=2, padding="same")(x)
        x = BatchNormalization(momentum=0.8)(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Dropout(0.25)(x)

    x = Flatten()(x)
    x = Concatenate(axis=1)([x, condition_tensor])
    validity = Dense(1)(x)

    # Fix: the original built a Model, called it on its own input tensors,
    # and wrapped that call in a second Model — the nested wrapper computes
    # the identical function. Build the critic once and return it.
    critic = Model([mus, condition_tensor], validity)
    critic.summary()
    return critic