Example #1
def build_generator(img_shape, num_classes, latent_dim):

    model = Sequential()

    model.add(Dense(256, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))

    model.summary()

    noise = Input(shape=(latent_dim, ))
    label = Input(shape=(1, ), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))

    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)
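
A minimal usage sketch (not part of the original listing; the shapes and values below are assumptions) showing how this conditional generator is typically called:

import numpy as np

latent_dim, num_classes, img_shape = 100, 10, (28, 28, 1)  # assumed values
generator = build_generator(img_shape, num_classes, latent_dim)

noise = np.random.normal(0, 1, (num_classes, latent_dim))
labels = np.arange(num_classes).reshape(-1, 1)    # one image per class
gen_imgs = generator.predict([noise, labels])     # -> shape (10, 28, 28, 1)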
Example #2
    def build_critic(self):

        model = Sequential()

        model.add(
            Conv2D(16,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example #3
def build_discriminator(img_shape, num_classes, optimizer):

    model = Sequential()
    model.add(Dense(512, input_dim=np.prod(img_shape)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=img_shape)
    label = Input(shape=(1, ), dtype='int32')

    label_embedding = Flatten()(Embedding(num_classes,
                                          np.prod(img_shape))(label))
    flat_img = Flatten()(img)

    model_input = multiply([flat_img, label_embedding])

    validity = model(model_input)

    # Compile the discriminator
    discriminator_model = Model([img, label], validity)
    discriminator_model.compile(loss='binary_crossentropy',
                                optimizer=optimizer,
                                metrics=['accuracy'])

    return discriminator_model
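
The listing stops at the compiled discriminator; a hedged sketch of how it is usually stacked with the generator from Example #1 into a combined CGAN model (the wiring and hyperparameters here are assumptions, not shown in the source):

from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam

optimizer = Adam(0.0002, 0.5)              # assumed CGAN defaults
discriminator = build_discriminator(img_shape, num_classes, optimizer)
generator = build_generator(img_shape, num_classes, latent_dim)

noise = Input(shape=(latent_dim,))
label = Input(shape=(1,), dtype='int32')
img = generator([noise, label])

discriminator.trainable = False            # freeze D while training G
validity = discriminator([img, label])

combined = Model([noise, label], validity)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)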
Example #4
File: nets.py  Project: OrangeBai/C3DLab
def discriminator(image_shape):
    dis_input = Input(shape=image_shape)

    model = Conv2D(filters=64, kernel_size=3, strides=1,
                   padding="same")(dis_input)
    model = LeakyReLU(alpha=0.2)(model)

    model = discriminator_block(model, 64, 3, 2)
    model = discriminator_block(model, 128, 3, 1)
    model = discriminator_block(model, 128, 3, 2)
    model = discriminator_block(model, 256, 3, 1)
    model = discriminator_block(model, 256, 3, 2)
    model = discriminator_block(model, 512, 3, 1)
    model = discriminator_block(model, 512, 3, 2)

    model = Flatten()(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    discriminator_model = Model(inputs=dis_input, outputs=model)

    return discriminator_model
Example #5
def build_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(96, kernel_size=(11, 11), input_shape=(227, 227, 1),
                            strides=(4, 4), activation='linear', padding='valid',
                            name='conv1'))
    model.add(LeakyReLU(alpha=0.01, name='leaky_relu1'))
    model.add(layers.MaxPooling2D(pool_size=3, strides=2, name='pool1'))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(256, kernel_size=(5, 5), activation='linear',
                            padding='valid', name='conv2'))
    model.add(LeakyReLU(alpha=0.01, name='leaky_relu2'))
    model.add(layers.MaxPooling2D(pool_size=3, strides=2, name='pool2'))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(384, kernel_size=(3, 3), padding='valid', name='conv3'))
    model.add(LeakyReLU(alpha=0.01, name='leaky_relu3'))

    model.add(layers.Conv2D(384, kernel_size=(3, 3), padding='valid', name='conv4'))
    model.add(LeakyReLU(alpha=0.01, name='leaky_relu4'))

    model.add(layers.Conv2D(256, kernel_size=(3, 3), padding='valid', name='conv5'))
    model.add(LeakyReLU(alpha=0.01, name='leaky_relu5'))
    model.add(layers.MaxPooling2D(pool_size=2, strides=2, name='pool3'))
    
    model.add(layers.Flatten())
    model.add(layers.Dense(1024, name='dense1'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1024, name='dense2'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, name='dense3', activation='softmax'))

    return model
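
The function returns an uncompiled classifier; a typical compile step (the loss and optimizer are assumptions, not from the source) would be:

model = build_model()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',  # integer labels assumed
              metrics=['accuracy'])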
Example #6
def create_generator():
    model = Sequential()

    # Reshape input into a 32x32x64 tensor via a fully connected layer
    model.add(Dense(64 * 32 * 32, input_dim=z_dim))
    model.add(Reshape((32, 32, 64)))
    model.add(
        Conv2DTranspose(  # Transposed convolution layer, from 32x32x64 into 64x64x128 tensor
            128,
            kernel_size=3,
            strides=2,
            padding='same'))
    model.add(BatchNormalization())  # Batch normalization
    model.add(LeakyReLU(alpha=0.01))  # Leaky ReLU
    model.add(
        Conv2DTranspose(  # Transposed convolution layer, from 64x64x128 to 128x128x64 tensor
            64,
            kernel_size=3,
            strides=2,
            padding='same'))
    model.add(BatchNormalization())  # Batch normalization
    model.add(LeakyReLU(alpha=0.01))  # Leaky ReLU
    model.add(
        Conv2DTranspose(  # Transposed convolution layer, from 128x128x64 to 256x256x3 tensor
            3,
            kernel_size=3,
            strides=2,
            padding='same'))
    model.add(Activation('tanh'))  # Tanh activation
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    model.summary()
    return model
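
create_generator references two module-level names, z_dim and adam_optimizer, that the excerpt does not define. Plausible stand-ins (assumptions; the same adam_optimizer is used by create_discriminator in Example #7):

from keras.optimizers import Adam

z_dim = 100  # assumed length of the input noise vector

def adam_optimizer():
    # Common GAN defaults: learning rate 0.0002, beta_1 0.5 (assumed).
    return Adam(0.0002, 0.5)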
Example #7
def create_discriminator():

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=3,
               strides=2,
               padding="same",
               input_shape=(256, 256, 3)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    model.summary()
    return model
Example #8
def discriminator_model():
    """Build discriminator architecture."""
    n_layers, use_sigmoid = 3, False
    inputs = Input(shape=input_shape_discriminator)

    x = Conv2D(filters=ndf, kernel_size=(4, 4), strides=2, padding='same')(inputs)
    x = LeakyReLU(0.2)(x)

    nf_mult, nf_mult_prev = 1, 1
    for n in range(n_layers):
        nf_mult_prev, nf_mult = nf_mult, min(2**n, 8)
        x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.2)(x)

    nf_mult_prev, nf_mult = nf_mult, min(2**n_layers, 8)
    x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    x = Conv2D(filters=1, kernel_size=(4, 4), strides=1, padding='same')(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)

    x = Flatten()(x)
    x = Dense(1024, activation='tanh')(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x, name='Discriminator')
    return model
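
discriminator_model relies on two module-level constants outside the excerpt. Reasonable stand-ins (assumptions) for a 256x256 RGB input:

ndf = 64                                   # base number of discriminator filters
input_shape_discriminator = (256, 256, 3)  # assumed input shape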
Example #9
    def build_discriminator(self):
        '''
        This function is used to define the discriminator of model(DCGAN).
        '''
        model = Sequential()
        model.add(Reshape((self.img_size, self.img_size, self.img_channels)))
        model.add(Conv2D(32, kernel_size=5, strides=2, padding='same',
                         use_bias=True, kernel_initializer=RandomNormal(stddev=0.02)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(64, kernel_size=5, strides=2, padding='same',
                         use_bias=True, kernel_initializer=RandomNormal(stddev=0.02)))
        # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.99))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(128, kernel_size=5, strides=2, padding='same',
                         use_bias=True, kernel_initializer=RandomNormal(stddev=0.02)))
        # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.99))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(128, kernel_size=5, strides=2, padding='same',
                         use_bias=True, kernel_initializer=RandomNormal(stddev=0.02)))
        model.add(BatchNormalization(momentum=0.99))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        img = Input(shape = (self.img_size, self.img_size, self.img_channels))
        output_ = model(img)

        return Model(img, output_)
Example #10
def bbox_3D_net(input_shape=(224, 224, 3), vgg_weights=None, freeze_vgg=False, bin_num=6):
    vgg16_model = VGG16(include_top=False, weights=vgg_weights, input_shape=input_shape)

    if freeze_vgg:
        for layer in vgg16_model.layers:
            layer.trainable = False

    x = Flatten()(vgg16_model.output)

    dimension = Dense(512)(x)
    dimension = LeakyReLU(alpha=0.1)(dimension)
    dimension = Dropout(0.5)(dimension)
    dimension = Dense(3)(dimension)
    dimension = LeakyReLU(alpha=0.1, name='dimension')(dimension)

    orientation = Dense(256)(x)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Dropout(0.5)(orientation)
    orientation = Dense(bin_num * 2)(orientation)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Reshape((bin_num, -1))(orientation)
    orientation = Lambda(l2_normalize, name='orientation')(orientation)

    confidence = Dense(256)(x)
    confidence = LeakyReLU(alpha=0.1)(confidence)
    confidence = Dropout(0.5)(confidence)
    confidence = Dense(bin_num, activation='softmax', name='confidence')(confidence)

    model = Model(vgg16_model.input, outputs=[dimension, orientation, confidence])
    return model
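
The Lambda layer above assumes an l2_normalize helper defined elsewhere in the project; in this bin-based orientation head it typically normalizes each (cos, sin) pair to unit length. A sketch (an assumption, not the project's own code):

import tensorflow as tf

def l2_normalize(x):
    # Normalize along the last axis of the (bin_num, 2) orientation tensor
    # so each bin's (cos, sin) pair has unit length.
    return tf.nn.l2_normalize(x, axis=2)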
Example #11
 def f(up, conv):
     concat = concatenate([up, conv], axis=-1)
     conv = Conv2D(filters, strides, padding=padding)(concat)
     conv = LeakyReLU(alpha=0.3)(conv)
     conv = Dropout(0.2)(conv)
     conv = Conv2D(filters, strides, padding=padding)(conv)
     conv = LeakyReLU(alpha=0.3)(conv)
     return conv
Example #12
 def __init__(self, latent_dim, original_dim):
     super(Decoder, self).__init__()
     # decoder sub layers
     self.hidden_layer1 = Dense(
         units=latent_dim, activation='relu', kernel_initializer='he_uniform')
     self.bn1 = BatchNormalization()
     self.leakyr1 = LeakyReLU()
     self.hidden_layer2 = Dense(units=32, activation='relu')
     self.bn2 = BatchNormalization()
     self.leakyr2 = LeakyReLU()
     self.output_layer = Dense(units=original_dim, activation='sigmoid')
Example #13
 def conv_I(self, inputs):
     shortcut = inputs
     inputs = Conv2D(filters=32, kernel_size=1, strides=1, padding='SAME',
                     name='CONV_I', kernel_initializer='glorot_uniform',
                     use_bias=False)(inputs)
     inputs = self.batch_norm(inputs)
     inputs = LeakyReLU(alpha=_LEAKY_RELU)(inputs)
     inputs = Conv2D(filters=64, kernel_size=3, strides=1, padding='SAME',
                     kernel_initializer='glorot_uniform', use_bias=False)(inputs)
     inputs = self.batch_norm(inputs)
     inputs = Add()([inputs, shortcut])
     inputs = LeakyReLU(alpha=_LEAKY_RELU)(inputs)
     return inputs
Example #14
    def __init__(self, input_dim):
        super(Encoder, self).__init__()
        self.output_dim = 16  # bottleneck size

        # encoder sub layers
        self.hidden_layer1 = Dense(
            units=64, activation='relu', kernel_initializer='he_uniform', input_dim=input_dim)
        self.bn1 = BatchNormalization()
        self.leakyr1 = LeakyReLU()
        self.hidden_layer2 = Dense(units=32, activation='relu')
        self.bn2 = BatchNormalization()
        self.leakyr2 = LeakyReLU()
        self.output_layer = Dense(units=self.output_dim, activation='sigmoid')
Example #15
def discriminator_network(shape):
    input_ab = Input(shape=(*shape, 2), name='d_ab_input')
    input_l = Input(shape=(*shape, 1), name='d_l_input')
    net = concatenate([input_l, input_ab], name='d_concat')
    net = Conv2D(64, (4, 4), padding='same', strides=(2, 2), name='d_conv2d_1')(net)  # 112, 112, 64
    net = LeakyReLU(name='leakyrelu_1')(net)
    net = Conv2D(128, (4, 4), padding='same', strides=(2, 2), name='d_conv2d_2')(net)  # 56, 56, 128
    net = LeakyReLU(name='leakyrelu_2')(net)
    net = Conv2D(256, (4, 4), padding='same', strides=(2, 2), name='d_conv2d_3')(net)  # 28, 28, 256
    net = LeakyReLU(name='leakyrelu_3')(net)
    net = Conv2D(512, (4, 4), padding='same', strides=(1, 1), name='d_conv2d_4')(net)  # 28, 28, 512
    net = LeakyReLU(name='leakyrelu_4')(net)
    net = Conv2D(1, (4, 4), padding='same', strides=(1, 1), name='d_conv2d_5')(net)  # 28, 28,1
    return Model([input_ab, input_l], net)
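
A brief usage sketch (the inline shape comments imply a 224x224 input; that value is an assumption):

d = discriminator_network((224, 224))
d.summary()  # expects a (224, 224, 2) ab input and a (224, 224, 1) L input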
Example #16
def create_generator():
    generator = Sequential()
    generator.add(Dense(units=256, input_dim=100))
    generator.add(LeakyReLU(0.2))

    generator.add(Dense(units=512))
    generator.add(LeakyReLU(0.2))

    generator.add(Dense(units=1024))
    generator.add(LeakyReLU(0.2))

    generator.add(Dense(units=784, activation='tanh'))

    generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return generator
Example #17
def build_discriminator(data_dim, num_classes):
    model = Sequential()
    model.add(Dense(31, input_dim=data_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.25))
    model.add(Dense(16))  # input_dim is only honored on the first layer, so it is dropped here
    model.add(LeakyReLU(alpha=0.2))
    
    model.summary()
    trans = Input(shape=(data_dim,))
    features = model(trans)
    valid = Dense(1, activation="sigmoid")(features)
    label = Dense(num_classes+1, activation="softmax")(features)
    return Model(trans, [valid, label])
Example #18
    def build_discriminator(self):
        model = Sequential()

        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()
        
        img = Input(shape=self.img_shape)
        validity = model(img)
        
        return Model(img, validity)
Example #19
def generator_model(shape):
    generator = Sequential()
    generator.add(Dense(units=256, input_dim=shape))
    generator.add(LeakyReLU(0.2))

    generator.add(Dense(units=512))
    generator.add(LeakyReLU(0.2))

    generator.add(Dense(units=1024))
    generator.add(LeakyReLU(0.2))

    generator.add(Dense(units=shape, activation='tanh'))

    generator.compile(loss='binary_crossentropy', optimizer='adam')
    return generator
Example #20
 def conv(self,
          inp,
          filters,
          kernel_size=5,
          strides=2,
          padding="same",
          use_instance_norm=False,
          res_block_follows=False,
          **kwargs):
     """ Convolution Layer"""
     logger.debug(
         "inp: %s, filters: %s, kernel_size: %s, strides: %s, use_instance_norm: %s, "
         "kwargs: %s)", inp, filters, kernel_size, strides,
         use_instance_norm, kwargs)
     name = self.get_name("conv_{}".format(inp.shape[1]))
     if self.use_reflect_padding:
         inp = ReflectionPadding2D(
             stride=strides,
             kernel_size=kernel_size,
             name="{}_reflectionpadding2d".format(name))(inp)
         padding = "valid"
     var_x = self.conv2d(inp,
                         filters,
                         kernel_size=kernel_size,
                         strides=strides,
                         padding=padding,
                         name="{}_conv2d".format(name),
                         **kwargs)
     if use_instance_norm:
         var_x = InstanceNormalization(
             name="{}_instancenorm".format(name))(var_x)
     if not res_block_follows:
         var_x = LeakyReLU(0.1, name="{}_leakyrelu".format(name))(var_x)
     return var_x
Example #21
    def build_model(self):
        inputs = Input((self.patch_height, self.patch_width, 1))
        conv1 = self.encoding_block(32, strides=(3, 3), padding='same')(inputs)
        conv1 = self.se_block(ratio=2)(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = self.encoding_block(64, strides=(3, 3), padding='same')(pool1)
        conv2 = self.se_block(ratio=2)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = self.encoding_block(128, strides=(3, 3), padding='same')(pool2)
        conv3 = self.se_block(ratio=2)(conv3)

        up = UpSampling2D(size=(2, 2))(conv3)
        # NOTE: `decoding_block` and `filters` are undefined in this excerpt;
        # the calls below assume a `self.decoding_block` counterpart to
        # `self.encoding_block`, with filter counts mirroring the encoder.
        conv4 = self.decoding_block(64, strides=(3, 3), padding='same')(up, conv2)
        conv4 = self.se_block(ratio=2)(conv4)
        up1 = UpSampling2D(size=(2, 2))(conv4)
        conv5 = self.decoding_block(32, strides=(3, 3), padding='same')(up1, conv1)
        conv5 = self.se_block(ratio=2)(conv5)

        conv6 = Conv2D(self.num_seg_class + 1, (1, 1), padding='same')(conv5)
        conv6 = LeakyReLU(alpha=0.3)(conv6)
        conv6 = core.Reshape((self.patch_height * self.patch_width,
                              self.num_seg_class + 1))(conv6)

        act = Activation('softmax')(conv6)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint, "model.png"),
                   show_shapes=True)
        self.model = model
Example #22
def block(out,
          nkernels,
          down=True,
          bn=True,
          dropout=False,
          leaky=True,
          normalization=InstanceNormalization):
    if leaky:
        out = LeakyReLU(0.2)(out)
    else:
        out = Activation('relu')(out)
    if down:
        out = ZeroPadding2D((1, 1))(out)
        out = Conv2D(nkernels,
                     kernel_size=(4, 4),
                     strides=(2, 2),
                     use_bias=False)(out)
    else:
        out = Conv2DTranspose(nkernels,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              use_bias=False)(out)
        out = Cropping2D((1, 1))(out)
    if bn:
        out = normalization(axis=-1)(out)
    if dropout:
        out = Dropout(0.5)(out)
    return out
Example #23
 def d_layer(layer_input, filters, f_size=4, bn=True):
     """Discriminator layer"""
     d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     if bn:
         d = BatchNormalization(momentum=0.8)(d)
     return d
Example #24
    def conv_III(self, inputs):
        shortcut = inputs
        for _ in range(4):
            inputs = Conv2D(filters=128, kernel_size=1, strides=1,
                            padding='SAME', kernel_initializer='glorot_uniform', use_bias=False)(inputs)
            inputs = self.batch_norm(inputs)
            inputs = LeakyReLU(alpha=_LEAKY_RELU)(inputs)

            inputs = Conv2D(filters=128, kernel_size=3, strides=1, padding='SAME',
                            kernel_initializer='glorot_uniform', use_bias=False)(inputs)
            inputs = self.batch_norm(inputs)

            inputs = Add()([inputs, shortcut])
            inputs = LeakyReLU(alpha=_LEAKY_RELU)(inputs)
            shortcut = inputs
        return inputs
Example #25
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    # When batch normalization is used, the convolution layer needs no bias.
    no_bias_kwargs = {'use_bias': False}
    # Merge any caller-supplied kwargs on top of the no-bias default.
    no_bias_kwargs.update(kwargs)
    return compose(DarknetConv2D(*args, **no_bias_kwargs),
                   BatchNormalization(), LeakyReLU(alpha=0.1))
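
DarknetConv2D_BN_Leaky depends on a compose helper (and the DarknetConv2D wrapper) defined elsewhere in the YOLOv3 codebase; compose is conventionally left-to-right function composition, roughly:

from functools import reduce

def compose(*funcs):
    # Compose functions left to right: compose(f, g)(x) == g(f(x)).
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)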
Example #26
def discriminator():
    model = Sequential()
    model.add(
        Conv2D(1,
               kernel_size=(5, 5),
               strides=(2, 2),
               padding='same',
               input_shape=(_WIDTH, _HEIGHT, 1)))
    model.add(LeakyReLU())
    model.add(Conv2D(16, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    model.add(LeakyReLU())
    model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1))
    return model
Example #27
    def build_discriminator(self):

        model = Sequential()
        model.add(
            CuDNNLSTM(512, input_shape=self.seq_shape, return_sequences=True))
        model.add(Bidirectional(CuDNNLSTM(512)))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        seq = Input(shape=self.seq_shape)
        validity = model(seq)

        return Model(seq, validity)
Example #28
def discriminator_model(shape):
    discriminator = Sequential()
    discriminator.add(Dense(units=1024, input_dim=shape))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))  #to prevent overfitting

    discriminator.add(Dense(units=512))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))  #to prevent overfitting

    discriminator.add(Dense(units=256))
    discriminator.add(LeakyReLU(0.2))

    discriminator.add(Dense(units=1, activation='sigmoid'))

    discriminator.compile(loss='binary_crossentropy', optimizer='adam')
    return discriminator
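
generator_model (Example #19) and discriminator_model above pair naturally into a simple GAN over flattened 784-dimensional images; a minimal stacking sketch (the wiring is an assumption, not shown in the listing):

from keras.models import Sequential

generator = generator_model(784)
discriminator = discriminator_model(784)
discriminator.trainable = False    # freeze D inside the stacked model

gan = Sequential([generator, discriminator])
gan.compile(loss='binary_crossentropy', optimizer='adam')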
Example #29
 def conv2d(layer_input, filters, f_size=4, bn=True):
     """Layers used during downsampling"""
     d = Conv2D(filters, kernel_size=f_size, strides=2,
                padding='same')(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     if bn:
         d = BatchNormalization(momentum=0.8)(d)
     return d
Example #30
 def __init__(self, fltr, fltr_pre):
     super(EncoderBlock_s_conv, self).__init__()
     self.fltr = fltr
     self.fltr_pre = fltr_pre
     self.fc = layers.Dense(self.fltr_pre)
     self.add = layers.Add()
     self.lrelu = LeakyReLU()
     self.conv = Conv2d(self.fltr)
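
Conv2d here is not the standard Keras layer (that is Conv2D); it is evidently a project-local wrapper. A plausible stand-in (an assumption, included only so the excerpt is self-contained):

from tensorflow.keras import layers

class Conv2d(layers.Layer):
    # Minimal stand-in: 3x3 same-padding convolution with the given filter count.
    def __init__(self, filters):
        super().__init__()
        self.conv = layers.Conv2D(filters, kernel_size=3, padding='same')

    def call(self, inputs):
        return self.conv(inputs)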