Example #1
    def __init__(self,
                 n_filters,
                 strides=1,
                 downsample=None,
                 regularization=0.01):
        """Initialize the BottleneckResidualUnit module.

    Args:
      n_filters: (int) the number of output filters.
      strides: (int)  the strides of the convolution.
      downsample: a function to down-sample the feature maps.
      regularization: L2 regularization factor for layer weights.
    """
        super(BottleneckResidualUnit, self).__init__()
        self.bn1 = BatchNormalization()
        self.conv1 = Conv2D(n_filters,
                            1,
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=regularizers.l2(regularization))
        self.bn2 = BatchNormalization()
        self.conv2 = Conv2D(n_filters,
                            3,
                            strides=strides,
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=regularizers.l2(regularization))
        self.bn3 = BatchNormalization()
        self.conv3 = Conv2D(n_filters * self.expansion,
                            1,
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=regularizers.l2(regularization))
        self.leaky_relu = LeakyReLU()
        self.downsample = downsample
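The snippet assumes surrounding context that the example page does not show: the Keras imports and a class-level `expansion` attribute used by `conv3`. A minimal scaffold under those assumptions (the value 4 is the conventional ResNet bottleneck expansion factor, not confirmed by the snippet):

from tensorflow.keras import Model, regularizers
from tensorflow.keras.layers import BatchNormalization, Conv2D, LeakyReLU

class BottleneckResidualUnit(Model):
    expansion = 4  # assumed: conventional ResNet bottleneck expansion factor

    # __init__ as shown above; a call() method applying the pre-activation
    # (BN -> LeakyReLU -> Conv) sequence plus the shortcut add is also required.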
Example #2
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0

    for conv in convs:
        # Remember the activation two layers from the end for the residual add.
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1:
            # Darknet pads on the left and top, unlike TensorFlow's 'same' padding.
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=not conv['bnorm'])(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001,
                                   name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x
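A hedged usage sketch in the style this helper comes from (a Darknet/YOLOv3-type residual block; filter counts and layer indices are illustrative): the skip connection is captured two convolutions before the end, so the last two convolutions must preserve the spatial size for the residual add.

from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     LeakyReLU, ZeroPadding2D, add)

input_image = Input(shape=(416, 416, 3))
x = _conv_block(input_image, [
    {'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
    {'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
    {'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
    {'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3},
])  # skip_connection is captured after layer_idx 1; shapes match at the add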
Example #3
File: DCGan.py  Project: olonok69/GAN
def define_generator(latent_dim):
    model = Sequential()
    # foundation for a 4x4 image
    n_nodes = 256 * 4 * 4
    model.add(Dense(n_nodes, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((4, 4, 256)))
    # upsample to 8x8
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # upsample to 16x16
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # upsample to 32x32
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # output layer: 3-channel image with values in [-1, 1]
    model.add(Conv2D(3, (3, 3), activation='tanh', padding='same'))
    return model
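A hedged usage sketch: the latent size is an assumption (100 is common but not fixed by the example); the tanh output means generated pixels land in [-1, 1].

import numpy as np

latent_dim = 100  # assumed latent size
generator = define_generator(latent_dim)
noise = np.random.randn(1, latent_dim)
fake_image = generator.predict(noise)  # shape (1, 32, 32, 3)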
Example #4
def neural_network(input_shape):
    inputs = keras.Input(shape=input_shape)

    # Layer 1
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_1')(inputs)
    x = Conv2D(32, kernel_size=(5, 5), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(4, 4))(x)

    # Layer 2
    x = Conv2D(64, kernel_size=(5, 5), padding='same', name='Conv2D_2')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_3')(x)

    x = Flatten(name='Flatten')(x)

    # Layer 3: an extra 256-unit Dense block was disabled in the source.

    # Layer 4
    x = Dense(128, name='Dense_2')(x)
    x = BatchNormalization(name='BatchNormalization_3')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.5, name='Dropout_2')(x)

    # Layer 5
    x = Dense(128, name='Dense_3')(x)
    x = BatchNormalization(name='BatchNormalization_4')(x)
    x = LeakyReLU(alpha=0.1)(x)

    outputs = Dense(1, activation='sigmoid', name='Dense_4')(x)

    model = Model(inputs, outputs)
    return model
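A hedged usage sketch; the input shape is a placeholder (any spatial size divisible by 16 works, since the pooling stages shrink it by 2 x 4 x 2):

model = neural_network((128, 128, 1))  # placeholder: 128x128 grayscale input
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()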
Example #5
File: DCGan.py  Project: olonok69/GAN
def define_discriminator(in_shape=(32, 32, 3)):
    model = Sequential()
    # normal
    model.add(Conv2D(64, (3, 3), padding='same', input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    # downsample to 16x16
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # downsample to 8x8
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # downsample to 4x4
    model.add(Conv2D(256, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # classifier
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    # compile model ('lr' was renamed to 'learning_rate' in newer Keras)
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
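A hedged usage sketch: one update of the already-compiled discriminator on a placeholder batch; in a real GAN loop the random tensor would be real images scaled to [-1, 1] or generator output.

import numpy as np

discriminator = define_discriminator()
images = np.random.uniform(-1, 1, (16, 32, 32, 3))  # placeholder batch
labels = np.ones((16, 1))                           # 1 = "real"
loss, acc = discriminator.train_on_batch(images, labels)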
Example #6
    def __init__(self, regularization=0.01):
        super(SiameseEncoder, self).__init__()
        self.inplanes = 64
        # Siamese branch.
        self.siamese = Sequential([
            Conv2D(64, 7, strides=2, padding='same', use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 128, strides=2, regularization=regularization),
            self._make_resblock(2, 128, strides=2, regularization=regularization),
            self._make_resblock(2, 256, strides=2, regularization=regularization),
        ])
        # Merged main branch.
        self.mainstream = Sequential([
            self._make_resblock(2, 256, strides=2, regularization=regularization),
            self._make_resblock(2, 256, strides=2, regularization=regularization),
        ])
        self.bn = BatchNormalization()
        self.leaky_relu = LeakyReLU()
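Examples #6 and #7 both call a `_make_resblock` helper that neither snippet shows. Judging from Example #1, it plausibly stacks `BottleneckResidualUnit` layers with only the first unit striding; a hedged sketch, not the author's confirmed implementation:

    def _make_resblock(self, n_blocks, n_filters, strides=1, regularization=0.01):
        """Hypothetical helper: stack n_blocks bottleneck units; only the first strides."""
        blocks = [BottleneckResidualUnit(n_filters, strides=strides,
                                         regularization=regularization)]
        for _ in range(n_blocks - 1):
            blocks.append(BottleneckResidualUnit(n_filters,
                                                 regularization=regularization))
        return Sequential(blocks)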
Example #7
    def __init__(self, n_out, regularization=0.01):
        """Initialize the DirectionNet.

        Args:
          n_out: (int) the number of output distributions.
          regularization: L2 regularization factor for layer weights.
        """
        super(DirectionNet, self).__init__()
        self.encoder = SiameseEncoder()
        self.inplanes = self.encoder.inplanes
        self.decoder_block1 = Sequential([
            Conv2D(256, 3, use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 128, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block2 = Sequential([
            Conv2D(128, 3, use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 64, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block3 = Sequential([
            Conv2D(64, 3, use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 32, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block4 = Sequential([
            Conv2D(32, 3, use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 16, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block5 = Sequential([
            Conv2D(16, 3, use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 8, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.decoder_block6 = Sequential([
            Conv2D(8, 3, use_bias=False,
                   kernel_regularizer=regularizers.l2(regularization)),
            self._make_resblock(2, 4, regularization=regularization),
            BatchNormalization(),
            LeakyReLU()
        ])
        self.down_channel = Conv2D(
            n_out, 1, kernel_regularizer=regularizers.l2(regularization))
Example #8
# NOTE: the beginning of this example is truncated; a pretrained base model
# (not shown) is constructed here, its constructor ending with:
#                   input_tensor=Input(shape=(224, 224, 3)))

print("baseModel.output:", baseModel.output)
print("baseModel.output.shape:", baseModel.output.shape)

# construct the head of the model that will be placed on top of the base model
headModel = baseModel.output
headModel = Conv2D(
    224,
    kernel_size=[5, 5],
    strides=[2, 2],
    padding="same",
    kernel_initializer=keras.initializers.TruncatedNormal(stddev=init_stddev),
)(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = Conv2D(
    448,
    kernel_size=[5, 5],
    strides=[2, 2],
    padding="same",
    kernel_initializer=keras.initializers.TruncatedNormal(stddev=init_stddev),
)(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = AveragePooling2D(pool_size=(2, 2))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = Dense(32, activation="relu")(headModel)