# Example 1
    def __init__(self,
                 n_filters,
                 strides=1,
                 downsample=None,
                 regularization=0.01):
        """Initialize the BottleneckResidualUnit module.

        Args:
          n_filters: (int) the number of output filters of the two inner
            convolutions; the final 1x1 conv emits
            `n_filters * self.expansion` filters.
          strides: (int) the strides of the 3x3 convolution.
          downsample: a callable applied to down-sample the feature maps
            (NOTE(review): presumably applied to the shortcut branch when
            shapes differ — confirm in `call`).
          regularization: (float) L2 regularization factor for layer weights.
        """
        super(BottleneckResidualUnit, self).__init__()
        # BN -> 1x1 conv: reduces/keeps channels at n_filters (bottleneck).
        self.bn1 = BatchNormalization()
        self.conv1 = Conv2D(n_filters,
                            1,
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=regularizers.l2(regularization))
        # BN -> 3x3 conv: the only layer that carries the stride.
        self.bn2 = BatchNormalization()
        self.conv2 = Conv2D(n_filters,
                            3,
                            strides=strides,
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=regularizers.l2(regularization))
        # BN -> 1x1 conv: expands channels by the class-level `expansion`
        # factor (declared outside this view).
        self.bn3 = BatchNormalization()
        self.conv3 = Conv2D(n_filters * self.expansion,
                            1,
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=regularizers.l2(regularization))
        self.leaky_relu = LeakyReLU()
        self.downsample = downsample
def get_seq_model():
  """Build and compile a small sequential CNN for binary classification.

  The input shape is (3, W, H) or (W, H, 3) depending on the backend's
  image data format.

  Returns:
    A compiled Keras Sequential model (rmsprop, binary cross-entropy).
  """
  if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
  else:
    input_shape = (img_width, img_height, 3)

  model = Sequential()

  # Three conv -> relu -> max-pool stages; only the first conv declares
  # the input shape.
  for stage, n_filters in enumerate((32, 32, 64)):
    if stage == 0:
      model.add(Conv2D(n_filters, (3, 3), input_shape=input_shape))
    else:
      model.add(Conv2D(n_filters, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

  # Classifier head: FC 64 -> dropout -> 2-unit sigmoid output.
  model.add(Flatten())
  model.add(Dense(64))
  model.add(Activation('relu'))
  model.add(Dropout(0.5))
  model.add(Dense(2))
  model.add(Activation('sigmoid'))

  model.compile(
      loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

  return model
# Example 3
def u_dense_net(input_shape,
                num_db,
                num_channels=64,
                growth_rate=32,
                convs_per_db=3):
    """Build a U-shaped DenseNet: transition-down path, mid block,
    transition-up path with skip concatenations, sigmoid image output.

    Args:
        input_shape: (H, W, C) shape of the input image tensor.
        num_db: odd number (> 1) of DenseBlocks in the whole network.
        num_channels: even initial channel count for the first conv /
            TransitionBlocks.
        growth_rate: filters added by each DenseBlock layer.
        convs_per_db: conv layers per DenseBlock.

    Returns:
        A Keras Model mapping the input image to a 1-channel sigmoid map.
    """
    assert len(
        input_shape
    ) == 3, f"Input shape must have 3 dimension! Received '{input_shape}'!"
    assert (num_db > 1) and (
        num_db % 2 == 1
    ), f"Number of DenseBlocks must be an odd number more than 1! Received '{num_db}'!"
    # In a U-shaped DenseNet with N DenseBlocks, each side has floor(N/2) DenseBlocks
    num_trans_down = num_trans_up = num_db // 2
    assert (input_shape[0] % (2**num_trans_down) == 0) and (
        input_shape[1] % (2**num_trans_down) == 0
    ), f"Dimension of the input shape {input_shape[:2]} must be a multiple of {2**num_trans_down} to preserve the tensor shape after down-scaling and up-scaling"
    assert (num_channels > 0) and (
        num_channels % 2 == 0
    ), f"Number of channels for TransitionBlock must be an even number more than 0! Received '{num_channels}'!"

    _num_channels = num_channels
    img_in = Input(dtype="float32", shape=input_shape, name="image_input")
    # Stem conv before the first DenseBlock.
    x = Conv2D(_num_channels,
               kernel_size=(5, 5),
               activation="relu",
               padding="same")(img_in)
    ############################### Transition down section ###############################
    db_outputs = []
    for i in range(num_trans_down):
        x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)
        # Prepend so db_outputs[i] on the way up pairs with the matching
        # down-path resolution.
        db_outputs.insert(0, x)
        # Halve the (grown) channel count for the transition conv.
        num_channels += growth_rate * i
        num_channels //= 2
        x = TransitionBlock(filters=num_channels, trans_down=True)(x)
    #################################### Mid DenseBlock ###################################
    x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)
    ################################ Transition up section ################################
    for i in range(num_trans_up):
        num_channels += growth_rate * (i + 1)
        num_channels //= 2
        x = TransitionBlock(filters=num_channels, trans_down=False)(x)
        # Skip connection from the down path at the same resolution.
        x = Concatenate(axis=-1)([x, db_outputs[i]])
        x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)

    img_out = Conv2D(1,
                     kernel_size=(5, 5),
                     activation="sigmoid",
                     padding="same",
                     name="image_output")(x)
    model = Model(inputs=[img_in], outputs=[img_out], name="DenseNet")
    return model
# Example 4
# File: models.py  Project: ameroueh/oaz
def residual_block(
    inputs,
    num_filters=16,
    kernel_size=3,
    strides=1,
    activation="relu",
    batch_normalization=True,
    conv_first=True,
):
    """Residual block: conv-BN-act, conv, identity add, BN-act.

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    """

    def _make_conv(act):
        # Both convs share everything except the activation argument.
        return Conv2D(
            num_filters,
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            kernel_initializer="he_normal",
            kernel_regularizer=l2(1e-4),
            activation=act,
        )

    x = _make_conv(None)(inputs)
    x = BatchNormalization()(x)
    x = Activation(activation)(x)
    x = _make_conv("linear")(x)
    # Identity shortcut around the two convolutions.
    x = add([inputs, x])
    x = BatchNormalization()(x)
    x = Activation(activation)(x)
    return x
# Example 5
def model_fn_LENET_5(features,
                     activation='relu',
                     kernel_initializer=None,
                     bias_initializer='zeros'):
    """LeNet-5-style feature extractor (convs + pools + two FC layers).

    Args:
        features: input image tensor, e.g. [None, 32, 32, 1].
        activation: activation name used by every conv and dense layer.
        kernel_initializer: weight initializer; defaults to
            TruncatedNormal(mean=0, stddev=0.1) built per call.
        bias_initializer: bias initializer.

    Returns:
        The [None, 84] output tensor of the last dense layer.
    """
    # Fix: the original default was a TruncatedNormal *instance* created once
    # at function-definition time and shared by every call (mutable-default
    # pitfall). Build a fresh, equivalent initializer per call instead.
    if kernel_initializer is None:
        kernel_initializer = tf.keras.initializers.TruncatedNormal(
            mean=0, stddev=0.1)

    # conv1: output is [None, 28, 28, 6]
    conv1 = Conv2D(filters=6,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   padding='valid',
                   activation=activation,
                   use_bias=True,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(features)

    # pool1: output is [None, 14, 14, 6]
    pool1 = MaxPool2D(pool_size=(2, 2))(conv1)

    # conv2: output is [None, 10, 10, 16]
    conv2 = Conv2D(filters=16,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   padding='valid',
                   activation=activation,
                   use_bias=True,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(pool1)

    # pool2: output is [None, 5, 5, 16] -> flattened on input of FC to [None, 400]
    pool2 = MaxPool2D(pool_size=(2, 2))(conv2)
    flatten = Flatten()(pool2)

    # fc3: output is [None, 120]
    fc3 = Dense(units=120,
                activation=activation,
                use_bias=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(flatten)

    # fc4: output is [None, 84]
    fc4 = Dense(units=84,
                activation=activation,
                use_bias=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(fc3)

    return fc4
# Example 6
def _conv_block(inp, convs, do_skip=True):
    """Apply a stack of darknet-style conv layers described by `convs`.

    When `do_skip` is true, the activation just before the last two convs
    is added to the final output as a residual connection.
    """
    x = inp

    for idx, conv in enumerate(convs):
        # Capture the skip input two layers before the end.
        if do_skip and idx == len(convs) - 2:
            skip_connection = x

        if conv['stride'] > 1:
            # unlike tensorflow darknet prefer left and top paddings
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(
            conv['filter'],
            conv['kernel'],
            strides=conv['stride'],
            # unlike tensorflow darknet prefer left and top paddings
            padding='valid' if conv['stride'] > 1 else 'same',
            name='conv_' + str(conv['layer_idx']),
            use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001,
                                   name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    # NOTE(review): with do_skip and len(convs) < 2, skip_connection is never
    # bound and this raises NameError — same as the original behavior.
    return add([skip_connection, x]) if do_skip else x
# Example 7
 def __init__(self, filters, **kwargs):
     """Initialize the ConvBlock layers.

     Args:
         filters: (int) number of filters of the 3x3 convolution.
         **kwargs: forwarded to the base Layer constructor.
     """
     super(ConvBlock, self).__init__(**kwargs)
     # Number of output filters, kept for reference/serialization.
     self.filters = filters
     self.bn = BatchNormalization()
     self.conv2d = Conv2D(filters=filters,
                          kernel_size=(3, 3),
                          padding="same")
     self.relu = ReLU()
     # Concatenation over the channel axis (dense connectivity).
     self.concat = Concatenate(axis=-1)
# Example 8
    def Train(self):
        """Load the text dataset, train a small CNN digit/char classifier,
        store it on `self.model`, save it to disk, and report metrics.

        Side effects: writes 'digit_classification_model1.h5' to the
        working directory.
        """
        # self.loadDataFeature()
        self.loadDataTxt()
        # 75% of the data goes to training, the rest to testing.
        self.train_and_test_split(0.75)
        # model
        model = Sequential()

        # model.add(Dense(392, activation='relu'))
        # model.add(Dense(128, activation='relu'))
        # model.add(Dense(36, activation='softmax'))

        #cnn model

        # Two conv pairs with max-pooling, on 28x28 grayscale inputs.
        model.add(
            Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(128, activation='relu'))
        # 36 classes — presumably digits + letters; confirm against labels.
        model.add(Dense(36, activation='softmax'))

        # model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(
            self.train_data['data'],
            self.train_data['class_name'],
            batch_size=25,
            epochs=100,
            verbose=1,
            validation_data=(self.test_data['data'],
                             self.test_data['class_name']),
        )
        self.model = model
        model.save('digit_classification_model1.h5')
        # Y_pred = model.predict(self.test_data['data'])
        # self.metric(self.test_data['class_name'], Y_pred, data_type='binary')
        self.metric()
# Example 9
def spatial_block(name, space, cfg):
    """Embed categorical spatial features, log-scale scalar ones, and run
    two convolutions; returns (output tensor, input placeholder)."""
    inpt = Input(space.shape, name=name + '_input')
    feats = tf.split(inpt, space.shape[0], axis=1)

    # NOTE: loop variable renamed from `name` to avoid shadowing the
    # function parameter (behavior unchanged — it was unused in the loop).
    for idx, (feat_name, dim) in enumerate(
            zip(space.spatial_feats, space.spatial_dims)):
        if dim > 1:
            squeezed = tf.squeeze(feats[idx], axis=1)
            # Embedding dim 10 as per https://arxiv.org/pdf/1806.01830.pdf
            embedded = Embedding(input_dim=dim, output_dim=10)(squeezed)
            # [N, H, W, C] -> [N, C, H, W]
            feats[idx] = tf.transpose(embedded, perm=[0, 3, 1, 2])
        else:
            feats[idx] = tf.log(feats[idx] + 1e-5)

    block = tf.concat(feats, axis=1)
    block = Conv2D(16, 5, **cfg)(block)
    block = Conv2D(32, 3, **cfg)(block)

    return block, inpt
# Example 10
    def _cnn(self, features: tf.Tensor,
             mode: tf.estimator.ModeKeys) -> tf.Tensor:
        """
            Feature extractor based on Conv layers

            :param features: input of the sub network
            :param mode: standard names for Estimator model modes
                (unused here; kept for the Estimator sub-network signature)
            :return: output of the sub network

        """
        # Shared hyper-parameters for both conv layers.
        activation = 'relu'
        kernel_initializer = initializers.TruncatedNormal(mean=0, stddev=0.1)
        bias_initializer = 'zeros'

        # conv1: output is [None, 28, 28, 6]
        conv1 = Conv2D(filters=6,
                       kernel_size=(5, 5),
                       strides=(1, 1),
                       padding='valid',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(features)

        # pool1: output is [None, 14, 14, 6]
        pool1 = MaxPool2D(pool_size=(2, 2))(conv1)

        # conv2: output is [None, 10, 10, 16]
        conv2 = Conv2D(filters=16,
                       kernel_size=(5, 5),
                       strides=(1, 1),
                       padding='valid',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(pool1)

        # pool2: output is [None, 5, 5, 16]
        pool2 = MaxPool2D(pool_size=(2, 2))(conv2)

        return pool2
def inception_block_1a(X):
    """
    Implementation of an inception block
    (channels-first; four parallel branches concatenated on axis 1).
    """

    # Branch 1: 1x1 reduce -> 3x3 conv.
    X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)

    # Branch 2: 1x1 reduce -> 5x5 conv.
    X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)

    # Branch 3: max-pool -> 1x1 conv; asymmetric padding restores the
    # spatial size for concatenation.
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)

    # Branch 4: plain 1x1 conv.
    X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)

    # CONCAT
    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)

    return inception
def neural_network(input_shape):
    """Build a small CNN binary classifier (sigmoid output).

    Args:
        input_shape: shape of one input sample, without the batch axis.

    Returns:
        A Keras Model mapping the input to a single sigmoid probability.
    """
    inputs = keras.Input(shape=input_shape)

    #Layer 1: down-sample first, then conv/BN/LeakyReLU and a large pool.
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_1')(inputs)
    x = Conv2D(32, kernel_size=(5, 5), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(4, 4))(x)

    #Layer 2
    x = Conv2D(64, kernel_size=(5, 5), padding='same', name='Conv2D_2')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_3')(x)

    x = Flatten(name='Flatten')(x)

    #Layer 3 (disabled)
    #model.add(Dense(256,name = 'Dense_1'))
    #model.add(BatchNormalization(name = 'BatchNormalization_2'))
    #model.add(LeakyReLU(alpha=0.1))
    #model.add(Dropout(0.5,name = 'Dropout_1'))

    #Layer 4: dense + BN + LeakyReLU + dropout.
    x = Dense(128, name='Dense_2')(x)
    x = BatchNormalization(name='BatchNormalization_3')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.5, name='Dropout_2')(x)

    #Layer 5
    x = Dense(128, name='Dense_3')(x)
    x = BatchNormalization(name='BatchNormalization_4')(x)
    x = LeakyReLU(alpha=0.1)(x)
    #model.add(Dropout(0.5,name = 'Dropout_3'))

    outputs = Dense(1, activation='sigmoid', name='Dense_4')(x)

    model = Model(inputs, outputs)
    return model
def inception_block_1b(X):
    """Inception block 3b (channels-first): four parallel branches
    concatenated on the channel axis."""
    # Branch 1: 1x1 reduce -> 3x3 conv.
    X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3b_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3b_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)

    # Branch 2: 1x1 reduce -> 5x5 conv.
    X_5x5 = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3b_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(64, (5, 5), data_format='channels_first', name='inception_3b_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)

    # Branch 3: average-pool -> 1x1 conv; padding restores spatial size.
    X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)
    X_pool = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=(4, 4), data_format='channels_first')(X_pool)

    # Branch 4: plain 1x1 conv.
    X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)

    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)

    return inception
# Example 14
 def __init__(self, filters, trans_down=True, **kwargs):
     """Initialize the TransitionBlock layers.

     Args:
         filters: (int) number of filters of the 1x1 convolution.
         trans_down: (bool) if True halve the spatial size with average
             pooling; otherwise double it with up-sampling.
         **kwargs: forwarded to the base Layer constructor.
     """
     super(TransitionBlock, self).__init__(**kwargs)
     self.filters = filters
     self.trans_down = trans_down
     self.bn = BatchNormalization()
     self.relu = ReLU()
     # 1x1 conv adjusts the channel count between DenseBlocks.
     self.conv2d = Conv2D(filters=filters,
                          kernel_size=(1, 1),
                          padding="same")
     if trans_down:
         self.pool2d = AvgPool2D(pool_size=2, strides=2)
     else:
         self.pool2d = UpSampling2D(size=(2, 2))
# Example 15
def build_cnn(input_layer, layers, conv_cfg, dense=None, prefix=''):
    """Chain Conv2D layers described by `layers` onto `input_layer`.

    Each entry of `layers` is (n_filters, kernel_size, stride). When
    `dense` is truthy, the conv stack is flattened and fed to a Dense
    layer of that width.
    """
    x = input_layer
    for idx, (n_filters, kernel_size, stride) in enumerate(layers, start=1):
        layer_name = '%sconv%02d' % (prefix, idx)
        x = Conv2D(n_filters, kernel_size, stride,
                   name=layer_name, **conv_cfg)(x)

    if dense:
        x = Dense(dense)(Flatten()(x))

    return x
# Example 16
# File: DCGan.py  Project: olonok69/GAN
 def define_discriminator(in_shape=(32, 32, 3)):
     """Build and compile the DCGAN discriminator: three strided
     down-sampling convs over a 32x32x3 image, ending in a sigmoid
     real/fake output."""
     model = Sequential()
     # normal
     model.add(Conv2D(64, (3, 3), padding='same', input_shape=in_shape))
     model.add(LeakyReLU(alpha=0.2))
     # downsample
     model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
     model.add(LeakyReLU(alpha=0.2))
     # downsample
     model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
     model.add(LeakyReLU(alpha=0.2))
     # downsample
     model.add(Conv2D(256, (3, 3), strides=(2, 2), padding='same'))
     model.add(LeakyReLU(alpha=0.2))
     # classifier
     model.add(Flatten())
     model.add(Dropout(0.4))
     model.add(Dense(1, activation='sigmoid'))
     # compile model
     # NOTE(review): `lr` is the legacy keyword; newer Keras expects
     # `learning_rate` — confirm against the pinned Keras version.
     opt = Adam(lr=0.0002, beta_1=0.5)
     model.compile(loss='binary_crossentropy',
                   optimizer=opt,
                   metrics=['accuracy'])
     return model
# Example 17
def SeparableConv(input):
    '''Depthwise-separable convolution: a 3x3 depthwise conv followed by a
    128-filter 1x1 pointwise conv, each with BatchNorm + ReLU.

    Args:
        input: input tensor
    Output:
        output: output tensor
    '''
    # Depthwise Convolution
    depthwise = DepthwiseConv2D((3, 3), padding='same')(input)
    depthwise = BatchNormalization()(depthwise)
    depthwise = ReLU()(depthwise)

    # Pointwise Convolution
    pointwise = Conv2D(128, (1, 1))(depthwise)
    pointwise = BatchNormalization()(pointwise)
    return ReLU()(pointwise)
# Example 18
# File: DCGan.py  Project: olonok69/GAN
 def define_generator(latent_dim):
     """Build the DCGAN generator: project the latent vector to a 4x4x256
     tensor, then up-sample 4x4 -> 8x8 -> 16x16 -> 32x32 with transposed
     convs, ending in a tanh 3-channel image."""
     model = Sequential()
     # foundation for 4x4 image
     n_nodes = 256 * 4 * 4
     model.add(Dense(n_nodes, input_dim=latent_dim))
     model.add(LeakyReLU(alpha=0.2))
     model.add(Reshape((4, 4, 256)))
     # upsample to 8x8
     model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
     model.add(LeakyReLU(alpha=0.2))
     # upsample to 16x16
     model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
     model.add(LeakyReLU(alpha=0.2))
     # upsample to 32x32
     model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
     model.add(LeakyReLU(alpha=0.2))
     # output layer
     model.add(Conv2D(3, (3, 3), activation='tanh', padding='same'))
     return model
# Example 19
def build_fully_conv(obs_spec,
                     act_spec,
                     data_format='channels_first',
                     broadcast_non_spatial=False,
                     fc_dim=256):
    """Build a fully-convolutional policy/value network over screen and
    minimap observations.

    Args:
      obs_spec: observation spec; spaces[0]/spaces[1] are the screen and
        minimap spatial inputs, the remainder are non-spatial inputs.
      act_spec: action spec; one logits head is produced per action space.
      data_format: conv data format passed through conv_cfg.
      broadcast_non_spatial: if True, log-scale non-spatial input #1 is
        broadcast over the spatial dims and concatenated into the state.
      fc_dim: width of the shared fully-connected layer.

    Returns:
      A Model from all inputs to the per-action logits plus the value.
    """
    screen, screen_input = spatial_block('screen', obs_spec.spaces[0],
                                         conv_cfg(data_format, 'relu'))
    minimap, minimap_input = spatial_block('minimap', obs_spec.spaces[1],
                                           conv_cfg(data_format, 'relu'))

    non_spatial_inputs = [Input(s.shape) for s in obs_spec.spaces[2:]]

    if broadcast_non_spatial:
        non_spatial, spatial_dim = non_spatial_inputs[1], obs_spec.spaces[
            0].shape[1]
        non_spatial = tf.log(non_spatial + 1e-5)
        broadcasted_non_spatial = Broadcast2D(spatial_dim)(non_spatial)
        state = tf.concat([screen, minimap, broadcasted_non_spatial], axis=1)
    else:
        state = tf.concat([screen, minimap], axis=1)

    # Shared trunk feeding the value head and non-spatial policy heads.
    fc = Flatten(name="state_flat")(state)
    fc = Dense(fc_dim, **dense_cfg('relu'))(fc)

    value = Dense(1, name="value_out", **dense_cfg(scale=0.1))(fc)
    value = tf.squeeze(value, axis=-1)

    # Spatial action spaces get a 1x1 conv over the state; the rest get a
    # dense layer over the shared trunk.
    logits = []
    for space in act_spec:
        if space.is_spatial():
            logits.append(
                Conv2D(1, 1, **conv_cfg(data_format, scale=0.1))(state))
            logits[-1] = Flatten()(logits[-1])
        else:
            logits.append(Dense(space.size(), **dense_cfg(scale=0.1))(fc))

    # Mask out unavailable action ids with a large negative logit so the
    # softmax assigns them ~zero probability.
    mask_actions = Lambda(lambda x: tf.where(non_spatial_inputs[0] > 0, x,
                                             -1000 * tf.ones_like(x)),
                          name="mask_unavailable_action_ids")
    logits[0] = mask_actions(logits[0])

    return Model(inputs=[screen_input, minimap_input] + non_spatial_inputs,
                 outputs=logits + [value])
# Example 20
 def __init__(self, regularization=0.01):
     """Initialize the SiameseEncoder sub-networks.

     Args:
         regularization: (float) L2 regularization factor for layer weights.
     """
     super(SiameseEncoder, self).__init__()
     # Channel count tracked by _make_resblock when building shortcuts.
     self.inplanes = 64
     # Siamese branch.
     self.siamese = Sequential([
         Conv2D(64,
                7,
                strides=2,
                padding='same',
                use_bias=False,
                kernel_regularizer=regularizers.l2(regularization)),
         self._make_resblock(2,
                             128,
                             strides=2,
                             regularization=regularization),
         self._make_resblock(2,
                             128,
                             strides=2,
                             regularization=regularization),
         self._make_resblock(2,
                             256,
                             strides=2,
                             regularization=regularization),
     ])
     # Merged main branch.
     self.mainstream = Sequential([
         self._make_resblock(2,
                             256,
                             strides=2,
                             regularization=regularization),
         self._make_resblock(2,
                             256,
                             strides=2,
                             regularization=regularization),
     ])
     self.bn = BatchNormalization()
     self.leaky_relu = LeakyReLU()
# Example 21
    def _make_resblock(self,
                       n_blocks,
                       n_filters,
                       strides=1,
                       regularization=0.01):
        """Build Residual blocks from BottleneckResidualUnit layers.

        Args:
          n_blocks: (int) the number of BottleneckResidualUnit layers to
            stack.
          n_filters: (int) the number of filters.
          strides: (int) the strides of the convolution in the first unit.
          regularization: (float) l2 regularization coefficient.

        Returns:
          A Sequential model of n_blocks stacked BottleneckResidualUnit
          layers.
        """
        layers = []
        # A projection shortcut is needed when the first unit changes the
        # spatial size (strides != 1) or the channel count.
        if strides != 1 or self.inplanes != n_filters * BottleneckResidualUnit.expansion:
            downsample = Conv2D(n_filters * BottleneckResidualUnit.expansion,
                                1,
                                strides=strides,
                                padding='same',
                                use_bias=False)
        else:
            downsample = None
        # Track the running channel count for the next resblock.
        self.inplanes = n_filters * BottleneckResidualUnit.expansion
        layers.append(
            BottleneckResidualUnit(n_filters,
                                   strides,
                                   downsample,
                                   regularization=regularization))
        # Remaining units keep stride 1 and need no shortcut projection.
        for _ in range(1, n_blocks):
            layers.append(
                BottleneckResidualUnit(n_filters,
                                       1,
                                       regularization=regularization))
        return Sequential(layers)
# Light augmentation for training images.
trainAug = ImageDataGenerator(rotation_range=15, fill_mode="nearest")

# load the VGG16 network, ensuring the head FC layer sets are left off
baseModel = VGG16(weights="imagenet",
                  include_top=False,
                  input_tensor=Input(shape=(224, 224, 3)))

print("baseModel.output:", baseModel.output)
print("baseModel.output.shape:", baseModel.output.shape)

# construct the head of the model that will be placed on top of the the base model
# NOTE(review): `init_stddev` and `MOM` are module-level constants defined
# elsewhere in this file — confirm their values before reuse.
headModel = baseModel.output
# Strided 5x5 conv halves the spatial size; BN + LeakyReLU follow.
headModel = Conv2D(
    224,
    kernel_size=[5, 5],
    strides=[2, 2],
    padding="same",
    kernel_initializer=keras.initializers.TruncatedNormal(stddev=init_stddev),
)(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
# Second strided conv stage doubles the filter count.
headModel = Conv2D(
    448,
    kernel_size=[5, 5],
    strides=[2, 2],
    padding="same",
    kernel_initializer=keras.initializers.TruncatedNormal(stddev=init_stddev),
)(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = AveragePooling2D(pool_size=(2, 2))(headModel)
def InceptionModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet
    
    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # First Block
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=1, name='bn1')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides=2)(X)

    # Second Block
    X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)

    # Third Block
    X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size=3, strides=2)(X)

    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)

    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)

    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)

    # Top layer
    X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), data_format='channels_first')(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)

    # L2 normalization
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

    # Create model instance
    model = Model(inputs=X_input, outputs=X, name='FaceRecoModel')

    return model
# Example 24
# File: models.py  Project: ameroueh/oaz
def create_alpha_zero_model(
    depth,
    input_shape,
    policy_output_size,
    num_filters=64,
    activation="relu",
    policy_factor=1.0,
):
    """Build an AlphaZero-style network: a conv stem, `depth` + 1 residual
    blocks, and separate policy (softmax) and value (tanh) heads.

    Args:
        depth: number of residual blocks after the first one.
        input_shape: shape of the board input, without the batch axis.
        policy_output_size: number of policy logits.
        num_filters: filters used by the stem and residual blocks.
        activation: activation name used throughout the trunk and heads.
        policy_factor: scalar multiplied into the policy logits before the
            softmax.

    Returns:
        A tf.keras.Model with outputs [policy, value].
    """
    input = tf.keras.Input(shape=input_shape, name="input")
    # Stem: conv -> BN -> activation.
    conv = Conv2D(
        num_filters,
        kernel_size=3,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(1e-4),
        activation=None,
    )

    x = conv(input)
    x = BatchNormalization()(x)
    x = Activation(activation)(x)

    block_output = residual_block(inputs=x, strides=1, num_filters=num_filters)

    for _ in range(depth):
        block_output = residual_block(inputs=block_output,
                                      strides=1,
                                      num_filters=num_filters)

    # TODO: consider adding an extra conv layer here and for the policy head as
    # well, see https://medium.com/oracledevs/lessons-from-alpha-zero-part-6-hyperparameter-tuning-b1cfcbe4ca9
    value_conv_output = Conv2D(
        num_filters // 2,
        kernel_size=3,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(1e-4),
        activation=None,
    )(block_output)
    value_conv_output = BatchNormalization()(value_conv_output)
    value_conv_output = Activation(activation)(value_conv_output)

    # Value head: single tanh scalar in [-1, 1].
    value = Dense(
        units=1,
        kernel_regularizer=l2(1e-4),
        kernel_initializer="he_normal",
        activation="tanh",
        name="value",
    )(Flatten()(value_conv_output))

    policy_conv_output = Conv2D(
        num_filters // 2,
        kernel_size=3,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(1e-4),
        activation=None,
    )(block_output)

    policy_conv_output = BatchNormalization()(policy_conv_output)
    policy_conv_output = Activation(activation)(policy_conv_output)

    # Policy head: logits scaled by policy_factor, then softmax.
    policy = (Dense(
        units=policy_output_size,
        kernel_regularizer=l2(1e-4),
        kernel_initializer="he_normal",
        activation=None,
    )(Flatten()(policy_conv_output)) * policy_factor)
    policy = Activation("softmax", name="policy")(policy)
    # policy = tf.keras.layers.Lambda(
    #     # lambda x: x * policy_factor, name="policy"
    # )(policy)
    model = tf.keras.Model(inputs=input, outputs=[policy, value])

    return model
# Example 25
def main(_):
    """Train a neural-process regression model (TF1 graph-mode loop).

    Builds a convolutional encoder (image -> FLAGS.dim_w embedding) and an
    MLP decoder (-> FLAGS.dim_y output), loads pickled train/val episodes,
    wires them through `tf.data` generator pipelines, constructs the
    meta-train and meta-val graphs via `construct_model`, and runs a
    session-based training loop with periodic summary logging and loss
    printing.
    """

    # Encoder w0: three strided 'relu' convs plus one max-pool, then a dense
    # projection to the FLAGS.dim_w-dimensional embedding.
    encoder_w0 = tf.keras.Sequential([
        Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               activation='relu',
               padding='same'),
        Conv2D(filters=48,
               kernel_size=3,
               strides=(2, 2),
               activation='relu',
               padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(filters=64,
               kernel_size=3,
               strides=(2, 2),
               activation='relu',
               padding='same'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(FLAGS.dim_w),
    ])

    # Decoder: small MLP mapping the conditioned representation to the label.
    decoder0 = tf.keras.Sequential([
        tf.keras.layers.Dense(100, activation=tf.nn.relu),
        tf.keras.layers.Dense(100, activation=tf.nn.relu),
        tf.keras.layers.Dense(FLAGS.dim_y),
    ])

    dim_output = FLAGS.dim_y
    # Inputs are flattened single-channel dim_im x dim_im images.
    dim_input = FLAGS.dim_im * FLAGS.dim_im * 1

    # Experiment name encodes hyper-parameters so logs/checkpoints from
    # different runs do not collide.
    if FLAGS.weight_decay:
        exp_name = '%s.update_lr-%g.beta-%g.trial-%d' % (
            'np_vanilla', FLAGS.update_lr, FLAGS.beta, FLAGS.trial)
    else:
        exp_name = '%s.update_lr-%g.trial-%d' % ('np_vanilla', FLAGS.update_lr,
                                                 FLAGS.trial)
    checkpoint_dir = os.path.join(FLAGS.logdir, exp_name)

    # Load pickled (x, y) pairs; FLAGS.data[0] is the train split and
    # FLAGS.data[1] the validation split.
    x_train, y_train = pickle.load(
        tf.io.gfile.GFile(os.path.join(get_data_dir(), FLAGS.data[0]), 'rb'))
    x_val, y_val = pickle.load(
        tf.io.gfile.GFile(os.path.join(get_data_dir(), FLAGS.data[1]), 'rb'))

    # Keep only the last label component; the trailing `None` preserves a
    # final axis of size 1 so labels stay rank-3.
    x_train, y_train = np.array(x_train), np.array(y_train)
    y_train = y_train[:, :, -1, None]
    x_val, y_val = np.array(x_val), np.array(y_val)
    y_val = y_val[:, :, -1, None]

    # Each generator element is (inputa, labela, inputb, labelb): context and
    # target splits, each with update_batch_size * num_classes examples.
    ds_train = tf.data.Dataset.from_generator(
        functools.partial(gen, x_train, y_train),
        (tf.float32, tf.float32, tf.float32, tf.float32),
        (tf.TensorShape(
            [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_input]),
         tf.TensorShape(
             [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_output]),
         tf.TensorShape(
             [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_input]),
         tf.TensorShape(
             [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_output])))

    ds_val = tf.data.Dataset.from_generator(
        functools.partial(gen, x_val, y_val),
        (tf.float32, tf.float32, tf.float32, tf.float32),
        (tf.TensorShape(
            [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_input]),
         tf.TensorShape(
             [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_output]),
         tf.TensorShape(
             [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_input]),
         tf.TensorShape(
             [None, FLAGS.update_batch_size * FLAGS.num_classes, dim_output])))

    # TF1-style one-shot iterators: the tensors below are re-evaluated on
    # every sess.run, yielding a fresh batch each time.
    inputa, labela, inputb, labelb = ds_train.make_one_shot_iterator(
    ).get_next()

    input_tensors = {'inputa': inputa,\
                     'inputb': inputb,\
                     'labela': labela, 'labelb': labelb}

    inputa_val, labela_val, inputb_val, labelb_val = ds_val.make_one_shot_iterator(
    ).get_next()

    metaval_input_tensors = {'inputa': inputa_val,\
                             'inputb': inputb_val,\
                             'labela': labela_val, 'labelb': labelb_val}

    # NOTE(review): the metatrain call returns (loss, train_op, facto) while
    # the metaval call appears to return only a loss tensor -- confirm
    # against construct_model's handling of the `prefix` argument.
    loss, train_op, facto = construct_model(input_tensors,
                                            encoder_w0,
                                            decoder0,
                                            prefix='metatrain_')
    loss_val = construct_model(metaval_input_tensors,
                               encoder_w0,
                               decoder0,
                               prefix='metaval_')

    ###########

    summ_op = tf.summary.merge_all()
    sess = tf.InteractiveSession()
    summary_writer = tf.summary.FileWriter(checkpoint_dir, sess.graph)
    tf.global_variables_initializer().run()

    PRINT_INTERVAL = 50  # pylint: disable=invalid-name
    SUMMARY_INTERVAL = 5  # pylint: disable=invalid-name
    prelosses, prelosses_val = [], []
    old_time = time.time()
    # NOTE(review): FLAGS.num_updates is used here as the total number of
    # training iterations -- confirm that is the intended flag.
    for itr in range(FLAGS.num_updates):

        # `facto` is a placeholder-like factor fed on every step.
        feed_dict = {facto: FLAGS.facto}

        if itr % SUMMARY_INTERVAL == 0:
            # Record summaries and track losses on both splits; note this
            # consumes one extra batch from each iterator.
            summary, cost, cost_val = sess.run([summ_op, loss, loss_val],
                                               feed_dict)
            summary_writer.add_summary(summary, itr)
            prelosses.append(cost)  # 0 step loss on training set
            prelosses_val.append(
                cost_val)  # 0 step loss on meta_val training set

        sess.run(train_op, feed_dict)

        if (itr != 0) and itr % PRINT_INTERVAL == 0:
            # Report the running mean loss since the last report, then reset.
            print('Iteration ' + str(itr) + ': ' + str(np.mean(prelosses)),
                  'time =',
                  time.time() - old_time)
            prelosses = []
            old_time = time.time()
            print('Validation results: ' + str(np.mean(prelosses_val)))
            prelosses_val = []
示例#26
0
#else
from add_func_9x9 import constraint_violation, pricing_plotter, plotter_autoencoder

# Run the Keras backend in double precision.
tf.compat.v1.keras.backend.set_floatx('float64')

# First encoder head: alternating zero-padding and 'elu' convolutions over
# the (Nparameters, 1, 1) parameter "image".
_nn1a_stack = [
    InputLayer(input_shape=(
        Nparameters,
        1,
        1,
    )),
    ZeroPadding2D(padding=(2, 2)),
    Conv2D(32, (3, 1),
           padding='valid',
           use_bias=True,
           strides=(1, 1),
           activation='elu'),
    ZeroPadding2D(padding=(3, 1)),
    Conv2D(32, (2, 2),
           padding='valid',
           use_bias=True,
           strides=(1, 1),
           activation='elu'),
    Conv2D(32, (2, 2),
           padding='valid',
           use_bias=True,
           strides=(2, 1),
           activation='elu'),
]

NN1a = Sequential()
for _nn1a_layer in _nn1a_stack:
    NN1a.add(_nn1a_layer)
# conv_layers = [1, 2, 3]

# Hyper-parameter grid: count of trailing dense layers, nodes/filters per
# layer, and count of convolutional blocks.
dense_layers = [0, 1, 2]
layer_sizes = [4, 8, 16]
conv_layers = [1, 2]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            # Unique run name (timestamped) so TensorBoard logs never collide.
            NAME = f'Pneumonia-{IMG_SIZE}px-{NUM_SAMPLES}samples-{conv_layer}conv-{layer_size}nodes-{dense_layer}dense-{int(time.time())}'
            tensorboard = TensorBoard(log_dir=f'logs/{NAME}')
            print(NAME)

            # First conv block fixes the input shape; format is:
            # num filters, window size, input dimensions.
            model = Sequential()
            model.add(Conv2D(layer_size, (3, 3),
                             input_shape=x_train.shape[1:]))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            print('Layer 0 generated')

            # Remaining conv blocks (conv_layer - 1 of them) infer their
            # input shape from the previous layer.
            for layer_idx in range(1, conv_layer):
                print(f'Layer {layer_idx} generated.')
                model.add(Conv2D(layer_size, (3, 3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            # Flatten, then the configured number of dense blocks.
            model.add(Flatten())
            for _ in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))
示例#28
0
def stack_layers(inputs, layers, kernel_initializer='glorot_uniform'):
    '''
    Builds the architecture of the network by applying each layer specified in layers to inputs.

    inputs:     a dict containing input_types and input_placeholders for each key and value pair, respectively.
                for spectralnet, this means the input_types 'Unlabeled' and 'Orthonorm'*
    layers:     a list of dicts containing all layers to be used in the network, where each dict describes
                one such layer. each dict requires the key 'type'. all other keys are dependent on the layer
                type

    kernel_initializer: initialization configuration passed to keras (see keras initializers)

    returns:    outputs, a dict formatted in much the same way as inputs. it contains input_types and
                output_tensors for each key and value pair, respectively, where output_tensors are
                the outputs of the input_placeholders in inputs after each layer in layers is applied

    raises:     ValueError if a layer dict contains an unrecognized 'type'

    * this is necessary since spectralnet takes multiple inputs and performs special computations on the
      orthonorm layer
    '''
    # Dense layer types that differ only in their activation; conveniently,
    # the type string is exactly the keras activation name.
    dense_activations = ('softplus', 'softmax', 'tanh', 'relu', 'selu')

    outputs = dict()

    for key in inputs:
        outputs[key] = inputs[key]

    for layer in layers:
        # check for l2_reg argument; a value of 0 (falsy) is treated the same
        # as no regularization at all.
        l2_reg = layer.get('l2_reg')
        if l2_reg:
            l2_reg = l2(layer['l2_reg'])

        layer_type = layer['type']
        name = layer.get('name')

        # create the layer
        if layer_type == 'softplus_reg':
            # legacy softplus variant with a fixed l2(0.001) penalty that
            # ignores any 'l2_reg' key.
            l = Dense(layer['size'],
                      activation='softplus',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2(0.001),
                      name=name)
        elif layer_type in dense_activations:
            # all plain Dense variants share every argument except activation.
            l = Dense(layer['size'],
                      activation=layer_type,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_reg,
                      name=name)
        elif layer_type == 'Conv2D':
            l = Conv2D(layer['channels'],
                       kernel_size=layer['kernel'],
                       activation='relu',
                       data_format='channels_last',
                       kernel_regularizer=l2_reg,
                       name=name)
        elif layer_type == 'BatchNormalization':
            l = BatchNormalization(name=name)
        elif layer_type == 'MaxPooling2D':
            # NOTE(review): data_format is 'channels_first' here but
            # 'channels_last' in the Conv2D branch above -- looks
            # inconsistent; confirm which layout the callers actually use.
            l = MaxPooling2D(pool_size=layer['pool_size'],
                             data_format='channels_first',
                             name=name)
        elif layer_type == 'Dropout':
            l = Dropout(layer['rate'], name=name)
        elif layer_type == 'Flatten':
            l = Flatten(name=name)
        elif layer_type == 'Orthonorm':
            # special spectralnet layer built from the running 'Orthonorm'
            # output tensor.
            l = Orthonorm(outputs['Orthonorm'], name=name)
        else:
            raise ValueError("Invalid layer type '{}'".format(layer_type))

        # apply the layer to each input in inputs
        for k in outputs:
            outputs[k] = l(outputs[k])

    return outputs
示例#29
0
    def __init__(self, n_out, regularization=0.01):
        """Initialize the DirectionNet.

        Args:
          n_out: (int) the number of output distributions.
          regularization: L2 regularization factor for layer weights.
        """
        super(DirectionNet, self).__init__()
        self.encoder = SiameseEncoder()
        self.inplanes = self.encoder.inplanes

        def make_decoder_block(conv_filters, res_filters):
            # One decoder stage: bias-free 3x3 conv, a 2-unit residual stack,
            # then BatchNorm + LeakyReLU. All weights share the same L2
            # regularization factor.
            return Sequential([
                Conv2D(conv_filters,
                       3,
                       use_bias=False,
                       kernel_regularizer=regularizers.l2(regularization)),
                self._make_resblock(2, res_filters,
                                    regularization=regularization),
                BatchNormalization(),
                LeakyReLU()
            ])

        # Six decoder stages; each stage halves the channel counts of the
        # previous one (conv filters 256 -> 8, residual filters 128 -> 4).
        self.decoder_block1 = make_decoder_block(256, 128)
        self.decoder_block2 = make_decoder_block(128, 64)
        self.decoder_block3 = make_decoder_block(64, 32)
        self.decoder_block4 = make_decoder_block(32, 16)
        self.decoder_block5 = make_decoder_block(16, 8)
        self.decoder_block6 = make_decoder_block(8, 4)

        # Final 1x1 conv maps the decoded features to n_out output channels.
        self.down_channel = Conv2D(
            n_out, 1, kernel_regularizer=regularizers.l2(regularization))
示例#30
0
def model_fn_ALEXNET(features,
                     activation='relu',
                     kernel_initializer=tf.keras.initializers.TruncatedNormal(
                         mean=0, stddev=0.1),
                     bias_initializer='zeros'):
    """AlexNet-style convolutional feature extractor.

    Args:
      features: input image batch of shape [None, 227, 227, 3].
      activation: activation applied after each conv / dense layer.
      kernel_initializer: initializer for conv / dense kernels.
      bias_initializer: initializer for conv / dense biases.

    Returns:
      The [None, 4096] activations after the final dropout layer.
    """
    # Every stage is guarded by a static shape assertion on the tensor that
    # actually feeds it (the original code asserted on `features` / `conv1`
    # in several places, which could never match the documented shapes).
    # tf.control_dependencies expects an iterable of ops, so each assertion
    # is wrapped in a list.

    # input: [None, 227, 227, 3]
    # conv1: f 96, k (11,11), s (4,4), VALID, relu --> [None, 54, 54, 96]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(features.get_shape()[1:],
                                       [227, 227, 3])]):
        conv1 = Conv2D(filters=96,
                       kernel_size=(11, 11),
                       strides=(4, 4),
                       padding='valid',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(features)

    # pool1: k (3,3), s (2,2), VALID               --> [None, 26, 26, 96]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(conv1.get_shape()[1:], [54, 54, 96])]):
        pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='valid')(conv1)

    # conv2: f 256, k (5,5), s (1,1), SAME, relu   --> [None, 26, 26, 256]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(pool1.get_shape()[1:], [26, 26, 96])]):
        conv2 = Conv2D(filters=256,
                       kernel_size=(5, 5),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(pool1)

    # pool2: k (3,3), s (2,2), VALID               --> [None, 12, 12, 256]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(conv2.get_shape()[1:],
                                       [26, 26, 256])]):
        pool2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='valid')(conv2)

    # conv3: f 384, k (3,3), s(1,1), SAME, relu    --> [None, 12, 12, 384]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(pool2.get_shape()[1:],
                                       [12, 12, 256])]):
        conv3 = Conv2D(filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(pool2)

    # conv4: f 384, k (3,3), s(1,1), SAME, relu    --> [None, 12, 12, 384]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(conv3.get_shape()[1:],
                                       [12, 12, 384])]):
        conv4 = Conv2D(filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(conv3)

    # conv5: f 256, k (3,3), s(1,1), SAME, relu    --> [None, 12, 12, 256]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(conv4.get_shape()[1:],
                                       [12, 12, 384])]):
        conv5 = Conv2D(filters=256,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(conv4)

    # pool5: k (3,3), s (2,2)                      --> [None,  5,  5, 256]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(conv5.get_shape()[1:],
                                       [12, 12, 256])]):
        pool5 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='valid')(conv5)

    # flatten --> [None, 6400]
    flatten = Flatten()(pool5)

    # fc6: f 4096, relu --> [None, 4096]
    # (original code had units=496, contradicting both the comments and the
    # downstream shape assertion; 4096 is the documented AlexNet width)
    with tf.control_dependencies(
            [tf.debugging.assert_equal(flatten.get_shape()[1:], [6400])]):
        fc6 = Dense(units=4096,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(flatten)

    # drop7: p 0.5      --> [None, 4096]
    drop7 = Dropout(rate=0.5)(fc6)

    # fc7: f 4096, relu --> [None, 4096]
    with tf.control_dependencies(
            [tf.debugging.assert_equal(fc6.get_shape()[1:], [4096])]):
        fc7 = Dense(units=4096,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(drop7)

    # drop8: p 0.5      --> [None, 4096]
    drop8 = Dropout(rate=0.5)(fc7)

    return drop8