Example #1
def identity_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   trainable=True):

    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 3

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1, (1, 1),
                      name=conv_name_base + '2a',
                      trainable=trainable)(input_tensor)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter2, (kernel_size, kernel_size),
                      padding='same',
                      name=conv_name_base + '2b',
                      trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter3, (1, 1),
                      name=conv_name_base + '2c',
                      trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = Add()([x, input_tensor])
    x = Activation('relu')(x)
    return x
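
# A minimal usage sketch for identity_block (not from the original source): it
# assumes this module already provides the Keras layers used above plus the
# project-specific FixedBatchNormalization layer, and that the input tensor has
# filters[-1] channels so the residual Add is valid.
from keras.layers import Input
from keras.models import Model

feature_map = Input(shape=(None, None, 256))  # hypothetical stage-2 feature map
out = identity_block(feature_map, 3, [64, 64, 256], stage=2, block='b')
Model(feature_map, out).summary()
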
Example #2
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}

        for l in range(self.nb_layers):
            for c in ['i', 'f', 'c', 'o']:
                act = self.LSTM_activation if c == 'c' else self.LSTM_inner_activation
                self.conv_layers[c].append(
                    Convolution2D(self.R_stack_sizes[l],
                                  self.R_filt_sizes[l],
                                  padding='same',
                                  data_format="channels_last",
                                  activation=act))
            act = 'relu' if l == 0 else self.A_activation
            self.conv_layers['ahat'].append(
                Convolution2D(self.stack_sizes[l],
                              self.Ahat_filt_sizes[l],
                              padding='same',
                              data_format="channels_last",
                              activation=act))
            if l < self.nb_layers - 1:
                self.conv_layers['a'].append(
                    Convolution2D(self.stack_sizes[l + 1],
                                  self.A_filt_sizes[l],
                                  padding='same',
                                  data_format="channels_last",
                                  activation=self.A_activation))

        self.upsample = UpSampling2D(data_format="channels_last")  # upsampling
        self.pool = MaxPooling2D(data_format="channels_last")  # downsampling

        self._trainable_weights = []
        nb_row, nb_col = (input_shape[-3], input_shape[-2])
        # Build each convolution and collect its trainable weights
        for c in sorted(self.conv_layers.keys()):
            for l in range(len(self.conv_layers[c])):
                ds_factor = 2**l
                if c == 'ahat':
                    nb_channels = self.R_stack_sizes[l]
                elif c == 'a':
                    nb_channels = 2 * self.stack_sizes[l]
                else:  # i, c, o, f
                    nb_channels = self.stack_sizes[l] * 2 + self.R_stack_sizes[l]
                    if l < self.nb_layers - 1:
                        nb_channels += self.R_stack_sizes[l + 1]
                in_shape = (input_shape[0], nb_row // ds_factor,
                            nb_col // ds_factor, nb_channels)  # up -> downsampling
                self.conv_layers[c][l].build(in_shape)
                self._trainable_weights += self.conv_layers[c][l].trainable_weights

        self.states = [None] * self.nb_layers * 3  # ['r', 'c', 'e']
        if self.extrap_start_time is not None:
            self.t_extrap = K.variable(np.array(self.extrap_start_time),
                                       'int32')
            self.states += [None] * 2
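
# A hedged numeric sketch of the nb_channels bookkeeping above (the sizes are
# illustrative, not taken from the example): the gate convolutions at layer l
# see the error E_l (2 * stack_sizes[l] channels), the recurrent state R_l
# (R_stack_sizes[l] channels) and, below the top layer, the upsampled R_{l+1}.
stack_sizes = (3, 48, 96)      # assumed A/Ahat channels per layer
R_stack_sizes = (3, 48, 96)    # assumed R channels per layer
nb_layers = len(stack_sizes)

l = 0
nb_channels = stack_sizes[l] * 2 + R_stack_sizes[l]
if l < nb_layers - 1:
    nb_channels += R_stack_sizes[l + 1]
print(nb_channels)  # 3*2 + 3 + 48 = 57 input channels for the layer-0 gate convs
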
Example #3
def conv_block_td(input_tensor,
                  kernel_size,
                  filters,
                  stage,
                  block,
                  input_shape,
                  strides=(2, 2),
                  trainable=True):

    # Time-distributed version of the conv block

    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 3

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = TimeDistributed(Convolution2D(nb_filter1, (1, 1),
                                      strides=strides,
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        input_shape=input_shape,
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter2, (kernel_size, kernel_size),
                                      padding='same',
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2c')(x)

    shortcut = TimeDistributed(Convolution2D(nb_filter3, (1, 1),
                                             strides=strides,
                                             trainable=trainable,
                                             kernel_initializer='normal'),
                               name=conv_name_base + '1')(input_tensor)
    shortcut = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                               name=bn_name_base + '1')(shortcut)

    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x
Example #4
def rpn(base_layers, num_anchors):

    x = Convolution2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      kernel_initializer='normal',
                      name='rpn_conv1')(base_layers)

    x_class = Convolution2D(num_anchors, (1, 1),
                            activation='sigmoid',
                            kernel_initializer='uniform',
                            name='rpn_out_class')(x)
    x_regr = Convolution2D(num_anchors * 4, (1, 1),
                           activation='linear',
                           kernel_initializer='zero',
                           name='rpn_out_regress')(x)

    return [x_class, x_regr, base_layers]
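
# A hedged usage sketch (not from the original source): any backbone feature
# map works as base_layers, e.g. the nn_base shown in Example #13; the value
# of num_anchors below is an assumed setting.
from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(None, None, 3))
shared_layers = nn_base(img_input, trainable=True)
rpn_class, rpn_regr, _ = rpn(shared_layers, num_anchors=9)
Model(img_input, [rpn_class, rpn_regr]).summary()
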
Example #5
def identity_block_td(input_tensor,
                      kernel_size,
                      filters,
                      stage,
                      block,
                      trainable=True):

    # Time-distributed version of the identity block

    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 3

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = TimeDistributed(Convolution2D(nb_filter1, (1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter2, (kernel_size, kernel_size),
                                      trainable=trainable,
                                      kernel_initializer='normal',
                                      padding='same'),
                        name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2c')(x)

    x = Add()([x, input_tensor])
    x = Activation('relu')(x)

    return x
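
# A hedged sketch of wiring the time-distributed blocks together (not from the
# original source): the 5-D input shape (32 ROIs of 14x14x1024) and the filter
# sizes are assumptions.
from keras.layers import Input
from keras.models import Model

roi_input = Input(shape=(32, 14, 14, 1024))
x = conv_block_td(roi_input, 3, [512, 512, 2048], stage=5, block='a',
                  input_shape=(32, 14, 14, 1024), strides=(2, 2))
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b')
Model(roi_input, x).summary()
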
def build_model(num_output_classes):
    conv1size = 32
    conv2size = 64
    convfiltsize = 4
    densesize = 128
    poolsize = (2, 2)
    imgdepth = 3
    dropout = 0.3
    if IMG_COLORMODE == 'grayscale':
        imgdepth = 1
    inpshape = IMG_TGT_SIZE + (imgdepth, )
    inputs = Input(shape=inpshape)
    conv1 = Convolution2D(conv1size,
                          convfiltsize,
                          strides=(1, 1),
                          padding='valid',
                          activation='relu',
                          name='conv1',
                          data_format='channels_last')(inputs)
    pool1 = MaxPooling2D(pool_size=poolsize, name='pool1')(conv1)
    drop1 = Dropout(dropout)(pool1)
    conv2 = Convolution2D(conv2size,
                          convfiltsize,
                          strides=(1, 1),
                          padding='valid',
                          activation='relu',
                          name='conv2',
                          data_format='channels_last')(drop1)
    pool2 = MaxPooling2D(pool_size=poolsize, name='pool2')(conv2)
    drop2 = Dropout(dropout)(pool2)
    flat2 = Flatten()(drop2)
    dense = Dense(densesize, name='dense')(flat2)
    denseact = Activation('relu')(dense)
    output = Dense(num_output_classes, name='output')(denseact)
    outputact = Activation('softmax')(output)

    model = Model(inputs=inputs, outputs=outputact)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
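
# A hedged usage sketch: IMG_COLORMODE and IMG_TGT_SIZE are module-level
# constants that build_model relies on but the example does not show, so the
# values here are assumptions.
IMG_COLORMODE = 'rgb'
IMG_TGT_SIZE = (64, 64)
model = build_model(num_output_classes=5)
model.summary()
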
def simple_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(filters=64, kernel_size=(3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM))
    model.add(Activation('softmax'))

    return model
def mk_model_with_bn():
    model = Sequential()
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_normal'))
    model.add(Activation('softmax'))

    return model
def mk_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=.1))
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(MaxoutDense(output_dim=256, nb_feature=4))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_uniform'))
    model.add(Activation('softmax'))

    return model
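
# A hedged training sketch (not from the original source): IMAGE_SIZE,
# LABEL_NUM and the data below are assumptions, and simple_model() is used
# because mk_model()/mk_model_with_bn() also need the kernel_initializer()
# helper that these examples do not show.
import numpy as np

IMAGE_SIZE, LABEL_NUM = 48, 7
x_train = np.random.rand(32, IMAGE_SIZE, IMAGE_SIZE, 1).astype('float32')
y_train = np.eye(LABEL_NUM)[np.random.randint(0, LABEL_NUM, 32)]

model = simple_model()
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=8, epochs=1)
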
Example #10
def discriminator(X_dim, y_dim):
    df_dim = 64
    x_in = Input(shape=X_dim, name='X_input')
    y_in = Input(shape=(y_dim, ), name='Y_input')
    D_h = LeakyReLU(0.2)(Convolution2D(df_dim,
                                       kernel_size=(5, 5),
                                       strides=(2, 2),
                                       padding='same')(x_in))
    D_h = LeakyReLU(0.2)(BatchNormalization()(Convolution2D(
        df_dim * 2, kernel_size=(5, 5), strides=(2, 2), padding='same')(D_h)))
    D_h = LeakyReLU(0.2)(BatchNormalization()(Convolution2D(
        df_dim * 2, kernel_size=(5, 5), strides=(2, 2), padding='same')(D_h)))
    D_h = LeakyReLU(0.2)(BatchNormalization()(Convolution2D(
        df_dim * 4, kernel_size=(5, 5), strides=(2, 2), padding='same')(D_h)))
    D_h = LeakyReLU(0.2)(BatchNormalization()(Convolution2D(
        df_dim * 8, kernel_size=(5, 5), strides=(2, 2), padding='same')(D_h)))
    D_h = Flatten()(D_h)
    D_h = concatenate([D_h, y_in])
    D_logit = Dense(1, name='Discriminator_Output')(D_h)
    D = Model(inputs=[x_in, y_in], outputs=D_logit, name='Discriminator')
    print('=== Discriminator ===')
    D.summary()
    print('\n\n')
    return D
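
# A hedged instantiation sketch (not from the original source): the MNIST-like
# input shape and label dimension are assumptions; note the output is a raw
# logit (no sigmoid), so a logit-aware loss would be needed for training.
D = discriminator(X_dim=(28, 28, 1), y_dim=10)
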
Example #11
def discriminator(X_dim):
    # The discriminator uses an auto-encoder style architecture
    # Architecture : (64)4c2s-FC32_BR-FC64*14*14_BR-(1)4dc2s_S
    x_in = Input(shape=X_dim, name='X_input')
    D_h = Activation('relu')(Convolution2D(64,
                                           kernel_size=(4, 4),
                                           strides=(2, 2),
                                           padding='same',
                                           name='d_conv1')(x_in))
    D_h = Flatten()(D_h)
    code = Activation('relu')(BatchNormalization()(Dense(32)(D_h)))
    D_h = Activation('relu')(BatchNormalization()(Dense(64 * 14 * 14)(code)))
    D_h = Reshape(target_shape=[14, 14, 64])(D_h)
    out = Conv2DTranspose(1,
                          kernel_size=(2, 2),
                          strides=(2, 2),
                          padding='same',
                          activation='sigmoid')(D_h)
    D_model = Model(inputs=x_in, outputs=[out, code])
    # recon loss
    # recon_error = tf.sqrt(2 * tf.nn.l2_loss(out - x_in)) / self.batch_size
    # return out, recon_error, code
    D_model.summary()
    return D_model
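
# The commented-out lines above hint at the reconstruction error used to score
# samples; a hedged out-of-graph sketch of the same quantity (batch size and
# image shape are assumptions).
import numpy as np

D_model = discriminator(X_dim=(28, 28, 1))
batch = np.random.rand(16, 28, 28, 1).astype('float32')   # dummy images
recon, code = D_model.predict(batch)
recon_error = np.sqrt(np.sum((recon - batch) ** 2)) / len(batch)  # sqrt(2 * l2_loss) / batch_size
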
Example #12
plot_x = np.r_[train, test]
plot_t = np.r_[train_label, test_label]

N_train = len(train)
vector_dim = train.shape[2]

epochs = 100
batch_size = 256
f_dim = train.shape[2]

model = Sequential()

model.add(
    Convolution2D(32, (2, 2),
                  2,
                  padding='same',
                  input_shape=train.shape[1:],
                  kernel_regularizer=l2(0.005),
                  use_bias=False))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.3))
model.add(
    Convolution2D(64, (2, 2),
                  2,
                  padding='same',
                  kernel_regularizer=l2(0.005),
                  use_bias=False))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.3))
model.add(Activation("relu"))

model.add(
Example #13
def nn_base(input_tensor=None, trainable=False):

    # Determine proper input shape
    input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3

    x = ZeroPadding2D((3, 3))(img_input)

    x = Convolution2D(64, (7, 7),
                      strides=(2, 2),
                      name='conv1',
                      trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x,
                   3, [64, 64, 256],
                   stage=2,
                   block='a',
                   strides=(1, 1),
                   trainable=trainable)
    x = identity_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='b',
                       trainable=trainable)
    x = identity_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='c',
                       trainable=trainable)

    x = conv_block(x,
                   3, [128, 128, 512],
                   stage=3,
                   block='a',
                   trainable=trainable)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='b',
                       trainable=trainable)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='c',
                       trainable=trainable)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='d',
                       trainable=trainable)

    x = conv_block(x,
                   3, [256, 256, 1024],
                   stage=4,
                   block='a',
                   trainable=trainable)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='b',
                       trainable=trainable)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='c',
                       trainable=trainable)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='d',
                       trainable=trainable)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='e',
                       trainable=trainable)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='f',
                       trainable=trainable)

    return x
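
# A hedged usage sketch (not from the original source): it assumes conv_block,
# identity_block and FixedBatchNormalization are defined in the same module
# (conv_block is referenced here but not reproduced in these examples).
from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(None, None, 3))
shared_features = nn_base(img_input, trainable=False)
Model(img_input, shared_features).summary()
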
Example #14
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# Preprocess labels
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

# Model
model = Sequential()

# Default data format: (samples, rows, cols, channels)
model.add(
    Convolution2D(32,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  activation='relu',
                  input_shape=(28, 28, 1)))
model.add(
    Convolution2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
Example #15
def SSD300(input_shape, num_classes=21):
    """SSD300 architecture.

    # Arguments
        input_shape: Shape of the input image,
            expected to be either (300, 300, 3) or (3, 300, 300) (not tested).
        num_classes: Number of classes including background.

    # References
        https://arxiv.org/abs/1512.02325
    """
    net = {}
    # Block 1
    input_tensor = Input(shape=input_shape)
    img_size = (input_shape[1], input_shape[0])
    net['input'] = input_tensor
    net['conv1_1'] = Convolution2D(64, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv1_1')(net['input'])

    net['conv1_2'] = Convolution2D(64, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv1_2')(net['conv1_1'])

    net['pool1'] = MaxPooling2D((2, 2),
                                strides=(2, 2),
                                padding='same',
                                name='pool1')(net['conv1_2'])
    # Block 2
    net['conv2_1'] = Convolution2D(128, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv2_1')(net['pool1'])

    net['conv2_2'] = Convolution2D(128, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv2_2')(net['conv2_1'])
    net['pool2'] = MaxPooling2D((2, 2),
                                strides=(2, 2),
                                padding='same',
                                name='pool2')(net['conv2_2'])

    # Block 3
    net['conv3_1'] = Convolution2D(256, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv3_1')(net['pool2'])
    net['conv3_2'] = Convolution2D(256, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv3_2')(net['conv3_1'])
    net['conv3_3'] = Convolution2D(256, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv3_3')(net['conv3_2'])
    net['pool3'] = MaxPooling2D((2, 2),
                                strides=(2, 2),
                                padding='same',
                                name='pool3')(net['conv3_3'])
    # Block 4
    net['conv4_1'] = Convolution2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv4_1')(net['pool3'])
    net['conv4_2'] = Convolution2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv4_2')(net['conv4_1'])
    net['conv4_3'] = Convolution2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv4_3')(net['conv4_2'])
    net['pool4'] = MaxPooling2D((2, 2),
                                strides=(2, 2),
                                padding='same',
                                name='pool4')(net['conv4_3'])
    # Block 5
    net['conv5_1'] = Convolution2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv5_1')(net['pool4'])
    net['conv5_2'] = Convolution2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv5_2')(net['conv5_1'])
    net['conv5_3'] = Convolution2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='conv5_3')(net['conv5_2'])
    net['pool5'] = MaxPooling2D((3, 3),
                                strides=(1, 1),
                                padding='same',
                                name='pool5')(net['conv5_3'])
    # FC6
    net['fc6'] = Convolution2D(1024, (3, 3),
                               dilation_rate=(6, 6),
                               activation='relu',
                               padding='same',
                               name='fc6')(net['pool5'])
    # x = Dropout(0.5, name='drop6')(x)
    # FC7
    net['fc7'] = Convolution2D(1024, (1, 1),
                               activation='relu',
                               padding='same',
                               name='fc7')(net['fc6'])
    # x = Dropout(0.5, name='drop7')(x)
    # Block 6
    net['conv6_1'] = Convolution2D(256, (1, 1),
                                   activation='relu',
                                   padding='same',
                                   name='conv6_1')(net['fc7'])

    net['conv6_2'] = Convolution2D(512, (3, 3),
                                   strides=(2, 2),
                                   activation='relu',
                                   padding='same',
                                   name='conv6_2')(net['conv6_1'])
    # Block 7
    net['conv7_1'] = Convolution2D(128, (1, 1),
                                   activation='relu',
                                   padding='same',
                                   name='conv7_1')(net['conv6_2'])
    net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
    net['conv7_2'] = Convolution2D(256, (3, 3),
                                   strides=(2, 2),
                                   activation='relu',
                                   padding='valid',
                                   name='conv7_2')(net['conv7_2'])
    # Block 8
    net['conv8_1'] = Convolution2D(128, (1, 1),
                                   activation='relu',
                                   padding='same',
                                   name='conv8_1')(net['conv7_2'])
    net['conv8_2'] = Convolution2D(256, (3, 3),
                                   strides=(2, 2),
                                   activation='relu',
                                   padding='same',
                                   name='conv8_2')(net['conv8_1'])
    # Last Pool
    net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
    # Prediction from conv4_3
    net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
    num_priors = 3
    x = Convolution2D(num_priors * 4, (3, 3),
                      padding='same',
                      name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
    net['conv4_3_norm_mbox_loc'] = x
    flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
    net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
    name = 'conv4_3_norm_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, (3, 3),
                      padding='same',
                      name=name)(net['conv4_3_norm'])
    net['conv4_3_norm_mbox_conf'] = x
    flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
    net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
    priorbox = PriorBox(img_size,
                        30.0,
                        aspect_ratios=[2],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv4_3_norm_mbox_priorbox')
    net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
    # Prediction from fc7
    num_priors = 6
    net['fc7_mbox_loc'] = Convolution2D(num_priors * 4, (3, 3),
                                        padding='same',
                                        name='fc7_mbox_loc')(net['fc7'])
    flatten = Flatten(name='fc7_mbox_loc_flat')
    net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
    name = 'fc7_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    net['fc7_mbox_conf'] = Convolution2D(num_priors * num_classes, (3, 3),
                                         padding='same',
                                         name=name)(net['fc7'])
    flatten = Flatten(name='fc7_mbox_conf_flat')
    net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
    priorbox = PriorBox(img_size,
                        60.0,
                        max_size=114.0,
                        aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='fc7_mbox_priorbox')
    net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
    # Prediction from conv6_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, (3, 3),
                      padding='same',
                      name='conv6_2_mbox_loc')(net['conv6_2'])
    net['conv6_2_mbox_loc'] = x
    flatten = Flatten(name='conv6_2_mbox_loc_flat')
    net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
    name = 'conv6_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, (3, 3),
                      padding='same',
                      name=name)(net['conv6_2'])
    net['conv6_2_mbox_conf'] = x
    flatten = Flatten(name='conv6_2_mbox_conf_flat')
    net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
    priorbox = PriorBox(img_size,
                        114.0,
                        max_size=168.0,
                        aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv6_2_mbox_priorbox')
    net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
    # Prediction from conv7_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, (3, 3),
                      padding='same',
                      name='conv7_2_mbox_loc')(net['conv7_2'])
    net['conv7_2_mbox_loc'] = x
    flatten = Flatten(name='conv7_2_mbox_loc_flat')
    net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
    name = 'conv7_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, (3, 3),
                      padding='same',
                      name=name)(net['conv7_2'])
    net['conv7_2_mbox_conf'] = x
    flatten = Flatten(name='conv7_2_mbox_conf_flat')
    net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
    priorbox = PriorBox(img_size,
                        168.0,
                        max_size=222.0,
                        aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv7_2_mbox_priorbox')
    net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])
    # Prediction from conv8_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, (3, 3),
                      padding='same',
                      name='conv8_2_mbox_loc')(net['conv8_2'])
    net['conv8_2_mbox_loc'] = x
    flatten = Flatten(name='conv8_2_mbox_loc_flat')
    net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
    name = 'conv8_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, (3, 3),
                      padding='same',
                      name=name)(net['conv8_2'])
    net['conv8_2_mbox_conf'] = x
    flatten = Flatten(name='conv8_2_mbox_conf_flat')
    net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
    priorbox = PriorBox(img_size,
                        222.0,
                        max_size=276.0,
                        aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv8_2_mbox_priorbox')
    net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])
    # Prediction from pool6
    num_priors = 6
    x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])
    net['pool6_mbox_loc_flat'] = x
    name = 'pool6_mbox_conf_flat'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Dense(num_priors * num_classes, name=name)(net['pool6'])
    net['pool6_mbox_conf_flat'] = x
    priorbox = PriorBox(img_size,
                        276.0,
                        max_size=330.0,
                        aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='pool6_mbox_priorbox')
    target_shape = (1, 1, 256)
    net['pool6_reshaped'] = Reshape(target_shape,
                                    name='pool6_reshaped')(net['pool6'])
    net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])
    # Gather all predictions
    net['mbox_loc'] = concatenate([
        net['conv4_3_norm_mbox_loc_flat'], net['fc7_mbox_loc_flat'],
        net['conv6_2_mbox_loc_flat'], net['conv7_2_mbox_loc_flat'],
        net['conv8_2_mbox_loc_flat'], net['pool6_mbox_loc_flat']
    ],
                                  axis=1,
                                  name='mbox_loc')
    net['mbox_conf'] = concatenate([
        net['conv4_3_norm_mbox_conf_flat'], net['fc7_mbox_conf_flat'],
        net['conv6_2_mbox_conf_flat'], net['conv7_2_mbox_conf_flat'],
        net['conv8_2_mbox_conf_flat'], net['pool6_mbox_conf_flat']
    ],
                                   axis=1,
                                   name='mbox_conf')
    net['mbox_priorbox'] = concatenate([
        net['conv4_3_norm_mbox_priorbox'], net['fc7_mbox_priorbox'],
        net['conv6_2_mbox_priorbox'], net['conv7_2_mbox_priorbox'],
        net['conv8_2_mbox_priorbox'], net['pool6_mbox_priorbox']
    ],
                                       axis=1,
                                       name='mbox_priorbox')
    num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
    net['mbox_loc'] = Reshape((num_boxes, 4),
                              name='mbox_loc_final')(net['mbox_loc'])
    net['mbox_conf'] = Reshape((num_boxes, num_classes),
                               name='mbox_conf_logits')(net['mbox_conf'])
    net['mbox_conf'] = Activation('softmax',
                                  name='mbox_conf_final')(net['mbox_conf'])
    net['predictions'] = concatenate(
        [net['mbox_loc'], net['mbox_conf'], net['mbox_priorbox']],
        axis=2,
        #axis = 0,
        name='predictions')
    model = Model(net['input'], net['predictions'])
    return model
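
# A hedged usage sketch (not from the original source): it assumes the custom
# Normalize and PriorBox layers this file imports are available. Assuming the
# PriorBox layer emits 4 coordinates and 4 variances per box (as in the
# reference ssd_keras port), 'predictions' has shape
# (batch, num_boxes, 4 + num_classes + 8).
model = SSD300((300, 300, 3), num_classes=21)
model.summary()
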