Exemple #1
0
# --- Transfer learning: ImageNet-pretrained Xception backbone + small custom head ---
model_Xception_conv = Xception(weights='imagenet', include_top=False)
#model_vgg16_conv.summary()

# Define the network input (229x229 RGB, channels-last).
# NOTE(review): Xception's canonical input size is 299x299 -- "229" looks like a
# digit-swap typo; include_top=False tolerates other sizes, but confirm intent.
input = Input(shape=(229, 229, 3), name='image_input')

#Use the generated model
output_Xception_conv = model_Xception_conv(input)
# Freeze backbone layers 0..111 so only the later Xception layers and the new
# head below are updated during training.
for i, layer in enumerate(model_Xception_conv.layers):
    print(layer)
    if i <= 111:
        layer.trainable = False
model_Xception_conv.summary()
#Conv layers
x = ZeroPadding2D((1, 1), name='zeropadding2d_43')(output_Xception_conv)
x = Convolution2D(128, 3, 3)(x)  # 128 3x3 filters; BN + ReLU applied as separate layers below
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
#Fully-connected layers
x = Flatten(name='flatten')(x)
x = Dense(128, activation='relu', name='fc1')(x)
x = Dropout(0.5)(x)
x = Dense(64, activation='relu', name='fc2')(x)
x = Dropout(0.5)(x)
# Single sigmoid unit -> binary classification head.
x = Dense(1, activation='sigmoid', name='predictions')(x)

# Legacy Keras 1 keyword names (input=/output=); Keras 2 renamed them inputs=/outputs=.
my_model = Model(input=input, output=x)

#In the summary, weights and layers from Xception part will be hidden, but they will be fit during training
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    '''Residual building block whose shortcut path contains a conv layer.

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer on the main path
        filters: list of three integers, the filter counts of the three
            conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: stride applied by both the first main-path conv and the
            shortcut conv (default (2, 2), i.e. spatial downsampling)
    # Returns
        Output tensor of the block (main path + projected shortcut, ReLU'd).
    '''
    eps = 1.1e-5  # BatchNormalization epsilon
    f1, f2, f3 = filters
    conv_base = 'res' + str(stage) + block + '_branch'
    bn_base = 'bn' + str(stage) + block + '_branch'
    scale_base = 'scale' + str(stage) + block + '_branch'

    # Main path: 1x1 (strided) -> kxk -> 1x1, each followed by BN + Scale.
    out = Convolution2D(f1, 1, 1, subsample=strides,
                        name=conv_base + '2a', bias=False)(input_tensor)
    out = BatchNormalization(epsilon=eps, axis=bn_axis,
                             name=bn_base + '2a')(out)
    out = Scale(axis=bn_axis, name=scale_base + '2a')(out)
    out = Activation('relu', name=conv_base + '2a_relu')(out)

    out = ZeroPadding2D((1, 1), name=conv_base + '2b_zeropadding')(out)
    out = Convolution2D(f2, kernel_size, kernel_size,
                        name=conv_base + '2b', bias=False)(out)
    out = BatchNormalization(epsilon=eps, axis=bn_axis,
                             name=bn_base + '2b')(out)
    out = Scale(axis=bn_axis, name=scale_base + '2b')(out)
    out = Activation('relu', name=conv_base + '2b_relu')(out)

    out = Convolution2D(f3, 1, 1, name=conv_base + '2c', bias=False)(out)
    out = BatchNormalization(epsilon=eps, axis=bn_axis,
                             name=bn_base + '2c')(out)
    out = Scale(axis=bn_axis, name=scale_base + '2c')(out)

    # Shortcut path: strided 1x1 projection so shapes match for the merge.
    residual = Convolution2D(f3, 1, 1, subsample=strides,
                             name=conv_base + '1', bias=False)(input_tensor)
    residual = BatchNormalization(epsilon=eps, axis=bn_axis,
                                  name=bn_base + '1')(residual)
    residual = Scale(axis=bn_axis, name=scale_base + '1')(residual)

    # Element-wise sum of the two paths, then the block's final ReLU.
    out = merge([out, residual], mode='sum', name='res' + str(stage) + block)
    return Activation('relu', name='res' + str(stage) + block + '_relu')(out)
Exemple #3
0
 def ConvBlock(self, layers, filters):
     """Append `layers` (1-px-padded 3x3 relu conv) pairs, then one 2x2 max-pool,
     to self.model in place."""
     net = self.model
     for _ in range(layers):
         net.add(ZeroPadding2D((1, 1)))
         net.add(Convolution2D(filters, 3, 3, activation='relu'))
     net.add(MaxPooling2D((2, 2), strides=(2, 2)))
def densenet161_model(img_rows,
                      img_cols,
                      color_type=1,
                      nb_dense_block=4,
                      growth_rate=48,
                      nb_filter=96,
                      reduction=0.5,
                      dropout_rate=0.0,
                      weight_decay=1e-4,
                      num_classes=None):
    '''
    DenseNet 161 Model for Keras
    Model Schema is based on 
    https://github.com/flyyufelix/DenseNet-Keras
    ImageNet Pretrained Weights 
    Theano: https://drive.google.com/open?id=0Byy2AcGyEVxfVnlCMlBGTDR3RGs
    TensorFlow: https://drive.google.com/open?id=0Byy2AcGyEVxfUDZwVjU2cFNidTA
    # Arguments
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters
        reduction: reduction factor of transition blocks.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        classes: optional number of classes to classify images
        weights_path: path to pre-trained weights
    # Returns
        A Keras model instance.

    NOTE(review): img_rows/img_cols/color_type are accepted but never used --
    the input shape is hard-coded to 224x224(x3) below; nb_filter is likewise
    overridden to 96. Confirm before relying on these parameters.
    '''
    eps = 1.1e-5  # BatchNormalization epsilon matching the reference Caffe model

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends.
    # concat_axis is a module-level global consumed by dense_block/transition_block.
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    nb_filter = 96
    nb_layers = [6, 12, 36, 24]  # For DenseNet-161

    # Initial convolution: 7x7/2 conv -> BN -> Scale -> ReLU -> 3x3/2 max-pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter,
                      7,
                      7,
                      subsample=(2, 2),
                      name='conv1',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks; each (except the last) is followed by a transition
    # block that compresses the channel count by `compression`.
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2  # stage numbering starts at 2 (stage 1 = stem above)
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # Final dense block has no trailing transition.
    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name='conv' + str(final_stage) + '_blk_bn')(x)
    x = Scale(axis=concat_axis,
              name='conv' + str(final_stage) + '_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)

    # Original ImageNet classifier head (1000-way) -- built only so the
    # pre-trained fc6 weights can be loaded by name below.
    x_fc = GlobalAveragePooling2D(name='pool' + str(final_stage))(x)
    x_fc = Dense(1000, name='fc6')(x_fc)
    x_fc = Activation('softmax', name='prob')(x_fc)

    model = Model(img_input, x_fc, name='densenet')
    # densenet161_weights is expected to be defined at module level.
    weights_path = densenet161_weights

    #    if K.image_dim_ordering() == 'th':
    #      # Use pre-trained weights for Theano backend
    #      weights_path = 'imagenet_models/densenet161_weights_th.h5'
    #    else:
    #      # Use pre-trained weights for Tensorflow backend
    #      weights_path = 'imagenet_models/densenet161_weights_tf.h5'

    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    x_newfc = GlobalAveragePooling2D(name='pool' + str(final_stage))(x)
    x_newfc = Dense(num_classes, name='fc6')(x_newfc)
    x_newfc = Activation('softmax', name='prob')(x_newfc)

    model = Model(img_input, x_newfc)

    # Learning rate is changed to 0.001
    #    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    #    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model
def final_structure(categorical, n):
    """Build a VGG-like Sequential CNN over 224x224x3 inputs.

    Thirteen (1-px zero-pad + 4x4 stride-2 relu conv) pairs in five stages
    of 64/128/256/512/512 filters, followed by two 2000-unit dense layers
    with dropout and a softmax output.

    Args:
        categorical: if truthy, the head is a single n-way softmax; otherwise
            a 20-unit relu layer is inserted before the n-way softmax.
        n: number of output units in the final layer.

    Returns:
        The (uncompiled) Keras Sequential model.

    NOTE(review): every conv uses strides=(2, 2), so 224px inputs shrink by
    half per conv and the spatial size collapses before all 13 convs are
    applied -- confirm the intended strides before training.
    """
    model = Sequential()

    # (filters, conv count) per stage; structure identical to the previous
    # hand-unrolled version, just generated by a loop.
    stages = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    first = True
    for filters, n_convs in stages:
        for _ in range(n_convs):
            if first:
                # The very first layer carries the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
                first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, kernel_size=(4, 4),
                                    strides=(2, 2), activation='relu'))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(2000, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2000, activation='relu'))
    model.add(Dropout(0.5))
    if categorical:
        model.add(Dense(n, activation='softmax'))
    else:
        model.add(Dense(20, activation='relu'))
        model.add(Dense(n, activation='softmax'))

    return model
Exemple #6
0
    def _VGG_16(self):
        '''
        Implementation of VGG 16-layer net.

        Builds the standard VGG-16 convolutional stack (stages of
        64/128/256/512/512 filters, each conv preceded by 1-px zero
        padding and each stage closed by a 2x2 max-pool), followed by
        two 2048-unit dense layers with dropout and a softmax over
        self.nb_classes, then compiles the model with SGD.

        Returns:
            The compiled Keras Sequential model.
        '''
        # Parenthesized form works identically under Python 2 for a single
        # argument and is forward-compatible with Python 3.
        print('Compiling VGG Net...')

        model = Sequential()
        k = self.kernel_size
        # (filters, number of conv layers) for each VGG block.
        stages = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
        first = True
        for filters, n_convs in stages:
            for _ in range(n_convs):
                if first:
                    # First layer pair carries the input shape (the
                    # input_shape on the conv is redundant but harmless,
                    # kept from the original).
                    model.add(ZeroPadding2D((1, 1),
                                            input_shape=self.input_shape))
                    model.add(
                        Convolution2D(filters,
                                      k,
                                      k,
                                      activation='relu',
                                      input_shape=self.input_shape))
                    first = False
                else:
                    model.add(ZeroPadding2D((1, 1)))
                    model.add(Convolution2D(filters, k, k,
                                            activation='relu'))
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        # Classifier head.
        model.add(Flatten())
        model.add(Dense(2048, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(2048, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
        # BUG FIX: previously compiled with the string 'sgd', which silently
        # discarded the configured learning rate / decay / momentum above;
        # pass the optimizer instance instead.
        model.compile(optimizer=sgd, loss='categorical_crossentropy')
        return model
Exemple #7
0
def VGG_16(weights_path=None):
    """VGG-16 with a 365-way softmax head, compiled with SGD.

    Channels-first 3x224x224 input; standard VGG-16 conv stack followed by
    two 4096-unit dense layers with dropout.

    Args:
        weights_path: optional path to an HDF5 weights file; when None
            (the default) no weights are loaded.

    Returns:
        The compiled Keras Sequential model.
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(365, activation='softmax'))

    # BUG FIX: the previous unconditional load_weights() crashed with the
    # default weights_path=None; only load when a path is actually supplied
    # (matches the guard used by the other model builders in this file).
    if weights_path:
        model.load_weights(weights_path)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    return model
Exemple #8
0
def VGG_16(spatial_size, classes, channels, channel_first=True, weights_path=None):
    """VGG-16 (VGG-Face style) with the 2622-way head swapped for `classes`.

    Args:
        spatial_size: input height and width.
        classes: number of output classes for the replacement head.
        channels: number of input channels.
        channel_first: if True, input is (channels, H, W); else (H, W, channels).
        weights_path: optional HDF5 weights file, loaded before the original
            2622-way softmax layer is popped and replaced.

    Returns:
        The Keras Sequential model (uncompiled).
    """
    if channel_first:
        input_shape = (channels, spatial_size, spatial_size)
    else:
        input_shape = (spatial_size, spatial_size, channels)

    model = Sequential()
    # First padding layer carries the input shape.
    model.add(ZeroPadding2D((1, 1), input_shape=input_shape))

    # (filters, conv count) per VGG stage; each conv is preceded by 1-px
    # padding and each stage ends with a 2x2 max-pool.
    first_conv = True
    for filters, n_convs in [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]:
        for _ in range(n_convs):
            if not first_conv:
                model.add(ZeroPadding2D((1, 1)))
            first_conv = False
            model.add(Conv2D(filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Original VGG-Face classifier head (so pre-trained weights line up).
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2622, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    # Replace the 2622-way softmax with the requested number of classes.
    model.pop()
    model.add(Dense(classes, activation='softmax'))

    return model
Exemple #9
0
def SegNet(nClasses, shape, W):
    """SegNet-style encoder/decoder segmentation network as a Sequential model.

    Args:
        nClasses: number of output classes; 1 -> sigmoid head + binary
            cross-entropy, >1 -> softmax over flattened pixels + categorical
            cross-entropy.
        shape: (height, width) of the single-channel input.
        W: path to pre-trained weights; '' skips loading.

    Returns:
        The compiled Keras Sequential model (also gets outputHeight /
        outputWidth attributes attached).
    """
    input_height = shape[0]
    input_width = shape[1]
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    model = Sequential()
    # Identity input layer just to fix the input shape (H, W, 1).
    model.add(Layer(input_shape=(input_height, input_width, 1)))

    # encoder: three conv+BN+relu+pool stages (64/128/256), then a 512 conv.
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # decoder: mirror of the encoder, upsampling back to the input resolution.
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    # 1x1 conv producing one channel per class.
    model.add(Convolution2D(nClasses, 1, 1, border_mode='valid'))

    # NOTE(review): with channels-last input (H, W, 1) the output shape is
    # (batch, H', W', nClasses), so [-2] is the width and [-1] the class
    # count -- these attribute names look like a channels-first assumption;
    # confirm before relying on them.
    model.outputHeight = model.output_shape[-2]
    model.outputWidth = model.output_shape[-1]

    if nClasses == 1:
        model.add(Activation('sigmoid'))
    if nClasses > 1:
        model.add(Activation('relu'))
        # Flatten spatial dims so softmax runs per pixel over the class axis.
        model.add(
            Reshape((shape[0] * shape[1], nClasses),
                    input_shape=(shape[0], shape[1], nClasses)))
        #        model.add(Permute((2, 1)))
        model.add(Activation('softmax'))
    if W != '':
        model.load_weights(W)
    if nClasses == 1:
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=0.0001),
                      metrics=['accuracy'])
    if nClasses > 1:
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adadelta(lr=0.0001),
                      metrics=['accuracy'])
    model.summary()
    return model
Exemple #10
0
def AlexNet(weights_path=None, heatmap=False):
    """AlexNet (Keras 1 functional API) with a 4-way softmax head.

    Args:
        weights_path: optional HDF5 weights file to load into the model.
        heatmap: if True, accept arbitrary spatial size (3, None, None) and
            replace the dense classifier with equivalent convolutions plus
            Softmax4D, yielding spatial class maps instead of a single vector.

    Returns:
        The Keras Model instance (uncompiled).
    """
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name='convpool_1')(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    # Two-branch conv: the channel axis is split in half (splittensor), each
    # half convolved separately, then re-concatenated -- the grouped
    # convolution of the original AlexNet.
    conv_2 = merge([
        Convolution2D(
            128, 5, 5, activation='relu', name='conv_2_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_2')

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)

    # Same split/convolve/concat pattern for conv_4 and conv_5.
    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Convolution2D(
            192, 3, 3, activation='relu', name='conv_4_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_4')

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Convolution2D(
            128, 3, 3, activation='relu', name='conv_5_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_5')

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name='convpool_5')(conv_5)

    if heatmap:
        # Fully convolutional classifier: 6x6 conv stands in for the first
        # dense layer, 1x1 convs for the rest; Softmax4D normalizes over the
        # channel axis at every spatial position.
        dense_1 = Convolution2D(4096, 6, 6, activation='relu',
                                name='dense_1')(dense_1)
        dense_2 = Convolution2D(4096, 1, 1, activation='relu',
                                name='dense_2')(dense_1)
        dense_3 = Convolution2D(1000, 1, 1, name='dense_3')(dense_2)
        prediction = Softmax4D(axis=1, name='softmax')(dense_3)
    else:
        dense_1 = Flatten(name='flatten')(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        # 4-way head (custom; the ImageNet original is 1000-way).
        dense_3 = Dense(4, name='dense_3')(dense_3)
        prediction = Activation('softmax', name='softmax')(dense_3)

    # Legacy Keras 1 keyword names (input=/output=).
    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
def alexnet_model(weights_path=None):
    """
    Returns a keras model for AlexNet, achieving roughly 80% at ImageNet2012 validation set
    
    Model and weights from
    https://github.com/heuritech/convnets-keras/blob/master/convnetskeras/convnets.py
    and only slightly modified to work with TF backend

    Args:
        weights_path: HDF5 weights file; defaults to 'Data/alexnet_weights.h5'
            when None.

    Returns:
        The Keras Model (uncompiled).
    """

    # Weights were trained channels-first, so force 'th' ordering globally.
    K.set_image_dim_ordering('th')
    inputs = Input(shape=(3, 227, 227))

    conv_1 = Conv2D(96, 11, strides=4, activation='relu', name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = cross_channel_normalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    # Grouped convolution: split the channel axis in two, convolve each half
    # separately, then concatenate -- as in the original AlexNet.
    conv_2 = merge([
        Conv2D(128, 5, activation="relu", name='conv_2_' + str(i + 1))
        (split_tensor(ratio_split=2, id_split=i)(conv_2)
         ) for i in range(2)], mode='concat', concat_axis=1, name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = cross_channel_normalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Conv2D(384, 3, activation='relu', name='conv_3')(conv_3)

    # Same split/convolve/concat pattern for conv_4 and conv_5.
    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Conv2D(192, 3, activation="relu", name='conv_4_' + str(i + 1))(
            split_tensor(ratio_split=2, id_split=i)(conv_4)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_4")

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Conv2D(128, 3, activation="relu", name='conv_5_' + str(i + 1))(
            split_tensor(ratio_split=2, id_split=i)(conv_5)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)

    # Standard AlexNet dense classifier (1000-way ImageNet head).
    dense_1 = Flatten(name="flatten")(dense_1)
    dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(1000, name='dense_3')(dense_3)
    prediction = Activation("softmax", name="softmax")(dense_3)

    # Legacy Keras 1 keyword names (input=/output=).
    m = Model(input=inputs, output=prediction)

    if weights_path is None:
        weights_path = 'Data/alexnet_weights.h5'
    m.load_weights(weights_path)
    # Model was trained using Theano backend
    # This changes convolutional kernels from TF to TH, great accuracy improvement
    convert_all_kernels_in_model(m)

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    # m.compile(optimizer=sgd, loss='mse')

    return m
Exemple #12
0
def VGG_19(weights_path=None, heatmap=False):
    """VGG-19 (channels-first, Keras 1 API).

    The convolutional stack is generated from a per-stage configuration;
    the classifier is either the standard dense head or, when `heatmap`
    is True, an equivalent fully-convolutional head whose Softmax4D layer
    yields class maps over arbitrary spatial sizes.

    Args:
        weights_path: optional HDF5 weights file to load.
        heatmap: build the fully-convolutional variant when True.

    Returns:
        The Keras Sequential model (uncompiled).
    """
    model = Sequential()

    # The first padding layer carries the input shape.
    if heatmap:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, None, None)))
    else:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))

    # (stage index, filters, conv count) for each VGG-19 block; every conv is
    # preceded by 1-px padding and every block ends with a 2x2 max-pool.
    cfg = [(1, 64, 2), (2, 128, 2), (3, 256, 4), (4, 512, 4), (5, 512, 4)]
    pad_needed = False  # the input padding above already covers the first conv
    for stage, filters, n_convs in cfg:
        for idx in range(1, n_convs + 1):
            if pad_needed:
                model.add(ZeroPadding2D((1, 1)))
            pad_needed = True
            model.add(Convolution2D(filters, 3, 3, activation='relu',
                                    name='conv%d_%d' % (stage, idx)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    if heatmap:
        # Fully convolutional classifier equivalent to the dense head below.
        model.add(Convolution2D(4096, 7, 7, activation='relu', name='dense_1'))
        model.add(Convolution2D(4096, 1, 1, activation='relu', name='dense_2'))
        model.add(Convolution2D(1000, 1, 1, name='dense_3'))
        model.add(Softmax4D(axis=1, name='softmax'))
    else:
        model.add(Flatten())
        model.add(Dense(4096, activation='relu', name='dense_1'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu', name='dense_2'))
        model.add(Dropout(0.5))
        model.add(Dense(1000, name='dense_3'))
        model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Exemple #13
0
# Class names for the test split of the dataset.
classes_label = getClassNames(num_classes, dataset_path, 'test')

# -----------------------------
# Building AlexNet model
# -----------------------------

img_rows, img_cols = 224, 224
input_shape = (img_rows, img_cols, 3)  # channels-last RGB input

# One-hot encode the labels for categorical cross-entropy training.
y_train = to_categorical(y_train, num_classes=num_classes)
y_val = to_categorical(y_val, num_classes=num_classes)

# Container definition and 1st layer of AlexNet:
# 96 11x11 filters, stride 4, with 2-px zero padding, then BN and 3x3/2 max-pool.
model = Sequential()
model.add(ZeroPadding2D((2, 2), input_shape=input_shape))
model.add(Convolution2D(96, (11, 11), strides=(4, 4), padding='valid'))
model.add(Activation(activation='relu'))
model.add(BatchNormalization())
# print(model.output_shape) # network dimension before max-pooling
model.add(MaxPooling2D((3, 3), strides=(2, 2)))
# print(model.output_shape) # network dimension after max-pooling

# 2nd layer: 256 5x5 filters with 2-px padding, BN, and another 3x3/2 max-pool.
model.add(ZeroPadding2D((2, 2)))
model.add(Convolution2D(256, (5, 5), padding='valid'))
model.add(Activation(activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D((3, 3), strides=(2, 2)))
# print(model.output_shape) # 2th layer dimension
#model.summary()
Exemple #14
0
# Script fragment: start of a VGG-style network. A separate ReLU Activation
# instance is created per conv layer up front; only conv1-conv4 are used in
# the lines visible here (conv5-conv13 presumably belong to later stages).
conv1 = Activation("relu")
conv2 = Activation("relu")
conv3 = Activation("relu")
conv4 = Activation("relu")
conv5 = Activation("relu")
conv6 = Activation("relu")
conv7 = Activation("relu")
conv8 = Activation("relu")
conv9 = Activation("relu")
conv10 = Activation("relu")
conv11 = Activation("relu")
conv12 = Activation("relu")
conv13 = Activation("relu")

# Stage 1: two padded 3x3 convs with 64 filters, then 2x2 max-pool.
# Input shape is taken from X_train, assumed defined earlier — TODO confirm.
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=X_train.shape[1:]))
model.add(Convolution2D(64, 3, 3))
model.add(conv1)
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3))
model.add(conv2)
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

# Stage 2: two padded 3x3 convs with 128 filters, then 2x2 max-pool.
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3))
model.add(conv3)
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3))
model.add(conv4)
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
Exemple #15
0
def get_deep_anime_model(n_outputs=1000, input_size=128):
    '''Build the deep convolutional network used for the deep anime bot.

    # Arguments
        n_outputs: number of softmax output classes.
        input_size: side length of the square, channels-first RGB input
            (shape is (3, input_size, input_size)).

    # Returns
        An uncompiled Keras Sequential model.
    '''
    conv = Sequential()

    # Block 1: 64 filters, 3x3 convs, 2x2 max-pool, batch-norm.
    conv.add(
        Convolution2D(64,
                      3,
                      3,
                      activation='relu',
                      input_shape=(3, input_size, input_size)))
    conv.add(ZeroPadding2D((1, 1)))
    conv.add(Convolution2D(64, 3, 3, activation='relu'))
    conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
    conv.add(BatchNormalization())

    # Block 2: 128 filters; the second conv is a 1x1 channel mixer.
    conv.add(ZeroPadding2D((1, 1)))
    conv.add(Convolution2D(128, 3, 3, activation='relu'))
    conv.add(Convolution2D(128, 1, 1, activation='relu'))
    conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
    conv.add(BatchNormalization())

    # Block 3: 256 filters, two 3x3 convs plus a 1x1 channel mixer.
    conv.add(ZeroPadding2D((1, 1)))
    conv.add(Convolution2D(256, 3, 3, activation='relu'))
    conv.add(ZeroPadding2D((1, 1)))
    conv.add(Convolution2D(256, 3, 3, activation='relu'))
    conv.add(Convolution2D(256, 1, 1, activation='relu'))
    conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
    conv.add(BatchNormalization())

    # Block 4: 512 filters; average-pooling instead of max before the head.
    conv.add(ZeroPadding2D((1, 1)))
    conv.add(Convolution2D(512, 3, 3, activation='relu'))
    conv.add(ZeroPadding2D((1, 1)))
    conv.add(Convolution2D(512, 3, 3, activation='relu'))
    conv.add(Convolution2D(512, 1, 1, activation='relu'))
    conv.add(AveragePooling2D((8, 8), strides=(2, 2)))
    conv.add(BatchNormalization())

    # Classifier head: two heavily-dropped 2048-unit dense layers, softmax.
    conv.add(Flatten())
    conv.add(Dropout(0.5))
    conv.add(Dense(2048))
    conv.add(BatchNormalization())
    conv.add(Dropout(0.7))
    conv.add(Dense(2048))
    conv.add(BatchNormalization())
    conv.add(Dropout(0.7))
    conv.add(Dense(n_outputs))
    conv.add(Activation('softmax'))
    # summary() prints its table itself and returns None, so it must not be
    # wrapped in print() (the original printed a spurious trailing "None").
    conv.summary()
    return conv
Exemple #16
0
def VGGSegNet(nClasses, shape, W):
    """Build and compile a SegNet-style segmentation model on a VGG16 encoder.

    # Arguments
        nClasses: number of output classes. 1 selects a sigmoid head with
            binary cross-entropy; >1 selects a per-pixel softmax with
            categorical cross-entropy.
        shape: (input_height, input_width) of the single-channel input.
        W: path to a weights file to load, or '' to skip loading.

    # Returns
        A compiled Keras Model. The spatial size of the prediction map is
        exposed as `model.outputWidth` / `model.outputHeight`.
    """
    vgg_level = 3  # decoder starts from levels[3], i.e. the block-4 pool f4
    input_height = shape[0]
    input_width = shape[1]
    img_input = Input(shape=(input_height, input_width, 1))

    # --- VGG16-style encoder; f1..f5 capture each pooled feature map. ---
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    f1 = x
    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1')(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    f4 = x

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    f5 = x

    # VGG classification head — built only so the encoder graph matches
    # stock VGG16; the decoder below branches off levels[vgg_level] instead.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1000, activation='softmax', name='predictions')(x)

    levels = [f1, f2, f3, f4, f5]

    o = levels[vgg_level]

    # --- SegNet-style decoder: pad -> conv -> batch-norm, upsampling 2x
    # before each stage to recover spatial resolution. ---
    o = (ZeroPadding2D((1, 1)))(o)
    o = (Conv2D(512, (3, 3), padding='valid'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (ZeroPadding2D((1, 1)))(o)
    o = (Conv2D(256, (3, 3), padding='valid'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (ZeroPadding2D((1, 1)))(o)
    o = (Conv2D(128, (3, 3), padding='valid'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (ZeroPadding2D((1, 1)))(o)
    o = (Conv2D(64, (3, 3), padding='valid'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (ZeroPadding2D((1, 1)))(o)
    o = (Conv2D(64, (3, 3), padding='valid'))(o)
    o = (BatchNormalization())(o)

    # Per-pixel class scores; a throwaway Model probes the output shape.
    o = Conv2D(nClasses, (3, 3), padding='same')(o)
    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    if nClasses > 1:
        # Flatten spatial dims and move them first so softmax runs per pixel.
        o = (Reshape((-1, outputHeight * outputWidth)))(o)
        o = (Permute((2, 1)))(o)
        o = (Activation('softmax'))(o)
    if nClasses == 1:
        o = (Activation('sigmoid'))(o)
    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    if W != '':
        model.load_weights(W)
    if nClasses > 1:
        model.compile(loss="categorical_crossentropy",
                      optimizer=Adam(lr=1e-4),
                      metrics=['accuracy'])
    if nClasses == 1:
        model.compile(loss="binary_crossentropy",
                      optimizer=Adam(lr=1e-4),
                      metrics=['accuracy'])
    model.summary()

    return model
Exemple #17
0
def test1_model12(categorical, n):
    """Build a VGG-like Sequential classifier for 224x224 RGB input.

    Six stages of three padded 3x3 convs (64/128/256/512/512/1024 filters),
    each followed by a 2x2 max-pool, then two 4096-unit dense layers with
    dropout and an n-way softmax head (with an extra 20-unit ReLU layer in
    the non-categorical variant).

    # Returns
        A (model, 'dense_3') tuple; the string names the logits layer.
    """
    model = Sequential()

    needs_input_shape = True
    for width in (64, 128, 256, 512, 512, 1024):
        for _ in range(3):
            if needs_input_shape:
                # Only the very first layer declares the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
                needs_input_shape = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(width, (3, 3)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096))
    model.add(Dropout(0.5))
    model.add(Dense(4096))
    model.add(Dropout(0.5))
    if categorical:
        model.add(Dense(n, activation='softmax'))
    else:
        model.add(Dense(20, activation='relu'))
        model.add(Dense(n, activation='softmax'))

    return model, 'dense_3'
        self.model.save_weights(SAVE_MODEL_FOLDER+"/policy_net_weights_h07_"+str(epoch)+".h5")
        self.model.optimizer.lr = float(open(SAVE_MODEL_FOLDER+"/lr.txt").read())
            
# NOTE(review): this block uses Python 2 print statements; it cannot run
# under Python 3, unlike other parts of this file. k, load_dataset,
# hidden_layers and SAVE_MODEL_FOLDER are defined elsewhere — TODO confirm.
if __name__ == "__main__":

    # Ensure the model-output directory exists before any saving happens.
    if not os.path.exists(SAVE_MODEL_FOLDER):
        os.mkdir(SAVE_MODEL_FOLDER)

    print "load dataset.."
    train_x, train_y = load_dataset()
    print "..finish"

    print "make model.."
    model = Sequential()

    # Input stem: pad a 38-plane 9x9 board encoding, 5x5 conv with k filters.
    model.add(ZeroPadding2D(padding=(1, 1), input_shape=(38, 9, 9)))
    model.add(Convolution2D(k, 5, 5))
    model.add(Activation('relu'))

    # Seven identical hidden conv stages appended by the helper.
    for i in range(0, 7):
        model = hidden_layers(model, k)

    # Collapse to a single-channel map, flatten, softmax over positions.
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(Convolution2D(1, 1, 1))
    model.add(Activation('relu'))

    model.add(Flatten())
    model.add(Activation('softmax'))

    print "..finish"

Exemple #19
0
def alexnet_model(img_shape=(50, 50, 3), n_classes=2, l2_reg=0.,
    weights=None):
    """Build an AlexNet-style Sequential classifier.

    # Arguments
        img_shape: input image shape (height, width, channels).
        n_classes: number of softmax output classes.
        l2_reg: L2 regularization factor applied to the first conv layer.
        weights: optional path to a weights file to load into the model.

    # Returns
        A Keras Sequential model (uncompiled).
    """
    net = Sequential()

    def _bn_relu():
        # Shared batch-norm + ReLU tail used after every conv/dense stage.
        net.add(BatchNormalization())
        net.add(Activation('relu'))

    # Layer 1: 11x11 conv, 96 filters, L2-regularized, then 2x2 max-pool.
    net.add(Conv2D(96, (11, 11), input_shape=img_shape,
        padding='same', kernel_regularizer=l2(l2_reg)))
    _bn_relu()
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 2: 5x5 conv, 256 filters.
    net.add(Conv2D(256, (5, 5), padding='same'))
    _bn_relu()
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 3: padded 3x3 conv, 512 filters.
    net.add(ZeroPadding2D((1, 1)))
    net.add(Conv2D(512, (3, 3), padding='same'))
    _bn_relu()
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 4: padded 3x3 conv, 1024 filters (no pooling).
    net.add(ZeroPadding2D((1, 1)))
    net.add(Conv2D(1024, (3, 3), padding='same'))
    _bn_relu()

    # Layer 5: padded 3x3 conv, 1024 filters, final spatial pooling.
    net.add(ZeroPadding2D((1, 1)))
    net.add(Conv2D(1024, (3, 3), padding='same'))
    _bn_relu()
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 6: first dense stage with dropout.
    net.add(Flatten())
    net.add(Dense(3072))
    _bn_relu()
    net.add(Dropout(0.5))

    # Layer 7: second dense stage with dropout.
    net.add(Dense(4096))
    _bn_relu()
    net.add(Dropout(0.5))

    # Layer 8: class logits, batch-norm, softmax output.
    net.add(Dense(n_classes))
    net.add(BatchNormalization())
    net.add(Activation('softmax'))

    if weights is not None:
        net.load_weights(weights)

    return net
def hidden_layers(m, k):
    """Append one padded 3x3 conv stage (k filters + ReLU) to *m*; return it."""
    for stage in (ZeroPadding2D(padding=(1, 1)),
                  Convolution2D(k, 3, 3),
                  Activation('relu')):
        m.add(stage)
    return m
Exemple #21
0
def discriminator():
    """Build the GAN discriminator with minibatch discrimination.

    A convolutional/locally-connected feature extractor is wrapped in its
    own Model (`dnn`) and applied to a fresh 25x25x1 input; minibatch
    features are concatenated with the extracted features before the two
    heads.

    # Returns
        A Keras Model mapping an image to [fake, aux] sigmoid outputs.
    """
    image = Input(shape=(25, 25, 1))

    # block 1: normal 5x5 conv,
    # *NO* batchnorm (recommendation from [arXiv/1511.06434])
    x = Conv2D(32, (5, 5), padding='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    # block 2: 'same' bordered 5x5 locally connected block with batchnorm and
    # 2x2 subsampling
    x = ZeroPadding2D((2, 2))(x)
    x = LocallyConnected2D(8, (5, 5), padding='valid', strides=(2, 2))(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    # block 2: 'same' bordered 5x5 locally connected block with batchnorm
    x = ZeroPadding2D((2, 2))(x)
    x = LocallyConnected2D(8, (5, 5), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    # block 3: 'same' bordered 3x3 locally connected block with batchnorm and
    # 2x2 subsampling
    x = ZeroPadding2D((1, 1))(x)
    x = LocallyConnected2D(8, (3, 3), padding='valid', strides=(2, 2))(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = AveragePooling2D((2, 2))(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    # Fresh input for the outer model; the feature extractor is reused on it.
    image = Input(shape=(25, 25, 1))

    dnn_out = dnn(image)

    # nb of features to obtain
    nb_features = 20

    # dim of kernel space
    vspace_dim = 10

    # creates the kernel space for the minibatch discrimination
    K_x = Dense3D(nb_features, vspace_dim)(dnn_out)

    minibatch_featurizer = Lambda(minibatch_discriminator,
                                  output_shape=minibatch_output_shape)

    # concat the minibatch features with the normal ones.
    # BUG FIX: Concatenate is a layer class — it must be instantiated and
    # then called on the tensor list. The original `Concatenate([...])`
    # passed the tensors as the constructor's axis argument, producing a
    # layer object (not a tensor) that the Dense heads below would reject.
    features = Concatenate()([minibatch_featurizer(K_x), dnn_out])

    # fake output tracks binary fake / not-fake, and the auxiliary requires
    # reconstruction of latent features, in this case, labels
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(1, activation='sigmoid', name='auxiliary')(features)

    return Model(inputs=image, outputs=[fake, aux])
def segnet(nClasses,
           optimizer=None,
           input_height=360,
           input_width=480,
           data_format="channels_first"):
    """Build a basic SegNet encoder/decoder segmentation model.

    # Arguments
        nClasses: number of per-pixel output classes.
        optimizer: if given, the model is compiled with categorical
            cross-entropy and this optimizer.
        input_height, input_width: spatial size of the 3-channel input.
        data_format: "channels_first" or "channels_last".

    # Returns
        A Keras Sequential model with `outputHeight`/`outputWidth`
        attributes describing the prediction map's spatial size.
    """
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    model = Sequential()
    if data_format == "channels_first":
        model.add(Layer(input_shape=(3, input_height, input_width)))
    else:
        model.add(Layer(input_shape=(input_height, input_width, 3)))

    # encoder: pad -> conv -> batch-norm -> ReLU (-> max-pool)
    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(filter_size,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(pool_size, pool_size),
                     data_format=data_format))

    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(128,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(pool_size, pool_size),
                     data_format=data_format))

    # CONSISTENCY FIX: this ZeroPadding2D was the only one missing
    # data_format, which breaks the model for non-default formats.
    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(256,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(pool_size, pool_size),
                     data_format=data_format))

    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(512,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # decoder: conv -> batch-norm, upsampling 2x before each later stage
    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(512,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())

    model.add(
        UpSampling2D(size=(pool_size, pool_size), data_format=data_format))
    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(256,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())

    model.add(
        UpSampling2D(size=(pool_size, pool_size), data_format=data_format))
    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(128,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())

    model.add(
        UpSampling2D(size=(pool_size, pool_size), data_format=data_format))
    model.add(ZeroPadding2D(padding=(pad, pad), data_format=data_format))
    model.add(
        Convolution2D(filter_size,
                      kernel,
                      kernel,
                      border_mode='valid',
                      data_format=data_format))
    model.add(BatchNormalization())

    # 1x1 conv producing the per-pixel class scores.
    model.add(
        Convolution2D(nClasses,
                      1,
                      1,
                      border_mode='valid',
                      data_format=data_format))

    model.outputHeight = model.output_shape[-2]
    model.outputWidth = model.output_shape[-1]

    if data_format == "channels_first":
        input_shape = (nClasses, model.output_shape[-2],
                       model.output_shape[-1])
    else:
        input_shape = (model.output_shape[-2], model.output_shape[-1],
                       nClasses)
    # Flatten spatial dims and move them first so softmax runs per pixel.
    model.add(
        Reshape((nClasses, model.output_shape[-2] * model.output_shape[-1]),
                input_shape=input_shape))

    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))

    if not optimizer is None:
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
# set the input shape [2,128]
in_shp = list(X_train.shape[1:])
print(X_train.shape, in_shp)  # (110000, 2, 128) [2, 128]
classes = mods

# Build VT-CNN2 Neural Net model using Keras primitives --
#  - Reshape [N,2,128] to [N,1,2,128] on input
#  - Pass through 2 2DConv/ReLu layers
#  - Pass through 2 Dense layers (ReLu and Softmax)
#  - Perform categorical cross entropy optimization

dr = 0.5  # dropout rate (%)
model = models.Sequential()
model.add(Reshape([1] + in_shp, input_shape=in_shp))
model.add(ZeroPadding2D((0, 2), data_format="channels_first"))
model.add(
    Convolution2D(256, (1, 3),
                  activation="relu",
                  name="conv1",
                  data_format="channels_first"))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2), data_format="channels_first"))
model.add(
    Convolution2D(80, (2, 3),
                  activation="relu",
                  name="conv2",
                  data_format="channels_first"))
model.add(Dropout(dr))
model.add(Flatten())
model.add(
def vgg_image_model():
    """Build a VGG-style image feature model for 224x224 RGB input.

    Four conv stages (64/128/256/512 filters, each conv followed by
    batch-norm and LeakyReLU, each stage ending in a 2x2 max-pool), then a
    1024-unit Dense applied to the spatial feature map.

    # Returns
        A Keras Model from the image input to the dense feature tensor.
    """

    def _unit(t, filters):
        # pad -> 3x3 conv -> channel-axis batch-norm -> LeakyReLU
        t = ZeroPadding2D((1, 1))(t)
        t = Convolution2D(filters, (3, 3))(t)
        t = BatchNormalization(axis=3)(t)
        return LeakyReLU()(t)

    x_input = Input((224, 224, 3))

    # Stage 1: two 64-filter units.
    x = _unit(x_input, 64)
    x = _unit(x, 64)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Stage 2: two 128-filter units.
    x = _unit(x, 128)
    x = _unit(x, 128)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Stage 3: three 256-filter units.
    x = _unit(x, 256)
    x = _unit(x, 256)
    x = _unit(x, 256)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Stage 4: three 512-filter units.
    x = _unit(x, 512)
    x = _unit(x, 512)
    x = _unit(x, 512)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # NOTE(review): Dense is applied to the 4-D feature map without a
    # Flatten, so it acts along the channel axis only — confirm intended.
    x = Dense(1024)(x)

    model_img = Model(inputs=x_input, outputs=x)

    return model_img
Exemple #25
0
def MusicTaggerCRNN(weights='msd', input_tensor=None, include_top=True):
    '''Instantiate the MusicTaggerCRNN architecture,
    optionally loading weights pre-trained
    on Million Song Dataset. Note that when using TensorFlow,
    for best performance you should set
    `image_dim_ordering="tf"` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.

    For preparing mel-spectrogram input, see
    `audio_conv_utils.py` in [applications](https://github.com/fchollet/keras/tree/master/keras/applications).
    You will need to install [Librosa](http://librosa.github.io/librosa/)
    to use it.

    # Arguments
        weights: one of `None` (random initialization)
            or "msd" (pre-training on the Million Song Dataset).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        include_top: whether to include the 1 fully-connected
            layer (output layer) at the top of the network.
            If False, the network outputs 32-dim features.


    # Returns
        A Keras model instance.
    '''
    if weights not in {'msd', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `msd` '
                         '(pre-training on Million Song Dataset).')

    # Determine proper input shape
    # (96 mel bands x 1366 time frames, single channel)
    if K.image_dim_ordering() == 'th':
        input_shape = (1, 96, 1366)
    else:
        input_shape = (96, 1366, 1)

    if input_tensor is None:
        melgram_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            melgram_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            melgram_input = input_tensor

    # Determine input axis
    if K.image_dim_ordering() == 'th':
        channel_axis = 1
        freq_axis = 2
        time_axis = 3
    else:
        channel_axis = 3
        freq_axis = 1
        time_axis = 2

    # Input block: pad the time axis, normalize per frequency band.
    x = ZeroPadding2D(padding=(0, 37))(melgram_input)
    x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

    # Conv block 1
    # NOTE: legacy Keras 1 API throughout (positional kernel dims,
    # border_mode, BatchNormalization mode=0).
    x = Convolution2D(64, 3, 3, border_mode='same', name='conv1')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn1')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
    x = Dropout(0.1, name='dropout1')(x)

    # Conv block 2
    x = Convolution2D(128, 3, 3, border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
    x = Dropout(0.1, name='dropout2')(x)

    # Conv block 3
    x = Convolution2D(128, 3, 3, border_mode='same', name='conv3')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
    x = Dropout(0.1, name='dropout3')(x)

    # Conv block 4
    x = Convolution2D(128, 3, 3, border_mode='same', name='conv4')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn4')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
    x = Dropout(0.1, name='dropout4')(x)

    # reshaping: move to (time, channels) so the GRUs see a 15-step sequence
    if K.image_dim_ordering() == 'th':
        x = Permute((3, 1, 2))(x)
    x = Reshape((15, 128))(x)

    # GRU block 1, 2, output
    x = GRU(32, return_sequences=True, name='gru1')(x)
    x = GRU(32, return_sequences=False, name='gru2')(x)
    x = Dropout(0.3)(x)
    if include_top:
        x = Dense(50, activation='sigmoid', name='output')(x)

    # Create model
    model = Model(melgram_input, x)
    if weights is None:
        return model
    else:
        # Load input
        # Pretrained weights only exist for the Theano dim ordering.
        if K.image_dim_ordering() == 'tf':
            raise RuntimeError("Please set image_dim_ordering == 'th'."
                               "You can set it at ~/.keras/keras.json")

        model.load_weights('data/music_tagger_crnn_weights_%s.h5' % K._BACKEND,
                           by_name=True)
        return model
Exemple #26
0
def bcrnn(input, nclass):
    """CRNN for sequence recognition: a VGG-style convolutional front end
    followed by two bidirectional LSTM layers.

    Arguments:
        input: 4-D image tensor (output of a Keras Input/layer) fed to the
            first convolution.
        nclass: number of classes for the final per-timestep softmax.

    Returns:
        Per-timestep softmax predictions over `nclass` classes.

    NOTE(review): relies on a module-level `rnnunit` for the LSTM width —
    defined elsewhere in this file.
    """

    def relu_conv(tensor, filters, name, pad='same', ksize=(3, 3)):
        # ReLU convolution used throughout the front end.
        return Conv2D(filters,
                      kernel_size=ksize,
                      activation='relu',
                      padding=pad,
                      name=name)(tensor)

    x = relu_conv(input, 64, 'conv1')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
    x = relu_conv(x, 128, 'conv2')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)
    x = relu_conv(x, 256, 'conv3')
    x = relu_conv(x, 256, 'conv4')

    # Pad width only, then pool with stride (2, 1): height is halved while
    # the horizontal (sequence) resolution is kept.
    x = ZeroPadding2D(padding=(0, 1))(x)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 1),
                     padding='valid',
                     name='pool3')(x)

    x = relu_conv(x, 512, 'conv5', pad='valid')
    x = BatchNormalization(axis=1)(x)
    x = relu_conv(x, 512, 'conv6', pad='valid')
    x = BatchNormalization(axis=1)(x)
    x = ZeroPadding2D(padding=(0, 1))(x)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 1),
                     padding='valid',
                     name='pool4')(x)
    x = relu_conv(x, 512, 'conv7', pad='valid', ksize=(2, 2))

    # Swap axes 1 and 2 so the width dimension leads, then flatten each
    # column into a feature vector: (batch, steps, features) for the RNNs.
    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)

    x = Bidirectional(LSTM(rnnunit, return_sequences=True), name='blstm1-1')(x)
    x = Dense(rnnunit, name='blstm1_out-1', activation='linear')(x)
    x = Bidirectional(LSTM(rnnunit, return_sequences=True), name='blstm2-1')(x)
    y_pred = Dense(nclass, name='blstm2_out-2', activation='softmax')(x)

    return y_pred
def resnet152_model(img_rows, img_cols, color_type=1, num_classes=None):
    """
    Resnet 152 Model for Keras
    Model Schema and layer naming follow that of the original Caffe implementation
    https://github.com/KaimingHe/deep-residual-networks
    ImageNet Pretrained Weights 
    Theano: https://drive.google.com/file/d/0Byy2AcGyEVxfZHhUT3lWVWxRN28/view?usp=sharing
    TensorFlow: https://drive.google.com/file/d/0Byy2AcGyEVxfeXExMzNNOHpEODg/view?usp=sharing
    Parameters:
      img_rows, img_cols - resolution of inputs
      color_type - number of input channels: 1 for grayscale, 3 for color
      num_classes - number of class labels for our classification task
    Returns:
      Keras Model with ImageNet-pretrained ResNet-152 convolutional layers
      and a freshly initialised `num_classes`-way softmax head ('fc8').
    """
    # BatchNormalization epsilon; matches the value used when the Caffe
    # weights were exported, so loaded statistics stay consistent.
    eps = 1.1e-5

    # Handle Dimension Ordering for different backends.
    # bn_axis is a module-level global because the conv_block/identity_block
    # helpers (defined elsewhere in this file) read it.
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='data')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='data')

    # Stem: 7x7/2 conv -> BN -> Scale (custom per-channel affine layer,
    # presumably mirroring Caffe's separate Scale layer) -> ReLU -> 3x3/2 pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name='bn_conv1')(x)
    x = Scale(axis=bn_axis, name='scale_conv1')(x)
    x = Activation('relu', name='conv1_relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Stage 2: 3 bottleneck blocks (no spatial downsampling in its conv block).
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3: 1 conv block + 7 identity blocks (blocks 'b1'..'b7').
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for i in range(1, 8):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i))

    # Stage 4: 1 conv block + 35 identity blocks — the bulk of the 152 layers.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    for i in range(1, 36):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i))

    # Stage 5: 3 bottleneck blocks at 2048 output channels.
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # Original ImageNet head — built only so the pretrained weights
    # (including 'fc1000') can be loaded by layer name below.
    x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_fc = Flatten()(x_fc)
    x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)

    model = Model(img_input, x_fc)
    # resnet152_weights is a module-level path defined elsewhere in this file.
    weights_path = resnet152_weights
    #    if K.image_dim_ordering() == 'th':
    #      # Use pre-trained weights for Theano backend
    #      weights_path = 'imagenet_models/resnet152_weights_th.h5'
    #    else:
    #      # Use pre-trained weights for Tensorflow backend
    #      weights_path = 'imagenet_models/resnet152_weights_tf.h5'

    # by_name=True: weights attach to layers via their names, so the model
    # rebuilt below with a different head keeps the pretrained conv weights.
    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_newfc = Flatten()(x_newfc)
    x_newfc = Dense(num_classes, activation='softmax', name='fc8')(x_newfc)

    model = Model(img_input, x_newfc)

    # Learning rate is changed to 0.001
    #    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    #    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model
Exemple #28
0
def get_model(n_outputs=1000, input_size=256):
    '''Builds a Deep Convolutional Neural Network of architecture VGG-Net as described in
       paper http://arxiv.org/pdf/1409.1556.pdf and adapted with batch_norm and dropout regularization
       Returns the model ready for compilation and training or predictions
       we have commented out dropout in between the conv layers because it was not needed for our use cases. However if
       you find that your models overfit you can choose to uncomment and add them. Back into your architecture.
    '''
    net = Sequential()

    # Stage 1. The very first conv carries the input spec and — unlike every
    # other conv in the network — is NOT preceded by zero padding.
    net.add(
        Convolution2D(64,
                      3,
                      3,
                      activation='relu',
                      input_shape=(3, input_size, input_size)))
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(64, 3, 3, activation='relu'))
    net.add(MaxPooling2D((2, 2), strides=(2, 2)))
    net.add(BatchNormalization())

    # Stages 2-5, described as (filters, conv count). Each conv is a padded
    # 3x3 ReLU; each stage closes with 2x2/2 max pooling and batch norm.
    for n_filters, n_convs in ((128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(n_convs):
            net.add(ZeroPadding2D((1, 1)))
            net.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        net.add(MaxPooling2D((2, 2), strides=(2, 2)))
        net.add(BatchNormalization())

    # Classifier head: two 4096-wide batch-normalized FC layers with dropout,
    # then the n_outputs-way softmax.
    net.add(Flatten())
    for _ in range(2):
        net.add(Dropout(0.5))
        net.add(Dense(4096))
        net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(n_outputs))
    net.add(Activation('softmax'))
    print(net.summary())
    return net
Exemple #29
0
def VGG_16(weights_path=None):
    """Build the standard VGG-16 architecture for channel-first 3x224x224
    input with the original 1000-way ImageNet softmax head.

    Arguments:
        weights_path: optional path to an HDF5 weights file; when given,
            the weights are loaded into the model before returning.

    Returns:
        The (uncompiled) Sequential model.
    """
    model = Sequential()

    # Five conv stages, described as (filters, conv count). Every conv is a
    # zero-padded 3x3 ReLU; every stage ends with 2x2/2 max pooling. Only
    # the first padding layer carries the input spec.
    need_input_shape = True
    for n_filters, n_convs in ((64, 2), (128, 2), (256, 3), (512, 3),
                               (512, 3)):
        for _ in range(n_convs):
            if need_input_shape:
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                need_input_shape = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Classifier head: two 4096-wide ReLU FC layers with dropout, then the
    # final softmax.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Exemple #30
0
def VGG_16():
    '''
    Fully trainable VGG19-style model: the first four VGG-19 conv stages
    for 64x64x3 input, with a compact 512 -> 200-way softmax head.
    '''

    model = Sequential()

    # Conv stages as (stage index, filters, conv count); layer names follow
    # the VGG convention convS_I. Each conv is a zero-padded 3x3 ReLU and
    # each stage ends with 2x2/2 max pooling. The very first padding layer
    # carries the input spec.
    for stage, n_filters, n_convs in ((1, 64, 2), (2, 128, 2), (3, 256, 4),
                                      (4, 512, 4)):
        for idx in range(1, n_convs + 1):
            if stage == 1 and idx == 1:
                model.add(ZeroPadding2D((1, 1), input_shape=(64, 64, 3)))
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(
                Convolution2D(n_filters,
                              3,
                              3,
                              activation='relu',
                              name='conv%d_%d' % (stage, idx)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(512, activation='relu', name='dense_2'))
    model.add(Dropout(0.5))
    model.add(Dense(200, name='dense_3'))
    model.add(Activation("softmax"))

    return model