def __init__(self):
    # L2 regularization for the convolutional layers (TF1-era contrib API)
    reg = tf.contrib.layers.l2_regularizer(1e-3)
    model = Sequential()
    # normalize pixel values from [0, 255] to [-0.5, 0.5]
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(114, 52, 3)))
    model.add(
        Conv2D(filters=24,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu',
               kernel_regularizer=reg))
    model.add(
        Conv2D(filters=36,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu',
               kernel_regularizer=reg))
    model.add(
        Conv2D(filters=64,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='relu',
               kernel_regularizer=reg))
    model.add(Flatten())
    model.add(Dense(50))
    model.add(Dense(10))
    # LOGITS is a module-level constant naming the output layer
    model.add(Dense(6, activation='softmax', name=LOGITS))
    model.load_weights('./light_classification/weights.h5')
    self.model = model
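
# A minimal usage sketch (not from the original source): assuming the class
# defined above is called TLClassifier and images come from OpenCV, a
# prediction could be obtained roughly as follows. The class name, image file,
# and label handling are placeholders.
import numpy as np
import cv2

clf = TLClassifier()
img = cv2.imread('sample.jpg')                        # hypothetical test image
img = cv2.resize(img, (52, 114))                      # model expects (114, 52, 3) input
probs = clf.model.predict(np.expand_dims(img, 0))     # (1, 6) softmax probabilities
predicted_class = int(np.argmax(probs[0]))
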
def create_VGG16(num_fc_neurons,
                 dropout_rate,
                 num_classes=24,
                 top_model_weights_path=None,
                 img_height=224,
                 img_width=224,
                 include_loc='all',
                 activation='softmax'):
    # load pre-trained convolutional model
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=get_input_shape(img_height, img_width))

    # build a classifier model to put on top of the convolutional model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    # two fully connected blocks, named fc6 and fc7 following the VGG convention
    for i in range(6, 8):
        top_model.add(Dense(num_fc_neurons, name='fc' + str(i)))
        #top_model.add(BatchNormalization(axis=1, name='fc'+str(i)+'_bn'))
        top_model.add(Activation('relu', name='fc' + str(i) + '_ac'))
        top_model.add(Dropout(dropout_rate))
    top_model.add(Dense(num_classes, activation=activation,
                        name='predictions'))
    if top_model_weights_path is not None:
        top_model.load_weights(top_model_weights_path)

    if include_loc == 'base':
        model = base_model
    elif include_loc == 'top':
        model = top_model
    elif include_loc == 'all':  # add the model on top of the convolutional base
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
    else:
        raise ValueError('Only "base", "top" and "all" can be included.')
    return model
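
# A hedged usage sketch (not from the original source): build the full
# VGG16-based classifier defined above. The argument values (1024 FC neurons,
# 50% dropout) are illustrative, not taken from the original code.
vgg_model = create_VGG16(num_fc_neurons=1024,
                         dropout_rate=0.5,
                         num_classes=24,
                         include_loc='all',
                         activation='softmax')
vgg_model.summary()
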
def make_vgg16(top=True, weights='imagenet', weight_path=''):
    """
    Args: top determines whether to include dense layers at the top. Weights determines
    whether to use imagenet weights or pre-trained weights, in which case the filepath
    must be specified via weight_path.
    
    Creates a convolutional neural network following the VGG16 structure. There are two options:
    the original structure with fully connected layers at the end, or a slimmed down model where the 
    FC layers are replaced by a global average pooling layer. The latter has far fewer weights.
    
    Returns the model.
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    if weights == 'imagenet':
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1000, activation='softmax'))
        # local path to the downloaded VGG16 ImageNet weight file
        model.load_weights(r'D:/drivers/vgg16_weights.h5')
        if not top:
            # drop the fully connected classifier (the last 7 layers) and
            # replace it with a conv + average-pooling head
            for i in range(7):
                model.layers.pop()
            model.outputs = [model.layers[-1].output]
            model.layers[-1].outbound_nodes = []
            model.add(Convolution2D(1024, (3, 3), activation='relu'))
            model.add(AveragePooling2D((14, 14), padding='same'))
            model.add(Flatten())
            model.add(Dense(10, activation='softmax'))
        else:
            # keep the FC layers but swap the 1000-way ImageNet output for a
            # 10-way softmax
            model.layers.pop()
            model.outputs = [model.layers[-1].output]
            model.layers[-1].outbound_nodes = []
            model.add(Dense(10, activation='softmax'))
    elif weights == 'trained':
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        if not top:
            # swap the FC classifier for the conv + average-pooling head before
            # loading the pre-trained weights
            for i in range(7):
                model.layers.pop()
            model.outputs = [model.layers[-1].output]
            model.layers[-1].outbound_nodes = []
            model.add(Convolution2D(1024, (3, 3), activation='relu'))
            model.add(AveragePooling2D((14, 14), padding='same'))
            model.add(Flatten())
            model.add(Dense(10, activation='softmax'))
        model.load_weights(weight_path)
    sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
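
# A hedged usage sketch (not from the original source); the weight file name
# below is a placeholder.
# Original FC top with ImageNet weights, re-headed with a 10-way softmax:
full_model = make_vgg16(top=True, weights='imagenet')
# Slimmed-down average-pooling head with previously trained weights:
slim_model = make_vgg16(top=False, weights='trained',
                        weight_path='vgg16_finetuned.h5')
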
"""
If you need to load weights into a *different* architecture (with some layers in common), for instance for fine-tuning or transfer-learning, you can load weights by *layer name*:
"""
model_yaml.load_weights('to_delete_weights.h5', by_name=True)

# verify the two models now share the same weights: the element-wise
# comparisons below should sum to 202 matching parameters in total
(model_json.get_weights()[0] == model_yaml.get_weights()[0]).sum()
(model_json.get_weights()[1] == model_yaml.get_weights()[1]).sum()
"""
For example
Assume original model looks like this:
"""
model1 = Sequential()
model1.add(Dense(2, input_dim=3, name='dense_1'))
model1.add(Dense(3, name='dense_2'))
model1.save_weights("weights1.h5")

# check out the weights
model1.get_weights()

# new model
model2 = Sequential()
model2.add(Dense(2, input_dim=3, name='dense_1'))  # will be loaded
model2.add(Dense(10, name='new_dense'))  # will not be loaded

# load weights from first model; will only affect the first layer, dense_1.
model2.load_weights("weights1.h5", by_name=True)

# check out the weights
model2.get_weights()
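
# A small verification sketch (not from the original source): after loading by
# name, dense_1 in model2 should hold the same kernel as in model1, while
# new_dense keeps its fresh initialization.
import numpy as np
assert np.allclose(model1.get_weights()[0], model2.get_weights()[0])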