Example #1
File: models.py Project: marhs/tfm
    def get_base(self):
        """Gets base architecture of Resnet 50 Model"""
        input_shape = (3,) + self.size
        img_input = Input(shape=input_shape)
        bn_axis = 1

        x = Lambda(preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)
        x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')

        for n in ['b','c','d']:
            x = identity_block(x, 3, [128, 128, 512], stage=3, block=n)

        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')

        for n in ['b','c','d', 'e', 'f']:
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block=n)

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        self.img_input = img_input
        self.model = Model(self.img_input, x)
        convert_all_kernels_in_model(self.model)
        self.model.load_weights(self.weights_file)
Example #2
    def get_base(self):
        """Gets base architecture of Resnet 50 Model"""
        input_shape = (3, ) + self.size
        img_input = Input(shape=input_shape)
        bn_axis = 1

        x = Lambda(preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)
        x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')

        for n in ['b', 'c', 'd']:
            x = identity_block(x, 3, [128, 128, 512], stage=3, block=n)

        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')

        for n in ['b', 'c', 'd', 'e', 'f']:
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block=n)

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        self.img_input = img_input
        self.model = Model(self.img_input, x)
        convert_all_kernels_in_model(self.model)
        self.model.load_weights(self.weights_file)
Example #3
    def __init__(self, outputs, input_shape, lr=0.001, decay=0, dropout=0):
        img_input = Input(shape=input_shape)

        x = conv_block(img_input,
                       3, [32, 32, 128],
                       stage=1,
                       block="a",
                       strides=(1, 1))
        x = identity_block(x, 3, [32, 32, 128], stage=1, block="b")
        x = identity_block(x, 3, [32, 32, 128], stage=1, block="c")

        x = conv_block(x, 3, [64, 64, 256], stage=2, block="a")
        x = identity_block(x, 3, [64, 64, 256], stage=2, block="b")
        x = identity_block(x, 3, [64, 64, 256], stage=2, block="c")

        x = conv_block(x, 3, [128, 128, 512], stage=3, block="a")
        x = identity_block(x, 3, [128, 128, 512], stage=3, block="b")
        x = identity_block(x, 3, [128, 128, 512], stage=3, block="c")

        x = conv_block(x, 3, [256, 256, 1024], stage=4, block="a")
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block="b")
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block="c")

        x = GlobalAveragePooling2D(name="global_avg_pool")(x)
        predictions = Dense(outputs, activation="softmax")(x)

        self.model = Model(inputs=img_input, outputs=predictions)
        super().__init__(lr, decay)
Example #4
def keras_model(image_size_x, image_size_y, image_channel, action_space):
    input_shape = (image_size_x, image_size_y, image_channel)
    img_input = Input(shape=input_shape)
    bn_axis = 3
    x = ZeroPadding2D((3, 3))(img_input)
    x = Convolution2D(8, 7, 7, subsample=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = conv_block(x, 3, [8, 8, 16], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [8, 8, 16], stage=2, block='b')
    x = identity_block(x, 3, [8, 8, 16], stage=2, block='c')
    x = conv_block(x, 3, [16, 16, 32], stage=3, block='a')
    x = identity_block(x, 3, [16, 16, 32], stage=3, block='b')
    x = identity_block(x, 3, [16, 16, 32], stage=3, block='c')
    x = identity_block(x, 3, [16, 16, 32], stage=3, block='d')
    x = conv_block(x, 3, [32, 32, 64], stage=4, block='a')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='b')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='c')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='d')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='e')
    x = identity_block(x, 3, [32, 32, 64], stage=4, block='f')
    x = conv_block(x, 3, [64, 64, 128], stage=5, block='a')
    x = identity_block(x, 3, [64, 64, 128], stage=5, block='b')
    x = identity_block(x, 3, [64, 64, 128], stage=5, block='c')
    x = conv_block(x, 3, [64, 64, 256], stage=6, block='a')
    x = identity_block(x, 3, [64, 64, 256], stage=6, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=6, block='c')
    x = GlobalAveragePooling2D()(x)
    x = Dense(200, name='fc_feature')(x)
    x = PReLU()(x)
    x = Dense(action_space, activation='softmax', name='fc_action')(x)
    model = Model(img_input, x)
    return model
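A hedged usage sketch for the policy network above; the 84x84x4 frame stack, the 6-action space, and the optimizer are placeholder values, not taken from the original project:

# Build and compile the policy network with placeholder dimensions.
policy_net = keras_model(image_size_x=84, image_size_y=84, image_channel=4, action_space=6)
policy_net.compile(optimizer='adam', loss='categorical_crossentropy')
policy_net.summary()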
Example #5
def resnet50_block(img_input):

    # Choose the batch-norm axis based on the backend image data format.
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1

    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    return x
Example #6
def create_resnet_model(num_classes, shape_input_nn):
    # @input: num of classes of the new final softmax layer, num layers to freeze
    # @output: Resnet final model with new softmax layer at the end

    #creating Resnet network
    img_input = Input(shape=shape_input_nn)
    bn_axis = 3
    x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_fc = Flatten()(x_fc)
    x_fc = Dense(1000, activation='softmax', name='fcnew')(x_fc)

    resnet_model = Model(img_input, x_fc)

    # load weights
    resnet_model.load_weights(WEIGHTS_PATH)

    #creating new last softmax layer
    x_new_fc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_new_fc = Flatten()(x_new_fc)
    x_new_fc = Dense(num_classes, activation='softmax', name='fcnew')(x_new_fc)

    #creating the new model
    resnet_model = Model(img_input, x_new_fc)

    return resnet_model
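A minimal fine-tuning sketch for the function above, assuming WEIGHTS_PATH points at a valid ResNet50 weight file; the class count, optimizer, and data names below are placeholders:

model = create_resnet_model(num_classes=10, shape_input_nn=(224, 224, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train_images, train_labels, epochs=5)  # hypothetical arrays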
Example #7
    def center_inference_model(self, trained_model=None):
        print('Building center inference model')

        resolution = cfg.PREDICT_RESOLUTION
        grid_shape = cfg.GRID_SHAPE
        state_dims = cfg.STATE_DIMS

        ########################################################################
        # Input
        ########################################################################
        input_img = Input(batch_shape=(1, resolution[0], resolution[1],
                                       resolution[2]),
                          name='input_img')
        input_lyo = Input(batch_shape=(1, resolution[0], resolution[1],
                                       resolution[2]),
                          name='input_layout')

        # Merge
        inputs = keras.layers.concatenate([input_img, input_lyo], axis=-1)

        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(inputs)
        x = BatchNormalization(axis=3, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = conv_block(x, 3, [64, 64, 128], stage=2, block='a')
        x = conv_block(x, 3, [64, 64, 128], stage=3, block='a')
        x = conv_block(x, 3, [128, 128, 512], stage=4, block='a')

        feat = x

        # center branch
        cen_hidden = Conv2D(64, (3, 3),
                            dilation_rate=2,
                            padding='same',
                            activation='relu')(feat)
        cen_hidden = Conv2D(1, (3, 3), dilation_rate=2,
                            padding='same')(cen_hidden)
        cen_hidden = Reshape((self.cen_dims, ))(cen_hidden)
        cen_output = Activation('softmax', name='output_cen')(cen_hidden)

        ########################################################################
        # Compile inference model
        ########################################################################
        inference_model = Model(inputs=[input_img, input_lyo],
                                outputs=[feat, cen_output])

        # print(inference_model.summary())
        if trained_model is not None:
            inference_model.set_weights(
                self.get_center_branch_weights(trained_model))
        return inference_model
Example #8
def EnhancedHybridResSppNet(class_num, enhanced_class_num):
    _input = Input(shape=(None, None, 3))
    model = _input
    model = ZeroPadding2D((3, 3))(model)
    model = Conv2D(64, (7, 7), strides=(2, 2))(model)
    model = BatchNormalization(axis=3)(model)
    model = Activation('relu')(model)
    model = MaxPooling2D((3, 3), strides=(2, 2))(model)

    model = conv_block(model,
                       3, [64, 64, 256],
                       stage=2,
                       block='a',
                       strides=(1, 1))
    model = identity_block(model, 3, [64, 64, 256], stage=2, block='b')
    model = identity_block(model, 3, [64, 64, 256], stage=2, block='c')

    model = conv_block(model, 3, [128, 128, 512], stage=3, block='a')
    model = identity_block(model, 3, [128, 128, 512], stage=3, block='b')
    model = identity_block(model, 3, [128, 128, 512], stage=3, block='c')
    model = identity_block(model, 3, [128, 128, 512], stage=3, block='d')

    model = MaxPooling2D((2, 2))(model)

    model = SpatialPyramidPooling([1, 2, 4])(model)

    model1 = Dense(units=class_num)(model)
    model1 = Activation(activation="softmax")(model1)
    model1 = Model(_input, model1)
    model1.compile(loss="categorical_crossentropy",
                   optimizer=RMSprop(lr=1e-4, decay=1e-6),
                   metrics=['accuracy'])

    model2 = Dense(units=enhanced_class_num)(model)
    model2 = Activation(activation="softmax")(model2)
    model2 = Model(_input, model2)
    model2.compile(loss="categorical_crossentropy",
                   optimizer=RMSprop(lr=1e-4, decay=1e-6),
                   metrics=['accuracy'])

    input2 = Input(shape=(100, ))

    model3 = Concatenate()([input2, model])
    model3 = Dense(units=class_num)(model3)
    model3 = Activation(activation="softmax")(model3)
    model3 = Model(inputs=[_input, input2], outputs=model3)
    model3.compile(loss="categorical_crossentropy",
                   optimizer=RMSprop(lr=1e-4, decay=1e-6),
                   metrics=['accuracy'])

    return model1, model2, model3
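A usage sketch with placeholder class counts; the three returned models share the same SPP backbone, and model3 additionally expects a 100-dimensional auxiliary vector per sample:

base_model, enhanced_model, hybrid_model = EnhancedHybridResSppNet(class_num=10, enhanced_class_num=20)
# hybrid_model.fit([images, aux_vectors], labels, ...)  # aux_vectors shape: (n_samples, 100), hypothetical arrays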
Example #9
def ResB():
    img_input = Input(shape=(32, 32, 3))
    x = Conv2D(16, 3, padding='same', name='conv1')(img_input)
    x = BatchNormalization(name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = conv_block(x, 3, [16, 16, 64], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [16, 16, 64], stage=2, block='b')
    x = conv_block(x, 3, [32, 32, 128], stage=3, block='a')
    x = identity_block(x, 3, [32, 32, 128], stage=3, block='b')
    x = conv_block(x, 3, [64, 64, 256], stage=4, block='a')
    x = identity_block(x, 3, [64, 64, 256], stage=4, block='b')
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(10, activation='softmax', name='fc')(x)
    return Model(img_input, x, name='resnet')
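A short sketch of compiling the CIFAR-sized model above; the optimizer choice is an assumption, not from the original code:

model = ResB()    # expects (32, 32, 3) inputs, 10-way softmax output
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])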
Example #10
def model_build(resnet_model, stage=3, fc_drop_rate=0.2):
    if stage >= 1:
        x = ZeroPadding2D((3, 3))(resnet_model.input)
        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=3, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    if stage >= 2:
        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    if stage >= 3:
        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    if stage >= 4:
        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    if stage >= 5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    ## flatten and output layers
    x = AveragePooling2D((7, 7), name='avg_pool')(x)
    x = Flatten()(x)

    x = Dense(128, name='dense1')(x)
    x = BatchNormalization(axis=-1, name='dense1_bn')(x)
    x = Activation('relu', name='dense1_activation')(x)
    x = Dropout(fc_drop_rate, name='d1_drop')(x)

    x = Dense(32, name='dense2')(x)
    x = BatchNormalization(axis=-1, name='dense2_bn')(x)
    x = Activation('relu', name='dense2_activation')(x)
    x = Dropout(fc_drop_rate, name='d2_drop')(x)

    out = Dense(1, activation="sigmoid", name="output")(x)
    for layer in resnet_model.layers:
        layer.trainable = False
    model = Model(inputs=[resnet_model.input], outputs=[out])
    model.summary()

    return model
Example #11
def resnet_mvcnn(target_size, num_images, num_classes):
    # this is the Network to be share amongst the views
    img_input = Input(shape=target_size + (3, ))
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(img_input)
    #     x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)
    outp = Flatten()(x)

    shared_resnet = Model(img_input, outp)

    # one input per image
    inputs = [Input(shape=target_size + (3, )) for _ in range(num_images)]
    # encode through the shared network
    encodeds = [shared_resnet(inputs[idx]) for idx in range(num_images)]

    # rather than concatenate, this time we take the maximum and pass it through another network
    maximum_tensor = maximum(encodeds)

    predictions = Dense(num_classes, activation='softmax',
                        name='final_fc')(maximum_tensor)

    return Model(inputs=inputs, outputs=predictions)
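A sketch of instantiating the multi-view model above, with placeholder sizes; each of the num_images inputs receives one view of the same object, and the shared ResNet encodes every view before the element-wise maximum:

mvcnn = resnet_mvcnn(target_size=(224, 224), num_images=4, num_classes=40)
mvcnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# mvcnn.fit([view_1, view_2, view_3, view_4], labels, ...)  # one array per view, hypothetical names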
Example #12
    def create(self, size, include_top):
        input_shape = (3, ) + size
        img_input = Input(shape=input_shape)
        bn_axis = 1

        x = Lambda(self.vgg_preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)
        x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        for n in ['b', 'c', 'd']:
            x = identity_block(x, 3, [128, 128, 512], stage=3, block=n)
        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        for n in ['b', 'c', 'd', 'e', 'f']:
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block=n)

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        if include_top:
            x = AveragePooling2D((7, 7), name='avg_pool')(x)
            x = Flatten()(x)
            x = Dense(1000, activation='softmax', name='fc1000')(x)
            fname = 'resnet50.h5'
        else:
            fname = 'resnet_nt.h5'

        self.img_input = img_input
        self.model = Model(self.img_input, x)
        convert_all_kernels_in_model(self.model)
        self.model.load_weights(
            get_file(fname,
                     self.FILE_PATH + fname,
                     cache_subdir='models',
                     cache_dir=utils.get_keras_cache_dir()))
Example #13
def create_model(input_size, weights=False, summary=True):
    assert input_size == (112, 112, 3)

    res_in = Input(input_size)

    x = Conv2D(64, (7, 7), padding='same', name='conv1')(res_in)
    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='d')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='e')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)
    x = Flatten()(x)

    x = Dense(500, activation='relu', name='fc3')(x)
    x = Dropout(0.25)(x)
    x = Dense(NUM_CLASSES, activation='softmax', name='predictions')(x)

    model = Model(inputs=res_in, outputs=x)
    model.load_weights('models/resnet50_dropout_sgd_cont_declrd.h5', by_name=True)
    return model
Example #14
    def center_inference_model(self, trained_model=None):
        print('Building center inference model')

        resolution = cfg.PREDICT_RESOLUTION
        grid_shape = cfg.GRID_SHAPE
        state_dims = cfg.STATE_DIMS

        ########################################################################
        # Input
        ########################################################################
        input_img = Input(batch_shape=(1, resolution[0], resolution[1], resolution[2]), name='input_img')
        input_lyo = Input(batch_shape=(1, resolution[0], resolution[1], resolution[2]), name='input_layout')

        # Merge
        inputs = keras.layers.concatenate([input_img, input_lyo], axis = -1)

        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(inputs)
        x = BatchNormalization(axis=3, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = conv_block(x, 3, [64, 64, 128],   stage=2,  block='a')
        x = conv_block(x, 3, [64, 64, 128],   stage=3,  block='a')
        x = conv_block(x, 3, [128, 128, 512], stage=4,  block='a')

        feat = x

        # center branch
        cen_hidden = Conv2D(64, (3, 3), dilation_rate=2, padding='same', activation='relu')(feat)
        cen_hidden = Conv2D(1,  (3, 3), dilation_rate=2, padding='same')(cen_hidden)
        cen_hidden = Reshape((self.cen_dims, ))(cen_hidden)
        cen_output = Activation('softmax', name = 'output_cen')(cen_hidden)

        ########################################################################
        # Compile inference model
        ########################################################################
        inference_model = Model(inputs=[input_img, input_lyo], outputs=[feat, cen_output])

        # print(inference_model.summary())
        if trained_model is not None:
            inference_model.set_weights(self.get_center_branch_weights(trained_model))
        return inference_model
Example #15
def resnet50_block(img_input):

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    # transpose convolution to increase resolution
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same")(x)
    x = BatchNormalization(axis=bn_axis, name='bn_trconv1')(x)
    x = Activation('relu')(x)
    #
    # x = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same")(x)
    # x = BatchNormalization(axis=bn_axis, name='bn_trconv2')(x)
    # x = Activation('relu')(x)

    return x
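A hedged sketch of wrapping the block above into a standalone feature extractor, assuming the module's Keras imports and the conv_block/identity_block helpers are available; the 224x224 input size and the single-channel mask head are assumptions, not part of the original project:

from keras.layers import Input, Conv2D
from keras.models import Model

img_input = Input(shape=(224, 224, 3))
features = resnet50_block(img_input)    # stage-4 features, upsampled 2x by the transpose conv
mask = Conv2D(1, (1, 1), activation='sigmoid', name='mask')(features)
model = Model(img_input, mask)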
Example #16
    def create(self, size, include_top):
        input_shape = (3,)+size
        img_input = Input(shape=input_shape)
        bn_axis = 1

        x = Lambda(self.vgg_preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)
        x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        for n in ['b','c','d']: x = identity_block(x, 3, [128, 128, 512], stage=3, block=n)
        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        for n in ['b','c','d', 'e', 'f']: x = identity_block(x, 3, [256, 256, 1024], stage=4, block=n)

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        if include_top:
            x = AveragePooling2D((7, 7), name='avg_pool')(x)
            x = Flatten()(x)
            x = Dense(1000, activation='softmax', name='fc1000')(x)
            fname = 'resnet50.h5'
        else:
            fname = 'resnet_nt.h5'

        self.img_input = img_input
        self.model = Model(self.img_input, x)
        convert_all_kernels_in_model(self.model)
        self.model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))
Example #17
def custom_res_net_model(labels, first_filters=16, input_shape=(224, 224, 3)):
    img_input = Input(shape=input_shape)

    x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)
    x = BatchNormalization(axis=1, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)
    x = GlobalMaxPooling2D()(x)

    model = Model(img_input, x)
    save_path = "saved_models/weights.best.custom_res_net_model.hdf5"
    return model, save_path
Example #18
    def training_model(self):
        print('Building training model...')

        resolution = cfg.PREDICT_RESOLUTION
        grid_shape = cfg.GRID_SHAPE
        state_dims = cfg.STATE_DIMS

        # Input images
        input_imgs = Input(shape=(resolution[0], resolution[1], resolution[2]), name='input_imgs')
        input_lyos = Input(shape=(resolution[0], resolution[1], resolution[2]), name='input_layouts')
        # Intermediate ROI maps for size prediction
        input_rois = Input(shape=(state_dims[0], state_dims[1]), name='input_rois')

        # Merge
        inputs = keras.layers.concatenate([input_imgs, input_lyos], axis = -1)

        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(inputs)
        x = BatchNormalization(axis=3, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = conv_block(x, 3, [64, 64, 128],   stage=2,  block='a')
        x = conv_block(x, 3, [64, 64, 128],   stage=3,  block='a')
        x = conv_block(x, 3, [128, 128, 512], stage=4,  block='a')
        feats = x

        # center branch
        cen_hidden = Conv2D(64, (3, 3), dilation_rate=2, padding='same', activation='relu')(feats)
        cen_hidden = Conv2D(1,  (3, 3), dilation_rate=2, padding='same')(cen_hidden)
        cen_hidden = Reshape((self.cen_dims, ))(cen_hidden)
        cen_output = Activation('softmax', name = 'output_cens')(cen_hidden)

        # size branch
        rois = Reshape((state_dims[0] * state_dims[1], ))(input_rois)
        rois = RepeatVector(state_dims[2])(rois)
        rois = Permute((2, 1))(rois)
        rois = Reshape((state_dims[0], state_dims[1], state_dims[2]))(rois)

        size_hidden = Conv2D(state_dims[2], (3, 3), dilation_rate=2, padding='same', activation='relu')(feats)
        size_hidden = keras.layers.multiply([size_hidden, rois])
        size_hidden = GlobalMaxPooling2D()(size_hidden)
        size_hidden = Dense(self.size_dims, activation = 'relu')(size_hidden)
        size_output = Dense(self.size_dims, activation = 'softmax', name = 'output_sizes')(size_hidden)

        model = Model(inputs  = [input_imgs, input_lyos, input_rois], \
                      outputs = [cen_output, size_output])


        model.compile(loss = {'output_cens':  'categorical_crossentropy', \
                              'output_sizes': 'categorical_crossentropy'}, \
                      loss_weights = {'output_cens':  1.0, \
                                      'output_sizes': 2.0}, \
                      optimizer = Adam(lr=cfg.TRAIN.INITIAL_LR, \
                                          clipnorm=cfg.TRAIN.CLIP_GRADIENTS), \
                      metrics = {'output_cens':  'categorical_accuracy', \
                                 'output_sizes': 'categorical_accuracy'})

        # print(model.summary())
        # print(model.metrics_names)
        # for x in model.get_weights():
        #     print x.shape
        return model
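A hedged sketch of a training call for the compiled model above, assuming `model` is the object returned by training_model() and that imgs, lyos, rois, cen_targets, and size_targets are placeholder NumPy arrays of matching shapes; the dict keys mirror the Input and output layer names used in the method, and batch size and epoch count are placeholders:

model.fit({'input_imgs': imgs, 'input_layouts': lyos, 'input_rois': rois},
          {'output_cens': cen_targets, 'output_sizes': size_targets},
          batch_size=32, epochs=10)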
Example #19
def Resnet(input_shape=None, classes=1, bn_momentum=0):
    bn_axis = 3
    inc_angle_input = Input(shape=[1], name='inc_angle')
    img_input = Input(shape=input_shape, name='image_input')

    img_input_bn = BatchNormalization(axis=bn_axis,
                                      momentum=bn_momentum)(img_input)
    inc_angle_bn = BatchNormalization(momentum=bn_momentum)(inc_angle_input)

    image_1 = Conv2D(filters=64,
                     kernel_size=(3, 3),
                     strides=(1, 1),
                     padding='same')(img_input_bn)
    image_1 = BatchNormalization(axis=bn_axis, momentum=bn_momentum)(image_1)
    image_1 = Activation('relu')(image_1)
    image_1 = MaxPooling2D(pool_size=(2, 2), strides=(1, 1))(image_1)

    image_1 = conv_block(image_1, 3, [32, 32, 64], stage=2, block='a')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 32, 64], stage=2, block='b')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 32, 64], stage=2, block='c')

    image_1 = conv_block(image_1, 3, [32, 64, 128], stage=3, block='a')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 64, 128], stage=3, block='b')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 64, 128], stage=3, block='c')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 64, 128], stage=3, block='d')

    image_1 = conv_block(image_1, 3, [32, 32, 128], stage=5, block='a')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 32, 128], stage=5, block='b')
    image_1 = Dropout(0.2)(image_1)
    image_1 = identity_block(image_1, 3, [32, 32, 128], stage=5, block='c')

    image_1 = MaxPooling2D(pool_size=(5, 5), name='max_pool_1')(image_1)

    image_1 = Flatten()(image_1)

    image_2 = custom_conv_block(img_input_bn,
                                nf=64,
                                k=3,
                                s=1,
                                nb=6,
                                p_act='relu')
    image_2 = Dropout(0.2)(image_2)
    image_2 = BatchNormalization(momentum=bn_momentum)(
        GlobalMaxPooling2D()(image_2))

    output = Concatenate(axis=-1)([image_1, image_2, inc_angle_bn])

    # output = Dense(units=128, activation='relu', name='fc_1')(output)
    # output = Dropout(0.5)(output)

    output = Dense(classes, activation='sigmoid', name='fc_last')(output)

    model = Model(inputs=[img_input, inc_angle_input],
                  outputs=output,
                  name='simple resnet')
    return model
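A usage sketch for the two-input model above, assuming the project's custom_conv_block helper is available; the input shape, hyper-parameters, and training call are placeholders, not values confirmed by the original code:

model = Resnet(input_shape=(75, 75, 2), classes=1, bn_momentum=0.99)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# model.fit([images, inc_angles], labels, ...)  # hypothetical arrays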
Example #20
    def _create(self, size, include_top):
        input_shape = (3, ) + size
        img_input = Input(shape=input_shape)
        bn_axis = 1

        x = Lambda(self._vgg_preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)

        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x,
                       3,
                       filters=[64, 64, 256],
                       stage=2,
                       block='a',
                       strides=(1, 1))
        x = identity_block(x, 3, filters=[64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, filters=[64, 64, 256], stage=2, block='c')

        x = conv_block(x,
                       3,
                       filters=[128, 128, 512],
                       stage=3,
                       block='a',
                       strides=(2, 2))
        for b in ['b', 'c', 'd']:
            x = identity_block(x, 3, filters=[128, 128, 512], stage=3, block=b)

        x = conv_block(x,
                       3,
                       filters=[256, 256, 1024],
                       stage=4,
                       block='a',
                       strides=(2, 2))
        for b in ['b', 'c', 'd', 'e', 'f']:
            x = identity_block(x,
                               3,
                               filters=[256, 256, 1024],
                               stage=4,
                               block=b)

        x = conv_block(x,
                       3,
                       filters=[512, 512, 2048],
                       stage=5,
                       block='a',
                       strides=(2, 2))
        x = identity_block(x, 3, filters=[512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, filters=[512, 512, 2048], stage=5, block='c')

        if include_top:
            x = AveragePooling2D(pool_size=(7, 7),
                                 name='avg_pool',
                                 padding='valid')(x)
            x = Flatten()(x)
            x = Dense(1000, activation='softmax', name='fc1000')(x)
            fname = 'resnet50.h5'
        else:
            fname = 'resnet_nt.h5'

        self.model = Model(inputs=img_input, outputs=x, name='ResNet50')
        #convert_all_kernels_in_model(self.model)
        self.model.load_weights(
            get_file(fname, self.FILE_PATH + fname, cache_subdir='models'))
Example #21
def ResNet50(include_top=False,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1):
    """Instantiates the ResNet50 architecture.
    Optionally loads weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 197.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = (75, 75, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        logger.info('ok imagenet')
        if include_top:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend(
        ) == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
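A short usage sketch; note that input_shape is hard-coded to (75, 75, 3) inside the function, so the dummy batch below matches it. weights=None is used here only to avoid the download path and is an assumption for illustration, not the original configuration:

import numpy as np

base = ResNet50(include_top=False, weights=None)
feats = base.predict(np.zeros((1, 75, 75, 3), dtype='float32'))
print(feats.shape)    # stage-5 feature maps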
Example #22
def resnet_dropout(include_top=False,
                   weights='imagenet',
                   input_tensor=None,
                   pooling='avg',
                   input_shape=(224, 224, 3),
                   classes=25,
                   dp_rate=0.,
                   n_retrain_layers=0):

    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    #input_shape = _obtain_input_shape(input_shape,default_size=224,min_size=197,data_format=K.image_data_format(),include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = Dropout(dp_rate)(x)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)

    split_value = len(model.layers) + 1 - n_retrain_layers
    for layer in model.layers[:split_value]:
        layer.trainable = False
    for layer in model.layers[split_value:]:
        layer.trainable = True

    return model
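A hedged fine-tuning sketch for the function above; with include_top=False and pooling='avg' it returns pooled 2048-d features, so a new classification head is attached here. The class count, dropout rate, number of retrained layers, and optimizer are placeholders:

from keras.layers import Dense
from keras.models import Model

backbone = resnet_dropout(weights='imagenet', classes=25, dp_rate=0.3, n_retrain_layers=10)
head = Dense(25, activation='softmax', name='classifier')(backbone.output)
clf = Model(backbone.input, head)
clf.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])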
Example #23
def custom_resnet(n=0, dp_rate=0):

    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

    # Determine proper input shape
    #input_shape = _obtain_input_shape(input_shape,default_size=224,min_size=197,data_format=K.image_data_format(),include_top=include_top)

    img_input = Input(shape=(224, 224, 3))

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = Dropout(dp_rate)(x)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = Dropout(dp_rate)(x)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    x = Flatten()(x)
    x = Dense(25, activation='softmax', name='fc1000')(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.

    inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights

    weights_path = get_file(
        'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        WEIGHTS_PATH_NO_TOP,
        cache_subdir='models',
        md5_hash='a268eb855778b3df3c7506639542a6af')
    model.load_weights(weights_path, by_name=True)

    split_value = len(model.layers) + 1 - n
    for layer in model.layers[:split_value]:
        layer.trainable = False
    for layer in model.layers[split_value:]:
        layer.trainable = True

    return model
Example #24
File: resnet.py Project: kcyu1993/keras
def ResCovNet50(parametrics=[],
                epsilon=0.,
                mode=0,
                nb_classes=23,
                input_shape=(3, 224, 224),
                init='glorot_normal',
                cov_branch='o2transform',
                cov_mode='channel',
                dropout=False,
                cov_branch_output=None,
                dense_after_covariance=True,
                cov_block_mode=3,
                last_softmax=True,
                independent_learning=False):
    '''Instantiate the ResNet50 architecture,
    optionally loading weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_dim_ordering="tf"` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        cov_mode:   {0,1,2,3 ..}
             cov_mode 0: concat in the final layer {FC256, WP 10} then FC 10
             cov_mode 1: concat in the second last {FC10, WP 10} then FC 10
             cov_mode 2: sum in the second last {relu(FC10) + relu(WP 10)} -> softmax
             cov_mode 3: sum in the second last {soft(FC10) + soft(WP 10)} -> softmax
             cov_mode 4 - 6 are Cov of other layers information, from resnet stage 2, 3, 4
                only differs during last combining phase
             cov_mode 4:  concat{FC4096, WP1, WP2, WP3} -> softmax
             cov_mode 5: concat{FC_23, WP1, WP2, WP3}
             cov_mode 6: concat{FC_23, sum(WP1, WP2, WP3)}

    # Returns
        A Keras model instance.
    '''

    # Function name
    if cov_branch == 'o2transform':
        covariance_block = covariance_block_original
    elif cov_branch == 'dense':
        covariance_block = covariance_block_vector_space
    else:
        raise ValueError('covariance cov_mode not supported')

    nb_class = nb_classes
    if cov_branch_output is None:
        cov_branch_output = nb_class

    basename = 'ResCovNet'
    if parametrics:
        basename += '_para-'
        for para in parametrics:
            basename += str(para) + '_'
    basename += 'mode_{}'.format(str(mode))

    if epsilon > 0:
        basename += '-epsilon_{}'.format(str(epsilon))

    if input_shape[0] == 3:
        # Define the channel
        if K.image_dim_ordering() == 'tf':
            if input_shape[0] in {1, 3}:
                input_shape = (input_shape[1], input_shape[2], input_shape[0])

    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (3, 224, 224)
    else:
        input_shape = (224, 224, 3)

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1
    input_tensor = Input(input_shape)

    x = ZeroPadding2D((3, 3))(input_tensor)
    x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    block1_x = x

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    block2_x = x

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    block3_x = x
    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if independent_learning:
        x = ZeroPadding2D((3, 3))(input_tensor)
        x = Convolution2D(64, 7, 7, subsample=(2, 2), name='cov_conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='cov_bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='cov_a',
                       strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='cov_b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='cov_c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='cov_a')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='cov_b')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='cov_c')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='cov_d')
        block1_x = x

        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='cov_a')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='cov_b')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='cov_c')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='cov_d')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='cov_e')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='cov_f')
        block2_x = x

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='cov_a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='cov_b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='cov_c')

        block3_x = x
        # Distinct name so it cannot clash with the 'avg_pool' layer above
        x = AveragePooling2D((7, 7), name='cov_avg_pool')(x)

    if cov_block_mode == 3:
        cov_input = block3_x
    elif cov_block_mode == 2:
        cov_input = block2_x
    else:
        cov_input = block1_x
    basename += '_block_{}'.format(cov_block_mode)
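    # Head variants selected by `mode`:
    #   0    -> fall back to a plain ResNet50 (ImageNet head)
    #   1    -> softmax on the covariance branch alone
    #   2, 7 -> FC branch (relu) concatenated with the covariance branch
    #   3    -> element-wise sum of the two softmax branches
    #   4    -> pooled features concatenated with covariance branches from all three taps
    #   5    -> like 4, but the pooled features pass through a relu FC layer first
    #   6    -> the three covariance branches are summed, then concatenated with the FC branch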
    if mode == 0:
        warnings.warn("Mode 0 is just a plain ResNet50; use ResNet50 directly")
        if nb_class == 1000:
            return ResNet50(input_tensor=input_tensor)
        else:
            raise ValueError("Only support 1000 class nb for ResNet50")

    if mode == 1:
        cov_branch = covariance_block(cov_input,
                                      nb_class,
                                      stage=5,
                                      block='a',
                                      parametric=parametrics,
                                      cov_mode=cov_mode)
        x = Dense(nb_class, activation='softmax',
                  name='predictions')(cov_branch)

    elif mode == 2:
        cov_branch = covariance_block(cov_input,
                                      nb_class,
                                      stage=5,
                                      block='a',
                                      parametric=parametrics)
        x = Flatten()(x)
        x = Dense(nb_class, activation='relu', name='fc')(x)
        x = merge([x, cov_branch], mode='concat', name='concat')
        x = Dense(nb_class, activation='softmax', name='predictions')(x)

    elif mode == 3:
        cov_branch = covariance_block(cov_input,
                                      nb_class,
                                      stage=5,
                                      block='a',
                                      parametric=parametrics)
        cov_branch = Activation('softmax')(cov_branch)
        x = Flatten()(x)
        x = Dense(nb_class, activation='softmax', name='fc')(x)
        x = merge([x, cov_branch], mode='sum', name='sum')
        x = Dense(nb_class, activation='softmax', name='predictions')(x)
    elif mode == 4:
        cov_branch1 = covariance_block(block1_x,
                                       nb_class,
                                       stage=2,
                                       block='a',
                                       parametric=parametrics)
        cov_branch2 = covariance_block(block2_x,
                                       nb_class,
                                       stage=3,
                                       block='b',
                                       parametric=parametrics)
        cov_branch3 = covariance_block(block3_x,
                                       nb_class,
                                       stage=4,
                                       block='c',
                                       parametric=parametrics)
        x = Flatten()(x)
        x = merge([x, cov_branch1, cov_branch2, cov_branch3],
                  mode='concat',
                  name='concat')
        x = Dense(nb_class, activation='softmax', name='predictions')(x)
    elif mode == 5:
        cov_branch1 = covariance_block(block1_x,
                                       nb_class,
                                       stage=2,
                                       block='a',
                                       parametric=parametrics)
        cov_branch2 = covariance_block(block2_x,
                                       nb_class,
                                       stage=3,
                                       block='b',
                                       parametric=parametrics)
        cov_branch3 = covariance_block(block3_x,
                                       nb_class,
                                       stage=4,
                                       block='c',
                                       parametric=parametrics)
        x = Flatten()(x)
        x = Dense(nb_class, activation='relu', name='fc')(x)
        x = merge([x, cov_branch1, cov_branch2, cov_branch3],
                  mode='concat',
                  name='concat')
        x = Dense(nb_class, activation='softmax', name='predictions')(x)
    elif mode == 6:
        cov_branch1 = covariance_block(block1_x,
                                       nb_class,
                                       stage=2,
                                       block='a',
                                       parametric=parametrics)
        cov_branch2 = covariance_block(block2_x,
                                       nb_class,
                                       stage=3,
                                       block='b',
                                       parametric=parametrics)
        cov_branch3 = covariance_block(block3_x,
                                       nb_class,
                                       stage=4,
                                       block='c',
                                       parametric=parametrics)
        x = Flatten()(x)
        x = Dense(nb_class, activation='relu', name='fc')(x)
        cov_branch = merge([cov_branch1, cov_branch2, cov_branch3],
                           mode='sum',
                           name='sum')
        x = merge([x, cov_branch], mode='concat', name='concat')
        x = Dense(nb_class, activation='softmax', name='predictions')(x)
    elif mode == 7:
        cov_branch = covariance_block(cov_input,
                                      nb_class,
                                      stage=5,
                                      block='a',
                                      parametric=parametrics)
        x = Flatten()(x)
        x = Dense(nb_class, activation='relu', name='fc')(x)
        x = merge([x, cov_branch], mode='concat', name='concat')
        x = Dense(nb_class, activation='softmax', name='predictions')(x)
    else:
        raise ValueError("Mode not supported {}".format(mode))

    model = Model(input_tensor, x, name=basename)
    return model
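
A minimal usage sketch for the builder above. Its public name and exact signature are not visible in this excerpt, so `ResCovNet50` and the keyword interface are assumptions; the option values simply mirror the variables used in the body (nb_class, parametrics, mode, cov_block_mode, independent_learning, epsilon).

# Hypothetical call into the builder above -- the name ResCovNet50 and its
# keyword interface are assumptions; only the option values correspond to
# branches in the code.
from keras.optimizers import SGD

model = ResCovNet50(nb_class=10,             # number of target classes
                    parametrics=[256, 128],  # parametric covariance layer sizes
                    mode=2,                  # concat the FC branch with the cov branch
                    cov_block_mode=3,        # tap block3_x (stage-5 output)
                    independent_learning=False,
                    epsilon=0)
model.compile(optimizer=SGD(lr=1e-3, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.name)  # basename encodes the mode, epsilon and tapped block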
Example #25
    def training_model(self):
        print('Building training model...')

        resolution = cfg.PREDICT_RESOLUTION
        grid_shape = cfg.GRID_SHAPE
        state_dims = cfg.STATE_DIMS

        # Input images
        input_imgs = Input(shape=(resolution[0], resolution[1], resolution[2]),
                           name='input_imgs')
        input_lyos = Input(shape=(resolution[0], resolution[1], resolution[2]),
                           name='input_layouts')
        # Intermediate ROI maps for size prediction
        input_rois = Input(shape=(state_dims[0], state_dims[1]),
                           name='input_rois')

        # Concatenate the image and layout inputs along the channel axis
        inputs = keras.layers.concatenate([input_imgs, input_lyos], axis=-1)

        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(inputs)
        x = BatchNormalization(axis=3, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = conv_block(x, 3, [64, 64, 128], stage=2, block='a')
        x = conv_block(x, 3, [64, 64, 128], stage=3, block='a')
        x = conv_block(x, 3, [128, 128, 512], stage=4, block='a')
        feats = x

        # Center branch: a 1-channel map flattened to a softmax over spatial positions
        cen_hidden = Conv2D(64, (3, 3),
                            dilation_rate=2,
                            padding='same',
                            activation='relu')(feats)
        cen_hidden = Conv2D(1, (3, 3), dilation_rate=2,
                            padding='same')(cen_hidden)
        cen_hidden = Reshape((self.cen_dims, ))(cen_hidden)
        cen_output = Activation('softmax', name='output_cens')(cen_hidden)

        # Size branch: ROI-gated features pooled to a softmax over size_dims classes
        rois = Reshape((state_dims[0] * state_dims[1], ))(input_rois)
        rois = RepeatVector(state_dims[2])(rois)
        rois = Permute((2, 1))(rois)
        rois = Reshape((state_dims[0], state_dims[1], state_dims[2]))(rois)
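        # The ROI map is flattened, repeated once per feature channel, and
        # reshaped back to (state_dims[0], state_dims[1], state_dims[2]) so it
        # can gate the size features element-wise via multiply below.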

        size_hidden = Conv2D(state_dims[2], (3, 3),
                             dilation_rate=2,
                             padding='same',
                             activation='relu')(feats)
        size_hidden = keras.layers.multiply([size_hidden, rois])
        size_hidden = GlobalMaxPooling2D()(size_hidden)
        size_hidden = Dense(self.size_dims, activation='relu')(size_hidden)
        size_output = Dense(self.size_dims,
                            activation='softmax',
                            name='output_sizes')(size_hidden)

        model = Model(inputs=[input_imgs, input_lyos, input_rois],
                      outputs=[cen_output, size_output])

        model.compile(loss={'output_cens': 'categorical_crossentropy',
                            'output_sizes': 'categorical_crossentropy'},
                      loss_weights={'output_cens': 1.0,
                                    'output_sizes': 2.0},
                      optimizer=Adam(lr=cfg.TRAIN.INITIAL_LR,
                                     clipnorm=cfg.TRAIN.CLIP_GRADIENTS),
                      metrics={'output_cens': 'categorical_accuracy',
                               'output_sizes': 'categorical_accuracy'})

        # print(model.summary())
        # print(model.metrics_names)
        # for x in model.get_weights():
        #     print x.shape
        return model
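
A minimal sketch of feeding this model, assuming an enclosing class instance (called `builder` here, a hypothetical name) and stand-in shapes for the cfg constants; only the dictionary keys are the real input/output names declared above.

import numpy as np

n = 8                               # batch size (assumption)
h, w, c = 64, 64, 3                 # stands in for cfg.PREDICT_RESOLUTION
sh, sw = 16, 16                     # stands in for cfg.STATE_DIMS[:2]
cen_dims, size_dims = sh * sw, 32   # stand-ins for self.cen_dims / self.size_dims

model = builder.training_model()    # `builder` is a hypothetical instance
model.fit({'input_imgs':    np.random.rand(n, h, w, c).astype('float32'),
           'input_layouts': np.random.rand(n, h, w, c).astype('float32'),
           'input_rois':    np.random.rand(n, sh, sw).astype('float32')},
          {'output_cens':  np.eye(cen_dims)[np.random.randint(cen_dims, size=n)],
           'output_sizes': np.eye(size_dims)[np.random.randint(size_dims, size=n)]},
          batch_size=4, epochs=1)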
Example #26
spatial_input = Input((224, 224, 3))
x = ZeroPadding2D((3, 3))(spatial_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='sconv1')(x)
x = BatchNormalization(axis=3, name='sbn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)

temporal_input = Input((224, 224, 20))
y = ZeroPadding2D((3, 3))(temporal_input)
y = Convolution2D(64, 7, 7, subsample=(2, 2), name='tconv1')(y)
y = BatchNormalization(axis=3, name='tbn_conv1')(y)
y = Activation('relu')(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)

x = conv_block(x, 3, [64, 64, 256], stage=2, block='sa', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='sb')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='sc')

y = conv_block(y, 3, [64, 64, 256], stage=2, block='ta', strides=(1, 1))
y = identity_block(y, 3, [64, 64, 256], stage=2, block='tb')
y = identity_block(y, 3, [64, 64, 256], stage=2, block='tc')

x = merge([x, y], mode='sum')
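# The spatial and temporal streams are fused by element-wise sum after stage 2;
# both the fused spatial path (x) and the temporal path (y) then continue
# through their own stage-3 blocks.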
x = conv_block(x, 3, [128, 128, 512], stage=3, block='sa')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='sb')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='sc')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='sd')

y = conv_block(y, 3, [128, 128, 512], stage=3, block='ta')
y = identity_block(y, 3, [128, 128, 512], stage=3, block='tb')