Code example #1
def mobilenet_yolo_body(inputs, num_anchors, num_classes):
    #net, endpoint = inception_v2.inception_v2(inputs)
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet')

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x 1024
    # conv_pw_11_relu :26 x 26 x 512
    # conv_pw_5_relu : 52 x 52 x 256

    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f1 :13 x 13 x 1024
    x, y1 = make_last_layers(f1, 512, num_anchors * (num_classes + 5))

    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)

    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f2: 26 x 26 x 512
    x = Concatenate()([x, f2])

    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))

    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)

    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    # f3 : 52 x 52 x 256
    x = Concatenate()([x, f3])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))

    return Model(inputs=inputs, outputs=[y1, y2, y3])
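A minimal usage sketch for mobilenet_yolo_body (not part of the original snippet; the anchor and class counts are illustrative, and it assumes the same yolo3-style helpers imported by the function above are available):

from keras.layers import Input

inputs = Input(shape=(416, 416, 3))
num_anchors_per_scale = 3    # assumed: 3 anchors per output scale
num_classes = 20             # assumed: e.g. a VOC-sized class set
model = mobilenet_yolo_body(inputs, num_anchors_per_scale, num_classes)
model.summary()              # three detection outputs on 13x13, 26x26 and 52x52 grids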
Code example #2
def mobilenetV1_yolo_body(inputs, num_anchors, num_classes, alpha=0.75):
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet',
                          alpha=alpha)  # default 'imagenet'

    # input: 416 x 416 x 3
    # with alpha=1.0: conv_pw_13_relu :13 x 13 x 1024
    #                 conv_pw_11_relu :26 x 26 x 512
    #                 conv_pw_5_relu  : 52 x 52 x 256
    # (channel counts scale with alpha, e.g. 768 / 384 / 192 for the alpha=0.75 default here)

    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f1 :13 x 13 x 1024
    x, y1 = make_last_layers(f1, 512, num_anchors * (num_classes + 5))

    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)

    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f2: 26 x 26 x 512
    x = Concatenate()([x, f2])

    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))

    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)

    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    # f3 : 52 x 52 x 256
    x = Concatenate()([x, f3])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))

    return Model(inputs, [y1, y2, y3])
Code example #3
def getbasemodel(model_name, include_top, weights):
	if model_name == "vgg16":
		base_model = VGG16(weights=weights)
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
		image_size = (224, 224)
	elif model_name == "vgg19":
		base_model = VGG19(weights=weights)
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
		image_size = (224, 224)
	elif model_name == "resnet50":
		base_model = ResNet50(weights=weights)
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('flatten').output)
		image_size = (224, 224)
	elif model_name == "inceptionv3":
		base_model = InceptionV3(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('custom').output)
		image_size = (299, 299)
	elif model_name == "inceptionresnetv2":
		base_model = InceptionResNetV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('custom').output)
		image_size = (299, 299)
	elif model_name == "mobilenet":
		base_model = MobileNet(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('custom').output)
		image_size = (224, 224)
	elif model_name == "xception":
		base_model = Xception(weights=weights)
		model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
		image_size = (299, 299)
	else:
		base_model = None
		model = None
		image_size = None
	return base_model, model, image_size
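A hedged usage sketch for getbasemodel; the "vgg16" branch is used here because it relies only on stock layer names, and the image path is a placeholder:

import numpy as np
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input

base_model, model, image_size = getbasemodel("vgg16", include_top=True, weights="imagenet")

img = image.load_img("example.jpg", target_size=image_size)     # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = model.predict(x).flatten()                           # 4096-d 'fc1' features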
Code example #4
def yolo_body_mobilenet(inputs, num_anchors, num_classes):
    """Create YOLO_V3_mobilenet model CNN body in Keras."""
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet')

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x 1024
    # conv_pw_11_relu :26 x 26 x 512
    # conv_pw_5_relu : 52 x 52 x 256

    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f1 :13 x 13 x 1024
    x, y1 = make_last_layers(f1, 512, num_anchors * (num_classes + 5))

    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)

    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f2: 26 x 26 x 512
    x = Concatenate()([x, f2])

    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))

    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)

    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    # f3 : 52 x 52 x 256
    x = Concatenate()([x, f3])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))

    return Model(inputs=inputs, outputs=[y1, y2, y3])
Code example #5
def mobilenet_yolo_body(inputs, num_anchors, num_classes):

    # create the MobileNet base model
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet')

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x 1024
    # conv_pw_11_relu :26 x 26 x 512
    # conv_pw_5_relu : 52 x 52 x 256

    # get outputs from the MobileNet feature layers (top excluded)
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f1 :13 x 13 x 1024
    # build the detection head for the coarsest scale; x also feeds the next scale up
    x, y1 = make_last_layers(f1, 512,
                             num_anchors * (num_classes + 5))  # SCALE 1

    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)

    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f2: 26 x 26 x 512
    x = Concatenate()([x, f2])

    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))  #SCALE 2

    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)

    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    # f3 : 52 x 52 x 256
    x = Concatenate()([x, f3])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))  #SCALE 3

    return Model(inputs=inputs, outputs=[y1, y2, y3])
Code example #6
def uNet_Model(input_shape=(128, 128, 3), dropout_rate=dropout_rate):
    '''
    uNet with MobileNet (pretrained on imagenet) as downsampling side
    Outputs saved from five layers to concatenate on upsampling side:
    activations at conv_pw_1, conv_pw_3, conv_pw_5, conv_pw_11, conv_pw_13
    ResNet convolution blocks with Conv2DTranspose and elu used for upsampling side
    '''

    base_model = MobileNet(weights='imagenet',
                           include_top=False,
                           input_shape=(128, 128, 3))

    # Base model, with 5 layers out
    X1 = base_model.get_layer('conv_pw_1_relu').output  # 64x64, 64 filters
    X2 = base_model.get_layer('conv_pw_3_relu').output  # 32x32, 128 filters
    X3 = base_model.get_layer('conv_pw_5_relu').output  # 16x16, 256 filters
    X4 = base_model.get_layer('conv_pw_11_relu').output  # 8x8, 512 filters
    X5 = base_model.get_layer('conv_pw_13_relu').output  # 4x4, 1024 filters

    # Bottom block
    X = identity_block(X5, filters=[256, 256, 1024],
                       dropout_rate=dropout_rate)  # 4x4
    X = Add()([X, X5])  # 4x4

    # Deconvolution block 1
    X = deconvolution_block(X,
                            filters=[128, 128, 512],
                            dropout_rate=dropout_rate)  # 8x8
    X = Add()([X, X4])  # 8x8

    # Deconvolution block 2
    X = deconvolution_block(X,
                            filters=[64, 64, 256],
                            dropout_rate=dropout_rate)  # 16x16
    X = Add()([X, X3])  # 16x16

    # Deconvolution block 3
    X = deconvolution_block(X,
                            filters=[32, 32, 128],
                            dropout_rate=dropout_rate)  # 32x32
    X = Add()([X, X2])  # 32x32

    # Deconvolution block 4
    X = deconvolution_block(X, filters=[16, 16, 64],
                            dropout_rate=dropout_rate)  # 64x64
    X = Add()([X, X1])  # 64x64

    # Final deconvolution block
    X = deconvolution_block(X, filters=[16, 16, 64],
                            dropout_rate=dropout_rate)  # 128x128

    predictions = Conv2D(1, (1, 1), activation='sigmoid')(X)

    model = Model(inputs=base_model.input, outputs=predictions)

    return model
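A short instantiation sketch for uNet_Model (assumes identity_block, deconvolution_block and the module-level dropout_rate from the original project are available; the optimizer and loss are illustrative choices for a binary mask):

model = uNet_Model(input_shape=(128, 128, 3), dropout_rate=0.3)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()   # final output: 128 x 128 x 1 sigmoid mask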
Code example #7
File: model.py Project: smartTZ/keras_yolov3
def yolo_body_mobilenetlite(inputs, num_anchors, num_classes):
    """Create YOLO_V3_mobilenet model CNN body in Keras."""
    mobilenet = MobileNet(input_tensor=inputs,weights='imagenet')

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x 1024
    # conv_pw_11_relu :26 x 26 x 512
    # conv_pw_5_relu : 52 x 52 x 256
    # mobilenet.summary()
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f1 :13 x 13 x 1024
    
    # spp
    sp3 = MaxPooling2D(pool_size=(3,3),strides=1,padding='same')(f1)
    sp5 = MaxPooling2D(pool_size=(5,5),strides=1,padding='same')(f1)
    f1 = compose(
            Concatenate(),
            DarknetConv2D_BN_Leaky(512, (1,1)))([sp3,sp5,f1])
    # end
    
    f1 = DarknetSeparableConv2D_BN_Leaky(256,(3,3))(f1)

    y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(f1)
    
    f1 = compose(
            DarknetConv2D_BN_Leaky(256, (1,1)),
            UpSampling2D(2))(f1)
 
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f2: 26 x 26 x 512
    f2 = compose(
                Concatenate(),
                DarknetSeparableConv2D_BN_Leaky(256,(3,3))
                )([f1,f2])
    y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(f2)

    
    f2 = compose(
            DarknetConv2D_BN_Leaky(128, (1,1)),
            UpSampling2D(2))(f2)

    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    # f3 : 52 x 52 x 256
    f3 = compose(
                Concatenate(),
                DarknetSeparableConv2D_BN_Leaky(128,(3,3))
                )([f2,f3])
    y3 = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(f3)

    return Model(inputs = inputs, outputs=[y1,y2,y3])
Code example #8
def get_model_mobilenet_pretrained(input_shape=(75, 75, 3), inputs_meta=1):
    dropout = 0.25
    optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    #Building the model

    base_model = MobileNet(weights='imagenet', include_top=False, 
                 dropout=0.2, input_shape=input_shape, classes=1)

    input_meta = Input(shape=[inputs_meta], name='meta')
    input_meta_norm = BatchNormalization()(input_meta)

    x = base_model.get_layer('conv_pw_13_relu').output

    x = GlobalMaxPooling2D()(x)
    concat = concatenate([x, input_meta_norm])
    fc1 = Dense(512, activation='relu', name='fc2')(concat)
    fc1 = Dropout(dropout)(fc1)
    fc2 = Dense(512, activation='relu', name='fc3')(fc1)
    fc2 = Dropout(dropout)(fc2)

    # Sigmoid Layer
    output = Dense(1)(fc2)
    output = Activation('sigmoid')(output)

    model = Model(inputs=[base_model.input, input_meta], outputs=output)

    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

    return model
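An illustrative training call for the two-input model above; the random arrays are placeholders for real image and metadata batches:

import numpy as np

model = get_model_mobilenet_pretrained(input_shape=(75, 75, 3), inputs_meta=1)
X_img = np.random.rand(8, 75, 75, 3).astype('float32')    # placeholder images
X_meta = np.random.rand(8, 1).astype('float32')           # placeholder metadata feature
y = np.random.randint(0, 2, size=(8,))                    # placeholder binary labels
model.fit([X_img, X_meta], y, batch_size=4, epochs=1)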
Code example #9
def create_model(input_shape, num_class, k):
    fgc_base = MobileNet(input_shape=input_shape,
                         include_top=False,
                         weights='imagenet',
                         alpha=1.)
    fgc_base.trainable = True
    # fgc_base.summary()
    feature2 = fgc_base.get_layer("conv_pw_11_relu").output
    fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])

    fc_model.summary()
    input_tensor = Input(shape=input_shape)
    features = fc_model(input_tensor)
    fc_obj = GlobalMaxPool2D()(features[0])
    fc_obj = Dropout(0.7)(fc_obj)
    fc_obj = Dense(num_class, activation="softmax")(fc_obj)

    fc_part = Conv2D(filters=num_class * k,
                     kernel_size=(1, 1),
                     activation="relu")(features[1])
    fc_part = GlobalMaxPool2D()(fc_part)
    fc_part = Dropout(0.5)(fc_part)
    fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
    fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
    fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
    fc_ccp = Activation(activation="softmax")(fc_ccp)
    fc_part = Dense(num_class, activation="softmax")(fc_part)
    output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])

    return Model(input_tensor, output)
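An illustrative instantiation (the class count and k mirror example #21 below, which uses the same architecture; training would need a custom loss over the concatenated heads, such as the ctm_loss used there):

model = create_model(input_shape=(224, 224, 3), num_class=12, k=10)
model.summary()   # output length is 3 * num_class: the obj, part and ccp heads concatenated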
Code example #10
File: mobilenetv1.py Project: gopi231091/Faster-RCNN
def nn_base(input_tensor=None, trainable=False):


    model = MobileNet(input_tensor = input_tensor, weights = None)
    x = model.get_layer('conv_pw_13_relu').output

    return x
Code example #11
def mobilenet_retinanet(num_classes,
                        backbone='mobilenet224_1.0',
                        inputs=None,
                        modifier=None,
                        **kwargs):
    alpha = float(backbone.split('_')[1])

    # choose default input
    if inputs is None:
        inputs = keras.layers.Input((None, None, 3))

    mobilenet = MobileNet(input_tensor=inputs,
                          alpha=alpha,
                          include_top=False,
                          pooling=None,
                          weights=None)

    # create the full model
    layer_names = ['conv_pw_5_relu', 'conv_pw_11_relu', 'conv_pw_13_relu']
    layer_outputs = [mobilenet.get_layer(name).output for name in layer_names]
    mobilenet = keras.models.Model(inputs=inputs,
                                   outputs=layer_outputs,
                                   name=mobilenet.name)

    # invoke modifier if given
    if modifier:
        mobilenet = modifier(mobilenet)

    return retinanet.retinanet(inputs=inputs,
                               num_classes=num_classes,
                               backbone_layers=mobilenet.outputs,
                               **kwargs)
Code example #12
def LoadFeatureModel():
    print('LoadFeatureModel ###############################')
    base_model = MobileNet(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer(layer).output)
    model.summary()
    return model
Code example #13
    def __init__(self, weights=None):
        """Either load pretrained from imagenet, or load our saved
        weights from our own training."""

        self.weights = weights  # so we can check elsewhere which model

        if weights is None:
            # Get model with pretrained weights.
            base_model = MobileNet(weights='imagenet',
                                   include_top=False,
                                   pooling='avg')

            # We'll extract features at the final pool layer.
            self.model = Model(inputs=base_model.input,
                               outputs=base_model.get_layer(
                                   'global_average_pooling2d_1').output)

        else:
            # Load the model first.
            self.model = load_model(weights)

            # Then remove the top so we get features not predictions.
            # From: https://github.com/fchollet/keras/issues/2371
            self.model.layers.pop()
            self.model.layers.pop()  # two pops to get to pool layer
            self.model.outputs = [self.model.layers[-1].output]
            self.model.output_layers = [self.model.layers[-1]]
            self.model.layers[-1].outbound_nodes = []
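A hypothetical usage sketch; the class name Extractor is an assumption (the snippet only shows __init__), and the image path is a placeholder:

import numpy as np
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input

extractor = Extractor()   # weights=None -> ImageNet-pretrained MobileNet
img = image.load_img("frame.jpg", target_size=(224, 224))   # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = extractor.model.predict(x)   # (1, 1024) globally pooled MobileNet features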
Code example #14
def save_model10(new_model_path, conv_model_path):
	model = MobileNet(
		input_shape=(img_width, img_height, 3),
		include_top=False,
		weights=None
	)
	if pretrained:
		model = MobileNet(
			input_shape=(img_width, img_height, 3),
			include_top=False,
			weights='imagenet'
		)
	model.summary()
	transfer_layer = model.get_layer('conv_pw_13_relu')
	conv_model = Model(inputs=model.input,
					   outputs=transfer_layer.output)
	new_model = Sequential()
	new_model.add(conv_model)
	new_model.add(GlobalAveragePooling2D())
	if num_fc_layers>=1:
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	if num_fc_layers>=2:
		new_model.add(Dropout(dropout))
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	if num_fc_layers>=3:
		new_model.add(Dropout(dropout))
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	new_model.add(Dense(num_classes, activation='softmax'))

	print(new_model.summary())

	new_model.save(new_model_path)
	conv_model.save(conv_model_path)
	return
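save_model10 reads its configuration from module-level names; an illustrative set of values the original module would need to define (all of them, and the file names, are assumptions):

img_width, img_height = 224, 224
pretrained = True
num_fc_layers = 2
num_fc_neurons = 1024
dropout = 0.5
num_classes = 10

save_model10('new_model.h5', 'conv_model.h5')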
Code example #15
def cnn_model_initialization(alpha, weights, num_class, cnn_input_size=224):
    # this function is to be invoked once at the start of the pipeline
    '''
    # check and set backend
    tf = 'tensorflow'
    if K.backend() != tf:
        os.environ['KERAS_BACKEND'] = tf
        importlib.reload(K)
        assert K.backend() == tf
        print('{} backend is sucessfully set'.format(K.backend()))
    elif K.backend() == tf:
        print('{} backend has already been set'.format(K.backend()))
    '''
    # Setup the model
    time_load_model_start = timer()
    size=cnn_input_size
    # load the base model with top layer, since your weights are trained using a topped model
    base_model = MobileNet(input_shape=(size,size,3), 
                      alpha=alpha, 
                      depth_multiplier=1, 
                      dropout=1e-3, 
                      include_top=True, 
                      weights=weights, 
                      input_tensor=None, 
                      pooling=None, 
                      classes=num_class)
    # define a new model whose output is the reshape layer of the base model
    model = Model(inputs=base_model.input, outputs=base_model.get_layer('reshape_1').output)
    #model.summary()
    time_load_model_end = timer()
    time_load_model = str(round(time_load_model_end-time_load_model_start,4))
    print('Deep CNN model Mobilenet is loaded, time taken: {} seconds'.format(time_load_model))
    return model
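An illustrative call; alpha is an assumption, and note that weights='imagenet' with include_top=True requires the 1000 ImageNet classes, whereas a custom weights file would use the class count it was trained with:

model = cnn_model_initialization(alpha=1.0, weights='imagenet', num_class=1000)
# model.predict(batch) returns the 'reshape_1' output: pooled features of shape (N, 1, 1, 1024)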
Code example #16
    def get_mobilenet_feature_extractor(self):
        '''
		Returns the mobilenet feature extractor
		'''
        mobilenet = MobileNet()
        return Model(
            inputs=mobilenet.inputs,
            outputs=mobilenet.get_layer("global_average_pooling2d_1").output)
Code example #17
 def __init__(self):
     base_model = MobileNet(weights='imagenet')
     self.model = Model(inputs=base_model.input,
                        outputs=[
                            base_model.get_layer('conv_preds').output,
                            base_model.output
                        ])
     self.graph = tf.get_default_graph()
Code example #18
 def __init__(self):
     # tf.reset_default_graph()
     self.graph1 = tf.Graph()
     base_model = MobileNet(weights='imagenet')
     self.model1 = Model(inputs=base_model.input,
                         outputs=base_model.get_layer('conv_preds').output)
     # self.base_model_class = load_model('class_model.h5')
     # self.graph1 = tf.Graph()
     self.graph = tf.get_default_graph()
Code example #19
def mobile_body(inputs, num_anchors, num_classes):
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet')
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    x, y1 = make_last_layers(f1, 512, num_anchors * (num_classes + 5))
    x = compose(
        DarknetConv2D_BN_Leaky(256, (1, 1)),
        UpSampling2D(2))(x)
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    x = Concatenate()([x, f2])
    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))

    x = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x)
    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    x = Concatenate()([x, f3])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))

    return Model(inputs=inputs, outputs=[y1, y2, y3])
Code example #20
File: model.py Project: heylfda/EyeCatch
def get_MobileNet():
    mobilenet_model = MobileNet(weights='imagenet')

    x = mobilenet_model.get_layer('conv_preds').output
    x = Flatten()(x)
    predictions = Dense(2, activation=None)(x)

    model = Model(inputs=mobilenet_model.input, outputs=predictions)

    # for layer in mobilenet_model.layers:
    #     layer.trainable = False
    return model
Code example #21
    def __init__(self, gpu_id=5):
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        num_class = 12
        BATCH_SIZE = 32
        k = 10

        fgc_base = MobileNet(input_shape=(224, 224, 3),
                             include_top=False,
                             weights=None,
                             alpha=1.)
        fgc_base.trainable = True
        # fgc_base.summary()
        feature2 = fgc_base.get_layer("conv_pw_11_relu").output
        fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])

        # fc_model.summary()

        input_tensor = Input(shape=(224, 224, 3))
        input_tensor_bn = BatchNormalization()(input_tensor)
        features = fc_model(input_tensor_bn)
        fc_obj = GlobalMaxPool2D()(features[0])
        fc_obj = Dropout(0.7)(fc_obj)
        fc_obj = Dense(12, activation="softmax")(fc_obj)

        fc_part = Conv2D(filters=num_class * k,
                         kernel_size=(1, 1),
                         activation="relu")(features[1])
        fc_part = GlobalMaxPool2D()(fc_part)
        fc_part = Dropout(0.5)(fc_part)
        fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
        fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
        fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
        fc_ccp = Activation(activation="softmax")(fc_ccp)
        fc_part = Dense(12, activation="softmax")(fc_part)
        output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])

        self.dfb_cnn = Model(input_tensor, output)

        lr = 0.001
        clip_value = 0.01
        self.dfb_cnn.compile(optimizer=SGD(lr=lr,
                                           momentum=0.9,
                                           decay=1e-5,
                                           nesterov=True,
                                           clipvalue=clip_value),
                             loss=ctm_loss,
                             metrics=[ctm_acc1, ctm_acck])
        path_prefix = "./datasets/model/escale/focal_loss_2_0.25/"
        # path_prefix = "./datasets/focal_loss_2_0.25/"
        self.dfb_cnn.load_weights(filepath=path_prefix + "weights.h5",
                                  by_name=True, skip_mismatch=True)  # skip_mismatch requires by_name=True
Code example #22
File: image.py Project: asgundogdu/multimodalrec
    def __init__(self, weights=None, load=False):
        """Either load pretrained from imagenet, or load our saved
        weights from our own training."""
        from keras.applications.mobilenet import MobileNet, preprocess_input
        self.weights = weights  # so we can check elsewhere which model

        if weights is None and not load:
            # Get model with pretrained weights.
            #base_model = mobilenet.MobileNet(weights='imagenet')
            base_model = MobileNet(include_top=False, weights='imagenet', input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
            model = Model(inputs=base_model.input, outputs=base_model.get_layer('conv_pw_13_relu').output)
            # We'll extract features at the final pool layer.
            self.model = model
Code example #23
File: train.py Project: ash-kmr/pedestriananalysis
def pretrained_small():
    model = MobileNet(input_shape=(128, 128, 3),
                      alpha=1.0,
                      depth_multiplier=1,
                      dropout=1e-3,
                      include_top=False,
                      weights='imagenet',
                      input_tensor=None,
                      pooling=None,
                      classes=7)
    layer_name = 'conv_pw_3'
    intermodel = Model(inputs=model.input,
                       outputs=model.get_layer(layer_name).output)
    return intermodel
Code example #24
File: baseline_mobile.py Project: abcp4/test
def make_model(output=None):
    #network configuration
    base_model = MobileNet(weights='imagenet', include_top=False)
    if output == 'final':
        return model_description, [[model_name, base_model]]

    models = []
    for layer in range(1, 6):
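        # NOTE: the 'blockN_pool' names follow the VGG16 convention; a stock MobileNet has no
        # such layers (its blocks are named conv_pw_N / conv_pw_N_relu), so get_layer() would
        # fail here unless the backbone or the layer names are adapted.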
        layer_name = 'block' + str(layer) + '_pool'
        x = base_model.get_layer(layer_name).output
        x = GlobalAveragePooling2D()(x)
        m = Model(inputs=base_model.input, outputs=x)
        models.append(['%s_%s' % (model_name, layer_name), m])

    return model_description, models
Code example #25
def mobile_net(dim1=32, dim2=32, output_features=3):
    tf.reset_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    from keras.applications.mobilenet import MobileNet
    from keras.models import Model
    mobi = MobileNet(input_shape=(96, 96, 2), weights=None)
    pre_output = mobi.get_layer("dropout").output
    pre_output = Flatten()(pre_output)
    pre_output = keras.layers.Dense(1000)(pre_output)
    pre_output = keras.layers.Dense(output_features)(pre_output)
    pre_output = PowerWhitening(output_dim=output_features,
                                n_iterations=50)(pre_output)
    net = Model(inputs=mobi.input, outputs=pre_output)
    net.compile(loss=generalized_sfa_loss, optimizer='nadam')
    return net
Code example #26
def mobilenet_retinanet(num_classes, backbone='mobilenet224_1.0', inputs=None, **kwargs):
    alpha = float(backbone.split('_')[1])

    # choose default input
    if inputs is None:
        inputs = keras.layers.Input((None, None, 3))

    mobilenet = MobileNet(input_tensor=inputs, alpha=alpha, include_top=False, pooling=None, weights=None)

    # get last layer from each depthwise convolution blocks 3, 5, 11 and 13
    outputs = [mobilenet.get_layer(name='conv_pw_{}_relu'.format(i)).output for i in [3, 5, 11, 13]]

    # create the mobilenet backbone
    mobilenet = keras.models.Model(inputs=inputs, outputs=outputs, name=mobilenet.name)

    # create the full model
    model = retinanet.retinanet_bbox(inputs=inputs, num_classes=num_classes, backbone=mobilenet, **kwargs)

    return model
Code example #27
def mobilenet_retinanet(num_classes,
                        backbone='mobilenet224_1.0',
                        inputs=None,
                        modifier=None,
                        **kwargs):
    """ Constructs a retinanet model using a mobilenet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('mobilenet128', 'mobilenet160', 'mobilenet192', 'mobilenet224')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a MobileNet backbone.
    """
    alpha = float(backbone.split('_')[1])

    # choose default input
    if inputs is None:
        inputs = keras.layers.Input((None, None, 3))

    mobilenet = MobileNet(input_tensor=inputs,
                          alpha=alpha,
                          include_top=False,
                          pooling=None,
                          weights=None)

    # create the full model
    layer_names = ['conv_pw_5_relu', 'conv_pw_11_relu', 'conv_pw_13_relu']
    layer_outputs = [mobilenet.get_layer(name).output for name in layer_names]
    mobilenet = keras.models.Model(inputs=inputs,
                                   outputs=layer_outputs,
                                   name=mobilenet.name)

    # invoke modifier if given
    if modifier:
        mobilenet = modifier(mobilenet)

    return retinanet.retinanet(inputs=inputs,
                               num_classes=num_classes,
                               backbone_layers=mobilenet.outputs,
                               **kwargs)
Code example #28
    def __init__(
        self,
        alpha=1.0,
    ):
        inputs = keras.layers.Input((None, None, 3))
        mnet = MobileNet(input_tensor=inputs,
                         alpha=alpha,
                         include_top=False,
                         pooling=None,
                         weights=None)
        backbone_layer_names = [
            'conv_pw_5_relu', 'conv_pw_11_relu', 'conv_pw_13_relu'
        ]
        backbone_outputs = [
            mnet.get_layer(name).output for name in backbone_layer_names
        ]

        self.backbone_model = Model(inputs=inputs,
                                    outputs=backbone_outputs,
                                    name=mnet.name)

        super(MobilenetBackbone, self).__init__()
Code example #29
def get_model(weights='imagenet'):
    # create the base pre-trained model
    base_model = MobileNet(input_shape=(224, 224, 3),
                           weights=weights,
                           pooling='avg',
                           include_top=False,
                           alpha=0.5)

    # add a global spatial average pooling layer
    x = base_model.get_layer('conv_pw_11_relu').output
    #x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    #x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    # and a logistic layer
    predictions = Dense(len(data.classes), activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    model.summary()
    return model
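A possible training setup (not from the original): it assumes the surrounding module defines data.classes, and the freezing strategy below is only one option:

model = get_model(weights='imagenet')
for layer in model.layers:
    if layer.name.startswith('conv'):   # freeze the MobileNet backbone convolutions
        layer.trainable = False
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])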
Code example #30
File: mobilenet.py Project: AlexBlack2202/ImageAI
def mobilenet_retinanet(num_classes, backbone='mobilenet224_1.0', inputs=None, modifier=None, **kwargs):
    alpha = float(backbone.split('_')[1])

    # choose default input
    if inputs is None:
        inputs = keras.layers.Input((None, None, 3))

    mobilenet = MobileNet(input_tensor=inputs, alpha=alpha, include_top=False, pooling=None, weights=None)

    # get last layer from each depthwise convolution blocks 3, 5, 11 and 13
    outputs = [mobilenet.get_layer(name='conv_pw_{}_relu'.format(i)).output for i in [3, 5, 11, 13]]

    # create the mobilenet backbone
    mobilenet = keras.models.Model(inputs=inputs, outputs=outputs, name=mobilenet.name)

    # invoke modifier if given
    if modifier:
        mobilenet = modifier(mobilenet)

    # create the full model
    model = retinanet.retinanet_bbox(inputs=inputs, num_classes=num_classes, backbone=mobilenet, **kwargs)

    return model
Code example #31
File: model.py Project: iambhuvi/Keypoint-Detection
    def getModel(self):
        mobilenet = MobileNet(input_shape=(224, 224, 3),
                              include_top=False,
                              weights='imagenet')  #using mobilenet as backbone

        last_layer = mobilenet.get_layer(index=-1).output  #get the last layer
        for layer in mobilenet.layers:
            layer.trainable = True  # set all mobilenet layers trainable to True

        c1 = Conv2D(filters=1024, kernel_size=(3, 3))(last_layer)
        l1 = LeakyReLU(alpha=0.3)(c1)
        d1 = Dropout(0.3)(l1)

        c2 = Conv2D(filters=512, kernel_size=(3, 3))(d1)
        l2 = LeakyReLU(alpha=0.6)(c2)
        d2 = Dropout(0.5)(l2)

        f1 = Flatten()(d2)

        dense1 = Dense(24)(f1)
        o = LeakyReLU(alpha=0.3)(dense1)

        return Model(inputs=mobilenet.input, outputs=o)
Code example #32
File: test.py Project: eong2012/flower-recognition
	image_size = (224, 224)
elif model_name == "resnet50":
	base_model = ResNet50(weights=weights)
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('flatten').output)
	image_size = (224, 224)
elif model_name == "inceptionv3":
	base_model = InceptionV3(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('custom').output)
	image_size = (299, 299)
elif model_name == "inceptionresnetv2":
	base_model = InceptionResNetV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('custom').output)
	image_size = (299, 299)
elif model_name == "mobilenet":
	base_model = MobileNet(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('custom').output)
	image_size = (224, 224)
elif model_name == "xception":
	base_model = Xception(weights=weights)
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
	image_size = (299, 299)
else:
	base_model = None

# get all the train labels
train_labels = os.listdir(train_path)

# get all the test images paths
test_images = os.listdir(test_path)

# loop through each image in the test data
Code example #33
"""
#以mobilenet为例,finetune的过程
import tensorflow as tf
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.applications.mobilenet import MobileNet
from keras.layers import Input, Reshape, AvgPool2D, Dropout, \
    Conv2D, BatchNormalization, Activation
from keras.models import Model

#加载预训练权重,输入大小可以设定,include_top表示是否包括顶层的全连接层
base_model = MobileNet(input_shape= (128, 128, 3), include_top = False)

#添加新层,get_layer方法可以根据层名返回该层,output用于返回该层的输出张量tensor
with tf.name_scope('output'):
    x = base_model.get_layer('conv_dw6_relu').output
    x = Conv2D(256, kernel_size=(3,3))(x)
    x = Activation('relu')(x)
    x = AvgPool2D(pool_size = (5,5))(x)
    x = Dropout(rate = 0.5)(x)
    x = Conv2D(10, kernel_size = (1,1))(x)
    predictions =  Reshape((10,))(x)


#finetune模型
model = Model(inputs= base_model.input, outputs= predictions)

#------------------------------训练新层-------------------------------------
#冻结原始层位,在编译后生效
for layer in base_model.layers:
    layer.trainable = False
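A continuation sketch for the walkthrough above (not part of the original): the generators, step counts and file name are placeholders, and since the head built above ends in a linear Reshape, a softmax activation should be appended (or a logits-aware loss used) before training with categorical_crossentropy:

model.compile(optimizer=SGD(lr=1e-3, momentum=0.9),
              loss='categorical_crossentropy',       # assumes a softmax was appended to the head
              metrics=['accuracy'])
model.fit_generator(train_generator,                 # placeholder generator of (x, y) batches
                    steps_per_epoch=100,
                    epochs=5,
                    validation_data=val_generator,   # placeholder
                    callbacks=[ModelCheckpoint('mobilenet_finetune.h5', save_best_only=True)])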
Code example #34
  image_size = (224, 224)
elif model_name == "resnet50":
  base_model = ResNet50(weights=weights)
  model = Model(inputs=base_model.input, outputs=base_model.get_layer('flatten').output)
  image_size = (224, 224)
elif model_name == "inceptionv3":
  base_model = InceptionV3(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
  model = Model(inputs=base_model.input, outputs=base_model.get_layer('batch_normalization_1').output)
  image_size = (299, 299)
elif model_name == "inceptionresnetv2":
  base_model = InceptionResNetV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
  model = Model(inputs=base_model.input, outputs=base_model.get_layer('batch_normalization_1').output)
  image_size = (299, 299)
elif model_name == "mobilenet":
  base_model = MobileNet(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
  model = Model(inputs=base_model.input, outputs=base_model.get_layer('batch_normalization_1').output)
  image_size = (224, 224)
elif model_name == "xception":
  base_model = Xception(weights=weights)
  model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
  image_size = (299, 299)
else:
  base_model = None

print ("[INFO] successfully loaded base model and model...")

# path to training dataset
train_labels = os.listdir(train_path)

# encode the labels
print ("[INFO] encoding labels...")