def tiny_yolo4_mobilenet_body(inputs,
                              num_anchors,
                              num_classes,
                              alpha=1.0,
                              use_spp=True):
    '''Create Tiny YOLO_v4 MobileNet model CNN body in keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)
    print('backbone layers number: {}'.format(len(mobilenet.layers)))

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1 :13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output

    f1_channel_num = int(1024 * alpha)
    f2_channel_num = int(512 * alpha)

    y1, y2 = tiny_yolo4_predictions((f1, f2), (f1_channel_num, f2_channel_num),
                                    num_anchors, num_classes, use_spp)

    return Model(inputs, [y1, y2])
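A minimal usage sketch for the builder above (not part of the original listing): it assumes the module-level imports and the tiny_yolo4_predictions helper are in scope, the Input import assumes a tf.keras setup, and the anchor/class counts are placeholder values.

from tensorflow.keras.layers import Input

# hypothetical example values; real anchor/class counts come from the training config
image_input = Input(shape=(416, 416, 3))
tiny_model = tiny_yolo4_mobilenet_body(image_input, num_anchors=3, num_classes=80)
tiny_model.summary()  # two outputs for a 416x416 input: a 13x13 grid (y1) and a 26x26 grid (y2)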
def yolo4_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V4 MobileNet model CNN body in Keras."""
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)
    print('backbone layers number: {}'.format(len(mobilenet.layers)))

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1: 13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f3: 52 x 52 x (256*alpha) for 416 input
    f3 = mobilenet.get_layer('conv_pw_5_relu').output

    f1_channel_num = int(1024 * alpha)
    f2_channel_num = int(512 * alpha)
    f3_channel_num = int(256 * alpha)

    y1, y2, y3 = yolo4_predictions(
        (f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num),
        num_anchors, num_classes)

    return Model(inputs, [y1, y2, y3])
def yolo5lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V5 Lite MobileNet model CNN body in Keras."""
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(mobilenet.layers)))

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1: 13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f3: 52 x 52 x (256*alpha) for 416 input
    f3 = mobilenet.get_layer('conv_pw_5_relu').output

    # add SPP neck with original channel number
    f1 = yolo5_spp_neck(f1, int(1024*alpha))

    # use yolo5_small depth_multiple and width_multiple for head
    depth_multiple = 0.33
    width_multiple = 0.5

    f1_channel_num = int(1024*width_multiple)
    f2_channel_num = int(512*width_multiple)
    f3_channel_num = int(256*width_multiple)

    y1, y2, y3 = yolo5lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, depth_multiple, width_multiple, with_spp=False)

    return Model(inputs, [y1, y2, y3])
def tiny_yolo3lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create Tiny YOLO_v3 Lite MobileNet model CNN body in keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    x1 = mobilenet.get_layer('conv_pw_11_relu').output

    x2 = mobilenet.get_layer('conv_pw_13_relu').output
    x2 = DarknetConv2D_BN_Leaky(int(512 * alpha), (1, 1))(x2)

    y1 = compose(
        #DarknetConv2D_BN_Leaky(int(1024*alpha), (3,3)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=int(1024 * alpha),
                                            kernel_size=(3, 3),
                                            block_id_str='14'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2)

    x2 = compose(DarknetConv2D_BN_Leaky(int(256 * alpha), (1, 1)),
                 UpSampling2D(2))(x2)
    y2 = compose(
        Concatenate(),
        #DarknetConv2D_BN_Leaky(int(512*alpha), (3,3)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=int(512 * alpha),
                                            kernel_size=(3, 3),
                                            block_id_str='15'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))([x2, x1])

    return Model(inputs, [y1, y2])
def deep_encoder_model():
    base_model = MobileNet(include_top=True, weights='imagenet')
    model = Model(
        inputs=base_model.input,
        outputs=base_model.get_layer('conv_pw_13').output)  # experiment with this cutoff layer
    print(model.summary())
    return model
Example #6
    def set_feature_extractor(self, name='mobilenet', summary=False):
        if name == 'mobilenet':
            self.feature_extractor = MobileNet(input_shape=(self.img_size, self.img_size, 3),
                                               include_top=True, weights='imagenet')
            output = self.feature_extractor.layers[-6].output
            self.feature_extractor = tf.keras.Model(self.feature_extractor.inputs, output)
        if summary:
            self.feature_extractor.summary()
def yolo3lite_spp_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create YOLO_v3 Lite SPP MobileNet model CNN body in keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1: 13 x 13 x (1024*alpha)
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha)
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f3: 52 x 52 x (256*alpha)
    f3 = mobilenet.get_layer('conv_pw_5_relu').output

    f1_channel_num = int(1024 * alpha)
    f2_channel_num = int(512 * alpha)
    f3_channel_num = int(256 * alpha)

    y1, y2, y3 = yolo3lite_predictions(
        (f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num),
        num_anchors,
        num_classes,
        use_spp=True)

    return Model(inputs=inputs, outputs=[y1, y2, y3])
Example #8
def tiny_yolo4lite_mobilenet_body(inputs,
                                  num_anchors,
                                  num_classes,
                                  alpha=1.0,
                                  use_spp=True):
    '''Create Tiny YOLO_v4 Lite MobileNet model CNN body in Keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1 :13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output

    #feature map 1 head (13 x 13 x (512*alpha) for 416 input)
    x1 = DarknetConv2D_BN_Leaky(int(512 * alpha), (1, 1))(f1)
    if use_spp:
        x1 = Spp_Conv2D_BN_Leaky(x1, int(512 * alpha))

    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(DarknetConv2D_BN_Leaky(int(256 * alpha), (1, 1)),
                          UpSampling2D(2))(x1)
    x2 = compose(
        Concatenate(),
        #DarknetConv2D_BN_Leaky(int(512*alpha), (3,3)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=int(512 * alpha),
                                            kernel_size=(3, 3),
                                            block_id_str='15'))(
                                                [x1_upsample, f2])

    #feature map 2 output (26 x 26 x (512*alpha) for 416 input)
    y2 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x2)

    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        #DarknetConv2D_BN_Leaky(int(512*alpha), (3,3), strides=(2,2)),
        Darknet_Depthwise_Separable_Conv2D_BN_Leaky(int(512 * alpha), (3, 3),
                                                    strides=(2, 2),
                                                    block_id_str='16'))(x2)
    x1 = compose(
        Concatenate(),
        #DarknetConv2D_BN_Leaky(int(1024*alpha), (3,3)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=int(1024 * alpha),
                                            kernel_size=(3, 3),
                                            block_id_str='17'))(
                                                [x2_downsample, x1])

    #feature map 1 output (13 x 13 x (1024*alpha) for 416 input)
    y1 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x1)

    return Model(inputs, [y1, y2])
Example #9
    def __init__(self, input_size, backend_path):
        input_image = Input(shape=(input_size, input_size, 3))

        mobilenet = MobileNet(input_shape=(224, 224, 3), include_top=False)
        # NOTE: weights come from the module-level MOBILENET_BACKEND_PATH; the backend_path argument is not used here
        mobilenet.load_weights(MOBILENET_BACKEND_PATH)

        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x)
Example #10
def classify(arg):
    # e.g. './data/people'
    data, sample = arg.split('/')
    path = os.path.join(data, sample)
    print(path)
    dirs = os.listdir(path)
    num_test = 4
    classified_list = {}
    model = MobileNet(weights='imagenet')
    if os.path.exists(os.path.join('./cloth_label.txt')):
        f = open('./cloth_label.txt', 'r')
        categories = f.read().split()
        f.close()
    else:
        return False
    print(categories)
    for category in categories:
        classified_list[category] = 0

    for dir in dirs:
        dir_path = os.path.join(data, sample, dir)
        files = os.listdir(dir_path)
        for file in files:
            name, extension = os.path.splitext(os.path.join(dir_path, file))
            if extension == '.jpg' or extension == '.png' or extension == '.jpeg':
                src = os.path.join(dir_path, file)
                print("src : " + src)
                img = image.load_img(src, target_size=(224, 224))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                preds = model.predict(x)
                labels = decode_predictions(preds, top=num_test)[0]
                flag = False

                for label in labels:
                    label = list(label)
                    #print(classified_list.get(label[1]))
                    if classified_list.get(label[1]) is not None:
                        """ save_dir='./cloth_and_people'
                        shutil.move(src,dst) """
                        """ if not(os.path.isdir(os.path.join(save_dir))):
                            os.makedirs(os.path.join(save_dir))
                        dst = os.path.join(save_dir,file) """
                        flag = True
                        break
                if not flag:
                    label = 'etc'
                    new_filename = 'etc_' + file
                    print(new_filename)
                    os.rename(os.path.join(dir_path, file), os.path.join(dir_path, new_filename))
                    """ save_dir='./etc' """
                    """ if dir =='./data/cloth_and_people':
                        save_dir = 'etc_'+str(num_test)
                    else:
                        save_dir='etc_'+str(num_test)+'_etc' """
                    """ if not(os.path.exists(save_dir)):
Example #11
    def __init__(self, name="resnet"):

        # Load a pretrained classification network
        # (the trained ImageNet weights are loaded along with the architecture)
        if name == "mobilenet":
            self.model = MobileNet(weights='imagenet')
        elif name == "resnet":
            self.model = ResNet50(weights='imagenet')
        else:
            self.model = ResNet50(weights='imagenet')
        self.model.summary()
    def compute_net(self, features):
        """
        Compute MobileNet depending on mode.
        """
        if self.mode == 'train_from_scratch' or self.mode == 'retrain':
            model_graph = MobileNet(weights=None, include_top=False)
        if self.mode == 'transfer':
            model_graph = MobileNet(include_top=False)
            for layer in model_graph.layers:
                layer.trainable = False
        layer = model_graph(features)
        layer = GlobalAveragePooling2D()(layer)
        return layer
    def __init__(self,
                 num_classes,
                 is_tesla_k80,
                 alpha=1,
                 dropout=1e-3,
                 pre_trained=True):
        self.is_teslaK80 = is_tesla_k80

        # input layer
        self.inputs = Input(shape=(224, 224, 3), name="input_spatial")
        # data normalization
        self.data_norm = BatchNormalization(3,
                                            name='data_norm',
                                            center=False,
                                            scale=False)

        # create the base pre-trained model
        self.mobile_net = MobileNet(
            weights='imagenet' if pre_trained else None, include_top=False)

        self.GlobalAveragePooling2D = GlobalAveragePooling2D()

        shape = (1, 1, int(1024 * alpha))
        self.Reshape_1 = Reshape(shape, name='reshape_1')
        self.Dropout = Dropout(dropout, name='dropout')
        self.Conv2D = Conv2D(num_classes, (1, 1),
                             padding='same',
                             name='conv_preds')
        self.Activation = Activation('softmax', name='act_softmax')
        self.Reshape_2 = Reshape((num_classes, ), name='reshape_2')
Example #14
    def __init__(self, input_size):
        input_image = Input(shape=input_size)

        mobilenet = MobileNet(input_shape=input_size, include_top=False)
        if input_size[2] == 3:
            try:
                print("Loading pretrained weights: " + MOBILENET_BACKEND_PATH)
                mobilenet.load_weights(MOBILENET_BACKEND_PATH)
            except Exception:
                print("Unable to load backend weights. Using a fresh model")
        else:
            print('Pretrained weights are only available for the RGB (3-channel) network.')

        x = mobilenet(input_image)

        self.feature_extractor = Model(input_image, x, name='MobileNet_backend')
Example #15
def loadPretrainedWeights():
    pretrained_weights = {}

    pretrained_weights['vgg16'] = VGG16(weights='imagenet', include_top=False, pooling='avg')
    pretrained_weights['vgg19'] = VGG19(weights='imagenet', include_top=False, pooling='avg')

    pretrained_weights['resnet50'] = ResNet50(weights='imagenet', include_top=False, pooling='avg')

    pretrained_weights['inceptionv3'] = InceptionV3(weights='imagenet', include_top=False, pooling='avg')
    pretrained_weights['inception-resentv2'] = InceptionResNetV2(weights='imagenet', include_top=False, pooling='avg')

    pretrained_weights['xception'] = Xception(weights='imagenet', include_top=False, pooling='avg')

    pretrained_weights['densenet121'] = DenseNet121(weights='imagenet', include_top=False, pooling='avg')
    pretrained_weights['densenet169'] = DenseNet169(weights='imagenet', include_top=False, pooling='avg')
    pretrained_weights['densenet201'] = DenseNet201(weights='imagenet', include_top=False, pooling='avg')
    pretrained_weights['mobilenet'] = MobileNet(weights='imagenet', include_top=False, pooling='avg')

    # Disabled models (kept for reference):
    # pretrained_weights['nasnetlarge'] = NASNetLarge(weights='imagenet', include_top=False, pooling='avg', input_shape=(224, 224, 3))
    # pretrained_weights['nasnetmobile'] = NASNetMobile(weights='imagenet', include_top=False, pooling='avg')
    # pretrained_weights['mobilenetV2'] = MobileNetV2(weights='imagenet', include_top=False, pooling='avg')

    return pretrained_weights
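A hedged usage sketch (not in the original snippet): pulling an average-pooled MobileNet feature vector for a single image from the dictionary returned above. The image path is a placeholder and the imports assume a tf.keras setup.

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet import preprocess_input

models = loadPretrainedWeights()
img = image.load_img('example.jpg', target_size=(224, 224))  # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = models['mobilenet'].predict(x)  # shape (1, 1024) since pooling='avg'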
Example #16
def get_model(num_classes):
    # this assumes K.image_data_format() == 'channels_last'
    input_tensor = Input(shape=(224, 224, 3))

    # create the base pre-trained model
    base_model = MobileNet(input_tensor=input_tensor,
                           weights='imagenet',
                           include_top=False)

    # for layer in base_model.layers:
    #     layer.trainable = False

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add dense layers so that the model can learn more complex functions and classify better
    x = Dense(1024, activation='relu')(x)  # dense layer 1
    x = Dense(1024, activation='relu')(x)  # dense layer 2
    x = Dense(512, activation='relu')(x)  # dense layer 3
    x = Dense(num_classes,
              activation='softmax')(x)  # final layer with softmax activation

    updatedModel = Model(base_model.input, x)

    return updatedModel
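A short compile sketch for the classifier head above; the class count and training hyperparameters are illustrative assumptions, not values from the original.

model = get_model(num_classes=10)  # placeholder class count
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])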
Example #17
def mildnet_mobilenet():
    vgg_model = MobileNet(weights=None,
                          include_top=False,
                          input_shape=(224, 224, 3))
    intermediate_layer_outputs = get_layers_output_by_name(
        vgg_model, [
            "conv_dw_1_relu", "conv_dw_2_relu", "conv_dw_4_relu",
            "conv_dw_6_relu", "conv_dw_12_relu"
        ])
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    for layer_name, output in intermediate_layer_outputs.items():
        output = GlobalAveragePooling2D()(output)
        convnet_output = concatenate([convnet_output, output])

    # NOTE: this reassignment discards the concatenated intermediate-layer features built above
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    convnet_output = Dense(1024, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Dense(1024, activation='relu')(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        convnet_output)

    first_input = Input(shape=(224, 224, 3))
    second_input = Input(shape=(224, 224, 3))

    final_model = tf.keras.models.Model(
        inputs=[first_input, second_input, vgg_model.input],
        outputs=convnet_output)

    return final_model
def create_model(trainable=True):
    model = MobileNet(input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, 3),
                      include_top=False,
                      alpha=ALPHA,
                      weights="imagenet")  # Load pre-trained MobileNet
    for layer in model.layers:
        layer.trainable = trainable

    # Add all the UNET layers here

    block1 = model.get_layer("conv_pw_1_relu").output
    block2 = model.get_layer("conv_pw_3_relu").output
    block3 = model.get_layer("conv_pw_5_relu").output
    block4 = model.get_layer("conv_pw_11_relu").output
    block5 = model.get_layer("conv_pw_13_relu").output

    x = Concatenate()([UpSampling2D()(block5), block4])
    x = Concatenate()([UpSampling2D()(x), block3])
    x = Concatenate()([UpSampling2D()(x), block2])
    x = Concatenate()([UpSampling2D()(x), block1])
    x = UpSampling2D()(x)

    x = Conv2D(1, kernel_size=1, activation="sigmoid")(x)
    x = Reshape((IMAGE_WIDTH, IMAGE_HEIGHT))(x)

    return Model(inputs=model.input, outputs=x)  #### Add your code here ####
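A usage sketch under stated assumptions: IMAGE_WIDTH, IMAGE_HEIGHT and ALPHA are module-level constants in the original snippet, so the values below are examples only (the input size must stay divisible by 32 for the five upsampling steps to line up with the skip connections).

IMAGE_WIDTH, IMAGE_HEIGHT, ALPHA = 224, 224, 1.0  # assumed module-level constants
seg_model = create_model(trainable=False)
seg_model.compile(optimizer='adam', loss='binary_crossentropy')
print(seg_model.output_shape)  # (None, 224, 224): one single-channel mask per image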
def yolo2lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V2 Lite MobileNet model CNN body in Keras."""

    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # mobilenet.output            : 13 x 13 x (1024*alpha)
    # conv_pw_11_relu(layers[73]) : 26 x 26 x (512*alpha)

    conv_head1 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(int(1024 * alpha), (3, 3),
                                            block_id_str='14'),
        Depthwise_Separable_Conv2D_BN_Leaky(int(1024 * alpha), (3, 3),
                                            block_id_str='15'))(
                                                mobilenet.output)

    # conv_pw_11_relu output shape: 26 x 26 x (512*alpha)
    conv_pw_11_relu = mobilenet.layers[73].output
    conv_head2 = DarknetConv2D_BN_Leaky(int(64 * alpha),
                                        (1, 1))(conv_pw_11_relu)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv_head2_reshaped = Lambda(space_to_depth_x2,
                                 output_shape=space_to_depth_x2_output_shape,
                                 name='space_to_depth')(conv_head2)

    x = Concatenate()([conv_head2_reshaped, conv_head1])
    x = Depthwise_Separable_Conv2D_BN_Leaky(int(1024 * alpha), (3, 3),
                                            block_id_str='16')(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                      name='predict_conv')(x)
    return Model(inputs, x)
Example #20
def create_model_mobnet():

    input_layer = Input(shape=(None, None, 3), name='input')
    x = input_layer

    mbnet = MobileNet(input_shape=(224, 224, 3), include_top=True)

    backbone = keras.models.clone_model(mbnet)
    for i, bblayer in enumerate(backbone.layers[1:74]):
        layer = bblayer.__class__.from_config(bblayer.get_config())
        layer.name = 'backbone_' + layer.name
        x = layer(x)

    x = end_block(x)

    model = Model(inputs=input_layer, outputs=x)

    backbone_layers = {
        'backbone_' + layer.name: layer
        for layer in backbone.layers
    }
    for layer in model.layers:
        if layer.name in backbone_layers:
            print('setting ' + layer.name)
            layer.set_weights(backbone_layers[layer.name].get_weights())

    return model
def base_net():

    base_model = MobileNet(include_top=False,
                           weights='imagenet',
                           input_shape=(300, 300, 3))

    # 4th block
    mobile_v1_conv4 = []
    for layer in base_model.layers[:74]:
        mobile_v1_conv4.append(layer)

    x = layers.Input(shape=[None, None, 3])
    out = x

    for layer in mobile_v1_conv4:
        out = layer(out)

    mobile_v1_conv4 = tf.keras.Model(x, out)

    # 7th block
    mobile_v1_conv7 = base_model.layers[75:]

    x = layers.Input(shape=[None, None, 512])
    out = x
    for layer in mobile_v1_conv7:
        out = layer(out)

    mobile_v1_conv7 = tf.keras.Model(x, out)

    return mobile_v1_conv4, mobile_v1_conv7
def get_base_model(model_name, weights_path, weight_decay=1e-4):
    """
        Define base model used in transfer learning.
    """
    if not weights_path:
        weights_path = 'imagenet'
    if model_name == 'VGG16':
        base_model = VGG16(weights=weights_path, include_top=False)
    elif model_name == 'VGG19':
        base_model = VGG19(weights=weights_path, include_top=False)
    elif model_name == 'ResNet50V1':
        base_model = awsdet.models.backbones.ResNet50(weights=None, include_top=False, weight_decay=weight_decay)
    elif model_name == 'ResNet50V2':
        base_model = awsdet.models.backbones.ResNet50V2(weights=None, include_top=False, weight_decay=weight_decay)
    elif model_name == 'Xception':
        base_model = Xception(weights=weights_path, include_top=False)
    elif model_name == 'InceptionV3':
        base_model = InceptionV3(weights=weights_path, include_top=False)
    elif model_name == 'InceptionResNetV2':
        base_model = InceptionResNetV2(weights=weights_path,
                                        include_top=False)
    elif model_name == 'MobileNet':
        base_model = MobileNet(weights=weights_path, include_top=False)
    else:
        raise ValueError(
            'Valid base model values are: "VGG16","VGG19","ResNet50V1", "ResNet50V2", "Xception", \
                            "InceptionV3","InceptionResNetV2","MobileNet".'
        )
    return base_model
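A brief usage sketch (illustrative): per the fallback above, passing an empty weights path loads the ImageNet weights, and 'MobileNet' is just one of the accepted model names.

base = get_base_model('MobileNet', None)  # falls back to weights_path='imagenet'
print(base.output_shape)  # (None, None, None, 1024) when no input_shape is fixed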
def yolo3lite_spp_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create YOLO_v3 Lite SPP MobileNet model CNN body in keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f1 :13 x 13 x (1024*alpha)
    #x, y1 = make_depthwise_separable_last_layers(f1, int(512*alpha), num_anchors * (num_classes + 5), block_id_str='14')
    x, y1 = make_spp_depthwise_separable_last_layers(f1,
                                                     int(512 * alpha),
                                                     num_anchors *
                                                     (num_classes + 5),
                                                     block_id_str='14')

    x = compose(DarknetConv2D_BN_Leaky(int(256 * alpha), (1, 1)),
                UpSampling2D(2))(x)

    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f2: 26 x 26 x (512*alpha)
    x = Concatenate()([x, f2])

    x, y2 = make_depthwise_separable_last_layers(x,
                                                 int(256 * alpha),
                                                 num_anchors *
                                                 (num_classes + 5),
                                                 block_id_str='15')

    x = compose(DarknetConv2D_BN_Leaky(int(128 * alpha), (1, 1)),
                UpSampling2D(2))(x)

    f3 = mobilenet.get_layer('conv_pw_5_relu').output
    # f3 : 52 x 52 x (256*alpha)
    x = Concatenate()([x, f3])
    x, y3 = make_depthwise_separable_last_layers(x,
                                                 int(128 * alpha),
                                                 num_anchors *
                                                 (num_classes + 5),
                                                 block_id_str='16')

    return Model(inputs=inputs, outputs=[y1, y2, y3])
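A usage sketch for the three-scale variant above (illustrative only; it assumes the module imports and the depthwise-separable helper layers are available, and the Input import assumes a tf.keras setup):

from tensorflow.keras.layers import Input

image_input = Input(shape=(416, 416, 3))
model = yolo3lite_spp_mobilenet_body(image_input, num_anchors=3, num_classes=20)
print([y.shape for y in model.outputs])  # 13x13, 26x26 and 52x52 prediction grids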
Example #24
def get_weights(save_dir: Path, model_name: str, dtype: str) -> str:
    """Download pre-trained imagenet weights for model.

    Args:
        save_dir: Path to where checkpoint must be downloaded.
        model_name: Type of image classification model, must be one of
        ("GoogleNet", "InceptionV1", "MobileNet", "MobileNetV2", "NASNetMobile", "DenseNet121",
         "ResNet50", "Xception", "InceptionV3") in all lower case.
        dtype: Data type of the network.

    Returns: Path to checkpoint file.

    """
    if isinstance(save_dir, str):
        save_dir = Path(save_dir)
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        keras_backend.set_floatx(dtype)
        keras_backend.set_session(sess)
        if model_name == "mobilenet":
            MobileNet(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "mobilenetv2":
            MobileNetV2(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "nasnetmobile":
            NASNetMobile(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "densenet121":
            DenseNet121(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "resnet50":
            ResNet50(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "xception":
            Xception(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "inceptionv3":
            InceptionV3(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name in ("googleNet", "inceptionv1"):
            tar_file = get_file(
                fname='inceptionv1_tar.gz',
                origin=
                'http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz'
            )
            tar_file_reader = tarfile.open(tar_file)
            tar_file_reader.extractall(save_dir)
            if dtype == 'float16':
                saver = convert_ckpt_to_fp16(
                    Path(save_dir, 'inception_v1.ckpt').as_posix())
            sess.run(tf.global_variables_initializer())
        else:
            raise ValueError("""Requested model type = %s not one of
            ["GoogleNet", "InceptionV1", "MobileNet", "MobileNetV2", "NASNetMobile", "DenseNet121",
            "ResNet50", "Xception", "InceptionV3"].""" % model_name)
        save_dir.mkdir(parents=True, exist_ok=True)
        return saver.save(sess,
                          Path(save_dir, f"{model_name}.ckpt").as_posix())
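A usage sketch for the checkpoint helper above (it relies on the TF1-style Session/Saver APIs used in the function; the directory is a placeholder):

from pathlib import Path

ckpt_path = get_weights(Path('./checkpoints'), 'mobilenet', 'float32')  # placeholder dir
print(ckpt_path)  # e.g. ./checkpoints/mobilenet.ckpt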
Example #25
    def __init__(self):

        self.Model = MobileNet(input_shape=(224, 224, 3),
                               include_top=False,
                               pooling='avg')

        # load the parms used in training, which contain cluster size, indexes, data_paths, etc.
        with open('Models/parms.json', 'r') as f:
            self.parms_data = json.load(f)

        with open(self.parms_data["cluster_model"], 'rb') as f:
            self.cluster_model = pickle.load(f)

        self.training_imgs_dir = self.parms_data['training_data_path']

        self.knn_trees = self.load_knn_trees()
        self.knn_index_dicts = self.load_knn_index_dicts()
Example #26
def make_model(name, input_shape, num_classes):
    if name == "VGG19":
        from tensorflow.keras.applications.vgg19 import VGG19
        base_model = VGG19(include_top=False,
                           input_shape=input_shape,
                           weights='imagenet',
                           layers=tf.keras.layers,
                           pooling="max")
    elif name == "ResNet50":
        from tensorflow.keras.applications.resnet50 import ResNet50
        base_model = ResNet50(include_top=False,
                              input_shape=input_shape,
                              weights='imagenet',
                              layers=tf.keras.layers,
                              pooling="avg")
    elif name == "VGG16":
        from tensorflow.keras.applications.vgg16 import VGG16
        base_model = VGG16(include_top=False,
                           input_shape=input_shape,
                           weights='imagenet',
                           layers=tf.keras.layers,
                           pooling="max")
    elif name == "InceptionV3":
        from tensorflow.keras.applications.inception_v3 import InceptionV3
        base_model = InceptionV3(include_top=False,
                                 input_shape=input_shape,
                                 weights='imagenet',
                                 layers=tf.keras.layers,
                                 pooling="max")
    elif name == "MobileNet":
        from tensorflow.keras.applications.mobilenet import MobileNet
        base_model = MobileNet(include_top=False,
                               input_shape=input_shape,
                               weights='imagenet',
                               layers=tf.keras.layers,
                               pooling="max")
    elif name == "VGG161":
        return pretrained_model1()
    elif name == "InceptionResNetV2":
        return pretrained_model2()
    elif name == "DenseNet121":
        return pretrained_model3()
    # model = Sequential()
    # model.add(base_model)
    # model.add(layers.Flatten())
    # model.add(layers.Dense(256, activation='relu', name="Dense1"))
    # model.add(layers.Dense(num_classes, activation='softmax', name="Dense2"))
    # print(model.summary())
    model = base_model.output
    # model=layers.Flatten()(model)
    model = layers.Dense(256, activation='relu', name="Dense1")(model)
    model = layers.Dense(num_classes, activation='softmax',
                         name="Dense2")(model)
    headmodel = Model(inputs=base_model.input, outputs=model)
    # print(headmodel.summary())
    # base_model.trainable = False
    return headmodel
Example #27
    def build_base_model(self):
        base_network = MobileNet(input_shape=self.input_shape,
                                 alpha=self.alpha,
                                 weights=None,
                                 include_top=False,
                                 pooling='avg')
        op = Dense(128, activation='relu')(base_network.output)
        output = Lambda(lambda x: K.l2_normalize(x, axis=1))(op)
        return Model(inputs=base_network.input, outputs=output)
def get_model():
    input_tensor = Input(shape=(224, 224, 3))  # this assumes K.image_data_format() == 'channels_last'

    # create the base pre-trained model
    base_model = MobileNet(input_tensor=input_tensor, weights='imagenet', include_top=True)
    x = base_model.output

    updatedModel = Model(base_model.input, x)

    return updatedModel
def yolo2lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V2 Lite MobileNet model CNN body in Keras."""
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(mobilenet.layers)))

    # input: 416 x 416 x 3
    # mobilenet.output            : 13 x 13 x (1024*alpha)
    # conv_pw_11_relu(layers[73]) : 26 x 26 x (512*alpha)

    # f1: 13 x 13 x (1024*alpha)
    f1 = mobilenet.output
    # f2: 26 x 26 x (512*alpha)
    f2 = mobilenet.get_layer('conv_pw_11_relu').output

    f1_channel_num = int(1024*alpha)
    f2_channel_num = int(512*alpha)

    y = yolo2lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
    return Model(inputs, y)
Example #30
def setup_mobilenet_v1_model():
  base_model = MobileNet(include_top=False,
                         weights='imagenet',
                         pooling='avg',
                         input_shape=(IMG_SIZE, IMG_SIZE, 3))
  x = base_model.output
  x = tf.keras.layers.Dense(info.features['label'].num_classes, activation="softmax")(x)
  model_functional = tf.keras.Model(inputs=base_model.input, outputs=x)

  return model_functional
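A hedged sketch of the globals the snippet above assumes (IMG_SIZE and a tensorflow_datasets info object); the dataset name and training settings are placeholders.

import tensorflow_datasets as tfds

IMG_SIZE = 224  # assumed global used by setup_mobilenet_v1_model
_, info = tfds.load('tf_flowers', split='train', with_info=True)  # placeholder dataset
model = setup_mobilenet_v1_model()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])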