def __init__(self, model_file, num_classes, input_image_size):
     if not gfile.Exists(model_file) or input_image_size != (224, 224, 3):
         print("need download the model")
         mobile_net = MobileNet(weights='imagenet',
                                input_shape=input_image_size)
         self.mobile_net_model = models.Model(
             inputs=mobile_net.input,
             outputs=mobile_net.get_layer(
                 'global_average_pooling2d').output)
         print("save the downloaded model for reuse")
         mobile_net.save(model_file)
     else:
         self.mobile_net_model = models.load_model(model_file)
     classes = num_classes
     self.inputs = layers.Input(shape=(1024, ))
     self.outputs = layers.Dense(classes,
                                 activation='softmax',
                                 name='final_output')(self.inputs)
     self.one_layer_model = models.Model(inputs=[self.inputs],
                                         outputs=[self.outputs])
     final_output = layers.Dense(classes,
                                 activation='softmax',
                                 name='final_output')(
                                     self.mobile_net_model.output)
     self.final_model = models.Model(inputs=self.mobile_net_model.inputs,
                                     outputs=final_output)
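A hedged usage sketch of the class above: the class name (TransferClassifier) and the data are placeholders, not from the snippet. The idea is to precompute features once with mobile_net_model, train only the small head (one_layer_model), and then copy its weights into final_model for end-to-end inference.

import numpy as np

clf = TransferClassifier('mobilenet_feat.h5', num_classes=5, input_image_size=(224, 224, 3))

images = np.random.rand(8, 224, 224, 3).astype('float32')    # stand-in image batch
labels = np.eye(5)[np.random.randint(0, 5, size=8)]          # stand-in one-hot labels

# pooled MobileNet features; the head's Input layer expects 1024-d vectors
features = clf.mobile_net_model.predict(images).reshape(len(images), -1)
clf.one_layer_model.compile(optimizer='adam', loss='categorical_crossentropy')
clf.one_layer_model.fit(features, labels, epochs=1)

# both heads share the layer name 'final_output', so the trained weights
# can be copied into the combined model for end-to-end prediction
clf.final_model.get_layer('final_output').set_weights(
    clf.one_layer_model.get_layer('final_output').get_weights())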
Example #2
def mobilenet_v1(num_classes, inputs, modifier=None):
    from tensorflow import keras
    from tensorflow.keras.applications import MobileNet

    # Build a MobileNet backbone on the given input tensor and expose three
    # intermediate feature maps for use by a detection head.
    backbone = MobileNet(input_tensor=inputs, include_top=False, pooling=None)
    layer_names = ['conv_pw_5_relu', 'conv_pw_11_relu', 'conv_pw_13_relu']
    layer_outputs = [backbone.get_layer(name).output for name in layer_names]
    backbone = keras.models.Model(inputs=inputs, outputs=layer_outputs, name=backbone.name)

    if modifier:
        backbone = modifier(backbone)
    return backbone
Example #3
def create_object_basic_model():
    MobileNet_model = MobileNet(weights='imagenet',
                                include_top=False,
                                input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    MobileNet_model_out = MobileNet_model.get_layer('conv_pw_13_relu').output
    MobileNet_model_out = GlobalAveragePooling2D()(MobileNet_model_out)
    MobileNet_model_out = Dense(8, activation='softmax')(MobileNet_model_out)
    model = Model(inputs=MobileNet_model.input, outputs=MobileNet_model_out)
    return model
Example #4
    def __init__(self):
        # load the pickled classifier that was trained on MobileNet features
        self.ML_model = None
        with open('models/mobileNet4wheels.cpickle', 'rb') as f:
            self.ML_model = pickle.load(f)
        # load the MobileNet network and initialize the label encoder
        print("[INFO] loading MobileNet network...")
        self.MobileNet = MobileNet(weights="imagenet", include_top=False)

        # create the key names for the flattened 7 x 7 x 1024 feature vector
        self.fieldnames = ["feat_{}".format(i) for i in range(0, 7 * 7 * 1024)]
Example #5
    def load_model(self):
        input_layer = layers.Input(shape=(self.input_shape + (3, )))

        # Loading base model
        mobilenet = MobileNet(weights="imagenet",
                              input_tensor=input_layer,
                              alpha=0.5,
                              include_top=False)

        mobilenet.trainable = False
        x = layers.GlobalAveragePooling2D()(mobilenet.output)
        model = Model(inputs=input_layer, outputs=x, name='pose_model')

        return model
Example #6
def mobile_net(n_labels, input_shape):
    mobilenet_layer = MobileNet(weights='imagenet',
                                include_top=False,
                                input_shape=input_shape)
    mobilenet_layer.trainable = False

    inputs = Input(shape=input_shape)
    x = mobilenet_layer(inputs)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    outputs = Dense(n_labels, activation='softmax')(x)

    return Model(inputs=inputs, outputs=outputs)
Example #7
 def _get_encoder(self):
     mobile_net = MobileNet(weights='imagenet',
                            include_top=False,
                            input_shape=self.input_shape)
     layer_names = [
         'conv_pw_1_relu',
         'conv_pw_3_relu',
         'conv_pw_5_relu',
         'conv_pw_11_relu',
         'conv_pw_13_relu',
     ]
     layer_outputs = [mobile_net.get_layer(name).output for name in layer_names]
     down_stack = tf.keras.Model(inputs=mobile_net.input, outputs=layer_outputs)
     down_stack.trainable = False
     return down_stack
Example #8
def mobilenet_encoder(input_shape=[224, 224, 3]):
    mn = MobileNet(weights='imagenet',
                   include_top=False,
                   input_shape=input_shape)
    mn.trainable = False
    layer_names = [
        'conv_pw_1_relu',
        'conv_pw_3_relu',
        'conv_pw_5_relu',
        'conv_pw_11_relu',
        'conv_pw_13_relu',
    ]
    layer_outputs = [mn.get_layer(name).output for name in layer_names]
    down_stack = tf.keras.Model(inputs=mn.input, outputs=layer_outputs)
    down_stack.trainable = False
    return down_stack
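A quick sanity check of the encoder above, sketched under the assumption of the default 224x224x3 input:

import numpy as np

down_stack = mobilenet_encoder()
dummy = np.zeros((1, 224, 224, 3), dtype='float32')
skips = down_stack(dummy)  # list of 5 feature maps, from high to low resolution
for name, feat in zip(['conv_pw_1_relu', 'conv_pw_3_relu', 'conv_pw_5_relu',
                       'conv_pw_11_relu', 'conv_pw_13_relu'], skips):
    print(name, feat.shape)  # (1, 112, 112, 64) ... (1, 7, 7, 1024)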
Example #9
def main():
    # configure TF launching
    set_tf_env()

    # initialize TF MobileNet model
    original_tf_model = MobileNet(include_top=True, weights="imagenet")

    # get TF frozen graph path
    full_pb_path = get_tf_model_proto(original_tf_model)

    # read frozen graph with OpenCV API
    opencv_net = cv2.dnn.readNetFromTensorflow(full_pb_path)
    print("OpenCV model was successfully read. Model layers: \n",
          opencv_net.getLayerNames())

    # get preprocessed image
    input_img = get_preprocessed_img("../data/squirrel_cls.jpg")

    # get ImageNet labels
    imagenet_labels = get_imagenet_labels(
        "../data/dnn/classification_classes_ILSVRC2012.txt")

    # obtain OpenCV DNN predictions
    get_opencv_dnn_prediction(opencv_net, input_img, imagenet_labels)

    # obtain TF model predictions
    get_tf_dnn_prediction(original_tf_model, input_img, imagenet_labels)
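The helper functions used above (set_tf_env, get_tf_model_proto, get_preprocessed_img, and so on) are defined elsewhere. As a rough, hedged sketch, freezing a Keras model into a .pb graph that cv2.dnn.readNetFromTensorflow can load typically looks like the following; the function name below is illustrative, not the original helper.

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

def freeze_keras_model_to_pb(model, pb_dir="frozen", pb_name="frozen_graph.pb"):
    # wrap the Keras model in a concrete function and freeze its variables
    full_model = tf.function(lambda x: model(x))
    full_model = full_model.get_concrete_function(
        tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
    frozen_func = convert_variables_to_constants_v2(full_model)
    # serialize the frozen graph so OpenCV's DNN module can read it
    tf.io.write_graph(frozen_func.graph, pb_dir, pb_name, as_text=False)
    return pb_dir + "/" + pb_name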
Example #10
def segmentation_model(input_shape=(160, 160, 3), numclass=2):

    input_layer = layers.Input(shape=input_shape)
    mobilenet = MobileNet(weights="imagenet",
                          alpha=0.5,
                          input_tensor=input_layer,
                          include_top=False)

    for layer in mobilenet.layers:
        layer.trainable = True

    # Defining custom decoder which is fast and small
    bn = conv_pw(mobilenet.output, filters=32, name='mobile_bottleneck')

    x = upblock(bn, mobilenet, 'conv_pw_11_relu', name='4')
    x = upblock(x, mobilenet, 'conv_pw_5_relu', name='3')
    x = upblock(x, mobilenet, 'conv_pw_3_relu', name='2')
    x = upblock(x, mobilenet, 'conv_pw_1_relu', name='1')

    x = layers.UpSampling2D(size=(2, 2))(x)

    output = layers.Conv2D(filters=numclass,
                           kernel_size=(1, 1),
                           padding='same',
                           activation='softmax',
                           name='final_layer')(x)

    model = Model(inputs=input_layer,
                  outputs=output,
                  name='segmentation_model_mobile')

    return model
Example #11
def train_classifier_model(n_classes: int, learning_rate: float) -> Model:
    # build model
    base_model = MobileNet(
        weights="imagenet", include_top=False
    )  # load MobileNet without its final 1000-way classification layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add dense layers so the head can learn a richer mapping
    # from the pooled MobileNet features to the target classes
    x = Dense(1024, activation="relu")(x)
    x = Dense(1024, activation="relu")(x)  # dense layer 2
    x = Dense(512, activation="relu")(x)  # dense layer 3
    preds = Dense(n_classes, activation="softmax")(
        x)  # final layer with softmax activation
    model = Model(inputs=base_model.input, outputs=preds)

    # freeze the first 20 layers; leave the rest trainable
    for layer in model.layers[:20]:
        layer.trainable = False
    for layer in model.layers[20:]:
        layer.trainable = True

    # compile
    opt = Adam(learning_rate=learning_rate)
    model.compile(optimizer=opt,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
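A hedged usage sketch for the function above; the data directory, image size, and batch size are assumptions, not part of the snippet.

from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.mobilenet import preprocess_input

model = train_classifier_model(n_classes=5, learning_rate=1e-4)
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_gen = datagen.flow_from_directory('data/train', target_size=(224, 224),
                                        batch_size=32, class_mode='categorical')
model.fit(train_gen, epochs=5)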
Example #12
    def __init__(self,
                 model_id='mobilenet_v1',
                 input_shape=(256, 256, 3),
                 output_stride=16):
        assert model_id == 'mobilenet_v1'

        super(MobileNetV1, self).__init__()
        self.output_stride = output_stride
        self.features = MobileNet(
            input_shape=input_shape,
            include_top=False,
            weights='imagenet',
            classifier_activation=None,
        )
        self.features.trainable = False
        self.conv1 = Conv2D(1024, 1, padding='same')
        self.dconv1 = Conv2DTranspose(1024, 3, strides=2, padding='same')
        self.bn1 = BatchNormalization()
        self.conv2 = Conv2D(1024, 3, 1, padding='same')
        self.conv3 = Conv2D(1024, 3, 1, padding='same')
        self.dconv2 = Conv2DTranspose(1024, 3, strides=2, padding='same')
        self.bn2 = BatchNormalization()
        self.conv4 = Conv2D(1024, 3, 1, padding='same')
        self.conv5 = Conv2D(1024, 3, 1, padding='same')

        self.heatmap_conv1 = Conv2D(512, 3, 1, padding='same')
        self.heatmap_conv2 = Conv2D(256, 3, 1, padding='same')
        self.heatmap = Conv2D(17, 3, 1, activation='sigmoid', padding='same')

        self.offset_conv1 = Conv2D(512, 3, 1, padding='same')
        self.offset_conv2 = Conv2D(256, 3, 1, padding='same')
        self.offset = Conv2D(34, 3, 1, padding='same')
Example #13
def mobilenet(input_shape: Tuple[int, int, int],
              output_shape: Tuple[int, ...],
              weights: str = 'imagenet',
              include_top: bool = False) -> Model:

    base_model = MobileNet(weights=weights,
                           include_top=include_top,
                           input_tensor=Input(shape=input_shape))

    if include_top:
        return base_model
    else:
        # Construct the head of the model that will be placed on top of the base model
        num_classes = output_shape[0]
        head_model = base_model.output
        head_model = AveragePooling2D(pool_size=(4, 4))(head_model)
        head_model = Flatten(name='flatten')(head_model)
        head_model = Dense(64, activation='relu')(head_model)
        head_model = Dropout(0.7)(head_model)

        if num_classes > 2:
            activation = 'softmax'
        else:
            activation = 'sigmoid'

        head_model = Dense(num_classes, activation=activation)(head_model)

        # Place the head Fully Connected model on top of the base model (actual model)
        model = Model(base_model.input, head_model)

        # Loop over all layers in the base model and freeze them so they won't be updated during the first training process
        for layer in base_model.layers:
            layer.trainable = False

        return model
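A short usage sketch for the builder above; the optimizer and loss are assumptions. With output_shape=(2,) the head ends in a 2-unit sigmoid layer, so one-hot labels with binary cross-entropy are a common pairing.

model = mobilenet(input_shape=(224, 224, 3), output_shape=(2,))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()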
Example #14
def ejer3_imagenet(n_epochs):
    input_image = Input(shape=(32, 32, 3))
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train, y_train = preprocessing(x_train, y_train)
    x_test, y_test = preprocessing(x_test, y_test)

    model_keras = MobileNet(include_top=False,
                            weights="imagenet",
                            input_tensor=input_image)
    #model_keras.trainable = False

    model = Sequential()
    model.add(model_keras)
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(BatchNormalization())
    model.add(Dense(10, activation='softmax'))

    model.compile(loss=losses.CategoricalCrossentropy(),
                  optimizer=optimizers.Adam(learning_rate=0.00001),
                  metrics=[metrics.CategoricalAccuracy('acc')])

    model.summary()

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=50,
                        epochs=n_epochs,
                        verbose=1)

    acc, val_acc, loss, val_loss = plot_ejercicio(history)

    np.savetxt("ejer3{}epochs{}_mobilenet.txt".format("_imagenet_", n_epochs),
               np.array([acc, val_acc, loss, val_loss]).T)
Example #15
def mobilenet_transfer(class_number):
    """Return a classification model with a mobilenet backbone pretrained on ImageNet
    
    # Arguments:
        class_number: Number of classes / labels to detect
    """
    # Load the MobileNet backbone without its final 1000-way classification layer.
    base_model = MobileNet(input_shape=(224, 224, 3),
                           weights='imagenet',
                           include_top=False,
                           pooling='avg')

    x = base_model.output
    x = tf.keras.layers.Dense(1024, activation='relu')(x)
    x = tf.keras.layers.Dense(1024, activation='relu')(x)
    x = tf.keras.layers.Dense(512, activation='relu')(x)
    # Final layer with softmax activation
    preds = tf.keras.layers.Dense(class_number, activation='softmax')(x)
    # Build the model
    model = tf.keras.models.Model(inputs=base_model.input, outputs=preds)

    # Freeze base_model
    # for layer in base_model.layers: # <=> to [:86]
    #     layer.trainable = False
    # Freeze the first 60 layers and fine-tune the rest
    for layer in model.layers[:60]:
        layer.trainable = False
    for layer in model.layers[60:]:
        layer.trainable = True

    return model
Example #16
def train_model(train_set, valid_set):
    """
    Train & Save Model
    """
    # Get the trained Mobilenet model
    mobile = MobileNet()

    # Take the output of the sixth layer from the end, dropping MobileNet's final classification layers
    prev_layers = mobile.layers[-6].output
    # Add a Dense layer with 10 outputs and a softmax activation for class probabilities
    # (Keras functional API)
    output = Dense(units=10, activation='softmax')(prev_layers)

    # Create The new model with the dense layer
    model = Model(inputs=mobile.input, outputs=output)

    # Freeze all MobileNet layers except the last 21
    for layer in mobile.layers[:-21]:
        layer.trainable = False

    # Print The summary of model
    model.summary()

    # Compile The model
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Train model with 25 epochs
    model.fit(x=train_set, validation_data=valid_set, epochs=25, verbose=2)

    # Save the training model
    model.save(SAVED_MODEL)

    return model
Example #17
    def __init__(self):
        super(mobileNet, self).__init__()

        # TODO: Select an optimizer for your network (see the documentation
        #       for tf.keras.optimizers)

        self.optimizer = tf.keras.optimizers.Adam(
            learning_rate=hp.learning_rate)

        # To train MobileNet from scratch instead, use:
        # self.mobileNet = MobileNet(classes=hp.num_classes, weights=None)

        # Use a MobileNet pretrained on ImageNet as a frozen feature extractor
        self.mobileNet = MobileNet(weights='imagenet', include_top=False)
        for i in range(len(self.mobileNet.layers)):
            self.mobileNet.layers[i].trainable = False

        self.head = [
            GlobalAveragePooling2D(),
            Dense(1024, activation='relu'),
            Dropout(0.3),
            Dense(512, activation='relu'),
            Dropout(0.3),
            Dense(hp.num_classes),
            Softmax(axis=-1)
        ]
        self.head = tf.keras.Sequential(self.head, name="mobileNet_head")
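The excerpt above only shows the constructor; a forward pass along these lines would typically accompany it. The method below is a sketch, not part of the original snippet.

    def call(self, x):
        # frozen MobileNet features -> pooled dense head -> class probabilities
        x = self.mobileNet(x)
        return self.head(x)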
Example #18
    def build():
        # Encoder: MobileNet (feature extractor)
        mobNet = MobileNet(
            input_shape=(224, 224,
                         3),  # Use 224 by 224 images with 3 channels (RGB)
            alpha=1.0,
            depth_multiplier=1,
            dropout=1e-3,
            include_top=False,  # Remove the last classifier
            weights='imagenet',  # Pretrained on ImageNet
            input_tensor=None,
            pooling=None)

        decIn = mobNet.layers[-1].output

        # Decoder
        # Upsample 1
        conv1Out = Conv2D(512, (5, 5), padding="same")(decIn)
        up1Out = UpSampling2D(size=(2, 2), interpolation="nearest")(conv1Out)
        # Upsample 2
        conv2Out = Conv2D(256, (5, 5), padding="same")(up1Out)
        up2Out = UpSampling2D(size=(2, 2), interpolation="nearest")(conv2Out)
        # Skip connection 1
        skip1 = mobNet.get_layer("conv_pw_5_relu").output
        skip1Out = Add()([up2Out, skip1])
        # Upsample 3
        conv3Out = Conv2D(128, (5, 5), padding="same")(skip1Out)
        up3Out = UpSampling2D(size=(2, 2), interpolation="nearest")(conv3Out)
        # Skip connection 2
        skip2 = mobNet.get_layer("conv_pw_3_relu").output
        skip2Out = Add()([up3Out, skip2])
        # Upsample 4
        conv4Out = Conv2D(64, (5, 5), padding="same")(skip2Out)
        up4Out = UpSampling2D(size=(2, 2), interpolation="nearest")(conv4Out)
        # Skip connection 3
        skip3 = mobNet.get_layer("conv_pw_1_relu").output
        skip3Out = Add()([up4Out, skip3])
        # Upsample 5
        conv5Out = Conv2D(32, (5, 5), padding="same")(skip3Out)
        up5Out = UpSampling2D(size=(2, 2), interpolation="nearest")(conv5Out)
        # Pointwise conv
        decOut = Conv2D(1, (1, 1), padding="same")(up5Out)

        # Combine full model
        model = Model(inputs=mobNet.input, outputs=decOut)
        return model
Example #19
def build_nima_model():
    base_model = MobileNet(input_shape=(224, 224, 3), weights=None, include_top=False, pooling='avg')
    x = tf.keras.layers.Dropout(0)(base_model.output)
    x = tf.keras.layers.Dense(units=10, activation='softmax')(x)
    nima_model = tf.keras.Model(inputs=base_model.inputs, outputs=x)
    nima_model.load_weights("weights_mobilenet_aesthetic_0.07.hdf5")
    nima_model.trainable = False
    return nima_model
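A hedged scoring sketch for the model above: NIMA-style heads output a distribution over the scores 1 to 10, and a single quality score is commonly taken as the distribution mean. The input below is a placeholder, and the snippet's weights file must be present for this to run.

import numpy as np

nima = build_nima_model()
img = np.random.rand(1, 224, 224, 3).astype('float32')   # stand-in for a preprocessed image
dist = nima.predict(img)[0]                               # 10-bin score distribution
mean_score = float(np.sum(dist * np.arange(1, 11)))       # expected value of the distribution
print('aesthetic score:', mean_score)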
Example #20
 def __init__(self, model_name=None):
     if model_name == 'Xception':
         base_model = Xception(weights='imagenet')
         self.preprocess_input = xception.preprocess_input
     elif model_name == 'VGG19':
         base_model = VGG19(weights='imagenet')
         self.preprocess_input = vgg19.preprocess_input
     elif model_name == 'ResNet50':
         base_model = ResNet50(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet101':
         base_model = ResNet101(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet152':
         base_model = ResNet152(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet50V2':
         base_model = ResNet50V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet101V2':
         base_model = ResNet101V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet152V2':
         base_model = ResNet152V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'InceptionV3':
         base_model = InceptionV3(weights='imagenet')
         self.preprocess_input = inception_v3.preprocess_input
     elif model_name == 'InceptionResNetV2':
         base_model = InceptionResNetV2(weights='imagenet')
         self.preprocess_input = inception_resnet_v2.preprocess_input
     elif model_name == 'DenseNet121':
         base_model = DenseNet121(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet169':
         base_model = DenseNet169(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet201':
         base_model = DenseNet201(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'NASNetLarge':
         base_model = NASNetLarge(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'NASNetMobile':
         base_model = NASNetMobile(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'MobileNet':
         base_model = MobileNet(weights='imagenet')
         self.preprocess_input = mobilenet.preprocess_input
     elif model_name == 'MobileNetV2':
         base_model = MobileNetV2(weights='imagenet')
         self.preprocess_input = mobilenet_v2.preprocess_input
     else:
         base_model = VGG16(weights='imagenet')
         self.preprocess_input = vgg16.preprocess_input
     self.model = Model(inputs=base_model.input,
                        outputs=base_model.layers[-2].output)
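A hedged usage sketch for the feature extractor above; the wrapper class name and the image path are assumptions, not part of the snippet.

import numpy as np
from tensorflow.keras.preprocessing import image

extractor = FeatureExtractor('MobileNet')                  # hypothetical class name
img = image.load_img('example.jpg', target_size=(224, 224))
x = np.expand_dims(image.img_to_array(img), axis=0)
x = extractor.preprocess_input(x)
features = extractor.model.predict(x)                      # penultimate-layer features
print(features.shape)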
Example #21
def create_model():
    mobileNetModel = MobileNet(weights='imagenet', include_top=False)
    model = Sequential()
    model.add(mobileNetModel)
    model.add(GlobalAveragePooling2D())
    model.add(Dense(20, activation='softmax', kernel_initializer='he_normal'))
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['acc', f1_m])

    return model
Example #22
 def __init__(self):
     super(Mobile_net, self).__init__()
     self.base_model = MobileNet(weights='imagenet',
                                 include_top=False,
                                 pooling="avg",
                                 input_shape=(224, 224, 3))
     # self.global_pool = GlobalAveragePooling2D()
     self.dense_1 = Dense(1024, activation='relu')
     self.dense_2 = Dense(512, activation='relu')
     self.dense_3 = Dense(7894, activation='softmax')
Example #23
def get_sess_from_keras_model():
    """
    Gets TF session from keras model
    :return: TF session
    """
    tf.keras.backend.clear_session()
    tf.keras.backend.set_learning_phase(1)
    _ = MobileNet(weights=None, input_shape=(224, 224, 3))
    sess = tf.compat.v1.keras.backend.get_session()
    return sess
Example #24
    def __init__(self, input_shape, outputs):
        super(MobiTransfer, self).__init__()
        self.outputs = outputs
        self.mobile = MobileNet(input_shape=input_shape,
                                include_top=False,
                                dropout=0.1)
        self.mobile.trainable = False

        self.flat = Flatten()
        self.Dense1 = Dense(outputs, activation='softmax')
Example #25
    def __init__(self, maxBin, maxBin2):
        """
		must have self.model, self.params, and self._name
		"""
        self._name = 'MobileNet-imagenet'

        self.model = MobileNet(include_top=True,
                               weights="imagenet",
                               input_shape=(224, 224, 3),
                               classes=1000)

        self.params = {
            'age hidden layer': 512,
            'gender hidden layer': 1024,
            'ethnicity hidden layer': 1024,
        }

        self.model = addOutputLayers(self.model, maxBin, maxBin2, self.params)

        self.model.compile(optimizer='Adam', loss=losses, metrics=['accuracy'])
Example #26
def model_fn(input_shape, num_classes=10):
    """Build a MobileNet-based classifier with a softmax head (tf.keras functional API)."""
    x = inputs = Input(shape=input_shape)
    base_model = MobileNet(input_shape=input_shape, include_top=False,
                           weights=None, input_tensor=x, classes=num_classes)
    x = base_model.output
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs, x)
    return model
Example #27
def transfer_mobilenet():
    # MobileNet pretrained on ImageNet; 32x32 inputs trigger a Keras warning but still load
    mobilenet = MobileNet(include_top=False, input_shape=(32, 32, 3), weights='imagenet')
    mobilenet_preprocess = tf.keras.applications.mobilenet.preprocess_input

    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = mobilenet_preprocess(inputs)
    x = mobilenet(x, training=False)  # feed the preprocessed tensor, not the raw inputs
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    outputs = tf.keras.layers.Dense(10, activation='softmax')(x)
    custom_mobilenet = tf.keras.Model(inputs, outputs)
    custom_mobilenet.summary()
    return custom_mobilenet
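A hedged training sketch for the function above; the optimizer, loss, batch size, and epoch count are assumptions. Pixel scaling is already handled inside the model by preprocess_input.

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
model = transfer_mobilenet()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train.astype('float32'), y_train,
          validation_data=(x_test.astype('float32'), y_test),
          batch_size=64, epochs=3)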
Example #28
 def createModel(self):
     base_model = MobileNet(input_shape=(64, 64, 3), weights=None, include_top=False)
     x = base_model.output
     x = Flatten()(x)
     x = Dense(1024, activation='relu')(x)
     x = Dense(512, activation='relu')(x)
     x = Dense(256, activation='relu')(x)
     preds = Dense(7, activation='softmax')(x)

     model = Model(inputs=base_model.input, outputs=preds)

     return model
Example #29
def get_bottleneck(train_datagen, val_datagen):
    """ Use a pretrained convolutional model to extract the bottleneck features """

    model_bottleneck = MobileNet(weights='imagenet',
                                 include_top=False,
                                 input_shape=(IMG_HEIGHT, IMG_WIDTH, 3))

    for layer in model_bottleneck.layers:
        layer.trainable = False

    # Get bottleneck features
    print('\nImage generators:')
    train_bottleneck_generator = train_datagen.flow_from_directory(
        TRAIN_DIR,
        color_mode='rgb',
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=BATCH_SIZE,
        class_mode=None,
        shuffle=False,
    )

    val_bottleneck_generator = val_datagen.flow_from_directory(
        VALIDATION_DIR,
        color_mode='rgb',
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=BATCH_SIZE,
        class_mode=None,
        shuffle=False,
    )

    print('\n Extracting bottleneck features:')

    train_bottleneck = model_bottleneck.predict(train_bottleneck_generator,
                                                verbose=1)
    val_bottleneck = model_bottleneck.predict(val_bottleneck_generator,
                                              verbose=1)
    train_labels = train_bottleneck_generator.classes
    val_labels = val_bottleneck_generator.classes

    return model_bottleneck, train_bottleneck, val_bottleneck, train_labels, val_labels
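One way the returned bottleneck features might be used, shown as a sketch only; the head architecture and hyperparameters below are assumptions, not part of the snippet.

from tensorflow.keras import layers, models

def train_head_on_bottleneck(train_bottleneck, train_labels, num_classes):
    # small classifier trained on the frozen MobileNet bottleneck features
    head = models.Sequential([
        layers.Flatten(input_shape=train_bottleneck.shape[1:]),
        layers.Dense(256, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation='softmax'),
    ])
    head.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                 metrics=['accuracy'])
    head.fit(train_bottleneck, train_labels, epochs=10, batch_size=32)
    return head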
Example #30
    def get_model():
        from tensorflow.keras.applications import MobileNet

        mobilenet = MobileNet(include_top=True,
                              weights=None,
                              input_shape=(224, 224, 3),
                              classes=1000)
        mobilenet.load_weights("mobilenet_1_0_224_tf.h5")
        inputs = {mobilenet.input_names[0]: ((1, 3, 224, 224), "float32")}

        data = {}
        np.random.seed(0)

        for name, (shape, dtype) in inputs.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -1, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)

        mod, params, ref_outputs = _get_keras_model(mobilenet, inputs, data)
        return mod, params, inputs, data, ref_outputs