Example #1
    def __init__(self, input_sizeW, input_sizeH):
        # Image input with configurable width/height and 3 (RGB) channels.
        input_image = Input(shape=(input_sizeW, input_sizeH, 3))

        # MobileNet backbone without its classification head, initialised
        # from the locally stored backend weights.
        mobilenet = MobileNet(input_shape=(224, 224, 3), include_top=False)
        mobilenet.load_weights(MOBILENET_BACKEND_PATH)

        x = mobilenet(input_image)

        # Expose the backbone as a standalone feature extractor.
        self.feature_extractor = Model(input_image, x)
Example #2
def transfer_model(model_name, input_shape, classes_nr):
    new_input = Input(shape=(input_shape[0], input_shape[1], 3))

    if model_name == "vgg16":
        model = VGG16(include_top=False, input_tensor=new_input)
    elif model_name == "densenet121":
        model = DenseNet121(include_top=False, input_tensor=new_input)
    elif model_name == "inceptionv3":
        model = InceptionV3(include_top=False, input_tensor=new_input)
    elif model_name == "mobilenet":
        model = MobileNet(include_top=False, input_tensor=new_input)
    elif model_name == "resnet101":
        model = ResNet101(include_top=False, input_tensor=new_input)
    elif model_name == "xception":
        model = Xception(include_top=False, input_tensor=new_input)
    else:
        raise ValueError("Unknown model name: {}".format(model_name))

    # Freeze the pre-trained backbone so only the new head is trained.
    for layer in model.layers:
        layer.trainable = False
    flat1 = layers.Flatten()(model.layers[-1].output)
    class1 = layers.Dense(1024, activation='relu')(flat1)
    drop1 = layers.Dropout(0.2)(class1)
    class2 = layers.Dense(256, activation='relu')(drop1)
    output = layers.Dense(classes_nr, activation='softmax')(class2)
    model = Model(inputs=model.inputs, outputs=output)
    return model
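A brief usage sketch for the helper above (the class count and compile settings are hypothetical, not part of the original snippet):

# Hypothetical usage: frozen-backbone MobileNet classifier for 5 classes.
model = transfer_model("mobilenet", (224, 224), classes_nr=5)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()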
Example #3
def predict(image1):
    model = MobileNet()
    image = load_img(image1, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the MobileNet model
    image = preprocess_input(image)
    # predict the probability across all output classes
    yhat = model.predict(image)
    # convert the probabilities to class labels
    label = decode_predictions(yhat)
    # retrieve the most likely result, e.g. highest probability
    label = label[0][0]
    return label
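A short usage sketch, assuming load_img, img_to_array, preprocess_input and decode_predictions come from keras.preprocessing.image and keras.applications.mobilenet as the function implies; the file name is a placeholder:

# Hypothetical call: classify a local image and print the top-1 label.
label = predict("cat.jpg")  # placeholder path
# decode_predictions entries are (class_id, class_name, probability) tuples.
print("%s (%.2f%%)" % (label[1], label[2] * 100))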
Example #4
def Mildnet_mobilenet():
    vgg_model = MobileNet(weights=None,
                          include_top=False,
                          input_shape=(224, 224, 3))
    intermediate_layer_outputs = get_layers_output_by_name(
        vgg_model, [
            "conv_dw_1_relu", "conv_dw_2_relu", "conv_dw_4_relu",
            "conv_dw_6_relu", "conv_dw_12_relu"
        ])
    # Pool the backbone output and each selected intermediate feature map,
    # then concatenate them into a single descriptor.
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    for layer_name, output in intermediate_layer_outputs.items():
        output = GlobalAveragePooling2D()(output)
        convnet_output = concatenate([convnet_output, output])

    convnet_output = Dense(1024, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Dense(1024, activation='relu')(convnet_output)
    # L2-normalise the embedding.
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        convnet_output)

    first_input = Input(shape=(224, 224, 3))
    second_input = Input(shape=(224, 224, 3))

    final_model = tf.keras.models.Model(
        inputs=[first_input, second_input, vgg_model.input],
        outputs=convnet_output)

    return final_model
Example #5
File: robotia.py  Project: JorgeAmil/gazebo
def obtenerModelo(tam_entrada):
  # Input tensor built from the requested size, e.g. (224, 224, 3).
  tensor_entrada = Input(shape=tam_entrada)
  modelo_preentrenado = MobileNet(input_shape=tam_entrada, input_tensor=tensor_entrada, weights=None)
  salida = modelo_preentrenado.output
  prediccion = Dense(2, activation="softmax")(salida)
  
  modelo_modificado = Model(inputs=tensor_entrada, outputs=prediccion)
  return modelo_modificado
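A minimal usage sketch for the function above (the 224x224 RGB input size and the compile settings are assumptions, not from the original project):

# Hypothetical usage: build and compile the two-class model.
modelo = obtenerModelo((224, 224, 3))
modelo.compile(optimizer="adam",
               loss="categorical_crossentropy",
               metrics=["accuracy"])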
Example #6
    def _define_model(output_layer=-1):
        '''Define a pre-trained MobileNet model.

        Args:
            output_layer: index of the layer whose output is used.

        Returns:
            A Keras Model with pre-trained ImageNet weights.
        '''
        base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
        output = base_model.layers[output_layer].output
        output = GlobalAveragePooling2D()(output)
        model = Model(inputs=base_model.input, outputs=output)
        return model
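A hedged sketch of how the extractor might be used, assuming the method is reachable as a plain function and NumPy is available:

import numpy as np

# Hypothetical usage: pooled MobileNet features for one random 224x224 RGB image.
feature_model = _define_model(output_layer=-1)
dummy_batch = np.random.rand(1, 224, 224, 3).astype("float32")
features = feature_model.predict(dummy_batch)  # shape (1, 1024) for alpha=1.0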
Example #7
def posenet(input_shape=(224, 224, 3), base_model_name="mobilenet"):
    base_model_names = ["mobilenet"]
    if base_model_name not in base_model_names:
        raise ValueError("{} only.".format(base_model_names[0]))

    if base_model_name == "mobilenet":
        base_model = MobileNet(input_shape=input_shape, include_top=False)
        # 14 * 14 * keypoints
        target_layer = "conv_pw_11_relu"
        # new_model = Model(inputs=base_model.input,
        #                   outputs=base_model.get_layer(name=target_layer).output)
        out = base_model.get_layer(name=target_layer).output
    else:
        raise ValueError()

    _kp_maps = Conv2D(len(kp2index),
                      kernel_size=(1, 1),
                      activation="sigmoid",
                      name="heatmap")(out)
    _short_offsets = Conv2D(2 * len(kp2index),
                            kernel_size=(1, 1),
                            name="offset")(out)

    return Model(inputs=base_model.input, outputs=[_kp_maps, _short_offsets])
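posenet reads a global kp2index mapping that is not shown in the snippet; a hedged sketch assuming a hypothetical 17-keypoint COCO-style mapping:

# Hypothetical keypoint mapping (an assumption, not from the original code).
kp2index = {name: i for i, name in enumerate([
    "nose", "left_eye", "right_eye", "left_ear", "right_ear",
    "left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
    "left_wrist", "right_wrist", "left_hip", "right_hip",
    "left_knee", "right_knee", "left_ankle", "right_ankle"])}

model = posenet()
model.summary()  # heatmap: (None, 14, 14, 17), offset: (None, 14, 14, 34)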
Example #8
def default_model():
    img_in = Input(shape=(224, 224, 3),name='img_in')  
    '''
    x = img_in
    x = Convolution2D(24, (5, 5), strides=(2, 2),activation='relu')(x)
    
    x = Convolution2D(32, (3, 3), strides=(2, 2),padding='same',activation='relu')(x)
    
    x = Convolution2D(64, (3, 3), strides=(2, 2),padding='same',activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1),padding='same',activation='relu')(x)
    
    x = Convolution2D(64, (3, 3), strides=(2, 2),padding='same',activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1),padding='same',activation='relu')(x)
    
    x = Convolution2D(128, (3, 3), strides=(2, 2), padding='same',activation='relu')(x)
    x = Convolution2D(128, (3, 3), strides=(1, 1), padding='same',activation='relu')(x)
    
    x = Flatten(name='flattened')(x)  
    x = Dense(100, activation='relu')(x)  
    x = Dropout(.1)(x) 
    x = Dense(50, activation='relu')(x)  
    x = Dropout(.1)(x) 
    '''
    # Build the MobileNet backbone on img_in; with input_tensor=None the backbone
    # would be disconnected from img_in and Model(inputs=[img_in], ...) would fail.
    base_model = MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=1e-3,
                           include_top=True, weights='imagenet', input_tensor=img_in,
                           pooling='max', classes=1000)

    x = base_model.output
    
    angle_out = Dense(15, activation='softmax', name='angle_out')(x) 

    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)  

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy',
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': 0.1},
                  metrics=["accuracy"]                 
                  )
    
    return model
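A smoke-test sketch with random data matching the two heads (batch size and target shapes are assumptions based on the Dense layers above):

import numpy as np

# Hypothetical smoke test: one-hot angle bins and scalar throttle targets.
model = default_model()
images = np.random.rand(4, 224, 224, 3).astype("float32")
angles = np.eye(15)[np.random.randint(0, 15, size=4)]
throttles = np.random.rand(4, 1).astype("float32")
model.train_on_batch(images, {"angle_out": angles, "throttle_out": throttles})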
Example #9
File: keras.py  Project: JolinLee/donkey
def default_mobilenet():
    img_rows, img_cols, img_channel = 120, 160, 3

    base_model = MobileNet(weights=None, include_top=False,
                           input_shape=(img_rows, img_cols, img_channel))

    x = base_model.output
    x = GlobalAveragePooling2D(data_format='channels_last')(x)
    x = Dropout(0.5)(x)
    
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)        
    
    model = Model(inputs=base_model.input, outputs=[angle_out,throttle_out])                 

    model.compile(loss={'angle_out': 'categorical_crossentropy',
                        'throttle_out': 'mean_absolute_error'},
              optimizer='adam', metrics=['accuracy'])
    return model
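A quick prediction sketch against the 120x160 input the model expects (random data, purely illustrative):

import numpy as np

# Hypothetical check of the two output shapes.
model = default_mobilenet()
angle, throttle = model.predict(np.random.rand(1, 120, 160, 3).astype("float32"))
print(angle.shape, throttle.shape)  # (1, 15) (1, 1)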
Example #10
def run_model(args):
    # Configure the memory optimizer
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    config = tf.ConfigProto()
    config.graph_options.rewrite_options.memory_optimization = rewriter_config_pb2.RewriterConfig.SCHEDULING_HEURISTICS
    #config.gpu_options.allow_growth=True
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    K.set_session(tf.Session(config=config))

    num_classes = args.num_classes
    batch_size = args.batch_size

    model_name = args.model
    if model_name == 'ResNet50':
        model = ResNet50(weights=None,
                         include_top=True,
                         input_shape=input_shape,
                         classes=num_classes)
    elif model_name == 'ResNet101':
        model = keras.applications.resnet.ResNet101(weights=None,
                                                    include_top=True,
                                                    input_shape=input_shape,
                                                    classes=num_classes)
    elif model_name == 'ResNet152':
        model = ResNet152(weights=None,
                          include_top=True,
                          input_shape=input_shape,
                          classes=num_classes)
    elif model_name == 'VGG16':
        model = VGG16(weights=None,
                      include_top=True,
                      input_shape=input_shape,
                      classes=num_classes)
    elif model_name == 'VGG19':
        model = VGG19(weights=None,
                      include_top=True,
                      input_shape=input_shape,
                      classes=num_classes)
    elif model_name == 'Xception':
        model = Xception(weights=None,
                         include_top=True,
                         input_shape=input_shape,
                         classes=num_classes)
    elif model_name == 'MobileNet':
        model = MobileNet(weights=None,
                          include_top=True,
                          input_shape=input_shape,
                          classes=num_classes)
    elif model_name == 'MobileNetV2':
        model = MobileNetV2(weights=None,
                            include_top=True,
                            input_shape=input_shape,
                            classes=num_classes)
    elif model_name == 'InceptionV3':
        model = InceptionV3(weights=None,
                            include_top=True,
                            input_shape=input_shape,
                            classes=num_classes)
    else:
        print('Running with ResNet50 -- the default model')
        model = ResNet50(weights=None,
                         include_top=True,
                         input_shape=input_shape,
                         classes=num_classes)
    execute_model(model, input_shape)
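run_model expects an args object with model, num_classes and batch_size attributes, and relies on input_shape and execute_model being defined elsewhere in the script; a hedged sketch of a matching argparse setup with hypothetical defaults:

import argparse

# Hypothetical CLI wiring; input_shape and execute_model must exist in the module.
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="ResNet50")
parser.add_argument("--num_classes", type=int, default=1000)
parser.add_argument("--batch_size", type=int, default=32)
args = parser.parse_args()
# run_model(args)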
Example #11
train_generator = image_generator.flow_from_directory("dataset",
                                                       subset="training",
                                                       target_size=(224, 224),
                                                       batch_size=8)
validation_generator = image_generator.flow_from_directory("dataset",
                                                           subset="validation",
                                                           target_size=(224,
                                                                        224),
                                                           batch_size=8)

# Show an image from train set
Image.fromarray((next(train_generator)[0][0] * 255).astype(numpy.uint8)).show()

# Create model
mobile = MobileNet(input_shape=(224, 224, 3),
                   include_top=False,
                   weights='imagenet',
                   pooling='avg',
                   alpha=0.5)
output = Dropout(0.4)(mobile.output)
output = Dense(8, activation="relu")(output)
output = Dense(3, activation="sigmoid")(output)

model = Model(inputs=mobile.input, outputs=output)
model.summary()

# Compile model
model.compile(optimizer=Adam(amsgrad=True),
              loss="categorical_crossentropy",
              metrics=["accuracy"])

callbacks = [
Example #12
def main(_):
    # input image dimensions
    img_rows, img_cols = 400, 400
    # Images are grayscale (single channel).
    img_channels = 1

    # channel last -> (~/.keras/keras.json)
    model = MobileNet(input_shape=(img_rows, img_cols, img_channels),
                      weights=None,
                      classes=5)  # 5-way classification
    # plot_model(model, to_file='model.png', show_shapes=True)
    model.compile(
        loss='categorical_crossentropy',  # multiclass classification uses categorical cross-entropy
        optimizer='adam',
        metrics=['accuracy'])

    callbacks = list()
    callbacks.append(
        ReduceLROnPlateau(factor=np.sqrt(0.1),
                          cooldown=0,
                          patience=5,
                          min_lr=0.5e-6))
    callbacks.append(EarlyStopping(min_delta=0.001, patience=10))
    callbacks.append(
        TensorBoard(histogram_freq=0,
                    write_graph=False,
                    write_grads=True,
                    write_images=True,
                    batch_size=FLAGS.batch_size))

    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    train_datagen = ImageDataGenerator(
        featurewise_center=False,             # set input mean to 0 over the dataset
        samplewise_center=False,              # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,   # divide each input by its std
        zca_whitening=False,                  # apply ZCA whitening
        rotation_range=180,                   # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,                 # randomly flip images horizontally
        vertical_flip=True,                   # randomly flip images vertically
        validation_split=0.2)

    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    train_data_dir = "../multiclass-25k-binarize"
    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='grayscale',
        class_mode='categorical',
        batch_size=FLAGS.batch_size,
        subset='training')
    validation_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='grayscale',
        class_mode='categorical',
        batch_size=FLAGS.batch_size,
        subset='validation')

    # Fit the model on the batches generated by datagen.flow().
    steps_per_epoch = train_generator.n // FLAGS.batch_size
    validation_steps = validation_generator.n // FLAGS.batch_size
    model.fit_generator(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        validation_data=validation_generator,
                        validation_steps=validation_steps,
                        epochs=FLAGS.epoch,
                        verbose=1,
                        callbacks=callbacks)

    # cf. https://medium.com/@vijayabhaskar96/
    # tutorial-image-classification-with-keras-flow-from-directory-and-generators-95f75ebe5720
    test_generator = train_datagen.flow_from_directory(
        "../pipe-screenshot-test-binarize",
        target_size=(img_rows, img_cols),
        color_mode='grayscale',
        class_mode=None,
        batch_size=1,
        shuffle=False)
    # Need to reset the test_generator before
    #  whenever you call the predict_generator.
    # This is important, if you forget to reset
    #  the test_generator you will get outputs in a weird order.
    test_generator.reset()
    pred = model.predict_generator(test_generator, verbose=1)
    predicted_class_indices = np.argmax(pred, axis=1)

    # Now predicted_class_indices has the predicted labels,
    #  but you can’t simply tell what the predictions are,
    #   because all you can see is numbers like 0,1,4,1,0,6…
    # and most importantly you need to map the predicted
    #  labels with their unique ids such as filenames to
    #   find out what you predicted for which image.
    labels = train_generator.class_indices
    labels = dict((v, k) for k, v in labels.items())
    predictions = [labels[k] for k in predicted_class_indices]
    filenames = test_generator.filenames
    print("filenames:", filenames)
    print("predictions:", predictions)

    try:
        os.makedirs(FLAGS.model_dir)
    except OSError:
        pass
    model.save(os.path.join(FLAGS.model_dir, str(FLAGS.epoch) + '.h5'))
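The training script above uses FLAGS (batch_size, epoch, model_dir) defined outside the snippet; a hedged sketch of the TF1-style flag definitions it appears to assume:

import tensorflow as tf

# Hypothetical flag definitions matching the FLAGS attributes referenced above (TF1 API).
tf.app.flags.DEFINE_integer("batch_size", 32, "Mini-batch size.")
tf.app.flags.DEFINE_integer("epoch", 50, "Number of training epochs.")
tf.app.flags.DEFINE_string("model_dir", "./models", "Directory for the saved .h5 model.")
FLAGS = tf.app.flags.FLAGS

if __name__ == "__main__":
    tf.app.run()  # invokes main(_)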
Example #13
        save_images_from_wnid(cwd, categ_name, wnid, 12)
        categ_names.append(categ_name)
        
    # ResNet50
    from tensorflow.python.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
    # Load the model architecture and the imagenet weights for the networks
    model = ResNet50(weights='imagenet')
    #model.summary()
    #model.get_weights()[0]
    cmResNet50 = predictNevaluate(model, (224, 224), cwd, categ_names, wnids_list)

    # VGG16
    from tensorflow.python.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
    model = VGG16(weights='imagenet')
    cmVGG16 = predictNevaluate(model, (224, 224), cwd, categ_names, wnids_list)

    # MobileNet
    from tensorflow.python.keras.applications.mobilenet import MobileNet, preprocess_input, decode_predictions
    model = MobileNet(weights='imagenet')
    cmMobileNet = predictNevaluate(model, (224, 224), cwd, categ_names, wnids_list)

    # Inception_V3
    from tensorflow.python.keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
    model = InceptionV3(weights='imagenet')
    cmInception_V3 = predictNevaluate(model, (299, 299), cwd, categ_names, wnids_list)

    # Xception
    from tensorflow.python.keras.applications.xception import Xception, preprocess_input, decode_predictions
    model = Xception(weights='imagenet')
    cmXception = predictNevaluate(model, (299, 299), cwd, categ_names, wnids_list)
Example #14
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.utils import CustomObjectScope
from tensorflow.python.keras.backend import relu

tf.logging.set_verbosity(tf.logging.INFO)


def relu6(x):
    return relu(x, max_value=6)


with CustomObjectScope({'relu6': relu6}):
    base_model = MobileNet(weights='imagenet',
                           include_top=False,
                           input_shape=(224, 224, 3))
    mid_start = 5
    all_layers = base_model.layers
    for i in range(0, mid_start):
        print(i)
        # all_layers[i].add(Flatten())
        all_layers[i].trainable = False

    #x=base_model.output
    #x=Flatten()(x)
    #x=Dense(128,activation='relu')(x)
    #x=Dropout(0.5)(x)
    #pred = Dense(2,activation='softmax')(x)
    #print base_model.summary()