Example #1

"""
@author: user
"""

# from keras.applications.inception_v3 import InceptionV3
from keras.applications.mobilenet_v2 import MobileNetV2, decode_predictions, preprocess_input
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix

# create the base pre-trained model
base_model = MobileNetV2(input_shape=(50, 50, 3),
                         weights='imagenet',
                         include_top=False)
# Grayscale images can still be fed in: the train_generator side has a color_mode option (default 'rgb'), and grayscale input is automatically replicated to 3 channels.

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- here we have 2 classes
predictions = Dense(2, activation='softmax')(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

# first: train only the top layers (which were randomly initialized)
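
# A minimal sketch of that first phase, assuming the usual transfer-learning
# recipe (freeze the pre-trained base, then compile and fit only the new head):
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer=Adam(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit_generator(...) would then train just the new Dense head.
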
Example #2
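# A minimal set of imports inferred from the calls in this function (the
# original snippet omits them):
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.models import Model
from keras.layers import (Conv2D, UpSampling2D, Add, GlobalAveragePooling2D,
                          Dense, Activation)
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
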
def mobilenetv2(image_size, num_of_classes):
    input_shape = (image_size, image_size, 3)
    base_model = MobileNetV2(weights='imagenet',
                             include_top=False,
                             pooling=None,
                             input_shape=input_shape)
    X = base_model.output

    X_Conv = Conv2D(3, (
        3,
        3,
    ),
                    strides=(1, 1),
                    padding='same',
                    name='upsampled_conv',
                    kernel_initializer=glorot_uniform(seed=0))(X)
    X_up = UpSampling2D(size=(2, 2),
                        interpolation='bilinear',
                        name='autoencoder/Upsample1')(X_Conv)
    X_feat1 = base_model.get_layer('block_13_expand_relu').output
    X_feat1 = Conv2D(3, (3, 3),
                     strides=(1, 1),
                     padding='same',
                     name='feat1_conv',
                     kernel_initializer=glorot_uniform(seed=0))(X_feat1)
    X_up = Add()([X_feat1, X_up])
    X_up = UpSampling2D(size=(2, 2),
                        interpolation='bilinear',
                        name='autoencoder/Upsample2')(X_up)
    X_feat2 = base_model.get_layer('block_6_expand_relu').output
    X_feat2 = Conv2D(3, (3, 3),
                     strides=(1, 1),
                     padding='same',
                     name='feat2_conv',
                     kernel_initializer=glorot_uniform(seed=0))(X_feat2)
    X_up = Add()([X_feat2, X_up])
    X_up = UpSampling2D(size=(2, 2),
                        interpolation='bilinear',
                        name='autoencoder/Upsample3')(X_up)
    X_feat3 = base_model.get_layer('block_3_expand_relu').output
    X_feat3 = Conv2D(3, (3, 3),
                     strides=(1, 1),
                     padding='same',
                     name='feat3_conv',
                     kernel_initializer=glorot_uniform(seed=0))(X_feat3)
    X_up = Add()([X_feat3, X_up])
    X_up = UpSampling2D(size=(2, 2),
                        interpolation='bilinear',
                        name='autoencoder/Upsample4')(X_up)
    X_feat4 = base_model.get_layer('block_1_expand_relu').output
    X_feat4 = Conv2D(3, (3, 3),
                     strides=(1, 1),
                     padding='same',
                     name='feat4_conv',
                     kernel_initializer=glorot_uniform(seed=0))(X_feat4)
    X_up = Add()([X_feat4, X_up])
    X_up = UpSampling2D(size=(2, 2),
                        interpolation='bilinear',
                        name='autoencoder')(X_up)
    X = GlobalAveragePooling2D(name='global_avg_pool')(X)
    X = Dense(256, name='Dense_1')(X)
    X = Dense(num_of_classes, name='Dense_2')(X)
    X = Activation('softmax', name='classifier')(X)

    model = Model(inputs=base_model.input, outputs=[X, X_up], name='')

    model.compile(Adam(lr=.0001),
                  loss={
                      'classifier': 'categorical_crossentropy',
                      'autoencoder': 'mean_squared_error'
                  },
                  loss_weights=[0.5, 0.5],
                  metrics={
                      'classifier': 'accuracy',
                      'autoencoder': 'accuracy'
                  })

    model.summary()

    return model

# The head of this ModelCheckpoint call is missing in the source; a plausible
# reconstruction (filepath and monitored metric are assumptions, and
# keras.callbacks.ModelCheckpoint is assumed imported):
checkpoint = ModelCheckpoint(
    "best_model.h5",           # assumed filepath
    monitor="val_accuracy",    # assumed; mode="max" suggests an accuracy metric
    verbose=1,
    save_best_only=True,
    mode="max",
)
lr_reduce = ReduceLROnPlateau(
    monitor="val_loss",
    factor=np.sqrt(0.1),
    patience=5,
    verbose=1,
    cooldown=0,
    min_lr=0.5e-6,
)
callbacks = [checkpoint, lr_reduce]


conv_m = MobileNetV2(weights="imagenet", include_top=False, input_shape=(size, size, 3))
conv_m.trainable = False
model = Sequential()
model.add(conv_m)
model.add(AveragePooling2D(pool_size=(7, 7)))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(5, activation="softmax"))

model.compile(
    loss="categorical_crossentropy",
    optimizer=SGD(lr=0.1, momentum=0.9),
    metrics=["accuracy"],
)
BATCH_SIZE = 32
STEPS_PER_EPOCH = TRAIN_COUNT // BATCH_SIZE
VALIDATION_STEPS = TEST_COUNT // BATCH_SIZE
WIDTH = 224
HEIGHT = 224

# Define directories
TRAIN_DIR = r'E:\Attempt 6\Original'
TEST_DIR = r'E:\Attempt 6\Test'
filepath_epoch = "E:/MobileNetRand-BATCH " + str(
    BATCH_SIZE) + "-{epoch:02d}-.model"

# Model used is MobileNetV2 with imagenet weights by default
# Remove output layer as we are replacing it with our own
base_model = MobileNetV2(weights='imagenet',
                         include_top=False,
                         input_shape=[HEIGHT, WIDTH, 3])

print(len(base_model.layers))

for layer in base_model.layers:
    layer.trainable = False
    # 60 layers works. more does not.
# Note: this slice unfreezes every layer EXCEPT the last 50; the usual
# fine-tuning pattern (train only the last 50) would use layers[-50:].
for layer in base_model.layers[:-50]:
    layer.trainable = True

print(len(base_model.trainable_weights))

# set pooling activation etc.
# Training new model
x = base_model.output
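
# The snippet is cut off here. A plausible continuation matching the comments
# above (the head below is an assumption, not the original code, and it assumes
# the usual keras.layers imports):
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)  # num_classes: hypothetical
model = Model(inputs=base_model.input, outputs=predictions)
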
Example #5
def get_tst_neural_net(type):
    # Each branch tries the standalone `keras` import first and falls back to
    # `tensorflow.keras`, so the function works with either installation.
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        try:
            from keras.applications.mobilenet import MobileNet
        except:
            from tensorflow.keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        try:
            from keras.applications.mobilenet import MobileNet
        except:
            from tensorflow.keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        try:
            from keras.applications.mobilenet_v2 import MobileNetV2
        except:
            from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        try:
            from keras.applications.resnet50 import ResNet50
        except:
            from tensorflow.keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        try:
            from keras.applications.inception_v3 import InceptionV3
        except:
            from tensorflow.keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        try:
            from keras.applications.inception_resnet_v2 import InceptionResNetV2
        except:
            from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        try:
            from keras.applications.xception import Xception
        except:
            from tensorflow.keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        try:
            from keras.applications.densenet import DenseNet121
        except:
            from tensorflow.keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        try:
            from keras.applications.densenet import DenseNet169
        except:
            from tensorflow.keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        try:
            from keras.applications.densenet import DenseNet201
        except:
            from tensorflow.keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        try:
            from keras.applications.nasnet import NASNetMobile
        except:
            from tensorflow.keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        try:
            from keras.applications.nasnet import NASNetLarge
        except:
            from tensorflow.keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        try:
            from keras.applications.vgg16 import VGG16
        except:
            from tensorflow.keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        try:
            from keras.applications.vgg19 import VGG19
        except:
            from tensorflow.keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    elif type == 'conv1d_model':
        model = get_simple_1d_model()
    return model, custom_objects
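
# Example usage (a sketch): most branches only need the type string; the
# custom branches rely on helper constructors defined elsewhere in the source.
model, custom_objects = get_tst_neural_net('mobilenet_v2')
model.summary()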
import keras
from keras.preprocessing.image import ImageDataGenerator, load_img
import os
data_dir = '/home/shichao/mount-dir/data/t2000/logo_tiny_data_train_val_test'
train_dir = os.path.join(data_dir, 'train')
validation_dir = os.path.join(data_dir, 'val')
image_size = 224

num_classes = 98
#from keras.applications.resnet50 import ResNet50

from keras.applications.mobilenet_v2 import MobileNetV2

# Load the MobileNetV2 model
mobilenet_v2_conv = MobileNetV2(weights='imagenet',
                                include_top=False,
                                input_shape=(image_size, image_size, 3))
'''
# Freeze all the layers
for layer in mobilenet_v2_conv.layers[:-4]:
    layer.trainable = False
'''

# Check the trainable status of the individual layers
for layer in mobilenet_v2_conv.layers:
    print(layer, layer.trainable)

from keras import models
from keras import layers
from keras import optimizers
def create_mobilenetv2_yolov3_model(nb_class, anchors, max_box_per_image,
                                    max_grid, batch_size, warmup_batches,
                                    ignore_thresh, grid_scales, obj_scale,
                                    noobj_scale, xywh_scale, class_scale):
    input_image = Input(shape=(None, None, 3))  # net_h, net_w, 3
    true_boxes = Input(shape=(1, 1, 1, max_box_per_image, 4))
    true_yolo_1 = Input(
        shape=(None, None, len(anchors) // 6,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class
    true_yolo_2 = Input(
        shape=(None, None, len(anchors) // 6,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class
    true_yolo_3 = Input(
        shape=(None, None, len(anchors) // 6,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class

    base_model = MobileNetV2(include_top=False,
                             weights='imagenet',
                             input_tensor=input_image,
                             pooling=None)

    skip_36 = base_model.get_layer("block_6_expand_relu").output
    skip_61 = base_model.get_layer("block_13_expand_relu").output
    x = base_model.get_layer("block_16_project").output
    # test_layer = x
    # Layer 80 => 82
    pred_yolo_1 = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 80
    }, {
        'filter': (3 * (5 + nb_class)),
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 81
    }],
                              do_skip=False)
    loss_yolo_1 = YoloLayer(
        anchors[12:], [1 * num
                       for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[0], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_1, true_yolo_1, true_boxes])

    # Layer 83 => 86
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 84
    }],
                    do_skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])

    # Layer 87 => 91
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 87
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 88
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 89
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 90
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 91
    }],
                    do_skip=False)

    # Layer 92 => 94
    pred_yolo_2 = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 92
    }, {
        'filter': (3 * (5 + nb_class)),
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 93
    }],
                              do_skip=False)
    loss_yolo_2 = YoloLayer(
        anchors[6:12], [2 * num
                        for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[1], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_2, true_yolo_2, true_boxes])

    # Layer 95 => 98
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 96
    }],
                    do_skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])

    # Layer 99 => 106
    pred_yolo_3 = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 99
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 100
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 101
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 102
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 103
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 104
    }, {
        'filter': (3 * (5 + nb_class)),
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 105
    }],
                              do_skip=False)
    loss_yolo_3 = YoloLayer(
        anchors[:6], [4 * num for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[2], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_3, true_yolo_3, true_boxes])

    train_model = Model(
        [input_image, true_boxes, true_yolo_1, true_yolo_2, true_yolo_3],
        [loss_yolo_1, loss_yolo_2, loss_yolo_3])
    infer_model = Model(input_image, [pred_yolo_1, pred_yolo_2, pred_yolo_3])

    return [train_model, infer_model]
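
# Example usage (a sketch; _conv_block and YoloLayer are helpers defined
# elsewhere in the source, and these hyperparameter values are illustrative --
# the anchors shown are the standard YOLOv3 COCO anchors):
train_model, infer_model = create_mobilenetv2_yolov3_model(
    nb_class=20,
    anchors=[10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119,
             116, 90, 156, 198, 373, 326],
    max_box_per_image=30, max_grid=[13, 13], batch_size=8,
    warmup_batches=0, ignore_thresh=0.5, grid_scales=[1, 1, 1],
    obj_scale=5, noobj_scale=1, xywh_scale=1, class_scale=1)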
Example #8
tensorBoard = MultiLabelTensorBoard(
    log_dir='./MobileV2-MultiLabelAccuracy/logs',  # log directory
    histogram_freq=0,
    update_freq=1000,  # how often (in samples) to write logs
    batch_size=batch_size,  # amount of data used to compute histograms
    write_graph=True,  # whether to store the network graph
    write_grads=True,  # whether to visualize gradient histograms
    write_images=True)  # whether to visualize model weights

train = DataGenerator(img_path[0:80000], img_label[0:80000], classes,
                      batch_size, img_target_size)
validation = DataGenerator(img_path[80000:90000], img_label[80000:90000],
                           classes, batch_size, img_target_size)

base_model = MobileNetV2(weights="imagenet",
                         include_top=False,
                         input_shape=img_target_size)

x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(len(classes), activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
optimizers = keras.optimizers.Adam(lr=0.001,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=None,
                                   decay=0.0001,
                                   amsgrad=False)

model.compile(optimizer=optimizers,
              loss=MultiLableAccuracyLoss)  # custom loss defined elsewhere; the call is truncated in the source

# Orphaned fragment from a different snippet (its enclosing function is missing):
#         cv2.imshow('ss', RGB_img)
#         cv2.waitKey(0)

# inputs_shape = (562, 762, 3)
inputs_shape = (220, 220, 3)

# initial weights
b = np.zeros((2, 3), dtype='float32')
b[0, 0] = 1
b[1, 1] = 1
W = np.zeros((64, 6), dtype='float32')
weights = [W, b.flatten()]

mobile = MobileNetV2(input_shape=inputs_shape,
                     alpha=0.5,
                     include_top=False,
                     weights=None,
                     pooling='max')
model = MobileNetV2(input_shape=inputs_shape,
                    alpha=0.5,
                    include_top=True,
                    classes=7,
                    weights=None,
                    pooling='max')
# nasnet = NASNetMobile(input_shape=inputs_shape, include_top=False, weights=None)
# model = e.EfficientNetB0(include_top=False,
#                          input_shape=(128, 128, 3),
#                          weights=None,
#                          pooling='max')
# classes=7)
Example #10
    def train(self):
        base_model = MobileNetV2(
            weights='imagenet',
            include_top=False)

        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(512, activation='relu')(x)

        preds = Dense(self.classes, activation='softmax')(x)

        self.model = Model(inputs=base_model.input, outputs=preds)

        # specify the inputs
        # specify the outputs
        # now a model has been created based on our architecture
        for i, layer in enumerate(self.model.layers):
            print(i, layer.name)

        for layer in base_model.layers:
            layer.trainable = False

        train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

        self.train_generator = train_datagen.flow_from_directory(
            self.train_data_dir,
            target_size=(self.img_width, self.img_height),
            color_mode='rgb',
            batch_size=self.batch_size,
            class_mode='categorical',
            shuffle=True)

        test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

        self.validation_generator = test_datagen.flow_from_directory(
            self.validation_data_dir,
            target_size=(self.img_width, self.img_height),
            color_mode='rgb',
            batch_size=self.batch_size,
            class_mode='categorical',
            shuffle=True)

        # Adam optimizer
        # loss function - categorical cross entropy
        # evaluation metric - accuracy
        self.model.compile(
            optimizer='Adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        step_size_train = self.train_generator.n // self.train_generator.batch_size

        step_size_validation = self.validation_generator.n // self.validation_generator.batch_size

        self.history = self.model.fit_generator(
            generator=self.train_generator,
            steps_per_epoch=step_size_train,
            validation_data=self.validation_generator,
            validation_steps=step_size_validation,
            epochs=self.epochs)
Example #11
import requests, os, json
import numpy as np
from io import BytesIO
from flask import Flask
from keras.applications.mobilenet_v2 import MobileNetV2
from scipy.spatial.distance import cosine
from sklearn.metrics import mean_squared_error
app = Flask(__name__)

if os.path.exists('product_id_list_deploy.txt'):
    PRODUCT_ID_LIST = list(
        np.loadtxt('product_id_list_deploy.txt', dtype=str))
else:
    PRODUCT_ID_LIST = []
if os.path.exists('features_deploy.npy'):
    FEATURES = list(np.load('features_deploy.npy'))  # list, so .append() below works
else:
    FEATURES = []
MODEL = MobileNetV2(include_top=False, weights='imagenet', pooling="avg")
MODEL._make_predict_function()


def save_image_features(img, productID):
    img = np.expand_dims(img, axis=0)
    imgFeature = MODEL.predict(img)
    FEATURES.append(imgFeature[0])
    PRODUCT_ID_LIST.append(productID)
    np.save('features_deploy.npy', FEATURES)
    np.savetxt('product_id_list_deploy.txt', PRODUCT_ID_LIST, fmt='%s')
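

# A sketch of how these stored features might be queried (cosine is imported
# above; this helper is hypothetical, not part of the original snippet):
def find_closest_product(img):
    img = np.expand_dims(img, axis=0)
    query = MODEL.predict(img)[0]
    distances = [cosine(query, feat) for feat in FEATURES]
    return PRODUCT_ID_LIST[int(np.argmin(distances))]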


def get_image_from_url(imgURL):
    response = requests.get(
        imgURL)  # something like 'https://i.imgur.com/rqCqA.jpg'
    # Truncated in the source; the BytesIO import above suggests something like:
    # return Image.open(BytesIO(response.content))

def imagenet_model(nb_filters=64, input_shape=(None, 224, 224, 3)):
    model = MobileNetV2(weights=None, classes=10)

    return model
Example #13
# plt.plot(his.history['val_acc'])
# plt.title('model_accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# plt.plot(his.history['loss'])
# plt.plot(his.history['val_loss'])
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
#
# model.save('./weights/model.h5')
# Use the pretrained backbone without the final layer (the original comment
# says ResNet, but the code loads MobileNetV2)
base_model = MobileNetV2(weights='imagenet', include_top=False, pooling=None,
                         input_shape=(resize, resize, 3), classes=2)
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(2, activation='sigmoid')(x)

model = Model(inputs=base_model.input, outputs=predictions)

model.load_weights('./weights/catdogs_model.h5')
test_image = cv2.resize(cv2.imread('./43.jpg'),(224,224))
test_image = np.asarray(test_image.astype("float32"))
test_image = test_image/255.
test_image = test_image.reshape((1,224,224,3))
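
# The snippet stops before inference; the natural next step would be:
pred = model.predict(test_image)  # shape (1, 2): two per-class sigmoid scores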
Example #14
def create_model():
    print_title('create model')

    # Add STN
    input_image = Input(shape=(*MODEL_INPUT_SIZE, MODEL_INPUT_CHANNELS))
    #input_image = Input(shape=(None, None, MODEL_INPUT_CHANNELS))
    '''
    locnet = Conv2D(8, (3, 3), padding='same')(input_image)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = Conv2D(8, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = AvgPool2D(pool_size=(2, 2))(locnet)
    # d1 = Dropout(0.2)(m1)

    locnet = Conv2D(16, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = Conv2D(16, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = AvgPool2D(pool_size=(2, 2))(locnet)
    # d2 = Dropout(0.2)(m2)

    locnet = Conv2D(32, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = Conv2D(32, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = AvgPool2D(pool_size=(2, 2))(locnet)
    # d3 = Dropout(0.2)(m3)

    locnet = Conv2D(64, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = Conv2D(64, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = AvgPool2D(pool_size=(2, 2))(locnet)
    # d4 = Dropout(0.2)(m4)

    locnet = Conv2D(128, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = Conv2D(128, (3, 3), padding='same')(locnet)
    locnet = layers.BatchNormalization()(locnet)
    locnet = layers.Activation('relu')(locnet)
    locnet = AvgPool2D(pool_size=(2, 2), name='locnet_encoder')(locnet)
    '''
    '''
    locnet = AvgPool2D(pool_size=(4, 4))(input_image)
    locnet_encoder_model = Xception(include_top=False, weights='imagenet', pooling=None)
                                       #input_shape=(*SAMPLING_SIZE, MODEL_INPUT_CHANNELS), pooling=None)
    locnet_encoder_model.name = 'locnet_encoder'
    locnet = locnet_encoder_model(locnet)
    '''

    # locnet = MaxPool2D(pool_size=(2, 2))(input_image)
    # locnet = Conv2D(20, (5, 5))(locnet)
    # # locnet = layers.Activation('relu')(locnet)
    #
    # locnet = MaxPool2D(pool_size=(2, 2))(locnet)
    # locnet = Conv2D(50, (5, 5))(locnet)
    # # locnet = layers.Activation('relu')(locnet)
    #
    # '''
    # locnet = MaxPool2D(pool_size=(2, 2))(locnet)
    # locnet = layers.SeparableConv2D(100, (3, 3), padding='same', use_bias=False)(locnet)
    # locnet = layers.Activation('relu')(locnet)
    #
    # locnet = MaxPool2D(pool_size=(2, 2))(locnet)
    # locnet = layers.SeparableConv2D(200, (3, 3), padding='same', use_bias=False)(locnet)
    # locnet = layers.Activation('relu')(locnet)
    # '''
    #
    # locnet = layers.SeparableConv2D(100, (5, 5), use_bias=False)(locnet)
    # # locnet = layers.Activation('relu')(locnet)
    #
    # locnet = layers.SeparableConv2D(200, (5, 5), use_bias=False, name='locnet_last_conv')(locnet)
    # # locnet = layers.Activation('relu')(locnet)
    '''

    locnet = MaxPool2D(pool_size=(2, 2))(input_image)
    locnet = Conv2D(20, (5, 5))(locnet)
    locnet = layers.Activation('relu')(locnet)

    locnet = MaxPool2D(pool_size=(2, 2))(locnet)
    locnet = Conv2D(20, (5, 5))(locnet)
    locnet = layers.Activation('relu')(locnet)

    locnet = layers.SeparableConv2D(40, (3, 3), padding='same', use_bias=False)(locnet)
    locnet = layers.Activation('relu')(locnet)

    locnet = layers.SeparableConv2D(40, (3, 3), padding='same', use_bias=False)(locnet)
    locnet = layers.Activation('relu')(locnet)

    locnet = layers.GlobalMaxPooling2D()(locnet)
    '''
    '''
    locnet = MaxPool2D(pool_size=(2, 2))(locnet)
    locnet = Conv2D(20, (5, 5))(locnet)
    locnet = MaxPool2D(pool_size=(2, 2))(locnet)
    locnet = Conv2D(20, (5, 5))(locnet)
    '''

    downsample = MaxPool2D(pool_size=(4, 4),
                           name='locnet_downsample')(input_image)

    locnet_encoder_model = MobileNetV2(include_top=False,
                                       weights='imagenet',
                                       pooling=None,
                                       input_shape=(192, 192,
                                                    MODEL_INPUT_CHANNELS))
    locnet_encoder_model.name = 'locnet_encoder'
    locnet = locnet_encoder_model(downsample)  # encoder_model.output

    # locnet = Flatten()(locnet)
    locnet_pooling = layers.GlobalAveragePooling2D(
        name='locnet_pooling')(locnet)

    input_male = Input(shape=(1, ), name='input_male')
    '''
    locnet_x_male = Dense(32, activation='relu')(input_male)

    locnet_x = concatenate([locnet_pooling, locnet_x_male], axis=-1)
    locnet_x = Dense(1024, activation='relu')(locnet_x)
    locnet_x = Dense(1024, activation='relu')(locnet_x)
    locnet_output = Dense(1, activation='tanh')(locnet_x)
    '''

    # locnet = Dense(200)(locnet)
    locnet = Dense(40, name='locnet_dense_1')(locnet_pooling)
    locnet = layers.BatchNormalization(name='locnet_batch_norm_1')(locnet)
    locnet = Activation('relu', name='locnet_activation_1')(locnet)
    # locnet = Activation('tanh')(locnet)  ### ??

    locnet_scale = Dense(10, name='locnet_before_scale_dense')(locnet)
    locnet_scale = layers.BatchNormalization(
        name='locnet_before_scale_batch_norm')(locnet_scale)
    locnet_scale = Activation(
        'relu', name='locnet_before_scale_activation')(locnet_scale)
    # locnet_scale = Activation('tanh')(locnet_scale)

    # locnet = Activation('linear')(locnet)  ### ??
    scale_weights = stn.utils.get_initial_weights_for_scale(10)
    locnet_scale = Dense(1, weights=scale_weights,
                         name='locnet_scale')(locnet_scale)
    locnet_scale = layers.BatchNormalization(
        name='locnet_scale_batch_norm')(locnet_scale)
    ###    locnet_scale = Activation('sigmoid')(locnet_scale)
    locnet_scale = Activation('sigmoid',
                              name='locnet_scale_activation')(locnet_scale)

    locnet_translate = Dense(15, name='locnet_before_translate_dense')(locnet)
    locnet_translate = layers.BatchNormalization(
        name='locnet_before_translate_batch_norm')(locnet_translate)
    locnet_translate = Activation(
        'relu', name='locnet_before_translate_activation')(locnet_translate)
    # locnet_translate = Activation('tanh')(locnet_translate)
    translate_weights = stn.utils.get_initial_weights_for_translate(15)
    locnet_translate = Dense(2,
                             weights=translate_weights,
                             name='locnet_translate')(locnet_translate)
    locnet_translate = layers.BatchNormalization(
        name='locnet_translate_batch_norm')(locnet_translate)
    ###    locnet_translate = Activation('tanh')(locnet_translate)
    locnet_translate = Activation(
        'tanh', name='locnet_translate_activation')(locnet_translate)

    x = BilinearInterpolation(SAMPLING_SIZE, name='stn_interpolation')(
        [input_image, locnet_scale, locnet_translate])

    encoder_model = Xception(include_top=False,
                             weights='imagenet',
                             pooling=None)
    # input_shape=(*SAMPLING_SIZE, MODEL_INPUT_CHANNELS), pooling=None)
    # encoder_model.summary()
    # input_image = encoder_model.input
    ###    input_male = Input(shape=(1,))

    # x_image = encoder_model(input_image)
    x_image = encoder_model(x)  # encoder_model.output
    #    x_image = layers.GlobalMaxPooling2D()(x_image)
    x_image = layers.GlobalAveragePooling2D(name='encoder_pooling')(x_image)

    x_male = Dense(32, activation='relu')(input_male)

    x = concatenate([x_image, x_male], axis=-1)
    x = Dense(1024, activation='relu')(x)
    x = Dense(1024, activation='relu')(x)
    #   x = Dense(1024, activation='tanh')(x)
    #    output = Dense(1, activation='linear')(x)
    output = Dense(1, activation='tanh')(x)

    # output = concatenate([locnet_output, output], axis=-1)
    # output = Dense(1, activation='tanh')(output)

    model = Model(inputs=[input_image, input_male], outputs=output)
    return model
Example #15
def SSD(input_shape, num_classes):
    """SSD300 architecture.

    # Arguments
        input_shape: Shape of the input image,
            expected to be either (300, 300, 3) or (3, 300, 300) (not tested).
        num_classes: Number of classes including background.

    # References
        https://arxiv.org/abs/1512.02325
    """
    alpha = 1.0
    img_size = (input_shape[1], input_shape[0])
    input_shape = (input_shape[1], input_shape[0], 3)
    mobilenetv2_input_shape = (224, 224, 3)

    Input0 = Input(input_shape)
    mobilenetv2 = MobileNetV2(input_shape=mobilenetv2_input_shape,
                              include_top=False,
                              weights='imagenet')
    FeatureExtractor = Model(
        inputs=mobilenetv2.input,
        outputs=mobilenetv2.get_layer('block_12_add').output)
    #get_3rd_layer_output = K.function([mobilenetv2.layers[114].input, K.learning_phase()],
    #                                  [mobilenetv2.layers[147].output])

    x = FeatureExtractor(Input0)
    x, pwconv3 = _isb4conv13(x,
                             filters=160,
                             alpha=alpha,
                             stride=1,
                             expansion=6,
                             block_id=13)
    #x=get_3rd_layer_output([x,1])[0]
    x = _inverted_res_block(x,
                            filters=160,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=14)
    x = _inverted_res_block(x,
                            filters=160,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=15)
    x = _inverted_res_block(x,
                            filters=320,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=16)
    x, pwconv4 = Conv(x, 1280)
    x, pwconv5 = LiteConv(x, 5, 512)
    x, pwconv6 = LiteConv(x, 6, 256)
    x, pwconv7 = LiteConv(x, 7, 128)
    x, pwconv8 = LiteConv(x, 8, 128)

    pwconv3_mbox_loc_flat, pwconv3_mbox_conf_flat, pwconv3_mbox_priorbox = prediction(
        pwconv3, 3, 3, 60.0, None, [2], num_classes, img_size)
    pwconv4_mbox_loc_flat, pwconv4_mbox_conf_flat, pwconv4_mbox_priorbox = prediction(
        pwconv4, 4, 6, 105.0, 150.0, [2, 3], num_classes, img_size)
    pwconv5_mbox_loc_flat, pwconv5_mbox_conf_flat, pwconv5_mbox_priorbox = prediction(
        pwconv5, 5, 6, 150.0, 195.0, [2, 3], num_classes, img_size)
    pwconv6_mbox_loc_flat, pwconv6_mbox_conf_flat, pwconv6_mbox_priorbox = prediction(
        pwconv6, 6, 6, 195.0, 240.0, [2, 3], num_classes, img_size)
    pwconv7_mbox_loc_flat, pwconv7_mbox_conf_flat, pwconv7_mbox_priorbox = prediction(
        pwconv7, 7, 6, 240.0, 285.0, [2, 3], num_classes, img_size)
    pwconv8_mbox_loc_flat, pwconv8_mbox_conf_flat, pwconv8_mbox_priorbox = prediction(
        pwconv8, 8, 6, 285.0, 300.0, [2, 3], num_classes, img_size)

    # Gather all predictions
    mbox_loc = concatenate([
        pwconv3_mbox_loc_flat, pwconv4_mbox_loc_flat, pwconv5_mbox_loc_flat,
        pwconv6_mbox_loc_flat, pwconv7_mbox_loc_flat, pwconv8_mbox_loc_flat
    ],
                           axis=1,
                           name='mbox_loc')
    mbox_conf = concatenate([
        pwconv3_mbox_conf_flat, pwconv4_mbox_conf_flat, pwconv5_mbox_conf_flat,
        pwconv6_mbox_conf_flat, pwconv7_mbox_conf_flat, pwconv8_mbox_conf_flat
    ],
                            axis=1,
                            name='mbox_conf')
    mbox_priorbox = concatenate([
        pwconv3_mbox_priorbox, pwconv4_mbox_priorbox, pwconv5_mbox_priorbox,
        pwconv6_mbox_priorbox, pwconv7_mbox_priorbox, pwconv8_mbox_priorbox
    ],
                                axis=1,
                                name='mbox_priorbox')
    if hasattr(mbox_loc, '_keras_shape'):
        num_boxes = mbox_loc._keras_shape[-1] // 4
    else:
        num_boxes = K.int_shape(mbox_loc)[-1] // 4
    mbox_loc = Reshape((num_boxes, 4), name='mbox_loc_final')(mbox_loc)
    mbox_conf = Reshape((num_boxes, num_classes),
                        name='mbox_conf_logits')(mbox_conf)
    mbox_conf = Activation('softmax', name='mbox_conf_final')(mbox_conf)
    predictions = concatenate([mbox_loc, mbox_conf, mbox_priorbox],
                              axis=2,
                              name='predictions')
    model = Model(inputs=Input0, outputs=predictions)
    return model
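
# Example usage (a sketch; _isb4conv13, _inverted_res_block, Conv, LiteConv and
# prediction are helpers defined elsewhere in the source):
model = SSD(input_shape=(300, 300, 3), num_classes=21)  # 20 classes + background
model.summary()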
Example #16
                                          batch_size=val_batch_size,
                                          class_mode='binary')

    return train_gen, val_gen


# the size of the images in the PCAM dataset
IMAGE_SIZE = 96

input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3)

input = Input(input_shape)

# get the model architecture, cut out the top layer (note: weights=None means
# the network is randomly initialized here, not actually pretrained)
pretrained = MobileNetV2(input_shape=input_shape,
                         include_top=False,
                         weights=None)

# if the pretrained model is to be used as a feature extractor, and not for
# fine-tuning, the weights of the model can be frozen in the following way
# for layer in pretrained.layers:
#    layer.trainable = False

output = pretrained(input)
output = GlobalAveragePooling2D()(output)
#output = Dropout(0.5)(output)
output = Dense(1, activation='sigmoid')(output)

model = Model(input, output)

# note the lower lr compared to the cnn example
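
# The compile call itself is cut off in the source; a sketch consistent with
# the comment above (optimizer choice and learning rate are assumptions):
from keras.optimizers import Adam
model.compile(optimizer=Adam(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])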
Example #17
from keras import backend as K
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.layers import Dense, GlobalAveragePooling2D

import numpy as np
from IPython.display import Image

# instantiate model and remove the top layer
base_model = MobileNetV2(
    weights='imagenet', include_top=False
)  # loads the MobileNetV2 model and discards the final 1000-neuron classification layer.

# add some more layers
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(
    x
)  # add dense layers so the model can learn more complex functions and classify with better accuracy.
x = Dense(1024, activation='relu')(x)  # dense layer 2
x = Dense(512, activation='relu')(x)  # dense layer 3
preds = Dense(2,
              activation='softmax')(x)  # final layer with softmax activation

# specify the inputs/outputs to create the model
model = Model(inputs=base_model.input, outputs=preds)
Example #18
                                                batch_size=batch_size,
                                                shuffle=True)

# test data
test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
    test_root,
    target_size=(IM_WIDTH, IM_HEIGHT),
    batch_size=batch_size,
)

IM_WIDTH = 224
IM_HEIGHT = 224
Input_layer = Input((IM_WIDTH, IM_HEIGHT, 3))
model_inception = MobileNetV2(weights='imagenet',
                              include_top=False,
                              input_shape=(IM_WIDTH, IM_HEIGHT, 3))
model = model_inception(Input_layer)

model = Flatten()(model)
#model = Dense(32)(model)
model = Dense(2)(model)
model = Activation('softmax')(model)
model = Model(Input_layer, model)
for layer in model_inception.layers:
    layer.trainable = False
early_stop = EarlyStopping(monitor='val_loss', patience=3, verbose=1)
best_model = ModelCheckpoint('MobileNetModel_4.h5',
                             verbose=1,
                             save_best_only=True)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',       # assumed; the call is truncated in the source
              metrics=['accuracy'])
Example #19
# from keras.applications.resnet50 import ResNet50, preprocess_inputls
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'

from keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
model = MobileNetV2(include_top=False, weights='imagenet',input_shape=(224,224,3))

for layer in model.layers:
    layer.trainable = True

from keras.layers import Dense, Dropout, GlobalAveragePooling2D
x = model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.3)(x)

from keras.models import Model
# dense: reduce the output to the desired number of classes!
predictions = Dense(7, activation='softmax')(x)
model = Model(inputs=model.input, outputs=predictions)
print(model.summary())

from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
    rotation_range=35,
    width_shift_range=0.3,
    height_shift_range=0.3,
    preprocessing_function=preprocess_input,
    fill_mode='constant'
)

#directory = '/Users/kimwanki/developer/testcase/data/training'
Example #20
def mobilenet_v2(params):
    """Train the MobileNetV2 network"""
    K.clear_session()  # Remove any existing graphs
    mst_str = dt.now().strftime("%m%d_%H%M%S")
    ### TODO: write an image resize for mobilenetv2; currently it only accepts (224, 224, 3) instead of 256.

    print('\n' + '=' * 40 + '\nStarting model at {}'.format(mst_str))
    # print('Model # %s' % len(trials))
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(params)
    ###################################
    # Set up generators
    ###################################
    train_gen = ImageDataGenerator(preprocessing_function=mobv2_preproc,
                                   **DF['image_data_generator'])
    test_gen = ImageDataGenerator(preprocessing_function=mobv2_preproc)

    ######################
    # Paths and Callbacks
    ######################
    ckpt_fpath = op.join(ckpt_dir, mst_str + '_L{val_loss:.2f}_E{epoch:02d}_weights.h5')
    tboard_model_dir = op.join(tboard_dir, mst_str)

    callbacks_phase1 = [TensorBoard(log_dir=tboard_model_dir, histogram_freq=0,
                                    write_grads=False, embeddings_freq=0,
                                    embeddings_layer_names=['dense_preoutput', 'dense_output'])]
    callbacks_phase2 = [
        TensorBoard(log_dir=tboard_model_dir, histogram_freq=0,
                    write_grads=False, embeddings_freq=0,
                    embeddings_layer_names=['dense_preoutput', 'dense_output']),
        ModelCheckpoint(ckpt_fpath, monitor='val_categorical_accuracy',
                        save_weights_only=True, save_best_only=True),
        EarlyStopping(min_delta=0.01,
                      patience=5, verbose=1),
        ReduceLROnPlateau(min_delta=0.1,
                          patience=3, verbose=1)]

    #########################
    # Construct model
    #########################
    # Get the original MobileNetV2 model's pre-initialized weights
    ssl._create_default_https_context = ssl._create_unverified_context
    # input_tensor = Input(shape=(target_size, target_size, 3))
    base_model = MobileNetV2(
        include_top=False,
        weights='imagenet',
        input_shape=(224, 224, 3),
        pooling='avg')
        # input_tensor=input_tensor,
        # input_shape=(target_size, target_size, 3),) # Global average pooling

    x = base_model.output  # Get final layer of the base MobileNetV2 model

    # Add a fully-connected layer
    x = Dense(params['dense_size'], activation=params['dense_activation'],
              kernel_initializer=params['weight_init'],
              name='dense_preoutput')(x)
    if params['dropout_rate'] > 0:
        x = Dropout(rate=params['dropout_rate'])(x)

    # Finally, add output layer
    pred = Dense(params['n_classes'],
                 activation=params['output_activation'],
                 name='dense_output')(x)

    model = Model(inputs=base_model.input, outputs=pred)

    #####################
    # Save model details
    #####################
    model_yaml = model.to_yaml()
    save_template = op.join(ckpt_dir, mst_str + '_{}.{}')
    arch_fpath = save_template.format('arch', 'yaml')
    if not op.exists(arch_fpath):
        with open(arch_fpath.format('arch', 'yaml'), 'w') as yaml_file:
            yaml_file.write(model_yaml)

    # Save params to yaml file
    params_fpath = save_template.format('params', 'yaml')
    if not op.exists(params_fpath):
        with open(params_fpath, 'w') as yaml_file:
            yaml_file.write(yaml.dump(params))
            yaml_file.write(yaml.dump(TP))
            yaml_file.write(yaml.dump(MP))
            yaml_file.write(yaml.dump(DF))

    ##########################
    # Train the new top layers
    ##########################
    # Train the top layers which we just added by setting all orig layers untrainable
    for layer in base_model.layers:
        layer.trainable = False

    # Compile the model (after setting non-trainable layers)
    model.compile(optimizer=get_optimizer(params['optimizer'],
                                          lr=params['lr_phase1']),
                  loss=params['loss'],
                  metrics=MP['metrics'])

    print('Phase 1, training near-output layer(s)')
    hist = model.fit_generator(
        train_gen.flow_from_directory(directory=op.join(data_dir, 'train'),
                                      **DF['flow_from_dir']),
        steps_per_epoch=params['steps_per_train_epo'],
        epochs=params['n_epo_phase1'],
        callbacks=callbacks_phase1,
        max_queue_size=params['max_queue_size'],
        workers=params['workers'],
        use_multiprocessing=params['use_multiprocessing'],
        class_weight=params['class_weight'],
        verbose=1)

    ###############################################
    # Train entire network to fine-tune performance
    ###############################################
    # Visualize layer names/indices to see how many layers to freeze:
    #print('Layer freeze cutoff = {}'.format(params['freeze_cutoff']))
    #for li, layer in enumerate(base_model.layers):
    #    print(li, layer.name)

    # Set all layers trainable
    for layer in model.layers:
        layer.trainable = True

    # Recompile model for second round of training
    model.compile(optimizer=get_optimizer(params['optimizer'],
                                          params['lr_phase2']),
                  loss=params['loss'],
                  metrics=MP['metrics'])

    print('\nPhase 2, training from layer {} on.'.format(params['freeze_cutoff']))
    test_iter = test_gen.flow_from_directory(
        directory=op.join(data_dir, 'test'), shuffle=False,  # Helps maintain consistency in testing phase
        **DF['flow_from_dir'])
    test_iter.reset()  # Reset for each model so it's consistent; ideally should reset every epoch

    hist = model.fit_generator(
        train_gen.flow_from_directory(directory=op.join(data_dir, 'train'),
                                      **DF['flow_from_dir']),
        steps_per_epoch=params['steps_per_train_epo'],
        epochs=params['n_epo_phase2'],
        max_queue_size=params['max_queue_size'],
        workers=params['workers'],
        use_multiprocessing=params['use_multiprocessing'],
        validation_data=test_iter,
        validation_steps=params['steps_per_test_epo'],
        callbacks=callbacks_phase2,
        class_weight=params['class_weight'],
        verbose=1)

    # Return best of last validation accuracies
    check_ind = -1 * (TP['early_stopping_patience'] + 1)
    result_dict = dict(loss=np.min(hist.history['val_loss'][check_ind:]),
                       status=STATUS_OK)

    return result_dict
import numpy as np
import tensorflow as tf
import keras
from keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import load_img
from scipy.spatial.distance import cosine

global model
model = MobileNetV2(weights='imagenet',
                    input_shape=(224, 224, 3),
                    include_top=False,
                    pooling='avg')
global graph
graph = tf.get_default_graph()


def get_embeddings(paths, model=model):
    imgs = np.array([
        preprocess_input(
            image.img_to_array(load_img(path, target_size=(224, 224))))
        for path in paths
    ])

    with graph.as_default():
        embs = model.predict(imgs)
    return embs


def get_distances(target_emb, source_embs, metric=cosine):
    return [metric(target_emb, source_emb) for source_emb in source_embs]
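

# Example usage (a sketch; the image paths are hypothetical):
target_emb = get_embeddings(['query.jpg'])[0]
source_embs = get_embeddings(['a.jpg', 'b.jpg', 'c.jpg'])
print(get_distances(target_emb, source_embs))  # smaller distance = more similar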
Example #22
print('Model loaded. Check http://127.0.0.1:5000/')


# Model saved with Keras model.save()
MODEL_PATH_YOGA = 'Yoga_multiclass10epoch30.h5'
MODEL_PATH_FOOD = 'Yoga_multiclass10epoch30.h5'

# label names of all classes being used
label_names = [
    'backbend', 'bigtoepose', 'bridge', 'childs', 'cobrapose', 'corpsepose',
    'cow_face_pose', 'crocodilepose', 'downwarddog', 'durvasasana', 'eaglepose',
    'easypose', 'eight_angle_pose', 'feathered_peacock', 'fire_log_pose',
    'fireflypose', 'fishpose', 'gatepose', 'halfspinaltwist', 'heropose',
    'lionpose', 'lord_of_dance_pose', 'lotus', 'lunge', 'monkeypose',
    'mountain', 'noosepose', 'peacockpose', 'plank', 'plowpose',
    'reclining_bound_angle_pose', 'reclining_hand-to-big-toe _pose',
    'reclining_hero_pose', 'scalepose', 'seatedforwardbend',
    'side_reclining_leg_lift', 'sideplank', 'staffpose',
    'standing_forward_bend', 'standing_half_forward_bend',
    'supine_spinal_twist_pose', 'thunderboltpose', 'tree', 'trianglepose',
    'turtlepose', 'upward_plank', 'warrior1', 'warrior2',
    'wide-angle_seated_forward_bend', 'yogic_sleep_pose'
]

# Load your own trained yoga model
model_yoga = load_model(MODEL_PATH_YOGA)
model_yoga._make_predict_function()

#load food prediction model
model_food= MobileNetV2(weights='imagenet')
model_food._make_predict_function()

print('Model loaded. Start serving...')


def model_predict(img, model):
    img = img.resize((64, 64))

    # Preprocessing the image
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
    x = np.expand_dims(x, axis=0)

    # Be careful how your trained model deals with the input
    # otherwise, it won't make correct prediction!
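
    # The function is truncated in the source. A plausible continuation (the
    # normalization step is an assumption; the commented-out line above hints
    # at it):
    x = x / 255.0
    preds = model.predict(x)
    return preds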
import config

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# --------------------------------- extract data / split dataset ---------------------------------
img_path, person_id_original_list, nbr_persion_ids = extract_file(config.DATA_FOLDER, data="train")

train_img_path, val_img_path, train_ids, val_ids = train_test_split(img_path, person_id_original_list, test_size=0.2,
                                                                    random_state=2020)
print("numbers of train images:", len(train_img_path))
print("numbers of val images:", len(val_img_path))

# ---------------------------------------backbone------------------------------
weight_path = "/students/julyedu_510477/PersonReID_project/MobileNetV2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_96_no_top.h5"

backbone = MobileNetV2(weights=weight_path, input_shape=(config.IMG_HIGHT, config.IMG_WIGTH, 3), include_top=False, alpha=0.5,
                       pooling='max')

# backbone.summary()

global_pool = backbone.get_layer(index=-1).output

dropout_layer = Dropout(0.25)(global_pool)

dense = Dense(nbr_persion_ids, activation='softmax')(dropout_layer)

baseline_model = Model(inputs=backbone.input, outputs=dense)

# 2. Label smoothing is added for optimization, via a custom modification of the Keras loss function
if config.USE_Label_Smoothing:
    baseline_model.compile(loss=my_categorical_crossentropy_label_smoothing,
                           optimizer=config.OPTIMIZER,
                           metrics=['accuracy'])  # assumed; the call is truncated in the source
Example #24
from gevent.pywsgi import WSGIServer

import tensorflow as tf
from tensorflow import keras

from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from keras.preprocessing import image

import numpy as np
from flask import Flask
from util import base64_to_pil

app = Flask(__name__)

from keras.applications.mobilenet_v2 import MobileNetV2
model = MobileNetV2(weights='imagenet')

print('Model loaded. Check http://127.0.0.1:8000/')

MODEL_PATH = 'models/your_model.h5'


def model_predict(img, model):
    img = img.resize((224, 224))

    x = image.img_to_array(img)

    x = np.expand_dims(x, axis=0)

    x = preprocess_input(x, mode='tf')
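
    # The function is truncated in the source; a plausible continuation:
    preds = model.predict(x)
    return preds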
Example #25
np.random.seed(0)

### load pretrained model ########

keras.backend.clear_session()  # Destroys the current TF graph and creates a new one.
weights_url = ''.join([
    'https://github.com/JonathanCMitchell/',
    'mobilenet_v2_keras/releases/download/v1.1/',
    'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5'
])
weights_file = 'mobilenet_v2_weights.h5'
weights_path = download_testdata(weights_url, weights_file, module='keras')
keras_mobilenet_v2 = MobileNetV2(alpha=0.5,
                                 include_top=True,
                                 weights=None,
                                 input_shape=(224, 224, 3),
                                 classes=1000)
keras_mobilenet_v2.load_weights(weights_path)

######## load test set ###########
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_name = 'cat.png'
img_path = download_testdata(img_url, img_name, module='data')
image = Image.open(img_path).resize((224, 224))
dtype = 'float32'


def transform_image(image):
    image = np.array(image) - np.array([123., 117., 104.])
    image /= np.array([58.395, 57.12, 57.375])
    # Truncated in the source; a typical continuation (an assumption) would
    # transpose to NCHW and add a batch axis:
    # image = image.transpose((2, 0, 1))
    # image = image[np.newaxis, :]
    # return image
Example #26
train_data_dir = 'chest_xray/chest_xray/train'
validation_data_dir = 'chest_xray/chest_xray/test'
nb_train_samples = 5216 
nb_validation_samples = 624
epochs = 2
batch_size = 1


if K.image_data_format() == 'channels_first': 
    input_shape = (3, img_width, img_height) 
else: 
    input_shape = (img_width, img_height, 3) 
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

from keras.applications.mobilenet_v2 import MobileNetV2
model = MobileNetV2(weights=None, classes=2)
# model = Sequential() 
# model.add(Conv2D(64, (3, 3), input_shape=input_shape)) 
# model.add(Activation('relu')) 
# model.add(Conv2D(64, (3, 3))) 
# model.add(Activation('relu')) 
# model.add(MaxPooling2D(pool_size=(2, 2))) 

# model.add(Conv2D(128, (3, 3))) 
# model.add(Activation('relu'))
# model.add(Conv2D(128, (3, 3))) 
# model.add(Activation('relu')) 
# model.add(MaxPooling2D(pool_size=(2, 2)))

# model.add(Conv2D(256, (3, 3))) 
# model.add(Activation('relu')) 
from sklearn.model_selection import train_test_split
import numpy as np
from keras.layers import Dense, Activation, Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.models import Model

IMG_SIZE = [224, 224]

X = np.load('X_array_cone.npy')
Y = np.load('Y_array_cone.npy')

x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)

mobile = MobileNetV2(input_shape=IMG_SIZE + [3],
                     weights='imagenet',
                     include_top=False)

for layer in mobile.layers:
    layer.trainable = False

x = Flatten()(mobile.output)

prediction = Dense(units=1, activation="sigmoid")(x)

model = Model(inputs=mobile.input, outputs=prediction)

model.summary()

model.compile(loss="binary_crossentropy",
              optimizer="adam",
def generate_base_model(model_name, lam, dropout_rate, import_weights):
    if model_name in ['VGG16', 'VGG19']:
        if model_name == 'VGG16':
            from keras.applications.vgg16 import VGG16
            base_model = VGG16(include_top=False,
                               weights=import_weights,
                               input_shape=(224, 224, 3))
        elif model_name == 'VGG19':
            from keras.applications.vgg19 import VGG19
            base_model = VGG19(include_top=False,
                               weights=import_weights,
                               input_shape=(224, 224, 3))
        x = base_model.output
        x = Flatten()(x)
        x = Dense(4096,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(lam))(x)
        x = Dropout(dropout_rate)(x)
        x = Dense(4096,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(lam))(x)
        x = Dropout(dropout_rate)(x)
    elif model_name in ['MobileNet', 'MobileNetV2']:
        if model_name == 'MobileNet':
            from keras.applications.mobilenet import MobileNet
            base_model = MobileNet(include_top=False,
                                   weights=import_weights,
                                   input_shape=(224, 224, 3))
        elif model_name == 'MobileNetV2':
            from keras.applications.mobilenet_v2 import MobileNetV2
            base_model = MobileNetV2(include_top=False,
                                     weights=import_weights,
                                     input_shape=(224, 224, 3))
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
    elif model_name in ['DenseNet121', 'DenseNet169', 'DenseNet201']:
        if model_name == 'DenseNet121':
            from keras.applications.densenet import DenseNet121
            base_model = DenseNet121(include_top=True,
                                     weights=import_weights,
                                     input_shape=(224, 224, 3))
        elif model_name == 'DenseNet169':
            from keras.applications.densenet import DenseNet169
            base_model = DenseNet169(include_top=True,
                                     weights=import_weights,
                                     input_shape=(224, 224, 3))
        elif model_name == 'DenseNet201':
            from keras.applications.densenet import DenseNet201
            base_model = DenseNet201(include_top=True,
                                     weights=import_weights,
                                     input_shape=(224, 224, 3))
        base_model = Model(base_model.inputs, base_model.layers[-2].output)
        x = base_model.output
    elif model_name in ['NASNetMobile', 'NASNetLarge']:
        if model_name == 'NASNetMobile':
            from keras.applications.nasnet import NASNetMobile
            base_model = NASNetMobile(include_top=True,
                                      weights=import_weights,
                                      input_shape=(224, 224, 3))
        elif model_name == 'NASNetLarge':
            from keras.applications.nasnet import NASNetLarge
            base_model = NASNetLarge(include_top=True,
                                     weights=import_weights,
                                     input_shape=(331, 331, 3))
        base_model = Model(base_model.inputs, base_model.layers[-2].output)
        x = base_model.output
    elif model_name == 'Xception':
        from keras.applications.xception import Xception
        base_model = Xception(include_top=False,
                              weights=import_weights,
                              input_shape=(299, 299, 3))
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
    elif model_name == 'InceptionV3':
        from keras.applications.inception_v3 import InceptionV3
        base_model = InceptionV3(include_top=False,
                                 weights=import_weights,
                                 input_shape=(299, 299, 3))
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
    elif model_name == 'InceptionResNetV2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        base_model = InceptionResNetV2(include_top=False,
                                       weights=import_weights,
                                       input_shape=(299, 299, 3))
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
    return x, base_model.input
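
# Example usage (a sketch; assumes Dense and Model are imported, and the output
# head and hyperparameters below are illustrative):
x, model_input = generate_base_model('MobileNetV2', lam=1e-4,
                                     dropout_rate=0.5,
                                     import_weights='imagenet')
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=model_input, outputs=predictions)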
Example #29
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

if args.resume:
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    
    base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    
    #base_model.summary()
    
    #pdb.set_trace()
    
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))
classes = [
    'House sparrow', 'Great tit', 'Eurasian blue tit', 'Eurasian magpie',
    'Eurasian jay'
]
seed = random.randint(1, 1000)

# Uncomment this to train on a GPU

# tf.debugging.set_log_device_placement(True)
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

a = 224
b = 224
ImageFile.LOAD_TRUNCATED_IMAGES = True
base_model = MobileNetV2(include_top=False,
                         weights='imagenet',
                         input_shape=(a, b, 3))  #(216, 384, 3)
base_model.trainable = False
x = base_model.output
x = GlobalAveragePooling2D()(x)
# x = Dropout(rate = .2)(x)
x = BatchNormalization()(x)
x = Dense(1280,
          activation='relu',
          kernel_initializer=glorot_uniform(seed),
          bias_initializer='zeros')(x)
# x = Dropout(rate = .2)(x)
x = BatchNormalization()(x)
predictions = Dense(len(classes),
                    activation='softmax',
                    kernel_initializer='random_uniform',