Example #1
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input

from Unet_from_encoder import *
import Losses as losses

physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:  # guard against CPU-only machines
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

size = 128
inp = tf.keras.layers.Input(shape=(size, size, 3))
encoder_model = MobileNetV2(input_tensor=inp,
                            weights='imagenet',
                            include_top=False)
for l in encoder_model.layers:
    l.trainable = False

unet = UNET(128, 3, encoder_model, filter_start=16)
encoder_model = []  # drop the local reference; the encoder now lives inside the U-Net
unet.model.compile(optimizer='Adam',
                   loss=losses.bce_and_jac,
                   metrics=['binary_crossentropy', losses.dice_coef])
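
The custom Losses module is not shown. A minimal sketch of what bce_and_jac and dice_coef could look like, assuming the usual soft Dice/Jaccard definitions (the function bodies and smoothing constant are assumptions, not the project's code):

import tensorflow as tf
from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # soft Dice coefficient over the flattened masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def jaccard_loss(y_true, y_pred, smooth=1.0):
    # 1 - soft IoU
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    union = K.sum(y_true_f) + K.sum(y_pred_f) - intersection
    return 1. - (intersection + smooth) / (union + smooth)

def bce_and_jac(y_true, y_pred):
    # binary cross-entropy plus Jaccard loss
    bce = K.mean(tf.keras.losses.binary_crossentropy(y_true, y_pred))
    return bce + jaccard_loss(y_true, y_pred)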
Example #2
File: train.py Project: rednafi/prinumco
# pointing the train and test directory
base_dir = "./"
train_folder = base_dir + "dataset/train/"
test_folder = base_dir + "dataset/test/"

# defining the optimizer
adam = Adam(lr=3e-4, beta_1=0.9, beta_2=0.999)

# Keras model
input_shape = (96, 96, 3)
num_classes = 10


# Defining the model
base_model = MobileNetV2(
    include_top=False, input_shape=input_shape, classes=num_classes
)

x = base_model.output
x = GlobalMaxPooling2D()(x)
x = Dense(1000, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(512, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(128, activation="relu")(x)
predictions = Dense(num_classes, activation="softmax")(x)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"])
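
The snippet compiles the model but stops before training. A plausible continuation that wires up the directories defined above (generator settings and epoch count are assumptions, not from the original project):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_folder, target_size=(96, 96), batch_size=32, class_mode="categorical")
test_generator = test_datagen.flow_from_directory(
    test_folder, target_size=(96, 96), batch_size=32, class_mode="categorical")

model.fit(train_generator, validation_data=test_generator, epochs=10)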

Example #3
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.20,
                                                  stratify=labels,
                                                  random_state=42)

aug = ImageDataGenerator(rotation_range=20,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

baseModel = MobileNetV2(weights="imagenet",
                        include_top=False,
                        input_tensor=Input(shape=(224, 224, 3)))

headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

model = Model(inputs=baseModel.input, outputs=headModel)

for layer in baseModel.layers:
    layer.trainable = False

print("[INFO] compiling model...")
Example #4
import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
import tensorflow.keras.datasets as datasets
import matplotlib.pyplot as plt

# Check TF version
print(tf.__version__)
print(tf.keras.__version__)

# Get Mnist Data
mnist = datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)

# Visualize Data
plt.imshow(x_train[0])
plt.title('label:' + str(y_train[0]))
plt.show()

# Get MobilenetV2 model
model = MobileNetV2(weights='imagenet', include_top=False)
model.summary()
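
Note that MNIST images are 28x28 grayscale, while MobileNetV2 expects 3-channel inputs of at least 32x32. A minimal sketch of adapting a few samples before feeding them to the model above (the target size is an assumption):

import numpy as np

x = np.repeat(x_train[:8, ..., np.newaxis], 3, axis=-1).astype("float32")  # gray -> RGB
x = tf.image.resize(x, (96, 96))                                           # 28x28 -> 96x96
x = tf.keras.applications.mobilenet_v2.preprocess_input(x)                 # scale to [-1, 1]
features = model(x)    # include_top=False, so this yields spatial feature maps
print(features.shape)  # (8, 3, 3, 1280)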
Example #5
              weights='imagenet')),
 ("DenseNet201",
  DenseNet201(input_shape=IMG_SHAPE,
              include_top=False,
              weights='imagenet')),
 ("InceptionResNetV2",
  InceptionResNetV2(input_shape=IMG_SHAPE,
                    include_top=False,
                    weights='imagenet')),
 ("MobileNet",
  MobileNet(input_shape=IMG_SHAPE,
            include_top=False,
            weights='imagenet')),
 ("MobileNetV2",
  MobileNetV2(input_shape=IMG_SHAPE,
              include_top=False,
              weights='imagenet')),
 ("ResNet101",
  ResNet101(input_shape=IMG_SHAPE,
            include_top=False,
            weights='imagenet')),
 ("ResNet101V2",
  ResNet101V2(input_shape=IMG_SHAPE,
              include_top=False,
              weights='imagenet')),
 ("ResNet152",
  ResNet152(input_shape=IMG_SHAPE,
            include_top=False,
            weights='imagenet')),
 ("ResNet152V2",
  ResNet152V2(input_shape=IMG_SHAPE,
Example #6
 def backbone(x_in):
     if backbone_type == 'ResNet50':
         return ResNet50(input_shape=x_in.shape[1:],
                         include_top=False,
                         weights=weights)(x_in)
     elif backbone_type == 'ResNet50V2':
         return ResNet50V2(input_shape=x_in.shape[1:],
                           include_top=False,
                           weights=weights)(x_in)
     elif backbone_type == 'ResNet101V2':
         return ResNet101V2(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'InceptionResNetV2':
         return InceptionResNetV2(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=weights)(x_in)
     elif backbone_type == 'InceptionV3':
         return InceptionV3(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'MobileNet':
         return MobileNet(input_shape=x_in.shape[1:],
                          include_top=False,
                          weights=weights)(x_in)
     elif backbone_type == 'MobileNetV2':
         return MobileNetV2(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'NASNetLarge':
         model = NASNetLarge(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None)
         model.load_weights(WEIGHTS_DIR + "nasnet_large_no_top.h5")
         return model(x_in)
     elif backbone_type == 'NASNetMobile':
         model = NASNetMobile(input_shape=x_in.shape[1:],
                              include_top=False,
                              weights=None)
         model.load_weights(WEIGHTS_DIR + "nasnet_mobile_no_top.h5")
         return model(x_in)
     elif backbone_type == 'Xception':
         return Xception(input_shape=x_in.shape[1:],
                         include_top=False,
                         weights=weights)(x_in)
     elif backbone_type == 'MobileNetV3Small':
         model = MobileNetV3Small(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=None)
         model.load_weights(WEIGHTS_DIR + "mobilenet_v3_small_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'MobileNetV3Large':
         model = MobileNetV3Large(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=None)
         model.load_weights(WEIGHTS_DIR + "mobilenet_v3_large_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite0':
         model = EfficientNetLite0(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite0_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite1':
         model = EfficientNetLite1(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite1_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite2':
         model = EfficientNetLite2(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite2_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite3':
         model = EfficientNetLite3(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite3_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite4':
         model = EfficientNetLite4(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite4_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite5':
         model = EfficientNetLite5(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite5_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite6':
         model = EfficientNetLite6(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite6_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB0':
         model = EfficientNetB0(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb0_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB1':
         model = EfficientNetB1(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb1_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB2':
         model = EfficientNetB2(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb2_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB3':
         model = EfficientNetB3(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb3_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB4':
         model = EfficientNetB4(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb4_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB5':
         model = EfficientNetB5(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb5_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB6':
         model = EfficientNetB6(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         if use_pretrain:
             model.load_weights(WEIGHTS_DIR + "efficientnetb6_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB7':
         model = EfficientNetB7(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb7_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'MnasNetA1':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetA1")(x_in)
     elif backbone_type == 'MnasNetB1':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetB1")(x_in)
     elif backbone_type == 'MnasNetSmall':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetSmall")(x_in)
     else:
         raise ValueError('unsupported backbone_type: ' + backbone_type)
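
The long elif chain above could be collapsed with a lookup table. A sketch of the idea for the ImageNet-weighted backbones (illustrative only, not the project's code):

BACKBONES = {
    'ResNet50': ResNet50,
    'ResNet50V2': ResNet50V2,
    'ResNet101V2': ResNet101V2,
    'MobileNet': MobileNet,
    'MobileNetV2': MobileNetV2,
    'Xception': Xception,
}

def backbone_from_table(x_in, backbone_type, weights='imagenet'):
    if backbone_type not in BACKBONES:
        raise ValueError('unsupported backbone_type: ' + backbone_type)
    return BACKBONES[backbone_type](input_shape=x_in.shape[1:],
                                    include_top=False,
                                    weights=weights)(x_in)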
Example #7
def QSSD_MOBILENETV2(
    config,
    label_maps,
    num_predictions=10,
    is_training=True,
):
    model_config = config["model"]
    input_shape = (model_config["input_size"], model_config["input_size"], 3)
    num_classes = len(label_maps) + 1  # for background class
    l2_reg = model_config["l2_regularization"]
    kernel_initializer = model_config["kernel_initializer"]
    default_quads_config = model_config["default_quads"]
    extra_box_for_ar_1 = model_config["extra_box_for_ar_1"]
    #

    input_tensor = Input(shape=input_shape)
    # keep the Input layer separate from the padded tensor instead of shadowing it
    padded_input = ZeroPadding2D(padding=(2, 2))(input_tensor)

    base_network = MobileNetV2(
        input_tensor=padded_input,
        alpha=config["model"]["width_multiplier"],
        classes=num_classes,
        weights='imagenet',
        include_top=False
    )
    base_network = Model(inputs=base_network.input, outputs=base_network.get_layer('block_16_project_BN').output)
    base_network.get_layer("input_1")._name = "input"
    for layer in base_network.layers:
        # note: these assign private attributes after the layers are built, so they
        # do not actually change the initializer/regularizer of existing weights
        base_network.get_layer(layer.name)._kernel_initializer = "he_normal"
        base_network.get_layer(layer.name)._kernel_regularizer = l2(l2_reg)
        layer.trainable = False  # the base network stays frozen

    conv_13 = base_network.get_layer("block_13_expand_relu").output
    conv_16 = base_network.get_layer('block_16_project_BN').output

    def conv_block_1(x, filters, name):
        x = Conv2D(
            filters=filters,
            kernel_size=(1, 1),
            padding="valid",
            kernel_initializer='he_normal',
            kernel_regularizer=l2(l2_reg),
            name=name,
            use_bias=False)(x)
        x = BatchNormalization(name=f"{name}/bn")(x)
        x = ReLU(name=f"{name}/relu")(x)
        return x

    def conv_block_2(x, filters, name):
        x = Conv2D(
            filters=filters,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer='he_normal',
            kernel_regularizer=l2(l2_reg),
            name=name,
            use_bias=False,
            strides=(2, 2))(x)
        x = BatchNormalization(name=f"{name}/bn")(x)
        x = ReLU(name=f"{name}/relu")(x)
        return x
    conv17_1 = conv_block_1(x=conv_16, filters=256, name="conv17_1")
    conv17_2 = conv_block_2(x=conv17_1, filters=512, name="conv17_2")
    conv18_1 = conv_block_1(x=conv17_2, filters=128, name="conv18_1")
    conv18_2 = conv_block_2(x=conv18_1, filters=256, name="conv18_2")
    conv19_1 = conv_block_1(x=conv18_2, filters=128, name="conv19_1")
    conv19_2 = conv_block_2(x=conv19_1, filters=256, name="conv19_2")
    conv20_1 = conv_block_1(x=conv19_2, filters=128, name="conv20_1")
    conv20_2 = conv_block_2(x=conv20_1, filters=256, name="conv20_2")
    model = Model(inputs=base_network.input, outputs=conv20_2)

    # construct the prediction layers (conf, loc, & default_boxes)
    scales = np.linspace(
        default_quads_config["min_scale"],
        default_quads_config["max_scale"],
        len(default_quads_config["layers"])
    )
    mbox_conf_layers = []
    mbox_quad_layers = []
    for i, layer in enumerate(default_quads_config["layers"]):
        num_default_quads = get_number_default_quads(
            aspect_ratios=layer["aspect_ratios"],
            angles=layer["angles"],
            extra_box_for_ar_1=extra_box_for_ar_1
        )
        x = model.get_layer(layer["name"]).output
        layer_name = layer["name"]

        layer_mbox_conf = Conv2D(
            filters=num_default_quads * num_classes,
            kernel_size=(3, 3),
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=l2(l2_reg),
            name=f"{layer_name}_mbox_conf")(x)
        layer_mbox_conf_reshape = Reshape(
            (-1, num_classes), name=f"{layer_name}_mbox_conf_reshape")(layer_mbox_conf)
        layer_mbox_quad = Conv2D(
            filters=num_default_quads * 8,
            kernel_size=(3, 3),
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=l2(l2_reg),
            name=f"{layer_name}_mbox_quad")(x)
        layer_mbox_quad_reshape = Reshape(
            (-1, 8), name=f"{layer_name}_mbox_quad_reshape")(layer_mbox_quad)
        mbox_conf_layers.append(layer_mbox_conf_reshape)
        mbox_quad_layers.append(layer_mbox_quad_reshape)

    # concatenate class confidence predictions from different feature map layers
    mbox_conf = Concatenate(axis=-2, name="mbox_conf")(mbox_conf_layers)
    mbox_conf_softmax = Activation(
        'softmax', name='mbox_conf_softmax')(mbox_conf)
    # concatenate object quad predictions from different feature map layers
    mbox_quad = Concatenate(axis=-2, name="mbox_quad")(mbox_quad_layers)

    if is_training:
        # concatenate confidence score predictions and quad predictions
        predictions = Concatenate(
            axis=-1, name='predictions')([mbox_conf_softmax, mbox_quad])
        return Model(inputs=base_network.input, outputs=predictions)

    mbox_default_quads_layers = []
    for i, layer in enumerate(default_quads_config["layers"]):
        num_default_quads = get_number_default_quads(
            aspect_ratios=layer["aspect_ratios"],
            angles=layer["angles"],
            extra_box_for_ar_1=extra_box_for_ar_1
        )
        x = model.get_layer(layer["name"]).output
        layer_name = layer["name"]
        layer_default_quads = DefaultQuads(
            image_shape=input_shape,
            scale=scales[i],
            next_scale=scales[i + 1] if i + 1 <= len(default_quads_config["layers"]) - 1 else 1,
            aspect_ratios=layer["aspect_ratios"],
            angles=layer["angles"],
            variances=default_quads_config["variances"],
            extra_box_for_ar_1=extra_box_for_ar_1,
            name=f"{layer_name}_default_quads")(x)
        layer_default_quads_reshape = Reshape(
            (-1, 16), name=f"{layer_name}_default_quads_reshape")(layer_default_quads)
        mbox_default_quads_layers.append(layer_default_quads_reshape)

    # concatenate default boxes from different feature map layers
    mbox_default_quads = Concatenate(
        axis=-2, name="mbox_default_quads")(mbox_default_quads_layers)
    predictions = Concatenate(axis=-1, name='predictions')(
        [mbox_conf_softmax, mbox_quad, mbox_default_quads])
    # decoded_predictions = DecodeQSSDPredictions(
    #     input_size=model_config["input_size"],
    #     num_predictions=num_predictions,
    #     name="decoded_predictions"
    # )(predictions)

    return Model(inputs=base_network.input, outputs=predictions)
Example #8
    callbacks = [callback],
    validation_data=val_generator,
    workers=4
)

score = model.evaluate(val_generator,verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

"""## MobileNetV2 

"""

from tensorflow.keras.applications import MobileNetV2

net= MobileNetV2(include_top=False, weights='imagenet', input_tensor=Input(shape=(150,150,3))) 

for layer in net.layers[:-5]:
    layer.trainable = False

x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(1, activation='sigmoid', name='sigmoid')(x)
model = Model(inputs=net.input, outputs=output_layer)

# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Train the model using RMSprop
model.compile(loss='binary_crossentropy',
Example #9
def feature_extractor(inputs):
    feature_extractor_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    return feature_extractor_model(inputs)
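
A quick usage sketch for the helper above (shapes follow from the standard 224x224 MobileNetV2):

import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(224, 224, 3))
features = feature_extractor(inputs)
extractor = tf.keras.Model(inputs, features)
print(extractor.output_shape)  # (None, 7, 7, 1280)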
Example #10
                                                    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    '../data/dataset/val',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    shuffle=False)
test_generator = test_datagen.flow_from_directory('../data/dataset/test',
                                                  target_size=(224, 224),
                                                  batch_size=32,
                                                  class_mode='categorical',
                                                  shuffle=False)

# instantiate base model (mobilenetV2)
base_model = MobileNetV2(weights="imagenet",
                         include_top=False,
                         input_shape=(224, 224, 3))


# define head layers for transfer learning
def add_new_last_layer(base_model, nb_classes=2):
    """Add last layer to the convnet
    Args:
    base_model: keras model excluding top
    nb_classes: # of classes
    Returns:
    new keras model with last layer
    """
    # Get the output shape of the models last layer
    x = base_model.output
    # construct new head for the transferred model
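    # (the listing is cut off here; a hedged reconstruction of the typical head --
    # layer choices are assumptions, and the usual Keras layer imports are assumed)
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(nb_classes, activation="softmax")(x)
    return Model(inputs=base_model.input, outputs=predictions)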
Example #11
                                                   shuffle=True,
                                                   seed=42,
                                                   subset='validation')

STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = validation_gen.n // validation_gen.batch_size

clToInt_dict = train_gen.class_indices
clToInt_dict = dict((k, v) for v, k in clToInt_dict.items())

# define the model

weight_path = '../../h5_files/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224.h5'

pre_model = MobileNetV2(input_shape=(224, 224, 3),
                        weights=None,
                        include_top=True)

pre_model.load_weights(weight_path)

for layer in pre_model.layers:
    layer.trainable = False

conn_layer = pre_model.get_layer('block_12_add')
conn_output = conn_layer.output

x = Conv2D(128, (3, 3), activation='relu')(conn_output)
x = MaxPool2D(2, 2)(x)
x = Conv2D(256, (3, 3), activation='relu')(x)  # chain from x so the first conv/pool block is not discarded
x = MaxPool2D(2, 2)(x)
x = Flatten()(x)
Example #12
                                                     class_mode="categorical",
                                                     shuffle=True,
                                                     subset='training')

# Set as validation data
validation_generator = data_generator.flow_from_directory(
    dataset_path,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    shuffle=False,
    subset='validation')

# Load the pre-trained model and remove the head FC layer
base_model = MobileNetV2(weights="imagenet",
                         include_top=False,
                         input_tensor=Input(shape=(IMG_SIZE, IMG_SIZE, 3)))

# Construct the head of the model that will be placed on top of the base model
head_model = base_model.output
head_model = AveragePooling2D(pool_size=(7, 7))(head_model)
head_model = Flatten(name="flatten")(head_model)
head_model = Dense(128, activation="relu")(head_model)
head_model = Dropout(0.5)(head_model)
head_model = Dense(NUM_CLASS, activation="softmax")(head_model)

# Place the head FC model on top of the base model (this will become the actual model we will train)
model = Model(inputs=base_model.input, outputs=head_model)

# Loop over all layers in the base model and freeze them so they will *not* be updated during the first training process
for layer in base_model.layers:
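    # (cut off in the listing; the comment above implies the body freezes each layer)
    layer.trainable = False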
Example #13
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import VGG16, VGG19, Xception
from tensorflow.keras.applications import ResNet101, ResNet101V2, ResNet152, ResNet152V2
from tensorflow.keras.applications import ResNet50, ResNet50V2
from tensorflow.keras.applications import InceptionV3, InceptionResNetV2
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import NASNetLarge, NASNetMobile
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB1
from tensorflow.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

MobileNetV2 = MobileNetV2(weights='imagenet',
                          include_top=False,
                          input_shape=(32, 32, 3))
# print(model.weights)

# ============== Preprocessing ===============
x_train = preprocess_input(x_train)
x_test = preprocess_input(x_test)
# note: rescaling by 255 after preprocess_input double-preprocesses the data;
# normally one or the other is used
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

# one-hot encode y for multi-class classification
y_train = to_categorical(y_train)  # (50000, 10)
y_test = to_categorical(y_test)  # (10000, 10)

# ============== Modeling =====================
MobileNetV2.trainable = False  # freeze the base and keep the saved ImageNet weights
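
The listing stops after freezing the base. A plausible classification head for CIFAR-10 on top of it (layer choices here are assumptions):

from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense

x = GlobalAveragePooling2D()(MobileNetV2.output)  # (None, 1, 1, 1280) -> (None, 1280)
outputs = Dense(10, activation='softmax')(x)
model = Model(inputs=MobileNetV2.input, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])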
Example #14
    image = cv2.resize(image, (224, 224))
    image = preprocess_input(image)
    # image = np.repeat(image[..., np.newaxis], 3, -1)
    val_images.append(image)

val_images = np.array(val_images)
val_labels = np.array(val_labels)
print("=> Loaded {} validation images".format(val_images.shape[0]))
val_labels = lb.transform(val_labels)

train_images, train_labels = shuffle(train_images, train_labels)
val_images, val_labels = shuffle(val_images, val_labels)

# model
base_model = MobileNetV2(weights=None,
                         include_top=False,
                         input_shape=(224, 224, 3))
x = Flatten()(base_model.output)
predictions = Dense(4, activation='softmax')(x)
for layer in base_model.layers:
    layer.trainable = True
model = Model(inputs=base_model.input, outputs=predictions)

checkpoint = ModelCheckpoint("MobileNetV2.h5",
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             save_weights_only=False,
                             mode='min',
                             period=1)
early = EarlyStopping(monitor='val_loss',
Example #15
from tensorflow.python.keras.layers.core import Dropout
import numpy as np
# from IPython.display import Image
from tensorflow.keras.applications import MobileNetV2
# from tensorflow import keras
from tensorflow.keras.applications.mobilenet import preprocess_input
# from tensorflow.keras import layers
from tensorflow.keras.layers import Dense,Flatten,Input,Dropout
from tensorflow.keras import Sequential
import cv2
from keras.preprocessing import image
import time
import os

image_size = 100
mobile = MobileNetV2(weights='imagenet',
                     include_top=False,
                     input_shape=(image_size, image_size, 3),
                     alpha=0.35)
print(mobile.summary())
my_model = Sequential()
input = Input(shape=(image_size, image_size, 3), name='image_input')  # unused: Sequential takes its input from `mobile`
my_model.add(mobile)
my_model.add(Dropout(0.5))
my_model.add(Flatten())
my_model.add(Dense(4, activation='softmax'))

allWeights = np.load('./robot/SignalsAllWeightsV2.npy',allow_pickle=True)
i=0
for l in my_model.layers:
    print(l.name)
    weightsArray = []
    weights = l.get_weights()
    for subLayer in weights:
Example #16
def getProposedModelA(size=224,
                      seq_len=32,
                      cnn_weight='imagenet',
                      cnn_trainable=True,
                      lstm_type='sepconv',
                      weight_decay=2e-5,
                      frame_diff_interval=1,
                      mode="both",
                      cnn_dropout=0.25,
                      lstm_dropout=0.25,
                      dense_dropout=0.3,
                      seed=42):
    """parameters:
    size = height/width of each frame,
    seq_len = number of frames in each sequence,
    cnn_weight= None or 'imagenet'
    mode = "only_frames" or "only_differences" or "both"
       returns:
    model
    """
    print('cnn_trainable:', cnn_trainable)
    print('cnn dropout : ', cnn_dropout)
    print('dense dropout : ', dense_dropout)
    print('lstm dropout :', lstm_dropout)

    if mode == "both":
        frames = True
        differences = True
    elif mode == "only_frames":
        frames = True
        differences = False
    elif mode == "only_differences":
        frames = False
        differences = True

    if frames:

        frames_input = Input(shape=(seq_len, size, size, 3),
                             name='frames_input')
        frames_cnn = MobileNetV2(input_shape=(size, size, 3),
                                 alpha=0.35,
                                 weights='imagenet',
                                 include_top=False)
        frames_cnn = Model(inputs=[frames_cnn.layers[0].input],
                           outputs=[frames_cnn.layers[-30].output
                                    ])  # taking only upto block 13

        for layer in frames_cnn.layers:
            layer.trainable = cnn_trainable

        frames_cnn = TimeDistributed(frames_cnn,
                                     name='frames_CNN')(frames_input)
        frames_cnn = TimeDistributed(LeakyReLU(alpha=0.1),
                                     name='leaky_relu_1_')(frames_cnn)
        frames_cnn = TimeDistributed(Dropout(cnn_dropout, seed=seed),
                                     name='dropout_1_')(frames_cnn)

        if lstm_type == 'sepconv':
            frames_lstm = SepConvLSTM2D(
                filters=64,
                kernel_size=(3, 3),
                padding='same',
                return_sequences=False,
                dropout=lstm_dropout,
                recurrent_dropout=lstm_dropout,
                name='SepConvLSTM2D_1',
                kernel_regularizer=l2(weight_decay),
                recurrent_regularizer=l2(weight_decay))(frames_cnn)
        elif lstm_type == 'conv':
            frames_lstm = ConvLSTM2D(
                filters=64,
                kernel_size=(3, 3),
                padding='same',
                return_sequences=False,
                dropout=lstm_dropout,
                recurrent_dropout=lstm_dropout,
                name='ConvLSTM2D_1',
                kernel_regularizer=l2(weight_decay),
                recurrent_regularizer=l2(weight_decay))(frames_cnn)
        elif lstm_type == 'asepconv':
            frames_lstm = AttenSepConvLSTM2D(
                filters=64,
                kernel_size=(3, 3),
                padding='same',
                return_sequences=False,
                dropout=lstm_dropout,
                recurrent_dropout=lstm_dropout,
                name='AttenSepConvLSTM2D_1',
                kernel_regularizer=l2(weight_decay),
                recurrent_regularizer=l2(weight_decay))(frames_cnn)
        else:
            raise Exception("lstm type not recognized!")

        frames_lstm = BatchNormalization(axis=-1)(frames_lstm)

    if differences:

        frames_diff_input = Input(shape=(seq_len - frame_diff_interval, size,
                                         size, 3),
                                  name='frames_diff_input')
        frames_diff_cnn = MobileNetV2(input_shape=(size, size, 3),
                                      alpha=0.35,
                                      weights='imagenet',
                                      include_top=False)
        frames_diff_cnn = Model(inputs=[frames_diff_cnn.layers[0].input],
                                outputs=[frames_diff_cnn.layers[-30].output
                                         ])  # taking only upto block 13

        for layer in frames_diff_cnn.layers:
            layer.trainable = cnn_trainable

        frames_diff_cnn = TimeDistributed(
            frames_diff_cnn, name='frames_diff_CNN')(frames_diff_input)
        frames_diff_cnn = TimeDistributed(
            LeakyReLU(alpha=0.1), name='leaky_relu_2_')(frames_diff_cnn)
        frames_diff_cnn = TimeDistributed(Dropout(cnn_dropout, seed=seed),
                                          name='dropout_2_')(frames_diff_cnn)

        if lstm_type == 'sepconv':
            frames_diff_lstm = SepConvLSTM2D(
                filters=64,
                kernel_size=(3, 3),
                padding='same',
                return_sequences=False,
                dropout=lstm_dropout,
                recurrent_dropout=lstm_dropout,
                name='SepConvLSTM2D_2',
                kernel_regularizer=l2(weight_decay),
                recurrent_regularizer=l2(weight_decay))(frames_diff_cnn)
        elif lstm_type == 'conv':
            frames_diff_lstm = ConvLSTM2D(
                filters=64,
                kernel_size=(3, 3),
                padding='same',
                return_sequences=False,
                dropout=lstm_dropout,
                recurrent_dropout=lstm_dropout,
                name='ConvLSTM2D_2',
                kernel_regularizer=l2(weight_decay),
                recurrent_regularizer=l2(weight_decay))(frames_diff_cnn)
        elif lstm_type == 'asepconv':
            frames_diff_lstm = AttenSepConvLSTM2D(
                filters=64,
                kernel_size=(3, 3),
                padding='same',
                return_sequences=False,
                dropout=lstm_dropout,
                recurrent_dropout=lstm_dropout,
                name='AttenSepConvLSTM2D_2',
                kernel_regularizer=l2(weight_decay),
                recurrent_regularizer=l2(weight_decay))(frames_diff_cnn)
        else:
            raise Exception("lstm type not recognized!")

        frames_diff_lstm = BatchNormalization(axis=-1)(frames_diff_lstm)

    if frames:
        x1 = MaxPooling2D((2, 2))(frames_lstm)

    if differences:
        x2 = MaxPooling2D((2, 2))(frames_diff_lstm)

    if mode == "both":
        x = Add()([x1, x2])
    elif mode == "only_frames":
        x = x1
    elif mode == "only_differences":
        x = x2

    x = LeakyReLU(alpha=0.1)(x)
    x = Flatten()(x)
    x = Dense(64)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dense(16)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(dense_dropout, seed=seed)(x)
    predictions = Dense(1, activation='sigmoid')(x)

    if mode == "both":
        model = Model(inputs=[frames_input, frames_diff_input],
                      outputs=predictions)
    elif mode == "only_frames":
        model = Model(inputs=frames_input, outputs=predictions)
    elif mode == "only_differences":
        model = Model(inputs=frames_diff_input, outputs=predictions)

    return model
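
A quick instantiation check for the function above (a sketch; the 'sepconv' and 'asepconv' variants need the project's custom SepConvLSTM2D layers, so 'conv' is used here):

model = getProposedModelA(size=224, seq_len=32, lstm_type='conv', mode='both')
model.summary()
# expects two inputs per sample:
#   frames:            (batch, 32, 224, 224, 3)
#   frame differences: (batch, 31, 224, 224, 3)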
Example #17
class train():
    ap = argparse.ArgumentParser()
    ap.add_argument("-d",
                    "--dataset",
                    required=True,
                    help="path to input dataset")
    ap.add_argument("-p",
                    "--plot",
                    type=str,
                    default="plot.png",
                    help="path to output loss/accuracy plot")
    ap.add_argument("-m",
                    "--model",
                    type=str,
                    default="mask_detector.model",
                    help="path to output face mask detector model")
    args = vars(ap.parse_args())

    # initialize learning rate, number of epochs, and batch size
    INIT_LR = 1e-4
    EPOCHS = 20
    BS = 32
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    print("[INFO] loading images...")
    imagePaths = list(paths.list_images(args["dataset"]))
    data = []
    labels = []

    for imagePath in imagePaths:
        label = imagePath.split(os.path.sep)[-2]
        image = load_img(imagePath, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        labels.append(label)
    data = np.array(data, dtype="float32")
    labels = np.array(labels)
    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)
    labels = to_categorical(labels)

    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.20,
                                                      stratify=labels,
                                                      random_state=42)

    aug = ImageDataGenerator(rotation_range=20,
                             zoom_range=0.15,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.15,
                             horizontal_flip=True,
                             fill_mode="nearest")

    baseModel = MobileNetV2(weights="imagenet",
                            include_top=False,
                            input_tensor=Input(shape=(224, 224, 3)))
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(128, activation="relu")(headModel)
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(2, activation="softmax")(headModel)

    model = Model(inputs=baseModel.input, outputs=headModel)

    for layer in baseModel.layers:
        layer.trainable = False
    print("[INFO] compiling model...")
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    H = model.fit(aug.flow(trainX, trainY, batch_size=BS),
                  steps_per_epoch=len(trainX) // BS,
                  validation_data=(testX, testY),
                  validation_steps=len(testX) // BS,
                  epochs=EPOCHS)

    print("[INFO] evaluating network...")
    predIdxs = model.predict(testX, batch_size=BS)
    predIdxs = np.argmax(predIdxs, axis=1)

    print(
        classification_report(testY.argmax(axis=1),
                              predIdxs,
                              target_names=lb.classes_))

    print("[INFO] saving mask detector model...")
    model.save(args["model"], save_format="h5")

    def plot_picture(self):
        #print(H.history)
        N = EPOCHS
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
        plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
        plt.plot(np.arange(0, N), H.history["acc"], label="train_accuracy")
        plt.plot(np.arange(0, N), H.history["val_acc"], label="val_accuracy")
        plt.title("Training Loss and Accuracy")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend(loc="lower left")
        plt.savefig(args["plot"])
Example #18
    shuffle=True,
    subset='validation')

test = testGen.flow_from_directory(
    TESTPATH,
    target_size=(224, 224),
    classes=['with_mask', 'without_mask'],
    class_mode='categorical',
    batch_size=BATCH_SIZE,
    shuffle=True,
)

# Model building
mob = MobileNetV2(
    input_shape=(224, 224, 3),
    include_top=False,
    weights='imagenet',
)
mob.trainable = False

model = Sequential()
model.add(mob)
model.add(GlobalAveragePooling2D())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))
model.summary()

# serialize model to JSON
model_json = model.to_json()
with open("files\\model\\model_mask.json", "w") as json_file:
Example #19
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  class_mode='raw',
                                  target_size=(224, 224),
                                  subset='validation')
test_gen = gen.flow_from_dataframe(dataframe=test_df,
                                   x_col='path',
                                   y_col=columns,
                                   batch_size=BATCH_SIZE,
                                   shuffle=True,
                                   class_mode='raw',
                                   target_size=(224, 224))

# Define model
base_model = MobileNetV2(weights="imagenet",
                         include_top=False,
                         input_tensor=tf.keras.layers.Input(shape=(224, 224,
                                                                   3)))

head_model = base_model.output
head_model = tf.keras.layers.GlobalMaxPooling2D()(head_model)

x = tf.keras.layers.Dense(512)(head_model)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

x = tf.keras.layers.Dense(128)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

x = tf.keras.layers.Dense(64)(x)
x = tf.keras.layers.BatchNormalization()(x)
Example #20
if arg.model == 'resnet':
    from tensorflow.keras.applications import ResNet50
    model = ResNet50(include_top=True,
                     weights=None,
                     input_tensor=None,
                     input_shape=(224, 224, 3),
                     pooling=None,
                     classes=1000)

if arg.model == 'mobilenet':
    from tensorflow.keras.applications import MobileNetV2
    model = MobileNetV2(alpha=1.0,
                        include_top=True,
                        weights=None,
                        input_tensor=None,
                        pooling=None,
                        classes=1000,
                        classifier_activation='softmax',
                        input_shape=(224, 224, 3))

model.summary()

shape = [1, 224, 224, 3]
picture = np.ones(shape, dtype=np.float32)
#picture1 = np.random.rand(1,224,224,3)

nSteps = 50
for i in range(0, nSteps):
    #picture = np.random.rand(1,224,224,3)
    if i == nSteps - 1:
        tf.profiler.experimental.start('mobilenet_logdir' + str(i))
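    # (the listing is cut off here; a hedged reconstruction -- run one inference
    # per step and stop the profiler after the traced final step)
    model.predict(picture)
    if i == nSteps - 1:
        tf.profiler.experimental.stop()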
Example #21
import numpy as np
import tensorflow as tf

from rmac import RMAC

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Lambda

# load the pretrained network from Keras Applications
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array


# load the base model
base_model = MobileNetV2()

# check the architecture and see where to attach our RMAC layer
# print(base_model.summary())

# create the new model consisting of the base model and a RMAC layer
layer = "out_relu"
base_out = base_model.get_layer(layer).output

rmac = RMAC(base_out.shape, levels=5, norm_fm=True, sum_fm=True)

# add RMAC layer on top
rmac_layer = Lambda(rmac.rmac, input_shape=base_model.output_shape, name="rmac_"+layer)

out = rmac_layer(base_out)
#out = Dense(1024)(out) # fc to desired dimensionality
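
The listing stops before assembling the model. A plausible finish that wraps the base network and the RMAC layer into a descriptor extractor (the image path is hypothetical):

rmac_model = Model(inputs=base_model.input, outputs=out)

img = img_to_array(load_img('query.jpg', target_size=(224, 224)))  # hypothetical file
img = preprocess_input(np.expand_dims(img, axis=0))
descriptor = rmac_model.predict(img)
print(descriptor.shape)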
Example #22
def get_arch(arg, input_shape, classes, **kwargs):
    input_tensor = Input(shape=input_shape)

    if "Normalization" in kwargs:
        if kwargs["Normalization"] == "BatchNormalization":
            kwargs["Normalization"] = BatchNormalization
        elif kwargs["Normalization"] == "LayerNormalization":
            kwargs["Normalization"] = LayerNormalization
        elif kwarfs["Normalization"] == "NoNormalization":
            kwargs["Normalization"] = NoNormalization
        # if its not a string assume its a normalization layer
        elif type(kwargs["Normalization"]) == str:
            print("Warning: couldn't understand your normalization")
            kwargs["Normalization"] = NoNormalization

    if arg == "AlexNet":
        return AlexNet(input_tensor=input_tensor, classes=classes, **kwargs)
    elif arg == "SmolAlexNet":
        return SmolAlexNet(input_tensor=input_tensor,
                           classes=classes,
                           **kwargs)
    elif arg == "VGG16":
        return VGG16(input_tensor=input_tensor,
                     classes=classes,
                     weights=None,
                     **kwargs)
    elif arg == "VGG19":
        return VGG19(input_tensor=input_tensor,
                     classes=classes,
                     weights=None,
                     **kwargs)
    elif arg == "ResNet50":
        return ResNet50(input_tensor=input_tensor,
                        classes=classes,
                        weights=None,
                        **kwargs)
    elif arg == "ResNet152":
        return ResNet152(input_tensor=input_tensor,
                         classes=classes,
                         weights=None,
                         **kwargs)
    elif arg == "CifarResNet":
        return CifarResNet(3, input_tensor=input_tensor, classes=classes)
    elif arg == "DenseNet169":
        return DenseNet169(input_tensor=input_tensor,
                           classes=classes,
                           weights=None,
                           **kwargs)
    elif arg == "DenseNet121":
        return DenseNet121(input_tensor=input_tensor,
                           classes=classes,
                           weights=None,
                           **kwargs)
    elif arg == "MobileNetV2":
        return MobileNetV2(input_tensor=input_tensor,
                           classes=classes,
                           weights=None,
                           **kwargs)
    elif arg == "DenseNetCifar":
        return DenseNetCifar(input_tensor, classes, 12, 16)
    else:
        show_available()
        raise Exception(arg + " not an available architecture")
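
A usage sketch for get_arch (the argument values are assumptions):

model = get_arch("MobileNetV2", input_shape=(32, 32, 3), classes=10)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])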
Example #23
	test_size=0.20,stratify=labels,random_state=42)

# construct the training image generator for data augmentation
aug = ImageDataGenerator(
	rotation_range=20,
	zoom_range=0.15,
	width_shift_range=0.2,
	height_shift_range=0.2,
	shear_range=0.15,
	horizontal_flip=True,
	fill_mode="nearest")

#------------------Building the model--------------
# Use MobileNetV2 as the base model for fine-tuning. We leave out the top layer
# and add a few layers of our own to match the expected output.
baseModel=MobileNetV2(weights="imagenet",include_top=False,
	input_tensor=Input(shape=(image_size[0],image_size[1],3)))

output=baseModel.output
output=AveragePooling2D(pool_size=(7,7))(output)
output=Flatten()(output)
output=Dense(128,activation="relu")(output)
output=Dropout(0.4)(output)
output=Dense(2,activation="softmax")(output)

model=Model(inputs=baseModel.input,outputs=output)
for layer in baseModel.layers:
	layer.trainable = False

optim=Adam(lr=lr)
model.compile(loss="binary_crossentropy", optimizer=optim,metrics=["accuracy"])
#------Training the model on our dataset---------------
Example #24
 def getKerasModel(*args, **kwargs):
     return MobileNetV2(*args, **kwargs)
Example #25
def get_model(
    vocab_size=20000,
    input_shape=(None, None, 3),
    model="mobilenet",
    weights="imagenet",
    embedding_size=300,
    lr=0.0001,
    W=None,
    trainable=False,
):
    input_1 = Input(input_shape)
    input_2 = Input(shape=(1, ))
    input_3 = Input(shape=(1, ))

    _norm = Lambda(lambda x: K.l2_normalize(x, axis=-1))

    if model == "mobilenet":
        base_model = MobileNetV2(include_top=False,
                                 input_shape=input_shape,
                                 weights=weights)
    else:
        base_model = ResNet50(include_top=False,
                              input_shape=input_shape,
                              weights=weights)

    x1 = base_model(input_1)
    out1 = GlobalMaxPooling2D()(Dropout(0.1)(x1))
    out2 = GlobalAveragePooling2D()(x1)
    image_representation = Concatenate(axis=-1)([out1, out2])
    image_representation = Dropout(0.1)(image_representation)

    image_representation = Dense(50, name="img_repr")(image_representation)

    image_representation = _norm(image_representation)
    if W is not None:
        embed = Embedding(vocab_size,
                          embedding_size,
                          name="embed",
                          weights=[W],
                          trainable=trainable)
    else:
        embed = Embedding(vocab_size, embedding_size, name="embed")

    dense_label = Dense(50, name="label_repr")

    x2 = embed(input_2)
    x2 = Flatten()(x2)
    x2 = Dropout(0.1)(x2)
    x2 = dense_label(x2)

    x3 = embed(input_3)
    x3 = Flatten()(x3)
    x3 = Dropout(0.1)(x3)
    x3 = dense_label(x3)

    label1 = _norm(x2)
    label2 = _norm(x3)

    x = Concatenate(axis=-1)([image_representation, label1, label2])

    model = Model([input_1, input_2, input_3], x)

    model_image = Model(input_1, image_representation)
    model_label = Model([input_2], label1)

    model.compile(loss=triplet_loss, optimizer=Adam(lr))
    model_image.compile(loss="mae", optimizer=Adam(lr))
    model_label.compile(loss="mae", optimizer=Adam(lr))

    model.summary()
    model_image.summary()
    model_label.summary()

    return model, model_image, model_label
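
triplet_loss is not shown. A common margin-based formulation consistent with the 150-dimensional concatenated output above (the margin and slicing are assumptions):

from tensorflow.keras import backend as K

def triplet_loss(y_true, y_pred, margin=0.4):
    # y_pred = [image repr (50) | positive label repr (50) | negative label repr (50)]
    img, pos, neg = y_pred[:, :50], y_pred[:, 50:100], y_pred[:, 100:150]
    d_pos = K.sum(K.square(img - pos), axis=-1)
    d_neg = K.sum(K.square(img - neg), axis=-1)
    return K.mean(K.maximum(d_pos - d_neg + margin, 0.0))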
Example #26
plt.subplot(122)
plt.plot(history_benchmark.history['loss'])
plt.plot(history_benchmark.history['val_loss'])
plt.title('Loss vs Epochs')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['Train', 'Validation'], loc='upper right')

benchmark_test_loss, benchmark_test_accuracy = benchmark_model.evaluate(
    x=images_test, y=labels_test)

print(f'Test loss: {np.round(benchmark_test_loss, 2)}')
print(f'Test accuracy: {np.round(benchmark_test_accuracy, 2)}')

mobile_net_v2_model = MobileNetV2()

mobile_net_v2_model.summary()


def build_feature_extractor_model(model):
    input_layer = model.inputs
    output_layer = model.get_layer('global_average_pooling2d').output
    return Model(inputs=input_layer, outputs=output_layer)


feature_extractor = build_feature_extractor_model(mobile_net_v2_model)
print('\nFeature extractor model:\n')
feature_extractor.summary()

Example #27
from model.utils import Param


def arg():
    parser = argparse.ArgumentParser()
    parser.add_argument("cfg", help="config path", type=str)
    parser.add_argument("epoch", help="epoch", type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = arg()
    config_path = args.cfg
    params = Param(config_path)
    baseModel = MobileNetV2(include_top=False,
                            weights=None,
                            input_shape=(224, 224, 3),
                            pooling="avg")
    fc = tf.keras.layers.Dense(128, activation=None,
                               name="embeddings")(baseModel.output)
    l2 = tf.math.l2_normalize(fc)

    model = Model(inputs=baseModel.input, outputs=l2)
    model.load_weights(config_path + f"epoch-{args.epoch}")

    model._set_inputs(inputs=tf.random.normal(shape=(1, params.INPUT_SIZE[0],
                                                     params.INPUT_SIZE[1], 3)))
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()

    logdir = os.path.join(config_path, "tflite/")
    if not os.path.exists(logdir):
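        # (cut off in the listing; a hedged reconstruction -- create the directory
        # and write the converted model; the file name is hypothetical)
        os.makedirs(logdir)
    with open(os.path.join(logdir, "model.tflite"), "wb") as f:
        f.write(tflite_model)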
Example #28
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras import models
from tensorflow.keras import layers

conv_base = MobileNetV2(weights='imagenet',
                        include_top=False,
                        input_shape=(64, 64, 3))
conv_base.trainable = False

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1000, activation='softmax'))

# model.summary()
conv_base.summary()
Example #29
def get_mobilenet_SSD(image_size, num_classes):
    mobilenet = MobileNetV2(input_shape=image_size,
                            include_top=False,
                            weights="imagenet")
    for layer in mobilenet.layers:
        layer._name = layer.name + '_base'

    x = layers.BatchNormalization(
        beta_initializer='glorot_uniform', gamma_initializer='glorot_uniform')(
            mobilenet.get_layer(name='block_8_add_base').output)
    conf1 = layers.Conv2D(4 * 4 * num_classes, kernel_size=3,
                          padding='same')(x)
    conf1 = layers.Reshape(
        (conf1.shape[1] * conf1.shape[2] * conf1.shape[3] // num_classes,
         num_classes))(conf1)
    loc1 = layers.Conv2D(4 * 4 * 4, kernel_size=3, padding='same')(x)
    loc1 = layers.Reshape(
        (loc1.shape[1] * loc1.shape[2] * loc1.shape[3] // 4, 4))(loc1)

    x = layers.MaxPool2D(3, 1, padding='same')(
        mobilenet.get_layer(name='block_12_add_base').output)
    x = layers.Conv2D(1024,
                      3,
                      padding='same',
                      dilation_rate=6,
                      activation='relu')(x)
    x = layers.Conv2D(1024, 1, padding='same', activation='relu')(x)
    conf2 = layers.Conv2D(6 * num_classes, kernel_size=3, padding='same')(x)
    conf2 = layers.Reshape(
        (conf2.shape[1] * conf2.shape[2] * conf2.shape[3] // num_classes,
         num_classes))(conf2)
    loc2 = layers.Conv2D(6 * 4, kernel_size=3, padding='same')(x)
    loc2 = layers.Reshape(
        (loc2.shape[1] * loc2.shape[2] * loc2.shape[3] // 4, 4))(loc2)

    x = layers.Conv2D(256, 1, activation='relu')(x)
    x = layers.Conv2D(512, 3, strides=2, padding='same', activation='relu')(x)
    conf3 = layers.Conv2D(6 * num_classes, kernel_size=3, padding='same')(x)
    conf3 = layers.Reshape(
        (conf3.shape[1] * conf3.shape[2] * conf3.shape[3] // num_classes,
         num_classes))(conf3)
    loc3 = layers.Conv2D(6 * 4, kernel_size=3, padding='same')(x)
    loc3 = layers.Reshape(
        (loc3.shape[1] * loc3.shape[2] * loc3.shape[3] // 4, 4))(loc3)

    x = layers.Conv2D(128, 1, activation='relu')(x)
    x = layers.Conv2D(256, 3, strides=2, padding='same', activation='relu')(x)
    conf4 = layers.Conv2D(6 * num_classes, kernel_size=3, padding='same')(x)
    conf4 = layers.Reshape(
        (conf4.shape[1] * conf4.shape[2] * conf4.shape[3] // num_classes,
         num_classes))(conf4)
    loc4 = layers.Conv2D(6 * 4, kernel_size=3, padding='same')(x)
    loc4 = layers.Reshape(
        (loc4.shape[1] * loc4.shape[2] * loc4.shape[3] // 4, 4))(loc4)

    x = layers.Conv2D(128, 1, activation='relu')(x)
    x = layers.Conv2D(256, 3, activation='relu')(x)
    conf5 = layers.Conv2D(4 * num_classes, kernel_size=3, padding='same')(x)
    conf5 = layers.Reshape(
        (conf5.shape[1] * conf5.shape[2] * conf5.shape[3] // num_classes,
         num_classes))(conf5)
    loc5 = layers.Conv2D(4 * 4, kernel_size=3, padding='same')(x)
    loc5 = layers.Reshape(
        (loc5.shape[1] * loc5.shape[2] * loc5.shape[3] // 4, 4))(loc5)

    x = layers.Conv2D(128, 1, activation='relu')(x)
    x = layers.Conv2D(256, 3, activation='relu')(x)
    conf6 = layers.Conv2D(4 * num_classes, kernel_size=3, padding='same')(x)
    conf6 = layers.Reshape(
        (conf6.shape[1] * conf6.shape[2] * conf6.shape[3] // num_classes,
         num_classes))(conf6)
    loc6 = layers.Conv2D(4 * 4, kernel_size=3, padding='same')(x)
    loc6 = layers.Reshape(
        (loc6.shape[1] * loc6.shape[2] * loc6.shape[3] // 4, 4))(loc6)

    confs = layers.concatenate([conf1, conf2, conf3, conf4, conf5, conf6],
                               axis=1)
    locs = layers.concatenate([loc1, loc2, loc3, loc4, loc5, loc6], axis=1)
    model = tf.keras.Model(inputs=mobilenet.layers[0].output,
                           outputs=[confs, locs])

    return model
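
A quick shape check for the SSD builder above (the image size and class count are assumptions):

ssd = get_mobilenet_SSD(image_size=(224, 224, 3), num_classes=21)
confs, locs = ssd(tf.zeros((1, 224, 224, 3)))
print(confs.shape, locs.shape)  # (1, num_anchors, 21) and (1, num_anchors, 4)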
Example #30
                                                target_size=(224, 224),
                                                batch_size=batch_size,
                                                color_mode="rgb",
                                                class_mode='categorical',
                                                shuffle=False)

found_classes = list(train_batches.class_indices.keys())
print('Classes Found:', found_classes)

assert all(a == b for a, b in zip(found_classes, class_names)), \
    'Found classes differ from the static class names; please modify class_names in this file'

# Model architecture
mobilenet = MobileNetV2(weights='imagenet',
                        include_top=True,
                        input_shape=(224, 224, 3))
mobilenet.layers.pop()  # note: popping from model.layers does not rewire the graph; the 1000-way top is still attached

# for layer in mobilenet.layers :
#     layer.trainable = False

model = Sequential()
model.add(mobilenet)
model.add(Dense(5, activation='softmax', name='predictions'))
model.summary()

# Model training
num_train = 4736
num_val = 3568
num_epoch = 25