Example #1
# grab the list of images that we'll be describing then randomly
# shuffle them to allow for easy training and testing splits via
# array slicing during training time
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# extract the class labels from the image paths then encode the
# labels
labels = [p.split(os.path.sep)[-2] for p in imagePaths]
le = LabelEncoder()
labels = le.fit_transform(labels)

# load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet", include_top=False)

# initialize the HDF5 dataset writer, then store the class label
# names in the dataset
dataset = HDF5DatasetWriter((len(imagePaths), 512 * 7 * 7),
                            args["output"],
                            dataKey="features",
                            bufSize=args["buffer_size"])
dataset.storeClassLabels(le.classes_)

# initialize the progress bar
widgets = [
    "Extracting Features: ",
    progressbar.Percentage(), " ",
    progressbar.Bar(), " ",
    progressbar.ETA()
]
pbar = progressbar.ProgressBar(maxval=len(imagePaths),
                               widgets=widgets).start()
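
The snippet stops at the progress bar; below is a minimal sketch of the extraction loop that typically follows, assuming a batch-size argument (args["batch_size"]) and the usual load_img/img_to_array/imagenet_utils/numpy imports for this kind of script.

# Minimal sketch of the extraction loop (args["batch_size"] and the
# keras.preprocessing.image / imagenet_utils imports are assumptions)
bs = args["batch_size"]
for i in np.arange(0, len(imagePaths), bs):
    # load and preprocess the current batch of images
    batchPaths = imagePaths[i:i + bs]
    batchLabels = labels[i:i + bs]
    batchImages = []
    for imagePath in batchPaths:
        image = load_img(imagePath, target_size=(224, 224))
        image = img_to_array(image)
        image = np.expand_dims(image, axis=0)
        image = imagenet_utils.preprocess_input(image)
        batchImages.append(image)

    # pass the batch through VGG16 and flatten the pool5 activations
    batchImages = np.vstack(batchImages)
    features = model.predict(batchImages, batch_size=bs)
    features = features.reshape((features.shape[0], 512 * 7 * 7))

    # write the features and labels to the HDF5 dataset
    dataset.add(features, batchLabels)
    pbar.update(i)

dataset.close()
pbar.finish()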
Example #2
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()


# In[23]:


from keras.applications import VGG16

conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))


# In[24]:


vgg_model = Sequential()
vgg_model.add(conv_base)
vgg_model.add(Flatten())
vgg_model.add(Dense(32, activation='relu'))
vgg_model.add(Dense(5, activation='softmax'))


# In[25]:
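
Before compiling, the convolutional base is normally frozen so only the newly added Dense layers are trained; a minimal sketch (the optimizer choice is an assumption):

# Freeze the VGG16 base so only the new Dense head is trained
conv_base.trainable = False

vgg_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['acc'])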
Example #3
def vgg_conv():
    conv_model = VGG16(include_top=False,
                       weights='imagenet',
                       input_shape=(224, 224, 3))
    return conv_model
Example #4
# # Face Detection with VGG16
#
# ### Loading the VGG16 Model

# In[1]:

from keras.applications import VGG16

# VGG16 was designed to work on 224 x 224 pixel input image sizes
img_rows = 224
img_cols = 224

#Loads the VGG16 model
model = VGG16(weights='imagenet',
              include_top=False,
              input_shape=(img_rows, img_cols, 3))

# ### Inspecting each layer

# In[2]:

# Let's print our layers
for (i, layer) in enumerate(model.layers):
    print(str(i) + " " + layer.__class__.__name__, layer.trainable)

# ### Let's freeze all layers except the top 4

# In[3]:

from keras.applications import VGG16
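
The snippet is cut off here; a minimal sketch of the freeze step that the heading above describes (freeze everything except the last 4 layers, then re-check the flags):

# Freeze every layer except the top 4, then verify the trainable flags
for layer in model.layers[:-4]:
    layer.trainable = False

for (i, layer) in enumerate(model.layers):
    print(str(i) + " " + layer.__class__.__name__, layer.trainable)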
Example #5
# add augmented training data
augmented_imgs, augmented_measurements = [], []
for img, measure in zip(images, measurements):
    augmented_imgs.append(img)
    augmented_imgs.append(np.fliplr(img))
    augmented_measurements.append(measure)
    augmented_measurements.append(-measure)

x_train = np.array(augmented_imgs)
y_train = np.array(augmented_measurements)

# Load the pre-trained VGG model
from keras.applications import VGG16
vgg_conv = VGG16(weights='imagenet',
                 include_top=False,
                 input_shape=(160, 320, 3))

# Freeze the required layers
# Freeze the layers except the last 4 layers
for layer in vgg_conv.layers[:-4]:
    layer.trainable = False

# Check the trainable status of the individual layers
for layer in vgg_conv.layers:
    print(layer, layer.trainable)

# Create the model
from keras import models
from keras import layers
from keras import optimizers
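
A minimal sketch of how the network is typically assembled from here: a small regression head on the partially frozen VGG base, trained with MSE since the steering measurement is a single continuous value (layer sizes and optimizer are assumptions):

# Small regression head on the partially frozen VGG base (sizes assumed);
# the steering angle is a single continuous output, hence MSE loss
model = models.Sequential()
model.add(vgg_conv)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1))

model.compile(optimizer=optimizers.Adam(lr=1e-4), loss='mse')
model.fit(x_train, y_train, validation_split=0.2, shuffle=True, epochs=5)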
Example #6
    data.append(image)

    label = int(i.split(os.path.sep)[-2][9:])
    labels.append(label)
data = np.array(data, dtype='float') / 255.0
labels = np.array(labels)
# (x_train, x_test, y_train, y_test) = train_test_split(data, labels, random_state=42)
# NOTE: with the split above commented out, train and test are the same arrays
x_train = data
x_test = data
y_train = labels
y_test = labels

y_train = np_utils.to_categorical(y_train, num_classes=997)
y_test = np_utils.to_categorical(y_test, num_classes=997)

base_model=VGG16(weights='imagenet', include_top=False, input_shape=(64, 64, 3))
x=base_model.output
x=Flatten()(x)
x=Dense(500,activation='relu')(x)
y=Dense(997,activation='softmax')(x)
model=Model(inputs=base_model.input,outputs=y)
for layer in base_model.layers:
    layer.trainable=False

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True, fill_mode='nearest')

history = model.fit_generator(datagen.flow(x_train, y_train), validation_data=(x_test, y_test), epochs=50, verbose=2)
# history = model.fit(x_train, y_train, epochs=50, shuffle=True, verbose=2, validation_split=0.25)
Example #7
data = data.astype("float") / 255.0

# Split the data into training data (75%) and testing data (25%)
(train_x, test_x, train_y, test_y) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

# Convert the labels from integers to vectors
lb = LabelBinarizer()
train_y = lb.fit_transform(train_y)
test_y = lb.transform(test_y)  # use transform (not fit_transform) so test labels share the training encoding

# load the VGG16 network, ensuring the head FC layer sets are left off
baseModel = VGG16(weights="imagenet",
                  include_top=False,
                  input_tensor=Input(shape=(224, 224, 3)))

# initialize the new head of the network, a set of FC layers
# followed by a softmax classifier
headModel = FCHeadNet.build(baseModel, len(classNames), 256)

# place the head FC model on top of the base model -- this will
# become the actual model we will train
model = Model(inputs=baseModel.input, outputs=headModel)

# loop over all layers in the base model and freeze them so they
# will not be updated during the training process
for layer in baseModel.layers:
    layer.trainable = False
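
The warm-up training step that typically follows: compile with a small learning rate and fit on the split arrays (the optimizer and hyperparameters are assumptions):

# Warm up the new head: compile (after freezing) and train briefly
# (optimizer and hyperparameters are assumptions)
from keras.optimizers import RMSprop

opt = RMSprop(lr=0.001)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])
model.fit(train_x, train_y, batch_size=32, epochs=25,
          validation_data=(test_x, test_y), verbose=1)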
Example #8
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)
val_generator = val_datagen.flow_from_directory(
    '/home/zyh/PycharmProjects/baidu_dog/crop_val',
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

if os.path.exists('dog_single_vgg16.h5'):
    model = load_model('dog_single_vgg16.h5')
else:
    input_tensor = Input(shape=(224, 224, 3))
    base_model = VGG16(include_top=True, weights='imagenet')
    base_model.layers.pop()
    base_model.outputs = [base_model.layers[-1].output]
    base_model.layers[-1].outbound_nodes = []
    base_model.output_layers = [base_model.layers[-1]]

    input = Input(shape=(224, 224, 3), name='img')
    feature = base_model(input)
    pred_layer = Dense(units=100, activation='softmax', name='prob')(
        Dropout(0.5)(Dense(4096, activation='relu')(feature)))
    model = Model(inputs=input, outputs=pred_layer)
    plot_model(model, to_file='vgg-16--.png')
    for i, layer in enumerate(base_model.layers):
        layer.trainable = False
    model.compile(optimizer='adam',
                  loss={'prob': 'categorical_crossentropy'},
                  metrics=['accuracy'])
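
A minimal training sketch that could follow the if/else above, assuming the first generator (whose assignment is cut off at the top of the snippet) is named train_generator; step and epoch counts are assumptions:

# Train with the directory generators defined above; `train_generator` is the
# (assumed) name of the first generator, whose assignment is cut off above
model.fit_generator(train_generator,
                    steps_per_epoch=train_generator.samples // batch_size,
                    epochs=10,
                    validation_data=val_generator,
                    validation_steps=val_generator.samples // batch_size)
model.save('dog_single_vgg16.h5')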
Example #9
validataion_generator = validataion.flow_from_directory(
    path + '/valid',
    target_size=(img_shape[0], img_shape[1]),
    batch_size=val_batch_size,
    class_mode='categorical',
    shuffle=True)

test_generator = test.flow_from_directory(path + '/test',
                                          target_size=(img_shape[0],
                                                       img_shape[1]),
                                          batch_size=val_batch_size,
                                          class_mode='categorical',
                                          shuffle=True)
##################################################################
vgg = VGG16(weights='imagenet', include_top=False, input_shape=img_shape)
##################################################################
for layer in vgg.layers[:-3]:
    layer.trainable = False
##################################################################
model = Sequential()
model.add(vgg)

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(6, activation='softmax'))

model.summary()
##################################################################
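
A minimal compile-and-train sketch using the generators above; a training generator is assumed to be defined (and named train_generator) in the part of the script cut off above:

##################################################################
# Minimal compile/train sketch (optimizer, epochs, and the existence of a
# `train_generator` defined above are assumptions)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit_generator(train_generator,
                    validation_data=validataion_generator,
                    epochs=10)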
Example #10
K.set_learning_phase(False)

# Directories of pretrained models/data
data_loc = 'trained_models/lord/data/celeba_test.npz'

# Load data
data = np.load(data_loc)
x_d_test = np.copy(data['imgs'] / 255.)
y_d_test = np.copy(data['classes'])
# Rearrange y_test as ordinal classes (since absolute value of class doesn't matter)
_, y_d_test_ordinal = np.unique(y_d_test, return_inverse=True)

# Instantiate and load VGGFace with a VGG16 core
latent_dim = 128
input_img = Input(shape=(64, 64, 3))
core_model = VGG16(input_shape=(64, 64, 3), include_top=False)
encoded = core_model(input_img)
# Feature layer
encoded = Flatten()(encoded)
encoded = Dense(latent_dim, activation='linear',
                kernel_constraint=UnitNorm())(encoded)
# Create shared model
model = Model(input_img, encoded)

# Load weights
core_folder = 'trained_models/proposed'
core_weights = 'steps16_lr10.0_last'
target_weights = '%s/%s.h5' % (core_folder, core_weights)
model.load_weights(target_weights)

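With the weights loaded, the encoder maps 64x64 face crops to 128-d latent vectors; a quick usage check (batch size is arbitrary):

# Quick sanity check: embed a few test images (batch size arbitrary)
embeddings = model.predict(x_d_test[:16])
print(embeddings.shape)  # (16, 128) -- one latent vector per image
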
# Attack parameters
Example #11
                                                    batch_size=batch_size,
                                                    class_mode='categorical')

validation_generator = valid_datagen.flow_from_directory(
    args["val_dir"],
    target_size=(img_size, img_size),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)

##### Step-3:
############ Create the VGG-16 network graph without the last layers and load ImageNet pretrained weights
############ VGG16's default image size is 224
print('loading the model and the pre-trained weights...')

base_model = VGG16(include_top=False, weights='imagenet')

for i, layer in enumerate(base_model.layers, start=1):
    layer.trainable = True
    print(i, layer.name)

##### Step-4:
############ Add the top layers as per the number of classes in our dataset
############ Note that we are using a Dropout layer with rate 0.2, i.e. 20% of
############ the activations are randomly dropped during training
############

x = base_model.output
x = Dense(128)(x)
x = GlobalAveragePooling2D()(x)
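
A sketch of how Step-4 is typically finished: dropout (0.2, as the comment above notes), a softmax layer sized to the dataset, and the assembled model; num_classes is an assumed variable, and the Dropout/Model imports are assumed from the part of the script cut off above:

# Finish the head: Dropout(0.2) as noted above, then a softmax classifier;
# `num_classes` is an assumed variable holding the dataset's class count
x = Dropout(0.2)(x)
predictions = Dense(num_classes, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])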
Example #12
def __init__(self):
    self.__model = VGG16()
    if isfile(MODEL_WEIGHTS_FILE):
        self.__model.load_weights(MODEL_WEIGHTS_FILE)
Example #13
training_label = []
testing_imgs = []
testing_label = []
dim = 200

training_imgs = np.load(
    "/content/drive/My Drive/Dataset/all_binary_training_imgs.npy")
training_label = np.load(
    "/content/drive/My Drive/Dataset/all_binary_training_label.npy")
testing_imgs = np.load(
    "/content/drive/My Drive/Dataset/all_binary_testing_imgs.npy")
testing_label = np.load(
    "/content/drive/My Drive/Dataset/all_binary_testing_label.npy")

# Get back the convolutional part of a VGG network trained on ImageNet
model_vgg16_conv = VGG16(weights='imagenet', include_top=False)

# Set `trainable` to False to freeze the convolutional layers if needed
# for layer in model_vgg16_conv.layers:
#     layer.trainable = False

#Create your own input format (here dim*dim*3)
input = Input(shape=(dim, dim, 3))

#Use The Generated Convolution Layers Model
output_vgg16_conv = model_vgg16_conv(input)

#Add the fully-connected layers
x = Flatten(name='flatten')(output_vgg16_conv)
x = Dense(4096, activation='relu', name='fc1')(x)
#x = Dropout(0.5)(x)
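
A sketch of the usual ending for this head: a second FC block and a softmax output, then the assembled model ("binary" in the file names suggests two classes, which is an assumption here):

# Sketch of the usual ending: second FC block plus a classifier head
# (two classes assumed from the 'binary' file names)
x = Dense(4096, activation='relu', name='fc2')(x)
out = Dense(2, activation='softmax', name='predictions')(x)

model = Model(inputs=input, outputs=out)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])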
Example #14
def train(datapath: str, savepath: str = None) -> None:
    # dimensions of our images.
    img_width, img_height = 150, 150

    train_data_dir = os.path.join(datapath, 'train')
    validation_data_dir = os.path.join(datapath, 'test')
    nb_train_samples = 30000
    nb_validation_samples = 900
    epochs = 2
    batch_size = 16

    # Build the VGG16 network
    input_tensor = Input(shape=(150, 150, 3))
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_tensor=input_tensor)
    # Add an additional MLP model at the "top" (end) of the network
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    top_model.add(Dense(1, activation='sigmoid'))
    model = Model(inputs=base_model.input, outputs=top_model(base_model.output))

    # Freeze all the layers in the original model (fine-tune only the added Dense layers)
    for layer in model.layers[:-1]:  # freeze all but the final (top-model) layer
        layer.trainable = False

    # Compile the model with a SGD/momentum optimizer and a slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                  metrics=['accuracy'])

    # Prepare data augmentation configuration
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        str(train_data_dir),
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        str(validation_data_dir),
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    # Fine-tune the model
    model.fit_generator(train_generator,
                        steps_per_epoch=nb_train_samples // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=nb_validation_samples // batch_size)

    if savepath:
        model.save(savepath)
Example #15
os.chdir(filepath)

import numpy as np

import ssl
ssl._create_default_https_context = ssl._create_unverified_context

from keras.applications import VGG16

from keras.optimizers import SGD, Adam

HEIGHT = 192
WIDTH = 192

base_model = VGG16(weights='imagenet',
                   include_top=False,
                   input_shape=(HEIGHT, WIDTH, 3))

FC_LAYERS = [1024, 1024, 1024, 1024]
dropout = 0.4

from functions import build_finetune_model

finetune_model = build_finetune_model(base_model,
                                      dropout=dropout,
                                      fc_layers=FC_LAYERS,
                                      num_classes=2)

#
#dataset_train = dataset_train[0:100]
#label_train = label_train[0:100]
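
A minimal compile-and-train sketch for the assembled model; dataset_train and label_train follow the commented slicing above, and the hyperparameters are assumptions:

# Compile and train the fine-tune model (hyperparameters assumed)
finetune_model.compile(optimizer=Adam(lr=1e-4),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
finetune_model.fit(dataset_train, label_train,
                   batch_size=32, epochs=5, validation_split=0.1)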
Example #16
a8 = cv2.imread('8.jpg', cv2.IMREAD_UNCHANGED)
a9 = cv2.imread('9.jpg', cv2.IMREAD_UNCHANGED)
a0_r = cv2.resize(a0, (70,70), interpolation = cv2.INTER_AREA)
a1_r = cv2.resize(a1, (70,70), interpolation = cv2.INTER_AREA)
a2_r = cv2.resize(a2, (70,70), interpolation = cv2.INTER_AREA)
a3_r = cv2.resize(a3, (70,70), interpolation = cv2.INTER_AREA)
a4_r = cv2.resize(a4, (70,70), interpolation = cv2.INTER_AREA)
a5_r = cv2.resize(a5, (70,70), interpolation = cv2.INTER_AREA)
a6_r = cv2.resize(a6, (70,70), interpolation = cv2.INTER_AREA)
a7_r = cv2.resize(a7, (70,70), interpolation = cv2.INTER_AREA)
a8_r = cv2.resize(a8, (70,70), interpolation = cv2.INTER_AREA)
a9_r = cv2.resize(a9, (70,70), interpolation = cv2.INTER_AREA)
a=[a0_r,a1_r,a2_r,a3_r,a4_r,a5_r,a6_r,a7_r,a8_r,a9_r]
#########################################################################
image_size = 224
vgg_conv = VGG16(weights=None, include_top=False, input_shape=(image_size, image_size, 3))
model = models.Sequential()
for layer in vgg_conv.layers[:-4]: 
    model.add(layer)
model.add(layers.Flatten())
model.add(layers.Dense(400, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(200, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='softmax'))
model.load_weights('VGG16_traffic_sign_weights.h5')
#########################################################################
lower_red = np.array([110,25,30])
upper_red = np.array([200,250,200])

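Before the helper below, a quick sketch of how one of the resized templates could be classified with the loaded network; the model above expects 224x224 three-channel inputs, and the 1/255 scaling is an assumption:

# Sketch: classify one 70x70 template with the 224x224 network loaded above
# (assumes 3-channel images; the 1/255 scaling is an assumption)
sample = cv2.resize(a[0], (224, 224), interpolation=cv2.INTER_AREA)
sample = sample.astype('float32') / 255.0
pred = model.predict(np.expand_dims(sample, axis=0))
print(np.argmax(pred))  # predicted class index
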
def prep(b):
Example #17
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        TRAIN_PATH,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        VALID_PATH,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    base_model = VGG16(include_top=False, input_shape=input_shape)

    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(.5))
    top_model.add(Dense(1, activation='sigmoid'))

    model = Model(inputs=base_model.input, outputs=top_model(base_model.output))

    for layer in model.layers[:19]:
        layer.trainable = False

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(),
                  metrics=['accuracy'])
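
    # Minimal training sketch with the generators above (epoch and step
    # counts are assumptions, not part of the original snippet)
    model.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.samples // batch_size,
        epochs=10,
        validation_data=validation_generator,
        validation_steps=validation_generator.samples // batch_size)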
Example #18
def vgg_16():
    weights_path = pretrained_models_path + pretrained_models["vgg_16"]["weights"]
    net = VGG16(include_top=False, weights=weights_path)
    for layer in net.layers:
        layer.trainable = False
    return net
Example #19
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import VGG16
import numpy as np
import os

# 1) Feature Vectors
base_model = VGG16(include_top=False,
                   weights='imagenet',
                   input_shape=(224, 224, 3))
# file path we want to test
dirname_path = 'test_data'
# Get total number of images
count = 0
for root, dirs, files in os.walk(dirname_path):
    for each in files:
        if each != '.DS_Store':
            count += 1
images_array = np.zeros((count, 224, 224, 3))

t = 0
vgg16_feature_list = []
true_label = []  # Ground Truth list
name_list = []  # file name list
dir_path_list = os.listdir(dirname_path)
if '.DS_Store' in dir_path_list:
    dir_path_list.remove('.DS_Store')
dir_path_list.sort()
class_item = 0  # index of category
for dirname in dir_path_list:
    file_path = dirname_path + '/' + dirname
    file_path_list = os.listdir(file_path)
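    # Hedged sketch of how the loop presumably continues: load each image,
    # run it through VGG16, and record features, labels, and file names
    for filename in file_path_list:
        if filename == '.DS_Store':
            continue
        img = load_img(os.path.join(file_path, filename),
                       target_size=(224, 224))
        arr = img_to_array(img)
        images_array[t] = arr
        feature = base_model.predict(np.expand_dims(arr, axis=0))
        vgg16_feature_list.append(feature.flatten())
        true_label.append(class_item)
        name_list.append(filename)
        t += 1
    class_item += 1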
Example #20
def extract_features(model_name, split_data, output_dir, save_metrics):
    batch_size = 16
    keras_augmentation = True
    my_augmentation = False
    min_train_size = 500
    shuffle_train = True
    padding = 'ruling_gray'
    buffer_size = 1000
    target_shape = (128, 128, 3)
    if keras_augmentation:
        keras_aug = image.ImageDataGenerator(shear_range=0.1,
                                             horizontal_flip=True,
                                             vertical_flip=True,
                                             rotation_range=30,
                                             zoom_range=0.1,
                                             width_shift_range=0.05,
                                             height_shift_range=0.05,
                                             fill_mode="nearest")
    else:
        keras_aug = None
    if model_name == 'VGG16':
        model = VGG16(include_top=False,
                      weights='imagenet',
                      input_shape=target_shape)
    elif model_name == 'VGG19':
        model = VGG19(include_top=False,
                      weights='imagenet',
                      input_shape=target_shape)
    elif model_name == 'DenseNet':
        model = DenseNet121(include_top=False,
                            weights='imagenet',
                            input_shape=target_shape)
    else:
        raise ValueError(f"Unkown model name: {model_name}")
    features_shape = np.prod(model.layers[-1].output_shape[1:])
    print('feature shape:', features_shape)
    train_X, train_y, test_X, test_y, val_X, val_y, class_names = split_data
    classes = len(class_names)
    if min_train_size > 0:
        train_X, train_y = file_tools.broadcast_samples(
            train_X, train_y, min_train_size)
    # Prepare image generators
    train_gen = image_tools.img_generator(train_X, train_y, classes,
                                          batch_size, target_shape, padding,
                                          shuffle_train, my_augmentation,
                                          keras_aug)
    val_gen = image_tools.img_generator(val_X, val_y, classes, batch_size,
                                        target_shape, padding)
    test_gen = image_tools.img_generator(test_X, test_y, classes, batch_size,
                                         target_shape, padding)

    # for i in range(10):
    #     n = next(train_gen)[0][0]
    #     image_tools.plot_img(n)

    db_path = os.path.join(output_dir, 'features.h5')
    print('[INFO] Processing training data.')
    train_writer = hdf5_tools.HDF5Writer(db_path,
                                         (len(train_y), features_shape),
                                         group_name='train',
                                         data_name='features',
                                         buf_size=buffer_size)
    extract_in_batches(train_writer, train_gen,
                       np.ceil(len(train_y) / batch_size), model, target_shape,
                       features_shape)
    print('[INFO] Processing validation data.')
    val_writer = hdf5_tools.HDF5Writer(db_path, (len(val_y), features_shape),
                                       group_name='validation',
                                       data_name='features',
                                       buf_size=buffer_size)
    extract_in_batches(val_writer, val_gen, np.ceil(len(val_y) / batch_size),
                       model, target_shape, features_shape)
    print('[INFO] Processing testing data.')
    test_writer = hdf5_tools.HDF5Writer(db_path, (len(test_y), features_shape),
                                        group_name='test',
                                        data_name='features',
                                        buf_size=buffer_size)
    test_writer.store_class_labels(class_names)
    extract_in_batches(test_writer, test_gen,
                       np.ceil(len(test_y) / batch_size), model, target_shape,
                       features_shape)

    if save_metrics:
        with open(os.path.join(output_dir, 'parameters.txt'), 'w') as fh:
            fh.write(f'model: {model_name}\n')
            fh.write(f'min train size: {min_train_size}\n')
            fh.write(f'my augmentation: {my_augmentation}\n')
            fh.write(f'keras augmentation: {keras_augmentation}\n')
            fh.write(f'target shape: {target_shape}\n')
            fh.write(f'feature shape: {features_shape}\n')
            fh.write(f'padding: {padding}\n')
        print(f"[INFO] Features extracted to '{db_path}''")

    return db_path
Example #21
    train_set_x, train_set_y, dev_set_x, dev_set_y, test_set_x, test_set_y, classes = from_splitted_hdf5(
        data_dir)

    # for quick testing

    #train_set_x = train_set_x[:5, :, :, :]
    #train_set_y = train_set_y[:5, :]
    #dev_set_x = dev_set_x[:5, :, :, :]
    #dev_set_y = dev_set_y[:5, :]
    #test_set_x = test_set_x[:5, :, :, :]
    #test_set_y = test_set_y[:5, :]

    if args.pretrained_model == 'VGG16':
        pretrained_model = VGG16(weights='imagenet',
                                 include_top=False,
                                 input_shape=train_set_x.shape[1:])
    elif args.pretrained_model == 'VGG19':
        pretrained_model = VGG19(weights='imagenet',
                                 include_top=False,
                                 input_shape=train_set_x.shape[1:])
    elif args.pretrained_model == 'InceptionV3':
        pretrained_model = InceptionV3(weights='imagenet',
                                       include_top=False,
                                       input_shape=train_set_x.shape[1:])
    elif args.pretrained_model == 'Xception':
        pretrained_model = Xception(weights='imagenet',
                                    include_top=False,
                                    input_shape=train_set_x.shape[1:])
    elif args.pretrained_model == 'MobileNet':
        pretrained_model = MobileNet(weights='imagenet',
                                     include_top=False,
                                     input_shape=train_set_x.shape[1:])
Example #22
def feature_extraction(pre_trained_model='VGG16',
                       pooling_mode='avg',
                       classes=9,
                       data_augm_enabled=False):
    """ConvNet as fixed feature extractor, consist of taking the convolutional base of a previously-trained network,
    running the new data through it, and training a new classifier on top of the output.
    (i.e. train only the randomly initialized top layers while freezing all convolutional layers of the original model).

    # Arguments
        pre_trained_model: one of `VGG16`, `VGG19`, `ResNet50`, `VGG16_Places365`
        pooling_mode: pooling mode used for feature extraction
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
            - `flatten` means that the output of the last
                convolutional layer will be flattened.
        classes: optional number of classes to classify images into,
                            only to be specified if `weights` argument is `None`.
        data_augm_enabled: whether to augment the samples during training

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `pre_trained_model`, `pooling_mode` or invalid input shape.
    """

    if not (pre_trained_model
            in {'VGG16', 'VGG19', 'ResNet50', 'VGG16_Places365'}):
        raise ValueError(
            'The `pre_trained_model` argument should be either '
            '`VGG16`, `VGG19`, `ResNet50`, '
            'or `VGG16_Places365`. Other models will be supported in future releases. '
        )

    if not (pooling_mode in {'avg', 'max', 'flatten'}):
        raise ValueError('The `pooling_mode` argument should be either '
                         '`avg` (GlobalAveragePooling2D), `max` '
                         '(GlobalMaxPooling2D), '
                         'or `flatten` (Flatten).')

    # Define the name of the model and its weights
    weights_name = 'cost_sensitive_feature_extraction_' + pre_trained_model + '_' + pooling_mode + '_pool_weights_tf_dim_ordering_tf_kernels.h5'

    augm_samples_weights_name = 'cost_sensitive_augm_feature_extraction_' + pre_trained_model + '_' + pooling_mode + '_pool_weights_tf_dim_ordering_tf_kernels.h5'

    model_log = logs_dir + 'cost_sensitive_feature_extraction_' + pre_trained_model + '_' + pooling_mode + '_pool_log.csv'
    csv_logger = CSVLogger(model_log, append=True, separator=',')

    input_tensor = Input(shape=(224, 224, 3))

    # create the base pre-trained model for warm-up
    if pre_trained_model == 'VGG16':
        base_model = VGG16(weights='imagenet',
                           include_top=False,
                           input_tensor=input_tensor)

    elif pre_trained_model == 'VGG19':
        base_model = VGG19(weights='imagenet',
                           include_top=False,
                           input_tensor=input_tensor)

    elif pre_trained_model == 'ResNet50':
        base_model = ResNet50(weights='imagenet',
                              include_top=False,
                              input_tensor=input_tensor)

    elif pre_trained_model == 'VGG16_Places365':
        base_model = VGG16_Places365(weights='places',
                                     include_top=False,
                                     input_tensor=input_tensor)

    print('\n \n')
    print('The plain `' + pre_trained_model +
          '` pre-trained convnet was successfully initialised.\n')

    x = base_model.output

    # Now we set up the transfer-learning process - freeze all but the newly added top layers
    # and re-train the last Dense layer with 9 final outputs representing probabilities for the HRA classes.
    # Build a randomly initialised classifier model to put on top of the convolutional model

    # both `avg` and `max` result in the same size of the Dense layer afterwards
    # Both Flatten and GlobalAveragePooling2D are valid options. So is GlobalMaxPooling2D.
    # Flatten will result in a larger Dense layer afterwards, which is more expensive
    # and may result in worse overfitting. But if you have lots of data, it might also perform better.
    # https://github.com/keras-team/keras/issues/8470
    if pooling_mode == 'avg':
        x = GlobalAveragePooling2D(name='GAP')(x)
    elif pooling_mode == 'max':
        x = GlobalMaxPooling2D(name='GMP')(x)
    elif pooling_mode == 'flatten':
        x = Flatten(name='FLATTEN')(x)

    x = Dense(256, activation='relu',
              name='FC1')(x)  # let's add a fully-connected layer

    # When random init is enabled, we want to include Dropout,
    # otherwise when loading a pre-trained HRA model we want to omit
    # Dropout layer so the visualisations are done properly (there is an issue if it is included)
    x = Dropout(0.5, name='DROPOUT')(x)
    # and a logistic layer with the number of classes defined by the `classes` argument
    predictions = Dense(classes, activation='softmax',
                        name='PREDICTIONS')(x)  # new softmax layer

    # this is the transfer learning model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    print(
        'Randomly initialised classifier was successfully added on top of the original pre-trained conv. base. \n'
    )

    print(
        'Number of trainable weights before freezing the conv. base of the original pre-trained convnet: '
        '' + str(len(model.trainable_weights)))

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional layers of the preliminary base model
    for layer in base_model.layers:
        layer.trainable = False

    print(
        'Number of trainable weights after freezing the conv. base of the pre-trained convnet: '
        '' + str(len(model.trainable_weights)))

    print('\n')

    # compile the warm_up_model (should be done *after* setting layers to non-trainable)

    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # # The attribute model.metrics_names will give you the display labels for the scalar outputs.
    # print warm_up_model.metrics_names

    if data_augm_enabled:
        print(
            'Using augmented samples for training. This may take a while ! \n')

        t = now()

        history = model.fit_generator(augmented_train_generator,
                                      steps_per_epoch=nb_train_samples //
                                      batch_size,
                                      epochs=feature_extraction_epochs,
                                      callbacks=[csv_logger],
                                      class_weight=class_weight)

        print(
            'Training time for re-training the last Dense layer using augmented samples: %s'
            % (now() - t))

        model.save_weights(feature_extraction_dir + augm_samples_weights_name)
        print('Model weights using augmented samples were saved as `' +
              augm_samples_weights_name + '`')
        print('\n')

    else:
        t = now()
        history = model.fit_generator(train_generator,
                                      steps_per_epoch=nb_train_samples //
                                      batch_size,
                                      epochs=feature_extraction_epochs,
                                      callbacks=[csv_logger],
                                      class_weight=class_weight)

        print('Training time for re-training the last Dense layer: %s' %
              (now() - t))

        model.save_weights(feature_extraction_dir + weights_name)
        print('Model weights were saved as `' + weights_name + '`')
        print('\n')

    return model
Example #23
    # Print model summary
    model.summary()

    # Number of epochs
    epochs = 10

    # Train the network
    model.fit(x_train,
              y_train,
              epochs=epochs,
              shuffle=True,
              verbose=1,
              validation_split=0.2)

    # Evaluate the model (evaluate returns [loss, accuracy])
    score = model.evaluate(x_test, y_test, verbose=1)
    print('\nTest accuracy: %.2f%%' % (score[1] * 100))

    return x_train, x_test, y_train, y_test, score


x_train, x_test, y_train, y_test, acc = main()

# VGG16
pretrained_model = VGG16(include_top=False, weights='imagenet')
pretrained_model.summary()

vgg_feature_train = pretrained_model.predict(train_image)
vgg_feature_test = pretrained_model.predict(test_image)
Example #24
def __init__(self):
    self.matrix_res = None
    self.similarity_deep = None
    self.model = VGG16(include_top=False, weights='imagenet')
    self.matrix_idx_to_item_id = None
    self.item_id_to_matrix_idx = None
Example #25
def tune_conv_layers(x_train, y_train, x_test, y_test, top_model, nb_layers):
    # load vgg pretrained base
    vgg = VGG16(weights='imagenet', include_top=False)

    # freeze all layers except those being re-trained
    for layer in vgg.layers[:-nb_layers]:
        layer.trainable = False

    # standard input shape for vgg16
    inputs = Input(shape=IMG_SIZE + (3, ))

    # stack vgg on input shape
    vgg = vgg(inputs)

    # SANITY CHECK: don't retrain top dense layers
    for layer in top_model.layers:
        layer.trainable = False
    # ^ no consistent improvement...

    # stack our pre-trained top dense layers
    # (returns tensor)
    model = top_model(vgg)

    # convert output tensor to keras functional model
    model = Model(inputs=inputs, outputs=model)

    # compile with SGD w/ slow learning rate
    model.compile(
        loss='categorical_crossentropy',
        # recommended by Keras, best of tried
        # (decay and/or lower lr did not help)
        optimizer=SGD(lr=1e-4, momentum=0.9),
        metrics=['accuracy'])

    # get total number of training samples
    nb_train_samples = x_train.shape[0]

    # use data augmentation for training data
    # DON'T rescale (already done when converting images to numpy arrays)
    datagen = ImageDataGenerator(shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True)
    train_gen = datagen.flow(x_train, y_train, batch_size=BATCH_SIZE)

    # train full model
    model.fit_generator(
        train_gen,
        steps_per_epoch=nb_train_samples // BATCH_SIZE,
        #  model.fit(x_train, y_train,
        #  batch_size = BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=(x_test, y_test),
    )

    # return trained model
    return model

    # NOTE: the lines below are unreachable after the return above; they look
    # like an alternative ending that evaluates instead of returning the model:
    # score = model.evaluate(x_test, y_test)
    # return score[1]
Example #26
#the_input = Input(shape=(the_height, the_width, 3))
#
#u1 = SeparableConv2D(filters=64,
#                     kernel_size=(3, 3),
#                     data_format="channels_last",
#                     dilation_rate=1,
#                     depthwise_regularizer=l2(0.0001),
#                     pointwise_regularizer=l2(0.0001),
#                     padding="same")(the_input)
#u1 = BatchNormalization()(u1)
#u1 = Activation("relu")(u1)
#u1 = Dropout(rate=the_rate)(u1)

conv_base = VGG16(
    weights="/kaggle/input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5",
    include_top=False,
    input_shape=(the_height, the_width, 3))

print(conv_base.summary())

for layer in conv_base.layers:
    layer.trainable = False

# u1 = conv_base.layers[9].output

u1 = conv_base.get_layer('block2_conv2').output

# The call below is completed following the commented u1 template above
u2 = SeparableConv2D(filters=128,
                     kernel_size=(3, 3),
                     data_format="channels_last",
                     dilation_rate=1,
                     depthwise_regularizer=l2(0.0001),
                     pointwise_regularizer=l2(0.0001),
                     padding="same")(u1)
Example #27
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    iterate = K.function([model.input], [loss, grads])

    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.

    step = 1.
    for i in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step

    img = input_img_data[0]
    return deprocess_image(img)


model = VGG16(weights='imagenet', include_top=False)

# layer_name = 'block3_conv1'
# filter_index = 0
#
# plt.imshow(generate_pattern(layer_name, filter_index))
# plt.show()

layer_name = 'block4_conv1'
size = 64
margin = 5

results = np.zeros((8 * size + 7 * margin, 8 * size + 7 * margin, 3))

for i in range(8):
    for j in range(8):
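        # Hedged sketch of the grid-filling body from the standard
        # filter-visualization recipe: render the pattern for filter
        # (i + j*8) and paste it into the mosaic with margin offsets
        filter_img = generate_pattern(layer_name, i + (j * 8))
        horizontal_start = i * size + i * margin
        horizontal_end = horizontal_start + size
        vertical_start = j * size + j * margin
        vertical_end = vertical_start + size
        results[horizontal_start:horizontal_end,
                vertical_start:vertical_end, :] = filter_img

plt.figure(figsize=(20, 20))
plt.imshow(results.astype('uint8'))
plt.show()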
Example #28
def load(inputs, l2_regularization=5e-4):
    x, x1 = inputs

    # Block 1
    conv1_1 = Conv2D(64, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block1_conv1')(x1)
    conv1_2 = Conv2D(64, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block1_conv2')(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2),
                         strides=(2, 2),
                         padding='same',
                         name='block1_pool')(conv1_2)

    # Block 2
    conv2_1 = Conv2D(128, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block2_conv1')(pool1)
    conv2_2 = Conv2D(128, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block2_conv2')(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2),
                         strides=(2, 2),
                         padding='same',
                         name='block2_pool')(conv2_2)

    # Block 3
    conv3_1 = Conv2D(256, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block3_conv1')(pool2)
    conv3_2 = Conv2D(256, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block3_conv2')(conv3_1)
    conv3_3 = Conv2D(256, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block3_conv3')(conv3_2)
    pool3 = MaxPooling2D(pool_size=(2, 2),
                         strides=(2, 2),
                         padding='same',
                         name='block3_pool')(conv3_3)

    # Block 4
    conv4_1 = Conv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block4_conv1')(pool3)
    conv4_2 = Conv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block4_conv2')(conv4_1)
    conv4_3 = Conv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block4_conv3')(conv4_2)
    pool4 = MaxPooling2D(pool_size=(2, 2),
                         strides=(2, 2),
                         padding='same',
                         name='block4_pool')(conv4_3)

    # Block 5
    conv5_1 = Conv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block5_conv1')(pool4)
    conv5_2 = Conv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block5_conv2')(conv5_1)
    conv5_3 = Conv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(l2_regularization),
                     name='block5_conv3')(conv5_2)
    pool5 = MaxPooling2D(pool_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='block5_pool')(conv5_3)

    net = Model(inputs=x, outputs=pool5)

    # Get VGG16 weights
    base_net = VGG16(include_top=False, weights='imagenet', input_tensor=x)
    weights = base_net.get_weights()
    weights = {
        'block1_conv1': [weights[0], weights[1]],
        'block1_conv2': [weights[2], weights[3]],
        'block2_conv1': [weights[4], weights[5]],
        'block2_conv2': [weights[6], weights[7]],
        'block3_conv1': [weights[8], weights[9]],
        'block3_conv2': [weights[10], weights[11]],
        'block3_conv3': [weights[12], weights[13]],
        'block4_conv1': [weights[14], weights[15]],
        'block4_conv2': [weights[16], weights[17]],
        'block4_conv3': [weights[18], weights[19]],
        'block5_conv1': [weights[20], weights[21]],
        'block5_conv2': [weights[22], weights[23]],
        'block5_conv3': [weights[24], weights[25]],
    }

    # Set weights to customized layers
    for layer_name, layer_weights in weights.items():
        net.get_layer(layer_name).set_weights(layer_weights)

    return net
Example #29
# model = ResNet101V2()
# model = ResNet152()
# model = ResNet152V2()
# model = ResNet50()
# model = ResNet50V2()
# model = InceptionV3()
# model = InceptionResNetV2()
# model = MobileNet()
# model = MobileNetV2()
# model = DenseNet121()
# model = DenseNet169()
# model = DenseNet201()
# model = NASNetLarge()
# model = NASNetMobile()

vgg16 = VGG16(include_top=False, weights='imagenet',
              input_shape=(224, 224, 3))  # (None, 224, 224, 3)
# vgg16.summary()

act = 'relu'
model = Sequential()

model.add(vgg16)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(Dense(10, activation='softmax'))

model.summary()

# Reusing a well-built pre-trained model = transfer learning
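
A minimal compile step to round the example off (optimizer and metric are assumptions; the fit call is left commented since the data arrays are not defined in this snippet):

# Minimal compile sketch (optimizer/metric assumed); fit is commented out
# because the data arrays are not defined in this snippet
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
# model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)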
Example #30
    y_test)

# Normalize to [0, 1]
x_train, x_test = x_train.astype('float32') / 255, x_test.astype(
    'float32') / 255

# dim
wid, hei, depth = 32, 32, 3

# NOTE: np.resize only recycles the flat buffer to fill the new shape; it does
# not interpolate images, so this is a shape conversion, not a true resize
x_train = np.resize(x_train, (len(x_train), wid, hei, depth))
x_test = np.resize(x_test, (len(x_test), wid, hei, depth))
print(x_train.shape, x_test.shape)  # (60000, 32, 32, 3) (10000, 32, 32, 3)

# Modeling
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(wid, hei, depth))
model = Sequential()
model.add(conv_base)
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(10, activation='softmax'))

conv_base.summary()
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=7)
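
A minimal training sketch wiring in the early-stopping callback defined above (epochs and batch size are assumptions):

# Train with the early-stopping callback (epochs/batch size assumed)
history = model.fit(x_train, y_train,
                    epochs=50, batch_size=128,
                    validation_split=0.2,
                    callbacks=[early_stopping_callback])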