Example #1
# construct the image generator for data augmentation (the opening of
# this call was cut off in the snippet; parameter values other than the
# two shown are assumptions)
aug = ImageDataGenerator(rotation_range=20,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         zoom_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# batch size for the generators below (assumed; its definition was also
# cut off in the snippet)
bSize = 128

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(227, 227)
pp = PatchPreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                bSize,
                                aug=aug,
                                preprocessors=[pp, mp, iap],
                                classes=2)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              bSize,
                              preprocessors=[sp, mp, iap],
                              classes=2)

# initialize the optimizer
opt = Adam(lr=1e-3)
model = AlexNet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# construct the set of callbacks
path = os.path.sep.join([config.OUTPUT_PATH, "{}.png".format(os.getpid())])
callbacks = [TrainingMonitor(path)]
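
The snippet ends before the training call itself. A minimal sketch of the typical continuation with these generators (the epoch count is an assumption):

# train the network, streaming batches from the HDF5 generators
model.fit_generator(trainGen.generator(),
                    steps_per_epoch=trainGen.numImages // bSize,
                    validation_data=valGen.generator(),
                    validation_steps=valGen.numImages // bSize,
                    epochs=75,  # assumed
                    max_queue_size=bSize * 2,
                    callbacks=callbacks,
                    verbose=1)

# close the HDF5 datasets
trainGen.close()
valGen.close()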
Example #2

from sklearn.metrics import classification_report, confusion_matrix
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
import tensorflow as tf
import numpy as np

trainAug = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=10,
    zoom_range=0.1,
    horizontal_flip=True,
    rescale=1 / 255.0,
    fill_mode="nearest")
valAug = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
testAug = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
iap = ImageToArrayPreprocessor()
# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.train_hdf5,
                                config.batchSize,
                                aug=trainAug,
                                preprocessors=[iap],
                                classes=config.numClasses)
valGen = HDF5DatasetGenerator(config.vald_hdf5,
                              config.batchSize,
                              aug=valAug,
                              preprocessors=[iap],
                              classes=config.numClasses)
testGen = HDF5DatasetGenerator(config.test_hdf5,
                               config.batchSize,
                               aug=testAug,
                               preprocessors=[iap],
                               classes=config.numClasses)

# collapse the generators into single data/label matrices (makeOneMatrix
# is a project helper not shown in this snippet)
trainData, trainLabels = makeOneMatrix(trainGen)
valData, valLabels = makeOneMatrix(valGen)
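
A plausible sketch of such a helper (the name and behavior are inferred from how it is used above, so treat this as a hypothetical reconstruction) that drains one pass of a generator into single NumPy matrices:

def makeOneMatrix(gen):
    # stack every batch from a single pass of the generator into one
    # data matrix and one label matrix (hypothetical helper)
    data, labels = [], []
    for (batchData, batchLabels) in gen.generator(passes=1):
        data.append(batchData)
        labels.append(batchLabels)
    return (np.vstack(data), np.vstack(labels))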
Example #3
# imports shown in the snippet, plus the ones it needs but omits
# (pyimagesearch module paths assumed to follow the project layout)
from config import emotion_config as config
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from pyimagesearch.nn.conv.emotionvggnet import EmotionVGGNet
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.models import load_model
import keras.backend as K
import argparse
import os

ap = argparse.ArgumentParser()
ap.add_argument("-c", "--checkpoints", required = True, help = "path to output checkpoint directory")
ap.add_argument("-m", "--model", type = str, help = "path to *specific* model checkpoint to load")
ap.add_argument("-s", "----start-epoch", type = int, default = 0, help = "epoch to restart training at")
args = vars(ap.parse_args())

trainAug = ImageDataGenerator(rotation_range=10, zoom_range=0.1,
                              horizontal_flip=True, rescale=1 / 255.0,
                              fill_mode="nearest")
valAug = ImageDataGenerator(rescale=1 / 255.0)
iap = ImageToArrayPreprocessor()

trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5, config.BATCH_SIZE,
                                aug=trainAug, preprocessors=[iap],
                                classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5, config.BATCH_SIZE,
                              aug=valAug, preprocessors=[iap],
                              classes=config.NUM_CLASSES)

if args["model"] is None:
    print("[INFO] compiling model...")
    model = EmotionVGGNet.build(width=48, height=48, depth=1,
                                classes=config.NUM_CLASSES)
    opt = Adam(lr=1e-3)
    model.compile(loss="categorical_crossentropy", optimizer=opt,
                  metrics=["accuracy"])

else:
    print("[INFO] loading {}...".format(args["model"]))
    model = load_model(args["model"])

    print("[INFO] old learning rate: {}".format(K.get_value(model.optimizer.lr)))
    K.set_value(model.optimizer.lr, 1e-3)
    print("[INFO] new learning rate: {}".format(K.get_value(model.optimizer.lr)))
Example #4
# construct the image generator for data augmentation (the opening of
# this call was cut off in the snippet; parameter values other than the
# three shown are assumptions)
aug = ImageDataGenerator(rotation_range=18,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                64,
                                aug=aug,
                                preprocessors=[sp, mp, iap],
                                classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              64,
                              preprocessors=[sp, mp, iap],
                              classes=config.NUM_CLASSES)

# if there is no specific model checkpoint supplied, then initialize
# the network and compile the model
if args["model"] is None:
    print("[INFO] compiling model...")
    model = DeeperGoogLeNet.build(width=64,
                                  height=64,
                                  depth=3,
                                  classes=config.NUM_CLASSES,
                                  reg=0.0002)  # closing argument assumed; the call was cut off
Example #5
iap = ImageToArrayPreprocessor()
mp = MeanPreprocessor(rMean=means["R"], gMean=means["G"], bMean=means["B"])
pp = PatchPreprocessor(width=227, height=227)
sp = SimplePreprocessor(width=227, height=227)

aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.15,
                         shear_range=0.2,
                         zoom_range=0.2,
                         fill_mode="nearest",
                         horizontal_flip=True)

train_generator = HDF5DatasetGenerator(db_path=configs.TRAIN_HDF5,
                                       batch_size=configs.BATCH_SIZE,
                                       preprocessors=[pp, mp, iap],
                                       aug=aug,
                                       binarize=True,
                                       classes=2)

val_generator = HDF5DatasetGenerator(db_path=configs.TEST_HDF5,
                                     batch_size=configs.BATCH_SIZE,
                                     preprocessors=[sp, mp, iap],
                                     aug=None,
                                     binarize=True,
                                     classes=2)

path = os.path.sep.join([configs.OUTPUT_PATH, f"{os.getpid()}.png"])
callbacks = [TrainingMonitor(plot_path=path)]

opt = Adam(lr=1e-3)
model = AlexNet.build(width=227, height=227, depth=3,
                      classes=configs.NUM_CLASSES)
Example #6

# initialize the image preprocessors
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# load the pretrained network
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

# initialize the testing dataset generator, then make predictions on
# the testing data
print("[INFO] predicting on test data (no crops)...")
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               64,
                               preprocessors=[sp, mp, iap],
                               classes=2)
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 64,
                                      max_queue_size=64 * 2)

# compute the rank-1 and rank-5 accuracies
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()

# re-initialize the testing set generator, this time excluding the
# 'SimplePreprocessor'
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               64,
                               preprocessors=[mp],
                               classes=2)
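
The snippet ends before the crop-based evaluation it sets up (note the so-far-unused CropPreprocessor). A hedged sketch of the usual 10-crop pass, averaging the model's predictions over the crops of each image (assumes numpy is imported as np):

# sketch of the 10-crop evaluation (assumed continuation)
predictions = []
for (images, labels) in testGen.generator(passes=1):
    for image in images:
        # generate the 10 crops, convert each to a Keras-ready array,
        # then average the predictions over the crops
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops],
                         dtype="float32")
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))

(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()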
Example #7
# imports shown in the snippet, plus the ones it needs but omits
# (module paths assumed)
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.croppreprocessor import CropPreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from pyimagesearch.utils.ranked import rank5_accuracy
from keras.models import load_model
import json

# load the RGB mean values
means = json.loads(open(configs.DATASET_MEAN).read())

# initialize the preprocessors
ip = ImageToArrayPreprocessor()
cp = CropPreprocessor(width=227, height=227)
sp = SimplePreprocessor(width=227, height=227)
mp = MeanPreprocessor(rMean=means["R"], gMean=means["G"], bMean=means["B"])

# initialize the testing dataset generator
test_gen = HDF5DatasetGenerator(configs.TEST_HDF5,
                                feature_ref_name="data",
                                batch_size=configs.BATCH_SIZE,
                                preprocessors=[sp, mp, ip])

# load model
model = load_model(configs.MODEL_PATH)

# predict test generator
predictions = model.predict(test_gen.generate(passes=1),
                            steps=test_gen.num_images // configs.BATCH_SIZE)

# compute and display rank-1 accuracy
(rank1, _) = rank5_accuracy(y_true=test_gen.db["labels"], y_pred=predictions)
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))

# close generator
test_gen.close()
Example #8

def super_res_generator(inputDataGen, targetDataGen):
    # start an infinite loop for the training data
    while True:
        # grab the next input images and target outputs, discarding
        # the class labels (which are irrelevant)
        inputData = next(inputDataGen)[0]
        targetData = next(targetDataGen)[0]

        # yield a tuple of the input data and target data
        yield (inputData, targetData)


# initialize the input images and target output images generators
inputs = HDF5DatasetGenerator(config.INPUTS_DB, config.BATCH_SIZE)
targets = HDF5DatasetGenerator(config.OUTPUTS_DB, config.BATCH_SIZE)

# initialize the model and optimizer
print("[INFO] compiling model...")
opt = Adam(lr=0.001, decay=0.001 / config.NUM_EPOCHS)
model = SRCNN.build(width=config.INPUT_DIM, height=config.INPUT_DIM, depth=3)
model.compile(loss="mse", optimizer=opt)

# train the model using our generators
H = model.fit_generator(super_res_generator(inputs.generator(),
                                            targets.generator()),
                        steps_per_epoch=inputs.numImages // config.BATCH_SIZE,
                        epochs=config.NUM_EPOCHS,
                        verbose=1)
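
Every example on this page leans on HDF5DatasetGenerator to stream batches from disk. For context, a minimal sketch of the core pattern (assuming the HDF5 file stores "images" and "labels" datasets and labels are one-hot encoded on the fly):

import h5py
import numpy as np
from keras.utils import to_categorical

class MinimalHDF5Generator:
    # stripped-down illustration of the generator pattern used above
    def __init__(self, dbPath, batchSize, classes=2):
        self.db = h5py.File(dbPath, "r")
        self.numImages = self.db["labels"].shape[0]
        self.batchSize = batchSize
        self.classes = classes

    def generator(self, passes=np.inf):
        # loop over the dataset in batch-sized slices until the
        # requested number of passes is exhausted
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.numImages, self.batchSize):
                images = self.db["images"][i:i + self.batchSize]
                labels = to_categorical(
                    self.db["labels"][i:i + self.batchSize], self.classes)
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()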
Example #9
from pyimagesearch.utils.ranked import rank5_accuracy
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from keras.models import load_model
import json

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

# initialize the testing dataset generator
testGen = HDF5DatasetGenerator(config.TEST_HDF5, 64,
                               preprocessors=[sp, mp, iap],
                               classes=config.NUM_CLASSES)

# load the pre-trained network
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

# make predictions on the testing data
print("[INFO] predicting on test data...")
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 64,
                                      max_queue_size=64 * 2)

# compute the rank-1 and rank-5 accuracies
(rank1, rank5) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
print("[INFO] rank-5: {:.2f}%".format(rank5 * 100))
testGen.close()
Example #10
from config import emotion_config as config
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import argparse

# parse the command line arguments (this block was garbled in the
# snippet; the --model switch is inferred from its use below)
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", type=str, required=True,
                help="path to model checkpoint to load")
args = vars(ap.parse_args())

# initialize the testing data augmenter and image preprocessor
testAug = ImageDataGenerator(rescale=1 / 255.0)
iap = ImageToArrayPreprocessor()

testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               config.BATCH_SIZE,
                               aug=testAug,
                               preprocessors=[iap],
                               classes=config.NUM_CLASSES)

print("[INFO] loading {}...".format(args["model"]))
model = load_model(args["model"])

(loss,
 acc) = model.evaluate_generator(testGen.generator(),
                                 steps=testGen.numImages // config.BATCH_SIZE,
                                 max_queue_size=config.BATCH_SIZE * 2)
print("[INFO] accuracy: {:.2f}".format(acc * 100))
testGen.close()
Example #11
# parse the command line arguments (the opening of this block was cut
# off in the snippet; the earlier arguments are inferred from how the
# parsed values are used below)
ap = argparse.ArgumentParser()
ap.add_argument("-train",
                "--train-db",
                type=str,
                help="path to HDF database",
                default="../datasets/kaggle_dog_vs_cat/hdf5/train_features.hdf5")
ap.add_argument("-c",
                "--classes",
                type=int,
                help="number of classes",
                default=2)
ap.add_argument("-b",
                "--batch-size",
                type=int,
                help="batch size",
                default=64)
ap.add_argument("-test",
                "--test-db",
                type=str,
                help="path to HDF database",
                default="../datasets/kaggle_dog_vs_cat/hdf5/test_features.hdf5")
ap.add_argument("-val",
                "--val-db",
                type=str,
                help="path to HDF database",
                default="../datasets/kaggle_dog_vs_cat/hdf5/val_features.hdf5")

args = vars(ap.parse_args())

# dataset generator
print("[INFO] Loading datasets...")
train_gen = HDF5DatasetGenerator(binarize=True,
                                 db_path=args["train_db"],
                                 classes=args["classes"],
                                 feature_ref_name="data",
                                 batch_size=args["batch_size"])
test_gen = HDF5DatasetGenerator(binarize=True,
                                db_path=args["test_db"],
                                classes=args["classes"],
                                feature_ref_name="data",
                                batch_size=args["batch_size"])
val_gen = HDF5DatasetGenerator(binarize=True,
                               db_path=args["val_db"],
                               classes=args["classes"],
                               feature_ref_name="data",
                               batch_size=args["batch_size"])


def cal_steps(num_images, batch_size):
    # the function body was cut off in the snippet; a typical
    # implementation rounds up so the final partial batch still
    # counts as a step
    return (num_images + batch_size - 1) // batch_size
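
A quick sanity check of the rounding behavior (hypothetical numbers): 1,000 images with a batch size of 64 give 15 full batches plus one partial batch of 40 images.

# 1000 / 64 = 15.625, rounded up to 16 steps
steps = cal_steps(1000, 64)  # == 16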