Code example #1
    def testNetwork(self, trained_model_path, test_dataset_path):
        test_augmentation = ImageDataGenerator(rescale=1 / 255)
        iap = ImageToArrayPreprocessor()

        # get file details
        config = BuildDataSet(base_path=self.base_path,
                              num_classes=self.num_classes)

        test_generation = HDF5DatasetGenerator(test_dataset_path,
                                               config.batch_size,  # must match the steps computation below
                                               aug=test_augmentation,
                                               preprocessors=[iap],
                                               classes=config.num_classes)

        # load pre-trained model to test accuracy
        print("\n Loading model: {0}".format(trained_model_path))

        trained_model = load_model(trained_model_path)

        # evaluate model against test set
        print("Evaluate model against test set")
        (test_loss, test_acc) = trained_model.evaluate_generator(
            test_generation.generator(),
            steps=test_generation.numImages // config.batch_size,
            max_queue_size=config.batch_size * 2)

        print("\n \n FINAL MODEL ACCURACY: {:.2f} %".format(test_acc * 100))

        print(
            "\n \n *********************Testing Complete*********************\n"
        )
        return
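The ImageToArrayPreprocessor used here (and in most of the examples below) is a project-local class whose definition is not shown. In the pyimagesearch-style codebases these snippets come from, it is typically a thin wrapper around Keras's img_to_array; a minimal sketch under that assumption:

from keras.preprocessing.image import img_to_array

class ImageToArrayPreprocessor:
    # sketch of the conventional implementation, not the project's own code
    def __init__(self, dataFormat=None):
        # "channels_first" / "channels_last"; None defers to the Keras config
        self.dataFormat = dataFormat

    def preprocess(self, image):
        # reorder the image channels into a Keras-compatible array
        return img_to_array(image, data_format=self.dataFormat)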
Code example #2
## augmentation
means = json.loads(open(DATASET_MEAN, "r").read())
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
sp = SimplePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
#aap = AspectAwarePreprocessor(64, 64)
#sdl = SimpleDatasetLoader(preprocessors=[aap, mp, iap])

aug = ImageDataGenerator(horizontal_flip=True)

# initialize data generators
trainGen = HDF5DatasetGenerator(TRAIN_HDF5,
                                64,
                                aug=aug,
                                preprocessors=[sp, mp, iap],
                                binarize=False,
                                classes=NUM_CLASSES,
                                mode="r+")
valGen = HDF5DatasetGenerator(VAL_HDF5,
                              64,
                              aug=aug,
                              preprocessors=[sp, mp, iap],
                              binarize=False,
                              classes=NUM_CLASSES,
                              mode="r+")

# apply label smoothing to one-hot encoded labels
if SMOOTH_ONEHOT:
    print("[INFO] smoothing train labels...")
    train_labels = np.zeros((trainGen.numImages, NUM_CLASSES))
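The snippet cuts off right after allocating the smoothed-label matrix. For context, label smoothing replaces each hard one-hot row with a softened distribution; a minimal sketch of the usual computation (the factor value and the remaining lines are assumptions, not the project's code):

import numpy as np

def smooth_labels(labels, num_classes, factor=0.1):
    # one-hot encode the integer labels, then move `factor` of each row's
    # probability mass uniformly onto all classes
    onehot = np.eye(num_classes)[np.asarray(labels, dtype="int")]
    return onehot * (1.0 - factor) + factor / num_classes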
Code example #3
File: train_alexnet.py Project: zlyin/dogs_vs_cats
# initialize the data augmenter for the training set
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest",
)

trainGen = HDF5DatasetGenerator(
    config.TRAIN_HDF5,
    BATCH_SIZE,
    # extract a random patch => learn discriminative patterns
    # subtract the mean, convert to a Keras array
    preprocessors=[pp, mp, iap],
    aug=aug,
    classes=2,
)

# initialize the data generator for the validation set
valGen = HDF5DatasetGenerator(
    config.VAL_HDF5,
    BATCH_SIZE,
    # RESIZE the original image => validate/test on the whole image
    # subtract the mean, convert to a Keras array
    preprocessors=[sp, mp, iap],
    classes=2,
)
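The pp preprocessor referenced by the "extract a random patch" comment is not defined in the snippet. In this family of code it is usually a random-crop preprocessor built on scikit-learn's extract_patches_2d; a sketch under that assumption:

from sklearn.feature_extraction.image import extract_patches_2d

class PatchPreprocessor:
    # sketch of the conventional random-crop preprocessor
    def __init__(self, width, height):
        self.width = width
        self.height = height

    def preprocess(self, image):
        # sample one random (height x width) crop; random crops at train
        # time act as regularization, which is why valGen resizes instead
        return extract_patches_2d(image, (self.height, self.width),
                                  max_patches=1)[0]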
Code example #4
from imagetoarraypreprocessor import ImageToArrayPreprocessor
from simplepreprocessor import SimplePreprocessor
from meanpreprocessor import MeanPreprocessor
from ranked import rank5_accuracy
from hdf5datasetgenerator import HDF5DatasetGenerator
from keras.models import load_model
import json
import config  # project configuration module (assumed local import)

means = json.loads(open(config.DATASET_MEAN).read())

sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

testGen = HDF5DatasetGenerator(config.TEST_HDF5,  # evaluate on the test split
                               64,
                               preprocessors=[sp, mp, iap],
                               classes=config.NUM_CLASSES)

print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

print("[INFO] predictiong on test data...")
preds = model.predict_generator(testGen.generator(),
                                steps=testGen.numImages // 64,
                                max_queue_size=64 * 2)

(rank_1, rank_5) = rank5_accuracy(preds, testGen.db["labels"])
print("[INFO] rank-1:{:.2f}".format(rank_1 * 100))
print("[INFO] rank-5:{:.2f}".format(rank_5 * 100))

testGen.close()
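rank5_accuracy is imported from the local ranked module, whose body is not shown. A minimal sketch of the standard rank-1/rank-5 computation it performs, assuming preds is an (N, num_classes) score matrix and the labels are integer class ids:

import numpy as np

def rank5_accuracy(preds, labels):
    rank1 = rank5 = 0
    for (p, gt) in zip(preds, labels):
        # class indices sorted by predicted score, highest first
        p = np.argsort(p)[::-1]
        if gt in p[:5]:
            rank5 += 1
        if gt == p[0]:
            rank1 += 1
    n = float(len(preds))
    return (rank1 / n, rank5 / n)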
Code example #5
aug = ImageDataGenerator(rotation_range=18,
                         zoom_range=0.15,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

means = json.loads(open(config.DATASET_MEAN).read())

sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                64,
                                aug=aug,
                                preprocessors=[sp, mp, iap],
                                classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              64,
                              preprocessors=[sp, mp, iap],
                              classes=config.NUM_CLASSES)

figPath = os.path.sep.join([args["output"], "{}.png".format(os.getpid())])
jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])
callbacks = [
    TrainingMonitor(figPath, jsonPath=jsonPath),
    LearningRateScheduler(poly_decay)
]
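poly_decay is handed to LearningRateScheduler but never defined in the snippet. The name conventionally refers to a polynomial decay schedule; a sketch under that assumption (INIT_LR, MAX_EPOCHS, and POWER are illustrative values, not the project's):

INIT_LR = 1e-2     # assumed initial learning rate
MAX_EPOCHS = 75    # assumed total number of training epochs
POWER = 1.0        # 1.0 makes the decay linear

def poly_decay(epoch):
    # decay the learning rate polynomially so it reaches 0 at MAX_EPOCHS
    return INIT_LR * (1 - (epoch / float(MAX_EPOCHS))) ** POWER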

print("[INFO] compiling model...")
Code example #6
from alexnet import Alexnet
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
import json
import os

aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15, width_shift_range=0.2, 
	height_shift_range=0.2, shear_range=0.15, horizontal_flip=True, fill_mode="nearest")

means = json.loads(open(config.DATASET_MEAN).read())
sp = SimplePreprocessor(227, 227)
pp = PatchPreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5, 128, aug=aug, preprocessors=[pp, mp, iap], classes=2)
valGen = HDF5DatasetGenerator(config.VAL_HDF5, 128, aug=aug, preprocessors=[sp, mp, iap], classes=2)

print("[INFO] compiling model...")
opt = Adam(lr=1e-3)
model = Alexnet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

path = os.path.sep.join([config.OUTPUT_PATH, "{}.png".format(os.getpid())])
callbacks = [TrainingMonitor(path)]

model.fit_generator(trainGen.generator(), steps_per_epoch=trainGen.numImages // 128, 
	validation_data=valGen.generator(), validation_steps=valGen.numImages // 128, epochs=75, 
	max_queue_size=128*2, callbacks=callbacks, verbose=1)

print("[INFO] serializing model...")
Code example #7
    def train_dataset(self, num_classes, pretrained_model_name, new_model_name, new_learning_rate, num_epochs):
        # call the supporting classes that set up training
        config = BuildDataSet(self.base_path, num_classes)   # dataset configuration object
        (input_csv_file, train_HDF5, val_HDF5, test_HDF5) = config.config_dataset()     # returned file paths

        # construct training and validation data-augmentation templates using Keras functions

        training_data_augmentation = ImageDataGenerator(rotation_range=25, zoom_range=0.5, horizontal_flip=True,
                                                        rescale=(1/255))
        validation_data_augmentation = ImageDataGenerator(rescale=(1/255))

        # Initialize image to array preprocessor class used by Adrian's HDF5 data generator
        iap = ImageToArrayPreprocessor()

        # Now using Adrian's function for data generation
        training_generator = HDF5DatasetGenerator(train_HDF5, config.batch_size, aug=training_data_augmentation,
                                                  preprocessors=[iap], classes=config.num_classes)

        validation_generator = HDF5DatasetGenerator(val_HDF5, config.batch_size, aug=validation_data_augmentation,
                                                    preprocessors=[iap], classes=config.num_classes)

        if pretrained_model_name is None:
            # Compile model and start training from EPOCH 1

            # set Adam Optimizer to default rate
            opt = Adam(lr=1e-3)
            emo_model = Emonet(config.num_classes)
            # emo_model = Emonet_extend(config.num_classes)
            emo_model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
        else:
            emo_model = load_model(pretrained_model_name)
            if new_learning_rate is None:
                old_learning_rate = K.get_value(emo_model.optimizer.lr)
                new_learning_rate = old_learning_rate / 10
                K.set_value(emo_model.optimizer.lr, new_learning_rate)
            else:
                old_learning_rate = K.get_value(emo_model.optimizer.lr)
                K.set_value(emo_model.optimizer.lr, new_learning_rate)

            print("\n Changing learning rate from {0} to {1}".format(old_learning_rate, new_learning_rate))

        # list of keras callbacks
        checkpoint_filepath = os.path.join(config.output_path, "emotion_weights-{epoch:02d}.hdf5")
        emotion_callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1),
                             ModelCheckpoint(checkpoint_filepath, monitor='val_acc', verbose=1, period=5)]

        # check number of epochs
        if num_epochs is None:
            num_epochs = 50

        print('\n\n*************TRAINING START*******************\n')

        emotion_train = emo_model.fit_generator(training_generator.generator(),
                                                steps_per_epoch=training_generator.numImages // config.batch_size,
                                                validation_data=validation_generator.generator(),
                                                validation_steps=validation_generator.numImages // config.batch_size,
                                                epochs=num_epochs, max_queue_size=config.batch_size * 2,
                                                callbacks=emotion_callbacks)

        # close the training and validation generators
        training_generator.close()
        validation_generator.close()

        emo_model.save(filepath=os.path.join(config.output_path, new_model_name))

        print('\n\n*************TRAINING COMPLETE**********************\n')

        self.model_plot_history(emotion_train)
        return
Code example #8
train_datagen = ImageDataGenerator(rescale=1.0 / 255,
                                   rotation_range=20,
                                   width_shift_range=0.15,
                                   height_shift_range=0.15,
                                   zoom_range=0.1,
                                   shear_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

val_datagen = ImageDataGenerator(rescale=1.0 / 255)

iap = ImageToArrayPreprocessor()
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                config.BATCH_SIZE,
                                aug=train_datagen,
                                preprocessors=[iap],
                                classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              config.BATCH_SIZE,
                              aug=val_datagen,
                              preprocessors=[iap],
                              classes=config.NUM_CLASSES)

EPOCHS = 100
INIT_LR = 1e-2
DECAY_RATE = 1.0
FACTOR = 0.1

lr_decay_1 = LearningRateScheduler(
    lambda epoch: INIT_LR * (1 / (1 + DECAY_RATE * epoch)))
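For intuition, the lambda above is inverse time decay, lr(epoch) = INIT_LR / (1 + DECAY_RATE * epoch). With INIT_LR = 1e-2 and DECAY_RATE = 1.0 as set here, the rate halves after the first epoch and keeps shrinking harmonically:

# worked values for the schedule above
for epoch in range(3):
    print(INIT_LR * (1 / (1 + DECAY_RATE * epoch)))
# epoch 0: 0.01 / 1 = 0.010
# epoch 1: 0.01 / 2 = 0.005
# epoch 2: 0.01 / 3 ≈ 0.0033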
Code example #9
from hdf5datasetgenerator import HDF5DatasetGenerator
from imagetoarraypreprocessor import ImageToArrayPreprocessor
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
import argparse
import config  # project configuration module (assumed local import)

ap = argparse.ArgumentParser()
ap.add_argument("-m",
                "--model",
                type=str,
                help="path to model checkpoint to load")
args = vars(ap.parse_args())

testGen = ImageDataGenerator(rescale=1.0 / 255)

iap = ImageToArrayPreprocessor()

testAugGen = HDF5DatasetGenerator(config.TEST_HDF5,
                                  config.BATCH_SIZE,
                                  aug=testGen,
                                  preprocessors=[iap],
                                  classes=config.NUM_CLASSES)

model = load_model(args['model'])

loss, acc = model.evaluate_generator(testAugGen.generator(),
                                     steps=testAugGen.numImages // config.BATCH_SIZE)

print('[Accuracy] {:.2f}'.format(acc * 100))

testAugGen.close()
Code example #10
# initialize the image preprocessors
sp = SimplePreprocessor(224, 224)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
#cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()
#
# load the pretrained network
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)
#
classNames = {0: "Non-Recyclable", 1: "Organic", 2: "Recyclable"}
# initialize the testing dataset generator, then make predictions on
# the testing data
print("[INFO] predicting on test data (no crops)...")
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               32,
                               preprocessors=[sp, mp, iap],
                               classes=len(classNames))

# reset the testing generator and then use our trained model to
# make predictions on the data
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 32,
                                      max_queue_size=10)

print(
    classification_report(testGen.db["labels"],
                          predictions.argmax(axis=1),
                          target_names=list(classNames.values())))

Code example #11
assert os.path.exists(args["model"])

## hyperparameters
BATCH = 64
MODEL_PATH = args["model"]

## load RGB means of training data
means = json.loads(open(cfg.DATASET_MEAN).read())

# image preprocessors
sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

# create testGen
testGen = HDF5DatasetGenerator(cfg.TEST_HDF5, BATCH, \
        preprocessors=[sp, mp, iap], classes=cfg.NUM_CLASSES)

# load model & make predictions
print("[INFO] loading %s ..." % MODEL_PATH)
model = load_model(MODEL_PATH)

print("[INFO] predicting on the test data...")
predictions = model.predict_generator(
    testGen.generator(),
    steps=testGen.numImages // BATCH,
    max_queue_size=BATCH * 2,
)

# compute rank1 & rank5 accs
(rank1, rank5) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1 acc = {:.3f}%".format(rank1 * 100))
Code example #12
File: predict_v2.py Project: zlyin/Kaggle_iWildCam
data_class_weights = list(class_weights_dict.values())
print("[INFO] class weights (min, max) =",
      (min(data_class_weights), max(data_class_weights)))

# augmentation
sp = SimplePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
means = json.loads(open(DATASET_MEAN).read())
mp = MeanPreprocessor(means["R"], means["G"], means["B"])

# initialize data generators
if MODE == "evaluate":
    print("[INFO] evaluating on validation set...")
    valGen = HDF5DatasetGenerator(VAL_HDF5,
                                  BATCH,
                                  preprocessors=[sp, mp, iap],
                                  binarize=False,
                                  classes=NUM_CLASSES)
elif MODE == "predict":
    print("[INFO] predicting on test set...")
    testGen = HDF5DatasetGenerator(TEST_HDF5,
                                   BATCH,
                                   preprocessors=[sp, mp, iap],
                                   binarize=False)
    imageIds = [name.split(".")[0] for name in testGen.db["labels"]]
"""
- load in models & predict
"""
with tf.device("/cpu:0"):
    model = load_model(MODEL, custom_objects={"f1_score": f1_score})
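load_model needs f1_score passed via custom_objects because Keras cannot deserialize user-defined metrics on its own. The project's metric body is not shown; a common Keras-backend F1 implementation it may resemble:

from tensorflow.keras import backend as K

def f1_score(y_true, y_pred):
    # threshold the predictions, then compute per-class precision/recall
    y_pred = K.round(K.clip(y_pred, 0, 1))
    tp = K.sum(y_true * y_pred, axis=0)
    fp = K.sum((1 - y_true) * y_pred, axis=0)
    fn = K.sum(y_true * (1 - y_pred), axis=0)
    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    # harmonic mean of precision and recall, averaged over classes
    return K.mean(2 * precision * recall / (precision + recall + K.epsilon()))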
Code example #13
#print("[INFO] predicting with AlexNet...")
#model = load_model(os.path.join(config.OUTPUT_PATH, \
#        "model-alexnet-075-0.2944_without_padding_10283.hdf5"))

print("[INFO] predicting with AlexNet2 (with padding)...")
model = load_model(os.path.join(config.OUTPUT_PATH, \
        "model-alexnet2-075-0.2972_with_padding_9299.hdf5"))



## initialize HDF5DatasetGenerator for the test set
print("[INFO] evaluating on test set WITHOUT crop/TTA ...")
testGen1 = HDF5DatasetGenerator(
        config.TEST_HDF5, 
        BATCH_SIZE,
        # resize, subtract mean, convert to Keras array
        preprocessors=[sp, mp, iap],
        classes=2,
        )

predictions1 = model.predict_generator(
        testGen1.generator(),
        steps=testGen1.numImages // BATCH_SIZE, 
        max_queue_size=BATCH_SIZE,
        )

rank1acc1, _ = rank5_accuracy(predictions1, testGen1.db["labels"])
print("[INFO] rank-1 accuracy = {:.2f}%".format(rank1acc1 * 100))
testGen1.close()

Code example #14

## load pre-trained model & initialize HDF5DatasetGenerator for the validation set
print("[INFO] predicting with ResNet50...")
model = load_model(os.path.join(OUTPUT_PATH, \
        "model-resnet50_new_head-002-0.0974-11163.hdf5"))   # for test 6
#        "model-resnet50_new_head-003-0.0933-5503.hdf5"))   # for test 5


print("[INFO] evaluating on valset WITHOUT crop/TTA ...")
valGen1 = HDF5DatasetGenerator(
        VAL_HDF5,
        BATCH_SIZE,
        # resize, subtract mean, convert to Keras array
        #preprocessors=[pp, mp, iap],  
        #preprocessors=[pp, iap],  
        #preprocessors=[sp, iap],  
        #preprocessors=[sp, mp, iap],  
        preprocessors=[aap, mp, iap],  
        classes=NUM_CLASSES,
        )


## do prediction & calculate scores
predictions1 = model.predict_generator(
        valGen1.generator(),
        steps=valGen1.numImages // BATCH_SIZE + 1, 
        max_queue_size=BATCH_SIZE,
        )

print("[INFO] log loss =", log_loss(valGen1.db["labels"], predictions1))
Code example #15
    cv2.imshow("org", image)
    #cvs[:image.shape[0], :image.shape[1], :] = image

    copy = image.copy()
    for p in [mp]:
        copy = p.preprocess(copy)

    cvs = np.zeros(copy.shape)
    cvs[:, :, :] = copy
    cv2.imshow("compare", cvs)
    cv2.waitKey(0)

    pass

## validate HDF5 dataset
valGen1 = HDF5DatasetGenerator(
    VAL_HDF5,  ### VALset all dogs????
    #TEST_HDF5,
    #TRAIN_HDF5,
    #TRAINVAL_HDF5,
    BATCH_SIZE,
    # resize, subtract mean, convert to Keras array
    #preprocessors=[pp, mp, iap],
    classes=NUM_CLASSES,
)
I, L = [], []
for images, labels in valGen1.generator(passes=1):
    print(labels.shape)
    print(labels[0])
    sys.exit()
Code example #16
import progressbar
import json

means = json.loads(open(config.DATASET_MEAN).read())

sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

print("[INFO] predicting on test data(no crop)...")
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               64,
                               preprocessors=[sp, mp, iap],
                               classes=2)
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 64,
                                      max_queue_size=64 * 2)

(rank_1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1:{:.2f}%".format(rank_1 * 100))
testGen.close()

testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               64,
                               preprocessors=[mp],
                               classes=2)
predictions = []
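The snippet ends just as the over-sampled (10-crop) evaluation begins: the second generator applies only the mean preprocessor, deferring cropping and array conversion to the prediction loop. A sketch of the crop/TTA loop that conventionally follows (an assumption based on the cp and iap preprocessors built above, not the original's remaining lines):

import numpy as np

widgets = ["Evaluating: ", progressbar.Percentage(), " ", progressbar.Bar()]
pbar = progressbar.ProgressBar(maxval=testGen.numImages // 64,
                               widgets=widgets).start()

# one pass over the test data
for (i, (images, labels)) in enumerate(testGen.generator(passes=1)):
    for image in images:
        # generate the 10 crops, convert each to a Keras array,
        # predict on the batch of crops, and average the scores
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops], dtype="float32")
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))
    pbar.update(i)

pbar.finish()
(rank_1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1 (with crops):{:.2f}%".format(rank_1 * 100))
testGen.close()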