Example #1
    def testNetwork(self, trained_model_path, test_dataset_path):
        test_augmentation = ImageDataGenerator(rescale=1 / 255)
        iap = ImageToArrayPreprocessor()

        # get file details
        config = BuildDataSet(base_path=self.base_path,
                              num_classes=self.num_classes)

        # use the configured batch size so it matches the steps calculation below
        test_generation = HDF5DatasetGenerator(test_dataset_path,
                                               config.batch_size,
                                               aug=test_augmentation,
                                               preprocessors=[iap],
                                               classes=config.num_classes)

        # load pre-trained model to test accuracy
        print("\n Loading model: {0}".format(trained_model_path))

        trained_model = load_model(trained_model_path)

        # evaluate model against test set
        print("Evaluate model against test set")
        (test_loss, test_acc) = trained_model.evaluate_generator(
            test_generation.generator(),
            steps=test_generation.numImages // config.batch_size,
            max_queue_size=config.batch_size * 2)

        print("\n \n FINAL MODEL ACCURACY: {:.2f} %".format(test_acc * 100))

        print(
            "\n \n *********************Testing Complete*********************\n"
        )
        return
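For context, a hedged sketch of how this method might be invoked; the class name, constructor arguments, and file paths below are assumptions, not names taken from the source:

# hypothetical driver; class name, constructor args, and paths are assumed
tester = EmotionTester(base_path="./data", num_classes=7)
tester.testNetwork(trained_model_path="./output/emotion_weights-50.hdf5",
                   test_dataset_path="./data/test.hdf5")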
Example #2

    def __init__(self,
                 input_path,
                 image_path,
                 image_path_val,
                 validation=0.25,
                 test=0,
                 verbose=False,
                 included_folders=None,  # avoid a shared mutable default
                 image_size=224,
                 only_val=False):
        self.input = input_path
        self.image_path = image_path
        self.image_path_val = image_path_val
        self.validation = validation
        self.test = test
        self.verbose = verbose
        self.included_folders = included_folders if included_folders is not None else []
        self.image_size = image_size
        self.only_val = only_val

        # Image preprocessors, sized from the image_size argument
        self.preprocessors = [
            AspectAwarePreprocessor(self.image_size, self.image_size),
            ImageToArrayPreprocessor()
        ]

        if self.validation < 0 or self.validation > 1:
            raise ValueError(
                'Error, validation must be a float between 0 and 1')
        if self.test < 0 or self.test > 1:
            raise ValueError('Error, test must be a float between 0 and 1')

        self.train_split = round(1 - (self.validation + self.test), 2)
        if self.train_split < 0:
            raise ValueError(
                'Error, validation and test can\'t add to more than 1')

        print("Input split: train {}%, validation {}%, test {}%".format(
            self.train_split * 100, self.validation * 100, self.test * 100))
        if self.verbose:
            print("===== Dataset =====")
Example #3
# load encoded_class to category_id mapping...
mapping_dict = json.loads(open(ID_MAPPING, "r").read())
encodedLabel_to_className = mapping_dict["encodedLabel_to_className"]
className_to_categoryID = mapping_dict["className_to_categoryID"]

# load submission.csv & reset 0
submission = pd.read_csv("./sample_submission.csv")
submission["Category"] = [0] * submission.shape[0]
print("[INFO] sample_sumission\n")
print(submission.head())
print("[INFO] expect to predict =", submission.shape)

## preprocessing
aap = AspectAwarePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
means = json.loads(open(DATASET_MEAN).read())
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
sdl = SimpleDatasetLoader(preprocessors=[aap, mp, iap], mode="test")

# load in images
print("[INFO] loading test images....")
imagePaths = list(paths.list_images(args["dataset"]))
print("[INFO] fetched %d images to test" % len(imagePaths))

data, names = sdl.load(imagePaths, verbose=10000)  # progress update every 10k images
testX = data.astype("float") / 255.0
imageIds = [name.split(".")[0] for name in names]

## load in models & predict
with tf.device("/cpu:0"):
Example #4

    def train_dataset(self, num_classes, pretrained_model_name, new_model_name, new_learning_rate, num_epochs):
        # use the supporting config class to resolve the dataset file paths
        config = BuildDataSet(self.base_path, num_classes)
        (input_csv_file, train_HDF5, val_HDF5, test_HDF5) = config.config_dataset()

        # training and validation data augmentation using Keras ImageDataGenerator

        training_data_augmentation = ImageDataGenerator(rotation_range=25, zoom_range=0.5, horizontal_flip=True,
                                                        rescale=(1/255))
        validation_data_augmentation = ImageDataGenerator(rescale=(1/255))

        # Initialize image to array preprocessor class used by Adrian's HDF5 data generator
        iap = ImageToArrayPreprocessor()

        # Now using Adrian's function for data generation
        training_generator = HDF5DatasetGenerator(train_HDF5, config.batch_size, aug=training_data_augmentation,
                                                  preprocessors=[iap], classes=config.num_classes)

        validation_generator = HDF5DatasetGenerator(val_HDF5, config.batch_size, aug=validation_data_augmentation,
                                                    preprocessors=[iap], classes=config.num_classes)

        if pretrained_model_name is None:
            # Compile model and start training from EPOCH 1

            # set Adam Optimizer to default rate
            opt = Adam(lr=1e-3)
            emo_model = Emonet(config.num_classes)
            # emo_model = Emonet_extend(config.num_classes)
            emo_model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
        else:
            emo_model = load_model(pretrained_model_name)
            if new_learning_rate is None:
                old_learning_rate = K.get_value(emo_model.optimizer.lr)
                new_learning_rate = old_learning_rate / 10
                K.set_value(emo_model.optimizer.lr, new_learning_rate)
            else:
                old_learning_rate = K.get_value(emo_model.optimizer.lr)
                K.set_value(emo_model.optimizer.lr, new_learning_rate)

            print("\n Changing learning rate from {0} to {1}".format(old_learning_rate, new_learning_rate))

        # list of keras callbacks
        checkpoint_filepath = os.path.join(config.output_path, "emotion_weights-{epoch:02d}.hdf5")
        emotion_callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1),
                             ModelCheckpoint(checkpoint_filepath, monitor='val_acc', verbose=1, period=5)]

        # check number of epochs
        if num_epochs is None:
            num_epochs = 50

        print('\n\n*************TRAINING START*******************\n')

        emotion_train = emo_model.fit_generator(training_generator.generator(),
                                                steps_per_epoch=training_generator.numImages // config.batch_size,
                                                validation_data=validation_generator.generator(),
                                                validation_steps=validation_generator.numImages // config.batch_size,
                                                epochs=num_epochs, max_queue_size=config.batch_size * 2,
                                                callbacks=emotion_callbacks)

        # close the training and validation generators
        training_generator.close()
        validation_generator.close()

        emo_model.save(filepath=os.path.join(config.output_path, new_model_name))

        print('\n\n*************TRAINING COMPLETE**********************\n')

        self.model_plot_history(emotion_train)
        return
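The model_plot_history helper called above is not shown in the source; a minimal sketch of what such a method typically does with the Keras History object (the output filename is an assumption):

    def model_plot_history(self, history):
        # hypothetical sketch; the real helper is not part of this excerpt
        import matplotlib.pyplot as plt
        plt.figure()
        for key in ("loss", "val_loss", "acc", "val_acc"):
            if key in history.history:
                plt.plot(history.history[key], label=key)
        plt.xlabel("epoch")
        plt.legend()
        plt.savefig("training_history.png")  # assumed output path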
Example #5
args = vars(parser.parse_args())

## script-level settings
B = 128  # prediction batch size
modelname = args["model"]

# map model names to checkpoint paths and load only the requested model,
# instead of loading every checkpoint into memory up front
ModelPaths = {
    "alexnet": "./output/model-alexnet-075-0.2944_without_padding_10283.hdf5",
    "alexnet2": "./output/model-alexnet2-075-0.2972_with_padding_9299.hdf5",
}
model = load_model(ModelPaths[modelname])

aap = AspectAwarePreprocessor(256, 256)
iap = ImageToArrayPreprocessor()
cp1 = CropPreprocessor(227, 227)  # 10-crops TTA

## list & sort imagePaths in testset
#imagePaths = sorted(list(paths.list_images("./data/test1")))
imagePaths = sorted(list(paths.list_images("./data/redux-edition/test")))
N = len(imagePaths)
useTTA = str(args["TTA"]).lower() == "true"  # the CLI flag arrives as a string

print("[INFO] using %s model..." % modelname)
predictions = []
submission = pd.read_csv("./sample_submission.csv")  # columns = [id,label]

# preprocess batch images & do prediction
if useTTA == "True":
    print("[INFO] applying TTA..")
Example #6
import config.dogs_vs_cats_config as config
from keras.models import load_model
# the preprocessor imports below assume the pyimagesearch module layout
from pyimagesearch.preprocessing import (SimplePreprocessor, PatchPreprocessor,
                                         ImageToArrayPreprocessor,
                                         CropPreprocessor, MeanPreprocessor)
import json
import os
import numpy as np
import matplotlib
matplotlib.use("Agg")
from tqdm import tqdm


# parameters
BATCH_SIZE = 64

## initialize all image preprocessors
sp = SimplePreprocessor(227, 227)
pp = PatchPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()
cp = CropPreprocessor(227, 227)

# load in RGB mean values of training set
trainmeans = json.loads(open("./output/dogs_vs_cats_train_mean.json").read())
mp = MeanPreprocessor(trainmeans["R"], trainmeans["G"], trainmeans["B"])


## load pre-trained model
#print("[INFO] predicting with AlexNet...")
#model = load_model(os.path.join(config.OUTPUT_PATH, \
#        "model-alexnet-075-0.2944_without_padding_10283.hdf5"))

print("[INFO] predicting with AlexNet2 (with padding)...")
model = load_model(os.path.join(config.OUTPUT_PATH, \
        "model-alexnet2-075-0.2972_with_padding_9299.hdf5"))