Example No. 1
    def trainTopModel(self, trainLabels, validLabels, fold):
        from keras import optimizers
        print('[INFO] Training top model type ' + (str(self.model.topType)))
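        # Load the bottleneck features saved for this fold and one-hot encode
        # the integer labels (the `utils` helper is assumed to be imported at
        # module level).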
        trainData, validationData = self.logger.loadBottlenecks(fold)
        trainLabels = utils.transformLabelsToCategorical(
            trainLabels, self.numClasses)
        validationLabels = utils.transformLabelsToCategorical(
            validLabels, self.numClasses)

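        # Compile the classifier head with RMSprop and a small learning rate.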
        self.model.topModel.compile(optimizer=optimizers.RMSprop(lr=1e-6),
                                    loss='categorical_crossentropy',
                                    metrics=['accuracy'])

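        # Train the top model on the bottleneck features and time the run.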
        import time
        t = time.process_time()
        historyGenerated = self.model.topModel.fit(
            trainData,
            trainLabels,
            epochs=self.epochs,
            batch_size=self.batchSize,
            validation_data=(validationData, validationLabels))

        trainingTime = time.process_time() - t

        self.logger.saveWeightsTopModel(self.model, fold)

        return trainingTime, historyGenerated
Example No. 2
    def trainTopModel(self, trainData, trainLabels, validData, validLabels,
                      fold):
        from keras import optimizers
        import math
        print('[INFO] Training top model type ' + (str(self.model.topType)))
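        # Unlike Example No. 1, this variant trains the top model directly on
        # image data, using Keras generators with on-the-fly augmentation.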

        self.model.topModel.compile(
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
        )

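        # Number of generator steps needed to cover every sample once per epoch.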
        trainSamples = len(trainLabels)
        validationSamples = len(validLabels)
        predictSizeTrain = int(math.ceil(trainSamples / self.batchSize))
        predictSizeValidation = int(
            math.ceil(validationSamples / self.batchSize))

        trainLabels = utils.transformLabelsToCategorical(
            trainLabels, self.numClasses)
        validLabels = utils.transformLabelsToCategorical(
            validLabels, self.numClasses)

        # Data augmentation: rescale pixel values and apply random shear, zoom
        # and horizontal flips to the training images (ImageDataGenerator is
        # assumed to be imported at module level).
        trainDatagen = ImageDataGenerator(rescale=1. / 255,
                                          shear_range=0.2,
                                          zoom_range=0.2,
                                          horizontal_flip=True)
        generatorTrain = trainDatagen.flow(trainData,
                                           trainLabels,
                                           shuffle=False,
                                           batch_size=self.batchSize)

        valDatagen = ImageDataGenerator(rescale=1. / 255)
        generatorVal = valDatagen.flow(validData,
                                       validLabels,
                                       shuffle=False,
                                       batch_size=self.batchSize)

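        # Train with the augmented generators and record the elapsed CPU time.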
        import time
        t = time.process_time()
        historyGenerated = self.model.topModel.fit_generator(
            generatorTrain,
            steps_per_epoch=predictSizeTrain,
            epochs=self.epochs,
            validation_data=generatorVal,
            validation_steps=predictSizeValidation)

        trainingTime = time.process_time() - t

        self.logger.saveWeightsTopModel(self.model, fold)

        return trainingTime, historyGenerated
Example No. 3
    def testFineTuneModel(self, validationData, validationLabels, trainingTime,
                          historyGenerated, fold, numLayersFreeze):
        print('[INFO] testing fine tune model')
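        # Rebuild the full model (base network plus top model) for evaluation.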
        fullModel = self.addTopModel()
        validationSamples = len(validationLabels)

        validationLabels = utils.transformLabelsToCategorical(
            validationLabels, self.numClasses)

        testDatagen = ImageDataGenerator(rescale=1. / 255)
        generator = testDatagen.flow(validationData,
                                     validationLabels,
                                     shuffle=False,
                                     batch_size=self.batchSize)

        from keras import optimizers
        fullModel.compile(optimizer=optimizers.RMSprop(lr=1e-6),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        # Evaluate loss and accuracy on the held-out validation fold.
        import math
        testLoss, testAccuracy = fullModel.evaluate_generator(
            generator, steps=int(math.ceil(validationSamples / self.batchSize)))

        self.logger.saveDataFineTuneModel(historyGenerated,
                                          (testLoss, testAccuracy),
                                          trainingTime, fold, numLayersFreeze,
                                          self.model.topType)

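        # Drop the rebuilt model and trigger garbage collection to free memory
        # before the next fold.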
        import gc
        del fullModel
        gc.collect()
Example No. 4
    def testFineTuneModel(self, validationData, validationLabels, trainingTime,
                          historyGenerated, fold, numLayersFreeze):
        print('[INFO] testing fine tune model')
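        # Variant of Example No. 3 that evaluates self.model.topModel directly
        # instead of rebuilding the full model.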
        validationSamples = len(validationLabels)

        validationLabels = utils.transformLabelsToCategorical(
            validationLabels, self.numClasses)

        testDatagen = ImageDataGenerator(rescale=1. / 255)
        generator = testDatagen.flow(validationData,
                                     validationLabels,
                                     shuffle=False,
                                     batch_size=self.batchSize)

        import math
        testLoss, testAccuracy = self.model.topModel.evaluate_generator(
            generator, steps=int(math.ceil(validationSamples / self.batchSize)))

        self.logger.saveDataFineTuneModel(historyGenerated,
                                          (testLoss, testAccuracy),
                                          trainingTime, fold, numLayersFreeze,
                                          self.model.topType)

        import gc
        del self.model.topModel
        gc.collect()
Example No. 5
    def testTopModel(self, validationData, validationLabels, trainingTime,
                     historyGenerated, fold):
        print('[INFO] testing top model type ' + (str(self.model.topType)))
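        # Evaluate the trained top model on the validation fold using a
        # rescaled (non-augmented) image generator.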

        validationSamples = len(validationLabels)

        validationLabels = utils.transformLabelsToCategorical(
            validationLabels, self.numClasses)

        testDatagen = ImageDataGenerator(rescale=1. / 255)
        generator = testDatagen.flow(validationData,
                                     validationLabels,
                                     shuffle=False,
                                     batch_size=self.batchSize)

        import math
        testLoss, testAccuracy = self.model.topModel.evaluate_generator(
            generator, steps=int(math.ceil(validationSamples / self.batchSize)))

        self.logger.saveDataTopModel(historyGenerated,
                                     (testLoss, testAccuracy), trainingTime,
                                     fold, self.model.topType)
Example No. 6
    def fineTune(self, trainData, trainLabels, validData, validLabels,
                 numLayersFreeze, bestTopType, pathBestTop, fold):
        import math

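        # Restore the weights of the best-performing top model and record its
        # architecture type before attaching it to the base network.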
        self.model.topModel = self.logger.loadWeights(self.model.topModel,
                                                      pathBestTop)
        self.model.topType = modelType.modelType[bestTopType]

        fullModel = self.addTopModel()

        # Freeze the first numLayersFreeze layers so only the remaining layers
        # are updated during fine-tuning.
        for layer in fullModel.layers[:numLayersFreeze]:
            layer.trainable = False

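        # Ask TensorFlow (1.x API) to report tensor allocations if an
        # out-of-memory error occurs during fine-tuning.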
        import tensorflow as tf
        run_opts = tf.RunOptions(report_tensor_allocations_upon_oom=True)

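        # Recompile with SGD and a very low learning rate so fine-tuning only
        # makes small updates to the pre-trained weights.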
        from keras import optimizers
        fullModel.compile(loss='categorical_crossentropy',
                          optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                          metrics=['accuracy'],
                          options=run_opts)

        trainSamples = len(trainLabels)
        validationSamples = len(validLabels)
        predictSizeTrain = int(math.ceil(trainSamples / self.batchSize))
        predictSizeValidation = int(
            math.ceil(validationSamples / self.batchSize))

        trainLabels = utils.transformLabelsToCategorical(
            trainLabels, self.numClasses)
        validLabels = utils.transformLabelsToCategorical(
            validLabels, self.numClasses)

        print('[INFO] Fine tuning with ' + str(numLayersFreeze) +
              ' layers frozen.')

        # Data augmentation
        trainDatagen = ImageDataGenerator(rescale=1. / 255,
                                          shear_range=0.2,
                                          zoom_range=0.2,
                                          horizontal_flip=True)
        generatorTrain = trainDatagen.flow(trainData,
                                           trainLabels,
                                           shuffle=False,
                                           batch_size=self.batchSize)

        valDatagen = ImageDataGenerator(rescale=1. / 255)
        generatorVal = valDatagen.flow(validData,
                                       validLabels,
                                       shuffle=False,
                                       batch_size=self.batchSize)

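        # Fine-tune with the augmented generators for a fixed 10 epochs and
        # time the run.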
        import time
        t = time.process_time()
        historyGenerated = fullModel.fit_generator(
            generatorTrain,
            steps_per_epoch=predictSizeTrain,
            epochs=10,
            validation_data=generatorVal,
            validation_steps=predictSizeValidation)

        trainingTime = time.process_time() - t

        self.logger.saveWeightsFineTune(fullModel, fold, numLayersFreeze)

        import gc
        del fullModel
        gc.collect()

        return trainingTime, historyGenerated