Code example #1
    def configure(self):
        """ Reconfigures the neural network in case any parameter changed.

        It must be used after calling the methods named as `set_*`. It can be
        used after all the setters are run (not need to configure for each one)
        See `set_optimizer`, `set_model`, `set_batch_size`
        """
        if any(not os.path.exists(path)
               for path in (self.train_path, self.validation_path,
                            self.test_path)):
            self.logger.error('Some dataset directory does not exist!')

        image_width, image_height = \
            self.model.input_shape[1], self.model.input_shape[2]

        # create data generator objects
        data_gen_train = DataGenerator(image_width, image_height,
                                       self.batch_size, self.train_path)

        data_gen_vali = DataGenerator(image_width, image_height,
                                      self.batch_size, self.validation_path)

        data_gen_test = DataGenerator(image_width, image_height,
                                      self.batch_size, self.test_path)

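        # NORM_AND_TRANSFORM presumably applies augmentation transforms on
        # top of normalisation, so only the training split is transformed;
        # validation and test data are only normalised.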
        data_gen_train.configure(DataGeneratorConfig.NORM_AND_TRANSFORM)
        data_gen_vali.configure(DataGeneratorConfig.NORMALISE)
        data_gen_test.configure(DataGeneratorConfig.NORMALISE)

        self.train_generator = \
            data_gen_train.get_single(path=self.train_path)
        self.validation_generator = \
            data_gen_vali.get_single(path=self.validation_path)
        self.test_generator = \
            data_gen_test.get_single(path=self.test_path,
                                     shuffle=False)
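
The docstring above describes a setter-then-configure workflow. A minimal
usage sketch of that pattern, assuming a hypothetical owner class named
`TrainingRunner` and a hypothetical `build_model` factory (neither appears
in the original code):

# Hypothetical caller illustrating the set_* -> configure() pattern.
runner = TrainingRunner()          # assumed owner of configure()
runner.set_model(build_model())    # assumed model factory
runner.set_optimizer('adadelta')
runner.set_batch_size(16)
runner.configure()                 # one call after all setters suffices
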
Code example #2
def function_to_optimize(bounds):  # type: (np.ndarray) -> float
    b = bounds.astype(np.int64)
    batch_size, fc1_size, fc2_size = b[0, 0], b[0, 1], b[0, 2]
    logger.info('Bounds in action {}'.format(bounds))

    base_model = get_base_model()
    logger.debug('Trainability of the layers:')
    model = modify(base_model, fc1_size, fc2_size, dropout=False)

    for layer in model.layers:
        logger.debug([layer.name, layer.trainable])

    data_gen = DataGenerator(img_width, img_height, batch_size,
                             SMALL_TRAIN_PATH)
    data_gen.configure(DataGeneratorConfig.NORM_AND_TRANSFORM)

    train_generator, test_generator, validation_generator = data_gen.get(
        train_path=SMALL_TRAIN_PATH,
        test_path=TEST_PATH,
        validate_path=TEST_PATH)

    init = time.time()
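    # 400 and 807 presumably hardcode the training and validation split
    # sizes; the step counts are derived from them and the batch size.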
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=400 // batch_size + 1,
                                  epochs=number_of_epoch,
                                  validation_data=validation_generator,
                                  validation_steps=807 // batch_size)

    end = time.time()
    logger.info('[Training] Done in ' + str(end - init) + ' secs.\n')

    init = time.time()
    scores = model.evaluate_generator(test_generator,
                                      steps=807 // batch_size)
    end = time.time()
    logger.info('[Evaluation] Done in ' + str(end - init) + ' secs.\n')

    # Get ground truth
    test_labels = test_generator.classes
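    # Note: matching these labels against the predictions below assumes the
    # test generator does not shuffle (code example #1 passes shuffle=False
    # for its test generator; the setting is not shown for data_gen.get).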

    # Predict test images
    predictions_raw = model.predict_generator(test_generator)
    predictions = [np.argmax(prediction) for prediction in predictions_raw]
    # Evaluate results
    evaluator = Evaluator(test_labels, predictions,
                          label_list=list(range(8)))

    logger.info(
        'Evaluator\n'
        'Acc (model): {}\n'
        'Accuracy: {}\n'
        'Precision: {}\n'
        'Recall: {}\n'
        'Fscore: {}\n'.
        format(scores[1], evaluator.accuracy, evaluator.precision,
               evaluator.recall, evaluator.fscore))
    cm = evaluator.confusion_matrix()

    # Plot the confusion matrix on test data
    logger.info('Confusion matrix:\n')
    logger.info(cm)
    logger.info('Final accuracy: ' + str(evaluator.accuracy) + '\n')
    end = time.time()
    logger.info('Done in ' + str(end - init) + ' secs.\n')

    # list all data in history
    if plot_history:
        do_plotting(history=history, history2=None, cm=cm)

    logger.info(
        'Param to optimize [Accuracy] is: {}'.format(evaluator.accuracy))
    return evaluator.accuracy
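
Given the indexing of `bounds` (`b[0, 0]`, `b[0, 1]`, `b[0, 2]`),
`function_to_optimize` expects a 2-D array with one candidate point per
row, which matches the objective interface of Bayesian-optimisation
libraries such as GPyOpt. A minimal sketch of how the function could be
driven, assuming GPyOpt as the caller (the library choice, the domain
ranges and the iteration budget are illustrative assumptions):

import GPyOpt  # assumption: GPyOpt drives the search

# Illustrative search space for (batch_size, fc1_size, fc2_size).
domain = [
    {'name': 'batch_size', 'type': 'discrete', 'domain': (16, 32, 64)},
    {'name': 'fc1_size', 'type': 'discrete', 'domain': (256, 512, 1024)},
    {'name': 'fc2_size', 'type': 'discrete', 'domain': (128, 256, 512)},
]

# GPyOpt minimises its objective, so negate the accuracy to maximise it.
optimizer = GPyOpt.methods.BayesianOptimization(
    f=lambda x: -function_to_optimize(x),
    domain=domain)
optimizer.run_optimization(max_iter=10)
logger.info('Best parameters found: {}'.format(optimizer.x_opt))
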
Code example #3
def main():
    base_model = get_base_model()
    logger.debug('Trainability of the layers:')
    model = modify_model_before_block4(base_model, dropout=False)
    # model = modify_model_before_block3(base_model, dropout=False)
    for layer in model.layers:
        logger.debug([layer.name, layer.trainable])

    # Get train, validation and test dataset
    # preprocessing_function=preprocess_input,
    # data_generator = ImageDataGenerator(**DataGeneratorConfig.DEFAULT)
    # data_generator = ImageDataGenerator(**DataGeneratorConfig.CONFIG1)

    data_gen = DataGenerator(img_width, img_height, batch_size,
                             SMALL_TRAIN_PATH)
    data_gen.configure(DataGeneratorConfig.NORMALISE)

    train_generator, test_generator, validation_generator = data_gen.get(
        train_path=SMALL_TRAIN_PATH,
        test_path=TEST_PATH,
        validate_path=TEST_PATH)

    init = time.time()
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=400 // batch_size + 1,
                                  epochs=number_of_epoch,
                                  validation_data=validation_generator,
                                  validation_steps=807 // batch_size)

    # unlock all layers and train
    model = unlock_layers(model)
    history2 = model.fit_generator(train_generator,
                                   steps_per_epoch=400 // batch_size + 1,
                                   epochs=number_of_epoch,
                                   validation_data=validation_generator,
                                   validation_steps=807 // batch_size)
    end = time.time()
    logger.info('[Training] Done in ' + str(end - init) + ' secs.\n')

    init = time.time()
    scores = model.evaluate_generator(test_generator,
                                      steps=807 // batch_size)
    end = time.time()
    logger.info('[Evaluation] Done in ' + str(end - init) + ' secs.\n')

    # Get ground truth
    test_labels = test_generator.classes

    # Predict test images
    predictions_raw = model.predict_generator(test_generator)
    predictions = [np.argmax(prediction) for prediction in predictions_raw]
    # Evaluate results
    evaluator = Evaluator(test_labels, predictions,
                          label_list=list(range(8)))

    logger.info(
        'Evaluator\n'
        'Acc (model): {}\n'
        'Accuracy: {}\n'
        'Precision: {}\n'
        'Recall: {}\n'
        'Fscore: {}\n'.
        format(scores[1], evaluator.accuracy, evaluator.precision,
               evaluator.recall, evaluator.fscore))
    cm = evaluator.confusion_matrix()

    # Plot the confusion matrix on test data
    logger.info('Confusion matrix:\n')
    logger.info(cm)
    logger.info('Final accuracy: ' + str(evaluator.accuracy) + '\n')
    end = time.time()
    logger.info('Done in ' + str(end - init) + ' secs.\n')

    # list all data in history
    if plot_history:
        do_plotting(history, history2, cm)
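
`main` follows a two-phase fine-tuning scheme: it first trains with the
early blocks frozen by `modify_model_before_block4`, then unfreezes
everything with `unlock_layers` and trains again. The implementation of
`unlock_layers` is not shown above; a minimal sketch of what such a helper
typically does, assuming Keras and an illustrative optimizer setting:

from keras.optimizers import Adam  # assumption: Keras optimizer in use


def unlock_layers(model):
    """Make every layer trainable and recompile the model.

    Sketch only: the real helper is not shown in the original code, and
    the optimizer and learning rate here are assumptions.
    """
    for layer in model.layers:
        layer.trainable = True
    # Recompiling is required for the new trainable flags to take effect.
    model.compile(optimizer=Adam(lr=1e-5),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model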