Example #1
    def evaluate_performance(self, visual_words_test, test_labels, do_plotting,
                             train_data):
        # Test the classification accuracy
        print('Testing the SVM classifier...')
        init = time.time()
        test_data = self.stdSlr.transform(visual_words_test)
        accuracy = 100 * self.clf.score(test_data, test_labels)

        predictions = self.clf.predict(test_data)
        evaluator = Evaluator(test_labels, predictions)
        print(
            'Evaluator \nAccuracy: {} \nPrecision: {} \nRecall: {} \nFscore: {}'
            .format(evaluator.accuracy, evaluator.precision, evaluator.recall,
                    evaluator.fscore))

        cm = evaluator.confusion_matrix()

        # Plot the confusion matrix on test data
        print('Confusion matrix:')
        print(cm)
        if do_plotting:
            plt.matshow(cm)
            plt.title('Confusion matrix')
            plt.colorbar()
            plt.ylabel('True label')
            plt.xlabel('Predicted label')
            plt.show()

        end = time.time()
        print('Done in ' + str(end - init) + ' secs.')
        print('Final accuracy: ' + str(accuracy))
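
All of these examples rely on an Evaluator helper whose implementation is not shown. Judging from the call sites (accuracy, precision, recall and fscore attributes, a confusion_matrix() method, and an optional label_list argument), a minimal sketch built on sklearn.metrics might look like the following; the macro averaging is an assumption, and the original may use a different strategy.

from sklearn.metrics import (accuracy_score, confusion_matrix,
                             precision_recall_fscore_support)


class Evaluator(object):
    # Minimal sketch of the Evaluator used in these examples (assumed
    # implementation, reconstructed from the call sites above)
    def __init__(self, test_labels, predictions, label_list=None):
        self.test_labels = test_labels
        self.predictions = predictions
        self.label_list = label_list
        self.accuracy = accuracy_score(test_labels, predictions)
        # macro averaging is an assumption; the original may differ
        self.precision, self.recall, self.fscore, _ = \
            precision_recall_fscore_support(test_labels, predictions,
                                            average='macro')

    def confusion_matrix(self):
        # rows are true labels, columns are predicted labels
        return confusion_matrix(self.test_labels, self.predictions,
                                labels=self.label_list)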
Example #2
    def evaluate_performance_SVM(self, features, test_labels, do_plotting):
        # Test the classification accuracy
        colorprint(Color.BLUE, 'Testing the SVM classifier...\n')
        init = time.time()
        test_data = self.stdSlr.transform(features)
        accuracy = 100 * self.clf.score(test_data, test_labels)

        predictions = self.clf.predict(test_data)
        evaluator = Evaluator(test_labels,
                              predictions,
                              label_list=[0, 1, 2, 3, 4, 5, 6, 7])

        colorprint(
            Color.BLUE,
            'Evaluator \nAccuracy: {} \nPrecision: {} \nRecall: {} \nFscore: {}'
            .format(evaluator.accuracy, evaluator.precision, evaluator.recall,
                    evaluator.fscore) + '\n')
        cm = evaluator.confusion_matrix()

        # Plot the confusion matrix on test data
        colorprint(Color.BLUE, 'Confusion matrix:\n')
        colorprint(Color.BLUE, str(cm) + '\n')
        if do_plotting:
            plt.matshow(cm)
            plt.title('Confusion matrix')
            plt.colorbar()
            plt.ylabel('True label')
            plt.xlabel('Predicted label')
            plt.savefig('cm.jpg')  # save before show(), which clears the figure
            plt.show()

        end = time.time()
        colorprint(Color.BLUE, 'Done in ' + str(end - init) + ' secs.\n')
        colorprint(Color.BLUE, 'Final accuracy: ' + str(accuracy) + '\n')
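
Examples #2 and #4 also use a colorprint helper and a Color class that are not defined in this listing. A plausible minimal sketch using ANSI escape codes is shown below; the real helpers may differ.

import sys


class Color(object):
    # ANSI escape sequences (assumed; the original Color class is not shown)
    BLUE = '\033[94m'
    END = '\033[0m'


def colorprint(color, text):
    # wrap the text in the given ANSI color code; str() also covers
    # non-string arguments such as numpy arrays
    sys.stdout.write(color + str(text) + Color.END)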
Example #3
def main(feature_extractor, classifier, n_threads=1):
    do_plotting = False

    # Read the train and test files
    database = Database(DATA_PATH)
    train_images, test_images, train_labels, test_labels = database.get_data()

    # Load or compute descriptors for training
    descriptors, labels = database.load_in_memory('train', feature_extractor,
                                                  train_images, train_labels)
    # Train a classifier with train dataset
    print('Training model...')
    classifier.train(descriptors, labels)

    # BUG: the test descriptors cannot be saved the same way as the train
    # ones because they have to be checked per-image, not per-blob. One
    # attempt uses predict_images_pool_2, but it is unfinished and needs a
    # change in the Database implementation. Idea: always save one descriptor
    # file per image, then group them for training and keep them separate
    # for prediction (see the sketch after this function).

    # Load or compute descriptors for testing
    # descriptors, labels = database.load_in_memory('test', feature_extractor,
    #                                               test_images, test_labels)

    # FIXME: do something with descriptors and labels
    # Assess classifier with test dataset
    print('Testing classifier...')
    if n_threads == 1:
        predicted_class = predict_images(test_images, test_labels)
    else:
        print('Predicting test images')
        predicted_class = predict_images_pool(test_images, n_threads)
        # predicted_class = predict_images_pool_2(descriptors.tolist(), n_threads)

    # Evaluate performance metrics
    evaluator = Evaluator(test_labels, predicted_class)

    print('Evaluator \nAccuracy: {} \nPrecision: {} \nRecall: {} \nFscore: {}'.
          format(evaluator.accuracy, evaluator.precision, evaluator.recall,
                 evaluator.fscore))

    cm = evaluator.confusion_matrix()

    # Plot the confusion matrix on test data
    print('Confusion matrix:')
    print(cm)
    if do_plotting:
        plt.matshow(cm)
        plt.title('Confusion matrix')
        plt.colorbar()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.show()
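
The BUG note above proposes always saving one descriptor file per image and grouping them only for training. A rough sketch of that idea follows; the cache layout and the feature_extractor.extract call are assumptions, not part of the Database API shown here.

import os

import numpy as np


def load_descriptors_per_image(split, feature_extractor, images,
                               cache_dir='descriptors'):
    # One .npy file per image: train descriptors can then be stacked with
    # np.vstack for training, while test descriptors stay separated so they
    # can be checked per-image as the BUG note requires.
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    descriptors = []
    for i, image in enumerate(images):
        path = os.path.join(cache_dir, '{}_{}.npy'.format(split, i))
        if os.path.exists(path):
            des = np.load(path)
        else:
            des = feature_extractor.extract(image)  # hypothetical API
            np.save(path, des)
        descriptors.append(des)
    return descriptors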
Example #4
    def plot_results(self):
        # plot classification results

        colorprint(Color.BLUE, 'Getting classification results...\n')
        init = time.time()

        # this is the dataset configuration we will use for testing:
        # only rescaling
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        # this is a generator that will read pictures found in
        # subfolders of 'data/test' and indefinitely generate
        # batches of rescaled (not augmented) image data

        test_generator = test_datagen.flow_from_directory(
            self.DATASET_DIR + '/test',
            target_size=(self.IMG_SIZE, self.IMG_SIZE),
            batch_size=self.BATCH_SIZE,
            classes=[
                'coast', 'forest', 'highway', 'inside_city', 'mountain',
                'Opencountry', 'street', 'tallbuilding'
            ],
            class_mode='categorical',
            shuffle=False)
        # Get ground truth
        test_labels = test_generator.classes

        # Predict test images
        predictions_raw = self.model.predict_generator(test_generator)
        predictions = [np.argmax(p) for p in predictions_raw]
        # Evaluate results
        evaluator = Evaluator(test_labels,
                              predictions,
                              label_list=[0, 1, 2, 3, 4, 5, 6, 7])

        # Evaluate the model itself on the test set for comparison
        scores = self.model.evaluate_generator(test_generator)
        colorprint(
            Color.BLUE,
            'Evaluator \nAcc (model): {} \nAccuracy: {} \nPrecision: {} \nRecall: {} \nFscore: {}'
            .format(scores[1], evaluator.accuracy, evaluator.precision,
                    evaluator.recall, evaluator.fscore) + '\n')
        cm = evaluator.confusion_matrix()

        # Plot the confusion matrix on test data
        colorprint(Color.BLUE, 'Confusion matrix:\n')
        colorprint(Color.BLUE, str(cm) + '\n')

        plt.matshow(cm)
        plt.title('Confusion matrix')
        plt.colorbar()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.savefig('cm.jpg')  # save before show(), which clears the figure
        plt.show()
        colorprint(Color.BLUE,
                   'Final accuracy: ' + str(evaluator.accuracy) + '\n')

        end = time.time()
        colorprint(Color.BLUE, 'Done in ' + str(end - init) + ' secs.\n')
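
The same matshow block recurs in every example, and getting the save-before-show ordering right each time is error-prone. A small helper (the name plot_confusion_matrix is mine, not from the source) could centralise it:

import matplotlib.pyplot as plt


def plot_confusion_matrix(cm, filename=None):
    # shared plotting logic from the examples above; savefig must run
    # before plt.show(), which clears the current figure
    plt.matshow(cm)
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    if filename is not None:
        plt.savefig(filename)
    plt.show()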
Example #5
def function_to_optimize(bounds):  # type: (ndarray) -> float
    b = bounds.astype(np.int64)
    batch_size, fc1_size, fc2_size = b[0, 0], b[0, 1], b[0, 2]
    logger.info('Bounds in action {}'.format(bounds))

    base_model = get_base_model()
    logger.debug('Trainability of the layers:')
    model = modify(base_model, fc1_size, fc2_size, dropout=False)

    for layer in model.layers:
        logger.debug([layer.name, layer.trainable])

    data_gen = DataGenerator(img_width, img_height, batch_size,
                             SMALL_TRAIN_PATH)
    data_gen.configure(DataGeneratorConfig.NORM_AND_TRANSFORM)

    train_generator, test_generator, validation_generator = data_gen.get(
        train_path=SMALL_TRAIN_PATH,
        test_path=TEST_PATH,
        validate_path=TEST_PATH)

    init = time.time()
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=(400 // batch_size) + 1,
                                  epochs=number_of_epoch,
                                  validation_data=validation_generator,
                                  validation_steps=807 // batch_size)

    end = time.time()
    logger.info('[Training] Done in ' + str(end - init) + ' secs.\n')

    init = time.time()
    scores = model.evaluate_generator(test_generator, steps=807 // batch_size)
    end = time.time()
    logger.info('[Evaluation] Done in ' + str(end - init) + ' secs.\n')

    # Get ground truth
    test_labels = test_generator.classes

    # Predict test images
    predictions_raw = model.predict_generator(test_generator)
    predictions = [np.argmax(p) for p in predictions_raw]
    # Evaluate results
    evaluator = Evaluator(test_labels, predictions,
                          label_list=[0, 1, 2, 3, 4, 5, 6, 7])

    logger.info(
        'Evaluator \n'
        'Acc (model): {} \n'
        'Accuracy: {} \n'
        'Precision: {} \n'
        'Recall: {} \n'
        'Fscore: {}'.
        format(scores[1], evaluator.accuracy, evaluator.precision,
               evaluator.recall, evaluator.fscore) + '\n')
    cm = evaluator.confusion_matrix()

    # Plot the confusion matrix on test data
    logger.info('Confusion matrix:\n')
    logger.info(cm)
    logger.info('Final accuracy: ' + str(evaluator.accuracy) + '\n')
    end = time.time()
    logger.info('Done in ' + str(end - init) + ' secs.\n')

    # list all data in history
    if plot_history:
        do_plotting(history=history, history2=None, cm=cm)

    logger.info(
        'Param to optimize [Accuracy] is: {}'.format(evaluator.accuracy))
    return evaluator.accuracy
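
function_to_optimize receives a 2-D bounds array of shape (1, 3), which matches the calling convention of GPyOpt's BayesianOptimization. The source does not name the optimizer, so the sketch below is an assumption, and the discrete domains are illustrative values only.

from GPyOpt.methods import BayesianOptimization

# illustrative discrete search spaces; the real ranges are not in the source
domain = [
    {'name': 'batch_size', 'type': 'discrete', 'domain': (16, 32, 64)},
    {'name': 'fc1_size', 'type': 'discrete', 'domain': (256, 512, 1024)},
    {'name': 'fc2_size', 'type': 'discrete', 'domain': (128, 256, 512)},
]

optimizer = BayesianOptimization(f=function_to_optimize,
                                 domain=domain,
                                 maximize=True)  # accuracy is maximized
optimizer.run_optimization(max_iter=10)
print(optimizer.x_opt, optimizer.fx_opt)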
Example #6
def main():
    base_model = get_base_model()
    logger.debug('Trainability of the layers:')
    model = modify_model_before_block4(base_model, dropout=False)
    # model = modify_model_before_block3(base_model, dropout=False)
    for layer in model.layers:
        logger.debug([layer.name, layer.trainable])

    # Get train, validation and test dataset
    # preprocessing_function=preprocess_input,
    # data_generator = ImageDataGenerator(**DataGeneratorConfig.DEFAULT)
    # data_generator = ImageDataGenerator(**DataGeneratorConfig.CONFIG1)

    data_gen = DataGenerator(img_width, img_height, batch_size,
                             SMALL_TRAIN_PATH)
    data_gen.configure(DataGeneratorConfig.NORMALISE)

    train_generator, test_generator, validation_generator = data_gen.get(
        train_path=SMALL_TRAIN_PATH,
        test_path=TEST_PATH,
        validate_path=TEST_PATH)

    init = time.time()
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=(400 // batch_size) + 1,
                                  epochs=number_of_epoch,
                                  validation_data=validation_generator,
                                  validation_steps=807 // batch_size)

    # unlock all layers and train
    model = unlock_layers(model)
    history2 = model.fit_generator(train_generator,
                                   steps_per_epoch=(400 // batch_size) + 1,
                                   epochs=number_of_epoch,
                                   validation_data=validation_generator,
                                   validation_steps=807 // batch_size)
    end = time.time()
    logger.info('[Training] Done in ' + str(end - init) + ' secs.\n')

    init = time.time()
    scores = model.evaluate_generator(test_generator, steps=807 // batch_size)
    end = time.time()
    logger.info('[Evaluation] Done in ' + str(end - init) + ' secs.\n')

    # Get ground truth
    test_labels = test_generator.classes

    # Predict test images
    predictions_raw = model.predict_generator(test_generator)
    predictions = [np.argmax(p) for p in predictions_raw]
    # Evaluate results
    evaluator = Evaluator(test_labels, predictions,
                          label_list=[0, 1, 2, 3, 4, 5, 6, 7])

    logger.info(
        'Evaluator \n'
        'Acc (model): {} \n'
        'Accuracy: {} \n'
        'Precision: {} \n'
        'Recall: {} \n'
        'Fscore: {}'.
        format(scores[1], evaluator.accuracy, evaluator.precision,
               evaluator.recall, evaluator.fscore) + '\n')
    cm = evaluator.confusion_matrix()

    # Plot the confusion matrix on test data
    logger.info('Confusion matrix:\n')
    logger.info(cm)
    logger.info('Final accuracy: ' + str(evaluator.accuracy) + '\n')
    end = time.time()
    logger.info('Done in ' + str(end - init) + ' secs.\n')

    # list all data in history
    if plot_history:
        do_plotting(history, history2, cm)
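
unlock_layers is not shown in this listing. Given the two-stage fine-tuning pattern above (train with some layers frozen, then unlock everything and train again), a plausible Keras sketch is below; the optimizer, loss and learning rate are assumptions.

from keras.optimizers import Adam


def unlock_layers(model, learning_rate=1e-5):
    # make every layer trainable and recompile so the change takes effect;
    # a low learning rate is typical when fine-tuning all layers
    for layer in model.layers:
        layer.trainable = True
    model.compile(optimizer=Adam(lr=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model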