Example #1
import tensorflow as tf

# odir, Plotter, Normalizer, Prediction and FinalScore are project-local
# helpers imported from the surrounding repository.


def main(argv):
    print(tf.version.VERSION)
    image_size = 224
    test_run = 'zC'

    # load the data
    (x_train, y_train), (x_test, y_test) = odir.load_data(image_size, 1)

    class_names = [
        'Normal', 'Diabetes', 'Glaucoma', 'Cataract', 'AMD', 'Hypertension',
        'Myopia', 'Others'
    ]

    # plot data input
    plotter = Plotter(class_names)
    plotter.plot_input_images(x_train, y_train)

    # keep an unnormalized copy of the test images for drawing later
    x_test_drawing = x_test

    # normalize input based on model
    normalizer = Normalizer()
    x_test = normalizer.normalize_vgg16(x_test)

    # load the model saved by one of the test runs
    model = tf.keras.models.load_model(
        r'C:\Users\thund\Source\Repos\TFM-ODIR\models\image_classification\modelvgg100.h5'
    )
    model.summary()

    # evaluate the model on the test set
    baseline_results = model.evaluate(x_test, y_test, verbose=2)
    for name, value in zip(model.metrics_names, baseline_results):
        print(name, ': ', value)
    print()

    # predict on the test set and plot the confusion matrix
    test_predictions_baseline = model.predict(x_test)
    plotter.plot_confusion_matrix_generic(y_test, test_predictions_baseline,
                                          test_run, 0)

    # save the predictions
    prediction_writer = Prediction(test_predictions_baseline, 400)
    prediction_writer.save()
    prediction_writer.save_all(y_test)

    # show the final score
    score = FinalScore()
    score.output()

    # plot output results
    plotter.plot_output(test_predictions_baseline, y_test, x_test_drawing)
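Normalizer.normalize_vgg16 is a project-local helper whose implementation is not shown here. A minimal sketch, assuming it simply wraps the standard Keras VGG16 preprocessing (RGB-to-BGR channel swap plus ImageNet mean subtraction), could look like this:

import numpy as np
import tensorflow as tf


class Normalizer:
    def normalize_vgg16(self, images):
        # assumed behaviour: apply the stock Keras VGG16 preprocessing to a
        # batch of images with pixel values in [0, 255]
        return tf.keras.applications.vgg16.preprocess_input(
            np.asarray(images, dtype=np.float32))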
Example #2
import os

import matplotlib.pyplot as plt
import tensorflow as tf

# odir, Factory, ModelTypes, Plotter, Prediction, FinalScore and generator are
# project-local helpers; history, model, plotter, x_test, y_test,
# x_test_drawing and newfolder come from the training code that precedes this
# snippet.

plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.savefig(os.path.join(newfolder, 'plot2.png'))
plt.show()

# evaluate the model on the test set
baseline_results = model.evaluate(x_test, y_test, verbose=2)
for name, value in zip(model.metrics_names, baseline_results):
    print(name, ': ', value)
print()

# predict on the test set and plot the confusion matrix
test_predictions_baseline = model.predict(x_test)
plotter.plot_confusion_matrix_generic(y_test, test_predictions_baseline,
                                      os.path.join(newfolder, 'plot3.png'), 0)

# save the predictions
prediction_writer = Prediction(test_predictions_baseline, 400, newfolder)
prediction_writer.save()
prediction_writer.save_all(y_test)

# show the final score
score = FinalScore(newfolder)
score.output()

# plot output results
plotter.plot_output(test_predictions_baseline, y_test, x_test_drawing,
                    os.path.join(newfolder, 'plot4.png'))
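newfolder is used above as the output directory for plots and prediction files but is never created in this snippet. A minimal, hypothetical setup for it could be:

import os

# hypothetical output directory for this run; the real project may choose a
# different name or location
newfolder = 'output_run'
os.makedirs(newfolder, exist_ok=True)
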
def main(argv):
    print(tf.version.VERSION)
    image_size = 128

    (x_train, y_train), (x_test, y_test) = odir.load_data(image_size)

    # scale pixel values to [0, 1], then standardize each split to zero mean
    # and unit variance
    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = (x_train - x_train.mean()) / x_train.std()
    x_test = (x_test - x_test.mean()) / x_test.std()

    defined_metrics = [
        tf.keras.metrics.BinaryAccuracy(name='accuracy'),
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name='auc'),
    ]

    factory = Factory((image_size, image_size, 3), defined_metrics)

    model = factory.compile(ModelTypes.inception_v1)
    class_names = [
        'Normal', 'Diabetes', 'Glaucoma', 'Cataract', 'AMD', 'Hypertension',
        'Myopia', 'Others'
    ]

    # plot data input
    plotter = Plotter(class_names)
    print("Training")

    # per-class weights (roughly inverse class frequencies) to counter the
    # imbalance in the training labels; defined here but not passed to fit below
    class_weight = {
        0: 1.,
        1: 1.583802025,
        2: 8.996805112,
        3: 10.24,
        4: 10.05714286,
        5: 14.66666667,
        6: 10.7480916,
        7: 2.505338078
    }

    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                patience=3,
                                                mode='min',
                                                verbose=1)

    # Model.fit accepts a Python generator directly (fit_generator is
    # deprecated), and shuffle is ignored when training from a generator.
    history = model.fit(generator(x_train, y_train),
                        steps_per_epoch=len(x_train),
                        epochs=30,
                        verbose=1,
                        callbacks=[callback],
                        validation_data=(x_test, y_test))

    print("plotting")
    plotter.plot_metrics(history, 'inception_1', 2)
    print("saving")
    model.save('model_inception_30.h5')

    # plot training vs. validation accuracy
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower right')
    plt.savefig('image_run2' + 'inception_1' + '.png')
    plt.show()

    # evaluate returns the loss followed by every compiled metric (accuracy,
    # precision, recall, auc above), so unpack only the first two values here
    test_loss, test_acc, *_ = model.evaluate(x_test, y_test, verbose=2)
    print(test_acc)

    test_predictions_baseline = model.predict(x_test)
    plotter.plot_confusion_matrix_generic(y_test, test_predictions_baseline,
                                          'inception_1', 0)

    # save the predictions
    prediction_writer = Prediction(test_predictions_baseline, 400)
    prediction_writer.save()
    prediction_writer.save_all(y_test)

    # show the final score
    score = FinalScore()
    score.output()
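The generator helper passed to model.fit above is also project-local and not shown. A rough stand-in consistent with steps_per_epoch=len(x_train), i.e. one image per step looping indefinitely, might be the following (the real helper may batch or augment differently):

import numpy as np


def generator(images, labels):
    # hypothetical sketch: yield single-sample batches forever so that
    # steps_per_epoch=len(x_train) covers the whole training set once per epoch
    while True:
        for image, label in zip(images, labels):
            yield np.expand_dims(image, axis=0), np.expand_dims(label, axis=0)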