예제 #1
0
def test_predictions():
    """Smoke-test the trained CNN on the held-out dog and cat test images.

    Loads every matching test image, applies the same 1/255 rescaling used
    at training time, then prints the model's predicted probability for
    each image, one group at a time.
    """
    # Preprocessing must mirror training: pixel values scaled to [0, 1].
    image_data_generator = ImageDataGenerator(rescale=(1.0 / 255))

    def _load_images(pattern):
        # Load, resize, and rescale every image file matching `pattern`.
        images = []
        for file_name in glob.glob(pattern):
            img = image.load_img(file_name,
                                 target_size=(img_height, img_width))
            images.append(
                image_data_generator.standardize(image.img_to_array(img)))
        return images

    # load test dog and cat images
    test_dog_images = _load_images(DEFAULT_TEST_DATA_DIR + "/dogs/dog*.jpg")
    test_cat_images = _load_images(DEFAULT_TEST_DATA_DIR + "/cats/cat*.jpg")

    # load trained cnn model
    cnn_model = load_cnn_model(DEFAULT_MODEL_FILE)

    print("\nTest Trained CNN Model with Dog Images: =======================")
    for dog_image in test_dog_images:
        # predict() expects a batch, so wrap the single image in an array.
        prediction = cnn_model.predict(np.array([dog_image]), batch_size=1)
        print(" Predicted Probability of Dog Image is {}".format(
            prediction[0]))

    print("\nTest Trained CNN Model with Cat Images: =======================")
    for cat_image in test_cat_images:
        prediction = cnn_model.predict(np.array([cat_image]), batch_size=1)
        # NOTE(review): this message also says "Dog Image" — possibly it
        # deliberately reports P(dog) for a cat input, but it may be a
        # copy-paste slip; confirm the intended class before changing.
        print(" Predicted Probability of Dog Image is {}".format(
            prediction[0]))
예제 #2
0
import numpy as np
import os

from time import process_time
from time import process_time_ns

# Test batch prediction (timed).
start = process_time()

img_dir = "K:/data/manual test"
# List the directory exactly once so the batch size, the loaded files,
# and the final filename->prediction mapping all agree even if the
# directory contents change while this runs (the original called
# os.listdir() three separate times).
filelist = os.listdir(img_dir)

# Pre-size the batch: one 256x256 RGB slot per file.
batch = np.zeros((len(filelist), 256, 256, 3))
for i, img_name in enumerate(filelist):
    loadedimg = image.load_img(os.path.join(img_dir, img_name),
                               target_size=(256, 256))
    batch[i, ] = loadedimg
# Apply the generator's preprocessing to the whole batch at once.
batch = datagen.standardize(batch)
predictions = model.predict_proba(batch, batch_size=len(filelist))

# Map each prediction vector to a {class_name: probability} dict.
finalPreds = []
for prediction in predictions:
    print(prediction)
    dictionary = {}
    for label in class_names:
        class_name = class_names.get(label)
        dictionary.update({class_name: prediction[label]})
    finalPreds.append(dictionary)

# Associate each filename with its per-class prediction dict; filelist
# order matches the batch row order built above.
assignDict = {}
for i, pred in enumerate(finalPreds):
    print(i)
    assignDict.update({filelist[i]: pred})
예제 #3
0
# Split into one training set and three test sets (low / high / random),
# grouped by data['ID'] — presumably so samples sharing an ID never end up
# on both sides of a split; confirm against train_test_many_split.
y_train, y_test_low, y_test_high, y_test_random, X_train, X_test_low, X_test_high, X_test_random = \
    train_test_many_split(y, X, groups=data['ID'], side_test_size=0.05, random_test_size=0.1)

# Get and train data generator
# TODO: solid angle rotations
datagen = ImageDataGenerator(
    # Featurewise normalization: subtract the dataset mean and divide by
    # the dataset std (statistics computed by datagen.fit below).
    featurewise_center=True,
    featurewise_std_normalization=True,
    # rotation_range=30,
    horizontal_flip=True,
    vertical_flip=True,
)
# Compute the normalization statistics on the TRAINING data only, so the
# test sets are standardized without leaking their own statistics.
datagen.fit(X_train)
# Standardize validation datasets
X_test_random = datagen.standardize(X_test_random)
X_test_high = datagen.standardize(X_test_high)
X_test_low = datagen.standardize(X_test_low)

# Train model
# TODO: send some images to tensorboard
callbacks = [
    CustomTensorBoard(log_dir=get_tensorboard_dir(args, timestamp,
                                                  learning_rate, batch_size),
                      validation_data={
                          'low': (X_test_low, y_test_low),
                          'high': (X_test_high, y_test_high)
                      }),
    EarlyStopping(monitor='val_loss',
                  patience=patience,
                  restore_best_weights=True),