import json

import tensorflow as tf
from tensorflow.keras.models import load_model

# NOTE: the project imports below are assumptions based on the module layout
# used later in this file; `config` is taken to be the experiment's
# configuration module exposing DATASET_MEAN, TEST_HDF5, and MODEL_PATH
from config import dogs_vs_cats_config as config
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from pyimagesearch.preprocessing.croppreprocessor import CropPreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor

# allow GPU memory growth to avoid "Failed to get convolution algorithm" errors
cfg = tf.compat.v1.ConfigProto()
cfg.gpu_options.allow_growth = True
session = tf.compat.v1.InteractiveSession(config=cfg)
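
# A minimal alternative sketch: on TF 2.x the same memory-growth behavior can be
# requested through tf.config instead of a compat.v1 session. Memory growth must
# be configured before the GPUs are initialized, so prefer one approach or the
# other; the guard below simply skips the setting if the runtime is already up.
try:
    for gpu in tf.config.list_physical_devices("GPU"):
        tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError:
    # the GPUs were already initialized (e.g. by the session above), so the
    # memory-growth setting can no longer be changed
    pass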

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# load the pretrained network
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

# initialize the testing dataset generator, then make predictions on
# the testing data
print("[INFO] predicting on test data (no crops)...")
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               64,
                               preprocessors=[sp, mp, iap],
                               classes=2)
predictions = model.predict(testGen.generator(),
                            steps=testGen.numImages // 64,
                            max_queue_size=64 * 2)
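
# The CropPreprocessor `cp` built above is not used by the no-crop evaluation.
# A minimal sketch of crop-based test-time augmentation (oversampling) follows,
# assuming HDF5DatasetGenerator yields (images, labels) batches of un-cropped
# images and that cp.preprocess() returns an array of crops for a single image.
import numpy as np

# re-initialize the generator, this time applying only mean subtraction so the
# crops can be taken from the full-size images
testGen = HDF5DatasetGenerator(config.TEST_HDF5, 64,
                               preprocessors=[mp], classes=2)
cropPredictions = []

for (images, labels) in testGen.generator(passes=1):
    for image in images:
        # generate the crops, convert each crop to a Keras-compatible array,
        # and average the model's predictions over the crops
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops], dtype="float32")
        preds = model.predict(crops)
        cropPredictions.append(preds.mean(axis=0))

# release the generator's HDF5 file handle
testGen.close()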

# Separate example: load an image dataset from disk, preprocess it, and split
# it into training and testing sets.
import argparse

import matplotlib.pyplot as plt
import numpy as np
from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

# NOTE: the loader/preprocessor imports below assume the pyimagesearch module
# layout used elsewhere in this file
from pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

# grab the list of images that we'll be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the image preprocessors
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)

# convert the labels from integers to vectors (one-hot encoding); LabelBinarizer
# is one standard way to do this
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# Example 3: evaluating a dog-vs-cat model on the HDF5 test set
import sys
import json

from keras.models import load_model

# make the project packages importable before the pyimagesearch imports below
sys.path.append("../")
from pyimagesearch.utils.ranked import rank5_accuracy
from dog_vs_cat.configs import dog_vs_cat_configs as configs
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from pyimagesearch.preprocessing.croppreprocessor import CropPreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor

# load the RGB mean values
means = json.loads(open(configs.DATASET_MEAN).read())

# initialize the preprocessors
ip = ImageToArrayPreprocessor()
cp = CropPreprocessor(width=227, height=227)
sp = SimplePreprocessor(width=227, height=227)
mp = MeanPreprocessor(rMean=means["R"], gMean=means["G"], bMean=means["B"])

# initialize the testing dataset generator
test_gen = HDF5DatasetGenerator(configs.TEST_HDF5,
                                feature_ref_name="data",
                                batch_size=configs.BATCH_SIZE,
                                preprocessors=[sp, mp, ip])

# load model
model = load_model(configs.MODEL_PATH)

# make predictions on the test set (a single pass over the generator); the steps
# value assumes the generator exposes the test-image count as `num_images`, so
# adjust the attribute name to match your HDF5DatasetGenerator implementation
predictions = model.predict(test_gen.generate(passes=1),
                            steps=test_gen.num_images // configs.BATCH_SIZE)
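
# A minimal sketch of scoring these predictions with the rank5_accuracy helper
# imported above; it assumes the generator exposes its open HDF5 database as
# `db` with a "labels" dataset, mirroring the original HDF5DatasetGenerator.
(rank1, rank5) = rank5_accuracy(predictions, test_gen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
print("[INFO] rank-5: {:.2f}%".format(rank5 * 100))

# close the generator's underlying HDF5 database
test_gen.close()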