Example #1
# initialize the testing dataset generator
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               config.BATCH_SIZE,
                               preprocessors=[mp],
                               classes=3)
predictions = []

widgets = [
    "Evaluating: ",
    progressbar.Percentage(), " ",
    progressbar.Bar(), " ",
    progressbar.ETA()
]
pbar = progressbar.ProgressBar(maxval=testGen.numImages // config.BATCH_SIZE,
                               widgets=widgets).start()

# loop over a single pass of the testing data
for (i, (images, labels)) in enumerate(testGen.generator(passes=1)):

    for image in images:

        # apply the 10-crop preprocessor, then convert each crop to a
        # Keras-compatible float array
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops], dtype="float32")

        # average the predictions over the crops to obtain a single
        # prediction for the image
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))

    pbar.update(i)

pbar.finish()
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()
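# For reference, rank5_accuracy() is not defined in this excerpt. A minimal
# sketch of such a helper (an assumption, not necessarily the project's exact
# implementation): it measures how often the ground-truth label is the top
# prediction (rank-1) or among the top five predictions (rank-5).
import numpy as np

def rank5_accuracy(preds, labels):
    # initialize the rank-1 and rank-5 hit counters
    rank1 = 0
    rank5 = 0

    # loop over the predicted probability vectors and ground-truth labels
    for (p, gt) in zip(preds, labels):
        # sort the class indices by probability in descending order
        p = np.argsort(p)[::-1]

        # check if the ground-truth label falls in the top-5 predictions
        if gt in p[:5]:
            rank5 += 1

        # check if the ground-truth label is the top prediction
        if gt == p[0]:
            rank1 += 1

    # convert the raw counts to accuracies
    rank1 /= float(len(labels))
    rank5 /= float(len(labels))
    return (rank1, rank5)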
# import the necessary packages (AspectAwarePreprocessor,
# ImageToArrayPreprocessor, SimpleDatasetLoader, and MiniVGGNet are
# project-specific modules; import them from wherever they live in your
# project)
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from imutils import paths
import numpy as np
import argparse
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="Path to the dataset")
args = vars(ap.parse_args())

print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
# derive the class labels from the parent directory of each image path
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

# initialize the image preprocessors: resize to 64x64 while preserving the
# aspect ratio, then convert each image to a Keras-compatible array
aap = AspectAwarePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into 75% training and 25% testing splits
(trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                  test_size=0.25, random_state=42)

# convert the labels to one-hot vectors using a single binarizer
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compling model...")
opt = SGD(lr=0.05)
model = MiniVGGNet.build(width=64, height=64, depth=3,
classes=len(classNames))
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])
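# The training call itself is not shown in this excerpt; a minimal sketch,
# assuming a batch size of 32 and 100 epochs (both assumptions), would be:
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              batch_size=32, epochs=100, verbose=1)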

# evaluate the network on the HDF5 test set (no crops) using the generator
predictions = model.predict_generator(test_generator.generator(),
                                      steps=test_generator.num_images // 64,
                                      max_queue_size=10)

rank1, _ = rank5_accuracy(predictions, test_generator.db['labels'])
print(f'[INFO] rank-1: {rank1 * 100:.2f}%')
test_generator.close()

# Let's now test with oversampling (10-crop).
print('[INFO] Predicting on test data (with crops)...')
test_generator = HDF5DatasetGenerator(config.TEST_HDF5, 64, preprocessors=[mean_preprocessor], classes=2)
predictions = []

widgets = ['Evaluating: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
progress_bar = progressbar.ProgressBar(maxval=test_generator.num_images // 64, widgets=widgets).start()

for i, (images, labels) in enumerate(test_generator.generator(passes=1)):
    for image in images:
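        # apply the 10-crop preprocessor, then convert each crop to a
        # Keras-compatible float array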
        crops = crop_preprocessor.preprocess(image)
        crops = np.array([image_to_array_preprocessor.preprocess(c) for c in crops], dtype='float32')

        # We need to average the predictions on the crops to get a final prediction.
        prediction = model.predict(crops)
        predictions.append(prediction.mean(axis=0))

    progress_bar.update(i)

progress_bar.finish()
rank1, _ = rank5_accuracy(predictions, test_generator.db['labels'])
print(f'[INFO] Rank-1: {rank1 * 100:.2f}%')
test_generator.close()
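# The crop and mean preprocessors used above (cp/crop_preprocessor and
# mp/mean_preprocessor) are not defined in this excerpt. Minimal sketches of
# such classes follow; the names, constructor arguments, and use of OpenCV
# are assumptions, not necessarily the project's exact implementations.
import cv2
import numpy as np

class CropPreprocessor:
    def __init__(self, width, height, horiz=True, inter=cv2.INTER_AREA):
        # store the target crop width/height, whether to add horizontal
        # flips, and the interpolation method used when resizing
        self.width = width
        self.height = height
        self.horiz = horiz
        self.inter = inter

    def preprocess(self, image):
        # initialize the list of crops and grab the image dimensions
        crops = []
        (h, w) = image.shape[:2]

        # the four corner crops
        coords = [
            [0, 0, self.width, self.height],
            [w - self.width, 0, w, self.height],
            [w - self.width, h - self.height, w, h],
            [0, h - self.height, self.width, h]]

        # the center crop
        dW = int(0.5 * (w - self.width))
        dH = int(0.5 * (h - self.height))
        coords.append([dW, dH, w - dW, h - dH])

        # extract each crop and resize it to the target dimensions
        for (startX, startY, endX, endY) in coords:
            crop = image[startY:endY, startX:endX]
            crop = cv2.resize(crop, (self.width, self.height),
                              interpolation=self.inter)
            crops.append(crop)

        # optionally add the horizontally flipped crops (10 crops total)
        if self.horiz:
            mirrors = [cv2.flip(c, 1) for c in crops]
            crops.extend(mirrors)

        return np.array(crops)


class MeanPreprocessor:
    def __init__(self, rMean, gMean, bMean):
        # store the per-channel means computed on the training set
        self.rMean = rMean
        self.gMean = gMean
        self.bMean = bMean

    def preprocess(self, image):
        # split the image into its channels, subtract the means, then
        # merge the channels back together
        (B, G, R) = cv2.split(image.astype("float32"))
        R -= self.rMean
        G -= self.gMean
        B -= self.bMean
        return cv2.merge([B, G, R])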