Example #1
from deepergooglenet.config import tiny_imagenet_config as config
from pyImageSearch.utils.ranked import rank5_accuracy
from pyImageSearch.preprocessing.imagetoarrayprocessor import ImageToArrayProcessor
from pyImageSearch.preprocessing.simpleProcessor import SimplePreprocessor
# MeanPreprocessor is used below but was never imported; the module path is
# assumed to follow the layout of the other preprocessors in this package
from pyImageSearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyImageSearch.dataset.simpleDatasetLoader import SimpleDatasetLoader
from pyImageSearch.io.hdf5datasetgenerator import HDF5DataGenerator
from keras.models import load_model
import json

means = json.loads(open(config.DATASET_MEANS).read())

sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayProcessor()

testGen = HDF5DataGenerator(config.TEST_HDF5, 64,
                            preprocessors=[sp, mp, iap],
                            classes=config.NUM_CLASSES)

print("[INFO] loading model")
model = load_model(config.MODEL_PATH)

print("[INFO] prediction on test Data ")
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 64,
                                      max_queue_size=64 * 2)

(rank1, rank5) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1 :{:.2f}".format(rank1 * 100))
print("[INFO] rank-5 :{:.2f}".format(rank5 * 100))

testGen.close()
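
`rank5_accuracy` itself is not shown in any of these examples. A minimal sketch of what it computes, assuming `preds` is an (N, num_classes) array of class probabilities and `labels` is the matching vector of integer ground-truth classes:

import numpy as np

def rank5_accuracy(preds, labels):
    # running counts of rank-1 and rank-5 hits
    rank1 = 0
    rank5 = 0

    for (p, gt) in zip(preds, labels):
        # sort the class indices by predicted probability, highest first
        p = np.argsort(p)[::-1]

        # a ground-truth label anywhere in the top five is a rank-5 hit
        if gt in p[:5]:
            rank5 += 1

        # a ground-truth label in the top slot is a rank-1 hit
        if gt == p[0]:
            rank1 += 1

    # normalize the hit counts into accuracies
    rank1 /= float(len(labels))
    rank5 /= float(len(labels))

    return (rank1, rank5)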
Example #2

# imports this snippet relies on but did not include
from pyImageSearch.preprocessing.imagetoarrayprocessor import ImageToArrayProcessor
from pyImageSearch.preprocessing.simpleProcessor import SimplePreprocessor
from pyImageSearch.dataset.simpleDatasetLoader import SimpleDatasetLoader
from keras.models import load_model
from imutils import paths
import numpy as np
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True)
ap.add_argument("-m", "--model", required=True)
args = vars(ap.parse_args())

classLabels = ["cat","dog","panda"]

print("going to load images")
imagePaths = np.array(list(paths.list_images(args["dataset"])))
idx = np.random.randint(0,len(imagePaths),size=(10,))
imagePaths = imagePaths[idx]


sp = SimplePreprocessor(32,32)
iap = ImageToArrayProcessor()

sdl = SimpleDatasetLoader(preprocessor=[sp,iap])
(data, label) = sdl.load(imagePaths, verbose=500)

data = data.astype("float32")/255.0



print("loading models ...")
model = load_model(args["model"])

print("predicting ")
preds=model.predict(data,batch_size=32).argmax(axis=1)
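
The snippet stops after computing `preds`, leaving `classLabels` and the ten sampled paths unused. A sketch of the display loop they were presumably set up for, assuming OpenCV is installed:

import cv2

# loop over the sampled images and overlay the predicted class label
for (i, imagePath) in enumerate(imagePaths):
    image = cv2.imread(imagePath)
    cv2.putText(image, "Label: {}".format(classLabels[preds[i]]),
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow("Image", image)
    cv2.waitKey(0)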
Example #3

from pyImageSearch.preprocessing.imagetoarrayprocessor import ImageToArrayProcessor
# MeanPreprocessor is used below but was never imported; module path assumed
from pyImageSearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyImageSearch.preprocessing.patchpreprocessor import PatchPreprocessor
from pyImageSearch.preprocessing.simpleProcessor import SimplePreprocessor
from pyImageSearch.preprocessing.croppreprocess import CropPreprocessor
from pyImageSearch.io.hdf5datasetgenerator import HDF5DataGenerator
from pyImageSearch.nn.conv.alexnet import AlexNet
from dog_vs_cat.config import dog_vs_cat_config as config
from pyImageSearch.utils.ranked import rank5_accuracy
from keras.models import load_model
import numpy as np
import progressbar
import json

means = json.loads(open(config.DATASET_MEAN).read())

# initialize the preprocessors
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayProcessor()

#load the pretrained model
print("[INFO] loading thr pretrained model")
model = load_model(config.MODEL_PATH)

# predict on the test data without cropping
testGen = HDF5DataGenerator(config.TEST_HDF5,
                            64,
                            preprocessors=[sp, mp, iap],
                            classes=2)
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 64)

# score the non-cropped predictions, mirroring Example #1
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()
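
The snippet imports `CropPreprocessor`, `progressbar`, and `numpy` but cuts off before using them. A sketch of the 10-crop (over-sampling) evaluation those imports point to; it assumes `cp.preprocess` returns ten 227x227 crops per image and that `HDF5DataGenerator.generator` accepts a `passes` argument:

# rebuild the generator without resizing so CropPreprocessor sees the
# full images; only mean subtraction is applied up front
testGen = HDF5DataGenerator(config.TEST_HDF5, 64,
                            preprocessors=[mp], classes=2)
predictions = []

widgets = ["Evaluating: ", progressbar.Percentage(), " ",
           progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=testGen.numImages // 64,
                               widgets=widgets).start()

# make a single pass over the test set
for (i, (images, labels)) in enumerate(testGen.generator(passes=1)):
    for image in images:
        # generate the ten crops and convert each one to a Keras array
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops],
                         dtype="float32")

        # average the predictions across the crops
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))

    pbar.update(i)

pbar.finish()
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1 (10-crop): {:.2f}%".format(rank1 * 100))
testGen.close()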
Example #4
# imports this snippet relies on but did not include; the
# AspectAwarePreprocessor module path is assumed
from pyImageSearch.preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor
from pyImageSearch.preprocessing.imagetoarrayprocessor import ImageToArrayProcessor
from pyImageSearch.preprocessing.simpleProcessor import SimplePreprocessor
from pyImageSearch.dataset.simpleDatasetLoader import SimpleDatasetLoader
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from imutils import paths
import numpy as np
import argparse
import os

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True)
ap.add_argument("-m", "--model", required=True)
args = vars(ap.parse_args())

aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

print("[INFO] loading images")
imagePaths = list(paths.list_images(args["dataset"]))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]
print("len of images " , len(imagePaths))
print("class names ",classNames)
# initialize the preprocessing steps
sp = SimplePreprocessor(224,224)
aap = AspectAwarePreprocessor(224,224)
iap = ImageToArrayProcessor()

sdl = SimpleDatasetLoader(preprocessor=[sp, iap])
(data, label) = sdl.load(imagePaths, verbose=500)
print("[INFO] data shape: {}".format(data.shape))
data = data.astype("float32") / 255.0



# partition our data into training and test sets
(trainX, testX, trainY, testY) = train_test_split(data, label, test_size=0.25,
    random_state=42)

# convert the labels from integers to vectors
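
The snippet ends at this comment. The conventional completion, assuming scikit-learn's LabelBinarizer handles the one-hot encoding:

from sklearn.preprocessing import LabelBinarizer

# one-hot encode the integer labels for training and evaluation
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)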