# --- Training preamble: parse CLI args, load and preprocess the dataset,
# --- split it, and one-hot encode the labels.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

# grab the list of images that we'll be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the image preprocessors: resize every image to 32x32,
# then convert it to a Keras-compatible array
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
	test_size=0.25, random_state=42)

# convert the labels from integers to one-hot vectors.
# BUG FIX: fit the binarizer ONCE on the training labels and reuse it for
# the test labels. The original fitted a second, independent binarizer on
# testY, which can produce an inconsistent class/column ordering (e.g. if
# a class is missing from the test split), silently corrupting evaluation.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# initialize the optimizer and model
print("[INFO] compiling model...")
# --- Prediction script: sample random images, run them through the
# --- pre-trained network, and display each with its predicted label.
# BUG FIX: args["model"] is read below, but the only visible add_argument
# call defines --dataset; without --model, args["model"] raises KeyError.
ap.add_argument("-m", "--model", required=True, help="path to pre-trained model")
args = vars(ap.parse_args())

# class labels for the animals dataset; index order must match the
# ordering the network was trained with
classLabels = ["cat", "dog", "panda"]

# randomly sample 10 image paths from the dataset.
# BUG FIX: use the --dataset CLI argument (the intended version was left
# commented out) instead of the hard-coded "datasets/animals" path.
print("[INFO] sampling images...")
imagePaths = np.array(list(paths.list_images(args["dataset"])))
print(len(imagePaths))
idxs = np.random.randint(0, len(imagePaths), size=(10,))
imagePaths = imagePaths[idxs]

# preprocess the sampled images the same way as during training:
# resize to 32x32, convert to array, scale pixels to [0, 1]
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths)
data = data.astype("float") / 255.0

# load the serialized network and predict a class index per image
print("[INFO] loading pre-trained network...")
model = load_model(args["model"])
print("[INFO] predicting....")
preds = model.predict(data, batch_size=32).argmax(axis=1)

# draw the predicted label on each sampled image and display it,
# waiting for a keypress between images
for (i, imagePath) in enumerate(imagePaths):
	image = cv2.imread(imagePath)
	cv2.putText(image, "Label: {}".format(classLabels[preds[i]]),
		(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
	cv2.imshow("Image", image)
	cv2.waitKey(0)