Example 1
# import the necessary packages (Conf, OverfeatExtractor, dataset, and paths are
# project-specific helpers, assumed to be importable from the surrounding project)
from sklearn.metrics import accuracy_score
import numpy as np
import argparse
import cPickle

# construct the argument parser and parse the command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to configuration file")
args = vars(ap.parse_args())

# load the configuration, label encoder, and classifier
print("[INFO] loading model...")
conf = Conf(args["conf"])
le = cPickle.loads(open(conf["label_encoder_path"], "rb").read())
model = cPickle.loads(open(conf["classifier_path"] + conf["model"] + ".cpickle", "rb").read())

# grab the paths to the evaluation images
imagePaths = list(paths.list_images(conf["evaluation_path"]))

# initialize the Overfeat extractor and describe both the standard and the
# control versions of each evaluation image
oe = OverfeatExtractor()
(truelabels, images) = dataset.build_batch(imagePaths, conf["overfeat_fixed_size"])
features = oe.describe(images)
controlImages = dataset.build_batch_control_evaluation(imagePaths, conf["overfeat_fixed_size"])
featuresControl = oe.describe(controlImages)

# combine the standard and control feature vectors
features = [x + y for (x, y) in zip(features, featuresControl)]

# keep only the portion of each ground-truth label before the ':' separator
truelabels = [x[0:x.find(":")] for x in truelabels]
labels = []

# loop over the ground-truth labels and feature vectors
for (label, vector) in zip(truelabels, features):
    # classify the feature vector
    prediction = model.predict(np.atleast_2d(vector))[0]
    print(prediction)

    # decode the integer prediction back to its class name
    prediction = le.inverse_transform(prediction)
    labels.append(prediction)
    print("[INFO] predicted: {}, true label: {}".format(prediction, label))

# show the overall classification accuracy
print(accuracy_score(truelabels, labels))
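
The `Conf` object loaded at the top of this example is project-specific code that is not shown here. A minimal sketch, assuming `Conf` is simply a read-only dictionary backed by a JSON configuration file (which would explain the `conf["label_encoder_path"]`-style lookups above), might look like this:

# minimal sketch of an assumed JSON-backed configuration helper
import json

class Conf(dict):
    def __init__(self, confPath):
        # read the JSON configuration file and expose its keys/values as a dict
        conf = json.loads(open(confPath).read())
        super(Conf, self).__init__(conf)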
Example 2
# encode the class labels, which are taken from the parent directory name of
# each image path
print("[INFO] encoding labels...")
le = LabelEncoder()
le.fit([p.split("/")[-2] for p in imagePaths])

# initialize the GoogleNet extractor and the GoogleNet indexer
print("[INFO] initializing network...")
oe = GoogleNetExtractor()
oi = GoogleNetIndexer(conf["features_path"], estNumImages=len(imagePaths))
print("[INFO] starting feature extraction...")

# loop over the image paths in batches
for (i, paths) in enumerate(
        dataset.chunk(imagePaths, conf["googlenet_batch_size"])):
    # load the set of images from disk and describe them
    (labels, images) = dataset.build_batch(paths, conf["googlenet_fixed_size"])
    controlImages = dataset.build_batch_control(paths,
                                                conf["googlenet_fixed_size"])
    # describe both the standard and the control images
    features = oe.describe(images)
    featuresControl = oe.describe(controlImages)
    # features = [x + y for (x, y) in zip(features, featuresControl)]

    # loop over each set of (label, vector) pair and add them to the indexer
    for (label, vector) in zip(labels, features):
        oi.add(label, vector)

    # check to see if progress should be displayed
    if i > 0:
        oi._debug("processed {} images".format(
            (i + 1) * conf["googlenet_batch_size"]), msgType="[PROGRESS]")
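
The `dataset.chunk` generator that batches the image paths is also project-specific and not defined here. A minimal sketch of such a batching helper, assuming it simply yields successive fixed-size slices of the input list (the name and call signature are taken from how it is used above), could be:

# minimal sketch of an assumed batching generator: yields successive slices of
# at most batchSize items, with the final slice possibly smaller
def chunk(items, batchSize):
    for i in range(0, len(items), batchSize):
        yield items[i:i + batchSize]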