示例#1
0
    "--plain",
    required=False,
    help="path to the plain image",
    default="/home/joheras/Escritorio/Research/Fungi/FungiImages/plain.jpg")
# Parse the CLI arguments and load the experiment configuration.
args = vars(ap.parse_args())
conf = Conf(args["conf"])

# Load the control, plain, and query images; normalize each to a common
# 100x100 size (used later for blending/range construction).
control = cv2.imread(args["control"])
control = cv2.resize(control, (100, 100))
plain = cv2.imread(args["plain"])
plain = cv2.resize(plain, (100, 100))
# Keep the full-resolution query image around: the CNN feature extractor
# needs the original pixels (it resizes to the network's fixed input size
# itself), while the 100x100 copy is used for the blending step below.
rawImage = cv2.imread(args["image"])
image = cv2.resize(rawImage, (100, 100))

# Deserialize the label encoder. Pickle data is binary, so open in "rb"
# mode, and use a context manager so the handle is closed (the original
# leaked the file object and read it in text mode).
with open(conf["label_encoder_path"], "rb") as f:
    le = cPickle.loads(f.read())
oe = GoogleNetExtractor()
# Describe the query image with GoogleNet features; reuse the image that
# was already loaded above instead of re-reading it from disk.
features = oe.describe(
    np.array([
        dataset.prepare_image(rawImage, conf["googlenet_fixed_size"])
    ],
             dtype="float"))

# Deserialize the trained classifier, again in binary mode with the
# handle properly closed.
with open(conf["classifier_path"] + conf["model"] + ".cpickle", "rb") as f:
    model = cPickle.loads(f.read())
# Class probabilities for the single query image; argmax picks the most
# probable encoded class, which is then mapped back to its label string.
prediction = model.predict_proba(np.atleast_2d(features))[0]
prediction = le.inverse_transform(np.argmax(prediction))

# Construct the different ranges depending on the classification
if prediction == '-':
    combinations = [(control * float(100 - n) / 100 +
示例#2
0
# Shuffle the image paths with a fixed seed so the later train/test split
# sees the classes in a random (but reproducible) order.
random.seed(42)
random.shuffle(imagePaths)

# Derive each image's class label from its parent directory name (the
# dataset is laid out as {base}/{label}/{file}) and fit the label encoder
# on the resulting list of labels.
print("[INFO] encoding labels...")
le = LabelEncoder()
labelNames = [path.split("/")[-2] for path in imagePaths]
le.fit(labelNames)

# Spin up the GoogleNet feature extractor together with the indexer that
# persists the extracted feature vectors to disk.
print("[INFO] initializing network...")
oe = GoogleNetExtractor()
oi = GoogleNetIndexer(conf["features_path"], estNumImages=len(imagePaths))
print("[INFO] starting feature extraction...")
# loop over the image paths in batches
for (i, paths) in enumerate(
        dataset.chunk(imagePaths, conf["googlenet_batch_size"])):
    # load the set of images from disk and describe them
    (labels, images) = dataset.build_batch(paths, conf["googlenet_fixed_size"])
    controlImages = dataset.build_batch_control(paths,
                                                conf["googlenet_fixed_size"])
    # Extract GoogleNet feature vectors for both the batch images and
    # their corresponding control images.
    features = oe.describe(images)
    featuresControl = oe.describe(controlImages)
    # NOTE(review): featuresControl is computed but never used in the
    # visible code — the commented-out line below suggests the image and
    # control features were once summed; confirm whether the control
    # extraction (and its cost) is still needed downstream.
    #features = [x+y for (x,y) in zip(featuresImages,featuresControl)]

    # loop over each set of (label, vector) pair and add them to the indexer
示例#3
0
# Build the command-line interface: both a configuration file and an
# input image are mandatory.
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--conf", required=True,
                    help="path to configuration file")
parser.add_argument("-i", "--image", required=True,
                    help="path to the image to predict")
args = vars(parser.parse_args())

# load the configuration, label encoder, and classifier
print("[INFO] loading model...")
conf = Conf(args["conf"])
# Pickle data is binary: open the files in "rb" mode and use context
# managers so the handles are closed (the originals leaked the file
# objects and read them in text mode).
with open(conf["label_encoder_path"], "rb") as f:
    le = cPickle.loads(f.read())
with open(conf["classifier_path"] + conf["model"] + ".cpickle", "rb") as f:
    model = cPickle.loads(f.read())

# Path of the single image to classify.
imagePath = args["image"]

# Extract GoogleNet features for the single input image and classify it.
oe = GoogleNetExtractor()
(labels, images) = dataset.build_batch([imagePath],
                                       conf["googlenet_fixed_size"])
features = oe.describe(images)
for (label, vector) in zip(labels, features):
    # Predict the encoded (integer) class for this feature vector.
    prediction = model.predict(np.atleast_2d(vector))[0]
    print(prediction)
    # Map the encoded prediction back to its original label string.
    # NOTE(review): recent scikit-learn versions require a 1-D array here
    # (e.g. le.inverse_transform([prediction])[0]) — confirm the pinned
    # scikit-learn version supports the scalar form.
    prediction = le.inverse_transform(prediction)
    print("[INFO] predicted: {}".format(prediction))