Exemplo n.º 1
0
def categoryOfFungiImage(image, imageControl, oe):
    """Classify a fungi image with the GoogleNet-with-control model.

    Parameters
    ----------
    image : the fungi image to classify (as read by OpenCV).
    imageControl : the control image for the same sample.
    oe : feature extractor exposing ``describe`` (GoogleNet-based here).

    Returns the decoded class label predicted by the stored classifier.
    """
    # load the configuration, label encoder, and classifier
    print("[INFO] loading model...")
    conf = Conf(
        "/home/joheras/pythonprojects/api/cv_api/fungi_classification/googlenetwithcontrol/conf/fungi.json"
    )
    # Read the pickles in binary mode inside context managers: the original
    # used open(...).read() in text mode and leaked both file handles.
    with open(conf["label_encoder_path"], "rb") as encoderFile:
        le = cPickle.loads(encoderFile.read())
    with open(conf["classifier_path"] + conf["model"] + ".cpickle",
              "rb") as modelFile:
        model = cPickle.loads(modelFile.read())

    # Extract CNN features from both the sample and the control image.
    image = dataset.prepare_image(image, conf["googlenet_fixed_size"])
    features = oe.describe([image])
    imageControl = dataset.prepare_image(imageControl,
                                         conf["googlenet_fixed_size"])
    featuresControl = oe.describe([imageControl])
    # NOTE(review): featuresControl is computed but never used — the line
    # combining it with features was disabled upstream; kept for parity
    # with any side effects of oe.describe. Confirm whether the control
    # features should feed the prediction.
    prediction = model.predict(np.atleast_2d(features[0]))[0]
    prediction = le.inverse_transform(prediction)
    return prediction
Exemplo n.º 2
0
def categoryOfFungiImage(image, oe):
    """Classify a fungi image with the Overfeat model.

    Parameters
    ----------
    image : the fungi image to classify (as read by OpenCV).
    oe : feature extractor exposing ``describe`` (OverfeatExtractor).

    Returns the decoded class label predicted by the stored classifier.
    """
    # load the configuration, label encoder, and classifier
    #print("[INFO] loading model...")
    conf = Conf(
        "/home/joheras/pythonprojects/api/cv_api/fungi_classification/overfeat/conf/fungi.json"
    )
    # Read the pickles in binary mode inside context managers: the original
    # used open(...).read() in text mode and leaked both file handles.
    with open(conf["label_encoder_path"], "rb") as encoderFile:
        le = cPickle.loads(encoderFile.read())
    with open(conf["classifier_path"] + conf["model"] + ".cpickle",
              "rb") as modelFile:
        model = cPickle.loads(modelFile.read())

    # Removed an unused `imagePaths = list(paths.list_images(...))` line:
    # it walked the evaluation directory on every call without using the
    # result (wasted I/O, dead code).
    image = dataset.prepare_image(image, conf["overfeat_fixed_size"])
    features = oe.describe([image])
    prediction = model.predict(np.atleast_2d(features[0]))[0]
    prediction = le.inverse_transform(prediction)
    return prediction
Exemplo n.º 3
0
                help="path to configuration file",
                default="conf/fungi.json")
# Optional path to the image to classify; defaults to a sample control image.
ap.add_argument(
    "-i",
    "--image",
    required=False,
    help="path to the image to classify",
    default=
    "/home/joheras/Escritorio/Research/Fungi/FungiImages/decoloracion/fucsina/control.jpg"
)
args = vars(ap.parse_args())
# Project configuration (paths to encoder, classifier, sizes, layer number).
conf = Conf(args["conf"])

# Deserialize the label encoder from disk.
# NOTE(review): open(...).read() leaks the file handle and reads the pickle
# in text mode — consider `with open(path, "rb")` here.
le = cPickle.loads(open(conf["label_encoder_path"]).read())
# Extract Overfeat CNN features from the (resized) input image.
oe = OverfeatExtractor(conf["overfeat_layer_num"])
features = oe.describe(
    np.array([
        dataset.prepare_image(cv2.imread(args["image"]),
                              conf["overfeat_fixed_size"])
    ],
             dtype="float"))

# Deserialize the trained classifier selected by conf["model"].
model = cPickle.loads(
    open(conf["classifier_path"] + conf["model"] + ".cpickle").read())
# Take the most probable class and decode it back to its string label.
prediction = model.predict_proba(np.atleast_2d(features))[0]
prediction = le.inverse_transform(np.argmax(prediction))
# Draw the predicted label on the image and display it until a key press.
image = cv2.imread(args["image"])
cv2.putText(image, prediction, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
            (0, 255, 0), 3)
cv2.imshow("Image", image)
cv2.waitKey(0)
Exemplo n.º 4
0
    default="/home/joheras/Escritorio/Research/Fungi/FungiImages/plain.jpg")
args = vars(ap.parse_args())
# Project configuration (paths to encoder, classifier, sizes).
conf = Conf(args["conf"])

# Load the control, plain, and query images, normalised to 100x100 so they
# can later be blended pixel-wise.
control = cv2.imread(args["control"])
control = cv2.resize(control, (100, 100))
plain = cv2.imread(args["plain"])
plain = cv2.resize(plain, (100, 100))
image = cv2.imread(args["image"])
image = cv2.resize(image, (100, 100))

# Deserialize the label encoder from disk.
# NOTE(review): open(...).read() leaks the file handle and reads the pickle
# in text mode — consider `with open(path, "rb")` here.
le = cPickle.loads(open(conf["label_encoder_path"]).read())
# Extract GoogleNet CNN features from the (resized) query image.
oe = GoogleNetExtractor()
features = oe.describe(
    np.array([
        dataset.prepare_image(cv2.imread(args["image"]),
                              conf["googlenet_fixed_size"])
    ],
             dtype="float"))

# Deserialize the trained classifier selected by conf["model"].
model = cPickle.loads(
    open(conf["classifier_path"] + conf["model"] + ".cpickle").read())
# Take the most probable class and decode it back to its string label.
prediction = model.predict_proba(np.atleast_2d(features))[0]
prediction = le.inverse_transform(np.argmax(prediction))

# Construct the different ranges depending on the classification
# For the '-' class: blend control and plain images in 1% steps from
# 0% to 25% plain, producing candidate images plus their percentages.
if prediction == '-':
    combinations = [(control * float(100 - n) / 100 +
                     plain * float(n) / 100).astype("uint8")
                    for n in range(0, 26, 1)]
    combinationPercentage = [n for n in range(0, 26, 1)]
# (The '+' branch continues beyond this excerpt.)
elif prediction == '+':
Exemplo n.º 5
0
import argparse
import cPickle

import cv2
import numpy as np

from model.overfeat import OverfeatExtractor
from model.utils import Conf
from model.utils import dataset

# construct the argument parser and parse the command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=False,
                help="path to configuration file",
                default="conf/fungi.json")
ap.add_argument("-i", "--image", required=False,
                help="path to the image to classify",
                default="/home/joheras/Escritorio/Research/Fungi/FungiImages/decoloracion/fucsina/control.jpg")
ap.add_argument("-t", "--control", required=False,
                help="path to the control image",
                default="/home/joheras/Escritorio/Research/Fungi/FungiImages/decoloracion/fucsina/control.jpg")
args = vars(ap.parse_args())
# Project configuration (paths to encoder, classifier, sizes, layer number).
conf = Conf(args["conf"])

# Read the pickles in binary mode inside context managers: the original
# used open(...).read() in text mode and leaked both file handles.
with open(conf["label_encoder_path"], "rb") as encoderFile:
    le = cPickle.loads(encoderFile.read())
with open(conf["classifier_path"] + conf["model"] + ".cpickle",
          "rb") as modelFile:
    model = cPickle.loads(modelFile.read())

# Extract Overfeat CNN features from both the query and the control image.
oe = OverfeatExtractor(conf["overfeat_layer_num"])
features = oe.describe(
    np.array([dataset.prepare_image(cv2.imread(args["image"]),
                                    conf["overfeat_fixed_size"])],
             dtype="float"))
featuresControl = oe.describe(
    np.array([dataset.prepare_image(cv2.imread(args["control"]),
                                    conf["overfeat_fixed_size"])],
             dtype="float"))

# The two feature arrays are summed element-wise before classification
# (presumably matching how the classifier was trained — verify upstream),
# then the most probable class is decoded back to its string label.
prediction = model.predict_proba(np.atleast_2d(features + featuresControl))[0]
prediction = le.inverse_transform(np.argmax(prediction))

# Draw the predicted label on the query image and display it until a key press.
image = cv2.imread(args["image"])
cv2.putText(image, prediction, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
            (0, 255, 0), 3)
cv2.imshow("Image", image)
cv2.waitKey(0)