def choiceResult(dbName, imageName, temp=""):
    print "Choice --- Executing."
    # build the path of the test image, optionally inside a sub-directory
    if temp != "":
        testImage = dbName + "/" + imageName + "/" + temp
    else:
        testImage = dbName + "/" + imageName
    print testImage
    # path separator used when parsing the result and test image paths below
    if platform.system() == "Windows":
        look = "\\"
    else:
        look = "/"
    choiceImage = ""
    #do a prediction based on algorithm
    if(algoName == "eigenface"):
        result = utility.predict(testing[testing_answer.index(testImage)], training_answer, W, mu, projections)
        choiceImage = "eigenface_yale_choice"
    elif(algoName == "fisherface"):
        result = utility.predict(testing[testing_answer.index(testImage)], training_answer, W, mu, projections)
        choiceImage = "fisherface_yale_choice"
    elif(algoName == "sift"):
        result = sift.localPredict(descriptors_testing[testing_answer.index(testImage)], training_answer, descriptors_training)
        choiceImage = "sift_yale_choice"
    elif(algoName == "lem"):
        result = lem.predict_hausdroff(testing_corners[testing_answer.index(testImage)], training_corners, training_answer)
        choiceImage = "lem_yale_choice"

    #test the image for success based on databases
    #defaults keep heading/status defined for databases not handled below (e.g. "yale B")
    status = "miss"
    heading = ""
    if(database == "yale A"):
        heading=result[result.find(look)+len(look):result.find('.')]+" MATCHED WITH "+ testImage[testImage.find(look)+len(look):testImage.find('.')]
        if(result[:result.find('.')] == testImage[:testImage.find('.')]):
            print "HIT!"
            status="hit"
        else:
            status="miss"
        
    elif(database == "orl"):
        heading=result[result.find(look)+len(look):result.find(look,11)]+" MATCHED WITH "+ testImage[testImage.find(look)+len(look):testImage.find(look,11)]
        if(result[:result.find(look,12)] == testImage[:testImage.find(look,12)]):
            print "HIT"
            status="hit"
        else:
            status="miss"
    elif(database == "yale B"):
        pass

    #plot the visuals
    utility.choicePlot(testImage, result, choiceImage)
    os.system("convert "+choiceImage+" "+choiceImage+".png")
    os.system("rm static/images/"+choiceImage+".png")
    os.system("mv "+choiceImage+".png static/images/"+choiceImage+".png")
    return render_template("choiceResult.html", filename = "/static/images/"+choiceImage+".png", status=status, heading=heading)
Example #2
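# Load a saved model and the category-name mapping, then print the top-k class probabilities for the input image.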
def main():
    user_args = get_input_args()

    model, class_labels = utility.load_saved_model()
    cat_to_name = utility.load_json(user_args.category_names)
    probs, labels, _ = utility.predict(user_args.input, model, user_args.top_k,
                                       cat_to_name, class_labels,
                                       user_args.gpu)

    print("------------------Processing------------------")
    for i in range(len(probs)):
        result_label = labels[i]
        result_prob = probs[i] * 100
        print("The probability of the {} is {:.2f} %.".format(
            result_label, result_prob))
def main():

    args = init_argparse()

    with open('cat_to_name.json', 'r') as f:
        cat_to_name = json.load(f)

    # load the trained model from its checkpoint, then move it to the requested device
    model = utility.load_checkpoint('save_dir')
    model.to(args.gpu)

    # preprocess the input image and run the prediction
    # (the argument attribute names and the utility.predict signature are assumed
    # from how the same helpers are used in the other examples)
    image = utility.process_image(args.input_file)
    top_f_probability, top_f_classes = utility.predict(image, model, args.top_k, args.gpu, cat_to_name)

    # print the top classes with their probabilities
    utility.probability(top_f_probability, top_f_classes)
Example #4
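# Rebuild a model from pickled weights and report its accuracy on the MNIST test set.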
def test(filename):

    fp = open(filename, 'rb')
    data = pickle.load(fp)
    fp.close()

    weights = data.get('weights')
    weights = helper.lists_toarray(weights)

    model = utility.getNewModel()
    model.set_weights(weights)

    X, Y = load_datasets("../",
                         ["mnist-test-images.npy", "mnist-test-labels.npy"])

    pred = utility.predict(model, X)
    print(np.argmax(pred, axis=1), Y)
    print(sum(np.argmax(pred, axis=1) == Y) / Y.shape[0])
Example #5
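# Prediction entry point: load a checkpoint and category mapping, preprocess the image, and print the top classes.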
def main():
    print('Predict')
    in_arg = get_input_args()
    print("Command Line Arguments:\n input =", in_arg.input, "\n checkpoint =",
          in_arg.checkpoint, "\n top_k =", in_arg.top_k, "\n category_names =",
          in_arg.category_names, "\n gpu =", in_arg.gpu)

    # Load checkpoint
    model, checkpoint = load_checkpoint(in_arg.checkpoint)

    # Load category mapping dictionary
    cat_to_name = category_mapping(in_arg.category_names)

    # Process the image to return a transposed_image
    transposed_image = process_image(in_arg.input)

    # Get the prediction for an image file.
    top_classes = predict(transposed_image, model, in_arg.top_k, cat_to_name,
                          in_arg.gpu)
    # Print the chart with the top classes and probabilities.
    print(top_classes)
def eigenTest(self):
    result = utility.predict(self.testing[self.testing_answer.index(self.testImage)], self.training_answer, self.W, self.mu, self.projections)
    print result, self.testImage
    if(result[6:9] == self.testImage[6:9]):
        print "HIT!"
    if(self.db == "yale"):
        utility.choicePlot("yalefaces/"+self.testImage, "yalefaces/"+result, self.choiceImage)
        print "Finished plotting image"
    elif(self.db == "orl"):
        utility.choicePlot("orl_faces/"+self.testImage, "orl_faces/"+result, self.choiceImage)
    elif(self.db == "db3"):
        pass
    self.choiceResult = tk.Frame(self)
    self.choiceResult.place(in_=self.algorithm, x=0, y=0, relwidth=1, relheight=1)
    choiceImage = ImageTk.PhotoImage(file=self.choiceImage)
    labelChoice = tk.Label(self.choiceResult, image=choiceImage)
    labelChoice.image = choiceImage
    labelChoice.pack(side="top")
    print "should happen"
    choiceBack = tk.Button(self.choiceResult, text="Back", command=lambda: self.algorithm.lift())
    choiceBack.pack(side="top")
    self.choiceResult.lift()

def check_arguments():
    """This function checks the user inputs before using predict function"""
    if not path.isfile(image_path):
        print(image_path, "file cannot be found!")
        sys.exit()
    if not path.isfile(model_path):
        print(model_path, "model cannot be found!")
        sys.exit()
    if not path.isfile(labels_dic_file):
        print(labels_dic_file, "file cannot be found!")
        sys.exit()
    if topk <= 0:
        print("The topk cannot be less than 1")
        sys.exit()


# checking user inputs
check_arguments()

# loading labels json file as a dictionary
labels = utility.file_to_dic(labels_dic_file)

#_model, _model_info = utility.load_model()
#print(_model)
#print(_model_info)

# predicts the image and prints out the results
prediction = utility.predict(image_path, model_path, labels, topk, device)             
print(prediction)
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
import PIL
from PIL import Image
import numpy as np
import utility
import argparse
import json

ap = argparse.ArgumentParser(description='predict class of flower')
ap.add_argument('path_img', default='aipnd-project/flowers/test/10/image_07117.jpg', nargs='*', action="store", type = str)
ap.add_argument('checkpoint', default='checkpoint.pth', nargs='*', action="store", type = str)
ap.add_argument('--top_k', default=5, dest="top_k", action="store", type=int)
ap.add_argument('--labels', dest="labels", action="store", default='cat_to_name.json')
ap.add_argument('--gpu', default="gpu", nargs = '*', action="store", dest="gpu")

pa = ap.parse_args()
path_image = pa.path_img
path = pa.checkpoint

with open(pa.labels, 'r') as json_file:
    cat_to_name = json.load(json_file)

model = utility.load_checkpoint(path)

# run the prediction; the (probabilities, classes) return shape is assumed
# from how utility.predict is used in the other examples
probs, classes = utility.predict(path_image, model, topk=pa.top_k)
print(probs)
print(classes)
                   
Example #9
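        # Webcam inference loop: preprocess each frame, run the ONNX face detector, then align, save, and normalize the first detected face.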
        while True:
            # read video frame
            ret, raw_img = cap.read()
            # process frames
            if raw_img is not None:
                h, w, _ = raw_img.shape
                img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, (640, 480))
                img_mean = np.array([127, 127, 127])
                img = (img - img_mean) / 128
                img = np.transpose(img, [2, 0, 1])
                img = np.expand_dims(img, axis=0)
                img = img.astype(np.float32)

                confidences, boxes = ort_session.run(None, {input_name: img})
                boxes, labels, probs = predict(w, h, confidences, boxes, 0.7)

                # if face detected
                if boxes.shape[0] > 0:
                    x1, y1, x2, y2 = boxes[0, :]
                    gray = cv2.cvtColor(raw_img, cv2.COLOR_BGR2GRAY)
                    aligned_face = fa.align(
                        raw_img, gray,
                        dlib.rectangle(left=x1, top=y1, right=x2, bottom=y2))
                    aligned_face = cv2.resize(aligned_face, (112, 112))

                    cv2.imwrite(f'faces/tmp/{label}_{frame_count}.jpg',
                                aligned_face)

                    aligned_face = aligned_face - 127.5
                    aligned_face = aligned_face * 0.0078125
Example #10
                  default="False",
                  help='use gpu to speed up inference')

args = args.parse_args()
image_path = args.input_img
top_k = args.top_k
gpu = args.gpu
checkpoint = args.checkpoint
category_names = args.category_names
image_path = image_path[1]
path_list = image_path.split('/')

import json
with open(category_names, 'r') as f:
    flower_to_name = json.load(f)
flower_species = len(flower_to_name)
target_class = flower_to_name[path_list[-2]]  # To get the category
print("Target class: " + target_class)

model = u.load_checkpoint(checkpoint)
value, kclass = u.predict(image_path, model, gpu, top_k)

idx_to_class = {model.class_to_idx[i]: i
                for i in model.class_to_idx.keys()}  # dict comprehension
classes = [flower_to_name[idx_to_class[c]]
           for c in kclass]  # list comprehension

data = {'Predicted Class': classes, 'Probability': value}
dataframe = pd.DataFrame(data)
print(dataframe.to_string(index=False))
Example #11
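# Compare projected stochastic gradient descent on two projection sets (hypercube and ball) and report held-out accuracy.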
params = ['hypercube', 'ball']

for i in range(len(params)):

    print('-----scenario', i + 1, '-----')
    param = params[i]
    if param == 'hypercube':
        samples[labels == 1] = hypercube_proj(samples_pos)
        samples[labels == -1] = hypercube_proj(samples_neg)
    else:
        samples[labels == 1] = ball_proj(samples_pos)
        samples[labels == -1] = ball_proj(samples_neg)

    samples_train, samples_test, labels_train, labels_test = \
        train_test_split(samples, labels, test_size=400, random_state=0)
    w_initial = np.zeros(dim + 1)

    pl.figure(1, (5, 4))
    pl.scatter(samples_train[:, 0], samples_train[:, 1], c=labels_train)
    pl.show()
    T = len(samples_train)
    w_hat, _ = stochastic_grad_decent(w_initial, samples_train, labels_train, \
            alpha=0.1, max_iterations=T, proj=param)
    print(w_hat)
    predicted_labels = predict(w_hat, samples_test)
    # print(predicted_labels)
    # print(labels_train)
    t_precision = predicted_labels[np.where(
        predicted_labels == labels_test)].size / float(labels_test.size) * 100
    print('Accuracy on the test set: %s%%' % round(t_precision, 2))
Example #12
                    help='path to the image to be classified')
parser.add_argument('checkpoint',
                    type=str,
                    help='path to fetch the saved checkpoint')
parser.add_argument('--top_k',
                    type=int,
                    default=3,
                    help='top K most likely cases')
parser.add_argument('--category_names',
                    type=str,
                    default='cat_to_name.json',
                    help='images mapping to real names')
parser.add_argument('--gpu',
                    type=bool,
                    default=True,
                    help='Whether or not to use gpu or cpu')
args = parser.parse_args()

with open(args.category_names, 'r') as f:
    cat_to_name = json.load(f)  #read the json file into a dict

img = args.input_image_path
model = load_checkpoint(args.checkpoint)
device = torch.device(
    "cuda" if args.gpu and torch.cuda.is_available() else "cpu")
probabilities, classes = predict(img, model.to(device), args.top_k)
category_names = [cat_to_name[str(cl)] for cl in classes]
for name, prob in zip(category_names, probabilities):
    print("The Model predicts {} with a probability of {}%\n".format(
        name, int(prob * 100)))
import json

parser = argparse.ArgumentParser()
parser.add_argument('input', type=str)
parser.add_argument('--checkpoint', default='icp.pth', type=str)
parser.add_argument('--top_k', default=5, type=int)
parser.add_argument('--mapping', default='cat_to_name.json', type=str)
parser.add_argument('--gpu', action='store_true', default=False)
args = parser.parse_args()

# Load trained model
model = load_model(args.checkpoint)
with open(args.mapping, 'r') as mapping_file:
    model.class_to_idx = json.load(mapping_file)

# Convert picture to model input
picture_path = args.input
model_input = tf.from_numpy(process_image(picture_path))

# Get prediction from model
if args.gpu:
    device = 'cuda'
else:
    device = 'cpu'
model.to(device)
results = predict(model, model_input, top_k=args.top_k, device=device)

# Display results
for name, probability in results:
    print('{}: {}%'.format(name, round(float(probability) * 100, 2)))
data_dir = 'flowers'
test_dir = data_dir + '/test'
img_path = results.img_path if results.img_path is not None else (
    test_dir + '/' + '10/image_07090.jpg')
checkpoint_path = results.checkpoint_path if results.checkpoint_path is not None else 'checkpoint.pth'
cat_to_name = results.cat_to_name if results.cat_to_name is not None else 'cat_to_name.json'
# only use the GPU when it was requested and CUDA is actually available
device = util.devices[
    'gpu'] if results.device == 'gpu' and torch.cuda.is_available() else 'cpu'
top_k = results.top_k if results.top_k is not None else 5

print('-------------------------')
print('img_path used: ', img_path)
print('checkpoint_path used: ', checkpoint_path)
print('device used: ', device)
print('top_k Used: ', top_k)
print('cat_to_name Used: ', cat_to_name)
#-----------------------------------------------------------
model = helper.load_checkpoint('checkpoint.pth')
#print(model)

img_label = img_path.split(test_dir + '/')[1].split('/')[0]
cat_to_name = helper.load_cat_to_name(cat_to_name)
title = cat_to_name[img_label]
print('-------------------------')
print('flower category = ', title)

probs, labs, flowers = util.predict(img_path, model, cat_to_name)
print('probs = ', probs)
print('labs = ', labs)
print('flowers = ', flowers)
parse.add_argument('--category_name',
                   action='store',
                   dest='category_name',
                   default='cat_to_name.json')
parse.add_argument(
    '--gpu',
    action='store',
    dest='gpu',
    default='cpu',
    help='cpu used by default, to use GPU please specify --gpu "cuda"')

inputArg = parse.parse_args()

imagePath = inputArg.image_loc
checkpoint = inputArg.check_loc
topK = inputArg.top_k
device = inputArg.gpu
categoryName = inputArg.category_name

with open(categoryName, 'r') as f:
    cat_to_name = json.load(f)

model = loadModel(checkpoint, device)
probs, flowerList = predict(imagePath, model, topK, device)

flowerNames = [cat_to_name[flower] for flower in flowerList]
for name, prob in zip(flowerNames, probs):
    print(name)
    print(prob)