Example #1
results = parser.parse_args()

img_path = results.img_path
checkpoint_path = results.checkpoint_path
top_k = results.top_k
category_names = results.category_names
gpu = results.switch

# only use the GPU when it was requested and is actually available
if gpu and torch.cuda.is_available():
    device = 'gpu'
    print('gpu On')
else:
    device = 'cpu'
    print('gpu Off')

model = utility_functions.load_checkpoint(checkpoint_path)
processed_image = utility_functions.process_image(img_path)
probs, classes = utility_functions.predict(processed_image, model, top_k,
                                           device)
# Label mapping
cat_to_name = utility_functions.labeling(category_names)

labels = []
for class_index in classes:
    labels.append(cat_to_name[str(class_index)])

# Print the predicted class names and their probabilities
print('Name of class: ', labels)
print('Probability: ', probs)
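This snippet begins at parse_args(), so the parser itself is not shown. Below is a minimal sketch of an argument parser that would supply the attributes read above (img_path, checkpoint_path, top_k, category_names, switch); the flag names, defaults, and help strings are assumptions rather than code from the original project.

import argparse

# hypothetical parser matching the attributes the snippet reads off `results`
parser = argparse.ArgumentParser(description='Predict the class of a flower image')
parser.add_argument('img_path', help='path to the input image')
parser.add_argument('checkpoint_path', help='path to the saved model checkpoint')
parser.add_argument('--top_k', type=int, default=5,
                    help='number of top classes to report')
parser.add_argument('--category_names', default='cat_to_name.json',
                    help='JSON file mapping class labels to flower names')
parser.add_argument('--gpu', dest='switch', action='store_true',
                    help='Set a switch to use GPU')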
Example #3
        print("")
        print("Could not load the provided saved model. Please try again.")
        exit()

    # check that top_k input is valid
    if not isinstance(top_k, int) or top_k <= 0:
        print("")
        print("top_k needs to be an integer value greater than 0")
        exit()

    # execute the image prediction
    try:
        probs, classes = predict(image_path, reloaded_keras_model, top_k)
    except:
        print("")
        print("Could not load predict on the provided image file.")
        exit()

    # load and read the image flower json to map index to class name
    try:
        with open(category_names, "r") as f:
            class_names = json.load(f)
    except:
        print("")
        print("Could not load the provided json file.")
        exit()

    df_sorted = sorted_class_prob(class_names, classes, probs, False)
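The helper sorted_class_prob is not defined in this excerpt. A rough sketch of a function matching that call, assuming class_names is the label-to-name dictionary loaded from the JSON file and the final boolean controls the sort order:

import pandas as pd

def sorted_class_prob(class_names, classes, probs, ascending=False):
    # map each predicted class label to its flower name, falling back to the raw label
    names = [class_names.get(str(c), str(c)) for c in classes]
    # pair names with probabilities and sort (descending when ascending=False)
    df = pd.DataFrame({'class name': names, 'probability': probs})
    return df.sort_values('probability', ascending=ascending).reset_index(drop=True)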
Example #4
path_image = pa.input_img
number_of_outputs = pa.top_k
power = pa.gpu
input_img = pa.input_img
path = pa.checkpoint

training_loader, testing_loader, validation_loader = utility_functions.load_data(
)

model_conv, criterion, optimizer_conv = utility_functions.nn_setup(
    structure, dropout, hidden_layer1, lr, power)
# replace the freshly initialised network with the trained one from the checkpoint
model_conv = utility_functions.load_checkpoint(path)

with open('cat_to_name.json', 'r') as json_file:
    cat_to_name = json.load(json_file)

probabilities = utility_functions.predict(path_image, model_conv,
                                          number_of_outputs, power)

labels = [
    cat_to_name[str(index + 1)] for index in np.array(probabilities[1][0])
]
probability = np.array(probabilities[0][0])

for label, prob in zip(labels, probability):
    print("{} with a probability of {}".format(label, prob))

print("Prediction")
Example #5
# make the dictionary from category(file) name to flower name
with open(args["category_names"]) as f:
    cat_to_name = json.load(f)

# create the torch.device object
if args["gpu"] == True:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")

# load the file and recreate the model
model, cat_to_idx = model_functions.load_checkpoint(args["checkpoint_path"],
                                                    device)

# make the dictionary from idx (of output) to the category(file name)
idx_to_cat = {val: key for key, val in cat_to_idx.items()}

# transfer the model to the GPU if available
model = model.to(device)

# predict the top k probability and categories
top_p, top_class = utility_functions.predict(args["image_path"], model, device,
                                             args["topk"])

# print out the prediction in the console
for i in range(args["topk"]):
    label = cat_to_name[idx_to_cat[top_class[0][i].item()]]
    clas = top_class[0][i].item()
    prob = top_p[0][i].item()
    print("No.{} prediction : {} [{}] ({}%)".format(i + 1, label, clas, prob))
Example #6
    # load model checkpoint
    try:
        model, model_class_to_idx = load_checkpoint(
            checkout_point_dir + "/checkpoint.pth", model)
    except:
        print("Can not find provided model checkpoint.  Please try again.")
        exit()

    # predicts the flower image's class
    try:
        if gpu == "gpu":
            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            device = "cpu"
        probs, classes = predict(image_path, device, model, top_k)
    except:
        print("Can not find provided image or provided top K is invalid.  "
              "Please try again.")
        exit()

    # prints the most probable flower classes and corresponding probabilities
    try:
        df = align_class_prob(category_names, model_class_to_idx, classes,
                              probs)
        print(df)
    except:
        print(
            "Cannot find the provided category names file, or it is invalid. "
            "Please try again.")
        exit()
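align_class_prob is not shown in this excerpt either. Based on how it is called, it presumably reads the category-names JSON, inverts class_to_idx, and pairs each predicted class with its probability; a small sketch under those assumptions:

import json
import pandas as pd

def align_class_prob(category_names, class_to_idx, classes, probs):
    # load the mapping from category label to flower name
    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)
    # invert class_to_idx so model output indices can be mapped back to labels
    idx_to_class = {idx: cls for cls, idx in class_to_idx.items()}
    names = [cat_to_name[idx_to_class[idx]] for idx in classes]
    return pd.DataFrame({'flower name': names, 'probability': probs})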