def main():
    parser = predict_args.get_args()
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__ + ' by ' + __author__)
    cli_args = parser.parse_args()

    # CPU
    device = torch.device("cpu")

    # GPU
    if cli_args.use_gpu:
        device = torch.device("cuda:0")

    with open(cli_args.categories_json, 'r') as f:
        cat_to_name = json.load(f)

    # model
    my_model = load_checkpoint(device, cli_args.checkpoint_file)

    top_prob, top_classes = predict(cli_args.path_to_image, my_model, cli_args.top_k)

    label = top_classes[0]
    prob = top_prob[0]


    for i in range(len(top_prob)):
        print(f"{cat_to_name[top_classes[i]]:<25} {top_prob[i]*100:.2f}%")
Example #2
def main():
    # load the cli args
    parser = predict_args.get_args()
    cli_args = parser.parse_args()

    use_cuda = cli_args.use_gpu

    # load categories
    with open(cli_args.categories_json, 'r') as f:
        cat_to_name = json.load(f)

    model_transfer = load_from_checkpoint(cli_args.checkpoint_file)
    if use_cuda:
        model_transfer.cuda()
    top_probs, top_classes = predict(cli_args.path_to_image, model_transfer, use_cuda, topk=cli_args.top_k)
    
    label = top_classes[0]
    prob = top_probs[0]

    print(f'Input\n---------------------------------')

    print(f'Image\t\t:\t{cli_args.path_to_image}')
    print(f'Model\t\t:\t{cli_args.checkpoint_file}')

    print(f'\nPrediction\n---------------------------------')

    print(f'Flower\t\t:\t{cat_to_name[label]}')
    print(f'Label\t\t:\t{label}')
    print(f'Probability\t:\t{prob*100:.2f}%')

    print(f'\nTop K\n---------------------------------')

    for i in range(len(top_probs)):
        print(f"{cat_to_name[top_classes[i]]:<25} {top_probs[i]*100:.2f}%")
Example #3
def main():
    args = predict_args.get_args()
    print(args)

    image_path = args.image_path
    model, optimizer = predict_utils.load_checkpoint(args.checkpoint,
                                                     args.arch, args.gpu)
    probs, classes = predict(image_path, model, args.top_k)
    cat_to_name = predict_utils.load_category_names(args.category_names)
    names = np.array([cat_to_name[i] for i in classes])
    for name, prob in zip(names, probs):
        print("{}: {}".format(name, str(prob)))
Example #4
def main():
    # load the cli args
    parser = predict_args.get_args()

    cli_args = parser.parse_args()

    # Start with CPU
    device = torch.device("cpu")

    # Requested GPU
    if cli_args.use_gpu:
        device = torch.device("cuda:0")

    
   
    # load the saved model
    model = load_model(cli_args.checkpoint_file)
    #print(model)
    
    
    
    # Run the prediction
    predict(cli_args.path_to_image, model, topk=5)

    top_probs, top_labels, top_flowers = predict(cli_args.path_to_image, model)
    print(top_probs)
    top_probs = top_probs[0].detach().numpy() #converts from tensor to nparray
    print(top_probs)

    
    flower_num = cli_args.path_to_image.split('/')[2]
    title_ = cat_to_name[flower_num] # Calls dictionary for name        

    
    label = top_labels[0]

    prob = top_probs[0]
    print(prob)
    # display the results
    print(f'Parameters\n---------------------------------')

    print(f'Image  : {cli_args.path_to_image}')
    print(f'Model  : {cli_args.checkpoint_file}')
    print(f'Device : {device}')

    print(f'\nPrediction\n---------------------------------')

    print(f'Flower      : {cat_to_name[flower_num]}')
    print(f'Label       : {label}')
    print(f'Probability : {prob*100:.2f}%')
Example #5
def main():
    """
        Image Classification Prediction
    """
    # load the cli args
    parser = predict_args.get_args()
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__ + ' by ' +
                        __author__)
    cli_args = parser.parse_args()

    # Start with CPU
    device = torch.device("cpu")

    # Requested GPU
    if cli_args.use_gpu:
        device = torch.device("cuda:0")

    # load categories
    with open(cli_args.categories_json, 'r') as f:
        cat_to_name = json.load(f)

    # load model
    chkp_model = load_checkpoint(device, cli_args.checkpoint_file)

    top_prob, top_classes = predict(cli_args.path_to_image, chkp_model,
                                    cli_args.top_k)

    label = top_classes[0]
    prob = top_prob[0]

    print(f'Parameters\n---------------------------------')

    print(f'Image  : {cli_args.path_to_image}')
    print(f'Model  : {cli_args.checkpoint_file}')
    print(f'Device : {device}')

    print(f'\nPrediction\n---------------------------------')

    print(f'Flower      : {cat_to_name[label]}')
    print(f'Label       : {label}')
    print(f'Probability : {prob*100:.2f}%')

    print(f'\nTop K\n---------------------------------')

    for i in range(len(top_prob)):
        print(f"{cat_to_name[top_classes[i]]:<25} {top_prob[i]*100:.2f}%")
Example #6
def main():
    parser = predict_args.get_args()
    kg_args = parser.parse_args()

    #start with CPU
    device = torch.device("cpu")
    #requested GPU
    if kg_args.gpu and torch.cuda.is_available():
        device = torch.device("cuda:0")
        
    #category names
    with open(kg_args.cat_names, 'r') as f:
        cat_to_name = json.load(f)

    # load model and run the prediction
    checkpoint_model = load_checkpoint(device, kg_args.checkpoint)
    top_prob, top_classes = predict(kg_args.image_path, checkpoint_model, kg_args.top_num)

    label = top_classes[0]
    prob = top_prob[0]
Example #7
def main():
    """
        Image Classification Prediction
        Student: Louis Bove
        Credit for assistance: https://github.com/cjimti/aipnd-project & https://github.com/DMells/Convolutional-Neural-Networks-Project
    """

    parser = predict_args.get_args()
    cli_args = parser.parse_args()

    # Load using CPU and then request GPU
    device = torch.device("cpu")
    if cli_args.use_gpu:
        device = torch.device("cuda:0")

    # Load needed categories and model
    with open(cli_args.categories_json, 'r') as f:
        cat_to_name = json.load(f)

    checkpoint_model = load_checkpoint(device, cli_args.checkpoint)

    top_prob, top_classes = predict(cli_args.path_to_image, checkpoint_model,
                                    cli_args.top_k)

    label = top_classes[0]
    prob = top_prob[0]

    print(f'Parameters\n')

    print(f'Current image - {cli_args.path_to_image}')
    print(f'Model used - {cli_args.checkpoint}')
    print(f'Device used - {device}')

    print(f'\nPrediction\n')

    print(f'Flower      : {cat_to_name[label]}')
    print(f'Label       : {label}')
    print(f'Probability : {prob*100:.2f}%')

    print(f'\nTop K\n')

    for i in range(len(top_prob)):
        print(f"{cat_to_name[top_classes[i]]:<25} {top_prob[i]*100:.2f}%")
Example #8
def main():
    parser = predict_args.get_args()
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__ + ' by ' + __author__)
    kg_args = parser.parse_args()

    #start with CPU
    device = torch.device("cpu")
    #requested GPU
    if kg_args.gpu and torch.cuda.is_available():
        device = torch.device("cuda:0")
        
    #category names
    with open(kg_args.cat_names, 'r') as f:
        cat_to_name = json.load(f)
    
    #load model
    checkpoint_model = load_checkpoint(device, kg_args.checkpoint)
    img = Image.open(kg_args.image_path)
    image = process_image(img)
    topk_probs, classes = predict(kg_args.image_path, checkpoint_model, kg_args.top_num)
    
    label = classes[0]
    prob = topk_probs[0]
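Example #8 calls a process_image() helper that is not reproduced on this page. The sketch below is a minimal, hypothetical version assuming the standard ImageNet-style preprocessing (resize, center crop, normalize) that the torchvision backbones used elsewhere on this page expect; the exact sizes and the NumPy return type are assumptions.

import numpy as np
from PIL import Image

def process_image(img):
    # Resize so the shorter side is 256 pixels, preserving aspect ratio.
    w, h = img.size
    if w < h:
        img = img.resize((256, int(256 * h / w)))
    else:
        img = img.resize((int(256 * w / h), 256))

    # Center-crop to 224x224, the input size the pretrained backbones expect.
    w, h = img.size
    left = (w - 224) // 2
    top = (h - 224) // 2
    img = img.crop((left, top, left + 224, top + 224))

    # Scale to [0, 1], normalize with ImageNet statistics, and reorder to CHW.
    np_img = np.array(img) / 255.0
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_img = (np_img - mean) / std
    return np_img.transpose((2, 0, 1))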
Example #9
def main():
    args = predict_args.get_args()

    device = torch.device("cpu")

    if args.use_gpu:
        device = torch.device("cuda:0")

    # load categories
    with open(args.categories_json, 'r') as f:
        cat_to_name = json.load(f)

    # load model
    model = load_checkpoint(device, args.checkpoint_file)

    top_prob, top_classes = predict(device, args.path_to_image, model,
                                    args.top_k)

    label = top_classes[0]
    prob = top_prob[0]

    print(f'Parameters\n---------------------------------')

    print(f'Image  : {args.path_to_image}')
    print(f'Model  : {args.checkpoint_file}')
    print(f'Device : {device}')

    print(f'\nPrediction\n---------------------------------')

    print(f'Flower      : {cat_to_name[label]}')
    print(f'Label       : {label}')
    print(f'Probability : {prob*100:.2f}%')

    print(f'\nTop K\n---------------------------------')

    for i in range(len(top_prob)):
        print(f"{cat_to_name[top_classes[i]]:<25} {top_prob[i]*100:.2f}%")
Example #10
# returns print statements of the trained model's best guesses

import json
import torch
from torch import nn
from torchvision import datasets, transforms, models
from collections import OrderedDict
from torch import optim
import predict_args
from PIL import Image
import numpy as np
import torch.nn.functional as F

parser = predict_args.get_args()
args = parser.parse_args()

with open(args.category_names, 'r') as f:
    cat_to_name = json.load(f)

def get_model():
    # Select a pretrained torchvision backbone based on the CLI flags.
    if args.vgg == 1:
        model = models.vgg11(pretrained=True)
    elif args.vgg == 2:
        model = models.vgg13(pretrained=True)
    elif args.vgg == 3:
        model = models.vgg16(pretrained=True)
    elif args.vgg == 4:
        model = models.vgg19(pretrained=True)

    if args.alexnet:
        model = models.alexnet(pretrained=True)

    return model
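A short, hypothetical usage sketch for get_model(): before training or loading a checkpoint, the returned backbone is typically given a fresh classifier head sized for the 102 flower categories, as the loader at the bottom of this page does. The hidden size of 512 is an assumption; 25088 is the flattened feature size of the VGG backbones (AlexNet would need 9216 instead). nn and OrderedDict are already imported above.

model = get_model()
model.classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(25088, 512)),   # 25088 = VGG feature size (assumed head)
    ('relu', nn.ReLU()),
    ('drop', nn.Dropout(p=0.5)),
    ('fc2', nn.Linear(512, 102)),     # 102 flower categories
    ('output', nn.LogSoftmax(dim=1))
]))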
Example #11
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
import json
import predict_args
import os

args = predict_args.get_args()
print(args)


def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)
    if checkpoint['arch'] == 'vgg19_bn':
        model = models.vgg19_bn()
    else:
        raise ValueError(f"Unsupported architecture in checkpoint: {checkpoint['arch']}")
    classifier = nn.Sequential(nn.Linear(25088, checkpoint['hidden_units']),
                               nn.ReLU(), nn.Dropout(p=0.5),
                               nn.Linear(checkpoint['hidden_units'], 102),
                               nn.LogSoftmax(dim=1))
    model.classifier = classifier
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
    optimizer.load_state_dict(checkpoint['optim_state'])

    return model, optimizer
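None of the examples include the predict() function they all call. The sketch below is a minimal, hypothetical version matching the most common call shape above, predict(image_path, model, topk); it assumes a process_image() helper like the one sketched earlier, a class_to_idx attribute on the model (as saved by the loader above), a LogSoftmax output layer, and a model living on the CPU. Some examples pass a device or a use_cuda flag instead, so treat this purely as illustrative.

import torch
from PIL import Image

def predict(image_path, model, topk=5):
    # Preprocess the image into a (1, C, H, W) float tensor.
    img = Image.open(image_path).convert('RGB')
    tensor = torch.from_numpy(process_image(img)).float().unsqueeze(0)

    # Forward pass; the classifiers above end in LogSoftmax, so exponentiate
    # to recover probabilities.
    model.eval()
    with torch.no_grad():
        ps = torch.exp(model(tensor))

    # Top-k probabilities and the matching class labels.
    top_p, top_idx = ps.topk(topk, dim=1)
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    top_classes = [idx_to_class[i] for i in top_idx[0].tolist()]
    return top_p[0].tolist(), top_classes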