Example #1
def main():
    # load args
    in_args = load_args()
    # load the model
    model = utility.load_checkpoint(in_args.checkpoint, train=False)
    # predict
    utility.sanityChecking(in_args.image, in_args.category_names, model,
                           in_args.gpu, in_args.top_k)
def predict(image, checkpoint="", top_k=1, gpu=False,
            category_names='cat_to_name.json'):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    
    # Implement the code to predict the class from an image file
    if checkpoint == "":
        print("Checkpoint path is not specified.")
        return
    else:
        try:
            predict_model = load_checkpoint(checkpoint)
        except Exception:
            print("An error occurred while loading the model!")
            return
               
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if gpu and device.type != 'cuda':
        print("GPU was requested but CUDA is not available.")
        return
        
    predict_model.to( device )
    predict_model.eval()
    
    flower_image = Image.open(image)
    image_processed = process_image(flower_image)
    # convert to a tensor and add a batch dimension: (3, 224, 224) -> (1, 3, 224, 224)
    image_processed = torch.from_numpy(image_processed).unsqueeze(0)
    image_processed = image_processed.to(device).float()
    
    cat_to_name = load_cat_to_name( category_names )
    if cat_to_name is None:
        print ("Cannot find category_names file!")
        return 

    with torch.no_grad():
        # the model returns log-probabilities, so exponentiate to get probabilities
        outputs = predict_model(image_processed)
        probs, indexes = torch.topk(outputs, top_k)
        probs = torch.exp(probs).cpu().numpy().ravel()
        
        idx_to_class = { value:key for key,value in predict_model.class_to_idx.items() }
        
        classes =  [idx_to_class[int(idx)] for idx in list(indexes.cpu().numpy().ravel())]
        flowers = [ cat_to_name[class_idx] for class_idx in classes ]
    
    for flower, prob in zip(flowers, probs):
        print("flower name: {}  probability: {}".format(flower, prob))
    
    return probs, flowers
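
Example #1 calls a process_image helper that is not shown. Below is a minimal sketch of what such a helper might look like, assuming the usual 256-pixel resize, 224x224 center crop, and ImageNet normalization implied by the (1, 3, 224, 224) tensor shape above; the exact sizes are assumptions, not taken from the example.

import numpy as np
from PIL import Image

def process_image(pil_image):
    # Resize so the shorter side is 256 pixels, keeping the aspect ratio (assumed convention).
    width, height = pil_image.size
    if width < height:
        pil_image = pil_image.resize((256, int(256 * height / width)))
    else:
        pil_image = pil_image.resize((int(256 * width / height), 256))

    # Center-crop to 224 x 224.
    width, height = pil_image.size
    left = (width - 224) // 2
    top = (height - 224) // 2
    pil_image = pil_image.crop((left, top, left + 224, top + 224))

    # Scale pixel values to [0, 1] and apply the ImageNet mean/std normalization.
    np_image = np.array(pil_image) / 255.0
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std

    # Reorder dimensions so the color channel comes first, as PyTorch expects.
    return np_image.transpose((2, 0, 1))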
Example #3
def predict(in_args):
    img_path = in_args.input
    checkpoint = in_args.checkpoint
    cat_to_name_file = in_args.category_names
    top_k = in_args.top_k

    # Load model
    model = u.load_checkpoint(checkpoint)

    # Predict classes
    prob, classes = predict_helper(img_path, model, top_k)

    # Display results
    u.display_results(model.class_to_idx, prob, classes, cat_to_name_file,
                      img_path, top_k)
def main():

    # expected command-line arguments: save_dir, input_file, topk, gpu (names assumed)
    args = init_argparse()

    with open('cat_to_name.json', 'r') as f:
        cat_to_name = json.load(f)

    # load the trained model from the checkpoint and move it to the requested device
    model = utility.load_checkpoint(args.save_dir)
    device = 'cuda' if args.gpu else 'cpu'
    model.to(device)

    # preprocess the input image and predict the top classes
    image = utility.process_image(args.input_file)
    top_f_probability, top_f_classes = utility.predict(image, model, args.topk, device, cat_to_name)

    # display the predicted classes and their probabilities
    utility.probability(top_f_probability, top_f_classes)
Example #5
def main():
    args = parse_arguments()
    category_to_name = loading_labels_to_names(args.category_names)
    model = load_checkpoint(args.checkpoint)

    probabilities, classes = predict(args.imagepath, model, args.top_k,
                                     args.gpu)
    flower_names = []
    for clss in classes:
        flower_names.append(category_to_name[str(clss)])

    for name, prob in zip(flower_names, probabilities):
        print("Flower name: {}     Class Probability: {}".format(name, prob))
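
Example #5 also relies on a loading_labels_to_names helper for the category mapping; a minimal sketch, doing the same json.load that several of the other examples perform inline:

import json

def loading_labels_to_names(category_names_path):
    # Read the JSON file mapping category ids to flower names (e.g. cat_to_name.json).
    with open(category_names_path, 'r') as f:
        return json.load(f)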
Example #6
def PredictTheFlower():
    #Load the args
    args = parse_args()
    #Load the Model
    model = load_checkpoint(args.checkpoint)
    cat_to_name = load_cat_names(args.category_names)
    path = args.filepath
    probs, classes = predict(path, model, args.top_k, args.gpu)
    flowername =  [cat_to_name[str(index)] for index in classes ]
    probabilities = probs
    
    print('Selected file: ' + path)
    print('Flower names: ', flowername)
    print('Probabilities: ', probabilities)
    
    # print each class alongside its corresponding probability
    for name, prob in zip(flowername, probabilities):
        print("{} has a probability of {}".format(name, prob))
def predict():
    args = utility.get_predict_inputs()
    image = utility.process_image(args.imagepath)

    model = utility.load_checkpoint(args.checkpoint)
    # args.gpu is expected to be a device string such as 'cuda' or 'cpu'
    model.to(args.gpu)
    model.eval()

    # add a batch dimension and move the image to the same device as the model
    image.unsqueeze_(0)
    image = image.to(args.gpu)

    # run inference without tracking gradients; the model outputs log-probabilities
    with torch.no_grad():
        output = model(image)
        ps = torch.exp(output)
        props, index = ps.topk(args.top_k)

    props = props.cpu()[0].numpy()
    index = index.cpu()[0].numpy()

    idx_to_class = {
        idx: class_name
        for class_name, idx in model.class_to_idx.items()
    }

    cats = []

    for i in index:
        cats.append(idx_to_class[i])

    if args.category_names:
        cat_to_name = utility.label_mapping(args.category_names)
        names = []

        for cat in cats:
            names.append(cat_to_name[cat])

        print(*props)
        print(*names)

    else:
        print(*props)
        print(*cats)
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
import PIL
from PIL import Image
import numpy as np
import utility
import argparse
import json

ap = argparse.ArgumentParser(description='predict class of flower')
ap.add_argument('path_img', default='aipnd-project/flowers/test/10/image_07117.jpg', nargs='*', action="store", type = str)
ap.add_argument('checkpoint', default='checkpoint.pth', nargs='*', action="store", type = str)
ap.add_argument('--top_k', default=5, dest="top_k", action="store", type=int)
ap.add_argument('--labels', dest="labels", action="store", default='cat_to_name.json')
ap.add_argument('--gpu', default="gpu", nargs = '*', action="store", dest="gpu")

pa = ap.parse_args()
path_image = pa.path_img
path = pa.checkpoint

with open(pa.labels, 'r') as json_file:
    cat_to_name = json.load(json_file)
    
model = utility.load_checkpoint(path)

utility.predict(path_image, model, topk=pa.top_k)
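
Nearly every example above calls a load_checkpoint helper that is not shown. Below is a minimal sketch, assuming the checkpoint stores the architecture name, the replacement classifier, the state dict, and the class_to_idx mapping under the keys used here; the key names and the VGG-style classifier attribute are assumptions.

import torch
from torchvision import models

def load_checkpoint(checkpoint_path):
    # Load on the CPU so the checkpoint can be read even without a GPU.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # Recreate the pretrained backbone and freeze its feature weights.
    model = getattr(models, checkpoint['arch'])(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False

    # Attach the trained classifier, weights, and class-to-index mapping.
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    return model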
                   
Example #9
def main():
    global args
    args = parser.parse_args()

    # Default settings
    default_model = 'resnet18'
    my_topk = 1
    sz = 224

    # Take actions based upon initial arguments

    if args.gpu:
        # Check for GPU and CUDA libraries
        HAS_CUDA = torch.cuda.is_available()
        if not HAS_CUDA:
            sys.exit('No Cuda capable GPU detected')
    else:
        HAS_CUDA = False

    # Check how many classes to predict (without the argument, only the top class is predicted)
    if args.topk:
        my_topk = args.topk

    # load cat_to_name file if specified
    if args.category_names:
        names_given = True
        cat_to_names_file = args.category_names
        with open(cat_to_names_file, 'r') as f:
            cat_to_name = json.load(f)

    else:
        names_given = False

    checkpoint_dir = args.checkpoint

    # Recreate model to facilitate prediction

    print('Loading checkpoint...')
    lr = 0.01  # not needed for prediction; passed only to satisfy the load_checkpoint signature
    cf_mgr, pt_model = utility.load_checkpoint(checkpoint_dir, lr, HAS_CUDA)

    # Load image file
    filename = args.path_to_image
    if not os.path.isfile(filename):
        sys.exit('path to image is not a valid file')

    # Run prediction and obtain probabilities of classes plus the image file
    class_probs, class_ids, class_labels, image = predict(
        filename, cf_mgr, my_topk)

    class_labels_tidy = [str(int(cln)) for cln in class_labels]

    if names_given:
        class_names = [cat_to_name[str(int(cln))] for cln in class_labels]

    # Print out results
    print(f'Predictions for image {filename}')
    print(f'   Top {my_topk} predicted probabilities: {np.around(class_probs, 5)}')
    print(f'   Top {my_topk} predicted class categories: {class_labels_tidy}')
    if names_given:
        print(f'   Top {my_topk} predicted class names: {class_names}')
    print('')
Example #10
    # Convert indices to classes
    idx_to_class = {val: key for key, val in num_classes.items()}  
    top_labels = [idx_to_class[lab] for lab in top_labs]
    top_flowers = [cat_to_name[idx_to_class[lab]] for lab in top_labs]
    return top_probs, top_labels, top_flowers    

ap = argparse.ArgumentParser(description='predict.py')
ap.add_argument('input_img', default='/home/workspace/paind-project/flowers/test/102/image_08015.jpg',action="store", type = str)
ap.add_argument('--checkpoint', default='/home/workspace/paind-project/checkpoint.pth',action="store",type = str)
ap.add_argument('--top_k', default=5, dest="top_k", action="store", type=int)
ap.add_argument('--category_names', dest="category_names", action="store", default='cat_to_name.json')
ap.add_argument('--gpu', dest="gpu", action="store", default=True)

pa = ap.parse_args()
path_image = pa.input_img
number_of_outputs = pa.top_k
power = pa.gpu
input_img = pa.input_img
path = pa.checkpoint

# load the category-to-name mapping selected on the command line
with open(pa.category_names, 'r') as f:
    cat_to_name = json.load(f)

trainloader, validloader, testloader, train_dataset, valid_dataset, test_dataset = utility.create_loaders('/home/workspace/paind-project/flowers/')
model,arch,num_classes = utility.load_checkpoint(path)

probs, classes, top_flowers = predict(input_img, model, num_classes, number_of_outputs)
print('The output of the model prediction is:')
print(probs)
print(classes)
print(top_flowers)
Example #11
                  default="False",
                  help='use gpu to speed up inference')

args = args.parse_args()
image_path = args.input_img
top_k = args.top_k
gpu = args.gpu
checkpoint = args.checkpoint
category_names = args.category_names
image_path = image_path[1]
path_list = image_path.split('/')

import json
import pandas as pd
with open(category_names, 'r') as f:
    flower_to_name = json.load(f)
flower_species = len(flower_to_name)
target_class = flower_to_name[path_list[-2]]  # To get the category
print("Target class: " + target_class)

model = u.load_checkpoint(checkpoint)
value, kclass = u.predict(image_path, model, gpu, top_k)

idx_to_class = {model.class_to_idx[i]: i
                for i in model.class_to_idx.keys()}  # dict comprehension
classes = [flower_to_name[idx_to_class[c]]
           for c in kclass]  # list comprehension

data = {'Predicted Class': classes, 'Probability': value}
dataframe = pd.DataFrame(data)
print(dataframe.to_string(index=False))
Example #12
def main():

    global args
    args = parser.parse_args()

    # Default settings
    default_model = 'resnet18'
    default_bs = 16
    sz = 224

    # Take actions based upon initial arguments

    if args.gpu:
        # Check for GPU and CUDA libraries
        HAS_CUDA = torch.cuda.is_available()
        if not HAS_CUDA:
            sys.exit('No Cuda capable GPU detected')
    else:
        HAS_CUDA = False

    checkpoint_dir = args.save_dir

    # Define hyper-parameters

    # Note - allow dropout to be changed when resuming model

    tmp = args.dropout
    tmp = re.sub(r"[\[\]]", "", tmp)
    drops = [float(item) for item in tmp.split(',')]

    lr = args.learning_rate

    epochs = args.epochs

    # All arguments imported, will start to setup model depending upon whether restarting from checkpoint or
    # from scratch

    if args.resume:
        if os.path.isdir(args.resume):
            print('Loading checkpoint...')
            sol_mgr, pt_model = utility.load_checkpoint(
                args.resume, lr, HAS_CUDA)

    else:
        # Define hidden layer details (note: if resuming, training continues with the values used earlier)

        tmp = args.hidden_units
        tmp = re.sub(r"[\[\]]", "", tmp)
        n_hid = [int(item) for item in tmp.split(',')]

        # check data directory exists and assign
        data_dir = args.data_directory
        # Check it exists
        if not os.path.exists(data_dir):
            sys.exit('Data directory does not exist')

        # Create model, datasets etc from scratch
        # create datasets and dataloaders
        phrases = ['train', 'valid', 'test']

        # Define data transforms
        data_transforms = {
            'train':
            transforms.Compose([
                transforms.RandomResizedCrop(sz),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
            'valid':
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(sz),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
            'test':
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(sz),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
        }

        bs = args.batch_size

        data = Data_Manager(data_dir, phrases, data_transforms, bs)

        # Load cat_to_name
        cat_to_name = utility.load_classes('cat_to_name.json')
        num_cat = len(cat_to_name)

        # Load pre-trained model
        if args.arch is not None:
            pt_model = args.arch
        else:
            pt_model = default_model
        model_pt = models.__dict__[pt_model](pretrained=True)
        num_ftrs = model_pt.fc.in_features

        # Create classifier model
        img_cl = Composite_Classifier(model_pt, n_hid, drops, num_cat)
        # Move to CUDA if available
        if HAS_CUDA:
            img_cl.cuda()

        # Define losses and hyper-parameters
        criterion = nn.CrossEntropyLoss()
        # Optimise just the parameters of the classifier layers
        optimizer_ft = optim.SGD(img_cl.cf_layers.parameters(),
                                 lr=lr,
                                 momentum=0.9)

        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                               step_size=7,
                                               gamma=0.1)

        # Freeze the pre-trained model layers
        for param in img_cl.model_pt.parameters():
            param.requires_grad = False

        # Create model manager to control training, validation, test and predict with the model and data
        sol_mgr = Solution_Manager(img_cl,
                                   criterion,
                                   optimizer_ft,
                                   exp_lr_scheduler,
                                   data,
                                   phrases,
                                   HAS_CUDA=HAS_CUDA)
        sol_mgr.model.class_to_idx = data.image_datasets['train'].class_to_idx

    # Train model
    sol_mgr.train(epochs=epochs)

    # Evaluate model against test set
    sol_mgr.test_with_dl()

    # Save Checkpoint
    utility.save_checkpoint(args.save_dir, sol_mgr, pt_model, HAS_CUDA)
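
Example #12 ends by saving a checkpoint through utility.save_checkpoint; its exact signature is specific to that project, but a minimal sketch of the general idea, writing the keys that the load_checkpoint sketch above expects, could look like this (the argument names are illustrative):

import os
import torch

def save_checkpoint(save_dir, model, arch_name, class_to_idx):
    # Bundle everything needed to rebuild the model later.
    checkpoint = {
        'arch': arch_name,
        'classifier': model.classifier,
        'state_dict': model.state_dict(),
        'class_to_idx': class_to_idx,
    }
    torch.save(checkpoint, os.path.join(save_dir, 'checkpoint.pth'))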
Example #13
                    help='path to the image to be classified')
parser.add_argument('checkpoint',
                    type=str,
                    help='path to fetch the saved checkpoint')
parser.add_argument('--top_k',
                    type=int,
                    default=3,
                    help='top K most likely cases')
parser.add_argument('--category_names',
                    type=str,
                    default='cat_to_name.json',
                    help='images mapping to real names')
parser.add_argument('--gpu',
                    action='store_true',
                    help='use the GPU for inference if available')
args = parser.parse_args()

with open(args.category_names, 'r') as f:
    cat_to_name = json.load(f)  # read the JSON file into a dict

img = args.input_image_path
model = load_checkpoint(args.checkpoint)
device = torch.device(
    "cuda" if args.gpu and torch.cuda.is_available() else "cpu")
probabilities, classes = predict(img, model.to(device), args.top_k)
category_names = [cat_to_name[str(cl)] for cl in classes]
for name, prob in zip(category_names, probabilities):
    print("The Model predicts {} with a probability of {}%\n".format(
        name, int(prob * 100)))
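
Examples #5 and #13 call a predict helper that is not shown. A minimal sketch, assuming the model ends in LogSoftmax, carries a class_to_idx attribute as in the examples above, and reusing the process_image sketch given after Example #1; the signature here is illustrative and differs slightly between the examples.

import torch
from PIL import Image

def predict(image_path, model, topk=5):
    # Run the model on the same device its parameters live on.
    device = next(model.parameters()).device
    model.eval()

    # Preprocess the image (process_image sketch above) and add a batch dimension.
    image = process_image(Image.open(image_path))
    image = torch.from_numpy(image).unsqueeze(0).float().to(device)

    # The model outputs log-probabilities, so exponentiate before taking the top K.
    with torch.no_grad():
        probs, indices = torch.exp(model(image)).topk(topk)

    # Map the model's output indices back to the dataset class labels.
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    probs = probs.cpu().numpy().ravel()
    classes = [idx_to_class[int(i)] for i in indices.cpu().numpy().ravel()]
    return probs, classes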