Example #1
def predict(image_path, model, gpu, topk):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''

    device = torch.device(
        "cuda" if torch.cuda.is_available() and gpu else "cpu")
    model.eval()
    model.to(device)

    image = process_image(image_path)
    inputs = image.unsqueeze_(0).to(device)

    with torch.no_grad():
        output = model.forward(inputs)
        tensor_probs, tensor_labels = torch.topk(output, topk)
        top_probs = tensor_probs.exp()

    idx_to_class = {model.class_to_idx[key]: key for key in model.class_to_idx}
    classes = list()

    probs = top_probs.tolist()[0]
    labels = tensor_labels.tolist()[0]

    for label in labels:
        classes.append(idx_to_class[label])

    return probs, classes
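Most of the PyTorch snippets in this collection call a process_image helper that is never shown. Below is a minimal sketch of what such a helper might look like; the resize/crop sizes and normalization constants are assumptions based on common ImageNet-style preprocessing, not taken from any of the examples. Note that the examples disagree on the return type: some call .unsqueeze_ directly on the result (a tensor), while others wrap it in torch.from_numpy (a NumPy array), so the return value would need to be adapted accordingly.

from PIL import Image
from torchvision import transforms


def process_image(image_path):
    # Hypothetical helper: load an image and apply ImageNet-style
    # preprocessing, returning a torch.Tensor of shape (3, 224, 224).
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    with Image.open(image_path) as img:
        return preprocess(img.convert('RGB'))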
Example #2
def predict(image_path, model, topk, gpu_f):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    Arguments
    ---------
        image_path: path to the image file
        model: the pretrained model
        topk: integer, number of class probabilities to be displayed
        gpu_f: boolean flag, use the GPU when available
    '''

    # Prepare image
    Tensor_image = process_image(image_path)
    Tensor_image, device = hw_control(Tensor_image, gpu_f)
    Tensor_image.unsqueeze_(0)
    Tensor_image = Tensor_image.float()

    with torch.no_grad():
        # Put the model in evaluation mode
        model.eval()
        # Forward propagation through the model
        output = model.forward(Tensor_image)
        ps = torch.exp(output)
    print(ps)
    print(ps.max())
    top_k_prob = ps.topk(topk)
    #Make sure the module is back to training mode
    model.train()

    # Return the results as numpy arrays
    probabilities = top_k_prob[0][0].cpu().numpy()
    classes = top_k_prob[1][0].cpu().numpy()
    return probabilities, classes
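The hw_control helper used above is not shown anywhere in these examples. Presumably it selects a device based on the gpu_f flag and moves the tensor onto it; here is a minimal sketch under that assumption. The caller would also need to move the model to the same device for the forward pass to work.

import torch


def hw_control(tensor, gpu_f):
    # Hypothetical helper: pick CUDA when requested and available,
    # move the tensor onto that device, and return both.
    device = torch.device('cuda' if gpu_f and torch.cuda.is_available() else 'cpu')
    return tensor.to(device), device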
Example #3
def main():
    in_arg = get_input_args()  # Creates and returns command line arguments

    print('\nPath To Image:\n', in_arg.path_to_image, '\n', '\nCheckpoint:\n',
          in_arg.checkpoint, '\n')

    print('Optional Command Line Arguments:\n', 'Top K [--top_k]: ',
          in_arg.top_k, '\n', 'Category Names [--category_names]: ',
          in_arg.category_names, '\n', 'GPU [--gpu]: ', in_arg.gpu, '\n')

    label_count, hidden_units, arch, class_to_idx, classifier_state_dict, epochs = mod.load_checkpoint(
        in_arg.checkpoint, in_arg.gpu)  # Load checkpoint

    model = mod.build_model(label_count, hidden_units, arch,
                            class_to_idx)  # Build model

    model.classifier.load_state_dict(classifier_state_dict)
    criterion = nn.NLLLoss()

    image = util.process_image(in_arg.path_to_image)  # Pre-process image

    labels = util.get_labels(
        in_arg.category_names)  # Get dict of categories mapped to real names

    mod.predict(image, model, labels, in_arg.top_k,
                in_arg.gpu)  # Prints Top K Labels and Probabilities
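get_input_args is another helper the examples assume but never show. A sketch of what it could look like follows, with argument names taken from the attributes read in main() above; the defaults are placeholders.

import argparse


def get_input_args():
    # Hypothetical sketch of the command-line parser assumed by main() above.
    parser = argparse.ArgumentParser(description='Predict the class of an image')
    parser.add_argument('path_to_image')
    parser.add_argument('checkpoint')
    parser.add_argument('--top_k', type=int, default=5)
    parser.add_argument('--category_names', default='cat_to_name.json')
    parser.add_argument('--gpu', action='store_true')
    return parser.parse_args()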
Example #4
def predict(image_path, saved_model, top_k, category_names):

    im = Image.open(image_path)
    test_image = np.asarray(im)
    processed_test_image = process_image(test_image)
    processed_test_image = np.expand_dims(processed_test_image, axis=0)
    load_model = tf.keras.models.load_model(
        saved_model, custom_objects={'KerasLayer': hub.KerasLayer})
    ps = load_model.predict(processed_test_image)

    # top flower prediction
    val, ind = tf.math.top_k(ps[0], k=1)
    probs = list(val.numpy())
    classes = list(ind.numpy())
    print('Top prediction class:\n', classes)
    print('Top prediction label:\n',
          [category_names[str(n + 1)] for n in classes])
    print('Top prediction probability:\n', probs)

    # top k flower predictions
    val, ind = tf.math.top_k(ps[0], k=top_k)
    probs = list(val.numpy())
    classes = list(ind.numpy())
    print('Top-k classes:\n', classes)
    print('Top-k labels:\n',
          [category_names[str(n + 1)] for n in classes])
    print('Top-k probabilities:\n', probs)
Example #5
def model_prediction(image_path, model, topk, device):
    # Use CUDA only when it is both requested and actually available
    use_cuda = torch.cuda.is_available() and device == 'gpu'
    if use_cuda:
        model.to('cuda')

    img = process_image(image_path)
    img = img.unsqueeze_(0)
    img = img.float()

    with torch.no_grad():
        output = model.forward(img.cuda() if use_cuda else img)

    prob = torch.exp(output)
    top_p, top_class = prob.topk(topk)

    idx_to_class = {val: key for key, val in
                    model.class_to_idx.items()}
    mapped_classes = list()

    #      print('TOP CLASS', top_class.cpu().numpy())

    for label in top_class.cpu().numpy()[0]:
        mapped_classes.append(idx_to_class[label])

    return top_p, mapped_classes
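A possible call to the function above, mapping the returned class labels to human-readable names; the image path and the cat_to_name dictionary are placeholders, not part of the original snippet.

# Hypothetical usage; the image path and cat_to_name are placeholders.
probs, classes = model_prediction('flowers/test/1/image_06743.jpg', model, 5, 'gpu')
names = [cat_to_name[c] for c in classes]
for name, p in zip(names, probs[0].cpu().numpy()):
    print(f"{name}: {p:.3f}")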
Example #6
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'image_dir',
        action="store",
        nargs='?',
        default="/home/workspace/ImageClassifier/flowers/test/10/image_07090.jpg")
    parser.add_argument(
        'checkpoint',
        action="store",
        nargs='?',
        default="/home/workspace/ImageClassifier/checkpoint.pth")
    parser.add_argument('--top_k',
                        action="store",
                        dest="top_k",
                        type=int,
                        default=5)
    parser.add_argument(
        '--category_names',
        action="store",
        dest="classes_names",
        default="/home/workspace/ImageClassifier/cat_to_name.json")
    parser.add_argument('--gpu', action="store", dest="device", default="gpu")

    p_args = parser.parse_args()

    #gpu or cpu
    device = torch.device("cpu")
    if torch.cuda.is_available() and p_args.device == 'gpu':
        device = torch.device("cuda:0")
    #load the categories names
    cat_names = ut.load_categories(p_args.classes_names)
    #load the checkpoint
    chkpoint = ut.load_checkpoint(p_args.checkpoint)
    #configure the model
    model = chkpoint['model']
    model.load_state_dict(chkpoint['model_state'])
    #process the image
    image = ut.process_image(p_args.image_dir)
    #predict
    probs, classes = brain.predict(image, model, p_args.top_k, device)

    #configure the probabilities and class names
    probs = probs.cpu().numpy().flatten().tolist()
    classes = classes.cpu().numpy().flatten().tolist()
    index_to_class = {value: key for key, value in model.class_to_idx.items()}
    print("\n\n\n\n{:30s}{:20s}".format("Class Name", " Probability"))
    for i in range(len(classes)):
        name = cat_names[index_to_class[classes[i]]]
        probability = probs[i] * 100
        print("{:30s}{:0.3f}".format(name, probability))
Example #7
def main(args):
    model = baseModel.Xception_Model(input_shape=(299, 299, 3),
                                     batch_size=128,
                                     num_classes=103,
                                     trainable=False)
    model.summary()
    model.load_model(args.model)

    res_file = open(args.out, 'w')
    res_file.write('id,predicted\n')

    img_files, f_ids = util.list_imgs(args.data)
    nb_sample = len(f_ids)
    print("Number of test image: ", nb_sample)
    nb_steps = int(np.ceil(len(f_ids) / args.batch_size))
    error_ids = []
    for step in range(nb_steps):
        t_imgs, t_ids = [], []
        print("Process with batch ", step)
        for idx in range(args.batch_size):

            if ((step * args.batch_size + idx) > nb_sample - 1):
                break
            # print("image: ", f_ids[step*args.batch_size + idx])
            try:
                t_img = util.process_image(img_files[step * args.batch_size +
                                                     idx])
                # if (np.asarray(t_img).shape == (299,299,3)):
                t_imgs.append(t_img)
                t_ids.append(f_ids[step * args.batch_size + idx])
            except Exception:
                print("Error: ", f_ids[step * args.batch_size + idx])
                error_ids.append(f_ids[step * args.batch_size + idx])
                continue
                # res_file.write(f_ids[step*args.batch_size + idx]+'\n')
        predicted_res = util.get_top_k_(model, np.squeeze(t_imgs))

        for j, idn in enumerate(t_ids):
            res_file.write(
                str(idn) + ', ' + class_mapping[predicted_res[j][-1]] + ' ' +
                class_mapping[predicted_res[j][-2]] + ' ' +
                class_mapping[predicted_res[j][-3]] + '\n')

    for err in error_ids:
        res_file.write(
            str(err) + ', ' + str(0) + ' ' + str(0) + ' ' + str(0) + '\n')
    res_file.close()
    print("Done....")
Example #8
def main():

    # Function to retrieve command line arguments entered by the user
    in_arg = get_input_args()

    #Load pre-trained model from checkpoint
    model = load_checkpoint(in_arg.saved_checkpoint, in_arg.gpu)
    #print(in_arg.saved_checkpoint)
    #print(model)

    # Process Image
    processed_image = process_image(in_arg.image_path)

    # Classify Prediction
    top_probs, top_labels, top_flowers = predict(in_arg.image_path, model,
                                                 in_arg.category_names,
                                                 in_arg.top_k)
Example #9
def predict(image_path, model_path, top_k=5, category_names=None):
    """
    Returns a set prediction of what flower the given input image is, 
    ordered descending on predicted probability.
    
    INPUTS
    image_path: path to image file
    model_path: path to saved prediction model. Assumes tensorflow model
    top_k: top K predictions to return
    category_names: Path to json mapping class numbers to flower names. If not specified, 
    the returned output only includes class numbers. 
    
    RETURNS
    classes: Classes of predicted flowers in descending order based on probabilities.
    Will be of integers if `category_names`is unspecified. Else, includes names of 
    predicted flowers. 
    probs: Proabilities of predicted classes in descending order. 
    """
    #process image from image_path
    image = process_image(image_path)

    #load the model from model_path
    model = load_model(model_path)

    #make predictions on the image with the model
    ps = model.predict(image)

    #sort indices from predictions descending based on probabilities
    psi = np.argsort(-ps)

    #get top K probabilities from classes
    classes = psi[0][:top_k]

    #use classes as indices to find the top K predictions
    probs = np.take(ps, classes)

    #adding 1 to index to get correct class values
    classes += 1

    #check if category names are specified and translate classes to class_names.
    if (category_names):
        class_names = open_json(category_names)
        class_str = [str(x) for x in classes]
        classes = [class_names.get(x) for x in class_str]

    return classes, probs
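A hedged usage sketch for the function above; both file paths below are placeholders rather than files referenced elsewhere in these examples.

# Hypothetical usage; the paths are placeholders.
classes, probs = predict('test_images/orchid.jpg', 'flower_model.h5',
                         top_k=3, category_names='label_map.json')
for c, p in zip(classes, probs):
    print(f"{c}: {p:.4f}")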
Example #10
def predict(image_path, model, category_names, topk):
    # Predict the class (or classes) of an image using a trained deep learning model

    # Code to predict the class from an image file
    processed_image = process_image(image_path)
    processed_image.unsqueeze_(0)
    probs = torch.exp(model.forward(processed_image))
    top_probs, top_labs = probs.topk(topk)

    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)

    idx_to_class = {}
    for key, value in model.class_to_idx.items():
        idx_to_class[value] = key

    np_top_labs = top_labs[0].numpy()

    top_labels = []
    for label in np_top_labs:
        top_labels.append(int(idx_to_class[label]))

    top_flowers = [cat_to_name[str(lab)] for lab in top_labels]
    top_probs = top_probs[0].detach().numpy()  #converts from tensor to nparray
    #print(top_flowers)
    #print(top_probs)
    #print(top_labels)
    #print(topk)
    #print('top flowers length')
    #print(len(top_flowers))
    #print(len(top_probs))

    # print out the top K classes along with their associated probabilities
    for i in range(topk):
        print("{} with a probability of {}".format(top_flowers[i],
                                                   top_probs[i]))

    return top_probs, top_labels, top_flowers
Example #11
def predict(image_path, model, topk=args.top_k, device='cuda'):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    # TODO: Implement the code to predict the class from an image file
    processed = utilities.process_image(image_path)
    processed = torch.from_numpy(processed).type(torch.FloatTensor)
    processed.unsqueeze_(0)

    # send to cuda if selected
    if args.gpu:
        processed = processed.to(device)
    log_output = model.forward(processed)
    probs = torch.exp(log_output)
    top_probs, top_labs = probs.topk(topk)
    top_probs = top_probs.detach().cpu().numpy().tolist()[0]
    top_labs = top_labs.detach().cpu().numpy().tolist()[0]
    inv_map = {v: k for k, v in model.class_to_idx.items()}
    top_labs = [inv_map[lab] for lab in top_labs]
    print("Image Class: {}".format(top_labs))
    print("Probability: {}".format(top_probs))
    return top_probs, top_labs
Example #12
def predict(image_path, model, topk):
    """
    Predict the class (or classes) of an image using a trained deep learning model.

    :param image_path: path to the image file
    :param model: trained PyTorch model
    :param topk: number of top classes to return
    :return: tuple of (class probabilities, class indices)
    """

    image = Image.open(image_path)
    image = process_image(image)
    image_tensor = torch.from_numpy(image)
    # refactor tensor shape to cope with model specification
    image_tensor = image_tensor.view(1, *image_tensor.shape)
    image_tensor = image_tensor.type(torch.FloatTensor)
    with torch.no_grad():
        model.eval()

        output = model.forward(image_tensor)
        probs = torch.exp(output)
        class_probs, class_idx = probs.topk(topk, dim=1)
        return class_probs, class_idx
Example #13
def predict(image_path, model, topk):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''

    #Implement the code to predict the class from an image file
    #turn off dropout
    model.eval()
    topk = in_arg.kclass
    image = process_image(in_arg.imagefile)
    image = torch.from_numpy(np.array([image])).float()

    cuda = torch.cuda.is_available()
    if in_arg.gpu == 'gpu' and cuda:
        device = torch.device('cuda')
        model.cuda()
        FloatTensor = torch.cuda.FloatTensor
    else:
        device = torch.device('cpu')
        model.cpu()
        FloatTensor = torch.FloatTensor

    #feed forward
    image = image.to(device)
    output = model.forward(image)
    ps = torch.exp(output)

    probs = torch.topk(ps, topk)[0].tolist()
    index_k = torch.topk(ps, topk)[1].tolist()

    idx_to_class = {value: key for key, value in model.class_to_idx.items()}
    print('top {} probabilities:'.format(topk), probs)
    #print('index-k:', index_k)
    #correlate the top 5 indices with their categories
    classes = []
    for i in index_k[0]:
        classes.append(idx_to_class[i])

    return probs, classes
Example #14
def predict(image_path, model, topk=top_k_var):
    '''
    Predict the class (or classes) of an image using a trained deep learning model.
    '''

    # Preprocess image
    image = process_image(image_path)

    # Convert to Torch Tensor
    image = torch.from_numpy(image)

    # Move tensor onto device
    image = image.to(device)

    # Convert input image to float to avoid RuntimeError: expected Double Tensor
    image = image.float()

    # Add batch dimension PyTorch is expecting
    image.unsqueeze_(0)

    # model.to(device)
    model.eval()

    with torch.no_grad():
        output = model.forward(image)

    # Our output is log_softmax, so we take the inverse (exp) to get back softmax distribution
    all_probs = torch.exp(output)

    # topk returns a tuple of (values, indices)
    topk_tuple = torch.topk(all_probs, topk)

    probs = topk_tuple[0]
    classes = topk_tuple[1]

    return probs, classes
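The function above returns raw top-k tensors rather than class labels. A caller could map the indices back through model.class_to_idx, roughly as follows; this is a sketch that assumes the model exposes that attribute as in the other examples, and the image path is a placeholder.

# Hypothetical usage of the predict() defined above.
probs, classes = predict('flowers/test/1/image_06743.jpg', model)
idx_to_class = {v: k for k, v in model.class_to_idx.items()}
top_labels = [idx_to_class[i] for i in classes[0].cpu().numpy()]
top_probs = probs[0].cpu().numpy()
print(list(zip(top_labels, top_probs)))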
Example #15
def main():
    in_arg = get_input_args()

    # initiate the variables passed through command line
    image_path = in_arg.image_path
    checkpoint_path = in_arg.checkpoint_path
    top_k = in_arg.top_k
    category_names = in_arg.category_names
    gpu = in_arg.gpu

    # Correct the variables if necessary to avoid incorrect calculations.
    # Collect error messages describing which variables were changed and to what values.
    error_messages = []

    if (top_k <= 0):
        top_k = 1
        error_messages.append("top_k was corrected to 1")
    elif (top_k > 5):
        top_k = 5
        error_messages.append("top_k was corrected to 5")

    if path.isfile(image_path) and path.isfile(
            checkpoint_path) and path.isfile(
                category_names):  # check if all files are existing

        # load the category names file, which maps category indices to the indices predicted by the model
        with open(category_names, 'r') as f:
            cat_to_name = json.load(f)

        # use the folder of the specified file as category index
        title_idx = image_path.split("/")[-2]

        # find the name by matching the category index with the indices in the category names file
        img_label = [v for k, v in cat_to_name.items() if k == title_idx]
        img_label = img_label[0]
        print(f"Image label: {img_label}")

        # use GPU power if available for prediction
        if gpu:
            device = torch.device(
                "cuda" if torch.cuda.is_available() else "cpu")
        else:
            device = "cpu"

        # load the model from the specified classifier path
        model = load_model(checkpoint_path)
        model = model.to(device)

        # freeze all model parameters to speed up inference
        for param in model.parameters():
            param.requires_grad = False

        # switch to evaluation mode
        model.eval()

        # prepare the specified image so that it can be used by the model
        img = process_image(image_path)
        img = img[None, :, :, :]
        img = img.float()
        img = img.to(device)

        # deactivate gradient tracking to further speed things up and compute the log-probability outputs
        with torch.no_grad():
            logps = model.forward(img)

        # convert to probability values
        ps = torch.exp(logps)

        # save the top probabilities and their category indices
        top_p, top_class = ps.topk(top_k, dim=1)
        top_p = top_p.cpu().numpy().reshape(top_k)
        top_class = top_class.cpu().numpy().reshape(top_k)

        # map the predicted indices back to category indices, preserving the top-k order
        idx_to_class = {v: k for k, v in model.class_to_idx.items()}
        top_classes = [idx_to_class[i] for i in top_class]
        # match the top category indices with their category names
        names = [cat_to_name[c] for c in top_classes]

        for i, name in enumerate(names):
            print(f"{name}: ..... {format(top_p[i],'.2f')}")

    else:
        print("Incorrect paths to files - please check!")

    # print out error messages if any
    if (len(error_messages)):
        for v in error_messages:
            print(v)
Example #16
import argparse
import json

import torch

import utilities
from network import Network

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

parser_sec = argparse.ArgumentParser()
parser_sec.add_argument('b_img_path', action='store')  # image path
parser_sec.add_argument('checkpoint', action='store')  # checkpoint file
parser_sec.add_argument('--top_k', nargs='?', type=int)
parser_sec.add_argument('--category_names', nargs='?')
parser_sec.add_argument('--gpu', action='store_true')

args = parser_sec.parse_args()
args = vars(args)

arg_sec_inputs = []
for key, value in args.items():
    arg_sec_inputs.append(value)

if arg_sec_inputs[3] is None:
    arg_sec_inputs[3] = 'cat_to_name.json'

with open(arg_sec_inputs[3], 'r') as f:
    cat_to_name = json.load(f)

Network.model = utilities.load_checkpoint(arg_sec_inputs[1])

image = utilities.process_image(arg_sec_inputs[0])

utilities.predict_image(image, Network.model, True, cat_to_name,
                        arg_sec_inputs[2])