def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu

    if in_args.verbose:
        print("Predicting on {} using {}".format("GPU" if use_gpu else "CPU",
                                                 in_args.checkpoint))

    # Load the trained model from the checkpoint
    model = model_helper.load_checkpoint(in_args.checkpoint, in_args.verbose)

    # Move the model to the GPU if it will be used
    if use_gpu:
        model.cuda()

    # Load category mapping dictionary
    use_mapping_file = False

    if in_args.category_names:
        with open(in_args.category_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True

    # Get prediction
    number_of_results = in_args.top_k if in_args.top_k else 1

    probs, classes = model_helper.predict(in_args.input, model, use_gpu,
                                          number_of_results)

    # Print results
    if number_of_results > 1:
        print("\nTop {} Classes predicted for '{}':".format(
            len(classes), in_args.input))

        if use_mapping_file:
            print("\n{:<30} {}".format("Flower", "Probability"))
            print("{:<30} {}".format("------", "-----------"))
        else:
            print("\n{:<10} {}".format("Class", "Probability"))
            print("{:<10} {}".format("------", "-----------"))

        for i in range(0, len(classes)):
            if use_mapping_file:
                print("{:<30} {:.2f}".format(
                    get_title(classes[i], cat_to_name), probs[i]))
            else:
                print("{:<10} {:.2f}".format(classes[i], probs[i]))
    else:
        print("\nMost likely image class is '{}' with probability of {:.2f}".
              format(
                  get_title(classes[0], cat_to_name)
                  if use_mapping_file else classes[0], probs[0]))

    # Compute overall runtime in seconds and print it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
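
The comment above refers to utility.print_elapsed_time, which is not shown in these listings. A minimal sketch of what such a helper could look like, assuming it only receives the elapsed seconds as a number and prints them as hh:mm:ss:

def print_elapsed_time(total_seconds):
    # Hypothetical helper: format a duration given in seconds as hh:mm:ss.
    hours, remainder = divmod(int(total_seconds), 3600)
    minutes, seconds = divmod(remainder, 60)
    print("\nTotal Elapsed Runtime: {:02d}:{:02d}:{:02d}".format(
        hours, minutes, seconds))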
Example 2
def main():
    start_time = time()

    in_args = get_input_args()
    use_gpu = torch.cuda.is_available() and in_args.gpu

    print("Predicting on {} using {}".format("GPU" if use_gpu else "CPU",
                                             in_args.checkpoint))

    model = model_helper.load_checkpoint(in_args.checkpoint)

    if use_gpu:
        model.cuda()

    use_mapping_file = False

    if in_args.category_names:
        with open(in_args.category_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True

    probs, classes = model_helper.predict(in_args.input, model, use_gpu,
                                          in_args.top_k)

    print("\nTop {} Classes predicted for '{}':".format(
        len(classes), in_args.input))

    if use_mapping_file:
        print("\n{:<30} {}".format("Flower", "Probability"))
        print("{:<30} {}".format("------", "-----------"))
    else:
        print("\n{:<10} {}".format("Class", "Probability"))
        print("{:<10} {}".format("------", "-----------"))

    for i in range(0, len(classes)):
        if use_mapping_file:
            print("{:<30} {:.2f}".format(get_title(classes[i], cat_to_name),
                                         probs[i]))
        else:
            print("{:<10} {:.2f}".format(classes[i], probs[i]))

    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
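
get_title is also not shown. A minimal sketch consistent with how it is called in these examples, assuming it simply looks a class label up in the category-to-name mapping and falls back to the raw label:

def get_title(class_label, cat_to_name):
    # Hypothetical helper: map a predicted class label (e.g. '21') to a
    # human-readable flower name, or return the label itself if unmapped.
    return cat_to_name.get(class_label, class_label)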
Example 3
def predict_test(tester, arch, enable_gpu):
    checkpoint_dir = testing_dir + ('/gpu' if enable_gpu else '/cpu')
    checkpoint = checkpoint_dir + '/' + arch + '_checkpoint.pth'

    model = model_helper.load_checkpoint(checkpoint)

    if enable_gpu:
        model.cuda()

    probs, classes = model_helper.predict(test_image, model, enable_gpu, top_k)

    tester.assertEqual(len(classes), top_k, 'Incorrect number of results')
    tester.assertEqual(classes[0], correct_prediction_class,
                       'Incorrect prediction')

    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)

    tester.assertEqual(cat_to_name[classes[0]], correct_prediction_category,
                       'Incorrect prediction')
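
A sketch of how predict_test might be driven from unittest; the architecture name and the reliance on the module-level constants referenced above (testing_dir, test_image, top_k, and so on) are assumptions:

import unittest
import torch

class PredictTests(unittest.TestCase):
    # Hypothetical test case wiring predict_test to a specific architecture.
    def test_predict_cpu(self):
        predict_test(self, 'vgg', enable_gpu=False)

    def test_predict_gpu(self):
        if torch.cuda.is_available():
            predict_test(self, 'vgg', enable_gpu=True)

if __name__ == '__main__':
    unittest.main()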
Example 4
def main():
    Input_aruguments = argument_parser()
    print("Loading checkpoints in-progress.")
    model = model_helper.load_checkpoint(Input_aruguments.checkpoint)
    print("Loading checkpoints completed. Checking for GPU, please wait.")
    gpu_check = torch.cuda.is_available() and Input_aruguments.gpu
    if gpu_check:
        model.cuda()
        print("GPU Device available.")
    else:
        warnings.warn(
            'No GPU found. Please use a GPU to train your neural network.')
    use_mapping_file = False
    if Input_aruguments.category_names:
        with open(Input_aruguments.category_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True
    print("Prediction in-progress. Please wait.")
    probs, classes = model_helper.predict(Input_aruguments.input, model,
                                          gpu_check, Input_aruguments.top_k)

    print("\nTop {} Classes predicted for '{}':".format(
        len(classes), Input_aruguments.input))
    if use_mapping_file:
        print("\n{:<30} {}".format("Flower", "Probability"))
        print("{:<30} {}".format("------", "-----------"))
    else:
        print("\n{:<10} {}".format("Class", "Probability"))
        print("{:<10} {}".format("------", "-----------"))

    for i in range(0, len(classes)):
        if use_mapping_file:
            print("{:<30} {:.2f}".format(get_title(classes[i], cat_to_name),
                                         probs[i]))
        else:
            print("{:<10} {:.2f}".format(classes[i], probs[i]))
Example 5
def main():
    Input_aruguments = argument_parser()
    print("Chosen Learning rate is {}, Hidden Units is {} and Epochs are {}".
          format(Input_aruguments.learning_rate, Input_aruguments.hidden_units,
                 Input_aruguments.epochs))

    batch_size = 64

    gpu_check = torch.cuda.is_available() and Input_aruguments.gpu
    if gpu_check:
        print("GPU Device available.")
    else:
        warnings.warn(
            "No GPU found. Please use a GPU to train your neural network.")

    print("Data loading started.")
    train_dir = Input_aruguments.data_dir + '/train'
    valid_dir = Input_aruguments.data_dir + '/valid'
    test_dir = Input_aruguments.data_dir + '/test'

    data_transforms = {
        'training_sets':
        transforms.Compose([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'validation_sets':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'testing_sets':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    # Load the datasets with ImageFolder
    image_datasets = {
        'training_sets':
        datasets.ImageFolder(train_dir,
                             transform=data_transforms['training_sets']),
        'validation_sets':
        datasets.ImageFolder(valid_dir,
                             transform=data_transforms['validation_sets']),
        'testing_sets':
        datasets.ImageFolder(test_dir,
                             transform=data_transforms['testing_sets'])
    }

    # Using the image datasets and the transforms, define the dataloaders
    dataloaders = {
        'training_sets':
        torch.utils.data.DataLoader(image_datasets['training_sets'],
                                    batch_size,
                                    shuffle=True),
        'validation_sets':
        torch.utils.data.DataLoader(image_datasets['validation_sets'],
                                    batch_size,
                                    shuffle=True),
        'testing_sets':
        torch.utils.data.DataLoader(image_datasets['testing_sets'],
                                    batch_size,
                                    shuffle=True)
    }
    print("Data loading completed. Model creation in-progress, please wait.")
    model, optimizer, criterion = model_helper.create_model(
        Input_aruguments.arch, Input_aruguments.hidden_units,
        Input_aruguments.learning_rate,
        image_datasets['training_sets'].class_to_idx)

    print("Model creation completed. Moving to GPU if available, please wait.")
    if gpu_check:
        model.cuda()
        criterion.cuda()

    print("Training started, please wait it might take upto 5 mins.")
    model_helper.train(model, criterion, optimizer, Input_aruguments.epochs,
                       dataloaders['training_sets'],
                       dataloaders['validation_sets'], gpu_check)
    print("Training completed. Saving checkpoints, please wait.")
    model_helper.save_checkpoint(model, optimizer, batch_size,
                                 Input_aruguments.learning_rate,
                                 Input_aruguments.arch,
                                 Input_aruguments.hidden_units,
                                 Input_aruguments.epochs)
    print("Saving checkpoints complete. Validating model, please wait.")
    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing_sets'],
                                                gpu_check)
    print("Validation Accuracy: {:.3f}".format(accuracy))
    image_path = 'flower_data/test/66/image_05582.jpg'
    print("Predication for: {}".format(image_path))
    probs, classes = model_helper.predict(image_path, model, gpu_check)
    print(probs)
    print(classes)
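
The training examples report loss and accuracy through model_helper.validate(model, criterion, loader, gpu). A minimal sketch of an evaluation loop with that signature:

import torch

def validate(model, criterion, data_loader, use_gpu):
    # Hypothetical evaluation loop: average loss and accuracy over a loader.
    model.eval()
    total_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for images, labels in data_loader:
            if use_gpu:
                images, labels = images.cuda(), labels.cuda()
            output = model(images)
            total_loss += criterion(output, labels).item()
            correct += (output.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    return total_loss / len(data_loader), correct / total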
Example 6
    sysargs = parser.parse_args()
    print(sysargs)

    # Load model checkpoint
    checkpoint = torch.load(sysargs.checkpoint)

    # Use checkpoint to load model
    num_epochs, model, optimizer = model_helper.load_checkpoint(checkpoint)

    # Enable CUDA if available
    device = torch.device("cuda:0" if (
        sysargs.gpu and torch.cuda.is_available()) else "cpu")

    # Make predictions
    probs, classes = model_helper.predict(sysargs.image_path, model, device,
                                          sysargs.top_k)

    # Load category names if provided, print top K predictions
    print("Top {} predictions:".format(sysargs.top_k))
    print("----------")

    if sysargs.category_names:
        with open(sysargs.category_names, 'r') as f:
            cat_to_name = json.load(f)
        flower_names = data_helper.get_flower_names(classes, cat_to_name)
        to_print = list(zip(probs, classes, flower_names))
        for k in range(sysargs.top_k):
            print(
                "{0}. Class: {1:4} Name: {2:20} Probability: {3:.2f}%".format(
                    k + 1, to_print[k][1], to_print[k][2],
                    to_print[k][0] * 100))
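
data_helper.get_flower_names is only referenced here. A minimal sketch, assuming it maps each predicted class label through the category-to-name dictionary:

def get_flower_names(classes, cat_to_name):
    # Hypothetical helper: translate class labels into flower names.
    return [cat_to_name.get(str(c), str(c)) for c in classes]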
Example 7
                    dest='top_k',
                    type=int,
                    default=5,
                    help='top K most likely classes')

args = parser.parse_args()
print("\n")
print("---------Entered Arguments----------")
print("Image path         = {!r}".format(args.img_path))
print("Checkpoint         = {!r}".format(args.checkpoint))
print("Enable gpu         = {!r}".format(args.gpu))
print("Top K              = {!r}".format(args.top_k))
print("------------------------------------\n")

print("Loading saved model ...\n")

device = torch.device("cuda:0" if args.gpu else "cpu")
model, class_to_idx, cat_to_name = model_helper.load_saved_model(
    args.checkpoint, device)
idx_to_class = {v: k for k, v in class_to_idx.items()}

print("Predicting ...\n")

ps, top_categories = model_helper.predict(args.img_path, model, args.top_k,
                                          cat_to_name, idx_to_class)
print('Results of predicting image category for ("{}"):'.format(
    args.img_path.split('/')[-1]))
print('Top {} Categories: '.format(args.top_k), '\n', '\t', top_categories)
print('Top {} Probabilities: '.format(args.top_k), '\n', '\t', ps)
print("\nPrediction completed successfully.\n")
Example 8
                    action="store_true",
                    default=False,
                    help='Use GPU for inference.')

arguments = parser.parse_args()
print('Running predict.py with the following arguments {}'.format(arguments))

# Load checkpoint and rebuild model
model = model_helper.load_checkpoint(arguments.checkpoint)

# Preprocess image into image tensor
image_tensor = model_helper.process_image(arguments.input)

# Return top K most likely classes and their probabilities
probabilities, topk_classes = model_helper.predict(image_tensor,
                                                   model,
                                                   arguments.gpu,
                                                   arguments.top_k,
                                                   arguments.category_names,
                                                   debug=True)

position = 1
for p, c in zip(probabilities, topk_classes):
    if arguments.category_names is not None:
        print('#{} Prediction: {} with {:.4f}% accuracy.'.format(
            position, c, p * 100))
    else:
        print('#{} Prediction: Class {} with {:.4f}% accuracy.'.format(
            position, c, p * 100))
    position += 1
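
Example 8 preprocesses the input with model_helper.process_image before predicting. A minimal sketch of such a function, assuming the usual 224x224 ImageNet preprocessing used elsewhere in these listings:

from PIL import Image
from torchvision import transforms

def process_image(image_path):
    # Hypothetical preprocessing: resize, center-crop and normalize an image
    # into the tensor layout the network expects.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    with Image.open(image_path) as image:
        return preprocess(image.convert('RGB'))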
Example 9
# Get validation error                 #
#                                      #
########################################

# Make validation
valid_user_id, valid_movie_id, valid_data_id, valid_rating = \
        reader.get_valid_umdr(user_id, movie_id, date_id, rating, data_idx)
print "Number of samples for valid case: ", valid_user_id.shape

# Load data in
U, V = mh.load_um_data(suffix="_K%d_reg%d.dat" % (K, reg),
                       U_shape=(M, K),
                       V_shape=(N, K))

# Run on validation set
pred_rating = mh.predict(valid_user_id, valid_movie_id, U, V)
pred_rating = mh.trim_pred(pred_rating)
rms_error = mh.rms_err(pred_rating, valid_rating)
print "validation error = %.15g" % rms_error

########################################
#                                      #
# Get probe error                      #
# NOTE: validation error seems to be   #
#       very low, whereas probe seems  #
#       to be around the quiz error..  #
#       Maybe use probe for choosing   #
#       models?                        #
########################################

# Make probe
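
Example 9 comes from a different project (matrix-factorization rating prediction) and is truncated here; mh.trim_pred and mh.rms_err are not shown. Minimal sketches, assuming ratings live on a 1-5 scale:

import numpy as np

def trim_pred(pred_rating, low=1.0, high=5.0):
    # Hypothetical: clip predictions into the valid rating range.
    return np.clip(pred_rating, low, high)

def rms_err(pred_rating, true_rating):
    # Hypothetical: root-mean-square error between predictions and truth.
    return np.sqrt(np.mean((pred_rating - true_rating) ** 2))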
Example 10
def main():
    start_time = time()

    in_args = get_input_args()

    use_gpu = torch.cuda.is_available() and in_args.gpu

    print("Training on {} using {}".format("GPU" if use_gpu else "CPU",
                                           in_args.arch))

    print("Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
        in_args.learning_rate, in_args.hidden_units, in_args.epochs))

    if not os.path.exists(in_args.save_dir):
        os.makedirs(in_args.save_dir)

    training_dir = in_args.data_dir + '/train'
    validation_dir = in_args.data_dir + '/valid'
    testing_dir = in_args.data_dir + '/test'

    data_transforms = {
        'training':
        transforms.Compose([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'validation':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'testing':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    dirs = {
        'training': training_dir,
        'validation': validation_dir,
        'testing': testing_dir
    }

    image_datasets = {
        x: datasets.ImageFolder(dirs[x], transform=data_transforms[x])
        for x in ['training', 'validation', 'testing']
    }

    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=64,
                                       shuffle=True)
        for x in ['training', 'validation', 'testing']
    }

    model, optimizer, criterion = model_helper.create_model(
        in_args.arch, in_args.hidden_units, in_args.learning_rate,
        image_datasets['training'].class_to_idx)

    if use_gpu:
        model.cuda()
        criterion.cuda()

    model_helper.train(model, criterion, optimizer, in_args.epochs,
                       dataloaders['training'], dataloaders['validation'],
                       use_gpu)

    file_path = in_args.save_dir + '/' + in_args.arch + \
        '_epoch' + str(in_args.epochs) + '.pth'

    model_helper.save_checkpoint(file_path, model, optimizer, in_args.arch,
                                 in_args.hidden_units, in_args.epochs)

    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing'],
                                                use_gpu)
    print("Post load Validation Accuracy: {:.3f}".format(accuracy))
    image_path = 'flowers/test/28/image_05230.jpg'
    print("Predication for: {}".format(image_path))
    probs, classes = model_helper.predict(image_path, model, use_gpu)
    print(probs)
    print(classes)

    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
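
Finally, every example relies on model_helper.predict. A minimal sketch matching the (image_path, model, use_gpu, top_k) call pattern of the flower-classifier examples, assuming the model outputs log-probabilities, carries a class_to_idx attribute, and an image preprocessor like the process_image sketch above is available (the later examples use different signatures):

import torch

def predict(image_path, model, use_gpu, topk=5):
    # Hypothetical prediction helper: preprocess the image, run a forward
    # pass and return the top-k probabilities with their class labels.
    model.eval()
    image = process_image(image_path).unsqueeze(0)  # add a batch dimension
    if use_gpu:
        image = image.cuda()
    with torch.no_grad():
        log_probs = model(image)
    probs, indices = torch.exp(log_probs).topk(topk)
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    classes = [idx_to_class[i] for i in indices[0].tolist()]
    return probs[0].tolist(), classes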