def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu

    if in_args.verbose:
        print("Predicting on {} using {}".format("GPU" if use_gpu else "CPU",
                                                 in_args.checkpoint))

    # Load the trained model from the checkpoint
    model = model_helper.load_checkpoint(in_args.checkpoint, in_args.verbose)

    # Move tensors to GPU if available
    if use_gpu:
        model.cuda()

    # Load category mapping dictionary
    use_mapping_file = False
    if in_args.category_names:
        with open(in_args.category_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True

    # Get prediction
    number_of_results = in_args.top_k if in_args.top_k else 1
    probs, classes = model_helper.predict(in_args.input, model, use_gpu,
                                          number_of_results)

    # Print results
    if number_of_results > 1:
        print("\nTop {} Classes predicted for '{}':".format(
            len(classes), in_args.input))

        if use_mapping_file:
            print("\n{:<30} {}".format("Flower", "Probability"))
            print("{:<30} {}".format("------", "-----------"))
        else:
            print("\n{:<10} {}".format("Class", "Probability"))
            print("{:<10} {}".format("------", "-----------"))

        for i in range(len(classes)):
            if use_mapping_file:
                print("{:<30} {:.2f}".format(
                    get_title(classes[i], cat_to_name), probs[i]))
            else:
                print("{:<10} {:.2f}".format(classes[i], probs[i]))
    else:
        print("\nMost likely image class is '{}' with probability of {:.2f}".format(
            get_title(classes[0], cat_to_name) if use_mapping_file else classes[0],
            probs[0]))

    # Compute overall runtime in seconds and print it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
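
# The get_title() helper used above is not shown in these listings. A minimal
# sketch of what it presumably does, assuming `classes` holds category ids that
# key into the cat_to_name dictionary loaded from the JSON mapping file (the
# fallback behaviour is an assumption, not taken from the original code):
def get_title(label, cat_to_name):
    """Map a predicted class label to its display name.

    Falls back to the raw label when the mapping file has no entry for it.
    """
    return cat_to_name.get(str(label), label)
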
def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu

    # Print parameter information
    print("Training on {} using {}".format("GPU" if use_gpu else "CPU",
                                           in_args.arch))
    print("Architecture:{}, Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
        in_args.arch, in_args.learning_rate, in_args.hidden_units,
        in_args.epochs))

    # Get dataloaders for training, validation and testing
    dataloaders, class_to_idx = model_helper.get_dataloders(in_args.data_dir)

    # Create model
    model, optimizer, criterion = model_helper.create_model(
        in_args.arch, in_args.learning_rate, in_args.hidden_units, class_to_idx)

    # Move tensors to GPU if available
    if use_gpu:
        model.cuda()
        criterion.cuda()
    else:
        torch.set_num_threads(in_args.num_threads)

    # Train the network
    model_helper.train(model, criterion, optimizer, in_args.epochs,
                       dataloaders['training'], dataloaders['validation'],
                       use_gpu)

    # Save the trained model
    if in_args.save_dir:
        # Save checkpoint in save_dir, creating the directory if required
        if not os.path.exists(in_args.save_dir):
            os.makedirs(in_args.save_dir)
        file_path = in_args.save_dir + '/' + in_args.arch + '_checkpoint.pth'
    else:
        # Save checkpoint in the current directory
        file_path = in_args.arch + '_checkpoint.pth'

    model_helper.save_checkpoint(file_path, model, optimizer, in_args.arch,
                                 in_args.learning_rate, in_args.hidden_units,
                                 in_args.epochs)

    # Measure prediction accuracy on the test dataset
    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing'], use_gpu)
    print("Test Accuracy: {:.3f}".format(accuracy))

    # Compute overall runtime and print it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
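
# utility.print_elapsed_time() is called at the end of every listing; the
# surrounding comments say it reports the runtime in hh:mm:ss. A minimal sketch
# under that assumption (the exact message text is invented for illustration):
def print_elapsed_time(total_seconds):
    """Print a duration given in seconds as hh:mm:ss."""
    hours, remainder = divmod(int(total_seconds), 3600)
    minutes, seconds = divmod(remainder, 60)
    print("\nTotal elapsed runtime: {:02d}:{:02d}:{:02d}".format(
        hours, minutes, seconds))
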
def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu
    print("Predicting on {} using {}".format("GPU" if use_gpu else "CPU",
                                             in_args.checkpoint))

    # Load the trained model from the checkpoint
    model = model_helper.load_checkpoint(in_args.checkpoint)

    # Move tensors to GPU if available
    if use_gpu:
        model.cuda()

    # Load the category-to-name mapping dictionary if one was supplied
    use_mapping_file = False
    if in_args.category_names:
        with open(in_args.category_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True

    # Get the top-k prediction
    probs, classes = model_helper.predict(in_args.input, model, use_gpu,
                                          in_args.top_k)

    # Print results
    print("\nTop {} Classes predicted for '{}':".format(
        len(classes), in_args.input))

    if use_mapping_file:
        print("\n{:<30} {}".format("Flower", "Probability"))
        print("{:<30} {}".format("------", "-----------"))
    else:
        print("\n{:<10} {}".format("Class", "Probability"))
        print("{:<10} {}".format("------", "-----------"))

    for i in range(len(classes)):
        if use_mapping_file:
            print("{:<30} {:.2f}".format(get_title(classes[i], cat_to_name),
                                         probs[i]))
        else:
            print("{:<10} {:.2f}".format(classes[i], probs[i]))

    # Compute overall runtime and print it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)

def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu

    # Print parameter information
    if use_gpu:
        print("Training on GPU{}".format(
            " with pinned memory" if in_args.pin_memory else "."))
    else:
        print("Training on CPU using {} threads.".format(in_args.num_threads))

    print("Architecture:{}, Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
        in_args.arch, in_args.learning_rate, in_args.hidden_units,
        in_args.epochs))

    # Get dataloaders for training
    dataloaders, class_to_idx = model_helper.get_dataloders(
        in_args.data_dir, use_gpu, in_args.num_workers, in_args.pin_memory)

    # Create model
    model, optimizer, criterion = model_helper.create_model(
        in_args.arch, in_args.learning_rate, in_args.hidden_units, class_to_idx)

    # Move tensors to GPU if available
    if use_gpu:
        model.cuda()
        criterion.cuda()
    else:
        torch.set_num_threads(in_args.num_threads)

    # Train the network
    model_helper.train(model, criterion, optimizer, in_args.epochs,
                       dataloaders['training'], dataloaders['validation'],
                       use_gpu)

    # Save trained model
    if in_args.save_dir:
        # Create save directory if required
        if not os.path.exists(in_args.save_dir):
            os.makedirs(in_args.save_dir)

        # Save checkpoint in save directory
        file_path = in_args.save_dir + '/' + in_args.arch + '_checkpoint.pth'
    else:
        # Save checkpoint in current directory
        file_path = in_args.arch + '_checkpoint.pth'

    model_helper.save_checkpoint(file_path, model, optimizer, in_args.arch,
                                 in_args.learning_rate, in_args.hidden_units,
                                 in_args.epochs)

    # Get prediction accuracy using test dataset
    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing'], use_gpu)
    print("Testing Accuracy: {:.3f}".format(accuracy))

    # Computes overall runtime in seconds & prints it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
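
# get_input_args() is not shown in these listings. A hypothetical sketch of the
# argument parser the training script above appears to expect: only the
# attribute names (data_dir, gpu, arch, learning_rate, ...) come from the code
# above; every flag spelling, default, and help string here is an assumption.
import argparse


def get_input_args():
    """Parse the command-line arguments accessed by the training main()."""
    parser = argparse.ArgumentParser(description='Train an image classifier')
    parser.add_argument('data_dir', type=str,
                        help='directory containing train/valid/test folders')
    parser.add_argument('--save_dir', type=str, default='',
                        help='directory in which to save checkpoints')
    parser.add_argument('--arch', type=str, default='vgg16',
                        help='torchvision architecture for the feature extractor')
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--hidden_units', type=int, default=512)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--gpu', action='store_true',
                        help='train on the GPU when one is available')
    parser.add_argument('--num_threads', type=int, default=4,
                        help='CPU threads used when training without a GPU')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='dataloader worker processes')
    parser.add_argument('--pin_memory', action='store_true',
                        help='pin dataloader memory for faster host-to-GPU copies')
    return parser.parse_args()
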
def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu

    # Print parameter information
    print("Training on {} using {}".format("GPU" if use_gpu else "CPU",
                                           in_args.arch))
    print("Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
        in_args.learning_rate, in_args.hidden_units, in_args.epochs))

    # Create the save directory if it does not exist
    if not os.path.exists(in_args.save_dir):
        os.makedirs(in_args.save_dir)

    # Dataset directories
    training_dir = in_args.data_dir + '/train'
    validation_dir = in_args.data_dir + '/valid'
    testing_dir = in_args.data_dir + '/test'

    # Transforms: augmentation for training, deterministic resize/crop otherwise
    data_transforms = {
        'training': transforms.Compose([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
        ]),
        'validation': transforms.Compose([
            transforms.Resize(256),  # transforms.Scale is deprecated; Resize replaces it
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
        ]),
        'testing': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
        ])
    }

    dirs = {
        'training': training_dir,
        'validation': validation_dir,
        'testing': testing_dir
    }

    # Load the datasets and wrap them in dataloaders
    image_datasets = {
        x: datasets.ImageFolder(dirs[x], transform=data_transforms[x])
        for x in ['training', 'validation', 'testing']
    }
    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64,
                                       shuffle=True)
        for x in ['training', 'validation', 'testing']
    }

    # Create model
    model, optimizer, criterion = model_helper.create_model(
        in_args.arch, in_args.hidden_units, in_args.learning_rate,
        image_datasets['training'].class_to_idx)

    # Move tensors to GPU if available
    if use_gpu:
        model.cuda()
        criterion.cuda()

    # Train the network
    model_helper.train(model, criterion, optimizer, in_args.epochs,
                       dataloaders['training'], dataloaders['validation'],
                       use_gpu)

    # Save the trained model
    file_path = in_args.save_dir + '/' + in_args.arch + \
        '_epoch' + str(in_args.epochs) + '.pth'
    model_helper.save_checkpoint(file_path, model, optimizer, in_args.arch,
                                 in_args.hidden_units, in_args.epochs)

    # Measure prediction accuracy on the test dataset
    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing'], use_gpu)
    print("Test Accuracy: {:.3f}".format(accuracy))

    # Run a sample prediction to sanity-check the trained model
    image_path = 'flowers/test/28/image_05230.jpg'
    print("Prediction for: {}".format(image_path))
    probs, classes = model_helper.predict(image_path, model, use_gpu)
    print(probs)
    print(classes)

    # Compute overall runtime and print it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
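
# model_helper.predict() is called both with and without an explicit top-k
# count across these listings, so it presumably has a default. A rough sketch
# of one possible implementation, assuming the model outputs log-probabilities,
# carries class_to_idx as an attribute, and that a process_image() helper
# (hypothetical here) applies the same resize/crop/normalise steps as the
# validation transform:
def predict(image_path, model, use_gpu, topk=5):
    """Return the top-k probabilities and class labels for a single image."""
    model.eval()
    image = process_image(image_path)  # hypothetical helper -> 3x224x224 tensor
    image = image.unsqueeze(0)         # add the batch dimension
    if use_gpu:
        image = image.cuda()

    with torch.no_grad():
        output = model(image)
        probs, indices = torch.exp(output).topk(topk)  # assumes log-softmax output

    probs = probs.cpu().squeeze(0).tolist()
    indices = indices.cpu().squeeze(0).tolist()

    # Invert class_to_idx so dataset indices map back to class labels
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    classes = [idx_to_class[i] for i in indices]
    return probs, classes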