Example #1
def main():
    start_time = time()
    in_args = get_input_args()
    use_gpu = torch.cuda.is_available() and in_args.gpu

    print("Training on {} using {}".format("GPU" if use_gpu else "CPU",
                                           in_args.arch))

    print(
        "Architecture:{}, Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
            in_args.arch, in_args.learning_rate, in_args.hidden_units,
            in_args.epochs))

    dataloaders, class_to_idx = model_helper.get_dataloders(in_args.data_dir)

    model, optimizer, criterion = model_helper.create_model(
        in_args.arch, in_args.learning_rate, in_args.hidden_units,
        class_to_idx)

    if use_gpu:
        model.cuda()
        criterion.cuda()
    else:
        torch.set_num_threads(in_args.num_threads)

    model_helper.train(model, criterion, optimizer, in_args.epochs,
                       dataloaders['training'], dataloaders['validation'],
                       use_gpu)

    if in_args.save_dir:
        if not os.path.exists(in_args.save_dir):
            os.makedirs(in_args.save_dir)

        file_path = in_args.save_dir + '/' + in_args.arch + '_checkpoint.pth'
    else:
        file_path = in_args.arch + '_checkpoint.pth'

    model_helper.save_checkpoint(file_path, model, optimizer, in_args.arch,
                                 in_args.learning_rate, in_args.hidden_units,
                                 in_args.epochs)

    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing'],
                                                use_gpu)
    print("Test Accuracy: {:.3f}".format(accuracy))

    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
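
All of these examples read their hyperparameters from a command-line parser (get_input_args or argument_parser) that is not part of this listing. As a point of reference, a minimal sketch of such a parser is given below; it declares only the attributes the examples actually read, and every flag name and default value here is an assumption, not the projects' actual interface.

import argparse

def get_input_args():
    # Hypothetical parser covering the attributes used in the examples
    # (data_dir, arch, learning_rate, hidden_units, epochs, gpu, save_dir,
    # num_threads, num_workers, pin_memory); defaults are placeholders.
    parser = argparse.ArgumentParser(description='Train an image classifier.')
    parser.add_argument('data_dir', type=str,
                        help='folder containing train/valid/test subfolders')
    parser.add_argument('--arch', type=str, default='vgg16',
                        help='torchvision architecture name')
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--hidden_units', type=int, default=512)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--gpu', action='store_true',
                        help='use the GPU if one is available')
    parser.add_argument('--save_dir', type=str, default='',
                        help='directory in which to save the checkpoint')
    parser.add_argument('--num_threads', type=int, default=4,
                        help='CPU threads to use when no GPU is present')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='dataloader workers (used in Example #3)')
    parser.add_argument('--pin_memory', action='store_true',
                        help='pin dataloader memory (used in Example #3)')
    return parser.parse_args()
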
Example #2
def main():
    input_arguments = argument_parser()
    print("Chosen Learning rate is {}, Hidden Units is {} and Epochs are {}".format(
        input_arguments.learning_rate, input_arguments.hidden_units,
        input_arguments.epochs))

    batch_size = 64

    gpu_check = torch.cuda.is_available() and input_arguments.gpu
    if gpu_check:
        print("GPU Device available.")
    else:
        warnings.warn(
            "No GPU found. Please use a GPU to train your neural network.")

    print("Data loading started.")
    train_dir = input_arguments.data_dir + '/train'
    valid_dir = input_arguments.data_dir + '/valid'
    test_dir = input_arguments.data_dir + '/test'

    data_transforms = {
        'training_sets':
        transforms.Compose([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'validation_sets':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'testing_sets':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    # Load the datasets with ImageFolder
    image_datasets = {
        'training_sets':
        datasets.ImageFolder(train_dir,
                             transform=data_transforms['training_sets']),
        'validation_sets':
        datasets.ImageFolder(valid_dir,
                             transform=data_transforms['validation_sets']),
        'testing_sets':
        datasets.ImageFolder(test_dir,
                             transform=data_transforms['testing_sets'])
    }

    # Using the image datasets and the transforms, define the dataloaders
    dataloaders = {
        'training_sets':
        torch.utils.data.DataLoader(image_datasets['training_sets'],
                                    batch_size,
                                    shuffle=True),
        'validation_sets':
        torch.utils.data.DataLoader(image_datasets['validation_sets'],
                                    batch_size,
                                    shuffle=True),
        'testing_sets':
        torch.utils.data.DataLoader(image_datasets['testing_sets'],
                                    batch_size,
                                    shuffle=True)
    }
    print("Data loading completed. Model creation in-progress, please wait.")
    model, optimizer, criterion = model_helper.create_model(
        input_arguments.arch, input_arguments.hidden_units,
        input_arguments.learning_rate,
        image_datasets['training_sets'].class_to_idx)

    print("Model creation completed. Moving to GPU if available, please wait.")
    if gpu_check:
        model.cuda()
        criterion.cuda()

    print("Training started, please wait it might take upto 5 mins.")
    model_helper.train(model, criterion, optimizer, Input_aruguments.epochs,
                       dataloaders['training_sets'],
                       dataloaders['validation_sets'], gpu_check)
    print("Training completed. Saving checkpoints, please wait.")
    model_helper.save_checkpoint(model, optimizer, batch_size,
                                 input_arguments.learning_rate,
                                 input_arguments.arch,
                                 input_arguments.hidden_units,
                                 input_arguments.epochs)
    print("Saving checkpoints complete. Validating model, please wait.")
    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing_sets'],
                                                gpu_check)
    print("Validation Accuracy: {:.3f}".format(accuracy))
    image_path = 'flower_data/test/66/image_05582.jpg'
    print("Predication for: {}".format(image_path))
    probs, classes = model_helper.predict(image_path, model, gpu_check)
    print(probs)
    print(classes)
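
The predict helper called at the end of this example is also outside the listing. A possible shape for it is sketched below, under the assumption that the model stores class_to_idx and ends in LogSoftmax (so its outputs are log-probabilities); this is not the project's actual model_helper.predict.

import torch
from PIL import Image
from torchvision import transforms

def predict(image_path, model, use_gpu, topk=5):
    # Preprocess the image the same way as the validation/testing transforms above.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    image = preprocess(Image.open(image_path)).unsqueeze(0)
    if use_gpu:
        image = image.cuda()

    model.eval()
    with torch.no_grad():
        output = model(image)

    # Assumes the classifier ends in LogSoftmax, so exp() recovers probabilities.
    probs, indices = torch.exp(output).topk(topk)
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    classes = [idx_to_class[idx] for idx in indices[0].tolist()]
    return probs[0].tolist(), classes
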
Example #3
def main():
    start_time = time()

    in_args = get_input_args()

    # Check for GPU
    use_gpu = torch.cuda.is_available() and in_args.gpu

    # Print parameter information
    if use_gpu:
        print("Training on GPU{}".format(
            " with pinned memory" if in_args.pin_memory else "."))
    else:
        print("Training on CPU using {} threads.".format(in_args.num_threads))

    print("Architecture:{}, Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
        in_args.arch, in_args.learning_rate, in_args.hidden_units, in_args.epochs))

    # Get dataloaders for training
    dataloaders, class_to_idx = model_helper.get_dataloders(in_args.data_dir,
                                                            use_gpu,
                                                            in_args.num_workers,
                                                            in_args.pin_memory)

    # Create model
    model, optimizer, criterion = model_helper.create_model(in_args.arch,
                                                            in_args.learning_rate,
                                                            in_args.hidden_units,
                                                            class_to_idx)

    # Move tensors to GPU if available
    if use_gpu:
        model.cuda()
        criterion.cuda()
    else:
        torch.set_num_threads(in_args.num_threads)

    # Train the network
    model_helper.train(model,
                       criterion,
                       optimizer,
                       in_args.epochs,
                       dataloaders['training'],
                       dataloaders['validation'],
                       use_gpu)

    # Save trained model
    if in_args.save_dir:

        # Create save directory if required
        if not os.path.exists(in_args.save_dir):
            os.makedirs(in_args.save_dir)

        # Save checkpoint in save directory
        file_path = in_args.save_dir + '/' + in_args.arch + '_checkpoint.pth'
    else:
        # Save checkpoint in current directory
        file_path = in_args.arch + '_checkpoint.pth'

    model_helper.save_checkpoint(file_path,
                                 model,
                                 optimizer,
                                 in_args.arch,
                                 in_args.learning_rate,
                                 in_args.hidden_units,
                                 in_args.epochs)

    # Get prediction accuracy using test dataset
    test_loss, accuracy = model_helper.validate(
        model, criterion, dataloaders['testing'], use_gpu)
    print("Testing Accuracy: {:.3f}".format(accuracy))

    # Computes overall runtime in seconds & prints it in hh:mm:ss format
    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
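
The final comment describes utility.print_elapsed_time as printing the runtime in hh:mm:ss format. A small helper matching that description (assumed, not the project's actual utility module) could look like this:

def print_elapsed_time(total_seconds):
    # Convert a duration in seconds to hh:mm:ss and print it.
    hours, remainder = divmod(int(total_seconds), 3600)
    minutes, seconds = divmod(remainder, 60)
    print("Elapsed time: {:02d}:{:02d}:{:02d}".format(hours, minutes, seconds))
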
Example #4
def main():
    start_time = time()

    in_args = get_input_args()

    use_gpu = torch.cuda.is_available() and in_args.gpu

    print("Training on {} using {}".format("GPU" if use_gpu else "CPU",
                                           in_args.arch))

    print("Learning rate:{}, Hidden Units:{}, Epochs:{}".format(
        in_args.learning_rate, in_args.hidden_units, in_args.epochs))

    if not os.path.exists(in_args.save_dir):
        os.makedirs(in_args.save_dir)

    training_dir = in_args.data_dir + '/train'
    validation_dir = in_args.data_dir + '/valid'
    testing_dir = in_args.data_dir + '/test'

    data_transforms = {
        'training':
        transforms.Compose([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'validation':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'testing':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    dirs = {
        'training': training_dir,
        'validation': validation_dir,
        'testing': testing_dir
    }

    image_datasets = {
        x: datasets.ImageFolder(dirs[x], transform=data_transforms[x])
        for x in ['training', 'validation', 'testing']
    }

    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=64,
                                       shuffle=True)
        for x in ['training', 'validation', 'testing']
    }

    model, optimizer, criterion = model_helper.create_model(
        in_args.arch, in_args.hidden_units, in_args.learning_rate,
        image_datasets['training'].class_to_idx)

    if use_gpu:
        model.cuda()
        criterion.cuda()

    model_helper.train(model, criterion, optimizer, in_args.epochs,
                       dataloaders['training'], dataloaders['validation'],
                       use_gpu)

    file_path = in_args.save_dir + '/' + in_args.arch + \
        '_epoch' + str(in_args.epochs) + '.pth'

    model_helper.save_checkpoint(file_path, model, optimizer, in_args.arch,
                                 in_args.hidden_units, in_args.epochs)

    test_loss, accuracy = model_helper.validate(model, criterion,
                                                dataloaders['testing'],
                                                use_gpu)
    print("Post load Validation Accuracy: {:.3f}".format(accuracy))
    image_path = 'flowers/test/28/image_05230.jpg'
    print("Predication for: {}".format(image_path))
    probs, classes = model_helper.predict(image_path, model, use_gpu)
    print(probs)
    print(classes)

    end_time = time()
    utility.print_elapsed_time(end_time - start_time)
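
Every example builds its network through model_helper.create_model, which returns a model, optimizer and criterion for a given architecture, learning rate, hidden-unit count and class_to_idx mapping (Examples #2 and #4 pass hidden_units before learning_rate). A minimal transfer-learning sketch with that signature is shown below, following the argument order of Examples #1 and #3; the classifier layout, optimizer choice and loss function are assumptions, not the projects' actual code.

from torch import nn, optim
from torchvision import models

def create_model(arch, learning_rate, hidden_units, class_to_idx):
    # Load a pretrained backbone by name, e.g. 'vgg16', and freeze its weights.
    model = getattr(models, arch)(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False

    # Replace the classifier; assumes a VGG-style model whose classifier
    # starts with a Linear layer.
    in_features = model.classifier[0].in_features
    model.classifier = nn.Sequential(
        nn.Linear(in_features, hidden_units),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(hidden_units, len(class_to_idx)),
        nn.LogSoftmax(dim=1)
    )
    model.class_to_idx = class_to_idx

    # NLLLoss pairs with the LogSoftmax output; only the new classifier is trained.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    return model, optimizer, criterion
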