def main():
    """Entry point: parse CLI arguments, prepare the data, then build,
    train and checkpoint the image classifier."""
    # Command-line options supplied by the user.
    args = get_input_args()

    # Split the dataset into training / validation / test loaders.
    train_dl, valid_dl, test_dl, train_data = load_and_transform(args.data_dir)

    # Assemble the network: pre-trained backbone plus a fresh classifier.
    # Loss criterion and optimizer are fixed inside build_classifier.
    model, criterion, optimizer = build_classifier(
        args.arch,
        args.in_features,
        args.hidden_layers,
        args.output_size,
        args.learning_rate,
    )

    # Run the training loop (validation happens inside train()).
    fitted_model = train(args.epochs, train_dl, valid_dl, optimizer,
                         model, criterion, args.gpu)

    # Persist the trained network to disk as a checkpoint.
    save_model(fitted_model, optimizer, args.saving_dir, args.arch,
               args.learning_rate, args.epochs, train_data)
# NOTE(review): this chunk is the tail of a training script's main();
# `results`, `data_dir`, `arch`, `hidden_units`, `dropout`,
# `learning_rate`, `save_dir` and `epochs` are bound earlier, outside
# this view — confirm against the full file.
gpu_mode = results.gpu

# Load and preprocess data.
trainloader, testloader, validloader, train_data, test_data, valid_data = load_data(
    data_dir)

# Load the pretrained backbone selected on the command line.
pre_tr_model = results.pretrained_model
model = getattr(models, pre_tr_model)(pretrained=True)

# Build and attach a new classifier sized to the backbone's feature output:
# VGG exposes its input width on classifier[0]; DenseNet on classifier itself.
if arch in ("vgg11", "vgg13", "vgg16", "vgg19"):
    input_units = model.classifier[0].in_features
elif arch in ("densenet121", "densenet161"):
    input_units = model.classifier.in_features
else:
    # FIX: fail fast with a clear message — previously an unsupported
    # architecture left `input_units` unbound and crashed below with a
    # confusing NameError.
    raise ValueError(f"Unsupported architecture: {arch!r}")
build_classifier(model, input_units, hidden_units, dropout)

# NLLLoss pairs with a LogSoftmax output layer.
criterion = nn.NLLLoss()
# Adam uses momentum, which helps avoid shallow local minima.
optimizer = optim.Adam(model.classifier.parameters(), learning_rate)

# Train, evaluate on the test set, then persist the model.
model, optimizer = train_model(model, epochs, trainloader, validloader,
                               criterion, optimizer, gpu_mode)
test_model(model, testloader, gpu_mode)
save_model(model, train_data, optimizer, save_dir, epochs)
# NOTE(review): tail of a training script's main(); `results`, `data_dir`,
# `hidden_unit_count`, `dropout_rate` and `learning_rate` are bound
# earlier, outside this view.
epochs = results.num_epochs
gpu_mode = results.gpu
arch = results.pretrained_model

# Prepare training / testing / validation loaders and their datasets.
(train_loader, test_loader, validate_loader,
 train_data, test_data, validate_data) = load_data(data_dir)

# Instantiate the requested pretrained backbone.
backbone_name = results.pretrained_model
model = getattr(models, backbone_name)(pretrained=True)

# Replace the stock classifier with one sized to the backbone's features.
# NOTE(review): indexing classifier[0] assumes a VGG-style Sequential
# head — confirm the supported architectures.
input_unit_count = model.classifier[0].in_features
build_classifier(model, input_unit_count, hidden_unit_count, dropout_rate)

# NLLLoss complements a LogSoftmax output layer.
criterion = nn.NLLLoss()
# Adam: gradient descent with momentum terms, to better escape
# local minima and speed convergence.
optimizer = optim.Adam(model.classifier.parameters(), learning_rate)

# Train with validation, then evaluate on the held-out test set.
model, optimizer = train_model(model, epochs, train_loader, validate_loader,
                               criterion, optimizer, gpu_mode)
test_model(model, test_loader, gpu_mode)
# NOTE(review): tail of a training script's main(); `results` and
# `data_dir` are bound earlier, outside this view.
learning_rate = results.learning_rate
epochs = results.epochs
dropout = results.dropout
input_unit = results.input_unit
hidden_unit = results.hidden_unit
class_number = results.class_number
structure = results.structure
gpu_mode = results.gpu

# Load and preprocess data.
(image_datasets_train, image_datasets_valid, image_datasets_test,
 dataloaders_train, dataloaders_valid, dataloaders_test) = load_data(data_dir)

# Load the pretrained backbone and attach the new classifier.
# FIX: the user-selected architecture is `structure` (results.structure);
# the original referenced an undefined name `arch` here, which would
# raise NameError before any training started.
model = getattr(models, structure)(pretrained=True)
build_classifier(model, input_unit, hidden_unit, class_number, dropout)

# NLLLoss pairs with a LogSoftmax output; Adam optimizes only the new
# classifier's parameters, leaving the pretrained features frozen here.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)

# Model training and validation.
print('prerequisite has been fulfiled, now start training & validation...')
model = train_and_validation_the_model(gpu_mode, model, learning_rate, epochs,
                                       dataloaders_train, dataloaders_valid,
                                       criterion, optimizer)

# Model testing.
print('\n now start testing...')
test_model(model, dataloaders_test, gpu_mode)