def start_training():
    """Run the full training pipeline from command-line arguments.

    Loads the data, builds the model, trains it, and saves a checkpoint
    so operators can reuse the trained network later.
    """
    args = get_input_args()

    # Use CUDA only when it is both available and explicitly requested.
    wants_gpu = args.gpu == "gpu"
    device = torch.device("cuda" if torch.cuda.is_available() and wants_gpu else "cpu")

    (train_loader, test_loader, valid_loader,
     train_data, test_transforms) = data_loading()

    print("\nCreating model...\n")
    model, optimizer, criterion = create_model(device)

    print("\nCommencing training...\n")
    model, optimizer = train_model(device, model, train_loader, optimizer,
                                   valid_loader, criterion)
    print("\nTraining Complete.\n")

    # Persist the trained network for later inference.
    save_checkpoint(train_data, model, optimizer)
def main():
    """Command-line entry point: parse training options, build the image
    data pipeline, then train and checkpoint a flower classifier.

    The GPU flag is mandatory for this script; it exits immediately when
    ``--gpu`` is not supplied.
    """
    parser = argparse.ArgumentParser()
    # FIX: a default on a required positional is silently ignored;
    # nargs='?' makes it optional so 'flowers' actually applies.
    parser.add_argument('data_dir', type=str, nargs='?', default='flowers',
                        help='Root folder where all the training images are located.')
    parser.add_argument('--save_dir', type=str, default='ckPoint',
                        help='Directory where the checkpoints are saved')
    parser.add_argument('--arch', dest='architecture', default='vgg13',
                        action='store', choices=['vgg13', 'densenet121'],
                        help='CNN Architecture type: vgg13, densenet121')
    parser.add_argument('--learning_rate', type=float, default=0.01,
                        help='Value of the Learning Rate (default 0.01)')
    parser.add_argument('--hidden_units', type=int, default=512,
                        help='Number of hidden units')
    parser.add_argument('--epochs', type=int, default=6,
                        help='Number of Epoch for the training')
    parser.add_argument('--gpu', dest='gpu', default=False, action='store_true',
                        help='Please Use GPU for training')
    args = parser.parse_args()

    # Echo the resolved configuration so the operator can sanity-check it.
    print(
        "\n\n##########################################################################################"
    )
    print("The Neural Network has been created with the following Parameters:")
    print("The data for the training is in the folder: {}".format(args.data_dir))
    print(
        "The checkpoint with the save details of the network will be saved in the folder: {}"
        .format(args.save_dir))
    print("The CNN Architecture is: {}".format(args.architecture))
    print("The Learning Rate is: {}".format(args.learning_rate))
    print("The number of Hidden Units is: {}".format(args.hidden_units))
    print("The model will run for {} epochs ".format(args.epochs))
    if args.gpu:
        # FIX: the original message had no '{}' placeholder, so the
        # .format(args.gpu) call did nothing.
        print("The model will run using the GPU: {}".format(args.gpu))
    else:
        # GPU training is required here; refuse to continue on CPU.
        print("....Please add the GPU......")
        exit()
    print(
        "##########################################################################################\n\n"
    )
    print(args.data_dir)

    # Keyword names below (artitecture=..., sic) must match what
    # training_functions declares, so the misspelling is kept.
    artitecture = args.architecture
    hidden = args.hidden_units
    learn_rate = args.learning_rate
    epochs = args.epochs
    filename = args.save_dir
    # (removed unused local `gpu = True`; gpu=True is passed explicitly below)

    data_dir = args.data_dir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    batch_size = 32

    # Augmentation for training; ImageNet normalization statistics.
    train_transforms = transforms.Compose([
        transforms.Resize(224),
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    # NOTE(review): RandomResizedCrop makes validation/test evaluation
    # non-deterministic; Resize + CenterCrop is the usual choice.
    # Kept as-is to preserve behavior — confirm with the model owner.
    valid_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    test_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_transforms)

    # NOTE(review): these loaders are built but never passed to
    # training_functions.train_model below — presumably it builds its
    # own; verify against that module before removing them.
    trainloader = torch.utils.data.DataLoader(train_data,
                                              batch_size=batch_size,
                                              shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data,
                                              batch_size=batch_size,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data,
                                             batch_size=batch_size,
                                             shuffle=True)

    print("start")
    optimizer, model = training_functions.define_model(artitecture=artitecture,
                                                       hidden=hidden,
                                                       learn_rate=learn_rate,
                                                       gpu=True)
    print("phase1")
    training_functions.train_model(model=model,
                                   learn_rate=learn_rate,
                                   epochs=epochs,
                                   gpu=True)
    print("phase2")
    # Attach the class-index mapping so the checkpoint can translate
    # predictions back to category labels.
    model.class_to_idx = train_data.class_to_idx
    training_functions.save_status(model=model,
                                   filename=filename,
                                   artitecture=artitecture,
                                   hidden=hidden)
    print("phase3")
lr=rate_pretrain, weight_decay=weight_pretrain) optimizers = [optimizer, optimizer_pretrain] scheduler = lr_scheduler.StepLR(optimizer, step_size=sched_step, gamma=sched_gamma) scheduler_pretrain = lr_scheduler.StepLR(optimizer_pretrain, step_size=sched_step_pretrain, gamma=sched_gamma_pretrain) schedulers = [scheduler, scheduler_pretrain] if args.mode == 'train_full': model = training_functions.train_model(model, dataloader, criteria, optimizers, schedulers, epochs, params) elif args.mode == 'pretrain': model = training_functions.pretraining(model, dataloader, criteria, optimizers, schedulers, epochs, params) # Save final model torch.save(model.state_dict(), name_net + '.pt') # Close files f.close() if board: writer.close()
import argparse
from training_functions import train_model

# dimensions of our images.
img_width, img_height = 128, 128

# default arguments
default_train_data_dir = './data/spectrograms/'
default_validation_data_dir = './data/spectrograms-test/'
default_models_folder = './models'
default_model_name = 'model'

# Command-line interface: every option falls back to the defaults above.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-t', '--train',
    default=default_train_data_dir,
    help='Training data folder. Default: {}'.format(default_train_data_dir))
parser.add_argument(
    '-v', '--validation',
    default=default_validation_data_dir,
    help='Validation data folder. Default: {}'.format(default_validation_data_dir))
parser.add_argument(
    '-m', '--models',
    default=default_models_folder,
    help='Models folder. Default: {}'.format(default_models_folder))
parser.add_argument(
    '-n', '--name',
    default=default_model_name,
    help='Model name, without ".h5". Default: {}'.format(default_model_name))
args = parser.parse_args()

# Train with the resolved settings and keep the fitted model around.
training_kwargs = {
    'train_data_dir': args.train,
    'validation_data_dir': args.validation,
    'image_dimensions': (img_width, img_height),
    'model_name': args.name,
    'models_folder': args.models,
}
model = train_model(**training_kwargs)
# loop over files files = [f for f in os.scandir(dir_entry.path) if not f.is_dir()] num_files = round(fraction * len(files)) files_to_copy = random.sample(files, num_files) for file_entry in files_to_copy: shutil.copy(file_entry.path, os.path.join(temp_category_dir, file_entry.name)) num_training_samples += 1 # train model on the data subset: _ = train_model(train_data_dir=temp_data_dir, validation_data_dir=validation_data_dir, image_dimensions=(img_width, img_height), model_name=model_name.format(fraction), models_folder=models_folder, num_epochs=max(round(fraction * 50), 15)) # load model (because latest model may have collapsed, not necessarily the best) model = load_model( os.path.join(models_folder, model_name.format(fraction) + '.h5')) # evaulate y_total = [] y_pred_total = [] for i in range(num_batches): [X, Y] = validation_generator.next() Y_pred = model.predict(X)
import argparse
from training_functions import train_model

# dimensions of our images.
img_width, img_height = 128, 128

# default data folder
train_data_dir = './data/spectrograms/'
validation_data_dir = './data/spectrograms-test/'
models_folder = './models'
model_name = 'model-with-tire-screech-10'

# IMPROVED: let argparse apply the defaults directly instead of the
# original "if args.x: x = args.x" fallback chain — same resolved
# values, consistent with the sibling training script, and the help
# text can no longer drift from the fallback actually used.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--train', default=train_data_dir,
                    help='Training data folder. Default: {}'.format(train_data_dir))
parser.add_argument('-v', '--validation', default=validation_data_dir,
                    help='Validation data folder. Default: {}'.format(validation_data_dir))
parser.add_argument('-n', '--name', default=model_name,
                    help='Model name. Default: {}'.format(model_name))
args = parser.parse_args()

# Keep the module-level names bound to the resolved values, exactly as
# the original reassignment chain did.
train_data_dir = args.train
validation_data_dir = args.validation
model_name = args.name

model = train_model(
    train_data_dir=train_data_dir,
    validation_data_dir=validation_data_dir,
    image_dimensions=(img_width, img_height),
    model_name=model_name,
    models_folder=models_folder,
)