```python
# Check for GPU availability:
device = my_models.device_gpu_cpu()
print('using device:', device)
dtype = torch.float32  # we will be using float

# Constant to control how frequently we print train loss
print_every = 100

train = True
test = True
model = None

if train:
    # Create model:
    model = my_models.model_2()
    my_models.test_model_size(model, dtype)  # test model size output

    optimizer = optim.Adadelta(model.parameters())

    # Train model:
    model, loss_data = my_models.train_model(model, optimizer, train_loader, val_loader,
                                             device, dtype, epoches=2)

    # Save model to file:
    torch.save(model.state_dict(), MODEL_PATH + MODEL_NAME)
```
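The `test` flag above is set but never used in this cell; a minimal sketch of the complementary branch is shown below, assuming the same `my_models.model_2` class and the file just written to `MODEL_PATH + MODEL_NAME` (this uses standard PyTorch `load_state_dict`, not a helper from `my_models`):

```python
if test:
    # If training was skipped, restore the saved weights from disk first:
    if model is None:
        model = my_models.model_2()
        model.load_state_dict(torch.load(MODEL_PATH + MODEL_NAME))
    model = model.to(device=device)
    model.eval()  # disable dropout / batch-norm updates before evaluation
```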
```python
# Check for GPU availability:
device = my_models.device_gpu_cpu()
print('using device:', device)
dtype = torch.float32  # we will be using float

train = True
train_loops = 20
loss_data = []

if train:
    for tl in range(train_loops):
        # Create models:
        model_1 = my_models.model_2()
        model_2 = my_models.model_2()
        model = (model_1, model_2)
        # my_models.test_model_size(model, dtype)  # test model size output

        # A plain tuple has no .parameters(); optimize the parameters of both models together:
        optimizer = optim.Adadelta(list(model_1.parameters()) + list(model_2.parameters()))

        # Train model:
        model, current_loss_data = my_models.train_model_5(model, optimizer, train_loader, test_loader,
                                                           device, dtype, epoches=4, print_every=5)
        loss_data.append(current_loss_data)

        # Save model to file:
        # torch.save(model.state_dict(), MODEL_PATH + MODEL_NAME)

        print('Training Loop {}/{} is Finished !'.format(tl + 1, train_loops))
        print()

# Add test accuracy and save data to file:
```
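The last comment above is a placeholder; one possible sketch is below, assuming `train_model_5` returns a single trained `nn.Module` and a standard classification setup. `check_accuracy` and the `loss_data.pkl` file name are illustrative, not part of `my_models`:

```python
import pickle

def check_accuracy(model, loader, device, dtype):
    """Report classification accuracy of `model` on the samples in `loader`."""
    model.eval()
    num_correct, num_samples = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=torch.long)
            preds = model(x).argmax(dim=1)
            num_correct += (preds == y).sum().item()
            num_samples += y.size(0)
    acc = num_correct / num_samples
    print('test accuracy: {}/{} ({:.2%})'.format(num_correct, num_samples, acc))
    return acc

# Evaluate on the test set and persist the per-loop loss curves:
test_acc = check_accuracy(model, test_loader, device, dtype)
with open(MODEL_PATH + 'loss_data.pkl', 'wb') as f:
    pickle.dump({'loss_data': loss_data, 'test_acc': test_acc}, f)
```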