"""Training driver: build optimizer/criterion, move model to GPU when
available, then run num_epochs of train/evaluate with per-epoch CPU
checkpoints and learning-rate decay every second epoch."""
# NOTE(review): the original began with a stray `sys.exit()` that made every
# statement below unreachable — removed as leftover debugging.

print(net)

# BUG FIX: `net.params` is not a standard PyTorch attribute (modules expose
# `parameters()`); as written Adam would raise AttributeError — TODO confirm
# the project did not define a custom `.params`.
optimizer = optim.Adam(net.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# Convert to CUDA if available (and requested via the `cuda` flag).
if torch.cuda.is_available() and cuda:
    print("CUDA is available, training on GPU")
    print("Number of available devices: {}".format(torch.cuda.device_count()))
    print("Using device: {}".format(cuda_device))
    # BUG FIX: `torch.cuda.device(...)` is a context manager and does nothing
    # as a bare statement; `set_device` actually selects the device.
    torch.cuda.set_device(args.device)
    net.cuda()
    criterion = criterion.cuda()
else:
    print("CUDA is NOT available, training on CPU")

# Train and evaluate the model for num_epochs epochs (1-based for filenames).
for i in range(1, num_epochs + 1):
    train(i, net, trainloader, criterion, optimizer, cuda, batch_size)
    print("Results on training set")
    evaluate(i, net, trainloader, criterion, cuda, batch_size)
    print("Results on validation set")
    evaluate(i, net, testloader, criterion, cuda, batch_size)

    # Checkpoint on CPU so the state_dict loads on CUDA-less machines,
    # then move the model back for the next epoch.
    net.cpu()
    torch.save(net.state_dict(), model_path + name + "_" + str(i) + ".pth")
    if torch.cuda.is_available() and cuda:
        net.cuda()

    if i % 2 == 0:
        # Decay learning rate every other epoch.
        learning_rate = learning_rate * 0.95
        # BUG FIX: re-creating the optimizer discarded Adam's running moment
        # estimates; mutating param_groups applies the new LR and keeps state.
        for param_group in optimizer.param_groups:
            param_group["lr"] = learning_rate
        print(f"Learning rate: {learning_rate}")