# --- PGD (L-inf) adversarial training on the LISA dataset ---
# Fix the RNG seed before weight init / data shuffling for reproducibility.
torch.manual_seed(123456)

dataloaders, dataset_sizes = data_process_lisa(batch_size=128)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model_ft = Net()
model_ft.apply(weights_init)
# model_ft.load_state_dict(torch.load('../donemodel/' + args.model))  # optional warm start from a saved checkpoint
model_ft.to(device)
# model_ft = nn.DataParallel(model_ft, device_ids=[0, 1])  # use multiple GPUs

criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.01)

# Decay LR by a factor of 0.1 every 10 epochs.
# (Original comment said "every 7 epochs", which contradicted step_size=10.)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.1)

model_ft = pgd_train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                           num_epochs=30)
test(model_ft, dataloaders, dataset_sizes)

# Persist the adversarially-trained weights.
torch.save(model_ft.state_dict(), '../donemodel/new_linf_model050.pt')  # output model
# --- Sticker-attack adversarial training, starting from a saved checkpoint ---
# Load pretrained weights selected on the command line.
model_ft.load_state_dict(torch.load('../donemodel/' + args.model))
model_ft.to(device)
# model_ft = nn.DataParallel(model_ft, device_ids=[0, 1])  # use multiple GPUs

criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.01)

# Decay LR by a factor of 0.3 every 100 epochs.
# (Original comment said "0.1 every 7 epochs", which contradicted
#  step_size=100, gamma=0.3.)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.3)

model_ft = sticker_train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                               args.alpha, args.iters, args.search,
                               num_epochs=args.epochs)
test(model_ft, dataloaders, dataset_sizes)

# NOTE(review): `seed` is not defined anywhere in this chunk — confirm it is
# assigned earlier in the file, otherwise this save line raises NameError.
torch.save(model_ft.state_dict(),
           '../donemodel/new_sticker_model0' + str(args.out) + str(seed) + '.pt')