    total_epoch=5, after_scheduler=scheduler)
# Dummy step so the warm-up learning rate is in effect before the first epoch
# (and to avoid PyTorch's scheduler-before-optimizer warning)
optimizer.zero_grad()
optimizer.step()
lr_scheduler.step()

util.check_dir('../data/models/')
best_model, loss_dict, top1_acc_dict, top5_acc_dict = train_model(
    data_loaders, data_sizes, name, model, criterion, optimizer, lr_scheduler,
    num_epochs=num_epochs, device=device)
# Save the best model parameters
# util.save_model(best_model.cpu(), '../data/models/best_%s.pth' % name)

res_loss[name] = loss_dict
res_top1_acc[name] = top1_acc_dict
res_top5_acc[name] = top5_acc_dict

print('train %s done' % name)
print()

util.save_png('loss', res_loss)
util.save_png('top-1 acc', res_top1_acc)
util.save_png('top-5 acc', res_top5_acc)
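The block above begins in the middle of the warm-up scheduler's constructor call, so its first line is only the tail of that statement. For reference, below is a minimal standalone sketch of how such a scheduler is typically assembled, assuming the GradualWarmupScheduler from the ildoonet/pytorch-gradual-warmup-lr package; the stand-in model, the multiplier value, and the wrapped StepLR are illustrative choices, not taken from the original code.

import torch.nn as nn
import torch.optim as optim
from warmup_scheduler import GradualWarmupScheduler  # ildoonet/pytorch-gradual-warmup-lr

model = nn.Linear(10, 2)  # stand-in model, only needed to build an optimizer
num_epochs = 100

optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Scheduler that takes over once the warm-up phase ends (illustrative choice).
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.96)
# Ramp the learning rate up over the first 5 epochs, then defer to `scheduler`.
lr_scheduler = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=5,
                                      after_scheduler=scheduler)

With multiplier=1 the warm-up ramps from near zero up to the optimizer's base learning rate over total_epoch epochs before handing control to after_scheduler.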
# print(model)
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.96)

util.check_dir('./data/models/')
best_model, loss_dict, acc_dict = train_model(data_loaders, data_sizes, name, model, criterion,
                                              optimizer, lr_scheduler, num_epochs=100, device=device)
# Save the best model parameters
util.save_model(best_model, './data/models/best_%s.pth' % name)

res_loss[name] = loss_dict
res_acc[name] = acc_dict

print('train %s done' % name)
print()

util.save_png('loss', res_loss)
util.save_png('acc', res_acc)
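For reference, the StepLR configuration above multiplies the learning rate by 0.96 every 8 epochs, i.e. lr(e) = 1e-3 * 0.96 ** (e // 8). The standalone sketch below traces that schedule; the nn.Linear stand-in model and the bare loop exist only to drive the scheduler and are not part of the original training code.

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(10, 2)  # stand-in model, only needed to build an optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-3)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.96)

for epoch in range(100):
    # ... forward/backward passes over the training set would go here ...
    optimizer.step()     # placeholder for the real per-batch updates
    lr_scheduler.step()  # decay once per epoch
    if epoch % 8 == 7:
        # Current learning rate after each decay step: 9.6e-4, 9.216e-4, ...
        print(epoch + 1, lr_scheduler.get_last_lr()[0])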