    return source_loader, target_train_loader, target_test_loader


if __name__ == '__main__':
    torch.manual_seed(0)
    source_name = "amazon"
    target_name = "webcam"
    print('Src: %s, Tar: %s' % (source_name, target_name))
    source_loader, target_train_loader, target_test_loader = load_data(
        source_name, target_name, CFG['data_path'])
    model = models.Transfer_Net(
        CFG['n_class'], transfer_loss='mmd', base_net='resnet50').to(DEVICE)
    optimizer = torch.optim.SGD([
        {'params': model.base_network.parameters()},
        {'params': model.bottleneck_layer.parameters(), 'lr': 10 * CFG['lr']},
        {'params': model.classifier_layer.parameters(), 'lr': 10 * CFG['lr']},
    ], lr=CFG['lr'],
# make_dot was moved to https://github.com/szagoruyko/pytorchviz
from torchviz import make_dot

if __name__ == "__main__":
    torch.manual_seed(0)
    source_name = "cmf"
    target_name = "casia"
    # print('Src: %s, Tar: %s' % (source_name, target_name))
    # source_loader, target_train_loader, target_test_loader = load_data(
    #     source_name, target_name, CFG['data_path'])
    model = models.Transfer_Net(
        CFG["n_class"], transfer_loss="mmd", base_net="alexnet").to(DEVICE)
    optimizer = torch.optim.SGD(
        [
            {"params": model.base_network.parameters()},
            {"params": model.bottleneck_layer.parameters(), "lr": 10 * CFG["lr"]},
            {"params": model.classifier_layer.parameters(), "lr": 10 * CFG["lr"]},
        ],
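# --- Hedged sketch (not part of the original script): minimal torchviz usage. ---
# make_dot, imported above, renders the autograd graph of a forward pass. A plain
# torchvision ResNet-18 is used here only so the snippet is self-contained; the
# script above would presumably pass the output of Transfer_Net instead.
import torch
from torchvision import models as tv_models
from torchviz import make_dot

net = tv_models.resnet18()                      # stand-in for the base network
out = net(torch.randn(1, 3, 224, 224))          # one dummy forward pass builds the graph
dot = make_dot(out, params=dict(net.named_parameters()))
dot.render('resnet18_graph', format='png')      # requires graphviz; writes resnet18_graph.png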
def main(loaders, final_test_loaders, excel_files, im):
    excel, sheet_src, sheet_tar = excel_files  # unpack the Excel workbook and sheets

    # Prepare the model and its training parameters
    model = models.Transfer_Net(CFG).to(DEVICE)  # build the model and move it to the chosen device
    params = spparams_creator(model, CFG)  # build the parameter groups used for training
    if 'RevGrad' in CFG['tranmodel']:
        # RevGrad needs a separate optimizer and scheduler for each part of the network
        optimizer0 = torch.optim.SGD(params[0], lr=CFG['lr'], momentum=CFG['momentum'],
                                     weight_decay=CFG['l2_decay'])  # backbone parameters
        optimizer1 = torch.optim.SGD(params[1], lr=CFG['lr'], momentum=CFG['momentum'],
                                     weight_decay=CFG['l2_decay'])  # domain-classifier parameters
        scheduler0 = StepLR(optimizer0, step_size=1, gamma=0.95)  # learning-rate decay
        scheduler1 = StepLR(optimizer1, step_size=1, gamma=0.95)  # learning-rate decay
        optimizer = [optimizer0, optimizer1]  # keep both optimizers in a list so they are easy to pass around
        scheduler = [scheduler0, scheduler1]
    else:
        # DDC uses a single optimizer and a single scheduler
        optimizer = torch.optim.SGD(params, lr=CFG['lr'], momentum=CFG['momentum'],
                                    weight_decay=CFG['l2_decay'])
        scheduler = StepLR(optimizer, step_size=1, gamma=0.95)  # learning-rate decay

    # Train (the train() function comes from train_utils)
    result_trace = train(loaders, model, optimizer, scheduler, CFG)

    # Plot the training curves
    acc_train, acc_test = np.array(result_trace[0]), np.array(result_trace[3])
    # column 0 is the final voted accuracy on the source domain, column 6 on the target domain
    acc_train_src, acc_train_tar = acc_train[:, 0], acc_train[:, 6]
    acc_test_src, acc_test_tar = acc_test[:, 0], acc_test[:, 6]
    x = [i for i in range(len(acc_train_src))]
    y = [acc_train_src, acc_train_tar, acc_test_src, acc_test_tar]  # x and y series for plotting
    plot_curve(x, y,
               path_name_curve + '/' + CFG['tranmodel'] + '_ACC_end.png',
               xlabel='Epoch', ylabel='ACC',
               title=CFG['tranmodel'] + 'ACC',
               legend=['acc_train_src', 'acc_train_tar',
                       'acc_test_src', 'acc_test_tar'])  # custom plotting helper

    # Final test results
    acc_train_val_src = [acc_train_src[-1], acc_test_src[-1]]  # converged train/validation accuracy, source domain
    acc_train_val_tar = [acc_train_tar[-1], acc_test_tar[-1]]  # converged train/validation accuracy, target domain
    acc_test_srcs, acc_test_tars = final_test(model, CFG, final_test_loaders)  # accuracy on the parallel test sets
    # concatenate the train/validation accuracies, [0, 0] as placeholders, and the
    # parallel-test accuracies into one list so they can be written to Excel
    acc_4excel_src = acc_train_val_src + [0, 0] + acc_test_srcs
    acc_4excel_tar = acc_train_val_tar + [0, 0] + acc_test_tars
    save_excel_4final_test(acc_4excel_src, acc_4excel_tar, excel, sheet_src,
                           sheet_tar, im, CFG)
    excel.save(dir_now + '/测试结果.xls')  # "测试结果" means "test results"

    # Save the .pt checkpoint
    if 'RevGrad' in CFG['tranmodel']:
        # RevGrad has two optimizers and two schedulers to save
        pt = {
            'model': model.state_dict(),
            'optimizer0': optimizer[0].state_dict(),
            'optimizer1': optimizer[1].state_dict(),
            'scheduler0': scheduler[0].state_dict(),
            'scheduler1': scheduler[1].state_dict(),
            'CFG': CFG,
            'result_trace': result_trace,
            'test_result': [acc_test_srcs, acc_test_tars]
        }
    else:
        # DDC has a single optimizer and a single scheduler
        pt = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
            'CFG': CFG,
            'result_trace': result_trace,
            'test_result': [acc_test_srcs, acc_test_tars]
        }
    torch.save(pt, dir_now + '/' + CFG['tranmodel'] + '_end.pt')
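def load_checkpoint_sketch(pt_path):
    # Hedged sketch, not part of the original code: one possible way to restore the
    # checkpoint written by main() above for the single-optimizer (DDC-style) case.
    # The dictionary keys mirror the dict passed to torch.save(); rebuilding the model
    # and optimizer through Transfer_Net/spparams_creator is an assumption.
    checkpoint = torch.load(pt_path, map_location=DEVICE)
    cfg = checkpoint['CFG']
    model = models.Transfer_Net(cfg).to(DEVICE)
    model.load_state_dict(checkpoint['model'])
    optimizer = torch.optim.SGD(spparams_creator(model, cfg), lr=cfg['lr'],
                                momentum=cfg['momentum'], weight_decay=cfg['l2_decay'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler = StepLR(optimizer, step_size=1, gamma=0.95)
    scheduler.load_state_dict(checkpoint['scheduler'])
    return model, optimizer, scheduler, checkpoint['result_trace']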
    return source_loader, target_train_loader, target_test_loader


if __name__ == '__main__':
    torch.manual_seed(0)
    source_name = "amazon"
    target_name = "webcam"
    print('Src: %s, Tar: %s' % (source_name, target_name))
    source_loader, target_train_loader, target_test_loader = load_data(
        source_name, target_name, args.data)
    model = models.Transfer_Net(
        args.n_class, transfer_loss=args.trans_loss, base_net=args.model).to(DEVICE)
    optimizer = torch.optim.SGD([
        {'params': model.base_network.parameters()},
        {'params': model.bottleneck_layer.parameters(), 'lr': 10 * args.lr},
        {'params': model.classifier_layer.parameters(), 'lr': 10 * args.lr},
    ], lr=args.lr,
CFG['backbone'] = args.backbone
CFG['lambda'] = np.logspace(args.lambda_initial, args.lambda_final, CFG['epoch'])
torch.manual_seed(0)
source_name = args.source
target_name = "RealWorld"
print('Src: %s, Tar: %s' % (source_name, target_name))
source_loader, target_train_loader, target_test_loader = load_data(
    source_name, target_name, CFG['data_path'])
model = models.Transfer_Net(
    CFG['n_class'], transfer_loss='coral', base_net=CFG['backbone']).to(DEVICE)
optimizer = torch.optim.SGD([
    {'params': model.base_network.parameters()},
    {'params': model.bottleneck_layer.parameters(), 'lr': 10 * CFG['lr']},
    {'params': model.classifier_layer.parameters(), 'lr': 10 * CFG['lr']},
], lr=CFG['lr'],