def train(args, model, ad_net, random_layer, train_loader, train_loader1,
          optimizer, optimizer_ad, epoch, start_epoch, method):
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    num_iter = max(len_source, len_target)
    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()
        print('data_source:', data_source.shape, data_target.shape)
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature, output = model(torch.cat((data_source, data_target), 0))
        loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(output)
        if epoch > start_epoch:
            if method == 'CDAN-E':
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                       network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx),
                                       random_layer)
            elif method == 'CDAN':
                loss += loss_func.CDAN([feature, softmax_output], ad_net, None, None, random_layer)
            elif method == 'DANN':
                loss += loss_func.DANN(feature, ad_net)
            else:
                raise ValueError('Method cannot be recognized.')
        loss.backward()
        optimizer.step()
        if epoch > start_epoch:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
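The loops in this section lean on two small helpers that are never shown: `loss_func.Entropy` and `network.calc_coeff`. A minimal sketch of both, modeled on the reference CDAN implementation (the exact default constants are an assumption):

import numpy as np
import torch

def Entropy(input_):
    # Per-sample entropy of a [batch, num_classes] tensor of softmax probabilities.
    epsilon = 1e-5
    entropy = -input_ * torch.log(input_ + epsilon)
    return torch.sum(entropy, dim=1)

def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
    # Ramps the adversarial coefficient from `low` to `high` over training,
    # following 2/(1 + exp(-alpha * p)) - 1 with progress p = iter_num / max_iter.
    return float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter))
                 - (high - low) + low)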
def train(args, config, model, ad_net, random_layer, train_loader, train_loader1,
          optimizer, optimizer_ad, epoch):
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    num_iter = max(len_source, len_target)
    total_loss = 0
    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature_source, output_source = model(data_source)
        feature_target, output_target = model(data_target)
        feature = torch.cat((feature_source, feature_target), 0)
        output = torch.cat((output_source, output_target), 0)
        # Pseudo-labels for the target batch: the classifier's own argmax predictions.
        labels_target_fake = torch.max(nn.Softmax(dim=1)(output_target), 1)[1]
        labels = torch.cat((label_source, labels_target_fake))
        loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(output)
        if epoch > 0:
            entropy = loss_func.Entropy(softmax_output)
            loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                   network.calc_coeff(num_iter * epoch + batch_idx),
                                   random_layer)
        mdd_loss = args.mdd_weight * loss_func.mdd_digit(
            feature, labels, args.left_weight, args.right_weight, args.weight)
        loss = loss + mdd_loss
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
        if epoch > 0:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
    log_str = "total_loss:{}\n".format(total_loss)
    config["out_file"].write(log_str)
    config["out_file"].flush()
    print(log_str)
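`loss_func.CDAN` and `loss_func.DANN` are also assumed throughout. A sketch of the conditional adversarial loss following the published CDAN recipe (the entropy-weighted CDAN+E branch and the gradient-reversal hook mirror the reference code; treat exact signatures as assumptions, since some snippets below pass extra `entropy`/`coeff` arguments to `DANN`):

import torch
import torch.nn as nn

def grl_hook(coeff):
    # Gradient-reversal hook: negates and scales gradients flowing back into the entropy weights.
    def fun1(grad):
        return -coeff * grad.clone()
    return fun1

def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
    feature = input_list[0]
    softmax_output = input_list[1].detach()
    if random_layer is None:
        # Condition the discriminator on the outer product of predictions and features.
        op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
        ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
    else:
        random_out = random_layer.forward([feature, softmax_output])
        ad_out = ad_net(random_out.view(-1, random_out.size(1)))
    batch_size = softmax_output.size(0) // 2  # source half first, target half second
    dc_target = torch.cat((torch.ones(batch_size), torch.zeros(batch_size))).unsqueeze(1).to(ad_out.device)
    if entropy is not None:
        # CDAN+E: weight confident samples more, with reversed gradients on the weights.
        entropy.register_hook(grl_hook(coeff))
        entropy = 1.0 + torch.exp(-entropy)
        source_mask = torch.zeros_like(entropy)
        source_mask[:batch_size] = 1
        source_weight = entropy * source_mask
        target_weight = entropy * (1 - source_mask)
        weight = source_weight / torch.sum(source_weight).detach() + \
                 target_weight / torch.sum(target_weight).detach()
        per_sample = nn.BCELoss(reduction='none')(ad_out, dc_target)
        return torch.sum(weight.view(-1, 1) * per_sample) / torch.sum(weight).detach()
    return nn.BCELoss()(ad_out, dc_target)

def DANN(features, ad_net):
    # Unconditional domain-adversarial loss on the stacked source/target features.
    ad_out = ad_net(features)
    batch_size = ad_out.size(0) // 2
    dc_target = torch.cat((torch.ones(batch_size), torch.zeros(batch_size))).unsqueeze(1).to(ad_out.device)
    return nn.BCELoss()(ad_out, dc_target)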
def train(dataloader_src, dataloader_tgt, discriminator, classifier, train_epochs, writer):
    discriminator.train()
    classifier.train()
    loss_clf = nn.CrossEntropyLoss()
    # Reminder: momentum accelerates the update when this gradient points the same way
    # as the previous step; weight_decay is L2 regularization.
    optimizer = optim.SGD(itertools.chain(classifier.parameters(), discriminator.parameters()),
                          lr=1e-3, momentum=0.9, weight_decay=0.0009)
    loss_clf_ = transfer_loss = 0
    for epoch in range(train_epochs):
        for (imgs_src, labels_src), (imgs_tgt, labels_tgt) in zip(dataloader_src, dataloader_tgt):
            imgs_src = Variable(imgs_src.type(FloatTensor)).reshape(imgs_src.shape[0], -1)
            labels_src = Variable(labels_src.type(LongTensor))
            imgs_tgt = Variable(imgs_tgt.type(FloatTensor)).reshape(imgs_tgt.shape[0], -1)
            labels_tgt = Variable(labels_tgt.type(FloatTensor))
            # Forward both domains through the shared classifier.
            fea_src, pred_src = classifier(imgs_src)
            fea_tgt, pred_tgt = classifier(imgs_tgt)
            fea = torch.cat((fea_src, fea_tgt), 0)
            pred = torch.cat((pred_src, pred_tgt), 0)
            # Class probabilities.
            softmax_out = nn.Softmax(dim=1)(pred)
            # Entropy and discriminator (transfer) loss.
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([fea, softmax_out], discriminator, entropy,
                                      networks.calc_coeff(epoch))
            # Classifier loss on the labeled source batch.
            loss_clf_ = loss_clf(pred_src, labels_src)
            with OptimizerManager([optimizer]):
                total_loss = transfer_loss + loss_clf_
                total_loss.backward()
        if epoch % 5 == 0:
            acc_src, acc_tgt = evaluate(classifier, dataloader_src, dataloader_tgt)
            writer.add_scalar('Train/loss_c_src', loss_clf_, epoch)
            writer.add_scalar('Train/transfer_loss', transfer_loss, epoch)
            writer.add_scalar('Evaluate/Acc_src', acc_src, epoch)
            writer.add_scalar('Evaluate/Acc_tgt', acc_tgt, epoch)
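`OptimizerManager` above is not defined anywhere in these snippets; a plausible minimal implementation (an assumption, not the original class) zeroes gradients on entry and steps every managed optimizer on exit, so the `with` body only computes the loss and calls backward():

class OptimizerManager:
    # Assumed behavior: zero_grad() on entry, step() on exit.
    def __init__(self, optimizers):
        self.optimizers = optimizers

    def __enter__(self):
        for op in self.optimizers:
            op.zero_grad()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        for op in self.optimizers:
            op.step()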
def train(args, model, ad_net, random_layer, train_loader, train_loader1,
          optimizer, optimizer_ad, epoch, start_epoch, method):
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    num_iter = max(len_source, len_target)
    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature_source, output_source = model(data_source)
        feature_target, output_target = model(data_target)
        feature = torch.cat((feature_source, feature_target), 0)
        output = torch.cat((output_source, output_target), 0)
        labels_target_fake = torch.max(nn.Softmax(dim=1)(output_target), 1)[1]
        labels = torch.cat((label_source, labels_target_fake))
        loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(output)
        if epoch > start_epoch:
            entropy = loss_func.Entropy(softmax_output)
            loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                   network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx),
                                   random_layer)
        loss = loss + args.mdd_weight * loss_func.mdd_digit(feature, labels) \
               + args.entropic_weight * loss_func.EntropicConfusion(feature)
        loss.backward()
        optimizer.step()
        if epoch > start_epoch:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.4f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
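`loss_func.EntropicConfusion` is used here and again further down; a sketch of the usual entropic-confusion term, which (added with a positive weight) pushes the softmax over features toward uniform. The 1e-5 stabilizer inside the log is an addition; `loss_func.mdd_digit` is repo-specific and is left undefined here:

import torch
import torch.nn as nn

def EntropicConfusion(features):
    # Batch mean of sum_j p_j * log(p_j), where p = softmax(features).
    softmax_out = nn.Softmax(dim=1)(features)
    batch_size = features.size(0)
    return torch.mul(softmax_out, torch.log(softmax_out + 1e-5)).sum() * (1.0 / batch_size)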
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(),
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs,
                                        shuffle=True, num_workers=0, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(),
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=0, drop_last=True)
    if prep_config["test_10crop"]:
        dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(),
                                   transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, shuffle=False, num_workers=0)
                                for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(),
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
                                          shuffle=False, num_workers=0)
    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    # base_network = base_network.cuda()

    ## add discriminators D_s, D_t and generators G_s2t, G_t2s
    z_dimension = 256
    D_s = network.models["Discriminator"]()
    # D_s = D_s.cuda()
    G_s2t = network.models["Generator"](z_dimension, 1024)
    # G_s2t = G_s2t.cuda()
    D_t = network.models["Discriminator"]()
    # D_t = D_t.cuda()
    G_t2s = network.models["Generator"](z_dimension, 1024)
    # G_t2s = G_t2s.cuda()

    criterion_GAN = torch.nn.MSELoss()
    criterion_cycle = torch.nn.L1Loss()
    criterion_identity = torch.nn.L1Loss()
    criterion_Sem = torch.nn.L1Loss()

    optimizer_G = torch.optim.Adam(itertools.chain(G_s2t.parameters(), G_t2s.parameters()), lr=0.0003)
    optimizer_D_s = torch.optim.Adam(D_s.parameters(), lr=0.0003)
    optimizer_D_t = torch.optim.Adam(D_t.parameters(), lr=0.0003)

    fake_S_buffer = ReplayBuffer()
    fake_T_buffer = ReplayBuffer()

    classifier_optimizer = torch.optim.Adam(base_network.parameters(), lr=0.0003)

    ## add the auxiliary classifier
    classifier1 = net.Net(256, class_num)
    # classifier1 = classifier1.cuda()
    classifier1_optim = optim.Adam(classifier1.parameters(), lr=0.0003)

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    # ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, base_network,
                                                 test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
                now = datetime.datetime.now()
                d = '{}-{} {}:{}:{}'.format(now.month, now.day, now.hour, now.minute, now.second)
                torch.save(best_model,
                           osp.join(config["output_path"],
                                    "{}_to_{}_best_model_acc-{}_{}.pth.tar".format(
                                        args.source, args.target, best_acc, d)))
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network),
                       osp.join(config["output_path"],
                                "{}_to_{}_iter_{:05d}_model_{}.pth.tar".format(
                                    args.source, args.target, i, str(datetime.datetime.utcnow()))))

        print("it_train: {:05d} / {:05d} start".format(i, config["num_iterations"]))
        loss_params = config["loss"]

        ## train one iter
        classifier1.train(True)
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        # inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()

        # extract features
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        outputs_source1 = classifier1(features_source.detach())
        outputs_target1 = classifier1(features_target.detach())
        outputs1 = torch.cat((outputs_source1, outputs_target1), dim=0)
        softmax_out1 = nn.Softmax(dim=1)(outputs1)
        # Blend the predictions of the base classifier and the auxiliary classifier.
        softmax_out = (1 - args.cla_plus_weight) * softmax_out + args.cla_plus_weight * softmax_out1

        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')

        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)

        # Cycle
        num_feature = features_source.size(0)
        # ================= train discriminator T
        real_label = Variable(torch.ones(num_feature))
        # real_label = Variable(torch.ones(num_feature)).cuda()
        fake_label = Variable(torch.zeros(num_feature))
        # fake_label = Variable(torch.zeros(num_feature)).cuda()

        # train the generators
        optimizer_G.zero_grad()

        # Identity loss
        same_t = G_s2t(features_target.detach())
        loss_identity_t = criterion_identity(same_t, features_target)
        same_s = G_t2s(features_source.detach())
        loss_identity_s = criterion_identity(same_s, features_source)

        # GAN loss
        fake_t = G_s2t(features_source.detach())
        pred_fake = D_t(fake_t)
        loss_G_s2t = criterion_GAN(pred_fake, labels_source.float())
        fake_s = G_t2s(features_target.detach())
        pred_fake = D_s(fake_s)
        loss_G_t2s = criterion_GAN(pred_fake, labels_source.float())

        # cycle loss
        recovered_s = G_t2s(fake_t)
        loss_cycle_sts = criterion_cycle(recovered_s, features_source)
        recovered_t = G_s2t(fake_s)
        loss_cycle_tst = criterion_cycle(recovered_t, features_target)

        # semantic loss
        pred_recovered_s = base_network.fc(recovered_s)
        pred_fake_t = base_network.fc(fake_t)
        loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t)
        pred_recovered_t = base_network.fc(recovered_t)
        pred_fake_s = base_network.fc(fake_s)
        loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s)

        loss_cycle = loss_cycle_tst + loss_cycle_sts
        weights = args.weight_in_lossG.split(',')
        loss_G = float(weights[0]) * (loss_identity_s + loss_identity_t) + \
                 float(weights[1]) * (loss_G_s2t + loss_G_t2s) + \
                 float(weights[2]) * loss_cycle + \
                 float(weights[3]) * (loss_sem_s2t + loss_sem_t2s)

        # train the softmax classifier on generated target features
        outputs_fake = classifier1(fake_t.detach())
        # classifier optimization
        classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, labels_source)
        classifier1_optim.zero_grad()
        classifier_loss1.backward()
        classifier1_optim.step()

        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss \
                     + args.cyc_loss_weight * loss_G
        total_loss.backward()
        optimizer.step()
        optimizer_G.step()

        ###### Discriminator S ######
        optimizer_D_s.zero_grad()
        # Real loss
        pred_real = D_s(features_source.detach())
        loss_D_real = criterion_GAN(pred_real, real_label)
        # Fake loss
        fake_s = fake_S_buffer.push_and_pop(fake_s)
        pred_fake = D_s(fake_s.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label)
        # Total loss
        loss_D_s = loss_D_real + loss_D_fake
        loss_D_s.backward()
        optimizer_D_s.step()
        ###################################

        ###### Discriminator t ######
        optimizer_D_t.zero_grad()
        # Real loss
        pred_real = D_t(features_target.detach())
        loss_D_real = criterion_GAN(pred_real, real_label)
        # Fake loss
        fake_t = fake_T_buffer.push_and_pop(fake_t)
        pred_fake = D_t(fake_t.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label)
        # Total loss
        loss_D_t = loss_D_real + loss_D_fake
        loss_D_t.backward()
        optimizer_D_t.step()

        print("it_train: {:05d} / {:05d} over".format(i, config["num_iterations"]))

    now = datetime.datetime.now()
    d = '{}-{} {}:{}:{}'.format(now.month, now.day, now.hour, now.minute, now.second)
    torch.save(best_model,
               osp.join(config["output_path"],
                        "{}_to_{}_best_model_acc-{}_{}.pth.tar".format(
                            args.source, args.target, best_acc, d)))
    return best_acc
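`ReplayBuffer` follows the CycleGAN training recipe: each discriminator sees a mix of current and previously generated samples, which stabilizes adversarial training. A standard implementation matching the widely used PyTorch CycleGAN code (max_size=50 is the conventional default):

import random
import torch

class ReplayBuffer:
    def __init__(self, max_size=50):
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, batch):
        # Return a batch in which each element is, with probability 0.5,
        # swapped with a previously stored generated sample.
        out = []
        for element in batch.detach():
            element = element.unsqueeze(0)
            if len(self.data) < self.max_size:
                self.data.append(element)
                out.append(element)
            elif random.uniform(0, 1) > 0.5:
                i = random.randint(0, self.max_size - 1)
                out.append(self.data[i].clone())
                self.data[i] = element
            else:
                out.append(element)
        return torch.cat(out)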
def train(config):
    # set pre-process
    prep_config = config["prep"]
    prep_dict = {}
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    # prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = datasets.ImageFolder(data_config['source']['list_path'],
                                           transform=prep_dict["source"])
    dset_loaders['source'] = getdataloader(dsets['source'], batchsize=train_bs,
                                           num_workers=4, drop_last=True, weightsampler=True)
    dsets["target"] = datasets.ImageFolder(data_config['target']['list_path'],
                                           transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    if prep_config["test_10crop"]:
        dsets["test"] = [datasets.ImageFolder(data_config['test']['list_path'],
                                              transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, shuffle=False, num_workers=4)
                                for dset in dsets['test']]
    else:
        dsets["test"] = datasets.ImageFolder(data_config['test']['list_path'],
                                             transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
                                          shuffle=False, num_workers=4)
    class_num = config["network"]["params"]["class_num"]

    # set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    # set test_ad_net
    test_ad_net = network.AdversarialNetwork(base_network.output_num(), 1024, test_ad_net=True)
    test_ad_net = test_ad_net.cuda()

    # add additional network for some methods
    if config['method'] == 'DANN':
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num(), 1024)
    elif config['method'] == 'MADA':
        random_layer = None
        ad_net = network.AdversarialNetworkClassGroup(base_network.output_num(), 1024, class_num)
    elif config['method'] == 'proposed':
        if config['loss']['random']:
            random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                               config['loss']['random_dim'])
            ad_net = network.AdversarialNetwork(config['loss']['random_dim'], 1024)
            ad_net_group = network.AdversarialNetworkGroup(config['loss']['random_dim'], 256,
                                                           class_num, config['center_threshold'])
        else:
            random_layer = None
            ad_net = network.AdversarialNetwork(base_network.output_num(), 1024)
            ad_net_group = network.AdversarialNetworkGroup(base_network.output_num(), 1024,
                                                           class_num, config['center_threshold'])
    elif config['method'] == 'base':
        pass
    else:
        if config["loss"]["random"]:
            random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                               config["loss"]["random_dim"])
            ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
        else:
            random_layer = None
            ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"] and config['method'] not in ('base', 'DANN', 'MADA'):
        random_layer.cuda()
    if config['method'] != 'base':
        ad_net = ad_net.cuda()
    if config['method'] == 'proposed':
        ad_net_group = ad_net_group.cuda()

    # set parameters
    if config['method'] == 'proposed':
        parameter_list = base_network.get_parameters() + test_ad_net.get_parameters() + \
                         ad_net.get_parameters() + ad_net_group.get_parameters()
    elif config['method'] == 'base':
        parameter_list = base_network.get_parameters() + test_ad_net.get_parameters()
    elif config['method'] == 'MADA':
        parameter_list = base_network.get_parameters() + test_ad_net.get_parameters() + \
                         ad_net.get_parameters()
    else:
        parameter_list = base_network.get_parameters() + test_ad_net.get_parameters() + \
                         ad_net.get_parameters()

    # set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    # parallel
    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        base_network = nn.DataParallel(base_network)
        test_ad_net = nn.DataParallel(test_ad_net)
        if config['method'] == 'DANN':
            ad_net = nn.DataParallel(ad_net)
        elif config['method'] == 'proposed':
            if config['loss']['random']:
                random_layer = nn.DataParallel(random_layer)
                ad_net = nn.DataParallel(ad_net)
                # Wrapping ad_net_group in DataParallel raises an error, probably because
                # its output is not a tensor, which DataParallel cannot handle yet.
                # ad_net_group = nn.DataParallel(ad_net_group)
            else:
                ad_net = nn.DataParallel(ad_net)
                # ad_net_group = nn.DataParallel(ad_net_group)
        elif config['method'] == 'base':
            pass
        else:
            # CDAN+E
            if config["loss"]["random"]:
                random_layer = nn.DataParallel(random_layer)
                ad_net = nn.DataParallel(ad_net)
            # CDAN
            else:
                ad_net = nn.DataParallel(ad_net)

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)  # eval() == train(False) is True
            temp_acc = image_classification_test(dset_loaders, base_network,
                                                 test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
        # if i % config["snapshot_interval"] == 0:
        #     torch.save(nn.Sequential(base_network), osp.join(config["output_path"],
        #                "iter_{:05d}_model.pth.tar".format(i)))

        loss_params = config["loss"]
        # train one iter
        base_network.train(True)
        if config['method'] != 'base':
            ad_net.train(True)
        if config['method'] == 'proposed':
            ad_net_group.train(True)
        # lr_scheduler
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)

        if config['tsne']:
            # feature visualization by using T-SNE
            if i == int(0.98 * config['num_iterations']):
                features_source_total = features_source.cpu().detach().numpy()
                features_target_total = features_target.cpu().detach().numpy()
            elif int(0.98 * config['num_iterations']) < i < int(0.98 * config['num_iterations']) + 10:
                features_source_total = np.concatenate(
                    (features_source_total, features_source.cpu().detach().numpy()))
                features_target_total = np.concatenate(
                    (features_target_total, features_target.cpu().detach().numpy()))
            elif i == int(0.98 * config['num_iterations']) + 10:
                for index in range(config['tsne_num']):
                    features_embeded = TSNE(perplexity=10, n_iter=5000).fit_transform(
                        np.concatenate((features_source_total, features_target_total)))
                    fig = plt.figure()
                    plt.scatter(features_embeded[:len(features_embeded) // 2, 0],
                                features_embeded[:len(features_embeded) // 2, 1], c='r', s=1)
                    plt.scatter(features_embeded[len(features_embeded) // 2:, 0],
                                features_embeded[len(features_embeded) // 2:, 1], c='b', s=1)
                    plt.savefig(osp.join(config["output_path"],
                                         config['method'] + '-' + str(index) + '.png'))
                    plt.close()
            else:
                pass

        assert features_source.size(0) == features_target.size(0), 'The batch sizes must be equal'
        assert outputs_source.size(0) == outputs_target.size(0), 'The batch sizes must be equal'
        # source first, target second
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)

        # output the A-distance
        if i % config["test_interval"] == config["test_interval"] - 1:
            A_distance = cal_A_distance(test_ad_net, features)
            config['A_distance_file'].write(str(A_distance) + '\n')
            config['A_distance_file'].flush()

        softmax_out = nn.Softmax(dim=1)(outputs)
        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        elif config['method'] == 'MADA':
            transfer_loss = loss.MADA(features, softmax_out, ad_net)
        elif config['method'] == 'proposed':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.proposed([features, outputs], labels_source, ad_net, ad_net_group,
                                          entropy, network.calc_coeff(i), i, random_layer,
                                          config['loss']['trade_off23'])
        elif config['method'] == 'base':
            pass
        else:
            raise ValueError('Method cannot be recognized.')

        test_domain_loss = loss.DANN(features.clone().detach(), test_ad_net)
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        if config['method'] == 'base':
            total_loss = classifier_loss + test_domain_loss
        else:
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss + test_domain_loss
        total_loss.backward()
        optimizer.step()

    # torch.save(best_model, osp.join(config["output_path"], "best_model.pth.tar"))
    return best_acc
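`cal_A_distance` is not shown. The usual proxy A-distance is computed from the domain classifier's error err as d_A = 2(1 - 2 err); a hedged sketch under the batch convention used above (source half first, discriminator emitting P(domain = source) in [0, 1]):

import torch

def cal_A_distance(domain_classifier, features):
    # Proxy A-distance from the domain classifier's error on the current batch.
    with torch.no_grad():
        out = domain_classifier(features).view(-1)
        batch_size = out.size(0) // 2
        labels = torch.cat((torch.ones(batch_size), torch.zeros(batch_size))).to(out.device)
        err = ((out > 0.5).float() != labels).float().mean().item()
    return 2.0 * (1.0 - 2.0 * err)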
def train(config):
    ####################################################
    # Tensorboard setting
    ####################################################
    # tensor_writer = SummaryWriter(config["tensorboard_path"])

    ####################################################
    # Data setting
    ####################################################
    prep_dict = {}
    # Data pre-processing transforms.
    prep_dict["source"] = prep.image_train(**config['prep']['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    prep_dict["test"] = prep.image_test(**config['prep']['params'])

    dsets = {}
    dsets["source"] = datasets.ImageFolder(config['s_dset_path'], transform=prep_dict["source"])
    dsets["target"] = datasets.ImageFolder(config['t_dset_path'], transform=prep_dict['target'])
    dsets['test'] = datasets.ImageFolder(config['t_dset_path'], transform=prep_dict['test'])

    data_config = config["data"]
    # The original code used the source batch size for both source and target; fixed here.
    train_source_bs = data_config["source"]["batch_size"]
    train_target_bs = data_config['target']['batch_size']
    test_bs = data_config["test"]["batch_size"]

    dset_loaders = {}
    # drop_last=True keeps the source and target batches the same size to the very end.
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_source_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_target_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dset_loaders['test'] = DataLoader(dsets['test'], batch_size=test_bs,
                                      shuffle=False, num_workers=4, drop_last=False)

    ####################################################
    # Network Setting
    ####################################################
    class_num = config["network"]['params']['class_num']
    net_config = config["network"]
    """
    config['network'] = {'name': network.ResNetFc,
                         'params': {'resnet_name': args.net,
                                    'use_bottleneck': True,
                                    'bottleneck_dim': 256,
                                    'new_cls': True,
                                    'class_num': args.class_num,
                                    'type': args.type}}
    """
    base_network = net_config["name"](**net_config["params"])  # the ResNetFc class defined in network.py
    base_network = base_network.cuda()  # ResNetFc(Resnet, True, 256, True, 12)

    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        random_layer.cuda()
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        # Why multiply by the number of classes? CDAN feeds the discriminator
        # the outer product of features and class predictions.
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ####################################################
    # Env Setting
    ####################################################
    # gpus = config['gpu'].split(',')
    # if len(gpus) > 1:
    #     ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
    #     base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ####################################################
    # Optimizer Setting
    ####################################################
    optimizer_config = config['optimizer']
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))  # optim.SGD
    # config['optimizer'] = {'type': optim.SGD,
    #                        'optim_params': {'lr': args.lr,
    #                                         'momentum': 0.9,
    #                                         'weight_decay': 0.0005,
    #                                         'nesterov': True},
    #                        'lr_type': "inv",
    #                        'lr_param': {"lr": args.lr,
    #                                     'gamma': 0.001,  # shouldn't this be 0.01?
    #                                     'power': 0.75}}
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group['lr'])
    schedule_param = optimizer_config['lr_param']
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]  # returns the optimizer

    ####################################################
    # Train
    ####################################################
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = 0.0
    classifier_loss_value = 0.0
    total_loss_value = 0.0
    best_acc = 0.0
    batch_size = config["data"]["source"]["batch_size"]
    for i in range(config["num_iterations"]):  # num_iterations batches are used for training
        sys.stdout.write("Iteration : {} \r".format(i))
        sys.stdout.flush()
        loss_params = config["loss"]
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()
        inputs_target = inputs_target.cuda()

        inputs = torch.cat((inputs_source, inputs_target), dim=0)
        features, outputs, tau, cur_mean_source, cur_mean_target, \
            output_mean_source, output_mean_target = base_network(inputs)
        softmax_out = nn.Softmax(dim=1)(outputs)
        outputs_source = outputs[:batch_size]
        outputs_target = outputs[batch_size:]

        if config['method'] == 'CDAN+E' or config['method'] == 'CDAN_TransNorm':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            pass  # TODO: fill in later
        else:
            raise ValueError('Method cannot be recognized')
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        total_loss.backward()
        optimizer.step()
        # tensor_writer.add_scalar('total_loss', total_loss, i)
        # tensor_writer.add_scalar('classifier_loss', classifier_loss, i)
        # tensor_writer.add_scalar('transfer_loss', transfer_loss, i)

        ####################################################
        # Test
        ####################################################
        if i % config["test_interval"] == config["test_interval"] - 1:  # every test_interval iterations
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, base_network)
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
                ACC = round(best_acc, 2) * 100
                torch.save(best_model,
                           os.path.join(config["output_path"], "iter_{}_model.pth.tar".format(ACC)))
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
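Several of these loops call `image_classification_test(dset_loaders, base_network)` without defining it. A minimal single-crop sketch (the 10-crop variant is omitted; the network is assumed to return `(features, logits)` as elsewhere in this section):

import torch

def image_classification_test(loaders, model, test_10crop=False):
    # Plain top-1 accuracy over loaders['test']; single-crop path only.
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in loaders['test']:
            inputs, labels = inputs.cuda(), labels.cuda()
            _, outputs = model(inputs)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    return correct / total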
def train(args, validate=False, label=None):
    ## set pre-process
    if validate:
        dset_loaders = data_load_y(args, label)
    else:
        dset_loaders = data_load(args)
    class_num = args.class_num
    class_weight_src = torch.ones(class_num, ).cuda()

    ##################################################################################################
    ## set base network
    if args.net == 'resnet101':
        netG = utils.ResBase101().cuda()
    elif args.net == 'resnet50':
        netG = utils.ResBase50().cuda()
    netF = utils.ResClassifier(class_num=class_num, feature_dim=netG.in_features,
                               bottleneck_dim=args.bottleneck_dim).cuda()

    max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
    args.max_iter = args.max_epoch * max_len

    ad_flag = False
    if args.method in {'DANN', 'DANNE'}:
        ad_net = utils.AdversarialNetwork(args.bottleneck_dim, 1024, max_iter=args.max_iter).cuda()
        ad_flag = True
    if args.method in {'CDAN', 'CDANE'}:
        ad_net = utils.AdversarialNetwork(args.bottleneck_dim * class_num, 1024,
                                          max_iter=args.max_iter).cuda()
        random_layer = None
        ad_flag = True

    optimizer_g = optim.SGD(netG.parameters(), lr=args.lr * 0.1)
    optimizer_f = optim.SGD(netF.parameters(), lr=args.lr)
    if ad_flag:
        optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)

    base_network = nn.Sequential(netG, netF)

    if args.pl.startswith('atdoc_na'):
        mem_fea = torch.rand(len(dset_loaders["target"].dataset), args.bottleneck_dim).cuda()
        mem_fea = mem_fea / torch.norm(mem_fea, p=2, dim=1, keepdim=True)
        mem_cls = torch.ones(len(dset_loaders["target"].dataset), class_num).cuda() / class_num
    if args.pl == 'atdoc_nc':
        mem_fea = torch.rand(args.class_num, args.bottleneck_dim).cuda()
        mem_fea = mem_fea / torch.norm(mem_fea, p=2, dim=1, keepdim=True)

    source_loader_iter = iter(dset_loaders["source"])
    target_loader_iter = iter(dset_loaders["target"])

    ####
    list_acc = []
    best_ent = 100
    for iter_num in range(1, args.max_iter + 1):
        base_network.train()
        lr_scheduler(optimizer_g, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)
        lr_scheduler(optimizer_f, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)
        if ad_flag:
            lr_scheduler(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)

        try:
            inputs_source, labels_source = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(dset_loaders["source"])
            inputs_source, labels_source = next(source_loader_iter)
        try:
            inputs_target, _, idx = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(dset_loaders["target"])
            inputs_target, _, idx = next(target_loader_iter)

        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()

        if args.method == 'srconly' and args.pl == 'none':
            features_source, outputs_source = base_network(inputs_source)
        else:
            features_source, outputs_source = base_network(inputs_source)
            features_target, outputs_target = base_network(inputs_target)
            features = torch.cat((features_source, features_target), dim=0)
            outputs = torch.cat((outputs_source, outputs_target), dim=0)
            softmax_out = nn.Softmax(dim=1)(outputs)

        eff = utils.calc_coeff(iter_num, max_iter=args.max_iter)
        if args.method[-1] == 'E':
            entropy = loss.Entropy(softmax_out)
        else:
            entropy = None

        if args.method in {'CDAN', 'CDANE'}:
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, eff, random_layer)
        elif args.method in {'DANN', 'DANNE'}:
            transfer_loss = loss.DANN(features, ad_net, entropy, eff)
        elif args.method == 'DAN':
            transfer_loss = eff * loss.DAN(features_source, features_target)
        elif args.method == 'DAN_Linear':
            transfer_loss = eff * loss.DAN_Linear(features_source, features_target)
        elif args.method == 'JAN':
            transfer_loss = eff * loss.JAN([features_source, softmax_out[0:args.batch_size, :]],
                                           [features_target, softmax_out[args.batch_size:, :]])
        elif args.method == 'JAN_Linear':
            transfer_loss = eff * loss.JAN_Linear([features_source, softmax_out[0:args.batch_size, :]],
                                                  [features_target, softmax_out[args.batch_size:, :]])
        elif args.method == 'CORAL':
            transfer_loss = eff * loss.CORAL(features_source, features_target)
        elif args.method == 'DDC':
            transfer_loss = loss.MMD_loss()(features_source, features_target)
        elif args.method == 'srconly':
            transfer_loss = torch.tensor(0.0).cuda()
        else:
            raise ValueError('Method cannot be recognized.')

        src_ = loss.CrossEntropyLabelSmooth(reduction='none', num_classes=class_num,
                                            epsilon=args.smooth)(outputs_source, labels_source)
        weight_src = class_weight_src[labels_source].unsqueeze(0)
        classifier_loss = torch.sum(weight_src * src_) / (torch.sum(weight_src).item())
        total_loss = transfer_loss + classifier_loss

        eff = iter_num / args.max_iter
        if args.pl == 'none':
            pass
        elif args.pl == 'square':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            square_loss = -torch.sqrt((softmax_out ** 2).sum(dim=1)).mean()
            total_loss += args.tar_par * eff * square_loss
        elif args.pl == 'bsp':
            sigma_loss = bsp_loss(features)
            total_loss += args.tar_par * sigma_loss
        elif args.pl == 'bnm':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            bnm_loss = -torch.norm(softmax_out, 'nuc')
            cof = torch.tensor(np.sqrt(np.min(softmax_out.size())) / softmax_out.size(0))
            bnm_loss *= cof
            total_loss += args.tar_par * eff * bnm_loss
        elif args.pl == 'mcc':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            ent_weight = 1 + torch.exp(-loss.Entropy(softmax_out)).detach()
            ent_weight /= ent_weight.sum()
            cov_tar = softmax_out.t().mm(torch.diag(softmax_out.size(0) * ent_weight)).mm(softmax_out)
            mcc_loss = (torch.diag(cov_tar) / cov_tar.sum(dim=1)).mean()
            total_loss -= args.tar_par * eff * mcc_loss
        elif args.pl == 'ent':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            ent_loss = torch.mean(loss.Entropy(softmax_out))
            ent_loss /= torch.log(torch.tensor(class_num + 0.0))
            total_loss += args.tar_par * eff * ent_loss
        elif args.pl[0:3] == 'npl':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            softmax_out = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))
            weight_, pred = torch.max(softmax_out, 1)
            loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
            classifier_loss = torch.sum(weight_ * loss_) / (torch.sum(weight_).item())
            total_loss += args.tar_par * eff * classifier_loss
        elif args.pl == 'atdoc_nc':
            mem_fea_norm = mem_fea / torch.norm(mem_fea, p=2, dim=1, keepdim=True)
            dis = torch.mm(features_target.detach(), mem_fea_norm.t())
            _, pred = torch.max(dis, dim=1)
            classifier_loss = nn.CrossEntropyLoss()(outputs_target, pred)
            total_loss += args.tar_par * eff * classifier_loss
        elif args.pl.startswith('atdoc_na'):
            dis = -torch.mm(features_target.detach(), mem_fea.t())
            for di in range(dis.size(0)):
                dis[di, idx[di]] = torch.max(dis)  # exclude each sample's own memory slot
            _, p1 = torch.sort(dis, dim=1)
            w = torch.zeros(features_target.size(0), mem_fea.size(0)).cuda()
            for wi in range(w.size(0)):
                for wj in range(args.K):
                    w[wi][p1[wi, wj]] = 1 / args.K
            weight_, pred = torch.max(w.mm(mem_cls), 1)
            if args.pl == 'atdoc_na_now':
                classifier_loss = nn.CrossEntropyLoss()(outputs_target, pred)
            else:
                loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
                classifier_loss = torch.sum(weight_ * loss_) / (torch.sum(weight_).item())
            total_loss += args.tar_par * eff * classifier_loss

        optimizer_g.zero_grad()
        optimizer_f.zero_grad()
        if ad_flag:
            optimizer_d.zero_grad()
        total_loss.backward()
        optimizer_g.step()
        optimizer_f.step()
        if ad_flag:
            optimizer_d.step()

        if args.pl.startswith('atdoc_na'):
            base_network.eval()
            with torch.no_grad():
                features_target, outputs_target = base_network(inputs_target)
                features_target = features_target / torch.norm(features_target, p=2, dim=1, keepdim=True)
                softmax_out = nn.Softmax(dim=1)(outputs_target)
                if args.pl == 'atdoc_na_nos':
                    outputs_target = softmax_out
                else:
                    outputs_target = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))
            # Momentum update of the target feature/prediction memory banks.
            mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()
            mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()

        if args.pl == 'atdoc_nc':
            base_network.eval()
            with torch.no_grad():
                features_target, outputs_target = base_network(inputs_target)
                softmax_t = nn.Softmax(dim=1)(outputs_target)
                _, pred_t = torch.max(softmax_t, 1)
                onehot_t = torch.eye(args.class_num)[pred_t].cuda()
                center_t = torch.mm(features_target.t(), onehot_t) / (onehot_t.sum(dim=0) + 1e-8)
            mem_fea = (1.0 - args.momentum) * mem_fea + args.momentum * center_t.t().clone()

        if iter_num % int(args.eval_epoch * max_len) == 0:
            base_network.eval()
            if args.dset == 'VISDA-C':
                acc, py, score, y, tacc = utils.cal_acc_visda(dset_loaders["test"], base_network)
                args.out_file.write(tacc + '\n')
                args.out_file.flush()
                _ent = loss.Entropy(score)
                mean_ent = 0
                for ci in range(args.class_num):
                    mean_ent += _ent[py == ci].mean()
                mean_ent /= args.class_num
            else:
                acc, py, score, y = utils.cal_acc(dset_loaders["test"], base_network)
                mean_ent = torch.mean(loss.Entropy(score))

            list_acc.append(acc * 100)
            if best_ent > mean_ent:
                best_ent = mean_ent
                val_acc = acc * 100
                best_y = y
                best_py = py
                best_score = score

            log_str = 'Task: {}, Iter:{}/{}; Accuracy = {:.2f}%; Mean Ent = {:.4f}'.format(
                args.name, iter_num, args.max_iter, acc * 100, mean_ent)
            args.out_file.write(log_str + '\n')
            args.out_file.flush()
            print(log_str + '\n')

    idx = np.argmax(np.array(list_acc))
    max_acc = list_acc[idx]
    final_acc = list_acc[-1]
    log_str = '\n==========================================\n'
    log_str += '\nVal Acc = {:.2f}\nMax Acc = {:.2f}\nFin Acc = {:.2f}\n'.format(
        val_acc, max_acc, final_acc)
    args.out_file.write(log_str + '\n')
    args.out_file.flush()
    # torch.save(base_network.state_dict(), osp.join(args.output_dir, args.log + ".pt"))
    # sio.savemat(osp.join(args.output_dir, args.log + ".mat"),
    #             {'y': best_y.cpu().numpy(), 'py': best_py.cpu().numpy(),
    #              'score': best_score.cpu().numpy()})
    return best_y.cpu().numpy().astype(np.int64)
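The `lr_scheduler(optimizer, init_lr=..., iter_num=..., max_iter=...)` calls above, and `loss.CrossEntropyLabelSmooth`, are assumed helpers. Sketches of both under the usual conventions (the inv-decay constants gamma=10 and power=0.75 are common defaults, not confirmed from this code):

import torch
import torch.nn as nn

def lr_scheduler(optimizer, init_lr, iter_num, max_iter, gamma=10.0, power=0.75):
    # Inverse decay: lr = init_lr * (1 + gamma * p)^(-power), with p = iter_num / max_iter.
    decay = (1 + gamma * iter_num / max_iter) ** (-power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = init_lr * decay
    return optimizer

class CrossEntropyLabelSmooth(nn.Module):
    # Cross entropy against smoothed one-hot targets; reduction='none' keeps per-sample
    # losses, which the loops above reweight by class before averaging.
    def __init__(self, num_classes, epsilon=0.1, reduction='mean'):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.reduction = reduction
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        loss = (-smoothed * log_probs).sum(dim=1)
        return loss.mean() if self.reduction == 'mean' else loss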
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(),
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(),
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    if prep_config["test_10crop"]:
        dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(),
                                   transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, shuffle=False, num_workers=4)
                                for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(),
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
                                          shuffle=False, num_workers=4)

    class_num = config["network"]["params"]["class_num"]
    crit = LabelSmoothingLoss(smoothing=0.05, classes=class_num)  # label smoothing

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()  # load the backbone network

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)  # adversarial network
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    # center loss and its own optimizer
    criterion_centor = CenterLoss(num_classes=class_num, feat_dim=256, use_gpu=True)
    optimizer_centerloss = torch.optim.SGD(criterion_centor.parameters(), lr=config['lr'])
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    start_time = time.time()
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            # evaluation
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, base_network,
                                                 test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
            end_time = time.time()
            print('iter {} cost time {:.4f} sec.'.format(i, end_time - start_time))  # elapsed time
            start_time = time.time()
        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network),
                       osp.join(config["output_path"], "iter_{:05d}_model.pth.tar".format(i)))

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)  # training mode
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        # optimizer_centerloss = lr_scheduler(optimizer_centerloss, i, **schedule_param)
        optimizer.zero_grad()
        optimizer_centerloss.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')

        # classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)  # source classification loss
        classifier_loss = crit(outputs_source, labels_source)  # source classification loss with label smoothing

        # entropy regularization on the target domain
        outputs_target = outputs[len(inputs_source):, :]
        t_logit = outputs_target
        t_prob = torch.softmax(t_logit, dim=1)
        t_entropy_loss = get_entropy_loss(t_prob)  # entropy of the target predictions
        entropy_loss = 0.05 * t_entropy_loss

        # center loss on the source features
        loss_centor = criterion_centor(features_source, labels_source)

        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss \
                     + config['centor_w'] * loss_centor
        if i % config["test_interval"] == config["test_interval"] - 1:
            print('total loss: {:.4f}, transfer loss: {:.4f}, classifier loss: {:.4f}, centor loss: {:.4f}'.format(
                total_loss.item(), transfer_loss.item(), classifier_loss.item(),
                config['centor_w'] * loss_centor.item()))
        total_loss.backward()
        optimizer.step()
        # by doing so, weight_cent would not impact the learning of the centers
        for param in criterion_centor.parameters():
            param.grad.data *= (1. / config['centor_w'])
        optimizer_centerloss.step()

    torch.save(best_model, osp.join(config["output_path"], "best_model.pth.tar"))
    return best_acc
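`CenterLoss(num_classes, feat_dim, use_gpu)` keeps one learnable center per class and penalizes the distance from each source feature to its class center; the 1/centor_w gradient rescaling above then cancels the loss weight so the centers themselves learn at full rate. A minimal sketch (the commonly used reference implementation also clamps distances; that detail is omitted here):

import torch
import torch.nn as nn

class CenterLoss(nn.Module):
    def __init__(self, num_classes, feat_dim, use_gpu=True):
        super().__init__()
        centers = torch.randn(num_classes, feat_dim)
        self.centers = nn.Parameter(centers.cuda() if use_gpu else centers)

    def forward(self, features, labels):
        # Mean squared distance between each feature and the center of its class.
        batch_centers = self.centers[labels]  # [batch, feat_dim]
        return ((features - batch_centers) ** 2).sum(dim=1).mean()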
def train(args, model, ad_net, source_samples, source_labels, target_samples, target_labels,
          optimizer, optimizer_ad, epoch, start_epoch, method, source_label_distribution,
          out_wei_file, cov_mat, pseudo_target_label, class_weights, true_weights):
    model.train()
    cov_mat[:] = 0.0
    pseudo_target_label[:] = 0.0
    len_source = source_labels.shape[0]
    len_target = target_labels.shape[0]
    size = max(len_source, len_target)
    num_iter = int(size / args.batch_size)
    for batch_idx in range(num_iter):
        t = time.time()
        source_idx = np.random.choice(len_source, args.batch_size)
        target_idx = np.random.choice(len_target, args.batch_size)
        data_source, label_source = source_samples[source_idx], source_labels[source_idx]
        data_target, _ = target_samples[target_idx], target_labels[target_idx]
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature, output = model(torch.cat((data_source, data_target), 0))
        if 'IW' in method:
            ys_onehot = torch.zeros(args.batch_size, 10).to(args.device)
            ys_onehot.scatter_(1, label_source.view(-1, 1), 1)
            # Compute weights on source data.
            if 'ORACLE' in method:
                weights = torch.mm(ys_onehot, true_weights)
            else:
                weights = torch.mm(ys_onehot, model.im_weights)
            source_preds, target_preds = output[:args.batch_size], output[args.batch_size:]
            # Compute the aggregated distribution of pseudo-labels on the target domain.
            pseudo_target_label += torch.sum(F.softmax(target_preds, dim=1), dim=0).view(-1, 1).detach()
            # Update the covariance matrix on the source domain as well.
            cov_mat += torch.mm(F.softmax(source_preds, dim=1).transpose(1, 0), ys_onehot).detach()
            loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')(
                    output.narrow(0, 0, data_source.size(0)), label_source) * weights) / 10.0
        else:
            loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)

        if epoch > start_epoch:
            if method == 'CDAN-E':
                softmax_output = nn.Softmax(dim=1)(output)
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                       network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx),
                                       None, device=args.device)
            elif 'IWCDAN-E' in method:
                softmax_output = nn.Softmax(dim=1)(output)
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                       network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx),
                                       None, weights=weights, device=args.device)
            elif method == 'CDAN':
                softmax_output = nn.Softmax(dim=1)(output)
                loss += loss_func.CDAN([feature, softmax_output], ad_net, None, None, None,
                                       device=args.device)
            elif 'IWCDAN' in method:
                softmax_output = nn.Softmax(dim=1)(output)
                loss += loss_func.CDAN([feature, softmax_output], ad_net, None, None, None,
                                       weights=weights, device=args.device)
            elif method == 'DANN':
                loss += loss_func.DANN(feature, ad_net, args.device)
            elif 'IWDAN' in method:
                dloss = loss_func.IWDAN(feature, ad_net, weights)
                loss += args.mu * dloss
            elif method == 'NANN':
                pass
            else:
                raise ValueError('Method cannot be recognized.')
        loss.backward()
        optimizer.step()
        if epoch > start_epoch and method != 'NANN':
            optimizer_ad.step()

    if 'IW' in method and epoch > start_epoch:
        pseudo_target_label /= args.batch_size * num_iter
        cov_mat /= args.batch_size * num_iter
        # Recompute the importance weights by solving a QP.
        model.im_weights_update(source_label_distribution,
                                pseudo_target_label.cpu().detach().numpy(),
                                cov_mat.cpu().detach().numpy(), args.device)
        current_weights = [round(x, 4) for x in model.im_weights.data.cpu().numpy().flatten()]
        write_list(out_wei_file,
                   [np.linalg.norm(current_weights - true_weights.cpu().numpy().flatten())]
                   + current_weights)
        print(np.linalg.norm(current_weights - true_weights.cpu().numpy().flatten()),
              current_weights)
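`model.im_weights_update` re-estimates the class importance weights from the accumulated source confusion statistics C (`cov_mat`) and the aggregated target pseudo-label distribution mu (`pseudo_target_label`) by solving a small QP, as in the IWDAN/IWCDAN recipe. A hedged sketch of such a solve using cvxpy; the exact objective and constraints of the original implementation may differ:

import cvxpy as cp
import numpy as np

def solve_importance_weights(cov_mat, pseudo_target_dist, source_label_dist):
    # min_w ||C w - mu||^2  subject to  w >= 0  and the reweighted source
    # label distribution still summing to one.
    k = cov_mat.shape[0]
    w = cp.Variable(k)
    objective = cp.Minimize(cp.sum_squares(cov_mat @ w - pseudo_target_dist.flatten()))
    constraints = [w >= 0, source_label_dist.flatten() @ w == 1]
    cp.Problem(objective, constraints).solve()
    return np.asarray(w.value).reshape(-1, 1)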
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(),
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs,
                                        shuffle=True, num_workers=0, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(),
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=0, drop_last=True)
    if prep_config["test_10crop"]:
        dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(),
                                   transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, shuffle=False, num_workers=0)
                                for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(),
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
                                          shuffle=False, num_workers=0)
    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    best_acc = 0.0
    best_model = nn.Sequential(base_network)
    each_log = ""
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, base_network,
                                                 test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}, transfer_loss:{:.4f}, classifier_loss:{:.4f}, total_loss:{:.4f}" \
                .format(i, temp_acc, transfer_loss.item(), classifier_loss.item(), total_loss.item())
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
            config["out_file"].write(each_log)
            config["out_file"].flush()
            each_log = ""

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        labels_target_fake = torch.max(nn.Softmax(dim=1)(outputs_target), 1)[1]
        labels = torch.cat((labels_source, labels_target_fake))
        entropy = loss.Entropy(softmax_out)
        transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                  network.calc_coeff(i), random_layer)
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        mdd_loss = loss.mdd_loss(features=features, labels=labels,
                                 left_weight=args.left_weight, right_weight=args.right_weight)
        max_entropy_loss = loss.EntropicConfusion(features)
        total_loss = loss_params["trade_off"] * transfer_loss \
                     + args.cls_weight * classifier_loss \
                     + args.mdd_weight * mdd_loss \
                     + args.entropic_weight * max_entropy_loss
        total_loss.backward()
        optimizer.step()
        log_str = "iter: {:05d}, transfer_loss:{:.4f}, classifier_loss:{:.4f}, mdd_loss:{:.4f}, " \
                  "max_entropy_loss:{:.4f}, total_loss:{:.4f}" \
            .format(i, transfer_loss.item(), classifier_loss.item(), mdd_loss.item(),
                    max_entropy_loss.item(), total_loss.item())
        each_log += log_str + "\n"

    torch.save(best_model,
               config['model_output_path'] + "{}_{}_p-{}_e-{}".format(
                   config['log_name'], str(best_acc), str(config["mdd_weight"]),
                   str(config["entropic_weight"])))
    return best_acc
def train(args):
    ## set pre-process
    dset_loaders = data_load(args)
    class_num = args.class_num
    class_weight_src = torch.ones(class_num, ).cuda()

    ## set base network
    if args.net == 'resnet34':
        netG = utils.ResBase34().cuda()
    elif args.net == 'vgg16':
        netG = utils.VGG16Base().cuda()

    netF = utils.ResClassifier(class_num=class_num, feature_dim=netG.in_features,
                               bottleneck_dim=args.bottleneck_dim).cuda()

    max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
    args.max_iter = args.max_epoch * max_len

    ad_flag = False
    if args.method == 'DANN':
        ad_net = utils.AdversarialNetwork(args.bottleneck_dim, 1024,
                                          max_iter=args.max_iter).cuda()
        ad_flag = True
    if args.method == 'CDANE':
        ad_net = utils.AdversarialNetwork(args.bottleneck_dim * class_num, 1024,
                                          max_iter=args.max_iter).cuda()
        random_layer = None
        ad_flag = True

    optimizer_g = optim.SGD(netG.parameters(), lr=args.lr * 0.1)
    optimizer_f = optim.SGD(netF.parameters(), lr=args.lr)
    if ad_flag:
        optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)

    base_network = nn.Sequential(netG, netF)

    if args.pl.startswith('atdoc_na'):
        mem_fea = torch.rand(
            len(dset_loaders["target"].dataset) + len(dset_loaders["ltarget"].dataset),
            args.bottleneck_dim).cuda()
        mem_fea = mem_fea / torch.norm(mem_fea, p=2, dim=1, keepdim=True)
        mem_cls = torch.ones(
            len(dset_loaders["target"].dataset) + len(dset_loaders["ltarget"].dataset),
            class_num).cuda() / class_num

    if args.pl == 'atdoc_nc':
        mem_fea = torch.rand(args.class_num, args.bottleneck_dim).cuda()
        mem_fea = mem_fea / torch.norm(mem_fea, p=2, dim=1, keepdim=True)

    source_loader_iter = iter(dset_loaders["source"])
    target_loader_iter = iter(dset_loaders["target"])
    ltarget_loader_iter = iter(dset_loaders["ltarget"])

    list_acc = []
    best_val_acc = 0
    for iter_num in range(1, args.max_iter + 1):
        base_network.train()
        lr_scheduler(optimizer_g, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)
        lr_scheduler(optimizer_f, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)
        if ad_flag:
            lr_scheduler(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)

        try:
            inputs_source, labels_source = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(dset_loaders["source"])
            inputs_source, labels_source = next(source_loader_iter)
        try:
            inputs_target, _, idx = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(dset_loaders["target"])
            inputs_target, _, idx = next(target_loader_iter)
        try:
            inputs_ltarget, labels_ltarget, lidx = next(ltarget_loader_iter)
        except StopIteration:
            ltarget_loader_iter = iter(dset_loaders["ltarget"])
            inputs_ltarget, labels_ltarget, lidx = next(ltarget_loader_iter)

        inputs_ltarget, labels_ltarget = inputs_ltarget.cuda(), labels_ltarget.cuda()
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()

        if args.method == 'srconly' and args.pl == 'none':
            features_source, outputs_source = base_network(inputs_source)
            features_ltarget, outputs_ltarget = base_network(inputs_ltarget)
        else:
            features_ltarget, outputs_ltarget = base_network(inputs_ltarget)
            features_source, outputs_source = base_network(inputs_source)
            features_target, outputs_target = base_network(inputs_target)
            features_target = torch.cat((features_ltarget, features_target), dim=0)
            outputs_target = torch.cat((outputs_ltarget, outputs_target), dim=0)
            features = torch.cat((features_source, features_target), dim=0)
            outputs = torch.cat((outputs_source, outputs_target), dim=0)
            softmax_out = nn.Softmax(dim=1)(outputs)

        eff = utils.calc_coeff(iter_num, max_iter=args.max_iter)
        if args.method[-1] == 'E':
            entropy = loss.Entropy(softmax_out)
        else:
            entropy = None

        if args.method == 'CDANE':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, eff, random_layer)
        elif args.method == 'DANN':
            transfer_loss = loss.DANN(features, ad_net, entropy, eff)
        elif args.method == 'srconly':
            transfer_loss = torch.tensor(0.0).cuda()
        else:
            raise ValueError('Method cannot be recognized.')

        src_ = loss.CrossEntropyLabelSmooth(reduction='none', num_classes=class_num,
                                            epsilon=args.smooth)(outputs_source, labels_source)
        weight_src = class_weight_src[labels_source].unsqueeze(0)
        classifier_loss = torch.sum(weight_src * src_) / (torch.sum(weight_src).item())
        total_loss = transfer_loss + classifier_loss

        ltar_ = loss.CrossEntropyLabelSmooth(reduction='none', num_classes=class_num,
                                             epsilon=args.smooth)(outputs_ltarget, labels_ltarget)
        weight_src = class_weight_src[labels_ltarget].unsqueeze(0)
        ltar_classifier_loss = torch.sum(weight_src * ltar_) / (torch.sum(weight_src).item())
        total_loss += ltar_classifier_loss

        eff = iter_num / args.max_iter

        if not args.pl == 'none':
            # keep only the unlabeled-target part of the batch
            outputs_target = outputs_target[-args.batch_size // 3:, :]
            features_target = features_target[-args.batch_size // 3:, :]

        if args.pl == 'none':
            pass
        elif args.pl == 'square':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            square_loss = -torch.sqrt((softmax_out**2).sum(dim=1)).mean()
            total_loss += args.tar_par * eff * square_loss
        elif args.pl == 'bsp':
            sigma_loss = bsp_loss(features)
            total_loss += args.tar_par * sigma_loss
        elif args.pl == 'ent':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            ent_loss = torch.mean(loss.Entropy(softmax_out))
            ent_loss /= torch.log(torch.tensor(class_num + 0.0))
            total_loss += args.tar_par * eff * ent_loss
        elif args.pl == 'bnm':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            bnm_loss = -torch.norm(softmax_out, 'nuc')
            cof = torch.tensor(np.sqrt(np.min(softmax_out.size())) / softmax_out.size(0))
            bnm_loss *= cof
            total_loss += args.tar_par * eff * bnm_loss
        elif args.pl == 'mcc':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            ent_weight = 1 + torch.exp(-loss.Entropy(softmax_out)).detach()
            ent_weight /= ent_weight.sum()
            cov_tar = softmax_out.t().mm(
                torch.diag(softmax_out.size(0) * ent_weight)).mm(softmax_out)
            mcc_loss = (torch.diag(cov_tar) / cov_tar.sum(dim=1)).mean()
            total_loss -= args.tar_par * eff * mcc_loss
        elif args.pl == 'npl':
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            softmax_out = softmax_out**2 / ((softmax_out**2).sum(dim=0))
            weight_, pred = torch.max(softmax_out, 1)
            loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
            classifier_loss = torch.sum(weight_ * loss_) / (torch.sum(weight_).item())
            total_loss += args.tar_par * eff * classifier_loss
        elif args.pl == 'atdoc_nc':
            mem_fea_norm = mem_fea / torch.norm(mem_fea, p=2, dim=1, keepdim=True)
            dis = torch.mm(features_target.detach(), mem_fea_norm.t())
            _, pred = torch.max(dis, dim=1)
            classifier_loss = nn.CrossEntropyLoss()(outputs_target, pred)
            total_loss += args.tar_par * eff * classifier_loss
        elif args.pl.startswith('atdoc_na'):
            dis = -torch.mm(features_target.detach(), mem_fea.t())
            # exclude each sample's own memory slot from its neighbor search
            for di in range(dis.size(0)):
                dis[di, idx[di]] = torch.max(dis)
            _, p1 = torch.sort(dis, dim=1)

            w = torch.zeros(features_target.size(0), mem_fea.size(0)).cuda()
            for wi in range(w.size(0)):
                for wj in range(args.K):
                    w[wi][p1[wi, wj]] = 1 / args.K

            weight_, pred = torch.max(w.mm(mem_cls), 1)
            if args.pl.startswith('atdoc_na_now'):
                classifier_loss = nn.CrossEntropyLoss()(outputs_target, pred)
            else:
                loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
                classifier_loss = torch.sum(weight_ * loss_) / (torch.sum(weight_).item())
            total_loss += args.tar_par * eff * classifier_loss

        optimizer_g.zero_grad()
        optimizer_f.zero_grad()
        if ad_flag:
            optimizer_d.zero_grad()
        total_loss.backward()
        optimizer_g.step()
        optimizer_f.step()
        if ad_flag:
            optimizer_d.step()

        if args.pl.startswith('atdoc_na'):
            base_network.eval()
            with torch.no_grad():
                features_target, outputs_target = base_network(inputs_target)
                features_target = features_target / torch.norm(features_target, p=2, dim=1, keepdim=True)
                softmax_out = nn.Softmax(dim=1)(outputs_target)
                if args.pl.startswith('atdoc_na_nos'):
                    outputs_target = softmax_out
                else:
                    outputs_target = softmax_out**2 / ((softmax_out**2).sum(dim=0))
            mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()
            mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()

            with torch.no_grad():
                features_ltarget, outputs_ltarget = base_network(inputs_ltarget)
                features_ltarget = features_ltarget / torch.norm(features_ltarget, p=2, dim=1, keepdim=True)
                softmax_out = nn.Softmax(dim=1)(outputs_ltarget)
                if args.pl.startswith('atdoc_na_nos'):
                    outputs_ltarget = softmax_out
                else:
                    outputs_ltarget = softmax_out**2 / ((softmax_out**2).sum(dim=0))
            mem_fea[lidx + len(dset_loaders["target"].dataset)] = \
                (1.0 - args.momentum) * mem_fea[lidx + len(dset_loaders["target"].dataset)] \
                + args.momentum * features_ltarget.clone()
            mem_cls[lidx + len(dset_loaders["target"].dataset)] = \
                (1.0 - args.momentum) * mem_cls[lidx + len(dset_loaders["target"].dataset)] \
                + args.momentum * outputs_ltarget.clone()

        if args.pl == 'atdoc_nc':
            base_network.eval()
            with torch.no_grad():
                feat_u, outputs_target = base_network(inputs_target)
                softmax_t = nn.Softmax(dim=1)(outputs_target)
                _, pred_t = torch.max(softmax_t, 1)
                onehot_tu = torch.eye(args.class_num)[pred_t].cuda()

                feat_l, outputs_target = base_network(inputs_ltarget)
                softmax_t = nn.Softmax(dim=1)(outputs_target)
                _, pred_t = torch.max(softmax_t, 1)
                onehot_tl = torch.eye(args.class_num)[pred_t].cuda()

            center_t = (torch.mm(feat_u.t(), onehot_tu) + torch.mm(feat_l.t(), onehot_tl)) \
                / (onehot_tu.sum(dim=0) + onehot_tl.sum(dim=0) + 1e-8)
            mem_fea = (1.0 - args.momentum) * mem_fea + args.momentum * center_t.t().clone()

        if iter_num % int(args.eval_epoch * max_len) == 0:
            base_network.eval()
            acc, py, score, y = utils.cal_acc(dset_loaders["test"], base_network)
            val_acc, _, _, _ = utils.cal_acc(dset_loaders["val"], base_network)
            list_acc.append(acc * 100)
            if best_val_acc <= val_acc:
                best_val_acc = val_acc
                best_acc = acc
                best_y = y
                best_py = py
                best_score = score
            log_str = 'Task: {}, Iter:{}/{}; Accuracy = {:.2f}%; Val Acc = {:.2f}%'.format(
                args.name, iter_num, args.max_iter, acc * 100, val_acc * 100)
            args.out_file.write(log_str + '\n')
            args.out_file.flush()
            print(log_str + '\n')

    val_acc = best_acc * 100
    idx = np.argmax(np.array(list_acc))
    max_acc = list_acc[idx]
    final_acc = list_acc[-1]
    log_str = '\n==========================================\n'
    log_str += '\nVal Acc = {:.2f}\nMax Acc = {:.2f}\nFin Acc = {:.2f}\n'.format(
        val_acc, max_acc, final_acc)
    args.out_file.write(log_str + '\n')
    args.out_file.flush()
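# A plausible sketch of the in-place lr_scheduler(...) called above with
# (optimizer, init_lr, iter_num, max_iter). The inverse-decay schedule
# (1 + gamma * p) ** (-power) is the common choice in this line of work, but
# the exact gamma/power values and the extra param_group fields set here are
# assumptions.
def lr_scheduler(optimizer, init_lr, iter_num, max_iter, gamma=10.0, power=0.75):
    # decay every group's lr from init_lr as training progresses
    decay = (1 + gamma * iter_num / max_iter) ** (-power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = init_lr * decay
    return optimizer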
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(),
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(),
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    if prep_config["test_10crop"]:
        dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(),
                                   transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs,
                                           shuffle=False, num_workers=4) for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(),
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
                                          shuffle=False, num_workers=4)

    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    ## loaders used only for the clustering / pseudo-labeling pass
    cluster_data_loader = {}
    cluster_data_loader["source"] = DataLoader(dsets["source"], batch_size=100,
                                               shuffle=True, num_workers=0, drop_last=True)
    cluster_data_loader["target"] = DataLoader(dsets["target"], batch_size=100,
                                               shuffle=True, num_workers=0, drop_last=True)

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    # EMA factor for the global class centroids; this constant is not defined
    # anywhere in the original snippet, so a typical value is assumed here
    ctr_adapt_factor = 0.3
    for i in range(config["num_iterations"]):
        lamb = adaptation_factor((i + 1) / 10000)
        cls_lamb = adaptation_factor(5 * (i + 1) / 10000)
        epoch = int(i / len_train_source)
        if i % len_train_source == 0:
            testing = True
            pl_update = True
            print_loss = True
            print("epoch: {} ".format(int(i / len_train_source)))

        if epoch % 5 == 0 and pl_update:
            pl_update = False
            pseudo_labeled_targets, target_g_ctr, source_g_ctr = pseudo_labeling(
                base_network, cluster_data_loader, class_num)
            global_source_ctr = source_g_ctr.detach_()
            global_target_ctr = target_g_ctr.detach_()
            if len(pseudo_labeled_targets["label_list"]) != 0:
                print("new pl at epoch {}".format(epoch))
                pseudo_dataset = PS_ImageList(pseudo_labeled_targets, transform=prep_dict["target"])
                dset_loaders["ps_target"] = DataLoader(pseudo_dataset, batch_size=train_bs,
                                                       shuffle=False, num_workers=0, drop_last=True)
                len_train_target = len(dset_loaders["ps_target"])
            else:
                print("no pl at epoch {}".format(epoch))

        if epoch % 5 == 0 and testing and i > 0:
            base_network.train(False)
            temp_acc, v_loss = image_classification_test(dset_loaders, base_network,
                                                         test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
            testing = False
            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            print("epoch: {} ".format(int(i / len_train_source)))
            print("time: {} ".format(current_time))
            print("best acc: {} ".format(best_acc))
            print("loss: {} ".format(v_loss))
            print("adaptation rate : {}".format(lamb))
            print("learning rate : {} {} {} {}".format(
                optimizer.param_groups[0]["lr"], optimizer.param_groups[1]["lr"],
                optimizer.param_groups[2]["lr"], optimizer.param_groups[3]["lr"]))
            print("------------")

        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network),
                       osp.join(config["output_path"], "iter_{:05d}_model.pth.tar".format(i)))

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["ps_target"])
        inputs_source, labels_source, _ = next(iter_source)
        try:
            inputs_target, labels_target = next(iter_target)
        except StopIteration:
            # the pseudo-target loader can shrink between epochs
            iter_target = iter(dset_loaders["ps_target"])
            inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source, labels_target = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda(), labels_target.cuda()

        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)

        ## class-aware batch centroids, smoothed with the running global centroids
        batch_source_centroids = utils.get_batch_centers(features_source, labels_source, class_num)
        batch_target_centroids = utils.get_batch_centers(features_target, labels_target, class_num)
        batch_source_centroids = ctr_adapt_factor * global_source_ctr \
            + (1 - ctr_adapt_factor) * batch_source_centroids
        batch_target_centroids = ctr_adapt_factor * global_target_ctr \
            + (1 - ctr_adapt_factor) * batch_target_centroids
        global_source_ctr = batch_source_centroids.clone().detach_()
        global_target_ctr = batch_target_centroids.clone().detach_()

        ## build anchor / positive / negative index sets per class
        alignment_index = []
        identity = np.eye(class_num)
        ctr_alignment_count = 0
        pos = []
        post = []
        neg = []
        negt = []
        index_s = np.empty([0], dtype=int)
        index_t = np.empty([0], dtype=int)
        labels = labels_source.cpu().data.numpy()
        labelt = labels_target.cpu().data.numpy()

        ## n-pair anchors (one random anchor per class present in the batch)
        n_pairs = []
        for label in set(labels):
            label_indices = np.where(labels == label)[0]
            if len(label_indices) < 1:
                continue
            anchor = np.random.choice(label_indices, 1, replace=False)
            n_pairs.append([anchor, np.array([label])])
        n_pairs = np.array(n_pairs)
        n_negatives = []
        for j in range(len(n_pairs)):
            negative = np.concatenate([n_pairs[:j, 1], n_pairs[j + 1:, 1]])
            n_negatives.append(negative)
        n_negatives = np.array(n_negatives)
        n_pairs_s = torch.LongTensor(n_pairs)
        n_neg_s = torch.LongTensor(n_negatives)

        n_pairs = []
        for label in set(labelt):
            label_indices = np.where(labelt == label)[0]
            if len(label_indices) < 1:
                continue
            anchor = np.random.choice(label_indices, 1, replace=False)
            n_pairs.append([anchor, np.array([label])])
        n_pairs = np.array(n_pairs)
        n_negatives = []
        for j in range(len(n_pairs)):
            negative = np.concatenate([n_pairs[:j, 1], n_pairs[j + 1:, 1]])
            n_negatives.append(negative)
        n_negatives = np.array(n_negatives)
        n_pairs_t = torch.LongTensor(n_pairs)
        n_neg_t = torch.LongTensor(n_negatives)

        for it in range(class_num):
            idx = np.where(labels == it)[0]
            idxt = np.where(labelt == it)[0]
            if len(idx) != 0:
                index_s = np.append(index_s, idx)
                pos += [it for cc in range(len(idx))]
                mask = 1 - identity[it, :]
                neg_id = np.nonzero(mask.flatten())[0].flatten()
                neg += [[neg_id] for cc in range(len(idx))]
            if len(idxt) != 0:
                index_t = np.append(index_t, idxt)
                post += [it for cc in range(len(idxt))]
                maskt = 1 - identity[it, :]
                neg_idt = np.nonzero(maskt.flatten())[0].flatten()
                negt += [[neg_idt] for cc in range(len(idxt))]
            if len(idx) != 0 and len(idxt) != 0:
                ctr_alignment_count += 1
                alignment_index += [it]

        ctr_alignment_loss = 0
        anchors_s = features_source[index_s.flatten(), :]
        positive_s = global_source_ctr[pos, :]
        negative_s = global_source_ctr[neg].squeeze(1)
        anchors_t = features_target[index_t.flatten(), :]
        positive_t = global_target_ctr[post, :]
        negative_t = global_target_ctr[negt].squeeze(1)

        # average of the source and target n-pair losses
        FAT_loss = (n_pair_loss(anchors_s, positive_s, negative_s, class_num, train_bs)
                    + n_pair_loss(anchors_t, positive_t, negative_t, class_num, train_bs)) / 2
        if len(alignment_index) != 0:
            ctr_alignment_loss = torch.sum(utils.cosine_distance(
                batch_source_centroids[alignment_index],
                batch_target_centroids[alignment_index], cross=False))

        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')

        classifier_loss = nn.CrossEntropyLoss()(outputs_source / 2, labels_source)
        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        if lamb > .1:
            cls_lamb = 1.0
        else:
            cls_lamb = 10 * lamb
        # alternative weighting kept from the original for reference:
        # total_loss = lamb * (FAT_loss + 10 * ctr_alignment_loss) + transfer_loss + cls_lamb * classifier_loss
        total_loss.backward()
        optimizer.step()

        if epoch % 5 == 0 and print_loss:
            print("fat loss ", FAT_loss)
            print("ctr align: ", ctr_alignment_loss)
            print("tot: ", total_loss)
            print("clss: ", classifier_loss)
            print("trs: ", transfer_loss)
            print("++++++++++++++++++++++++end of epoch++++++++++++++++++++")
            print_loss = False
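# A plausible sketch of adaptation_factor(p) as used above. The gradual
# ramp-up 2/(1+exp(-10p)) - 1 from the DANN/RevGrad literature is assumed,
# since the helper itself is not part of this snippet.
import numpy as np

def adaptation_factor(p):
    # p is training progress; the factor rises smoothly from 0 toward 1
    p = min(p, 1.0)
    return 2.0 / (1.0 + np.exp(-10.0 * p)) - 1.0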
def train(config):
    ## Define start time
    start_time = time.time()

    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    print("Preparing data", flush=True)
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    root_folder = data_config["root_folder"]
    dsets["source"] = ImageList(open(osp.join(root_folder, data_config["source"]["list_path"])).readlines(),
                                transform=prep_dict["source"], root_folder=root_folder,
                                ratios=config["ratios_source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(osp.join(root_folder, data_config["target"]["list_path"])).readlines(),
                                transform=prep_dict["target"], root_folder=root_folder,
                                ratios=config["ratios_target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["test"] = ImageList(open(osp.join(root_folder, data_config["test"]["list_path"])).readlines(),
                              transform=prep_dict["test"], root_folder=root_folder,
                              ratios=config["ratios_test"])
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
                                      shuffle=False, num_workers=4)

    test_path = os.path.join(root_folder, data_config["test"]["dataset_path"])
    if os.path.exists(test_path):
        print('Found existing dataset for test', flush=True)
        with open(test_path, 'rb') as f:
            [test_samples, test_labels] = pickle.load(f)
            test_labels = torch.LongTensor(test_labels).to(config["device"])
    else:
        print('Missing test dataset', flush=True)
        print('Building dataset for test and writing to {}'.format(test_path), flush=True)
        dset_test = ImageList(open(osp.join(root_folder, data_config["test"]["list_path"])).readlines(),
                              transform=prep_dict["test"], root_folder=root_folder,
                              ratios=config['ratios_test'])
        loaded_dset_test = LoadedImageList(dset_test)
        test_samples, test_labels = loaded_dset_test.samples.numpy(), loaded_dset_test.targets.numpy()
        with open(test_path, 'wb') as f:
            pickle.dump([test_samples, test_labels], f)

    class_num = config["network"]["params"]["class_num"]
    test_samples, test_labels = sample_ratios(test_samples, test_labels, config['ratios_test'])

    # compute the label distributions on the source and target domains
    source_label_distribution = np.zeros((class_num))
    for img in dsets["source"].imgs:
        source_label_distribution[img[1]] += 1
    print("Total source samples: {}".format(np.sum(source_label_distribution)), flush=True)
    print("Source samples per class: {}".format(source_label_distribution), flush=True)
    source_label_distribution /= np.sum(source_label_distribution)
    print("Source label distribution: {}".format(source_label_distribution), flush=True)
    target_label_distribution = np.zeros((class_num))
    for img in dsets["target"].imgs:
        target_label_distribution[img[1]] += 1
    print("Total target samples: {}".format(np.sum(target_label_distribution)), flush=True)
    print("Target samples per class: {}".format(target_label_distribution), flush=True)
    target_label_distribution /= np.sum(target_label_distribution)
    print("Target label distribution: {}".format(target_label_distribution), flush=True)
    mixture = (source_label_distribution + target_label_distribution) / 2
    jsd = (scipy.stats.entropy(source_label_distribution, qk=mixture)
           + scipy.stats.entropy(target_label_distribution, qk=mixture)) / 2
    print("JSD : {}".format(jsd), flush=True)
    test_label_distribution = np.zeros((class_num))
    for img in test_labels:
        test_label_distribution[int(img.item())] += 1
    print("Test samples per class: {}".format(test_label_distribution), flush=True)
    test_label_distribution /= np.sum(test_label_distribution)
    print("Test label distribution: {}".format(test_label_distribution), flush=True)
    write_list(config["out_wei_file"], [round(x, 4) for x in test_label_distribution])
    write_list(config["out_wei_file"], [round(x, 4) for x in source_label_distribution])
    write_list(config["out_wei_file"], [round(x, 4) for x in target_label_distribution])
    true_weights = torch.tensor(target_label_distribution / source_label_distribution,
                                dtype=torch.float, requires_grad=False)[:, None].to(config["device"])
    print("True weights : {}".format(true_weights[:, 0].cpu().numpy()))
    config["out_wei_file"].write(str(jsd) + "\n")

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.to(config["device"])

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        if 'CDAN' in config['method']:
            ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
        else:
            ad_net = network.AdversarialNetwork(base_network.output_num(), 1024)
    if config["loss"]["random"]:
        random_layer.to(config["device"])
    ad_net = ad_net.to(config["device"])
    parameter_list = ad_net.get_parameters() + base_network.get_parameters()
    parameter_list[-1]["lr_mult"] = config["lr_mult_im"]

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    # Maintain two quantities for the QP.
    cov_mat = torch.tensor(np.zeros((class_num, class_num), dtype=np.float32),
                           requires_grad=False).to(config["device"])
    pseudo_target_label = torch.tensor(np.zeros((class_num, 1), dtype=np.float32),
                                       requires_grad=False).to(config["device"])
    # Maintain one weight vector for BER.
    class_weights = torch.tensor(1.0 / source_label_distribution, dtype=torch.float,
                                 requires_grad=False).to(config["device"])

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0

    print("Preparations done in {:.0f} seconds".format(time.time() - start_time), flush=True)
    print("Starting training for {} iterations using method {}".format(
        config["num_iterations"], config['method']), flush=True)
    start_time_test = start_time = time.time()
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test_loaded(test_samples, test_labels, base_network)
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
            log_str = " iter: {:05d}, sec: {:.0f}, class: {:.5f}, da: {:.5f}, precision: {:.5f}".format(
                i, time.time() - start_time_test, classifier_loss_value,
                transfer_loss_value, temp_acc)
            config["out_log_file"].write(log_str + "\n")
            config["out_log_file"].flush()
            print(log_str, flush=True)
            if 'IW' in config['method']:
                current_weights = [round(x, 4) for x in
                                   base_network.im_weights.data.cpu().numpy().flatten()]
                print(current_weights, flush=True)
            start_time_test = time.time()
        if i % 500 == 499:
            print("{} iterations in {} seconds".format(i, time.time() - start_time), flush=True)

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, label_source = next(iter_source)
        inputs_target, _ = next(iter_target)
        inputs_source, inputs_target, label_source = \
            inputs_source.to(config["device"]), inputs_target.to(config["device"]), \
            label_source.to(config["device"])
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)

        if 'IW' in config['method']:
            ys_onehot = torch.zeros(train_bs, class_num).to(config["device"])
            ys_onehot.scatter_(1, label_source.view(-1, 1), 1)
            # Compute weights on source data.
            if 'ORACLE' in config['method']:
                weights = torch.mm(ys_onehot, true_weights)
            else:
                weights = torch.mm(ys_onehot, base_network.im_weights)
            source_preds, target_preds = outputs[:train_bs], outputs[train_bs:]
            # Compute the aggregated distribution of pseudo-labels on the target domain.
            pseudo_target_label += torch.sum(F.softmax(target_preds, dim=1),
                                             dim=0).view(-1, 1).detach()
            # Update the covariance matrix on the source domain as well.
            cov_mat += torch.mm(F.softmax(source_preds, dim=1).transpose(1, 0),
                                ys_onehot).detach()

        if config['method'] == 'CDAN-E':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source, label_source)
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        elif 'IWCDAN-E' in config['method']:
            classifier_loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (outputs_source, label_source) * weights) / class_num
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer,
                                      weights=weights, device=config["device"])
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        elif config['method'] == 'CDAN':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source, label_source)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        elif 'IWCDAN' in config['method']:
            classifier_loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (outputs_source, label_source) * weights) / class_num
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None,
                                      random_layer, weights=weights)
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        elif config['method'] == 'DANN':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source, label_source)
            transfer_loss = loss.DANN(features, ad_net, config["device"])
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        elif 'IWDAN' in config['method']:
            classifier_loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (outputs_source, label_source) * weights) / class_num
            transfer_loss = loss.IWDAN(features, ad_net, weights)
            total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        elif config['method'] == 'NANN':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source, label_source)
            total_loss = classifier_loss
        else:
            raise ValueError('Method cannot be recognized.')

        total_loss.backward()
        optimizer.step()

        transfer_loss_value = 0 if config['method'] == 'NANN' else transfer_loss.item()
        classifier_loss_value = classifier_loss.item()
        total_loss_value = transfer_loss_value + classifier_loss_value

        if ('IW' in config['method']) and \
                i % (config["dataset_mult_iw"] * len_train_source) == \
                config["dataset_mult_iw"] * len_train_source - 1:
            pseudo_target_label /= train_bs * len_train_source * config["dataset_mult_iw"]
            cov_mat /= train_bs * len_train_source * config["dataset_mult_iw"]
            print(i, np.sum(cov_mat.cpu().detach().numpy()), train_bs * len_train_source)

            # Recompute the importance weights by solving a QP.
            base_network.im_weights_update(source_label_distribution,
                                           pseudo_target_label.cpu().detach().numpy(),
                                           cov_mat.cpu().detach().numpy(),
                                           config["device"])
            current_weights = [round(x, 4) for x in
                               base_network.im_weights.data.cpu().numpy().flatten()]
            weight_err = np.linalg.norm(np.array(current_weights)
                                        - true_weights.cpu().numpy().flatten())
            write_list(config["out_wei_file"], [weight_err] + current_weights)
            print(weight_err, current_weights)
            cov_mat[:] = 0.0
            pseudo_target_label[:] = 0.0
    return best_acc
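# For reference, a minimal sketch of loss.Entropy as called throughout these
# training loops, assuming the standard per-sample Shannon entropy of the
# softmax outputs (this matches the common CDAN implementation, though the
# exact epsilon is an assumption).
import torch

def Entropy(input_):
    # input_: (batch, num_classes) softmax probabilities
    epsilon = 1e-5
    entropy = -input_ * torch.log(input_ + epsilon)
    return torch.sum(entropy, dim=1)  # one entropy value per sample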
def train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer,
          optimizer_ad, epoch, start_epoch, method, D_s, D_t, G_s2t, G_t2s,
          criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity,
          optimizer_G, optimizer_D_t, optimizer_D_s, classifier1, classifier1_optim,
          fake_S_buffer, fake_T_buffer):
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    if len_source > len_target:
        num_iter = len_source
    else:
        num_iter = len_target

    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        # data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        # data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        features_source, outputs_source = model(data_source)
        features_target, outputs_target = model(data_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        loss = nn.CrossEntropyLoss()(outputs.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(outputs)
        output1 = classifier1(features)
        softmax_output1 = nn.Softmax(dim=1)(output1)
        softmax_output = (1 - args.cla_plus_weight) * softmax_output \
            + args.cla_plus_weight * softmax_output1
        if epoch > start_epoch:
            if method == 'CDAN-E':
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN([features, softmax_output], ad_net, entropy,
                                       network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx),
                                       random_layer)
            elif method == 'CDAN':
                loss += loss_func.CDAN([features, softmax_output], ad_net, None, None, random_layer)
            elif method == 'DANN':
                loss += loss_func.DANN(features, ad_net)
            else:
                raise ValueError('Method cannot be recognized.')

        # Cycle consistency between source and target feature spaces
        num_feature = features.size(0)
        real_label = Variable(torch.ones(num_feature))
        fake_label = Variable(torch.zeros(num_feature))

        # Train the generators
        optimizer_G.zero_grad()

        # Identity loss
        same_t = G_s2t(features_target)
        loss_identity_t = criterion_identity(same_t, features_target)
        same_s = G_t2s(features_source)
        loss_identity_s = criterion_identity(same_s, features_source)

        # GAN loss: the generators must fool the discriminators, so the targets
        # are all-ones labels sliced to the prediction's batch size
        fake_t = G_s2t(features_source)
        pred_fake = D_t(fake_t)
        loss_G_s2t = criterion_GAN(pred_fake, real_label.narrow(0, 0, pred_fake.size(0)))
        fake_s = G_t2s(features_target)
        pred_fake = D_s(fake_s)
        loss_G_t2s = criterion_GAN(pred_fake, real_label.narrow(0, 0, pred_fake.size(0)))

        # Cycle loss
        recovered_s = G_t2s(fake_t)
        loss_cycle_sts = criterion_cycle(recovered_s, features_source)
        recovered_t = G_s2t(fake_s)
        loss_cycle_tst = criterion_cycle(recovered_t, features_target)

        # Semantic loss
        pred_recovered_s = model.classifier(recovered_s)
        pred_fake_t = model.classifier(fake_t)
        loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t)
        pred_recovered_t = model.classifier(recovered_t)
        pred_fake_s = model.classifier(fake_s)
        loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s)

        loss_cycle = loss_cycle_tst + loss_cycle_sts
        weight_in_loss_g = args.weight_in_loss_g.split(',')
        loss_G = float(weight_in_loss_g[0]) * (loss_identity_s + loss_identity_t) + \
                 float(weight_in_loss_g[1]) * (loss_G_s2t + loss_G_t2s) + \
                 float(weight_in_loss_g[2]) * loss_cycle + \
                 float(weight_in_loss_g[3]) * (loss_sem_s2t + loss_sem_t2s)

        # Train the softmax classifier on translated source features
        outputs_fake = classifier1(fake_t.detach())
        # Optimize the classifier
        classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, label_source)
        classifier1_optim.zero_grad()
        classifier_loss1.backward()
        classifier1_optim.step()

        total_loss = loss + args.cyc_loss_weight * loss_G
        total_loss.backward()
        optimizer.step()
        optimizer_G.step()

        ###### Discriminator S ######
        optimizer_D_s.zero_grad()
        # Real loss
        pred_real = D_s(features_source.detach())
        loss_D_real = criterion_GAN(pred_real, real_label.narrow(0, 0, pred_real.size(0)))
        # Fake loss
        fake_s = fake_S_buffer.push_and_pop(fake_s)
        pred_fake = D_s(fake_s.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label.narrow(0, 0, pred_fake.size(0)))
        # Total loss
        loss_D_s = loss_D_real + loss_D_fake
        loss_D_s.backward()
        optimizer_D_s.step()
        ###################################

        ###### Discriminator T ######
        optimizer_D_t.zero_grad()
        # Real loss
        pred_real = D_t(features_target.detach())
        loss_D_real = criterion_GAN(pred_real, real_label.narrow(0, 0, pred_real.size(0)))
        # Fake loss
        fake_t = fake_T_buffer.push_and_pop(fake_t)
        pred_fake = D_t(fake_t.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label.narrow(0, 0, pred_fake.size(0)))
        # Total loss
        loss_D_t = loss_D_real + loss_D_fake
        loss_D_t.backward()
        optimizer_D_t.step()

        if epoch > start_epoch:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLoss+G: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item(), total_loss.item()))
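# A plausible sketch of the fake_S_buffer / fake_T_buffer objects used above,
# assuming the standard CycleGAN replay buffer: push_and_pop keeps a pool of
# past generated samples and returns a mix of old and new ones so the
# discriminators do not only see the latest generator output. The pool size of
# 50 is an assumption.
import random
import torch

class ReplayBuffer:
    def __init__(self, max_size=50):
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, batch):
        out = []
        for element in batch.detach():
            element = element.unsqueeze(0)
            if len(self.data) < self.max_size:
                # pool not full yet: store the new sample and return it
                self.data.append(element)
                out.append(element)
            elif random.uniform(0, 1) > 0.5:
                # half the time, return an old sample and replace it in the pool
                i = random.randint(0, self.max_size - 1)
                out.append(self.data[i].clone())
                self.data[i] = element
            else:
                out.append(element)
        return torch.cat(out)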
def train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer,
          optimizer_ad, epoch, start_epoch, method, ccp):
    cl_method = 'ga'  # choices = ['ga', 'nn', 'free', 'pc', 'forward']
    meta_method = 'free' if cl_method == 'ga' else cl_method
    K = 10
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    if len_source > len_target:
        num_iter = len_source
    else:
        num_iter = len_target

    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature, output = model(torch.cat((data_source, data_target), 0))
        # complementary-label loss on the source split (a plain
        # nn.CrossEntropyLoss on label_source would be the supervised alternative)
        loss, loss_vector = chosen_loss_c(f=output.narrow(0, 0, data_source.size(0)),
                                          K=K, labels=label_source, ccp=ccp,
                                          meta_method=meta_method)
        softmax_output = nn.Softmax(dim=1)(output)
        if cl_method == 'ga':
            if torch.min(loss_vector).item() < 0:
                # `device` is assumed to be defined at module scope
                loss_vector_with_zeros = torch.cat(
                    (loss_vector.view(-1, 1),
                     torch.zeros(K, requires_grad=True).view(-1, 1).to(device)), 1)
                min_loss_vector, _ = torch.min(loss_vector_with_zeros, dim=1)
                loss = torch.sum(min_loss_vector)
                loss.backward(retain_graph=True)
                # gradient ascent on the negative part of the loss
                for group in optimizer.param_groups:
                    for p in group['params']:
                        p.grad = -1 * p.grad
            else:
                loss.backward(retain_graph=True)
        else:
            loss.backward(retain_graph=True)
        optimizer.step()
        optimizer.zero_grad()

        if epoch > start_epoch:
            if method == 'CDAN-E':
                softmax_output = Tsharpen(softmax_output)
                entropy = loss_func.Entropy(softmax_output)
                loss2 = loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                       network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx),
                                       random_layer)
            elif method == 'CDAN':
                loss2 = loss_func.CDAN([feature, softmax_output], ad_net, None, None, random_layer)
            elif method == 'DANN':
                loss2 = loss_func.DANN(feature, ad_net)
            else:
                raise ValueError('Method cannot be recognized.')
            loss2.backward()
            optimizer.step()
            optimizer_ad.step()

        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss1: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
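# Tsharpen is not defined in this snippet; a plausible sketch is the usual
# temperature sharpening from the semi-supervised literature, which peaks the
# softmax distribution before the CDAN-E step. Both the formula and T = 0.5
# are assumptions here.
def Tsharpen(p, T=0.5):
    # p: (batch, num_classes) softmax probabilities; smaller T -> sharper
    p = p ** (1.0 / T)
    return p / p.sum(dim=1, keepdim=True)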
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(),
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(),
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs,
                                        shuffle=True, num_workers=4, drop_last=True)
    # test loaders are disabled in this variant:
    # if prep_config["test_10crop"]:
    #     dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(),
    #                                transform=prep_dict["test"][i]) for i in range(10)]
    #     dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs,
    #                                        shuffle=False, num_workers=4) for dset in dsets['test']]
    # else:
    #     dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(),
    #                               transform=prep_dict["test"])
    #     dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs,
    #                                       shuffle=False, num_workers=4)

    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    for i in range(config["num_iterations"]):
        # evaluation is disabled in this variant:
        # if i % config["test_interval"] == config["test_interval"] - 1:
        #     base_network.train(False)
        #     temp_acc = image_classification_test(dset_loaders, base_network,
        #                                          test_10crop=prep_config["test_10crop"])
        #     temp_model = nn.Sequential(base_network)
        #     if temp_acc > best_acc:
        #         best_acc = temp_acc
        #         best_model = temp_model
        #     log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
        #     config["out_file"].write(log_str + "\n")
        #     config["out_file"].flush()
        #     print(log_str)
        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network),
                       osp.join(config["output_path"], "iter_{:05d}_model.pth.tar".format(i)))

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        if i % 10 == 0:
            print('iter: ', i, 'classifier_loss: ', classifier_loss.data,
                  'transfer_loss: ', transfer_loss.data)
        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        total_loss.backward()
        optimizer.step()

    # evaluation is disabled above, so no best model is tracked during
    # training; save the final network instead
    best_model = nn.Sequential(base_network)
    torch.save(best_model, osp.join(config["output_path"], "best_model.pth.tar"))
    return best_acc
def train(args, i, model, ad_net, ad_w_net, train_loader, train_loader1, optimizer,
          optimizer_ad, optimizer_ad_w, epoch, start_epoch, method, random_layer=None):
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    if len_source > len_target:
        num_iter = len_source
    else:
        num_iter = len_target

    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        optimizer_ad_w.zero_grad()
        feature, output = model(torch.cat((data_source, data_target), 0))

        # per-sample weights from the auxiliary domain discriminator
        train_progress = (epoch - 1 + 1. * (batch_idx + 1) / num_iter) / args.epochs
        temp = loss_func.calc_temp(train_progress, alpha=args.alpha, max_iter=1.,
                                   temp_max=args.temp_max)
        w_s, w_t = loss_func.w_from_ad(feature, ad_w_net, temp=temp,
                                       weight=(args.weight == 1))
        w = torch.cat([w_s, w_t], dim=0)

        # weighted source classification loss
        loss = (w_s.detach() * nn.CrossEntropyLoss(reduction='none')(
            output.narrow(0, 0, data_source.size(0)), label_source)).mean()
        softmax_output = nn.Softmax(dim=1)(output)
        if epoch > start_epoch:
            if method == 'CDAN':
                loss += loss_func.CDAN([feature, softmax_output], ad_net,
                                       w_s=w_s, w_t=w_t, random_layer=random_layer)
            elif method == 'DANN':
                loss += loss_func.DANN(feature, ad_net, w_s=w_s, w_t=w_t, hook=True)
            elif method == 'Y_DAN':
                loss += loss_func.Y_DAN([feature, softmax_output], ad_net, w_s=w_s, w_t=w_t)
            else:
                raise ValueError('Method cannot be recognized.')
        loss.backward(retain_graph=True)
        # discard the gradients the main loss pushed into the weighting net
        ad_w_net.zero_grad()
        optimizer.step()
        if epoch > start_epoch:
            optimizer_ad.step()

        # train the weighting discriminator on detached features
        invariance_loss = loss_func.DANN(feature.detach(), ad_w_net, hook=False)
        invariance_loss.backward()
        optimizer_ad_w.step()

        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
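# The DANN/CDAN losses above push reversed gradients into the feature
# extractor (the hook=True argument hints at a gradient-reversal hook). A
# minimal sketch of such a layer, assuming the standard RevGrad formulation;
# the helper names here are illustrative, not this repo's API.
import torch

class GradReverse(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, coeff):
        ctx.coeff = coeff
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # identity forward, negated (and scaled) gradient backward
        return -ctx.coeff * grad_output, None

def grad_reverse(x, coeff=1.0):
    return GradReverse.apply(x, coeff)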