def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
            shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
            shuffle=True, num_workers=4, drop_last=True)

    if prep_config["test_10crop"]:
        for i in range(10):
            dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                transform=prep_dict["test"][i]) for i in range(10)]
            dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \
                                shuffle=False, num_workers=4) for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                                shuffle=False, num_workers=4)

    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()
    # base_network = base_network.cpu()

    ## add additional network for some methods
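    # CDAN conditions the domain discriminator on the classifier output via
    # the outer product of feature and softmax prediction. With many classes
    # that product is huge, so RandomLayer instead projects feature and
    # prediction into a fixed random_dim space (randomized multilinear
    # conditioning).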
    if config["loss"]["random"]:
        random_layer = network.RandomLayer(
            [base_network.output_num(), class_num],
            config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(
            base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    # ad_net = ad_net.cpu()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                    **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
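    # schedule_dict maps an lr_type name (e.g. "inv") to a function that
    # decays every parameter group's learning rate at each iteration.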
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network,
                                       device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    best_model = nn.Sequential(base_network)  # fallback so the final save cannot hit an undefined name
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, \
                base_network, test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network), osp.join(config["output_path"], \
                "iter_{:05d}_model.pth.tar".format(i)))
        print("it_train: {:05d} / {:05d} start".format(
            i, config["num_iterations"]))
        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        # inputs_source, inputs_target, labels_source = inputs_source.cpu(), inputs_target.cpu(), labels_source.cpu()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
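        # CDAN aligns domains conditioned on the classifier predictions; the
        # +E variant additionally weights samples by prediction entropy so
        # confident samples dominate alignment, while DANN aligns marginal
        # features only. calc_coeff ramps the gradient-reversal coefficient
        # from 0 toward 1 over training (a sigmoid-shaped warm-up in the CDAN
        # reference code).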
        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None,
                                      None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        total_loss.backward()
        optimizer.step()
        print("it_train: {:05d} / {:05d} over".format(
            i, config["num_iterations"]))
    torch.save(best_model, osp.join(config["output_path"],
                                    "best_model.pth.tar"))
    return best_acc
def train(args, model, ad_net, random_layer, train_loader, train_loader1,
          optimizer, optimizer_ad, epoch, start_epoch, method, D_s, D_t, G_s2t,
          G_t2s, criterion_Sem, criterion_GAN, criterion_cycle,
          criterion_identity, optimizer_G, optimizer_D_t, optimizer_D_s,
          classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer):
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    num_iter = min(len_source, len_target)

    for batch_idx in range(num_iter - 1):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        # data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        # data_target = data_target.cuda()

        optimizer.zero_grad()
        optimizer_ad.zero_grad()

        features_source, outputs_source = model(data_source)
        features_target, outputs_target = model(data_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        #feature, output = model(torch.cat((data_source, data_target), 0))

        loss = nn.CrossEntropyLoss()(outputs.narrow(0, 0, data_source.size(0)),
                                     label_source)
        softmax_output = nn.Softmax(dim=1)(outputs)

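        # Blend the backbone's predictions with those of the auxiliary
        # classifier (trained below on generated features), weighted by
        # args.cla_plus_weight.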
        output1 = classifier1(features)
        softmax_output1 = nn.Softmax(dim=1)(output1)
        softmax_output = (
            1 - args.cla_plus_weight
        ) * softmax_output + args.cla_plus_weight * softmax_output1

        if epoch > start_epoch:
            if method == 'CDAN-E':
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN(
                    [features, softmax_output], ad_net, entropy,
                    network.calc_coeff(num_iter * (epoch - start_epoch) +
                                       batch_idx), random_layer)
            elif method == 'CDAN':
                loss += loss_func.CDAN([features, softmax_output], ad_net,
                                       None, None, random_layer)
            elif method == 'DANN':
                loss += loss_func.DANN(features, ad_net)
            else:
                raise ValueError('Method cannot be recognized.')

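        # Feature-level CycleGAN: G_s2t / G_t2s translate between source and
        # target feature spaces, D_s / D_t discriminate real from generated
        # features, and identity, GAN, cycle and semantic losses are combined
        # below with the weights parsed from args.weight_in_loss_g.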
        # GAN targets sized to one domain's share of the batch: the
        # discriminators see features_source / features_target, not the
        # concatenated batch (the original sized them with features.size(0))
        real_label = torch.ones(features_source.size(0))
        # real_label = real_label.cuda()
        fake_label = torch.zeros(features_source.size(0))
        # fake_label = fake_label.cuda()

        # Train the generators
        optimizer_G.zero_grad()

        # Identity loss
        same_t = G_s2t(features_target)
        loss_identity_t = criterion_identity(same_t, features_target)

        same_s = G_t2s(features_source)
        loss_identity_s = criterion_identity(same_s, features_source)

        # GAN loss: the generators try to make the discriminators output
        # "real" (the original passed label_source.float(), i.e. class
        # indices, as the GAN target)
        fake_t = G_s2t(features_source)
        pred_fake = D_t(fake_t)
        loss_G_s2t = criterion_GAN(pred_fake, real_label)

        fake_s = G_t2s(features_target)
        pred_fake = D_s(fake_s)
        loss_G_t2s = criterion_GAN(pred_fake, real_label)

        # cycle loss
        recovered_s = G_t2s(fake_t)
        loss_cycle_sts = criterion_cycle(recovered_s, features_source)

        recovered_t = G_s2t(fake_s)
        loss_cycle_tst = criterion_cycle(recovered_t, features_target)

        # sem loss
        pred_recovered_s = model.classifier(recovered_s)
        pred_fake_t = model.classifier(fake_t)
        loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t)

        pred_recovered_t = model.classifier(recovered_t)
        pred_fake_s = model.classifier(fake_s)
        loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s)

        loss_cycle = loss_cycle_tst + loss_cycle_sts
        weight_in_loss_g = args.weight_in_loss_g.split(',')
        loss_G = float(weight_in_loss_g[0]) * (loss_identity_s + loss_identity_t) + \
                 float(weight_in_loss_g[1]) * (loss_G_s2t + loss_G_t2s) + \
                 float(weight_in_loss_g[2]) * loss_cycle + \
                 float(weight_in_loss_g[3]) * (loss_sem_s2t + loss_sem_t2s)

        # Train the auxiliary softmax classifier on generated target-like features
        outputs_fake = classifier1(fake_t.detach())
        # Classifier update
        classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, label_source)
        classifier1_optim.zero_grad()
        classifier_loss1.backward()
        classifier1_optim.step()

        total_loss = loss + args.cyc_loss_weight * loss_G
        total_loss.backward()
        optimizer.step()
        optimizer_G.step()

        ###### Discriminator S ######
        optimizer_D_s.zero_grad()

        # Real loss
        pred_real = D_s(features_source.detach())
        loss_D_real = criterion_GAN(pred_real, real_label)

        # Fake loss
        fake_s = fake_S_buffer.push_and_pop(fake_s)
        pred_fake = D_s(fake_s.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label)

        # Total loss
        loss_D_s = loss_D_real + loss_D_fake
        loss_D_s.backward()

        optimizer_D_s.step()
        ###################################

        ###### Discriminator T ######
        optimizer_D_t.zero_grad()

        # Real loss
        pred_real = D_t(features_target.detach())
        loss_D_real = criterion_GAN(pred_real, real_label)

        # Fake loss
        fake_t = fake_T_buffer.push_and_pop(fake_t)
        pred_fake = D_t(fake_t.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label)

        # Total loss
        loss_D_t = loss_D_real + loss_D_fake
        loss_D_t.backward()
        optimizer_D_t.step()

        if epoch > start_epoch:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLoss+G: {:.6f}'
                .format(epoch, batch_idx * args.batch_size,
                        num_iter * args.batch_size,
                        100. * batch_idx / num_iter, loss.item(),
                        total_loss.item()))
## Example 3
def train(args, model, ad_net, random_layer, train_loader, train_loader1,
          optimizer, optimizer_ad, epoch, start_epoch, method, ccp):
    cl_method = 'ga'  # choices: ['ga', 'nn', 'free', 'pc', 'forward']
    meta_method = 'free' if cl_method == 'ga' else cl_method
    K = 10
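    # cl_method selects the complementary-label risk estimator: 'ga' is the
    # gradient-ascent variant of the non-negative estimator, 'nn' the
    # non-negative correction, and 'free'/'pc'/'forward' alternative
    # estimators; K is the number of classes.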

    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    num_iter = max(len_source, len_target)

    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature, output = model(torch.cat((data_source, data_target), 0))
        #err_s_label, loss_vector = non_negative_loss (f=output.narrow(0, 0, data_source.size(0)), K=10, labels=label_source, ccp=ccp,beta=0)
        loss, loss_vector = chosen_loss_c(f=output.narrow(
            0, 0, data_source.size(0)),
                                          K=K,
                                          labels=label_source,
                                          ccp=ccp,
                                          meta_method=meta_method)
        #loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(output)
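        # 'ga': when any per-class loss turns negative, minimize the
        # zero-clipped losses and flip the gradients (ascend) to keep the
        # estimated risk non-negative.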
        if cl_method == 'ga':
            if torch.min(loss_vector).item() < 0:
                loss_vector_with_zeros = torch.cat(
                    (loss_vector.view(-1, 1), torch.zeros(
                        K, requires_grad=True).view(-1, 1).to(device)), 1)
                min_loss_vector, _ = torch.min(loss_vector_with_zeros, dim=1)
                loss = torch.sum(min_loss_vector)
                loss.backward(retain_graph=True)
                for group in optimizer.param_groups:
                    for p in group['params']:
                        p.grad = -1 * p.grad
            else:
                loss.backward(retain_graph=True)
        else:
            loss.backward(retain_graph=True)
        optimizer.step()
        optimizer.zero_grad()
        if epoch > start_epoch:
            if method == 'CDAN-E':
                softmax_output = Tsharpen(softmax_output)
                entropy = loss_func.Entropy(softmax_output)
                loss2 = loss_func.CDAN(
                    [feature, softmax_output], ad_net, entropy,
                    network.calc_coeff(num_iter * (epoch - start_epoch) +
                                       batch_idx), random_layer)
            elif method == 'CDAN':
                loss2 = loss_func.CDAN([feature, softmax_output], ad_net, None,
                                       None, random_layer)
            elif method == 'DANN':
                loss2 = loss_func.DANN(feature, ad_net)
            else:
                raise ValueError('Method cannot be recognized.')
        if epoch > start_epoch:
            loss2.backward()
            optimizer.step()
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss1: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
## Example 4
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
                                        shuffle=True, num_workers=0, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
                                        shuffle=True, num_workers=0, drop_last=True)

    if prep_config["test_10crop"]:
        for i in range(10):
            dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                       transform=prep_dict["test"][i]) for i in range(10)]
            dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \
                                               shuffle=False, num_workers=0) for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                                          shuffle=False, num_workers=0)

    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer(
            [base_network.output_num(), class_num],
            config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(
            base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                                         **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network,
                                       device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    best_acc = 0.0
    best_model = nn.Sequential(base_network)
    each_log = ""
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:

            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, \
                                                 base_network, test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}, transfer_loss:{:.4f}, classifier_loss:{:.4f}, total_loss:{:.4f}" \
                .format(i, temp_acc, transfer_loss.item(), classifier_loss.item(), total_loss.item())
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)

            config["out_file"].write(each_log)
            config["out_file"].flush()
            each_log = ""
        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = \
            inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
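        # pseudo-label the target batch with the classifier's argmax so the
        # mdd loss below has labels for both domains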
        labels_target_fake = torch.max(nn.Softmax(dim=1)(outputs_target), 1)[1]
        labels = torch.cat((labels_source, labels_target_fake))
        entropy = loss.Entropy(softmax_out)
        transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                  network.calc_coeff(i), random_layer)

        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        mdd_loss = loss.mdd_loss(features=features,
                                 labels=labels,
                                 left_weight=args.left_weight,
                                 right_weight=args.right_weight)
        max_entropy_loss = loss.EntropicConfusion(features)
        total_loss = loss_params["trade_off"] * transfer_loss \
                     + args.cls_weight * classifier_loss \
                     + args.mdd_weight * mdd_loss \
                     + args.entropic_weight * max_entropy_loss
        total_loss.backward()
        optimizer.step()
        log_str = "iter: {:05d},transfer_loss:{:.4f}, classifier_loss:{:.4f}, mdd_loss:{:4f}," \
                  "max_entropy_loss:{:.4f},total_loss:{:.4f}" \
            .format(i, transfer_loss.item(), classifier_loss.item(), mdd_loss.item(),
                    max_entropy_loss.item(), total_loss.item())
        each_log += log_str + "\n"

    torch.save(
        best_model, config['model_output_path'] + "{}_{}_p-{}_e-{}".format(
            config['log_name'], str(best_acc), str(config["mdd_weight"]),
            str(config["entropic_weight"])))
    return best_acc
def train(args, model, ad_net, source_samples, source_labels, target_samples,
          target_labels, optimizer, optimizer_ad, epoch, start_epoch, method,
          source_label_distribution, out_wei_file, cov_mat,
          pseudo_target_label, class_weights, true_weights):
    model.train()

    cov_mat[:] = 0.0
    pseudo_target_label[:] = 0.0

    len_source = source_labels.shape[0]
    len_target = target_labels.shape[0]

    size = max(len_source, len_target)
    num_iter = size // args.batch_size

    for batch_idx in range(num_iter):
        t = time.time()
        source_idx = np.random.choice(len_source, args.batch_size)
        target_idx = np.random.choice(len_target, args.batch_size)
        data_source, label_source = source_samples[source_idx], source_labels[
            source_idx]
        data_target, _ = target_samples[target_idx], target_labels[target_idx]

        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature, output = model(torch.cat((data_source, data_target), 0))

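        # Importance weighting ('IW*' methods): reweight the source loss by
        # estimated target/source class ratios (true ratios for 'ORACLE';
        # class count hard-coded to 10 here), accumulating the statistics
        # needed to re-estimate the weights at the end of the epoch.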
        if 'IW' in method:
            ys_onehot = torch.zeros(args.batch_size, 10).to(args.device)
            ys_onehot.scatter_(1, label_source.view(-1, 1), 1)
            # Compute weights on source data.
            if 'ORACLE' in method:
                weights = torch.mm(ys_onehot, true_weights)
            else:
                weights = torch.mm(ys_onehot, model.im_weights)

            source_preds, target_preds = output[:args.batch_size], output[
                args.batch_size:]
            # Compute the aggregated distribution of pseudo-label on the target domain.
            pseudo_target_label += torch.sum(F.softmax(target_preds, dim=1),
                                             dim=0).view(-1, 1).detach()
            # Update the covariance matrix on the source domain as well.
            cov_mat += torch.mm(
                F.softmax(source_preds, dim=1).transpose(1, 0),
                ys_onehot).detach()

            loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (output.narrow(0, 0, data_source.size(0)), label_source) *
                weights) / 10.0
        else:
            loss = nn.CrossEntropyLoss()(output.narrow(0, 0,
                                                       data_source.size(0)),
                                         label_source)

        if epoch > start_epoch:
            if method == 'CDAN-E':
                softmax_output = nn.Softmax(dim=1)(output)
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN(
                    [feature, softmax_output],
                    ad_net,
                    entropy,
                    network.calc_coeff(num_iter * (epoch - start_epoch) +
                                       batch_idx),
                    None,
                    device=args.device)

            elif 'IWCDAN-E' in method:
                softmax_output = nn.Softmax(dim=1)(output)
                entropy = loss_func.Entropy(softmax_output)
                loss += loss_func.CDAN(
                    [feature, softmax_output],
                    ad_net,
                    entropy,
                    network.calc_coeff(num_iter * (epoch - start_epoch) +
                                       batch_idx),
                    None,
                    weights=weights,
                    device=args.device)

            elif method == 'CDAN':
                softmax_output = nn.Softmax(dim=1)(output)
                loss += loss_func.CDAN([feature, softmax_output],
                                       ad_net,
                                       None,
                                       None,
                                       None,
                                       device=args.device)

            elif 'IWCDAN' in method:
                softmax_output = nn.Softmax(dim=1)(output)
                loss += loss_func.CDAN([feature, softmax_output],
                                       ad_net,
                                       None,
                                       None,
                                       None,
                                       weights=weights,
                                       device=args.device)

            elif method == 'DANN':
                loss += loss_func.DANN(feature, ad_net, args.device)

            elif 'IWDAN' in method:
                dloss = loss_func.IWDAN(feature, ad_net, weights)
                loss += args.mu * dloss

            elif method == 'NANN':
                pass

            else:
                raise ValueError('Method cannot be recognized.')

        loss.backward()
        optimizer.step()

        if epoch > start_epoch and method != 'NANN':
            optimizer_ad.step()

    if 'IW' in method and epoch > start_epoch:
        pseudo_target_label /= args.batch_size * num_iter
        cov_mat /= args.batch_size * num_iter
        # Recompute the importance weight by solving a QP.
        model.im_weights_update(source_label_distribution,
                                pseudo_target_label.cpu().detach().numpy(),
                                cov_mat.cpu().detach().numpy(), args.device)
        current_weights = [
            round(x, 4) for x in model.im_weights.data.cpu().numpy().flatten()
        ]
        write_list(out_wei_file, [
            np.linalg.norm(current_weights -
                           true_weights.cpu().numpy().flatten())
        ] + current_weights)
        print(
            np.linalg.norm(current_weights -
                           true_weights.cpu().numpy().flatten()),
            current_weights)
## Example 6
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
                                        shuffle=True, num_workers=4, drop_last=True)

    if prep_config["test_10crop"]:
        for i in range(10):
            dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                       transform=prep_dict["test"][i]) for i in range(10)]
            dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \
                                               shuffle=False, num_workers=4) for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                                          shuffle=False, num_workers=4)

    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()


    ## loaders used only for periodic pseudo-labeling / clustering
    cluster_data_loader = {}
    cluster_data_loader["source"] = DataLoader(dsets["source"], batch_size=100, \
                                               shuffle=True, num_workers=0, drop_last=True)
    cluster_data_loader["target"] = DataLoader(dsets["target"], batch_size=100, \
                                               shuffle=True, num_workers=0, drop_last=True)

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num], config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                                         **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    # dset_loaders["ps_target"]=[]
    ## train
    len_train_source = len(dset_loaders["source"])
    # len_train_target = len(dset_loaders["ps_target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    for i in range(config["num_iterations"]):
        lamb = adaptation_factor((i + 1) / 10000)
        cls_lamb = adaptation_factor(5 * (i + 1) / 10000)
        epoch = int(i / len_train_source)
        if i % len_train_source == 0:
            testing = True
            pl_update = True
            print_loss = True
        if epoch % 5 == 0 and pl_update:
            pl_update = False
            pseudo_labeled_targets, target_g_ctr, source_g_ctr = pseudo_labeling(
                base_network, cluster_data_loader, class_num)
            global_source_ctr = source_g_ctr.detach_()
            global_target_ctr = target_g_ctr.detach_()
            if len(pseudo_labeled_targets["label_list"]) !=0:
                print("new pl at epoch {}".format(epoch))

                pseudo_dataset = PS_ImageList(pseudo_labeled_targets, transform=prep_dict["target"])

                dset_loaders["ps_target"] = DataLoader(pseudo_dataset, batch_size=train_bs, \
                                                       shuffle=False, num_workers=0, drop_last=True)
                len_train_target = len(dset_loaders["ps_target"])
            else:
                print("no pl at epoch {}".format(epoch))
            # print("pseudo labeling done")
        # print(i)




        # if i % config["test_interval"] == config["test_interval"] - 1:

        if epoch % 5 ==0 and testing and i>0:


            base_network.train(False)
            temp_acc,v_loss = image_classification_test(dset_loaders, \
                                                 base_network, test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
            testing = False

            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            print("epoch: {} ".format(int(i / len_train_source)))
            print("time: {} ".format(current_time))
            print("best acc: {} ".format(best_acc))
            print("loss: {} ".format(v_loss))
            print("adaptation rate : {}".format(lamb))
            print("learning rare : {} {} {} {}".format(optimizer.param_groups[0]["lr"],optimizer.param_groups[1]["lr"],optimizer.param_groups[2]["lr"],optimizer.param_groups[3]["lr"]))
            print("------------")
        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network), osp.join(config["output_path"], \
                                                             "iter_{:05d}_model.pth.tar".format(i)))

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)

        optimizer.zero_grad()

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["ps_target"])
        try:
            inputs_source, labels_source, _ = next(iter_source)
            inputs_target, labels_target = next(iter_target)
        except StopIteration:
            # the pseudo-labeled loader can be shorter than expected; restart it
            iter_target = iter(dset_loaders["ps_target"])
            inputs_target, labels_target = next(iter_target)

        inputs_source, inputs_target = inputs_source.cuda(), inputs_target.cuda()
        labels_source, labels_target = labels_source.cuda(), labels_target.cuda()

        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)

        ## class-aware centroid alignment: exponential moving average of the
        ## per-class batch centroids in each domain
        batch_source_centroids = utils.get_batch_centers(features_source, labels_source, class_num)
        batch_target_centroids = utils.get_batch_centers(features_target, labels_target, class_num)

        batch_source_centroids = ctr_adapt_factor * global_source_ctr + (1 - ctr_adapt_factor) * batch_source_centroids
        batch_target_centroids = ctr_adapt_factor * global_target_ctr + (1 - ctr_adapt_factor) * batch_target_centroids
        global_source_ctr = batch_source_centroids.clone().detach_()
        global_target_ctr = batch_target_centroids.clone().detach_()
        alignment_index = []
        identity = np.eye(class_num)
        ctr_alignment_count = 0
        pos, post, neg, negt = [], [], [], []
        index_s = np.empty([0, 1], dtype=int)  # int dtype so these arrays can index tensors below
        index_t = np.empty([0, 1], dtype=int)

        labels = labels_source.cpu().data.numpy()
        labelt = labels_target.cpu().data.numpy()

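        # N-pair sampling: pick one anchor per class present in the batch; the
        # centroids of every other class act as negatives. Note the n_pairs_* /
        # n_neg_* tensors built here are not used by the active loss below,
        # which indexes centroids via index_s / pos / neg instead.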
        ##### n-pair construction
        n_pairs = []

        for label in set(labels):
            label_mask = (labels == label)
            label_indices = np.where(label_mask)[0]
            if len(label_indices) < 1:
                continue
            anchor = np.random.choice(label_indices, 1, replace=False)
            n_pairs.append([anchor, np.array([label])])

        n_pairs = np.array(n_pairs)

        n_negatives = []
        for k in range(len(n_pairs)):  # renamed from i to avoid shadowing the training-iteration counter
            negative = np.concatenate([n_pairs[:k, 1], n_pairs[k + 1:, 1]])
            n_negatives.append(negative)

        n_negatives = np.array(n_negatives)
        n_pairs_s = torch.LongTensor(n_pairs)
        n_neg_s = torch.LongTensor(n_negatives)

        n_pairs = []
        for label in set(labelt):
            label_mask = (labelt == label)
            label_indices = np.where(label_mask)[0]
            if len(label_indices) < 1:
                continue
            anchor = np.random.choice(label_indices, 1, replace=False)
            n_pairs.append([anchor, np.array([label])])

        n_pairs = np.array(n_pairs)

        n_negatives = []
        for k in range(len(n_pairs)):  # renamed from i to avoid shadowing the training-iteration counter
            negative = np.concatenate([n_pairs[:k, 1], n_pairs[k + 1:, 1]])
            n_negatives.append(negative)

        n_negatives = np.array(n_negatives)
        n_pairs_t = torch.LongTensor(n_pairs)
        n_neg_t = torch.LongTensor(n_negatives)

        for it in range(class_num):
            label_mask = (labels == it)
            label_maskt = (labelt == it)
            idx = np.where(label_mask)[0]
            idxt = np.where(label_maskt)[0]
            if len(idx) != 0:
                index_s = np.append(index_s, idx)
                pos += [it for cc in range(len(idx))]
                mask = 1 - identity[it, :]
                neg_id = np.nonzero(mask.flatten())[0].flatten()
                neg += [[neg_id] for cc in range(len(idx))]

            if len(idxt) != 0:
                index_t = np.append(index_t, idxt)
                post += [it for cc in range(len(idxt))]
                maskt = 1 - identity[it, :]
                neg_idt = np.nonzero(maskt.flatten())[0].flatten()
                negt += [[neg_idt] for cc in range(len(idxt))]

            # classes present in both batches take part in centroid alignment
            if len(idx) != 0 and len(idxt) != 0:
                ctr_alignment_count += 1
                alignment_index += [it]
        ctr_alignment_loss = 0
        anchors_s = features_source[index_s.flatten(), :]
        positive_s = global_source_ctr[pos, :]
        negative_s = global_source_ctr[neg].squeeze(1)

        anchors_t = features_target[index_t.flatten(), :]
        positive_t = global_target_ctr[post, :]
        negative_t = global_target_ctr[negt].squeeze(1)
        # average of the source and target n-pair losses (the original summed
        # the source loss with half the target loss, almost surely a
        # precedence slip)
        FAT_loss = (n_pair_loss(anchors_s, positive_s, negative_s, class_num, train_bs)
                    + n_pair_loss(anchors_t, positive_t, negative_t, class_num, train_bs)) / 2

        if len(alignment_index) != 0:
            ctr_alignment_loss = torch.sum(utils.cosine_distance(
                batch_source_centroids[alignment_index],
                batch_target_centroids[alignment_index], cross=False))
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')
        classifier_loss = nn.CrossEntropyLoss()(outputs_source / 2, labels_source)  # logits softened with temperature 2

        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
        if lamb > 0.1:
            cls_lamb = 1.0
        else:
            cls_lamb = 10 * lamb

        # alternative objectives kept from experiments (these actually use
        # FAT_loss, ctr_alignment_loss and cls_lamb, which are otherwise unused):
        # total_loss = lamb * (FAT_loss + 10 * ctr_alignment_loss) + transfer_loss + cls_lamb * classifier_loss
        # total_loss = transfer_loss + lamb * (FAT_loss + ctr_alignment_loss) + classifier_loss
        total_loss.backward()
        optimizer.step()
        if epoch % 5 == 0 and print_loss:
            print("fat loss ", FAT_loss)
            print("ctr align:  ", ctr_alignment_loss)
            print("tot: ", total_loss)
            print("clss: ", classifier_loss)
            print("trs: ", transfer_loss)
            print("++++++++++++++++++++++++end of epoch++++++++++++++++++++")
            print_loss = False
## Example 7
def train(config):

    ####################################################
    # Data setting
    ####################################################

    prep_dict = {}  # data pre-processing transforms
    prep_dict["source"] = prep.image_train(**config['prep']['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    prep_dict["test"] = prep.image_test(**config['prep']['params'])

    dsets = {}
    dsets["source"]= datasets.ImageFolder(config['s_dset_path'], transform=prep_dict["source"])
    dsets["target"]= datasets.ImageFolder(config['t_dset_path'], transform=prep_dict['target'])
    dsets['test']=datasets.ImageFolder(config['t_dset_path'],transform=prep_dict['test'])


    data_config = config["data"]
    train_source_bs = data_config["source"]["batch_size"]  # the original used the source batch size for both source and target; fixed here
    train_target_bs = data_config['target']['batch_size']
    test_bs = data_config["test"]["batch_size"]

    dset_loaders = {}
    dset_loaders["source"]=DataLoader(dsets["source"], batch_size=train_source_bs, shuffle=True, num_workers=4, drop_last=True) # 원본은 drop_last=True
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_target_bs, shuffle=True, num_workers=4, drop_last=True)
    dset_loaders['test'] = DataLoader(dsets['test'], batch_size=test_bs, shuffle=False, num_workers=4, drop_last=False)

    ####################################################
    # Network Setting
    ####################################################

    class_num = config["network"]['params']['class_num']

    net_config = config["network"]
    """
        config['network'] = {'name': network.ResNetFC,
                         'params': {'resnet_name': args.net,
                                    'use_bottleneck': True,
                                    'bottleneck_dim': 256,
                                    'new_cls': True,
                                    'class_num': args.class_num}
                         }
    """

    base_network = net_config["name"](**net_config["params"]) #network.ResNetFC
    base_network = base_network.cuda() # ResNetFC(Resnet, True, 256, True, 12)

    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num],
                                           config["loss"]["random_dim"]
                                           )
        random_layer.cuda()
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num()*class_num, 1024)

    ad_net = ad_net.cuda()

    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ####################################################
    # Env Setting
    ####################################################

    #gpus = config['gpu'].split(',')
    #if len(gpus) > 1 :
        #ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        #base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])

    ####################################################
    # Optimizer Setting
    ####################################################

    optimizer_config = config['optimizer']
    optimizer = optimizer_config["type"](parameter_list, **(optimizer_config["optim_params"]))
    # optim.SGD
    '''
    config['optimizer'] = {'type': optim.SGD,
                           'optim_params': {'lr': args.lr,
                                            'momentum': 0.9,
                                            'weight_decay': 0.0005,
                                            'nesterov': True},
                           'lr_type': "inv",
                           'lr_param': {"lr": args.lr,
                                        'gamma': 0.001,
                                        'power': 0.75
                                        }
    '''
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group['lr'])
    schedule_param = optimizer_config['lr_param']
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]


    ####################################################
    # Train
    ####################################################

    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])

    transfer_loss_value = 0.0
    classifier_loss_value = 0.0
    total_loss_value = 0.0

    best_acc = 0.0

    for i in range(config["num_iterations"]): # num_iterations = batch 수
        sys.stdout.write("Iteration : {} \r".format(i))
        sys.stdout.flush()

        loss_params = config["loss"]

        base_network.train(True)
        ad_net.train(True)

        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])

        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)

        inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()
        inputs_target = inputs_target.cuda()

        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)

        features = torch.cat((features_source, features_target), dim = 0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)

        softmax_out = nn.Softmax(dim=1)(outputs)

        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)  # was "pass" with a TODO ("clean up later"); without it transfer_loss is undefined below
        else:
            raise ValueError('Method cannot be recognized')

        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss

        total_loss.backward()
        optimizer.step()



        ####################################################
        # Test
        ####################################################
        if i % config["test_interval"] == config["test_interval"] - 1:
            # runs every test_interval iterations
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, base_network)
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
                ACC = round(best_acc, 2) * 100
                torch.save(best_model, os.path.join(config["output_path"], "iter_{}_model.pth.tar".format(ACC)))
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
def train(config):

    ## Define start time
    start_time = time.time()

    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    print("Preparing data", flush=True)
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    root_folder = data_config["root_folder"]
    dsets["source"] = ImageList(open(osp.join(root_folder, data_config["source"]["list_path"])).readlines(), \
                                transform=prep_dict["source"], root_folder=root_folder, ratios=config["ratios_source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
            shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(osp.join(root_folder, data_config["target"]["list_path"])).readlines(), \
                                transform=prep_dict["target"], root_folder=root_folder, ratios=config["ratios_target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
            shuffle=True, num_workers=4, drop_last=True)

    dsets["test"] = ImageList(open(
        osp.join(root_folder, data_config["test"]["list_path"])).readlines(),
                              transform=prep_dict["test"],
                              root_folder=root_folder,
                              ratios=config["ratios_test"])
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                            shuffle=False, num_workers=4)

    test_path = os.path.join(root_folder, data_config["test"]["dataset_path"])
    if os.path.exists(test_path):
        print('Found existing dataset for test', flush=True)
        with open(test_path, 'rb') as f:
            [test_samples, test_labels] = pickle.load(f)
            test_labels = torch.LongTensor(test_labels).to(config["device"])
    else:
        print('Missing test dataset', flush=True)
        print('Building dataset for test and writing to {}'.format(test_path),
              flush=True)
        dset_test = ImageList(open(
            osp.join(root_folder,
                     data_config["test"]["list_path"])).readlines(),
                              transform=prep_dict["test"],
                              root_folder=root_folder,
                              ratios=config['ratios_test'])
        loaded_dset_test = LoadedImageList(dset_test)
        test_samples = loaded_dset_test.samples.numpy()
        test_labels = loaded_dset_test.targets.numpy()
        with open(test_path, 'wb') as f:
            pickle.dump([test_samples, test_labels], f)

    class_num = config["network"]["params"]["class_num"]
    test_samples, test_labels = sample_ratios(test_samples, test_labels,
                                              config['ratios_test'])

    # compute labels distribution on the source and target domain
    source_label_distribution = np.zeros((class_num))
    for img in dsets["source"].imgs:
        source_label_distribution[img[1]] += 1
    print("Total source samples: {}".format(np.sum(source_label_distribution)),
          flush=True)
    print("Source samples per class: {}".format(source_label_distribution),
          flush=True)
    source_label_distribution /= np.sum(source_label_distribution)
    print("Source label distribution: {}".format(source_label_distribution),
          flush=True)
    target_label_distribution = np.zeros((class_num))
    for img in dsets["target"].imgs:
        target_label_distribution[img[1]] += 1
    print("Total target samples: {}".format(np.sum(target_label_distribution)),
          flush=True)
    print("Target samples per class: {}".format(target_label_distribution),
          flush=True)
    target_label_distribution /= np.sum(target_label_distribution)
    print("Target label distribution: {}".format(target_label_distribution),
          flush=True)
    mixture = (source_label_distribution + target_label_distribution) / 2
    jsd = (scipy.stats.entropy(source_label_distribution, qk=mixture) \
            + scipy.stats.entropy(target_label_distribution, qk=mixture)) / 2
    print("JSD : {}".format(jsd), flush=True)

    test_label_distribution = np.zeros((class_num))
    for img in test_labels:
        test_label_distribution[int(img.item())] += 1
    print("Test samples per class: {}".format(test_label_distribution),
          flush=True)
    test_label_distribution /= np.sum(test_label_distribution)
    print("Test label distribution: {}".format(test_label_distribution),
          flush=True)
    write_list(config["out_wei_file"],
               [round(x, 4) for x in test_label_distribution])
    write_list(config["out_wei_file"],
               [round(x, 4) for x in source_label_distribution])
    write_list(config["out_wei_file"],
               [round(x, 4) for x in target_label_distribution])
    true_weights = torch.tensor(
        target_label_distribution / source_label_distribution,
        dtype=torch.float,
        requires_grad=False)[:, None].to(config["device"])
    print("True weights : {}".format(true_weights[:, 0].cpu().numpy()))
    config["out_wei_file"].write(str(jsd) + "\n")

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.to(config["device"])

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer(
            [base_network.output_num(), class_num],
            config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        if 'CDAN' in config['method']:
            ad_net = network.AdversarialNetwork(
                base_network.output_num() * class_num, 1024)
        else:
            ad_net = network.AdversarialNetwork(base_network.output_num(),
                                                1024)
    if config["loss"]["random"]:
        random_layer.to(config["device"])
    ad_net = ad_net.to(config["device"])
    parameter_list = ad_net.get_parameters() + base_network.get_parameters()
    parameter_list[-1]["lr_mult"] = config["lr_mult_im"]

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                    **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    # Maintain two quantities for the QP.
    cov_mat = torch.zeros(class_num, class_num, dtype=torch.float32,
                          requires_grad=False, device=config["device"])
    pseudo_target_label = torch.zeros(class_num, 1, dtype=torch.float32,
                                      requires_grad=False, device=config["device"])
    # Maintain one weight vector for BER.
    class_weights = torch.tensor(1.0 / source_label_distribution,
                                 dtype=torch.float,
                                 requires_grad=False).to(config["device"])

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network,
                                       device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0

    print("Preparations done in {:.0f} seconds".format(time.time() -
                                                       start_time),
          flush=True)
    print("Starting training for {} iterations using method {}".format(
        config["num_iterations"], config['method']),
          flush=True)
    start_time_test = start_time = time.time()
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test_loaded(
                test_samples, test_labels, base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
            log_str = "  iter: {:05d}, sec: {:.0f}, class: {:.5f}, da: {:.5f}, precision: {:.5f}".format(
                i,
                time.time() - start_time_test, classifier_loss_value,
                transfer_loss_value, temp_acc)
            config["out_log_file"].write(log_str + "\n")
            config["out_log_file"].flush()
            print(log_str, flush=True)
            if 'IW' in config['method']:
                current_weights = [
                    round(x, 4) for x in
                    base_network.im_weights.data.cpu().numpy().flatten()
                ]
                # write_list(config["out_wei_file"], current_weights)
                print(current_weights, flush=True)
            start_time_test = time.time()
        if i % 500 == 499:  # progress report every 500 iterations
            print("{} iterations in {} seconds".format(
                i,
                time.time() - start_time),
                  flush=True)

        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, label_source = next(iter_source)
        inputs_target, _ = next(iter_target)
        inputs_source, inputs_target, label_source = inputs_source.to(
            config["device"]), inputs_target.to(
                config["device"]), label_source.to(config["device"])
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)

        if 'IW' in config['method']:
            ys_onehot = torch.zeros(train_bs, class_num).to(config["device"])
            ys_onehot.scatter_(1, label_source.view(-1, 1), 1)

            # Compute weights on source data.
            if 'ORACLE' in config['method']:
                weights = torch.mm(ys_onehot, true_weights)
            else:
                weights = torch.mm(ys_onehot, base_network.im_weights)

            source_preds, target_preds = outputs[:train_bs], outputs[train_bs:]
            # Compute the aggregated distribution of pseudo-label on the target domain.
            pseudo_target_label += torch.sum(F.softmax(target_preds, dim=1),
                                             dim=0).view(-1, 1).detach()
            # Update the covariance matrix on the source domain as well.
            cov_mat += torch.mm(
                F.softmax(source_preds, dim=1).transpose(1, 0),
                ys_onehot).detach()
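            # Over one IW period these running sums estimate the target
            # pseudo-label marginal E_t[softmax(f(x))] and the source soft
            # confusion matrix E_s[softmax(f(x)) y^T], the two inputs to the
            # QP that re-fits the importance weights below.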

        if config['method'] == 'CDAN-E':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source,
                                                    label_source)
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
            total_loss = loss_params["trade_off"] * \
                transfer_loss + classifier_loss

        elif 'IWCDAN-E' in config['method']:

            classifier_loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (outputs_source, label_source) * weights) / class_num

            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out],
                                      ad_net,
                                      entropy,
                                      network.calc_coeff(i),
                                      random_layer,
                                      weights=weights,
                                      device=config["device"])
            total_loss = loss_params["trade_off"] * \
                transfer_loss + classifier_loss

        elif config['method'] == 'CDAN':

            classifier_loss = nn.CrossEntropyLoss()(outputs_source,
                                                    label_source)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None,
                                      None, random_layer)
            total_loss = loss_params[
                "trade_off"] * transfer_loss + classifier_loss

        elif 'IWCDAN' in config['method']:

            classifier_loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (outputs_source, label_source) * weights) / class_num

            transfer_loss = loss.CDAN([features, softmax_out],
                                      ad_net,
                                      None,
                                      None,
                                      random_layer,
                                      weights=weights)
            total_loss = loss_params["trade_off"] * \
                transfer_loss + classifier_loss

        elif config['method'] == 'DANN':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source,
                                                    label_source)
            transfer_loss = loss.DANN(features, ad_net, config["device"])
            total_loss = loss_params["trade_off"] * \
                transfer_loss + classifier_loss

        elif 'IWDAN' in config['method']:

            classifier_loss = torch.mean(
                nn.CrossEntropyLoss(weight=class_weights, reduction='none')
                (outputs_source, label_source) * weights) / class_num

            transfer_loss = loss.IWDAN(features, ad_net, weights)
            total_loss = loss_params["trade_off"] * \
                transfer_loss + classifier_loss

        elif config['method'] == 'NANN':
            classifier_loss = nn.CrossEntropyLoss()(outputs_source,
                                                    label_source)
            total_loss = classifier_loss
        else:
            raise ValueError('Method cannot be recognized.')

        total_loss.backward()
        optimizer.step()

        transfer_loss_value = 0 if config['method'] == 'NANN' else transfer_loss.item()
        classifier_loss_value = classifier_loss.item()
        total_loss_value = transfer_loss_value + classifier_loss_value

        iw_update_period = config["dataset_mult_iw"] * len_train_source
        if 'IW' in config['method'] and i % iw_update_period == iw_update_period - 1:

            pseudo_target_label /= train_bs * iw_update_period
            cov_mat /= train_bs * iw_update_period
            print(i, np.sum(cov_mat.cpu().detach().numpy()),
                  train_bs * len_train_source)

            # Recompute the importance weight by solving a QP.
            base_network.im_weights_update(
                source_label_distribution,
                pseudo_target_label.cpu().detach().numpy(),
                cov_mat.cpu().detach().numpy(), config["device"])
            current_weights = [
                round(x, 4)
                for x in base_network.im_weights.data.cpu().numpy().flatten()
            ]
            write_list(config["out_wei_file"], [
                np.linalg.norm(current_weights -
                               true_weights.cpu().numpy().flatten())
            ] + current_weights)
            print(
                np.linalg.norm(current_weights -
                               true_weights.cpu().numpy().flatten()),
                current_weights)

            cov_mat[:] = 0.0
            pseudo_target_label[:] = 0.0

    return best_acc
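base_network.im_weights_update is defined elsewhere; its arguments (the source label distribution, the accumulated target pseudo-label marginal, and the source soft-confusion matrix) suggest a small least-squares QP over the importance weights. A minimal sketch of such a solver, assuming cvxpy is available; the function name and exact constraints here are illustrative, not this repository's implementation:

import cvxpy as cp

def solve_importance_weights(source_dist, pseudo_target_dist, cov_mat):
    # Hypothetical QP: find w >= 0 with E_s[w] = 1 such that re-weighting the
    # source soft-confusion matrix reproduces the target pseudo-label marginal.
    class_num = len(source_dist)
    w = cp.Variable(class_num)
    objective = cp.Minimize(
        cp.sum_squares(cov_mat @ w - pseudo_target_dist.flatten()))
    constraints = [w >= 0, source_dist @ w == 1]
    cp.Problem(objective, constraints).solve()
    return w.value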
Example No. 9
def train(args):
    # prepare data
    dsets = {}
    dset_loaders = {}
    dsets["source"] = ImageList(open(args.source_list).readlines(), \
                                transform=image_train())
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=args.batch_size, \
                                        shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open('data/{}/pseudo_list/{}_{}_list.txt'
                                     ''.format(args.dataset,args.source,args.target)).readlines(),
                                transform=image_train(),pseudo=True)
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=args.batch_size, \
                                        shuffle=True, num_workers=4, drop_last=True)

    dsets["test"] = ImageList(open(args.target_list).readlines(), \
                              transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=2 * args.batch_size, \
                                      shuffle=False, num_workers=4)

    #model
    model = network.ResNet(class_num=args.num_class).cuda()
    adv_net = network.AdversarialNetwork(in_feature=model.output_num(),hidden_size=1024,max_iter=2000).cuda()
    parameter_classifier = [model.get_parameters()[2]]
    parameter_feature = model.get_parameters()[0:2] + adv_net.get_parameters()
    optimizer_classifier = torch.optim.SGD(parameter_classifier,lr=args.lr,momentum=0.9,weight_decay=0.005)
    optimizer_feature = torch.optim.SGD(parameter_feature,lr=args.lr,momentum=0.9,weight_decay=0)

    gpus = args.gpu_id.split(',')
    if len(gpus) > 1:
        adv_net = nn.DataParallel(adv_net, device_ids=[int(i) for i in gpus])
        model = nn.DataParallel(model, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    best_acc = 0.0
    best_model = copy.deepcopy(model)

    Cs_memory = torch.zeros(args.num_class, 256).cuda()
    Ct_memory = torch.zeros(args.num_class, 256).cuda()

    for i in range(args.max_iter):
        if i % args.test_interval == args.test_interval - 1:
            model.train(False)
            temp_acc = image_classification_test(dset_loaders, model)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = copy.deepcopy(model)
            log_str = "\n iter: {:05d}, \t precision: {:.4f},\t best_acc:{:.4f}".format(i, temp_acc, best_acc)
            args.log_file.write(log_str)
            args.log_file.flush()
            print(log_str)
        if i % args.snapshot_interval == args.snapshot_interval -1:
            if not os.path.exists('snapshot'):
                os.mkdir('snapshot')
            if not os.path.exists('snapshot/save'):
                os.mkdir('snapshot/save')
            torch.save(best_model,'snapshot/save/best_model.pk')

        model.train(True)
        adv_net.train(True)
        optimizer_classifier = lr_schedule.inv_lr_scheduler(optimizer_classifier,i)
        optimizer_feature = lr_schedule.inv_lr_scheduler(optimizer_feature, i)

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, pseudo_labels_target, weights = next(iter_target)
        inputs_source, labels_source = inputs_source.cuda(),  labels_source.cuda()
        inputs_target, pseudo_labels_target = inputs_target.cuda(), pseudo_labels_target.cuda()
        weights = weights.type(torch.Tensor).cuda()

        features_source, outputs_source = model(inputs_source)
        features_target, outputs_target = model(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)

        source_class_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        adv_loss = utils.loss_adv(features,adv_net)
        H = torch.mean(utils.Entropy(F.softmax(outputs_target, dim=1)))
        target_robust_loss = utils.robust_pseudo_loss(outputs_target,pseudo_labels_target,weights)

        classifier_loss = source_class_loss + target_robust_loss
        optimizer_classifier.zero_grad()
        classifier_loss.backward(retain_graph=True)
        optimizer_classifier.step()

        if args.baseline == 'MSTN':
            lam = network.calc_coeff(i, max_iter=2000)
        elif args.baseline == 'DANN':
            lam = 0.0
        else:
            raise ValueError('Unknown baseline: {}'.format(args.baseline))
        pseu_labels_target = torch.argmax(outputs_target, dim=1)
        loss_sm, Cs_memory, Ct_memory = utils.SM(features_source, features_target, labels_source, pseu_labels_target,
                                                Cs_memory, Ct_memory)
        feature_loss = classifier_loss + adv_loss + lam*loss_sm + lam*H
        optimizer_feature.zero_grad()
        feature_loss.backward()
        optimizer_feature.step()

        print('step: {:d},\tsource_class_loss: {:.4f},\ttarget_robust_loss: {:.4f}'
              .format(i, source_class_loss.item(), target_robust_loss.item()))

        Cs_memory.detach_()
        Ct_memory.detach_()

    return best_acc, best_model
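utils.SM above is not defined in this snippet. In MSTN-style training it is usually a moving-average class-centroid alignment loss; a minimal sketch under that assumption (the signature matches the call site, but the body and the 0.7 decay are illustrative, not this repository's code):

import torch

def SM(feat_s, feat_t, labels_s, pseudo_labels_t, Cs_memory, Ct_memory, decay=0.7):
    # Hypothetical semantic loss: update per-class centroids with an exponential
    # moving average, then penalize source/target centroid distances.
    Cs = torch.zeros_like(Cs_memory)
    Ct = torch.zeros_like(Ct_memory)
    for c in range(Cs_memory.size(0)):
        s_mask, t_mask = labels_s == c, pseudo_labels_t == c
        if s_mask.any():
            Cs[c] = feat_s[s_mask].mean(dim=0)
        if t_mask.any():
            Ct[c] = feat_t[t_mask].mean(dim=0)
    Cs = decay * Cs_memory + (1 - decay) * Cs
    Ct = decay * Ct_memory + (1 - decay) * Ct
    loss = torch.mean(torch.sum((Cs - Ct) ** 2, dim=1))
    return loss, Cs, Ct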
Example No. 10
def train_distill(teacher, args):
    # prepare data
    dsets = {}
    dset_loaders = {}
    dsets["source"] = ImageList(open(args.source_list).readlines(), \
                                transform=image_train())
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=args.batch_size, \
                                        shuffle=True, num_workers=2, drop_last=True)
    dsets["target"] = ImageList(open(args.target_list).readlines(), \
                                transform=image_train(),  params=args)
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=args.batch_size, \
                                        shuffle=True, num_workers=2, drop_last=True)

    dsets["test"] = ImageList(open(args.target_list).readlines(), \
                              transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=2 * args.batch_size, \
                                      shuffle=False, num_workers=2)

    #model
    model = network.ResNet(class_num=args.num_class).cuda()
    adv_net = network.AdversarialNetwork(in_feature=model.output_num(),hidden_size=1024, max_iter=args.max_iter).cuda()
    parameter_list = model.get_parameters() + adv_net.get_parameters()
    optimizer = torch.optim.SGD(parameter_list,lr=args.lr,momentum=0.9,weight_decay=0.005)
    # model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    gpus = args.gpu_id.split(',')
    if len(gpus) > 1:
        adv_net = nn.DataParallel(adv_net, device_ids=[int(i) for i in gpus])
        model = nn.DataParallel(model, device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    best_acc = 0.0
    best_model = copy.deepcopy(model)
    print_interval = (args.test_interval // 10)
    nt_cent = utils.NTXentLoss('cuda', args.batch_size, 0.2, True)
    
    Cs_memory = torch.zeros(args.num_class, 256).cuda()
    Ct_memory = torch.zeros(args.num_class, 256).cuda()

    max_batch = 100
    queue_size = args.batch_size * max_batch
    queue_data = [torch.randn(queue_size, 256).cuda(), torch.randn(queue_size, args.num_class).cuda()]
    queue_data_w = [torch.randn(queue_size, 256).cuda(), torch.randn(queue_size, args.num_class).cuda()]
    # queue_data = [torch.randn(queue_size, 256).cuda(), torch.randn(queue_size, 256).cuda()]

    queue_labels = [torch.ones(queue_size).cuda() * (args.num_class+1), torch.ones(queue_size).cuda() * (args.num_class+1)]
    queue_ptr = torch.zeros(1, dtype=torch.long)

    queue_weight = np.power(np.linspace(.0, 1.0, max_batch), 3)

    queue_weight = np.repeat(queue_weight, args.batch_size)


    best_ema_acc = 0.0
    for i in range(args.max_iter):
        if i % args.test_interval == args.test_interval - 1:
            model.train(False)
            temp_acc = image_classification_test(dset_loaders, model)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = copy.deepcopy(model)
            log_str = "\niter: {:05d}, \t precision: {:.4f},\t best_acc:{:.4f}".format(i, temp_acc, best_acc)
            args.log_file.write(log_str)
            args.log_file.flush()
            print(log_str)

            
            temp_acc = image_classification_test(dset_loaders, teacher)
            if temp_acc > best_ema_acc:
                best_ema_acc = temp_acc
                # best_model = copy.deepcopy(model)
            log_str = "\niter: {:05d}, \t precision: {:.4f},\t best_ema_acc:{:.4f}".format(i, temp_acc, best_ema_acc)
            args.log_file.write(log_str)
            args.log_file.flush()
            print(log_str)
        # if i % args.snapshot_interval == args.snapshot_interval -1:
        #     if not os.path.exists(args.save_dir):
        #         os.mkdir(args.save_dir)
        #     torch.save(best_model,os.path.join(args.save_dir, 'initial_model.pk'))

        model.train(True)
        adv_net.train(True)
        teacher.train(False)
        optimizer = lr_schedule.inv_lr_scheduler(optimizer,i)

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, _, inputs_target_mosaic_w, inputs_target_mosaic_s, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        inputs_target_mosaic_w, inputs_target_mosaic_s =  inputs_target_mosaic_w.cuda(), inputs_target_mosaic_s.cuda()
        features_source, outputs_source = model(inputs_source)
        features_target, outputs_target = model(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        with torch.no_grad():
            features_target_teacher, outputs_target_teacher = teacher(inputs_target)

        adv_loss = utils.loss_adv(features,adv_net)

        H = torch.mean(utils.Entropy(F.softmax(outputs_target, dim=1)))

        if args.baseline == 'MSTN':
            lam = network.calc_coeff(i)
        elif args.baseline == 'DANN':
            lam = 0.0
        else:
            raise ValueError('Unknown baseline: {}'.format(args.baseline))
        prob_max, pseu_labels_target = torch.max(F.softmax(outputs_target, dim=1), dim=1)
        loss_sm, Cs_memory, Ct_memory = utils.SM(features_source, features_target, labels_source, pseu_labels_target,
                                                Cs_memory, Ct_memory)

        # classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        classifier_loss = 4*utils.cross_entropy_with_logits(outputs_target / 4.0, F.softmax(outputs_target_teacher / 4.0, dim=1)) + nn.CrossEntropyLoss()(outputs_source, labels_source)
        total_loss = classifier_loss + lam * loss_sm + adv_loss + network.calc_coeff((i-100), high=0.1, max_iter=100)*H

        optimizer.zero_grad()

        total_loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        mosaic_loss_target = torch.zeros(1)

        if i < args.max_iter // 5 * 2:
            alpha = 0.0
        else:
            alpha = 0.5
        with _disable_tracking_bn_stats(model):
            mosaic_features_target_w, mosaic_outputs_target_w = model(inputs_target_mosaic_w)
            mosaic_features_target_s, mosaic_outputs_target_s = model(inputs_target_mosaic_s)
            with torch.no_grad():
                
                features_list_w = [mosaic_features_target_w, F.softmax(mosaic_outputs_target_w, dim=1)]

                features_target_, outputs_target_ = model(inputs_target)
                outputs_target = alpha * outputs_target_ + (1. - alpha) * outputs_target_teacher

                prob_max, pseu_labels_target = torch.max(F.softmax(outputs_target, dim=1), dim=1)
                features_list = [features_target_, F.softmax(outputs_target, dim=1)]
                labels_list = [pseu_labels_target, pseu_labels_target]

                utils.rightshift(queue_weight, args.batch_size)
                for j in range(len(features_list)):
                    queue_data[j][queue_ptr:queue_ptr+args.batch_size, :] = features_list[j]
                    queue_data_w[j][queue_ptr:queue_ptr+args.batch_size, :] = features_list_w[j]
                    queue_labels[j][queue_ptr:queue_ptr+args.batch_size] = labels_list[j]
                pre_ptr = int(queue_ptr)
                ptr = ((i+1) % max_batch) * args.batch_size
                queue_ptr[0] = ptr


            mosaic_loss_target = (nt_cent(queue_data[1].detach(), F.softmax(mosaic_outputs_target_w, dim=1), queue_labels[1], 
                pseu_labels_target.float(), queue_weight, pre_ptr, class_level=False) +
                                    1.*nt_cent(queue_data_w[1].detach(), F.softmax(mosaic_outputs_target_s, dim=1), queue_labels[1], 
                pseu_labels_target.float(), queue_weight, pre_ptr, class_level=False)) * network.calc_coeff(i, high=0.3, max_iter=50)
            mosaic_loss = mosaic_loss_target * 1.0

            # mosaic_loss = utils.cross_entropy_with_logits(mosaic_outputs_target, F.softmax(outputs_target*1.5, dim=1)) * (network.calc_coeff(i, high=0.5, max_iter=2000))
            # mosaic_loss += 0.4*(torch.abs(F.softmax(outputs_target, dim=1).detach() - F.softmax(mosaic_outputs_target, dim=1)).sum(1)).mean(0)

            mosaic_loss.backward()
            optimizer.step()


        if i % print_interval == 0:
            log_str = 'step: {:d},\tclass_loss: {:.4f},\tadv_loss: {:.4f},\tmosaic_loss: {:.4f},\tmean_prob: {:.4f}'.format(
                i, classifier_loss.item(), adv_loss.item(),
                mosaic_loss_target.item(), prob_max.mean().item())
            print(log_str)
            args.log_file.write('\n'+log_str)
            args.log_file.flush()

        Cs_memory.detach_()
        Ct_memory.detach_()

    return best_acc, best_model
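utils.cross_entropy_with_logits is likewise external. A common definition is the soft-target cross-entropy below; the distillation term above applies temperature T = 4 to both student and teacher logits and scales the result back up by 4. This sketch is an assumption about the helper, not its actual source:

import torch
import torch.nn.functional as F

def cross_entropy_with_logits(logits, soft_targets):
    # H(soft_targets, softmax(logits)), averaged over the batch.
    return torch.mean(torch.sum(-soft_targets * F.log_softmax(logits, dim=1), dim=1))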
Example No. 11
def train(config):
    ## set pre-process
    prep_dict = {}
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_target(**config["prep"]['params'])
    prep_dict["target"] = prep.image_target(**config["prep"]['params'])
    prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
            shuffle=True, num_workers=4, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
            shuffle=True, num_workers=4, drop_last=True)

    dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
                            transform=prep_dict["test"])
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                                shuffle=False, num_workers=4)

    ## set base network
    class_num = config["network"]["params"]["class_num"]
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    ## add additional network for some methods
    ad_net = network.AdversarialNetwork(class_num, 1024)
    ad_net = ad_net.cuda()

    ## set optimizer
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                    **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    #multi gpu
    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network,
                                       device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    for i in range(config["num_iterations"]):
        #test
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders,
                                                 base_network,
                                                 gvbg=config["GVBG"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
        #save model
        if i % config["snapshot_interval"] == 0:
            torch.save(base_network.state_dict(), osp.join(config["output_path"], \
                "iter_{:05d}_model.pth.tar".format(i)))

        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        loss_params = config["loss"]
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()

        #dataloader
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])

        #network
        inputs_source, labels_source = next(iter_source)
        inputs_target, _ = next(iter_target)
        inputs_source, inputs_target, labels_source = inputs_source.cuda(
        ), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source, focal_source = base_network(
            inputs_source, gvbg=config["GVBG"])
        features_target, outputs_target, focal_target = base_network(
            inputs_target, gvbg=config["GVBG"])
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        focals = torch.cat((focal_source, focal_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)

        #loss calculation
        transfer_loss, mean_entropy, gvbg, gvbd = loss.GVB(
            [softmax_out, focals],
            ad_net,
            network.calc_coeff(i),
            GVBD=config['GVBD'])
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
        total_loss = loss_params[
            "trade_off"] * transfer_loss + classifier_loss + config[
                "GVBG"] * gvbg + abs(config['GVBD']) * gvbd

        if i % config["print_num"] == 0:
            log_str = "iter: {:05d}, transferloss: {:.5f}, classifier_loss: {:.5f}, mean entropy:{:.5f}, gvbg:{:.5f}, gvbd:{:.5f}".format(
                i, transfer_loss, classifier_loss, mean_entropy, gvbg, gvbd)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            #print(log_str)

        total_loss.backward()
        optimizer.step()
    torch.save(best_model, osp.join(config["output_path"],
                                    "best_model.pth.tar"))
    return best_acc
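Several of these examples ramp their adversarial coefficients with network.calc_coeff. In the original CDAN code this is a sigmoid schedule; a sketch for reference (these default constants are the commonly used ones and may differ in a given repository):

import numpy as np

def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
    # Sigmoid ramp from `low` to `high`, saturating around `max_iter` steps.
    return float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter))
                 - (high - low) + low)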
Example No. 12
def train(config):
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])

    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
                                        shuffle=True, num_workers=0, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
                                        shuffle=True, num_workers=0, drop_last=True)

    if prep_config["test_10crop"]:
        dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                   transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \
                                           shuffle=False, num_workers=0) for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                                          shuffle=False, num_workers=0)

    class_num = config["network"]["params"]["class_num"]

    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()

    ## Add discriminators D_s, D_t and generators G_s2t, G_t2s

    z_dimension = 256
    D_s = network.models["Discriminator"]()
    D_s = D_s.cuda()
    G_s2t = network.models["Generator"](z_dimension, 1024)
    G_s2t = G_s2t.cuda()

    D_t = network.models["Discriminator"]()
    D_t = D_t.cuda()
    G_t2s = network.models["Generator"](z_dimension, 1024)
    G_t2s = G_t2s.cuda()

    criterion_GAN = torch.nn.MSELoss()
    criterion_cycle = torch.nn.L1Loss()
    criterion_identity = torch.nn.L1Loss()
    criterion_Sem = torch.nn.L1Loss()

    optimizer_G = torch.optim.Adam(itertools.chain(G_s2t.parameters(),
                                                   G_t2s.parameters()),
                                   lr=0.0003)
    optimizer_D_s = torch.optim.Adam(D_s.parameters(), lr=0.0003)
    optimizer_D_t = torch.optim.Adam(D_t.parameters(), lr=0.0003)

    fake_S_buffer = ReplayBuffer()
    fake_T_buffer = ReplayBuffer()

    classifier_optimizer = torch.optim.Adam(base_network.parameters(),
                                            lr=0.0003)
    ## Add an auxiliary classifier
    classifier1 = net.Net(256, class_num)
    classifier1 = classifier1.cuda()
    classifier1_optim = optim.Adam(classifier1.parameters(), lr=0.0003)

    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer(
            [base_network.output_num(), class_num],
            config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(
            base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()

    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                                         **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]

    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network,
                                       device_ids=[int(i) for i in gpus])

    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
    best_acc = 0.0
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, \
                                                 base_network, test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model

                now = datetime.datetime.now()
                d = now.strftime('%m-%d %H:%M:%S')
                torch.save(
                    best_model,
                    osp.join(
                        config["output_path"],
                        "{}_to_{}_best_model_acc-{}_{}.pth.tar".format(
                            args.source, args.target, best_acc, d)))
            log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc)
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()

            print(log_str)
        if i % config["snapshot_interval"] == 0:
            torch.save(nn.Sequential(base_network), osp.join(config["output_path"], \
                                                             "{}_to_{}_iter_{:05d}_model_{}.pth.tar".format(args.source,
                                                                                                            args.target,
                                                                                                            i, str(
                                                                     datetime.datetime.utcnow()))))

        loss_params = config["loss"]
        ## train one iter
        classifier1.train(True)
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()

        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = iter_source.next()
        inputs_target, labels_target = iter_target.next()
        inputs_source, inputs_target, labels_source = inputs_source.cuda(
        ), inputs_target.cuda(), labels_source.cuda()

        # Extract features
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)

        outputs_source1 = classifier1(features_source.detach())
        outputs_target1 = classifier1(features_target.detach())
        outputs1 = torch.cat((outputs_source1, outputs_target1), dim=0)
        softmax_out1 = nn.Softmax(dim=1)(outputs1)

        softmax_out = (1 - args.cla_plus_weight
                       ) * softmax_out + args.cla_plus_weight * softmax_out1

        if config['method'] == 'CDAN+E':
            entropy = loss.Entropy(softmax_out)
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy,
                                      network.calc_coeff(i), random_layer)
        elif config['method'] == 'CDAN':
            transfer_loss = loss.CDAN([features, softmax_out], ad_net, None,
                                      None, random_layer)
        elif config['method'] == 'DANN':
            transfer_loss = loss.DANN(features, ad_net)
        else:
            raise ValueError('Method cannot be recognized.')
        classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)

        # Cycle
        num_feature = features_source.size(0)
        # ================= labels for the adversarial (LSGAN) losses
        real_label = torch.ones(num_feature).cuda()
        fake_label = torch.zeros(num_feature).cuda()

        # Train the generators
        optimizer_G.zero_grad()

        # Identity loss
        same_t = G_s2t(features_target.detach())
        loss_identity_t = criterion_identity(same_t, features_target)

        same_s = G_t2s(features_source.detach())
        loss_identity_s = criterion_identity(same_s, features_source)

        # GAN loss: the generators try to make the discriminators predict "real"
        fake_t = G_s2t(features_source.detach())
        pred_fake = D_t(fake_t)
        loss_G_s2t = criterion_GAN(pred_fake, real_label)

        fake_s = G_t2s(features_target.detach())
        pred_fake = D_s(fake_s)
        loss_G_t2s = criterion_GAN(pred_fake, real_label)

        # cycle loss
        recovered_s = G_t2s(fake_t)
        loss_cycle_sts = criterion_cycle(recovered_s, features_source)

        recovered_t = G_s2t(fake_s)
        loss_cycle_tst = criterion_cycle(recovered_t, features_target)

        # sem loss
        pred_recovered_s = base_network.fc(recovered_s)
        pred_fake_t = base_network.fc(fake_t)
        loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t)

        pred_recovered_t = base_network.fc(recovered_t)
        pred_fake_s = base_network.fc(fake_s)
        loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s)

        loss_cycle = loss_cycle_tst + loss_cycle_sts
        weights = args.weight_in_lossG.split(',')
        loss_G = float(weights[0]) * (loss_identity_s + loss_identity_t) + \
                 float(weights[1]) * (loss_G_s2t + loss_G_t2s) + \
                 float(weights[2]) * loss_cycle + \
                 float(weights[3]) * (loss_sem_s2t + loss_sem_t2s)

        # Train the softmax classifier on generated target-like features
        outputs_fake = classifier1(fake_t.detach())
        # Optimize the classifier
        classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, labels_source)
        classifier1_optim.zero_grad()
        classifier_loss1.backward()
        classifier1_optim.step()

        total_loss = loss_params[
            "trade_off"] * transfer_loss + classifier_loss + args.cyc_loss_weight * loss_G
        total_loss.backward()
        optimizer.step()
        optimizer_G.step()

        ###### Discriminator S ######
        optimizer_D_s.zero_grad()

        # Real loss
        pred_real = D_s(features_source.detach())
        loss_D_real = criterion_GAN(pred_real, real_label)

        # Fake loss
        fake_s = fake_S_buffer.push_and_pop(fake_s)
        pred_fake = D_s(fake_s.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label)

        # Total loss
        loss_D_s = loss_D_real + loss_D_fake
        loss_D_s.backward()

        optimizer_D_s.step()
        ###################################

        ###### Discriminator t ######
        optimizer_D_t.zero_grad()

        # Real loss
        pred_real = D_t(features_target.detach())
        loss_D_real = criterion_GAN(pred_real, real_label)

        # Fake loss
        fake_t = fake_T_buffer.push_and_pop(fake_t)
        pred_fake = D_t(fake_t.detach())
        loss_D_fake = criterion_GAN(pred_fake, fake_label)

        # Total loss
        loss_D_t = loss_D_real + loss_D_fake
        loss_D_t.backward()
        optimizer_D_t.step()
    now = datetime.datetime.now()
    d = now.strftime('%m-%d %H:%M:%S')
    torch.save(
        best_model,
        osp.join(
            config["output_path"],
            "{}_to_{}_best_model_acc-{}_{}.pth.tar".format(
                args.source, args.target, best_acc, d)))
    return best_acc
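ReplayBuffer.push_and_pop above follows the CycleGAN history-buffer trick: each discriminator is trained on a mix of current and previously generated features, which stabilizes adversarial training. A minimal sketch of the usual implementation (assumed; this repository's buffer may differ in size or sampling details):

import random
import torch

class ReplayBuffer:
    # Keeps up to max_size past fake samples; with probability 0.5 a new sample
    # is swapped for a stored one, so the discriminator also sees older fakes.
    def __init__(self, max_size=50):
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, batch):
        out = []
        for sample in batch.detach():
            sample = sample.unsqueeze(0)
            if len(self.data) < self.max_size:
                self.data.append(sample)
                out.append(sample)
            elif random.random() > 0.5:
                idx = random.randint(0, self.max_size - 1)
                out.append(self.data[idx].clone())
                self.data[idx] = sample
            else:
                out.append(sample)
        return torch.cat(out, dim=0)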