Example #1
import torch.nn as nn
import torch.optim as optim

def train_src(model, params, data_loader, device):
    """Train classifier for source domain."""
    ####################
    # 1. setup network #
    ####################

    # set train state for Dropout and BN layers
    model.train()

    # setup criterion and optimizer
    optimizer = optim.Adam(model.parameters(), lr=params.lr)
    loss_class = nn.NLLLoss()

    ####################
    # 2. train network #
    ####################

    for epoch in range(params.num_epochs_src):
        for step, (images, labels) in enumerate(data_loader):
            # make images and labels variable
            images = images.to(device)
            labels = labels.squeeze_().to(device)

            # zero gradients for optimizer
            optimizer.zero_grad()

            # compute classification loss
            preds = model(images)
            loss = loss_class(preds, labels)

            # optimize source classifier
            loss.backward()
            optimizer.step()

            # print step info
            if ((step + 1) % params.log_step_src == 0):
                print("Epoch [{}/{}] Step [{}/{}]: loss={:.5f}".format(
                    epoch + 1, params.num_epochs_src, step + 1,
                    len(data_loader), loss.item()))

        # eval model periodically
        if ((epoch + 1) % params.eval_step_src == 0):
            eval(model, data_loader, flag='source')
            model.train()

        # save model parameters
        if ((epoch + 1) % params.save_step_src == 0):
            save_model(model, params.src_dataset + "-source-classifier-{}.pt".format(epoch + 1))

    # save final model
    save_model(model, params.src_dataset + "-source-classifier-final.pt")

    return model
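
Note that nn.NLLLoss expects log-probabilities rather than raw logits, so any model trained with this loop has to end in a LogSoftmax layer. A minimal sketch of a compatible classifier head; the layer sizes and class count are assumptions, not taken from the snippet:

import torch.nn as nn

# Hypothetical head compatible with nn.NLLLoss: the final LogSoftmax
# produces the log-probabilities the loss expects.
classifier_head = nn.Sequential(
    nn.Linear(512, 256),   # feature dimension is an assumption
    nn.ReLU(),
    nn.Linear(256, 10),    # number of classes is an assumption
    nn.LogSoftmax(dim=1),
)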
Example #2
import torch.nn as nn
import torch.optim as optim

def train_src(model, src_data_loader, tgt_data_loader_eval, device, params):
    """Train classifier for source domain."""
    ####################
    # 1. setup network #
    ####################

    # setup criterion and optimizer

    parameter_list = [
        {
            "params": get_parameters(model.features, 'weight'),
            "lr": 0.001
        },
        {
            "params": get_parameters(model.features, 'bias'),
            "lr": 0.002
        },
        {
            "params": get_parameters(model.fc, 'weight'),
            "lr": 0.01
        },
        {
            "params": get_parameters(model.fc, 'bias'),
            "lr": 0.02
        },
    ]
    optimizer = optim.SGD(parameter_list, momentum=0.9)
    criterion = nn.CrossEntropyLoss()

    ####################
    # 2. train network #
    ####################
    global_step = 0
    for epoch in range(params.num_epochs):
        for step, (images, labels) in enumerate(src_data_loader):
            model.train()
            global_step += 1
            adjust_learning_rate(optimizer, global_step)

            # make images and labels variable
            images = images.to(device)
            labels = labels.to(device)

            # zero gradients for optimizer
            optimizer.zero_grad()

            # compute classification loss
            preds = model(images)
            loss = criterion(preds, labels)

            # optimize source classifier
            loss.backward()
            optimizer.step()

            # print step info
            if (global_step % params.log_step == 0):
                print("Epoch [{:4d}] Step [{:4d}]: loss={:.5f}".format(
                    epoch + 1, global_step, loss.data.item()))

            # eval model on test set
            if (global_step % params.eval_step == 0):
                eval(model, src_data_loader, device)
                eval(model, tgt_data_loader_eval, device)

            # save model parameters
            if (global_step % params.save_step == 0):
                save_model(
                    model, params.src_dataset +
                    "-source-classifier-{}.pt".format(global_step), params)

        # end
        if (global_step > params.max_step):
            break

    # save final model
    save_model(model, params.src_dataset + "-source-classifier-final.pt",
               params)

    return model
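
The get_parameters helper that builds the per-group learning rates above is not shown. A plausible sketch, assuming it selects a module's parameters by name suffix ('weight' or 'bias'):

def get_parameters(module, name):
    # Assumed behavior: yield every parameter of `module` whose name ends
    # with `name`, so weights and biases can be given separate learning
    # rates (here biases get roughly twice the weight learning rate, an
    # old AlexNet-era convention).
    for param_name, param in module.named_parameters():
        if param_name.endswith(name):
            yield param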
Example #3
    src_data_loader_eval = get_data_loader(params.src_dataset,
                                           dataset_root=params.dataset_root,
                                           batch_size=params.batch_size_eval,
                                           train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset,
                                      dataset_root=params.dataset_root,
                                      batch_size=params.batch_size,
                                      train=True)
    tgt_data_loader_eval = get_data_loader(params.tgt_dataset,
                                           dataset_root=params.dataset_root,
                                           batch_size=params.batch_size_eval,
                                           train=False)

    # load models
    #model = AlexModel_LRN().to(device)
    model = AlexModel().to(device)

    # training model
    print("training model")
    if not (model.restored and params.model_trained):
        model = train_src(model, src_data_loader, src_data_loader_eval,
                          tgt_data_loader, tgt_data_loader_eval, device,
                          params)

    # eval trained model
    print("eval trained model")
    eval(model, tgt_data_loader, device)

    # end
    print("done")
Example #4

params = Config()

# init random seed
init_random_seed(params.manual_seed)

# load dataset
src_data_loader = get_data_loader(params.src_dataset, params.dataset_root,
                                  params.batch_size)
tgt_data_loader = get_data_loader(params.tgt_dataset, params.dataset_root,
                                  params.batch_size)

# load dann model
dann = init_model(net=AlexModel(), restore=None)

# train dann model
print("Start training dann model.")

if not (dann.restored and params.dann_restore):
    dann = train_dann(dann, params, src_data_loader, tgt_data_loader,
                      tgt_data_loader)

# eval dann model
print("Evaluating dann for source domain")
eval(dann, src_data_loader)
print("Evaluating dann for target domain")
eval(dann, tgt_data_loader)

print('done')
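
init_random_seed is another repository helper. A plausible implementation, assuming it seeds Python, NumPy, and PyTorch, falling back to a random seed when the config does not pin one:

import random
import numpy as np
import torch

def init_random_seed(manual_seed=None):
    # Assumed behavior: seed every RNG in play and report the seed so a
    # run can be reproduced.
    seed = random.randint(1, 10000) if manual_seed is None else manual_seed
    print("use random seed: {}".format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    return seed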
Example #5
    init_random_seed(params.manual_seed)

    # init device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load dataset
    src_data_loader = get_data_loader(
        params.src_dataset, dataset_root=params.dataset_root, batch_size=params.batch_size, train=True)
    src_data_loader_eval = get_data_loader(
        params.src_dataset, dataset_root=params.dataset_root, batch_size=params.batch_size_eval, train=False)
    tgt_data_loader = get_data_loader(
        params.tgt_dataset, dataset_root=params.dataset_root, batch_size=params.batch_size, train=True)
    tgt_data_loader_eval = get_data_loader(
        params.tgt_dataset, dataset_root=params.dataset_root, batch_size=params.batch_size_eval, train=False)

    # load models
    model = ResModel().to(device)

    # training model
    print("training model")
    if not (model.restored and params.model_trained):
        model = train_src(model, src_data_loader, src_data_loader_eval,
                          tgt_data_loader, tgt_data_loader_eval, device, params)

    # eval trained model
    print("eval trained model")
    eval(model, tgt_data_loader_eval, device)

    # end
    print("done")
Example #6
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

def train_dann(model, params, src_data_loader, tgt_data_loader,
               tgt_data_loader_eval, device):
    """Train dann."""
    ####################
    # 1. setup network #
    ####################

    # setup criterion and optimizer

    if params.src_dataset == 'mnist' or params.tgt_dataset == 'mnist':
        print("training mnist task")
        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    else:
        print("training office task")
        parameter_list = [{
            "params": model.features.parameters(),
            "lr": 0.001
        }, {
            "params": model.fc.parameters(),
            "lr": 0.001
        }, {
            "params": model.bottleneck.parameters()
        }, {
            "params": model.classifier.parameters()
        }, {
            "params": model.discriminator.parameters()
        }]
        optimizer = optim.SGD(parameter_list, lr=0.01, momentum=0.9)

    criterion = nn.CrossEntropyLoss()

    ####################
    # 2. train network #
    ####################

    for epoch in range(params.num_epochs):
        # set train state for Dropout and BN layers
        model.train()
        # zip source and target data pair
        len_dataloader = min(len(src_data_loader), len(tgt_data_loader))
        data_zip = enumerate(zip(src_data_loader, tgt_data_loader))
        for step, ((images_src, class_src), (images_tgt, _)) in data_zip:

            p = float(step + epoch * len_dataloader) / \
                params.num_epochs / len_dataloader
            alpha = 2. / (1. + np.exp(-10 * p)) - 1

            if params.src_dataset == 'mnist' or params.tgt_dataset == 'mnist':
                adjust_learning_rate(optimizer, p)
            else:
                adjust_learning_rate_office(optimizer, p)

            # prepare domain label
            size_src = len(images_src)
            size_tgt = len(images_tgt)
            label_src = torch.zeros(size_src).long().to(device)  # source 0
            label_tgt = torch.ones(size_tgt).long().to(device)  # target 1

            # make images variable
            class_src = class_src.to(device)
            images_src = images_src.to(device)
            images_tgt = images_tgt.to(device)

            # zero gradients for optimizer
            optimizer.zero_grad()

            # train on source domain
            src_class_output, src_domain_output = model(input_data=images_src,
                                                        alpha=alpha)
            src_loss_class = criterion(src_class_output, class_src)
            src_loss_domain = criterion(src_domain_output, label_src)

            # train on target domain
            _, tgt_domain_output = model(input_data=images_tgt, alpha=alpha)
            tgt_loss_domain = criterion(tgt_domain_output, label_tgt)

            loss = src_loss_class + src_loss_domain + tgt_loss_domain

            # optimize dann
            loss.backward()
            optimizer.step()

            # print step info
            if ((step + 1) % params.log_step == 0):
                print(
                    "Epoch [{:4d}/{}] Step [{:2d}/{}]: src_loss_class={:.6f}, src_loss_domain={:.6f}, tgt_loss_domain={:.6f}, loss={:.6f}"
                    .format(epoch + 1, params.num_epochs, step + 1,
                            len_dataloader, src_loss_class.item(),
                            src_loss_domain.item(),
                            tgt_loss_domain.item(), loss.item()))

        # eval model
        if ((epoch + 1) % params.eval_step == 0):
            print("eval on target domain")
            eval(model, tgt_data_loader, device, flag='target')
            print("eval on source domain")
            eval(model, src_data_loader, device, flag='source')

        # save model parameters
        if ((epoch + 1) % params.save_step == 0):
            save_model(
                model, params.model_root, params.src_dataset + '-' +
                params.tgt_dataset + "-dann-{}.pt".format(epoch + 1))

    # save final model
    save_model(
        model, params.model_root,
        params.src_dataset + '-' + params.tgt_dataset + "-dann-final.pt")

    return model
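
The alpha passed into the model is the gradient-reversal coefficient from the DANN paper, annealed as alpha = 2 / (1 + exp(-10p)) - 1 over training progress p. The gradient reversal layer itself is not shown; a standard sketch (the class name is an assumption):

from torch.autograd import Function

class GradReverse(Function):
    """Identity in the forward pass; scales gradients by -alpha backward."""

    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # reverse and scale the gradient flowing back into the features
        return grad_output.neg() * ctx.alpha, None

# usage inside the model's forward pass:
#   reversed_feat = GradReverse.apply(feat, alpha)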
Example #7
import torch.nn as nn
import torch.optim as optim

def train_src(model, src_data_loader, src_data_loader_eval, tgt_data_loader,
              tgt_data_loader_eval, device, params):
    """Train classifier for source domain."""
    ####################
    # 1. setup network #
    ####################

    # set train state for Dropout and BN layers
    model.train()

    # setup criterion and optimizer
    parameter_list = [
        {
            "params": model.features.parameters(),
            "lr": 0.001
        },
        {
            "params": model.fc.parameters(),
            "lr": 0.01
        },
    ]
    optimizer = optim.SGD(parameter_list, momentum=0.9)
    criterion = nn.CrossEntropyLoss()

    ####################
    # 2. train network #
    ####################

    for epoch in range(params.num_epochs):
        model.train()
        len_dataloader = len(src_data_loader)
        for step, (images, labels) in enumerate(src_data_loader):
            p = float(step + epoch * len_dataloader) / \
                params.num_epochs / len_dataloader
            adjust_learning_rate(optimizer, p)

            # make images and labels variable
            images = images.to(device)
            labels = labels.to(device)

            # zero gradients for optimizer
            optimizer.zero_grad()

            # compute classification loss
            preds = model(images)
            loss = criterion(preds, labels)

            # optimize source classifier
            loss.backward()
            optimizer.step()

            # print step info
            if ((step + 1) % params.log_step == 0):
                print("Epoch [{:4d}/{:4d}] Step [{:2d}/{:2d}]: loss={:.5f}".
                      format(epoch + 1, params.num_epochs, step + 1,
                             len(src_data_loader), loss.item()))

        # eval model on test set
        if ((epoch + 1) % params.eval_step == 0):
            eval(model, src_data_loader_eval, device)
            eval(model, tgt_data_loader_eval, device)

        # save model parameters
        if ((epoch + 1) % params.save_step == 0):
            save_model(
                model, params.src_dataset +
                "-source-classifier-{}.pt".format(epoch + 1), params)

    # save final model
    save_model(model, params.src_dataset + "-source-classifier-final.pt",
               params)

    return model
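
adjust_learning_rate(optimizer, p) is assumed to implement the annealing schedule from the DANN paper, lr_p = lr_0 / (1 + alpha * p)^beta with alpha = 10 and beta = 0.75. A sketch that scales each parameter group by the same decay factor, preserving the per-group ratios set up above:

def adjust_learning_rate(optimizer, p, alpha=10.0, beta=0.75):
    # DANN-style annealing applied per parameter group, so the relative
    # learning rates (0.001 for features, 0.01 for fc) stay fixed.
    decay = (1.0 + alpha * p) ** (-beta)
    for param_group in optimizer.param_groups:
        # remember each group's base learning rate on the first call
        if "initial_lr" not in param_group:
            param_group["initial_lr"] = param_group["lr"]
        param_group["lr"] = param_group["initial_lr"] * decay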
Example #8
import os

import torch
import torch.nn as nn
import torch.optim as optim

def train_tgt(src_encoder, src_classifier, tgt_encoder, critic,
              src_data_loader, tgt_data_loader, params):
    """Train encoder for target domain."""
    ####################
    # 1. setup network #
    ####################

    # setup criterion and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer_tgt = optim.Adam(tgt_encoder.parameters(),
                               lr=params.tgt_learning_rate,
                               betas=(params.beta1, params.beta2),
                               weight_decay=2.5e-5)
    optimizer_critic = optim.Adam(critic.parameters(),
                                  lr=params.critic_learning_rate,
                                  betas=(params.beta1, params.beta2),
                                  weight_decay=2.5e-5)
    len_data_loader = min(len(src_data_loader), len(tgt_data_loader))

    ####################
    # 2. train network #
    ####################

    for epoch in range(params.num_epochs):

        # set train state for Dropout and BN layers
        tgt_encoder.train()
        critic.train()

        # zip source and target data pair
        data_zip = enumerate(zip(src_data_loader, tgt_data_loader))
        for step, ((images_src, _), (images_tgt, _)) in data_zip:
            ###########################
            # 2.1 train discriminator #
            ###########################

            # make images variable
            images_src = make_variable(images_src)
            images_tgt = make_variable(images_tgt)

            # zero gradients for optimizer
            optimizer_critic.zero_grad()

            # extract features from both domains
            feat_src = src_encoder(images_src)
            feat_tgt = tgt_encoder(images_tgt)

            # predict on discriminator
            pred_src = critic(feat_src.detach())
            pred_tgt = critic(feat_tgt.detach())

            # prepare domain labels: source = 1 (real), target = 0 (fake)
            label_src = make_variable(torch.ones(feat_src.size(0)).long())
            label_tgt = make_variable(torch.zeros(feat_tgt.size(0)).long())

            # compute loss for critic
            loss_critic = criterion(pred_src, label_src) + criterion(
                pred_tgt, label_tgt)
            loss_critic.backward()

            # optimize critic
            optimizer_critic.step()

            pred_cls_src = torch.squeeze(pred_src.max(1)[1])
            pred_cls_tgt = torch.squeeze(pred_tgt.max(1)[1])
            acc = ((pred_cls_src == label_src).float().mean() +
                   (pred_cls_tgt == label_tgt).float().mean()) / 2

            ############################
            # 2.2 train target encoder #
            ############################

            # zero gradients for optimizer
            optimizer_critic.zero_grad()
            optimizer_tgt.zero_grad()

            # extract target features
            feat_tgt = tgt_encoder(images_tgt)

            # predict on discriminator
            pred_tgt = critic(feat_tgt)

            # prepare flipped labels so the target encoder learns to fool the critic
            label_tgt = make_variable(torch.ones(feat_tgt.size(0)).long())

            # compute loss for target encoder
            loss_tgt = criterion(pred_tgt, label_tgt)
            loss_tgt.backward()

            # optimize target encoder
            optimizer_tgt.step()

            #######################
            # 2.3 print step info #
            #######################
            if ((step + 1) % params.log_step == 0):
                print("Epoch [{:3d}/{:3d}] Step [{:3d}/{:3d}]:"
                      "critic_loss={:.5f} tgt_loss={:.5f} acc={:.5f}".format(
                          epoch + 1, params.num_epochs, step + 1,
                          len_data_loader, loss_critic.data[0],
                          loss_tgt.data[0], acc.data[0]))

        #############################
        # 2.4 eval current model    #
        #############################
        if ((epoch + 1) % params.eval_step == 0):
            print("eval model on source data")
            eval(tgt_encoder, src_classifier, src_data_loader)
            print("eval model on target data")
            eval(tgt_encoder, src_classifier, tgt_data_loader)

        #############################
        # 2.5 save model parameters #
        #############################
        if ((epoch + 1) % params.save_step == 0):
            torch.save(
                critic.state_dict(),
                os.path.join(
                    params.model_root,
                    "{}-{}-critic-{}.pt".format(params.src_dataset,
                                                params.tgt_dataset,
                                                epoch + 1)))
            torch.save(
                tgt_encoder.state_dict(),
                os.path.join(
                    params.model_root, "{}-{}-target-encoder-{}.pt".format(
                        params.src_dataset, params.tgt_dataset, epoch + 1)))

    torch.save(
        critic.state_dict(),
        os.path.join(
            params.model_root,
            "{}-{}-critic-final.pt".format(params.src_dataset,
                                           params.tgt_dataset)))
    torch.save(
        tgt_encoder.state_dict(),
        os.path.join(
            params.model_root,
            "{}-{}-target-encoder-final.pt".format(params.src_dataset,
                                                   params.tgt_dataset)))
    return tgt_encoder
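
make_variable is a holdover from the pre-0.4 Variable API; on current PyTorch it reduces to moving the tensor onto the active device. A minimal modern equivalent (the helper's exact behavior is an assumption):

import torch

def make_variable(tensor):
    # On PyTorch >= 0.4, Variables are merged into tensors, so the helper
    # only needs to place the data on the right device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return tensor.to(device)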