Ejemplo n.º 1
0
    def __init__(self, opt, nclasses, ndomains, mean, std, source_trainloader, source_valloader, targetloader):
        """Store loaders/options, build all networks, and set up losses and optimizers.

        opt: parsed command-line options (lr, beta1, ndf, gpu, ...).
        nclasses / ndomains: output sizes for the class and domain classifiers.
        mean / std: normalization statistics kept for later image processing.
        """
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.targetloader = targetloader
        self.opt = opt
        self.mean = mean
        self.std = std
        self.best_val = 0
        self.best_test = 0
        self.nclasses = nclasses
        self.ndomains = ndomains

        # Networks: two feature extractors, one class classifier, two
        # domain classifiers, a generator and a discriminator.
        self.netF1 = models._netF(opt)
        self.netF2 = models._netF(opt)
        self.netC1 = models._netC(opt, nclasses)
        self.netC2 = models._netC(opt, ndomains)
        self.netC3 = models._netC(opt, ndomains)
        self.netG = models._netG(opt, (opt.ndf*2)*2)
        self.netD = models._netD(opt, nclasses, ndomains)

        nets = (self.netF1, self.netF2, self.netC1, self.netC2,
                self.netC3, self.netG, self.netD)

        # Weight initialization — same order as construction so the RNG
        # stream is consumed identically to the original code.
        for net in nets:
            net.apply(utils.weights_init)

        # Loss criterions: cross-entropy for classification, BCE for the
        # adversarial real/fake signal.
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if opt.gpu >= 0:
            for module in nets + (self.criterion_c, self.criterion_s):
                module.cuda()

        # One Adam optimizer per network, all sharing lr/beta1 from opt.
        def _adam(net):
            return optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

        self.optimizerF1 = _adam(self.netF1)
        self.optimizerF2 = _adam(self.netF2)
        self.optimizerC1 = _adam(self.netC1)
        self.optimizerC2 = _adam(self.netC2)
        self.optimizerC3 = _adam(self.netC3)
        self.optimizerG = _adam(self.netG)
        self.optimizerD = _adam(self.netD)

        # Target values for the adversarial labels
        self.real_label_val = 1
        self.fake_label_val = 0
Ejemplo n.º 2
0
    def __init__(self, opt, nclasses, source_trainloader, source_valloader, targetloader):
        """Source-only trainer: keep loaders, build netF/netC, set up loss and optimizers."""
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.targetloader = targetloader
        self.opt = opt
        self.best_val = 0
        self.best_test = 0
        self.nclasses = nclasses

        # Feature extractor and classifier, freshly initialized
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)
        for net in (self.netF, self.netC):
            net.apply(utils.weights_init)

        # Classification loss
        self.criterion = nn.CrossEntropyLoss()

        if opt.gpu >= 0:
            for module in (self.netF, self.netC, self.criterion):
                module.cuda()

        # Adam optimizers sharing lr/beta1 from the options
        def _adam(net):
            return optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

        self.optimizerF = _adam(self.netF)
        self.optimizerC = _adam(self.netC)
Ejemplo n.º 3
0
    def __init__(self, opt, nclasses, source_trainloader, source_valloader):
        """Source-only trainer: loaders, netF/netC, loss and Adam optimizers."""
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.opt = opt
        self.best_val = 0
        self.nclasses = nclasses

        # Feature extractor and classifier, freshly initialized
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)
        for net in (self.netF, self.netC):
            net.apply(utils.weights_init)

        # Classification loss
        self.criterion = nn.CrossEntropyLoss()

        if opt.gpu >= 0:
            for module in (self.netF, self.netC, self.criterion):
                module.cuda()

        # Adam optimizers sharing lr/beta1 from the options
        def _adam(net):
            return optim.Adam(net.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

        self.optimizerF = _adam(self.netF)
        self.optimizerC = _adam(self.netC)
Ejemplo n.º 4
0
def get_label_list(target_list, predict_network_path, feature_network_path,
                   class_num, resize_size, crop_size, batch_size, use_gpu,
                   opt):
    """Return ``target_list`` rewritten with pseudo-labels.

    Runs the saved feature extractor + classifier over every target image and
    replaces the (wrong) trailing label of each line with the predicted one.

    :param target_list: list of "path label\\n" lines for the target domain
    :param predict_network_path: checkpoint path for the classifier (netC)
    :param feature_network_path: checkpoint path for the feature extractor (netF)
    :param class_num: number of classes (output size of netC)
    :param resize_size: image resize applied before inference
    :param crop_size: unused here; kept for interface compatibility
    :param batch_size: inference batch size
    :param use_gpu: move networks/tensors to CUDA when truthy
    :param opt: options namespace consumed by the model constructors
    :return: list of "path predicted_label\\n" strings, one per input line
    """
    label_list = []
    netF = models._netF(opt)
    netC = models._netC(opt, class_num)
    netF.load_state_dict(torch.load(feature_network_path))
    netC.load_state_dict(torch.load(predict_network_path))
    if use_gpu:
        netF.cuda()
        netC.cuda()
    # NOTE(review): the networks stay in train mode; for pseudo-labelling
    # netF.eval()/netC.eval() is probably intended — confirm before changing.

    mean = np.array([0.44, 0.44, 0.44])
    std = np.array([0.19, 0.19, 0.19])
    transform_target = transforms.Compose([
        transforms.Resize(resize_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    dsets_tar = ImageList(target_list, transform=transform_target)
    # BUG FIX: shuffle must be False — predictions are matched back to
    # target_list by a running index (count), so a shuffled loader would
    # attach each pseudo-label to the wrong file path.
    dset_loaders_tar = util_data.DataLoader(dsets_tar,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=4)
    len_train_target = len(dset_loaders_tar)
    iter_target = iter(dset_loaders_tar)
    count = 0
    for i in range(len_train_target):
        # BUG FIX: iterator.next() is Python 2 only; use the next() builtin.
        input_tar, label_tar = next(iter_target)
        if use_gpu:
            input_tar, label_tar = Variable(input_tar).cuda(), Variable(
                label_tar).cuda()
        else:
            input_tar, label_tar = Variable(input_tar), Variable(label_tar)
        predict_score = netC(netF(input_tar))
        _, predict_label = torch.max(predict_score, 1)
        for num in range(len(predict_label.cpu())):
            # Strip the old label plus newline ("[:-2]" assumes a
            # single-character label) and append the predicted one.
            label_list.append(target_list[count][:-2])
            label_list[count] = label_list[count] + str(
                predict_label[num].cpu().numpy()) + "\n"
            count += 1
    return label_list
Ejemplo n.º 5
0
    def __init__(self, opt, nclasses, mean, std, source_trainloader,
                 source_valloader, target_trainloader, target_valloader):
        """Adversarial trainer: loaders, G/D/F/C networks, losses and optimizers."""
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.target_trainloader = target_trainloader
        self.target_valloader = target_valloader
        self.opt = opt
        self.mean = mean
        self.std = std
        self.best_val = 0
        self.nclasses = nclasses

        # Generator, discriminator, feature extractor and classifier
        self.netG = models._netG(opt, nclasses)
        self.netD = models._netD(opt, nclasses)
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)
        for net in (self.netG, self.netD, self.netF, self.netC):
            net.apply(utils.weights_init)

        # Cross-entropy for class labels, BCE for real/fake scores
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if opt.gpu >= 0:
            for module in (self.netD, self.netG, self.netF, self.netC,
                           self.criterion_c, self.criterion_s):
                module.cuda()

        # NOTE: beta1 is hard-coded to 0.8 here (other trainers read opt.beta1).
        def _adam(net):
            return optim.Adam(net.parameters(),
                              lr=opt.lr,
                              betas=(0.8, 0.999))

        self.optimizerD = _adam(self.netD)
        self.optimizerG = _adam(self.netG)
        self.optimizerF = _adam(self.netF)
        self.optimizerC = _adam(self.netC)

        # Target values for the adversarial labels
        self.real_label_val = 1
        self.fake_label_val = 0
Ejemplo n.º 6
0
    def __init__(self, opt, nclasses, source_trainloader, source_valloader):
        """Source-only trainer with optional multi-GPU and checkpoint resume.

        Builds netF/netC, wraps them in DataParallel when several GPUs are
        visible, (re)initializes weights, optionally resumes from existing
        checkpoints, and creates the loss and Adam optimizers.
        """
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.opt = opt
        self.best_val = 0

        # Defining networks and optimizers
        self.nclasses = nclasses
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            self.netF = nn.DataParallel(self.netF)
            self.netC = nn.DataParallel(self.netC)

        # Weight initialization
        self.netF.apply(utils.weights_init)
        self.netC.apply(utils.weights_init)

        if opt.loadExisting != 0:
            # Resume from the best source-only checkpoints when they exist.
            # NOTE(review): if the nets were wrapped in DataParallel above,
            # state dicts saved from unwrapped models will fail to load
            # because of the "module." key prefix — confirm how the
            # checkpoints were saved.
            netF_path = os.path.join(opt.checkpoint_dir, 'model_best_netF_sourceonly.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'model_best_netC_sourceonly.pth')

            if os.path.isfile(netF_path):
                self.netF.load_state_dict(torch.load(netF_path))
            if os.path.isfile(netC_path):
                self.netC.load_state_dict(torch.load(netC_path))

        # Defining loss criterions
        self.criterion = nn.CrossEntropyLoss()

        if opt.gpu >= 0:
            self.netF.cuda()
            self.netC.cuda()
            self.criterion.cuda()

        # Defining optimizers
        self.optimizerF = optim.Adam(self.netF.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizerC = optim.Adam(self.netC.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
Ejemplo n.º 7
0
    def __init__(self, mean, std, source_trainloader, source_valloader,
                 targetloader):
        """Adversarial trainer taking hyper-parameters from the module-level ``consts``."""
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.targetloader = targetloader
        self.mean = mean
        self.std = std
        self.best_val = 0
        self.cuda = torch.cuda.is_available()

        # Generator, discriminator, feature extractor and classifier
        self.netG = models._netG()
        self.netD = models._netD()
        self.netF = models._netF()
        self.netC = models._netC()
        for net in (self.netG, self.netD, self.netF, self.netC):
            net.apply(utils.weights_init)

        # Cross-entropy for class labels, BCE for real/fake scores
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if self.cuda:
            for module in (self.netD, self.netG, self.netF, self.netC,
                           self.criterion_c, self.criterion_s):
                module.cuda()

        # Adam optimizers sharing lr/beta1 from consts
        def _adam(net):
            return optim.Adam(net.parameters(),
                              lr=consts.lr,
                              betas=(consts.beta1, 0.999))

        self.optimizerD = _adam(self.netD)
        self.optimizerG = _adam(self.netG)
        self.optimizerF = _adam(self.netF)
        self.optimizerC = _adam(self.netC)

        # Target values for the adversarial labels
        self.real_label_val = 1
        self.fake_label_val = 0
Ejemplo n.º 8
0
    def __init__(self, opt, nclasses, mean, std, source_trainloader, source_valloader, targetloader):
        """GTA trainer: loaders/statistics plus G, D, F, C networks and optimizers."""
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.targetloader = targetloader
        self.opt = opt
        self.mean = mean
        self.std = std
        self.best_val = 0
        self.best_test = 0
        self.nclasses = nclasses

        # Networks (generator input size is nclasses + 1 + opt.ndf*2)
        self.netG = models._netG(opt, self.nclasses+1+opt.ndf*2)
        self.netD = models._netD(opt, nclasses, 1)
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)
        for net in (self.netG, self.netD, self.netF, self.netC):
            net.apply(utils.weights_init)

        # Cross-entropy for class labels, BCE for real/fake scores
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if opt.gpu >= 0:
            for module in (self.netD, self.netG, self.netF, self.netC,
                           self.criterion_c, self.criterion_s):
                module.cuda()

        # Adam optimizers sharing lr/beta1 from the options
        def _adam(net):
            return optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

        self.optimizerD = _adam(self.netD)
        self.optimizerG = _adam(self.netG)
        self.optimizerF = _adam(self.netF)
        self.optimizerC = _adam(self.netC)

        # Target values for the adversarial labels
        self.real_label_val = 1
        self.fake_label_val = 0
Ejemplo n.º 9
0
def main():
    """Evaluate a saved GTA or source-only model on the MNIST target test set."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', required=True, help='path to source dataset')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
    parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
    parser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')
    parser.add_argument('--nz', type=int, default=512, help='size of the latent z vector')
    parser.add_argument('--ngf', type=int, default=64, help='Number of filters to use in the generator network')
    parser.add_argument('--ndf', type=int, default=64, help='Number of filters to use in the discriminator network')
    parser.add_argument('--gpu', type=int, default=1, help='GPU to use, -1 for CPU training')
    parser.add_argument('--checkpoint_dir', default='results/models', help='folder to load model checkpoints from')
    parser.add_argument('--method', default='GTA', help='Method to evaluate| GTA, sourceonly')
    parser.add_argument('--model_best', type=int, default=0, help='Flag to specify whether to use the best validation model or last checkpoint| 1-model best, 0-current checkpoint')

    opt = parser.parse_args()

    # GPU/CPU flags
    cudnn.benchmark = True
    if torch.cuda.is_available() and opt.gpu == -1:
        print("WARNING: You have a CUDA device, so you should probably run with --gpu [gpu id]")
    if opt.gpu >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)

    # Creating data loaders
    mean = np.array([0.44, 0.44, 0.44])
    std = np.array([0.19, 0.19, 0.19])

    target_root = os.path.join(opt.dataroot, 'mnist/trainset')

    transform_target = transforms.Compose([transforms.Resize(opt.imageSize), transforms.ToTensor(), transforms.Normalize(mean,std)])
    target_test = dset.ImageFolder(root=target_root, transform=transform_target)
    targetloader = torch.utils.data.DataLoader(target_test, batch_size=opt.batchSize, shuffle=False, num_workers=2)

    nclasses = len(target_test.classes)

    # Creating and loading models
    netF = models._netF(opt)
    netC = models._netC(opt, nclasses)

    if opt.method == 'GTA':
        if opt.model_best == 0:
            netF_path = os.path.join(opt.checkpoint_dir, 'netF.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'netC.pth')
        else:
            netF_path = os.path.join(opt.checkpoint_dir, 'model_best_netF.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'model_best_netC.pth')

    elif opt.method == 'sourceonly':
        if opt.model_best == 0:
            netF_path = os.path.join(opt.checkpoint_dir, 'netF_sourceonly.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'netC_sourceonly.pth')
        else:
            netF_path = os.path.join(opt.checkpoint_dir, 'model_best_netF_sourceonly.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'model_best_netC_sourceonly.pth')
    else:
        raise ValueError('method argument should be sourceonly or GTA')

    netF.load_state_dict(torch.load(netF_path))
    netC.load_state_dict(torch.load(netC_path))

    if opt.gpu >= 0:
        netF.cuda()
        netC.cuda()

    # Testing
    netF.eval()
    netC.eval()

    total = 0
    correct = 0

    for inputs, labels in targetloader:
        if opt.gpu >= 0:
            inputs, labels = inputs.cuda(), labels.cuda()
        # BUG FIX: replaced deprecated Variable(..., volatile=True) with
        # torch.no_grad(), and dropped the unconditional labels.cuda() in the
        # comparison, which crashed when evaluating on CPU (opt.gpu < 0).
        with torch.no_grad():
            outC = netC(netF(inputs))
        _, predicted = torch.max(outC, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    test_acc = 100 * float(correct) / total
    print('Test Accuracy: %f %%' % (test_acc))
Ejemplo n.º 10
0
def main():
    """Evaluate a saved netF/netC pair on the held-out target subject's data."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSize',
                        type=int,
                        default=100,
                        help='input batch size')
    parser.add_argument('--nz',
                        type=int,
                        default=64,
                        help='size of the latent z vector')
    parser.add_argument('--gpu',
                        type=int,
                        default=-1,
                        help='GPU to use, -1 for CPU training')
    parser.add_argument('--checkpoint_dir',
                        default='results/models',
                        help='folder to load model checkpoints from')
    parser.add_argument(
        '--model_best',
        type=int,
        default=0,
        help=
        'Flag to specify whether to use the best validation model or last checkpoint| 1-model best, 0-current checkpoint'
    )

    opt = parser.parse_args()

    # GPU/CPU flags
    cudnn.benchmark = True
    if torch.cuda.is_available() and opt.gpu == -1:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --gpu [gpu id]"
        )
    if opt.gpu >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)

    # Creating data loader
    source_dataset, sourceval_dataset, target_dataset = dataset_load(person=1)
    targetloader = DataLoader(target_dataset,
                              batch_size=opt.batchSize,
                              shuffle=False,
                              num_workers=1)

    nclasses = 4

    # Creating and loading models
    netF = models._netF(opt)
    netC = models._netC(opt, nclasses)

    if opt.model_best == 0:
        netF_path = os.path.join(opt.checkpoint_dir, 'netF.pth')
        netC_path = os.path.join(opt.checkpoint_dir, 'netC.pth')
    else:
        netF_path = os.path.join(opt.checkpoint_dir, 'model_best_netF.pth')
        netC_path = os.path.join(opt.checkpoint_dir, 'model_best_netC.pth')

    netF.load_state_dict(torch.load(netF_path))
    netC.load_state_dict(torch.load(netC_path))

    if opt.gpu >= 0:
        netF.cuda()
        netC.cuda()

    # Testing
    netF.eval()
    netC.eval()

    total = 0
    correct = 0

    for inputs, labels in targetloader:
        if opt.gpu >= 0:
            inputs, labels = inputs.cuda(), labels.cuda()
        # BUG FIX: torch.no_grad() previously wrapped only the Variable
        # construction, so the forward pass still built an autograd graph;
        # it now covers the inference itself.
        with torch.no_grad():
            outC = netC(netF(inputs))
        _, predicted = torch.max(outC, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    test_acc = 100 * float(correct) / total
    print('Test Accuracy: %f %%' % (test_acc))
Ejemplo n.º 11
0
    def __init__(self, opt, nclasses, mean, std, source_trainloader, source_valloader, target_trainloader, target_valloader):
        """Set up the GTA trainer: data loaders, G/D/F/C networks (optionally
        DataParallel-wrapped), optional checkpoint resume, losses and optimizers.
        """
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.target_trainloader = target_trainloader
        self.target_valloader = target_valloader
        self.opt = opt
        self.mean = mean
        self.std = std
        self.best_val = 0

        # Defining networks and optimizers
        self.nclasses = nclasses
        self.netG = models._netG(opt, nclasses)
        self.netD = models._netD(opt, nclasses)
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            self.netG = nn.DataParallel(self.netG)
            self.netD = nn.DataParallel(self.netD)
            self.netF = nn.DataParallel(self.netF)
            self.netC = nn.DataParallel(self.netC)

        # Weight initialization
        self.netG.apply(utils.weights_init)
        self.netD.apply(utils.weights_init)
        self.netF.apply(utils.weights_init)
        self.netC.apply(utils.weights_init)

        # Optionally resume: each checkpoint is loaded only if its file exists,
        # overwriting the random initialization above.
        # NOTE(review): if the nets were wrapped in DataParallel above, state
        # dicts saved from unwrapped models will fail to load because of the
        # "module." key prefix — confirm how the checkpoints were saved.
        if opt.loadExisting != 0:

            netF_path = os.path.join(opt.checkpoint_dir, 'model_best_netF_sourceonly.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'model_best_netC_sourceonly.pth')

            netG_path = os.path.join(opt.checkpoint_dir, 'model_best_netG.pth')
            netD_path = os.path.join(opt.checkpoint_dir, 'model_best_netD.pth')
            if os.path.isfile(netF_path):
                self.netF.load_state_dict(torch.load(netF_path))
            if os.path.isfile(netC_path):
                self.netC.load_state_dict(torch.load(netC_path))
            if os.path.isfile(netG_path):
                self.netG.load_state_dict(torch.load(netG_path))
            if os.path.isfile(netD_path):
                self.netD.load_state_dict(torch.load(netD_path))

        # Defining loss criterions: cross-entropy for class labels, BCE for
        # the adversarial real/fake signal.
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if opt.gpu>=0:
            self.netD.cuda()
            self.netG.cuda()
            self.netF.cuda()
            self.netC.cuda()
            self.criterion_c.cuda()
            self.criterion_s.cuda()

        # Defining optimizers (Adam, shared lr/beta1 from opt)
        self.optimizerD = optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizerF = optim.Adam(self.netF.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizerC = optim.Adam(self.netC.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

        # Other variables: target values for the adversarial labels
        self.real_label_val = 1
        self.fake_label_val = 0
Ejemplo n.º 12
0
    def __init__(self, opt, nclasses, mean, std, source_trainloader,
                 source_valloader, targetloader, class_balance, augment):
        """GTA trainer with optional augmentation and a class-balance loss term."""
        # Data loaders and bookkeeping state
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.targetloader = targetloader
        self.opt = opt
        self.mean = mean
        self.std = std
        self.best_val = 0

        # Generator, discriminator, feature extractor and classifier
        self.nclasses = nclasses
        self.netG = models._netG(opt, nclasses)
        self.netD = models._netD(opt, nclasses)
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses)
        for net in (self.netG, self.netD, self.netF, self.netC):
            net.apply(utils.weights_init)

        # Cross-entropy for class labels, BCE for real/fake scores
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if opt.gpu >= 0:
            for module in (self.netD, self.netG, self.netF, self.netC,
                           self.criterion_c, self.criterion_s):
                module.cuda()

        # Adam optimizers sharing lr/beta1 from the options
        def _adam(net):
            return optim.Adam(net.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

        self.optimizerD = _adam(self.netD)
        self.optimizerG = _adam(self.netG)
        self.optimizerF = _adam(self.netF)
        self.optimizerC = _adam(self.netC)

        # Target values for the adversarial labels
        self.real_label_val = 1
        self.fake_label_val = 0

        # Class-balance machinery: a uniform reference distribution compared
        # against batch predictions via a robust BCE.
        self.augment = augment
        self.class_balance = class_balance
        self.uniform_cls_distribution = torch.ones(self.nclasses) * float(
            1.0 / self.nclasses)
        self.cls_bal_fn = robust_binary_crossentropy
        if self.opt.gpu >= 0:
            self.uniform_cls_distribution = self.uniform_cls_distribution.cuda()
Ejemplo n.º 13
0
def cross_validation_loss(feature_network_path, predict_network_path,
                          src_cls_list, target_path, val_cls_list, class_num,
                          resize_size, crop_size, batch_size, use_gpu, opt):
    """Compute the importance-weighted cross-validation risk over all classes.

    Pseudo-labels the target file, then, per class: extracts source/target/
    validation features with netF, computes per-sample prediction losses on
    the validation split with netC, and accumulates get_dev_risk(weight,
    error) / class_num.

    :param feature_network_path: checkpoint path for netF
    :param predict_network_path: checkpoint path for netC
    :param src_cls_list: per-class lists of source "path label" lines
    :param target_path: file of target "path label" lines (labels replaced)
    :param val_cls_list: per-class lists of validation "path label" lines
    :param class_num: number of classes
    :param resize_size: image resize applied before inference
    :param crop_size: unused here; kept for interface compatibility
    :param batch_size: loader batch size
    :param use_gpu: move models/tensors to CUDA when truthy
    :param opt: options namespace for the model constructors
    :return: the accumulated cross-validation loss
    """
    target_list_no_label = open(target_path).readlines()
    tar_cls_list = []
    cross_val_loss = 0

    netF = models._netF(opt)
    netC = models._netC(opt, class_num)
    netF.load_state_dict(torch.load(feature_network_path))
    netC.load_state_dict(torch.load(predict_network_path))
    if use_gpu:
        netF.cuda()
        netC.cuda()
    mean = np.array([0.44, 0.44, 0.44])
    std = np.array([0.19, 0.19, 0.19])
    transform_target = transforms.Compose([
        transforms.Resize(resize_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    # Add pseudo-labels to the target data.
    # BUG FIX: arguments now follow get_label_list's declared order
    # (predict path, feature path, class_num, ...); the old call swapped the
    # two checkpoint paths and passed class_num in the position of opt.
    target_list = get_label_list(target_list_no_label, predict_network_path,
                                 feature_network_path, class_num, resize_size,
                                 crop_size, batch_size, use_gpu, opt)

    # Separate the pseudo-labelled target list by class.
    # NOTE(review): this filters labels 1..class_num while torch.max yields
    # 0-based labels — confirm the label convention before relying on it.
    for i in range(1, class_num + 1):
        tar_cls_list.append([
            j for j in target_list
            if int(j.split(" ")[1].replace("\n", "")) == i
        ])
    # Per-class loaders and risk computation
    for cls in range(class_num):
        dsets_src = ImageList(src_cls_list[cls], transform=transform_target)
        dset_loaders_src = util_data.DataLoader(dsets_src,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=2)

        dsets_tar = ImageList(tar_cls_list[cls], transform=transform_target)
        dset_loaders_tar = util_data.DataLoader(dsets_tar,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=2)

        dsets_val = ImageList(val_cls_list[cls], transform=transform_target)
        dset_loaders_val = util_data.DataLoader(dsets_val,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=2)

        # Source features.
        # BUG FIX throughout this function: iterator.next() is Python 2
        # only; replaced with the next() builtin.
        iter_src = iter(dset_loaders_src)
        src_input = next(iter_src)[0]
        if use_gpu:
            src_input = Variable(src_input).cuda()
        else:
            src_input = Variable(src_input)
        src_feature = netF(src_input)
        src_feature_de = src_feature.detach().cpu().numpy()
        for count_src in range(len(dset_loaders_src) - 1):
            src_input = next(iter_src)[0]
            if use_gpu:
                src_input = Variable(src_input).cuda()
            else:
                src_input = Variable(src_input)
            src_feature_new = netF(src_input)
            src_feature_new_de = src_feature_new.detach().cpu().numpy()
            src_feature_de = np.append(src_feature_de,
                                       src_feature_new_de,
                                       axis=0)

        # Target features
        iter_tar = iter(dset_loaders_tar)
        tar_input = next(iter_tar)[0]
        if use_gpu:
            tar_input = Variable(tar_input).cuda()
        else:
            tar_input = Variable(tar_input)
        tar_feature = netF(tar_input)
        tar_feature_de = tar_feature.detach().cpu().numpy()
        for count_tar in range(len(dset_loaders_tar) - 1):
            tar_input = next(iter_tar)[0]
            if use_gpu:
                tar_input = Variable(tar_input).cuda()
            else:
                tar_input = Variable(tar_input)
            tar_feature_new = netF(tar_input)
            tar_feature_new_de = tar_feature_new.detach().cpu().numpy()
            tar_feature_de = np.append(tar_feature_de,
                                       tar_feature_new_de,
                                       axis=0)

        # Validation features and per-sample prediction losses
        iter_val = iter(dset_loaders_val)
        val_input, val_labels = next(iter_val)
        if use_gpu:
            val_input, val_labels = Variable(val_input).cuda(), Variable(
                val_labels).cuda()
        else:
            val_input, val_labels = Variable(val_input), Variable(val_labels)
        val_feature = netF(val_input)
        pred_label = netC(netF(val_input))
        val_feature_de = val_feature.detach().cpu().numpy()

        w = pred_label[0].shape[0]
        error = np.zeros(1)

        error[0] = predict_loss(cls, pred_label[0].reshape(1, w)).item()
        error = error.reshape(1, 1)
        for num_image in range(1, len(pred_label)):
            single_pred_label = pred_label[num_image]
            w = single_pred_label.shape[0]
            error = np.append(
                error,
                [[predict_loss(cls, single_pred_label.reshape(1, w)).item()]],
                axis=0)
        for count_val in range(len(dset_loaders_val) - 1):
            val_input, val_labels = next(iter_val)
            if use_gpu:
                val_input, val_labels = Variable(val_input).cuda(), Variable(
                    val_labels).cuda()
            else:
                val_input, val_labels = Variable(val_input), Variable(
                    val_labels)
            val_feature_new = netF(val_input)

            val_feature_new_de = val_feature_new.detach().cpu().numpy()
            val_feature_de = np.append(val_feature_de,
                                       val_feature_new_de,
                                       axis=0)
            pred_label = netC(netF(val_input))
            for num_image in range(len(pred_label)):
                single_pred_label = pred_label[num_image]
                w = single_pred_label.shape[0]
                # cls should be a value, new_labels should be a [[x]] tensor format, the input format required by predict_loss
                error = np.append(error, [[
                    predict_loss(cls, single_pred_label.reshape(1, w)).item()
                ]],
                                  axis=0)
            # error should be a (N, 1) numpy array, the input format required by get_dev_risk

        weight = get_weight(src_feature_de, tar_feature_de, val_feature_de)
        cross_val_loss = cross_val_loss + get_dev_risk(weight,
                                                       error) / class_num

    return cross_val_loss
Ejemplo n.º 14
0
def cross_validation_loss(feature_network_path, predict_network_path, src_list,
                          target_path, val_list, class_num, resize_size,
                          crop_size, batch_size, use_gpu, opt):
    """
    Compute the Deep Embedded Validation (DEV) cross-validation loss.

    Loads the saved feature extractor (netF) and classifier (netC), extracts
    features for the source, target and validation sets, computes a
    per-sample prediction loss on the validation set, and returns the
    importance-weighted DEV risk.

    :param feature_network_path: path to the saved netF state dict.
    :param predict_network_path: path to the saved netC state dict.
    :param src_list: source image-list entries.
    :param target_path: path of a text file listing target-domain images.
    :param val_list: validation image-list entries (flattened to 1-D below).
    :param class_num: number of classes (sizes netC's output).
    :param resize_size: edge length images are resized to.
    :param crop_size: unused here; kept for call-site compatibility.
    :param batch_size: batch size shared by the three data loaders.
    :param use_gpu: run networks and tensors on CUDA when True.
    :param opt: option namespace forwarded to the model constructors.
    :return: scalar DEV risk estimate.
    """
    netF = models._netF(opt)
    netC = models._netC(opt, class_num)
    netF.load_state_dict(torch.load(feature_network_path))
    netC.load_state_dict(torch.load(predict_network_path))
    if use_gpu:
        netF.cuda()
        netC.cuda()

    val_list = seperate_data.dimension_rd(val_list)
    tar_list = open(target_path).readlines()

    mean = np.array([0.44, 0.44, 0.44])
    std = np.array([0.19, 0.19, 0.19])
    transform_target = transforms.Compose([
        transforms.Resize(resize_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    # Evaluation-style loaders: deterministic order, no shuffling.
    dset_loaders_src = util_data.DataLoader(ImageList(
        src_list, transform=transform_target),
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=2)
    dset_loaders_val = util_data.DataLoader(ImageList(
        val_list, transform=transform_target),
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=2)
    dset_loaders_tar = util_data.DataLoader(ImageList(
        tar_list, transform=transform_target),
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=2)

    def extract_features(loader):
        # One netF forward pass per batch; features gathered on CPU as numpy.
        chunks = []
        for inputs, _ in loader:
            inputs = Variable(inputs).cuda() if use_gpu else Variable(inputs)
            chunks.append(netF(inputs).detach().cpu().numpy())
        return np.concatenate(chunks, axis=0)

    # BUG FIX: the original CPU branch of the target-feature loop assigned
    # each batch to src_input instead of tar_input, so CPU runs repeatedly
    # fed a stale tensor to netF. Iterating the loaders directly also removes
    # the Python-2-only iterator.next() calls, the duplicated first-batch
    # special case, and leftover debug prints.
    src_feature_de = extract_features(dset_loaders_src)
    tar_feature_de = extract_features(dset_loaders_tar)

    # Validation pass: collect features and the per-sample prediction loss.
    val_chunks = []
    error_rows = []
    for val_input, val_labels in dset_loaders_val:
        if use_gpu:
            val_input, val_labels = Variable(val_input).cuda(), Variable(
                val_labels).cuda()
        else:
            val_input, val_labels = Variable(val_input), Variable(val_labels)
        # netF is called twice, as in the original: once for the stored
        # features and once for the classifier input.
        val_feature = netF(val_input)
        val_chunks.append(val_feature.detach().cpu().numpy())
        pred_label = netC(netF(val_input))
        for single_val_label, single_pred_label in zip(val_labels, pred_label):
            w = single_pred_label.shape[0]
            # predict_loss expects a scalar label and a (1, w) prediction row.
            error_rows.append([
                predict_loss(single_val_label.item(),
                             single_pred_label.reshape(1, w)).item()
            ])
    val_feature_de = np.concatenate(val_chunks, axis=0)
    # (N, 1) array, the input format required by get_dev_risk.
    error = np.array(error_rows)

    weight = get_weight(src_feature_de, tar_feature_de, val_feature_de)
    cross_val_loss = get_dev_risk(weight, error)

    return cross_val_loss
Ejemplo n.º 15
0
    def __init__(self, opt, nclasses, mean, std, source_trainloader,
                 source_valloader, target_trainloader, target_valloader,
                 res_dir):
        """Set up the GTA networks, loss criterions, optimizers and logging.

        Stores the data loaders and options, builds netG/netD/netF/netC,
        initializes their weights, logs the architectures, and creates one
        Adam optimizer per network.
        """
        # Data loaders and bookkeeping.
        self.source_trainloader = source_trainloader
        self.source_valloader = source_valloader
        self.target_trainloader = target_trainloader
        self.target_valloader = target_valloader
        self.opt = opt
        self.best_val = 0
        self.nclasses = nclasses

        # Build the four networks and apply the shared weight initializer.
        self.netG = models._netG(opt, nclasses, flattens=opt.flattens)
        self.netD = models._netD(opt, nclasses)
        self.netF = models._netF(opt)
        self.netC = models._netC(opt, nclasses, flattens=opt.flattens)
        for network in (self.netG, self.netD, self.netF, self.netC):
            network.apply(utils.weights_init)

        logging.basicConfig(filename='{}/app.log'.format(res_dir),
                            level=logging.DEBUG,
                            format='%(asctime)s:%(levelname)s:%(message)s')

        # Dump each architecture to stdout and to the debug log.
        for header, network, footer in (('netG<<', self.netG, '>>\n'),
                                        ('netD<<', self.netD, '>>\n'),
                                        ('netF<<', self.netF, '>>\n'),
                                        ('netC<<', self.netC, '>>')):
            print(header)
            print(network)
            logging.debug(network)
            print(footer)

        # Loss criterions.
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()
        self.mmd_loss = MMD_loss()
        self.mse_loss = nn.MSELoss()

        # Move networks and criterions to the GPU when one is requested.
        if opt.gpu >= 0:
            for module in (self.netD, self.netG, self.netF, self.netC,
                           self.criterion_c, self.criterion_s):
                module.cuda()

        def make_adam(network):
            # One Adam optimizer per network, sharing lr/beta1 from opt.
            return optim.Adam(network.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

        self.optimizerD = make_adam(self.netD)
        self.optimizerG = make_adam(self.netG)
        self.optimizerF = make_adam(self.netF)
        self.optimizerC = make_adam(self.netC)

        # Target values used for the discriminator's real/fake labels.
        self.real_label_val = 1
        self.fake_label_val = 0
Ejemplo n.º 16
0
    # if training mnistm to svhn please use transform2 on target_val & target_train
    #########################
    # Build the target-domain test loader (deterministic order: evaluation).
    target_test = GetLoader(img_root=image_dir, transform=transform)
    print('# images in dataset:', len(target_test))
    targetloader = torch.utils.data.DataLoader(target_test,
                                               batch_size=consts.batch_size,
                                               shuffle=False,
                                               num_workers=consts.workers)
    # Peek at one batch to sanity-check the tensor shape/dtype.
    sample_batch = next(iter(targetloader))
    print('Image tensor in each batch:', sample_batch[0].shape,
          sample_batch[0].dtype)

    ######################################################################
    # load model
    ######################################################################
    # NOTE(review): _netF/_netC take no arguments here, unlike the other
    # snippets in this file — presumably a different models module; confirm.
    netF = models._netF()
    netC = models._netC()
    netF_path = os.path.join(model_path, 'model_best_netF.pth')
    netC_path = os.path.join(model_path, 'model_best_netC.pth')
    netF.load_state_dict(torch.load(netF_path))
    netC.load_state_dict(torch.load(netC_path))

    ######################################################################
    # predict
    ######################################################################
    if cuda:
        netF.cuda()
        netC.cuda()
    # Testing
    # eval() switches dropout/batch-norm layers to inference behavior.
    netF.eval()
    netC.eval()
Ejemplo n.º 17
0
def main():
    """Evaluate a trained GTA / source-only model on the target domain.

    Parses command-line options, builds the target test loader for the
    inferred adaptation direction (svhn->mnist, usps->mnist or mnist->usps),
    loads the saved netF/netC checkpoints, prints target-domain accuracy,
    and finally prints a model-selection score computed by the chosen
    validation method (Source_Risk, Dev_icml or Dev).
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot',
                        required=True,
                        help='path to source dataset')
    parser.add_argument('--workers',
                        type=int,
                        help='number of data loading workers',
                        default=2)
    parser.add_argument('--batchSize',
                        type=int,
                        default=100,
                        help='input batch size')
    parser.add_argument(
        '--imageSize',
        type=int,
        default=32,
        help='the height / width of the input image to network')
    parser.add_argument('--nz',
                        type=int,
                        default=512,
                        help='size of the latent z vector')
    parser.add_argument(
        '--ngf',
        type=int,
        default=64,
        help='Number of filters to use in the generator network')
    parser.add_argument(
        '--ndf',
        type=int,
        default=64,
        help='Number of filters to use in the discriminator network')
    parser.add_argument('--gpu',
                        type=int,
                        default=1,
                        help='GPU to use, -1 for CPU training')
    parser.add_argument('--checkpoint_dir',
                        default='results/models',
                        help='folder to load model checkpoints from')
    parser.add_argument('--method',
                        default='GTA',
                        help='Method to evaluate| GTA, sourceonly')
    parser.add_argument(
        '--model_best',
        type=int,
        default=0,
        help=
        'Flag to specify whether to use the best validation model or last checkpoint| 1-model best, 0-current checkpoint'
    )
    parser.add_argument('--src_path',
                        type=str,
                        default='digits/server_svhn_list.txt',
                        help='path for source dataset txt file')
    parser.add_argument('--tar_path',
                        type=str,
                        default='digits/server_mnist_list.txt',
                        help='path for target dataset txt file')
    parser.add_argument('--val_method',
                        type=str,
                        default='Source_Risk',
                        choices=['Source_Risk', 'Dev_icml', 'Dev'])
    opt = parser.parse_args()

    # GPU/CPU flags
    cudnn.benchmark = True
    if torch.cuda.is_available() and opt.gpu == -1:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --gpu [gpu id]"
        )
    # BUG FIX: use_gpu was previously assigned only inside the opt.gpu >= 0
    # branch, so CPU-only runs raised NameError when use_gpu was forwarded to
    # the validation routines at the bottom of this function.
    use_gpu = opt.gpu >= 0
    if use_gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)

    # Creating data loaders
    mean = np.array([0.44, 0.44, 0.44])
    std = np.array([0.19, 0.19, 0.19])

    # Infer the adaptation direction from the list-file names.
    if 'svhn' in opt.src_path and 'mnist' in opt.tar_path:
        test_adaptation = 's->m'
    elif 'usps' in opt.src_path and 'mnist' in opt.tar_path:
        test_adaptation = 'u->m'
    else:
        test_adaptation = 'm->u'

    if test_adaptation == 'u->m' or test_adaptation == 's->m':
        # Target is MNIST: read class folders from disk.
        target_root = os.path.join(opt.dataroot, 'mnist/trainset')

        transform_target = transforms.Compose([
            transforms.Resize(opt.imageSize),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
        target_test = dset.ImageFolder(root=target_root,
                                       transform=transform_target)
        nclasses = len(target_test.classes)
    elif test_adaptation == 'm->u':
        # Target is USPS: grayscale images replicated to 3 channels.
        transform_usps = transforms.Compose([
            transforms.Resize(opt.imageSize),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.44, ), (0.19, ))
        ])
        target_test = usps.USPS(root=opt.dataroot,
                                train=True,
                                transform=transform_usps,
                                download=True)
        nclasses = 10
    targetloader = torch.utils.data.DataLoader(target_test,
                                               batch_size=opt.batchSize,
                                               shuffle=False,
                                               num_workers=2)

    # Creating and loading models
    netF = models._netF(opt)
    netC = models._netC(opt, nclasses)

    if opt.method == 'GTA':
        if opt.model_best == 0:
            netF_path = os.path.join(opt.checkpoint_dir, 'netF.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'netC.pth')
        else:
            netF_path = os.path.join(opt.checkpoint_dir, 'model_best_netF.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'model_best_netC.pth')

    elif opt.method == 'sourceonly':
        if opt.model_best == 0:
            netF_path = os.path.join(opt.checkpoint_dir, 'netF_sourceonly.pth')
            netC_path = os.path.join(opt.checkpoint_dir, 'netC_sourceonly.pth')
        else:
            netF_path = os.path.join(opt.checkpoint_dir,
                                     'model_best_netF_sourceonly.pth')
            netC_path = os.path.join(opt.checkpoint_dir,
                                     'model_best_netC_sourceonly.pth')
    else:
        raise ValueError('method argument should be sourceonly or GTA')

    netF.load_state_dict(torch.load(netF_path))
    netC.load_state_dict(torch.load(netC_path))

    if opt.gpu >= 0:
        netF.cuda()
        netC.cuda()

    # Testing: target-domain classification accuracy.
    netF.eval()
    netC.eval()

    total = 0
    correct = 0

    for i, datas in enumerate(targetloader):
        inputs, labels = datas
        if opt.gpu >= 0:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputv, labelv = Variable(inputs, volatile=True), Variable(labels)

        outC = netC(netF(inputv))
        _, predicted = torch.max(outC.data, 1)
        total += labels.size(0)
        # BUG FIX: comparing against labels.cuda() crashed CPU-only runs;
        # labels is already on the correct device at this point.
        correct += (predicted == labels).sum().item()

    test_acc = 100 * float(correct) / total
    print('Test Accuracy: %f %%' % (test_acc))

    # Split the source list per class and flatten it for the ICML-Dev method.
    cls_source_list, cls_validation_list = sep.split_set(
        opt.src_path, nclasses)
    source_list = sep.dimension_rd(cls_source_list)
    # netC(netF(inputv)) computes the classification output;
    # netF(inputv) computes the feature;
    # load_state_dict(torch.load(path)) is how the networks are restored.
    # The crop-size argument (224) is unused by the validation routines.
    if opt.val_method == 'Source_Risk':
        cv_loss = source_risk.cross_validation_loss(
            netF_path, netC_path, cls_source_list, opt.tar_path,
            cls_validation_list, nclasses, opt.imageSize, 224, opt.batchSize,
            use_gpu, opt)
    elif opt.val_method == 'Dev_icml':
        cv_loss = dev_icml.cross_validation_loss(netF_path, netC_path,
                                                 source_list, opt.tar_path,
                                                 cls_validation_list, nclasses,
                                                 opt.imageSize, 224,
                                                 opt.batchSize, use_gpu, opt)
    elif opt.val_method == 'Dev':
        cv_loss = dev.cross_validation_loss(netF_path, netC_path,
                                            cls_source_list, opt.tar_path,
                                            cls_validation_list, nclasses,
                                            opt.imageSize, 224, opt.batchSize,
                                            use_gpu, opt)
    print(cv_loss)
Ejemplo n.º 18
0
    def __init__(self, args):
        """Build dataloaders, networks and optimizers for the chosen
        experiment (args.exp) and algorithm (args.alg).

        NOTE(review): this constructor may continue past the visible end of
        this snippet; documented as seen.
        """

        self.args = args
        Path(args.saver_root).mkdir(parents=True, exist_ok=True)

        # Experiment selection; only MNIST -> MNIST-M is implemented here.
        if args.exp == 'MNIST':
            self.log('Running MNIST -> MNIST-M')
            dataloders = datasets.form_mnist_dataset(args)
        elif args.exp == 'VISDA':
            # TODO: Include VISDA
            pass

        self.s_trainloader = dataloders['s_train']
        self.s_valloader = dataloders['s_val']
        self.t_trainloader = dataloders['t_train']
        self.t_valloader = dataloders['t_val']
        self.s_trainloader_classwise = dataloders['s_classwise']
        nclasses = self.nclasses = dataloders['nclasses']

        # One persistent iterator per class-wise source loader, so batches
        # can be drawn class-by-class during training.
        self.s_classwise_iterators = []
        for i in range(len(self.s_trainloader_classwise)):
            self.s_classwise_iterators.append(
                iter(self.s_trainloader_classwise[i]))

        ###############################
        # Create models
        self.netF = models._netF().cuda()
        self.netC = models._netC(self.nclasses).cuda()
        # Wasserstein-style algorithms use an unbounded critic head.
        if args.alg == 'wasserstein' or args.alg == 'NW':
            self.netD = models._netD_wasserstein().cuda()
        else:
            self.netD = models._netD().cuda()

        # Create optimizers
        if args.adam:
            self.optimizerF = optim.Adam(self.netF.parameters(),
                                         lr=args.lr,
                                         betas=(0.5, 0.999))
            self.optimizerC = optim.Adam(self.netC.parameters(),
                                         lr=args.lr,
                                         betas=(0.5, 0.999))
            self.optimizerD = optim.Adam(self.netD.parameters(),
                                         lr=args.lr,
                                         betas=(0.5, 0.999))
            if args.alg == 'NW':
                # Learnable class-prior vector, initialized uniform.
                self.pi = nn.Parameter(
                    torch.FloatTensor(nclasses).fill_(1.0 / nclasses).cuda())
                # NOTE(review): optim expects an iterable of parameters;
                # iter([...]) works, but a plain list is the usual form.
                self.optimizerPi = optim.Adam(iter([self.pi]),
                                              lr=args.lrPi,
                                              betas=(0.5, 0.999))
        else:
            self.optimizerF = optim.SGD(self.netF.parameters(),
                                        lr=args.lr,
                                        momentum=0.9)
            self.optimizerC = optim.SGD(self.netC.parameters(),
                                        lr=args.lr,
                                        momentum=0.9)
            self.optimizerD = optim.SGD(self.netD.parameters(),
                                        lr=args.lr,
                                        momentum=0.9)
            if args.alg == 'NW':
                # Learnable class-prior vector, initialized uniform.
                self.pi = nn.Parameter(
                    torch.FloatTensor(nclasses).fill_(1.0 / nclasses).cuda())
                self.optimizerPi = optim.SGD(iter([self.pi]), lr=args.lrPi)