Example #1
    datasets.MNIST(DATA_PATH, train=False, download=True,
                   transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True)
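# (shuffle=True on this evaluation loader only randomizes batch order; it is
# harmless, though Example #3 below uses the more conventional shuffle=False.)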

print('>>> data loaded')
print('train: ', len(train_data.dataset))
print('test: ', len(test_data.dataset))

def cuda_tensors(obj):
    # module.cuda() moves registered parameters and buffers only; this helper
    # also migrates any torch.Tensor stored as a plain attribute.
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, zShared_dim=args.n_shared)
decA = DecoderA(args.wseed, zShared_dim=args.n_shared)
encB = EncoderB(args.wseed, zShared_dim=args.n_shared)
decB = DecoderB(args.wseed, zShared_dim=args.n_shared)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)

optimizer = torch.optim.Adam(
    list(encB.parameters()) + list(decB.parameters()) + list(encA.parameters()) + list(decA.parameters()),
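    # (assumed completion: Examples #6, #8, and #11 pass the learning rate
    # from args.lr in the same joint-optimizer pattern)
    lr=args.lr)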
Example #2
print('test: ', len(test_data.dataset))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_VAL = (len(val_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)
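# The constants above apply the finite-sample correction (N - 1) / (B - 1)
# that probtorch-style objectives accept as a bias term when mini-batch
# estimates stand in for full-dataset sums (an inference from the formula;
# e.g. N = 60000 and B = 100 give 59999 / 99 ≈ 606.05).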


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_private,
                zShared_dim=args.n_shared)
decA = DecoderA(args.wseed,
                zPrivate_dim=args.n_private,
                zShared_dim=args.n_shared)

encB = []
decB = []
for _ in range(N_ATTR):
    encB.append(EncoderB(args.wseed, num_attr=1, zShared_dim=args.n_shared))
    decB.append(DecoderB(args.wseed, num_attr=1, zShared_dim=args.n_shared))

if CUDA:
    encA.cuda()
    decA.cuda()
    cuda_tensors(encA)
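    # encB and decB are plain Python lists of per-attribute modules, so the
    # CUDA handling must be applied element-wise (or the lists wrapped in
    # nn.ModuleList so .cuda() registers them); a minimal sketch:
    for enc, dec in zip(encB, decB):
        enc.cuda()
        dec.cuda()
        cuda_tensors(enc)
        cuda_tensors(dec)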
Example #3
test_data = torch.utils.data.DataLoader(DIGIT('./data', train=False),
                                        batch_size=args.batch_size,
                                        shuffle=False)

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)
decA = DecoderA(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)

encB = EncoderB(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)
decB = DecoderB(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)

optimizer = torch.optim.Adam(list(encB.parameters()) +
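                             # (assumed completion, mirroring Example #1's
                             # joint-optimizer pattern)
                             list(decB.parameters()) +
                             list(encA.parameters()) + list(decA.parameters()),
                             lr=args.lr)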
Example #4
                                       shuffle=True,
                                       num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)
decA = DecoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)
encB = EncoderB(args.wseed,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)
decB = DecoderB(args.wseed,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)

if CUDA:
    encA.cuda()
    decA.cuda()
Example #5
test_data = torch.utils.data.DataLoader(datasets(path, ATTR_IDX, train=False, crop=1.2),
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, zPrivate_dim=args.n_privateA, zSharedAttr_dim=ATTR_DIM)
decA = DecoderA(args.wseed, zPrivate_dim=args.n_privateA, zSharedAttr_dim=ATTR_DIM)
encB = EncoderB(args.wseed, zSharedAttr_dim=ATTR_DIM)
decB = DecoderB(args.wseed, zSharedAttr_dim=ATTR_DIM)

if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)
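    # args.gpu is a comma-separated string of device ids (e.g. "0,1"), so a
    # string longer than two characters implies more than one GPU.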
    if len(args.gpu) > 2:
        print('multi: ' + args.gpu)
Example #6
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed)
decA = DecoderA(args.wseed)

if CUDA:
    encA.cuda()
    decA.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    if len(args.gpu) > 2:
        print('multi: ' + args.gpu)
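        # DataParallel replicates each module across the visible GPUs and
        # splits the batch along dim 0; custom attributes on the wrapped
        # modules are then reached through encA.module / decA.module.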
        encA = nn.DataParallel(encA)
        decA = nn.DataParallel(decA)

optimizer = torch.optim.Adam(list(encA.parameters()) + list(decA.parameters()),
                             lr=args.lr)
Example #7
                                        shuffle=True,
                                        num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zSharedAttr_dim=ATTR_DIM,
                zSharedLabel_dim=N_CLASSES)
decA = DecoderA2(args.wseed,
                 zPrivate_dim=args.n_privateA,
                 zSharedAttr_dim=ATTR_DIM,
                 zSharedLabel_dim=N_CLASSES)

if CUDA:
    encA.cuda()
    decA.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    if len(args.gpu) > 2:
        print('multi: ' + args.gpu)
        encA = nn.DataParallel(encA)
        decA = nn.DataParallel(decA)
Example #8
                                       shuffle=True,
                                       num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zSharedAttr_dim=ATTR_DIM)

if CUDA:
    encA.cuda()
    cuda_tensors(encA)
    if len(args.gpu) > 2:
        print('multi: ' + args.gpu)
        encA = nn.DataParallel(encA)

optimizer = torch.optim.Adam(list(encA.parameters()), lr=args.lr)


def train(data, encA, optimizer):
    epoch_elbo = epoch_correct = 0.0
    epoch_pred = np.zeros(sum(ATTR_DIM))
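    # sum(ATTR_DIM) suggests ATTR_DIM is a list of per-attribute dimensions,
    # so this buffer spans the concatenated attribute predictions (an
    # inference from the call).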
Example #9
    )

    viz_port = args.viz_port  # port number, e.g., 8097
    VIZ = visdom.Visdom(port=viz_port)
    viz_init()
    viz_ll_iter = args.viz_ll_iter
    viz_la_iter = args.viz_la_iter
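    # A Visdom server must already be listening on this port, e.g. one
    # started with: python -m visdom.server -port 8097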


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())

encA = EncoderA(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
decA = DecoderA(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
encB = EncoderB(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
decB = DecoderB(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)


optimizer = torch.optim.Adam(
    list(encB.parameters()) + list(decB.parameters()) +
    list(encA.parameters()) + list(decA.parameters()),
Example #10
train_data = torch.utils.data.DataLoader(datasets(partition='train', data_dir='../../data/celeba2',
                                                  image_transform=preprocess_data), batch_size=args.batch_size,
                                         shuffle=True)

test_data = torch.utils.data.DataLoader(datasets(partition='test', data_dir='../../data/celeba2',
                                                 image_transform=preprocess_data), batch_size=args.batch_size,
                                        shuffle=False)

def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, n_attr=N_ATTR)
if CUDA:
    encA.cuda()
    cuda_tensors(encA)

optimizer = torch.optim.Adam(
    list(encA.parameters()),
    lr=args.lr)


def train(data, encA, optimizer):
    encA.train()
    N = 0
    total_loss = 0
    for b, (images, attributes) in enumerate(data):
        N += 1
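        # N counts mini-batches, presumably so total_loss can be averaged per
        # batch at the end of the epoch.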
Example #11
                                        batch_size=args.batch_size,
                                        shuffle=True)

print('>>> data loaded')
print('train: ', len(train_data.dataset))
print('test: ', len(test_data.dataset))


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, zShared_dim=args.n_shared)
if CUDA:
    encA.cuda()
    cuda_tensors(encA)

optimizer = torch.optim.Adam(list(encA.parameters()), lr=args.lr)


def train(data, encA, epoch, optimizer, gt_std):
    encA.train()

    # Linear-probe-style fine-tuning: freeze every parameter, then re-enable
    # gradients only on the fc32 weight so the optimizer updates that layer.
    for param in encA.parameters():
        param.requires_grad = False
    encA.fc32.weight.requires_grad = True

    N = 0
Example #12
encA = EncoderImgF(args.wseed,
                   zPrivate_dim=args.n_privateA,
                   zShared_dim=args.n_shared,
                   num_hidden=args.num_hidden)
decA = DecoderImgF(args.wseed,
                   zPrivate_dim=args.n_privateA,
                   zShared_dim=args.n_shared,
                   num_hidden=args.num_hidden)
encB = EncoderAttr(args.wseed,
                   zShared_dim=args.n_shared,
                   num_hidden=args.num_hidden)
decB = DecoderAttr(args.wseed,
                   zShared_dim=args.n_shared,
                   num_hidden=args.num_hidden)

ae_encA = EncoderA(0)
ae_decA = DecoderA2(0)

if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    ae_encA.cuda()
    ae_decA.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)
    cuda_tensors(ae_encA)
    cuda_tensors(ae_decA)