Example #1
                                        batch_size=args.batch_size,
                                        shuffle=False)

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)
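
# The two constants above compute (N - 1) / (B - 1): a minibatch offers
# only B - 1 within-batch partners per example, while the full dataset
# offers N - 1, so batch-level marginal estimates are rescaled by this
# ratio (the usual correction in probtorch-style objectives). A helper
# equivalent -- a sketch, not part of the original snippet:
def batch_bias(dataset_size, batch_size):
    return (dataset_size - 1) / (batch_size - 1)
# batch_bias(len(train_data.dataset), args.batch_size) == BIAS_TRAIN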


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())
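
# Why this helper exists: Module.cuda() moves registered parameters and
# buffers, but plain tensor attributes stay on the CPU. A minimal
# illustration (the _Toy class is hypothetical, not from this snippet):
class _Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 4)   # registered: moves with .cuda()
        self.table = torch.eye(4)   # plain attribute: .cuda() leaves it on CPU

if CUDA:
    _toy = _Toy().cuda()
    assert _toy.fc.weight.is_cuda   # moved by Module.cuda()
    assert not _toy.table.is_cuda   # stranded on the CPU
    cuda_tensors(_toy)              # ...until swept up by the helper
    assert _toy.table.is_cuda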


encA = EncoderA(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)
decA = DecoderA(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)

encB = EncoderB(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)
decB = DecoderB(args.wseed, zPrivate_dim=args.n_private, num_pixels=NUM_PIXELS)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)

optimizer = torch.optim.Adam(list(encB.parameters()) +
                             list(decB.parameters()) +
                             list(encA.parameters()) +
                             list(decA.parameters()),
                             lr=args.lr)
Example #2
                   transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True)

print('>>> data loaded')
print('train: ', len(train_data.dataset))
print('test: ', len(test_data.dataset))

def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, zShared_dim=args.n_shared)
decA = DecoderA(args.wseed, zShared_dim=args.n_shared)
encB = EncoderB(args.wseed, zShared_dim=args.n_shared)
decB = DecoderB(args.wseed, zShared_dim=args.n_shared)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)

optimizer = torch.optim.Adam(
    list(encB.parameters()) + list(decB.parameters()) + list(encA.parameters()) + list(decA.parameters()),
    lr=args.lr)
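
Since Adam accepts any iterable of parameters, the concatenated lists
above can equivalently be written with itertools.chain, which avoids
building the intermediate lists (an alternative sketch, not the original
code):

import itertools

optimizer = torch.optim.Adam(
    itertools.chain(encA.parameters(), decA.parameters(),
                    encB.parameters(), decB.parameters()),
    lr=args.lr)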
Example #3
                                        shuffle=True,
                                        num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed, zPrivate_dim=args.n_privateA, zSharedAttr_dim=ATTR_DIM)
decA = DecoderA(args.wseed, zPrivate_dim=args.n_privateA, zSharedAttr_dim=ATTR_DIM)
encB = EncoderB(args.wseed, zSharedAttr_dim=ATTR_DIM)
decB = DecoderB(args.wseed, zSharedAttr_dim=ATTR_DIM)

if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)
    if len(args.gpu) > 2:  # args.gpu is a string of ids, e.g. '0,1'; length > 2 means several devices
        print('multi: ' + args.gpu)
        encA = nn.DataParallel(encA)
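
A general PyTorch caveat once modules are wrapped this way (not specific
to this snippet): the wrapped module's own attributes and state_dict live
under .module, which matters when saving checkpoints:

torch.save(encA.module.state_dict(), 'encA.pt')  # unwrap before saving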
Example #4
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)
decA = DecoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)
encB = EncoderB(args.wseed,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)
decB = DecoderB(args.wseed,
                zShared_dim=args.n_shared,
                num_hidden=args.num_hidden)

if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
Example #5
                                        shuffle=True,
                                        num_workers=len(GPU))

BIAS_TRAIN = (len(train_data.dataset) - 1) / (args.batch_size - 1)
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed)
decA = DecoderA(args.wseed)

if CUDA:
    encA.cuda()
    decA.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    if len(args.gpu) > 2:  # args.gpu is a string of ids, e.g. '0,1'; length > 2 means several devices
        print('multi: ' + args.gpu)
        encA = nn.DataParallel(encA)
        decA = nn.DataParallel(decA)

optimizer = torch.optim.Adam(list(encA.parameters()) + list(decA.parameters()),
                             lr=args.lr)
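
With the optimizer built, a training step follows the standard PyTorch
pattern. The sketch below is ours: args.epochs and the encA/decA forward
interfaces are assumptions, since the snippet ends before the loop.

import torch.nn.functional as F

for epoch in range(args.epochs):             # assumed CLI argument
    for images, _ in train_data:
        if CUDA:
            images = images.cuda()
        optimizer.zero_grad()
        recon = decA(encA(images))           # hypothetical composition
        loss = F.binary_cross_entropy(recon, images)
        loss.backward()
        optimizer.step()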

Example #6
BIAS_TEST = (len(test_data.dataset) - 1) / (args.batch_size - 1)


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zSharedAttr_dim=ATTR_DIM,
                zSharedLabel_dim=N_CLASSES)
decA = DecoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zSharedAttr_dim=ATTR_DIM,
                zSharedLabel_dim=N_CLASSES)
encB = EncoderB(args.wseed,
                zSharedAttr_dim=ATTR_DIM,
                zSharedLabel_dim=N_CLASSES)
decB = DecoderB(args.wseed,
                zSharedAttr_dim=ATTR_DIM,
                zSharedLabel_dim=N_CLASSES)
encC = EncoderC(args.wseed, zSharedLabel_dim=N_CLASSES)
decC = DecoderC(args.wseed, zSharedLabel_dim=N_CLASSES)

if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
Example #7
    viz_port = args.viz_port  # port number, e.g., 8097
    VIZ = visdom.Visdom(port=viz_port)
    viz_init()
    viz_ll_iter = args.viz_ll_iter
    viz_la_iter = args.viz_la_iter
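
# viz_ll_iter / viz_la_iter above are presumably plotting intervals for
# the Visdom connection. A minimal helper for appending one point to a
# line plot (the helper itself is ours, not part of the snippet):
import numpy as np

def viz_append(win, x, y):
    VIZ.line(X=np.array([x]), Y=np.array([y]), win=win, update='append')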


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())

encA = EncoderA(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
decA = DecoderA(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
encB = EncoderB(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
decB = DecoderB(num_pixels=4096, num_hidden=512,
                zPrivate_dim=args.n_private, zShared_dim=args.n_shared)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)


optimizer = torch.optim.Adam(
    list(encB.parameters()) + list(decB.parameters()) +
    list(encA.parameters()) + list(decA.parameters()),
    lr=args.lr)