Example #1
# encoder/decoder pair per modality, sharing a latent of size n_shared
encA = EncoderA(args.wseed, zShared_dim=args.n_shared)
decA = DecoderA(args.wseed, zShared_dim=args.n_shared)
encB = EncoderB(args.wseed, zShared_dim=args.n_shared)
decB = DecoderB(args.wseed, zShared_dim=args.n_shared)
if CUDA:
    encA.cuda()
    decA.cuda()
    encB.cuda()
    decB.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)
    cuda_tensors(encB)
    cuda_tensors(decB)

optimizer = torch.optim.Adam(
    list(encB.parameters()) + list(decB.parameters()) + list(encA.parameters()) + list(decA.parameters()),
    lr=args.lr)
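# A single Adam instance over the concatenated parameter lists trains all four
# networks jointly. If the modalities needed different learning rates, Adam's
# parameter groups would be one alternative; a commented sketch, not from the
# original (the 0.1 factor is an assumption):
#
# optimizer = torch.optim.Adam([
#     {'params': list(encA.parameters()) + list(decA.parameters())},
#     {'params': list(encB.parameters()) + list(decB.parameters()), 'lr': args.lr * 0.1},
# ], lr=args.lr)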


#
# def elbo(q, pA, pB, lamb=1.0, annealing_factor=1.0):
#     muA_own = q['sharedA'].dist.loc.squeeze(0)
#     stdA_own = q['sharedA'].dist.scale.squeeze(0)
#     muB_own = q['sharedB'].dist.loc.squeeze(0)
#     stdB_own = q['sharedB'].dist.scale.squeeze(0)
#
#     # from each of modality
#     reconst_loss_A = pA['images_own'].loss.mean()
#     kl_A = 0.5 * torch.sum(1 + torch.log(stdA_own ** 2 + EPS) - muA_own ** 2 - stdA_own ** 2,
#                            dim=1).mean()
#
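# The commented-out kl_A above is the negated closed-form KL divergence between
# the diagonal Gaussian posterior N(mu, std^2) and a standard normal prior.
# A minimal standalone sketch of the same formula (the function name and the
# [batch, z_dim] shapes are assumptions):
import torch

def neg_gaussian_kl(mu, std, eps=1e-9):
    # 0.5 * sum(1 + log std^2 - mu^2 - std^2) = -KL( N(mu, std^2) || N(0, I) ),
    # summed over latent dims (dim=1), averaged over the batch
    return 0.5 * torch.sum(1 + torch.log(std ** 2 + eps) - mu ** 2 - std ** 2, dim=1).mean()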
Example #2
def cuda_tensors(obj):
    # move every tensor attribute of obj onto the GPU
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


# encoder for modality A: a private latent plus per-attribute shared latents
encA = EncoderA(args.wseed,
                zPrivate_dim=args.n_privateA,
                zSharedAttr_dim=ATTR_DIM)

if CUDA:
    encA.cuda()
    cuda_tensors(encA)
    if len(args.gpu) > 2:
        # args.gpu is a comma-separated device string, e.g. '0,1';
        # more than two characters implies multiple GPUs
        print('multi: ' + args.gpu)
        encA = nn.DataParallel(encA)

optimizer = torch.optim.Adam(list(encA.parameters()), lr=args.lr)
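# Once encA is wrapped in nn.DataParallel, .parameters() still reaches the
# underlying module, so the optimizer line above works unchanged; custom
# attributes, however, live on the wrapper's .module (inner_encA is a
# hypothetical name):
inner_encA = encA.module if isinstance(encA, nn.DataParallel) else encA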


def train(data, encA, optimizer):
    epoch_elbo = epoch_correct = 0.0
    epoch_pred = np.zeros(sum(ATTR_DIM))  # accumulator sized to the total attribute dimensionality
    encA.train()

    N = 0
    for b, (images, attributes, label) in enumerate(data):
        # process only full batches; skip the final partial one
        if images.size()[0] == args.batch_size:
            N += 1
            attributes = attributes.float()
            optimizer.zero_grad()
            if CUDA:
                images = images.cuda()
Example #3
if CUDA:
    encA.cuda()
    decA.cuda()
    cuda_tensors(encA)
    cuda_tensors(decA)

    for i in range(N_ATTR):
        encB[i].cuda()
        decB[i].cuda()
        cuda_tensors(encB[i])
        cuda_tensors(decB[i])

# collect the parameters of every per-attribute encoder/decoder pair
attr_params = []
for i in range(N_ATTR):
    attr_params.extend(encB[i].parameters())
    attr_params.extend(decB[i].parameters())
optimizer = torch.optim.Adam(list(encA.parameters()) +
                             list(decA.parameters()) + attr_params,
                             lr=args.lr)
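# The attr_params loop above can be written more compactly with
# itertools.chain; a sketch assuming the same encB/decB module lists
# (attr_params_alt is a hypothetical name):
import itertools

attr_params_alt = list(itertools.chain.from_iterable(
    list(encB[i].parameters()) + list(decB[i].parameters())
    for i in range(N_ATTR)))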


def elbo(q,
         pA,
         pB,
         lamb=1.0,
         beta1=(1.0, 1.0, 1.0),
         beta2=(1.0, 1.0, 1.0),
         bias=1.0):
    # from each of modality
    reconst_loss_A, kl_A = probtorch.objectives.mws_tcvae.elbo(
        q,
        pA,
Example #4

def cuda_tensors(obj):
    # move every tensor attribute of obj onto the GPU
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())
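# cuda_tensors complements nn.Module.cuda(), which only moves registered
# parameters and buffers: plain tensor attributes stored on the model would
# otherwise stay on the CPU. A toy sketch (the class and attribute are
# hypothetical, and a CUDA device is assumed):
#
# class Toy:
#     def __init__(self):
#         self.mask = torch.ones(3)  # plain attribute, not a registered buffer
#
# toy = Toy()
# cuda_tensors(toy)  # toy.mask now lives on the GPU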


encA = EncoderA(args.wseed)
if CUDA:
    encA.cuda()
    cuda_tensors(encA)

optimizer = torch.optim.Adam(
    list(encA.parameters()),
    lr=args.lr)


def train(data, encA, optimizer):
    encA.train()
    N = 0
    total_loss = 0
    for b, (images, labels) in enumerate(data):
        # process only full batches; skip the final partial one
        if images.size()[0] == args.batch_size:
            N += 1
            optimizer.zero_grad()

            # one-hot encode the labels, then clamp for numerical stability
            labels_onehot = torch.zeros(args.batch_size, 10)
            labels_onehot.scatter_(1, labels.unsqueeze(1), 1)
            labels_onehot = torch.clamp(labels_onehot, EPS, 1 - EPS)
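            # The scatter_-based construction above builds one-hot targets and
            # clamps them into [EPS, 1 - EPS] so a log-based likelihood stays
            # finite. An equivalent sketch with torch.nn.functional.one_hot
            # (import torch.nn.functional as F; EPS is defined elsewhere in
            # the original source):
            # labels_onehot = torch.clamp(
            #     F.one_hot(labels, num_classes=10).float(), EPS, 1 - EPS)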