Example #1
            dl = dl.reshape((latent_size, -1)).transpose()
        else:
            dl = dl.reshape((-1, latent_size))
    else:
        ql = latents
        dl = latents
        if args.transpose:
            dl = dl.reshape((latent_size, -1)).transpose()
        else:
            dl = dl.reshape((-1, latent_size))
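        # Note (assumption): args.transpose handles latents that were saved
        # channel-major as (latent_size, N); transposing yields the
        # (N, latent_size) layout the decoder expects, while the default
        # path assumes the flat buffer is already row-major.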

    with torch.no_grad():
        if args.gpu:
            # Decode on the GPU; if args.split > 0, decode in chunks of args.split rows.
            if args.split == 0:
                predict = test.decode(
                    torch.from_numpy(dl).to(device)).cpu().detach().numpy()
            else:
                split = args.split
                len_dl = dl.shape[0]
                start = 0
                predict = None
                while start < len_dl:
                    end = min(len_dl, start + split)
                    dl_split = dl[start:end]
                    predict_s = test.decode(
                        torch.from_numpy(dl_split).to(
                            device)).cpu().detach().numpy()
                    if start == 0:
                        predict = predict_s
                    else:
                        predict = np.concatenate((predict, predict_s))
                    start = end  # advance to the next chunk
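
The chunked decode above keeps GPU memory bounded: only args.split rows of latents are moved to the device at a time, and the partial reconstructions are concatenated on the host. Below is a minimal sketch of the same pattern with a placeholder decoder callable and chunk size; names such as decode_in_chunks are illustrative, not from this project.

import numpy as np
import torch

def decode_in_chunks(decoder, latents, device, split=1024):
    # latents: float32 array of shape (N, latent_size), decoded `split` rows at a time.
    parts = []
    with torch.no_grad():
        for start in range(0, latents.shape[0], split):
            chunk = torch.from_numpy(latents[start:start + split]).to(device)
            parts.append(decoder(chunk).cpu().numpy())
    return np.concatenate(parts)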
Example #2
if args.mode != "d":
    # Run the full model: outputs[0] is used as the reconstruction,
    # outputs[2] as the latent codes.
    with torch.no_grad():
        outputs = test(torch.from_numpy(picts).to(device))
    zs = outputs[2].cpu().detach().numpy()
    predict = outputs[0].cpu().detach().numpy()

else:
    # Mode "d": load latent codes from file and reshape to (N, lsize) for decoding.
    zs = np.fromfile(args.latents, dtype=np.float32)
    if args.transpose:
        zs = zs.reshape((args.lsize, -1)).transpose()
    else:
        zs = zs.reshape((-1, args.lsize))
    with torch.no_grad():
        if args.gpu:
            predict = test.decode(
                torch.from_numpy(zs).to(device)).cpu().detach().numpy()
        else:
            predict = test.decode(torch.from_numpy(zs)).detach().numpy()

print(zs.shape)
qs = []
us = []
recon = np.zeros((height, width), dtype=np.float32)
eb = args.error * rng
if args.normalize:
    # Undo the [-1, 1] normalization: map inputs and reconstructions back to [global_min, global_max].
    picts = (picts + 1) / 2
    picts = picts * (global_max - global_min) + global_min
    predict = (predict + 1) / 2
    predict = predict * (global_max - global_min) + global_min
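
The final block undoes a [-1, 1] normalization so the inputs and the reconstructions are compared in the original value range; eb appears to be an absolute error bound obtained by scaling the user-supplied relative error (args.error) by the value range rng. A small sketch of that inverse mapping with placeholder names (denormalize, data_min, data_max), assuming the forward normalization was 2 * (x - min) / (max - min) - 1:

import numpy as np

def denormalize(x, data_min, data_max):
    # Map values from [-1, 1] back to [data_min, data_max].
    return (x + 1) / 2 * (data_max - data_min) + data_min

# A relative error over the value range gives the absolute bound used for
# the later error check (placeholder numbers).
data_min, data_max = 0.0, 100.0
abs_error_bound = 1e-3 * (data_max - data_min)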