Example #1
def main(args):
    model = args.model.model
    model.load_state_dict(torch.load(args.weight))
    model.eval()

    if torch.cuda.is_available():
        model.cuda()

    vaemod = model.vae

    if args.random is not None:
        X = torch.tensor(
            np.random.normal(loc=0.0,
                             scale=args.random,
                             size=[args.batch_size, 320]).astype(np.float32))
        X = to_variable(X, torch.cuda.is_available())
        output = vaemod.decode(X)

    else:
        test_dataloader = DataLoader(args.dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False)

        for batch_idx, data in enumerate(test_dataloader):
            X, Yactual = data
            X = to_variable(X, torch.cuda.is_available())
            output = vaemod(X)
            break

    if isinstance(output, tuple):
        output = output[0]
    save_image(output, args.save)
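
All of the examples in this collection rely on a project-local to_variable helper (along with the usual torch, numpy, DataLoader, and save_image imports) that is not shown. In older PyTorch code such a helper typically wrapped a tensor in torch.autograd.Variable; a minimal modern equivalent that is consistent with the call sites here, but whose exact behaviour is an assumption, would be:

import torch

def to_variable(x, cuda=False, requires_grad=False):
    # Hypothetical helper: optionally move the tensor to the GPU and mark it
    # as requiring gradients, mirroring how the examples invoke it.
    if cuda and torch.cuda.is_available():
        x = x.cuda()
    if requires_grad:
        x.requires_grad_(True)
    return x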
Example #2
def runbatch(args, model, loss, batch):
    X, Yactual = batch
    X = to_variable(X, cuda=torch.cuda.is_available())
    Yactual = to_variable(Yactual, cuda=torch.cuda.is_available())

    Ypred = model(X)
    return loss(Ypred, Yactual, X), Ypred
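
runbatch itself never uses args, so it can be exercised with a stand-in model and loss. The driver below is an illustrative assumption (it also assumes the to_variable sketch after Example #1), not part of the original code:

import torch
import torch.nn as nn

def l1_loss(Ypred, Yactual, X):
    # toy reconstruction loss; the X argument is accepted but ignored
    return (Ypred - Yactual).abs().mean()

model = nn.Linear(4, 4)                            # stand-in model
batch = (torch.randn(2, 4), torch.randn(2, 4))     # (X, Yactual)
loss_value, Ypred = runbatch(None, model, l1_loss, batch)
loss_value.backward()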
Example #3
def main(args):
    model = args.model.model
    model.load_state_dict(torch.load(args.weight))
    model.eval()
    if torch.cuda.is_available():
        model.cuda()

    dyn_model = model.dyn
    vae_model = model.vae

    # Load sequence:
    seq, param = args.data
    original_seq = seq[args.select,:,:]
    trajectory = [original_seq[args.start_step:(args.start_step+1),:]]
    renders = []

    get_image = lambda r: torch.squeeze(vae_model.decode(r).detach().cpu(), dim=0)

    # Start with the first index:
    X = to_variable(torch.tensor(trajectory[0]), cuda=torch.cuda.is_available())
    X.requires_grad_()
    if args.act == "render":
        renders.append(get_image(X))

    for i in range(args.steps):
        X = to_variable((X + dyn_model(X)).data, cuda=torch.cuda.is_available())
        X.requires_grad_()

        trajectory.append(X.detach().cpu().numpy())
        if args.act == "render":
            renders.append(get_image(X))

    trajectory = np.squeeze(np.stack(trajectory), axis=1)

    if args.act == "plot":
        plot_data(args, trajectory, original_seq=original_seq)
    elif args.act == "render":
        if args.save:
            save_image(renders, args.save)
        if args.save_frames:
            for i, im in enumerate(renders):
                save_image(im, args.save_frames.format(i))
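
The rollout above repeatedly applies the residual update X_{t+1} = X_t + dyn(X_t) in latent space, detaching after each step. A stripped-down version of that loop with a stand-in dynamics module (the module and sizes are illustrative assumptions) is:

import torch
import torch.nn as nn

dyn_model = nn.Linear(320, 320)        # stand-in for model.dyn
X = torch.zeros(1, 320)                # initial latent state
trajectory = [X.numpy()]
for _ in range(5):
    X = (X + dyn_model(X)).detach()    # residual step, detached as in the example
    trajectory.append(X.numpy())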
Example #4
def main(args):
    model = args.model.model
    model.load_state_dict(torch.load(args.weight))

    dataset = args.dataset
    test_dataloader = DataLoader(dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False)

    # TODO: Flag to disable CUDA
    if torch.cuda.is_available():
        model.cuda()

    model.eval()
    trajectory = []
    for batch_idx, data in enumerate(test_dataloader):
        img, _ = data
        img = to_variable(img, cuda=torch.cuda.is_available())

        # z = model(img)
        # Break the abstraction here:
        # Y_a, mu_a, logvar_a, z_a, Y_b, z_b = model(img)
        (Y_a, Code_a, Quantized_a, Perplexity_a, Latent_a, Y_b, Quantized_b,
         Latent_b) = model(img)
        if args.mu:
            # NOTE: mu_a was part of the older model interface shown in the
            # commented-out call above; the current unpacking does not return it.
            z = mu_a
        else:
            # z = z_a
            z = Latent_a

        trajectory.append(z.cpu().data.numpy())

    trajectory = np.concatenate(trajectory)
    h5f = h5py.File(args.output, 'w')
    h5f.create_dataset('seq', data=[trajectory])
    h5f.create_dataset('param', data=[])
    h5f.close()

    mn = np.mean(trajectory, axis=0)
    linf = np.max(np.abs(trajectory - mn))

    print(f"Data: {trajectory.shape}; Mean: {mn.shape}; Linf: {linf}")

    np.savetxt(Path(args.output).with_suffix(".mean"), mn)
    Path(args.output).with_suffix(".linf").write_text(str(linf))
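
The HDF5 file written above stores the latent trajectory under the 'seq' key; it can be read back with h5py, for example (the file name below is an illustrative assumption):

import h5py
import numpy as np

with h5py.File("latents.h5", "r") as h5f:
    seq = np.array(h5f["seq"])         # shape (1, n_frames, latent_dim)
    print(seq.shape)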
Example #5
else:
    z_in_ball = torch.load(z_file_name)

if opt.gpu:
    learnable_z = z_in_ball.cuda()
else:
    learnable_z = z_in_ball

total_step = len(train_loader)

for epoch in range(opt.start_epoch, n_epochs):
    for i, x in enumerate(train_loader):
        if i == 4880:
            break

        x = to_variable(x)
        z = to_variable(learnable_z[i], requires_grad=True)
        x_hat = generator.forward(z)

        l1_loss = opt.l1_weight * torch.mean(torch.abs(x - x_hat))
        lap_loss = laplacian_loss(x, x_hat, n_levels=-1, cuda=opt.gpu)
        loss = l1_loss + lap_loss

        g_optimizer.zero_grad()
        loss.backward()
        g_optimizer.step()

        if opt.gpu:
            grad = z.grad.data.cuda()
        else:
            grad = z.grad.data
Example #6
def reparameterize(self, mu, logvar):
    std = torch.exp(0.5 * logvar)
    eps = to_variable(torch.randn(std.size()))
    return mu + eps * std
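
This is the standard VAE reparameterization trick: rather than sampling z ~ N(mu, sigma^2) directly, the noise eps is sampled on its own so that gradients can flow through mu and logvar. A self-contained illustration (shapes are arbitrary; torch.randn_like is the modern equivalent of the to_variable(torch.randn(...)) call above):

import torch

mu = torch.zeros(4, 8, requires_grad=True)
logvar = torch.zeros(4, 8, requires_grad=True)   # log(sigma^2)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)                      # noise, independent of mu and logvar
z = mu + eps * std                               # differentiable w.r.t. mu and logvar
z.sum().backward()                               # gradients reach mu and logvar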
Example #7
def main(args):
    hidden_dim = 116
    model = LSTMModel(2 * N, hidden_dim)

    model.load_state_dict(torch.load(args.weight))
    model.eval()
    if torch.cuda.is_available():
        model.cuda()

    physics = args.data._pendulum_gen
    n = args.data._n
    redim = args.data._redim
    h = args.timestep

    logger.info(f"Loaded physics simulator for {n}-link pendulum")

    cache_path = Path("pendulum-cache") / f"p-physics-{n}.npy"

    # Energy functions
    energy = pendulum_energy.pendulum_energy(n)

    if cache_path.exists():
        X_phy = np.load(cache_path).astype(np.float32)
        logger.info(f"Loaded trajectories from {cache_path}")
    else:
        raise Exception(f"No trajectories for {cache_path}")

    X_nn = to_variable(torch.tensor(X_phy[0, :, :]),
                       cuda=torch.cuda.is_available())
    errors = np.zeros((args.steps, ))
    X_nn.requires_grad = True
    X_nn = X_nn.unsqueeze(0)

    hiddens = None
    for i in range(1, args.steps):
        k1, new_hiddens = model(X_nn, hiddens)
        k1 = h * k1.detach()
        k2, _ = model(X_nn + k1 / 2, hiddens)
        k2 = h * k2.detach()
        k3, _ = model(X_nn + k2 / 2, hiddens)
        k3 = h * k3.detach()
        k4, _ = model(X_nn + k3, hiddens)
        k4 = h * k4.detach()
        X_nn = X_nn + 1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)

        # Detach
        X_nn = X_nn.detach()
        hiddens = tuple(d.detach() for d in new_hiddens)

        logger.info(f"Timestep {i}")

        y = X_nn.cpu().numpy()
        vel_error = np.sum((X_phy[i, :, n:] - y[0, :, n:])**2)
        ang_error = (X_phy[i, :, :n] - y[0, :, :n])
        while np.any(ang_error >= np.pi):
            ang_error[ang_error >= np.pi] -= 2 * np.pi
        while np.any(ang_error < -np.pi):
            ang_error[ang_error < -np.pi] += 2 * np.pi

        ang_error = np.sum(ang_error**2)
        errors[i] = (vel_error + ang_error)

    for i in range(args.steps):
        print(f"{i}\t{np.sum(errors[0:i])}\t{errors[i]}")
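
The while loops above wrap each angular error into [-pi, pi) before squaring it. The same wrapping can be written as a single vectorised expression; this is a compact alternative, not taken from the original code:

import numpy as np

ang_error = np.array([3.5, -3.5, 0.1])
wrapped = (ang_error + np.pi) % (2 * np.pi) - np.pi   # maps every entry into [-pi, pi)
print(wrapped)                                        # approx [-2.783  2.783  0.1]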
Example #8
def main(args):
    model = args.model.model
    model.load_state_dict(torch.load(args.weight))
    model.eval()
    if torch.cuda.is_available():
        model.cuda()

    physics = args.data._pendulum_gen
    n = args.data._n
    redim = args.data._redim
    h = args.timestep

    logger.info(f"Loaded physics simulator for {n}-link pendulum")

    cache_path = Path("pendulum-cache") / f"p-physics-{n}.npy"

    # Energy functions
    energy = pendulum_energy.pendulum_energy(n)

    if not cache_path.exists():
        logger.info(f"Generating trajectories for {cache_path}")
        # Initialize args.number initial positions:
        X_init = np.zeros((args.number, 2 * n)).astype(np.float32)
        # Pick values in the range [-pi/8, pi/8] (radians and radians/sec):
        X_init[:, :] = (np.random.rand(args.number, 2 * n).astype(np.float32) - 0.5) * np.pi / 4

        X_phy = np.zeros((args.steps, *X_init.shape), dtype=np.float32)
        X_phy[0,...] = X_init
        for i in range(1, args.steps):
            logger.info(f"Timestep {i}")
            k1 = h * physics(X_phy[i-1,...])
            k2 = h * physics(X_phy[i-1,...] + k1/2)
            k3 = h * physics(X_phy[i-1,...] + k2/2)
            k4 = h * physics(X_phy[i-1,...] + k3)
            X_phy[i,...] = X_phy[i-1,...] + 1/6*(k1 + 2*k2 + 2*k3 + k4)
            assert not np.any(np.isnan(X_phy[i,...]))

        np.save(cache_path, X_phy)
        logger.info(f"Done generating trajectories for {cache_path}")

    else:
        X_phy = np.load(cache_path).astype(np.float32)
        logger.info(f"Loaded trajectories from {cache_path}")

    X_nn = to_variable(torch.tensor(X_phy[0,:,:]), cuda=torch.cuda.is_available())
    errors = np.zeros((args.steps,))
    for i in range(1, args.steps):
        X_nn.requires_grad = True
        k1 = h * model(X_nn)
        k1 = k1.detach()
        k2 = h * model(X_nn + k1/2)
        k2 = k2.detach()
        k3 = h * model(X_nn + k2/2)
        k3 = k3.detach()
        k4 = h * model(X_nn + k3)
        k4 = k4.detach()
        X_nn = X_nn + 1/6*(k1 + 2*k2 + 2*k3 + k4)
        X_nn = X_nn.detach()

        logger.info(f"Timestep {i}")
        y = X_nn.cpu().numpy()
        # TODO: Update error calculation
        vel_error = np.sum((X_phy[i,:,n:] - y[:,n:])**2)
        ang_error = (X_phy[i,:,:n] - y[:,:n])
        while np.any(ang_error >= np.pi):
            ang_error[ang_error >= np.pi] -= 2*np.pi
        while np.any(ang_error < -np.pi):
            ang_error[ang_error < -np.pi] += 2*np.pi

        ang_error = np.sum(ang_error**2)
        errors[i] = (vel_error + ang_error)

    for i in range(args.steps):
        print(f"{i}\t{np.sum(errors[0:i])}\t{errors[i]}")
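
Both pendulum examples advance the state with the classic fourth-order Runge-Kutta update. A standalone sketch of that update applied to the toy ODE dx/dt = -x (the ODE and step size are illustrative assumptions) is:

import numpy as np

def rk4_step(f, x, h):
    # one classic RK4 step, matching the k1..k4 pattern used above
    k1 = h * f(x)
    k2 = h * f(x + k1 / 2)
    k3 = h * f(x + k2 / 2)
    k4 = h * f(x + k3)
    return x + (k1 + 2 * k2 + 2 * k3 + k4) / 6

x = np.array([1.0])
for _ in range(10):                    # integrate from t=0 to t=1 with h=0.1
    x = rk4_step(lambda v: -v, x, 0.1)
print(x)                               # approx exp(-1) = 0.368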
Example #9
def init_hidden(self):
    # Initial hidden and cell state for the LSTM.
    # The leading dimension of 2 is num_layers * num_directions.
    return (to_variable(torch.zeros(2, self.batch_size, self.hidden_dim)),
            to_variable(torch.zeros(2, self.batch_size, self.hidden_dim)))
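
init_hidden returns an (h0, c0) pair whose leading dimension is 2, which PyTorch interprets as num_layers * num_directions. The sketch below shows how an nn.LSTM consumes such a pair; it assumes the 2 corresponds to a two-layer unidirectional LSTM, and all sizes are illustrative:

import torch
import torch.nn as nn

batch_size, hidden_dim, seq_len, input_dim = 3, 16, 5, 8
lstm = nn.LSTM(input_dim, hidden_dim, num_layers=2, batch_first=True)
h0 = torch.zeros(2, batch_size, hidden_dim)    # (num_layers * num_directions, batch, hidden)
c0 = torch.zeros(2, batch_size, hidden_dim)
x = torch.randn(batch_size, seq_len, input_dim)
output, (hn, cn) = lstm(x, (h0, c0))
print(output.shape)                            # torch.Size([3, 5, 16])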