Example #1
        # get latent vector
        with torch.no_grad():
            # mu, logvar = sModel.encode(torch.from_numpy(skels[:-1]).unsqueeze(0).float())
            mu, logvar = sModel.encode(
                torch.from_numpy(skels[:]).unsqueeze(0).float())
            z = sModel.reparameterize(mu, logvar)
            z = z.cpu().numpy()

        # get action from the trained policy (deterministic)
        with torch.no_grad():
            value, action, action_log_prob, states, _, _ = actor_critic.act(
                current_obs, current_state, masks, deterministic=True)

        # get trajectory of NAO by decoding
        with torch.no_grad():
            x_hat_rj = rModel.decode(action).reshape(-1, 14)

        # send motor values to Real NAO
        # torch -> numpy -> list -> bytes -> sendall
        filtered_action = alpha * x_hat_rj + (1 - alpha) * prev_actions  # TODO
        send_data = filtered_action.squeeze(0).cpu().numpy().tolist()
        nao_s.write_socket({'header': 'SetMotor', 'data': send_data})
        print('send: nao_angle (deg): ', np.rad2deg(send_data))

        # time.sleep(1/20)    # 20 Hz

        # filter the new latent observation and update the stacked observation
        next_obs = env.robot._obfilt(z)
        update_current_obs(next_obs, current_obs)

        prev_actions = filtered_action
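
The filtered_action line above low-pass filters successive decoded joint commands with an exponential moving average before they are sent to the robot. A minimal standalone sketch of that filter (the default value of alpha here is illustrative, not taken from the original script):

import torch

def smooth_action(new_action, prev_action, alpha=0.3):
    # alpha near 1 tracks the newest command closely; alpha near 0 smooths more
    return alpha * new_action + (1.0 - alpha) * prev_action
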
Example #2
                    rollouts.observations[step], rollouts.states[step],
                    rollouts.masks[step])  # tail of the actor_critic.act() call that yields `action` below

            # get next observation
            data, last_scene, first_scene = sdm.get_random_scene_frame()

            # get latent vector
            with torch.no_grad():
                mu, logvar = sModel.encode(
                    torch.from_numpy(data[:-1]).unsqueeze(0).float())
                zs = sModel.reparameterize(mu, logvar)

            # cpu_actions = action.data.squeeze(1).cpu().numpy()
            # get robot motor value by decoding
            with torch.no_grad():
                x_hat = rModel.decode(action.cpu()).reshape(-1, 14)

            # reference skeleton: decoded from the latent code for the cyclic
            # policy, otherwise the raw scene frame
            if cyclic_policy:
                with torch.no_grad():
                    rSkel = sModel.decode(zs).reshape(75, -1).squeeze()
                    rSkel = rSkel.numpy()
            else:
                rSkel = data

            next_obs, next_state, reward, done, info, true_rew = env.step(
                x_hat, zs.cpu().numpy(), rSkel)

            # To separate the Q-value of the current scene from others
            # if last_scene:
            #     done[0] = 1     # done is np array, [0]
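
Both examples draw the latent vector with sModel.reparameterize(mu, logvar). A minimal sketch of the standard VAE reparameterization trick this presumably implements (the actual method in the model class may differ):

import torch

def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I), keeping the sampling step differentiable
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std
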
Example #3
    origin_imgs.append(test_faceDataset[i][0])  # collect original test images (enclosing loop truncated above)
# for img in test_faceDataset[:10]:
for img in origin_imgs:
    x = Variable(img.unsqueeze_(0).cuda())  # in-place unsqueeze_ also adds the batch dim to the stored image
    out_img = model(x)[0]
    recon_imgs.append(out_img)
origin_imgs = torch.cat(origin_imgs)
recon_imgs = torch.cat(recon_imgs)
compare = torch.cat((origin_imgs, recon_imgs.cpu().data))
save_image(compare.cpu(), output_path+'/fig1_3.jpg', nrow=10, normalize=True)

# fig1_4: decode random latent vectors sampled from N(0, I)
imgs = []
for i in range(32):
    z = Variable(torch.randn(1024).cuda())  # 1024-D latent vector ~ N(0, I)
    out_img = model.decode(z)
    imgs.append(out_img)
imgs = torch.cat(imgs)
save_image(imgs.cpu().data, output_path+'/fig1_4.jpg', nrow=8, normalize=True)

# fig1_5: t-SNE visualization of the encoded latent means (mu)
data_for_tsne = torch.cat(data_for_tsne)
label_for_tsne = torch.cat(label_for_tsne).numpy()
mu, logvar = model.encode(Variable(data_for_tsne.cuda()))
latent_code = mu.cpu().data.numpy()
latent_embedded = TSNE(random_state=2).fit_transform(latent_code)
plt.figure()
# for i in [0,1]:
#     if i:
#         gender = 'Male'
#     else:
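
The plotting code after plt.figure() is cut off above. A minimal sketch, reusing the snippet's variables and assuming the 0/1 label hinted at by the commented-out loop is a gender attribute, of how the 2-D t-SNE embedding could be drawn and saved:

import matplotlib.pyplot as plt

plt.figure()
for i, name in enumerate(['Female', 'Male']):  # assumed encoding of the binary label
    mask = label_for_tsne == i
    plt.scatter(latent_embedded[mask, 0], latent_embedded[mask, 1], s=3, label=name)
plt.legend()
plt.savefig(output_path + '/fig1_5.jpg')
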
Example #4
# process only transitions whose observations are not all-zero
if o.sum() != 0 and o_next.sum() != 0:
    with torch.no_grad():
        if not is_image:
            # non-image observations: use a dummy placeholder state
            s = [np.array([1])]
            s_next = [np.array([1])]
        else:
            # encode current and next observations (HWC -> CHW, add batch dim)
            s = model.encode(
                Variable(
                    torch.cuda.FloatTensor(
                        np.transpose(o, (2, 0, 1))[None])))[1]
            s_next = model.encode(
                Variable(
                    torch.cuda.FloatTensor(
                        np.transpose(o_next, (2, 0, 1))[None])))[1]
            if i == 0 and t == 0:
                # save the first observation next to its reconstruction to
                # verify the image preprocessing
                recon = model.decode(s)
                obs_img = torch.cuda.FloatTensor(
                    np.transpose(o, (2, 0, 1))[None]).cpu() / 255
                save_image(
                    torch.cat([obs_img, recon.data.cpu()], dim=0),
                    os.path.join(save_path,
                                 'verified_img_preprocessing.png'))
            s = s.cpu().numpy()
            s_next = s_next.cpu().numpy()
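
For reference, a small helper (not part of the original code) that captures the inline observation preprocessing used in the encode calls above, turning an HxWxC numpy frame into a 1xCxHxW float tensor (the snippet additionally places the tensor on the GPU via torch.cuda.FloatTensor and applies the /255 scaling only when saving the comparison image):

import numpy as np
import torch

def obs_to_tensor(obs_hwc):
    # HWC -> CHW, then add a leading batch dimension
    # (mirrors np.transpose(o, (2, 0, 1))[None] in the snippet)
    chw = np.transpose(obs_hwc, (2, 0, 1))[None]
    return torch.from_numpy(chw).float()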