Example #1
def save_center(env, size_to_use, file_corners, img_size, enc_type):
    points = generate_points(range_x=range_x,
                             range_y=range_y,
                             z=z_table_height,
                             total=3,
                             object_x_y_size=[size_to_use, size_to_use])

    # sample images
    data_set = np.empty([1, img_size, img_size, 3])
    # move other objects to places where they do not disturb the scene
    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[2., 2., 0.4])
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._move_object(position=[2., 2., 0.4])
    else:
        raise Exception('Unsupported enc_type')

    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._move_object(position=points[4])
        data_set[0] = take_goal_image(env,
                                      img_size,
                                      make_table_invisible=False)
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._set_position(names_list=['obstacle'], position=points[4])
        data_set[0] = take_obstacle_image(env, img_size)
    else:
        raise Exception('Unsupported enc_type')
    np.save(file_corners, data_set)
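
A minimal, self-contained sketch of the save/load round trip that save_center performs, assuming only numpy; the environment, generate_points, and take_goal_image helpers come from the surrounding project and a random array stands in for the rendered image here.

import numpy as np

img_size = 64
data_set = np.empty([1, img_size, img_size, 3])
# stand-in for take_goal_image(...): a random HxWx3 image
data_set[0] = np.random.randint(0, 256, size=(img_size, img_size, 3))
np.save('center_image.npy', data_set)

loaded = np.load('center_image.npy')
assert loaded.shape == (1, img_size, img_size, 3)
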
Example #2
    def reset_ep(self):
        if hasattr(self.args, 'vae_dist_help') and self.args.vae_dist_help:
            obs = self.env.env._get_obs()
            if self.args.vae_type in ('monet', 'space', 'bbox', 'faster_rcnn'):
                self.goals_img_env.env._move_object(
                    position=obs['desired_goal'].copy())
                desired_goal_image = take_image_objects(
                    self,
                    self.args.img_size,
                    direct_env=self.goals_img_env.env)
                achieved_goal_image = take_image_objects(
                    self, self.args.img_size)
                if self.args.vae_type in ('space', 'bbox', 'faster_rcnn'):
                    lg, lg_s, lo, lo_s = latents_from_images(
                        np.array([desired_goal_image, achieved_goal_image]),
                        self.args)
                    self.desired_goal_latent = lg[0].copy()
                    self.desired_goal_size_latent = lg_s[0].copy()
                    self.achieved_goal_size_latent = lg_s[1].copy()
                    self.achieved_goal_latent = lg[1].copy()
                    self.obstacle_latent = lo[1].copy()
                    self.obstacle_size_latent = lo_s[1].copy()
                else:
                    lg, lo, lo_s = latents_from_images(
                        np.array([desired_goal_image, achieved_goal_image]),
                        self.args)
                    self.desired_goal_latent = lg[0].copy()
                    self.achieved_goal_latent = lg[1].copy()
                    self.obstacle_latent = lo[1].copy()
                    self.obstacle_size_latent = lo_s[1].copy()

            else:
                self.goals_img_env.env._move_object(
                    position=obs['desired_goal'].copy())
                desired_goal_image = take_goal_image(
                    self,
                    self.args.img_size,
                    direct_env=self.goals_img_env.env)
                achieved_goal_image = take_goal_image(self, self.args.img_size)
                latents = goal_latent_from_images(
                    np.array([desired_goal_image, achieved_goal_image]),
                    self.args)
                self.desired_goal_latent = latents[0].copy()
                self.achieved_goal_latent = latents[1].copy()
                #self.achieved_goal_image = achieved_goal_image.copy()

                obstacle_image = take_obstacle_image(self, self.args.img_size)
                latents_obstacle, latents_o_size = obstacle_latent_from_images(
                    np.array([obstacle_image]), self.args)
                self.obstacle_latent = latents_obstacle[0].copy()
                self.obstacle_size_latent = latents_o_size[0].copy()

        self.rewards = 0.0
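
The branching above hinges on latents_from_images returning four values for the 'space'/'bbox'/'faster_rcnn' encoders (goal latents plus goal size latents) and three otherwise. A sketch of the two unpacking shapes with a hypothetical stub, latents_from_images_stub, invented here for illustration:

import numpy as np

def latents_from_images_stub(images, with_goal_sizes):
    # hypothetical stand-in: per-image goal latents, optional goal sizes,
    # obstacle latents, and obstacle size latents
    n = len(images)
    lg, lo, lo_s = np.zeros((n, 2)), np.zeros((n, 2)), np.zeros((n, 1))
    if with_goal_sizes:
        return lg, np.zeros((n, 1)), lo, lo_s
    return lg, lo, lo_s

images = np.zeros((2, 64, 64, 3))  # [desired_goal_image, achieved_goal_image]
lg, lg_s, lo, lo_s = latents_from_images_stub(images, with_goal_sizes=True)
desired_goal_latent = lg[0].copy()
achieved_goal_latent = lg[1].copy()
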
Example #3
def save_corners(env, size_to_use, file_corners, img_size, enc_type):
    points = generate_points(range_x=range_x,
                             range_y=range_y,
                             z=z_table_height,
                             total=2,
                             object_x_y_size=[size_to_use, size_to_use])

    # sample images
    data_set = np.empty([len(points), img_size, img_size, 3])
    # move other objects to places where they do not disturb the scene
    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[2., 2., 0.4])
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._move_object(position=[2., 2., 0.4])
    else:
        raise Exception('Unsupported enc_type')
    for i, p in enumerate(points):
        if enc_type == 'goal' or (args.enc_type == 'mixed'
                                  and args.mix_h == 'goal'):
            env.env.env._move_object(position=p)
            data_set[i] = take_goal_image(env, img_size)
        elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                        and args.mix_h == 'obstacle'):
            env.env.env._set_position(names_list=['obstacle'], position=p)
            data_set[i] = take_obstacle_image(env, img_size)
        else:
            raise Exception('Unsupported enc_type')
    np.save(file_corners, data_set)
    # tile the sampled images side by side and display the strip
    all_array = np.concatenate(list(data_set), axis=1)
    all_ims = Image.fromarray(all_array.astype(np.uint8))
    all_ims.show()
    all_ims.close()
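
The display step reduces to tiling the sampled images along the width. A self-contained sketch with random stand-in images, assuming only numpy and PIL:

import numpy as np
from PIL import Image

img_size = 32
# four stand-in "corner" images
data_set = np.random.randint(0, 256, size=(4, img_size, img_size, 3))
strip = np.concatenate(list(data_set), axis=1)  # concatenate along the width
Image.fromarray(strip.astype(np.uint8)).save('corners_strip.png')
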
Example #4
    def step(self, action):
        # imaginary infinite horizon (no done signal)
        obs, reward, done, info = self.env.step(action)
        if hasattr(self.args, 'vae_dist_help') and self.args.vae_dist_help:
            if self.args.vae_type in ('monet', 'space', 'bbox', 'faster_rcnn'):
                achieved_image = take_image_objects(self, self.args.img_size)
                if self.args.vae_type in ('space', 'bbox', 'faster_rcnn'):
                    lg, lg_s, lo, lo_s = latents_from_images(
                        np.array([achieved_image]), self.args)
                    self.achieved_goal_size_latent = lg_s[0].copy()
                else:
                    lg, lo, lo_s = latents_from_images(
                        np.array([achieved_image]), self.args)
                self.achieved_goal_latent = lg[0].copy()
                self.obstacle_latent = lo[0].copy()

                self.obstacle_size_latent = lo_s[0].copy()
            else:
                achieved_goal_image = take_goal_image(self, self.args.img_size)
                latents_goal = goal_latent_from_images(
                    np.array([achieved_goal_image]), self.args)
                self.achieved_goal_latent = latents_goal[0].copy()

                obstacle_image = take_obstacle_image(self, self.args.img_size)
                latents_obstacle, latents_o_size = obstacle_latent_from_images(
                    np.array([obstacle_image]), self.args)
                self.obstacle_latent = latents_obstacle[0].copy()
                self.obstacle_size_latent = latents_o_size[0].copy()
                #self.achieved_goal_image = achieved_goal_image.copy()

        obs = self.get_obs()

        # The order is important, since a child class might have a reward that depends on modifications made in obs
        info = self.process_info(obs, reward, info)
        # for compatibility we pass the last obs, but none of the reward functions in use actually need it
        reward = self.compute_reward(obs, self.last_obs, obs['desired_goal'])

        self.last_obs = obs.copy()
        return obs.copy(), reward, False, info
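
The core pattern in step is: delegate to the wrapped env, recompute the reward from the processed observation, and always return done=False to fake an infinite horizon. A minimal sketch under assumed names; DummyEnv and the sparse reward are inventions for illustration, not the project's classes:

class DummyEnv:
    def step(self, action):
        obs = {'desired_goal': 0.0, 'achieved_goal': float(action)}
        return obs, 0.0, True, {}

class InfiniteHorizonWrapper:
    def __init__(self, env):
        self.env = env
        self.last_obs = {'desired_goal': 0.0, 'achieved_goal': 0.0}

    def compute_reward(self, obs, last_obs, desired_goal):
        # sparse reward: 0 when the goal is reached, -1 otherwise
        return 0.0 if obs['achieved_goal'] == desired_goal else -1.0

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # recompute the reward after the obs has been processed, and
        # drop the inner done signal, as in the example above
        reward = self.compute_reward(obs, self.last_obs, obs['desired_goal'])
        self.last_obs = obs.copy()
        return obs.copy(), reward, False, info

obs, reward, done, info = InfiniteHorizonWrapper(DummyEnv()).step(0.0)
assert done is False and reward == 0.0
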
Example #5
def visualization_sizes_obstacle(env,
                                 env_name,
                                 model,
                                 enc_type,
                                 img_size,
                                 n,
                                 ind_1,
                                 ind_2=None,
                                 using_sb=True):
    cuda = torch.cuda.is_available()
    torch.manual_seed(1)
    device = torch.device("cuda" if cuda else "cpu")

    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[2., 2., 0.4])
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._move_object(position=[2., 2., 0.4])
    else:
        raise Exception('Unsupported enc_type')

    if env_name == 'FetchPushMovingObstacleEnv-v1':
        assert ind_2 is not None
        min_s = min_obstacle_size['FetchPushMovingObstacleEnv-v1']
        max_s = max_obstacle_size['FetchPushMovingObstacleEnv-v1']
        sizes_x = np.linspace(min_s, max_s, num=n)
        sizes_y = np.linspace(min_s, max_s, num=n)
        plt.figure(1)
        plt.subplot(211)
        data_set = np.empty(
            [len(sizes_x) * len(sizes_y), img_size, img_size, 3])
        for i, s_x in enumerate(sizes_x):
            for j, s_y in enumerate(sizes_y):
                plt.scatter(s_x, s_y)
                plt.annotate(i * len(sizes_y) + j, (s_x, s_y))
                # take an image of the obstacle at this size
                env.env.env._set_size(names_list=['obstacle'],
                                      size=np.array([s_x, s_y, 0.035]))
                data_set[i * len(sizes_y) + j] = take_obstacle_image(
                    env, img_size)

        data = torch.from_numpy(data_set).float().to(device)
        data /= 255
        data = data.permute([0, 3, 1, 2])
        if not using_sb:
            mu, logvar = model.encode(data.reshape(-1,
                                                   img_size * img_size * 3))
        else:
            mu, logvar = model.encode(data)
        mu = mu.detach().cpu().numpy()

        assert ind_1 != ind_2
        mu = np.concatenate([
            np.expand_dims(mu[:, ind_1], axis=1),
            np.expand_dims(mu[:, ind_2], axis=1)
        ],
                            axis=1)
        mtx, mty = get_size_in_space(mu[:, 0], v2=mu[:, 1])
        mu = np.concatenate(
            [np.expand_dims(mtx, axis=1),
             np.expand_dims(mty, axis=1)], axis=1)
        plt.subplot(212)
        plt.title('latent')

        for i, m in enumerate(mu):
            plt.scatter(m[0], m[1])
            plt.annotate(i, (m[0], m[1]))

    else:
        sizes = np.linspace(min_obstacle_size, max_obstacle_size, num=n)
        #sizes = np.linspace(obstacle_size, obstacle_size, num=n)
        n_labels = np.arange(len(sizes))
        sizes = np.array(sizes)

        xs = np.repeat(1, len(sizes))
        ys = sizes
        plt.figure(1)
        plt.subplot(211)
        plt.scatter(xs, ys)
        plt.title('real')
        for i, en in enumerate(n_labels):
            plt.annotate(en, (xs[i], ys[i]))

        # sample images
        data_set = np.empty([len(sizes), img_size, img_size, 3])
        for i, p in enumerate(sizes):
            env.env.env._set_position(names_list=['obstacle'],
                                      position=center_obstacle)
            env.env.env._set_size(names_list=['obstacle'],
                                  size=np.array([sizes[i], 0.035, 0.0]))
            data_set[i] = take_obstacle_image(env, img_size)
        data = torch.from_numpy(data_set).float().to(device)
        data /= 255
        data = data.permute([0, 3, 1, 2])
        if not using_sb:
            mu, logvar = model.encode(data.reshape(-1,
                                                   img_size * img_size * 3))
        else:
            mu, logvar = model.encode(data)
        mu = mu.detach().cpu().numpy()

        mu = mu[:, ind_1]

        # map each raw latent value into the size space
        for i in range(len(mu)):
            mu[i] = get_size_in_space(mu[i])
            #    mu[i] = map_size_space(mu[i])
            #    mu[i] = get_size_in_space(map_size_space(mu[i]))

        lxs = np.repeat(1, len(sizes))
        lys = mu

        plt.subplot(212)
        plt.scatter(lxs, lys)
        plt.title('latent')
        for i, en in enumerate(n_labels):
            plt.annotate(en, (lxs[i], lys[i]))
        print(mu)

    plt.show()
    plt.close()
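
The encode pipeline used in both branches is the same: scale uint8 images to [0, 1], reorder NHWC to NCHW, encode to (mu, logvar), and scatter two latent dimensions. A self-contained sketch with a tiny hypothetical encoder standing in for the real model:

import numpy as np
import torch
import matplotlib.pyplot as plt

class TinyEncoder(torch.nn.Module):
    # hypothetical stand-in for model.encode: flatten, project to (mu, logvar)
    def __init__(self, img_size, latent_size=4):
        super().__init__()
        self.fc = torch.nn.Linear(img_size * img_size * 3, 2 * latent_size)
        self.latent_size = latent_size

    def encode(self, x):
        out = self.fc(x.flatten(start_dim=1))
        return out[:, :self.latent_size], out[:, self.latent_size:]

img_size, n = 16, 5
data_set = np.random.rand(n, img_size, img_size, 3) * 255
data = torch.from_numpy(data_set).float() / 255
data = data.permute([0, 3, 1, 2])  # NHWC -> NCHW, as in the example
model = TinyEncoder(img_size)
mu, logvar = model.encode(data)
mu = mu.detach().cpu().numpy()
plt.scatter(mu[:, 0], mu[:, 1])
plt.title('latent')
plt.show()
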
Example #6
def traversal(env,
              model,
              img_size,
              latent_size,
              n,
              enc_type,
              using_sb=True,
              fig_file_name=None):
    cuda = torch.cuda.is_available()
    torch.manual_seed(1)
    device = torch.device("cuda" if cuda else "cpu")
    dist = 0.8

    data_set = np.empty([n * latent_size, img_size, img_size, 3])
    # move other objects to places where they do not disturb the scene
    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[2., 2., 0.4])
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._move_object(position=[2., 2., 0.4])
    elif enc_type == 'all':
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[10., 10., 10.])
        env.env.env._move_object(position=[-10., -10., -10.])
        obj = 'cube'
        p1 = np.array([-20., 20., 20.])
        p2 = np.array([-20., -20., 20.])
        env.env.env._set_position(names_list=['rectangle'], position=p1)
        env.env.env._set_position(names_list=['cylinder'], position=p2)
        s = 0.06
        env.env.env._set_size(names_list=['cube'], size=[s, s, s])
        pos = [1.3, 0.75, 0.4 + s]
        env.env.env._set_position(names_list=[obj], position=pos)
    else:
        raise Exception('Unsupported enc_type')

    # sample the central image
    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._move_object(position=[1.3, 0.75, 0.4])
        central_im = take_goal_image(env, img_size, make_table_invisible=True)
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[1.3, 0.75, 0.4])
        # env.env.env._set_size(names_list=['obstacle'], size=np.array([0.15, 0.035, 0.]))
        central_im = take_obstacle_image(env, img_size)
    elif enc_type == 'all':
        central_im = take_objects_image_training(env, img_size)
    else:
        raise Exception('Unsupported enc_type')

    # transform to latent
    data = np.expand_dims(central_im.copy(), axis=0)
    data = torch.from_numpy(data).float().to(device)
    data /= 255
    data = data.permute([0, 3, 1, 2])
    model.eval()
    if not using_sb:
        mu, logvar = model.encode(data.reshape(-1, img_size * img_size * 3))
    else:
        mu, logvar = model.encode(data)

    mid = int(n / 2)
    for l in range(latent_size):
        for t in range(n):
            if t == mid:
                data_set[n * l + t] = central_im.copy()
            else:
                v = torch.zeros(latent_size)
                v[l] = 1.
                if t < mid:
                    v = v * -(mid - t) * dist
                else:
                    v = v * (t - mid) * dist
                v = v.to(device)
                z = mu + v

                im = model.decode(z)
                im = im.view(3, img_size, img_size)
                im = im.permute([1, 2, 0])
                im *= 255.
                im = im.type(torch.uint8)
                im = im.detach().cpu().numpy()
                data_set[n * l + t] = im.copy()

    all_array = None
    t = 0
    for r in range(latent_size):
        row = None
        for c in range(n):
            rcim = data_set[t].copy()
            t += 1
            if row is None:
                row = rcim
            else:
                row = np.concatenate([row.copy(), rcim], axis=1)
        if all_array is None:
            all_array = row.copy()
        else:
            all_array = np.concatenate([all_array.copy(), row], axis=0)
    all_ims = Image.fromarray(all_array.astype(np.uint8))
    if fig_file_name is not None:
        all_ims.save('{}_ims.png'.format(fig_file_name))
    else:
        all_ims.show()
    all_ims.close()
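
The traversal itself is encoder-agnostic: take the central latent mu, offset one dimension at a time in steps of dist, decode, and tile the decodings into a latent_size x n grid. A compact sketch with a hypothetical linear decoder standing in for model.decode:

import numpy as np
import torch
from PIL import Image

img_size, latent_size, n, dist = 16, 3, 5, 0.8
decoder = torch.nn.Linear(latent_size, img_size * img_size * 3)  # stand-in
mu = torch.zeros(1, latent_size)  # stand-in for the encoded central image

rows = []
for l in range(latent_size):
    row = []
    for t in range(n):
        v = torch.zeros(latent_size)
        v[l] = (t - n // 2) * dist  # walk one latent dim, keep the rest fixed
        im = torch.sigmoid(decoder(mu + v)).view(3, img_size, img_size)
        im = (im.permute([1, 2, 0]) * 255).type(torch.uint8).detach().numpy()
        row.append(im)
    rows.append(np.concatenate(row, axis=1))
grid = np.concatenate(rows, axis=0)
Image.fromarray(grid).save('traversal_grid.png')
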
Example #7
def visualization_grid_points(env,
                              model,
                              size_to_use,
                              img_size,
                              n,
                              enc_type,
                              ind_1,
                              ind_2,
                              using_sb=True,
                              use_d=False,
                              fig_file_name=None):
    if use_d:
        d = 0.12  #0.32
        points = generate_points(range_x=[range_x[0] - d, range_x[1] + d],
                                 range_y=[range_y[0] - d, range_y[1] + d],
                                 z=z_table_height,
                                 total=n,
                                 object_x_y_size=[size_to_use, size_to_use])
    else:
        points = generate_points(range_x=range_x,
                                 range_y=range_y,
                                 z=z_table_height,
                                 total=n,
                                 object_x_y_size=[size_to_use, size_to_use])

    n_labels = np.arange(len(points))

    points = np.array(points)
    #print_max_and_min(points)

    xs = points[:, 0]
    ys = points[:, 1]
    plt.figure(1)
    plt.subplot(211)
    plt.scatter(xs, ys)
    plt.title('real')
    for i, en in enumerate(n_labels):
        plt.annotate(en, (xs[i], ys[i]))

    cuda = torch.cuda.is_available()
    torch.manual_seed(1)
    device = torch.device("cuda" if cuda else "cpu")

    # sample images
    data_set = np.empty([len(points), img_size, img_size, 3])
    # move other objects to places where they do not disturb the scene
    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        env.env.env._set_position(names_list=['obstacle'],
                                  position=[2., 2., 0.4])
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        env.env.env._move_object(position=[2., 2., 0.4])
    else:
        raise Exception('Unsupported enc_type')
    for i, p in enumerate(points):
        if enc_type == 'goal' or (args.enc_type == 'mixed'
                                  and args.mix_h == 'goal'):
            env.env.env._move_object(position=p)
            data_set[i] = take_goal_image(env,
                                          img_size,
                                          make_table_invisible=True)
        elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                        and args.mix_h == 'obstacle'):
            env.env.env._set_position(names_list=['obstacle'], position=p)
            data_set[i] = take_obstacle_image(env, img_size)
        else:
            raise Exception('Unsupported enc_type')
    all_array = None
    t = 0
    for r in range(n):
        row = None
        for c in range(n):
            rcim = data_set[t].copy()
            t += 1
            if row is None:
                row = rcim
            else:
                row = np.concatenate([row.copy(), rcim], axis=1)
        if all_array is None:
            all_array = row.copy()
        else:
            all_array = np.concatenate([all_array.copy(), row], axis=0)
    all_ims = Image.fromarray(all_array.astype(np.uint8))
    if fig_file_name is not None:
        all_ims.save('{}_ims.png'.format(fig_file_name))
        from utils.hindsight_goals_visualizer import show_points
        show_points(points, '{}_vis'.format(fig_file_name), 'real')
    else:
        all_ims.show()
    all_ims.close()
    data = torch.from_numpy(data_set).float().to(device)
    data /= 255
    data = data.permute([0, 3, 1, 2])
    model.eval()
    if not using_sb:
        mu, logvar = model.encode(data.reshape(-1, img_size * img_size * 3))
    else:
        mu, logvar = model.encode(data)
    mu = mu.detach().cpu().numpy()

    assert ind_1 != ind_2
    mu = np.concatenate([
        np.expand_dims(mu[:, ind_1], axis=1),
        np.expand_dims(mu[:, ind_2], axis=1)
    ],
                        axis=1)

    if enc_type == 'goal' or (args.enc_type == 'mixed'
                              and args.mix_h == 'goal'):
        rm = create_rotation_matrix(angle_goal)
        mu = rotate_list_of_points(mu, rm)
        #mu = map_points(mu, goal_map_x, goal_map_y)
    elif enc_type == 'obstacle' or (args.enc_type == 'mixed'
                                    and args.mix_h == 'obstacle'):
        #for i, p in enumerate(mu):
        #    mu[i] = reflect_obstacle_transformation(p)
        rm = create_rotation_matrix(angle_obstacle)
        mu = rotate_list_of_points(mu, rm)
        #mu = map_points(mu, obstacle_map_x, obstacle_map_y)
    else:
        raise Exception('Unsupported enc_type')
    print_max_and_min(mu)

    lxs = mu[:, 0]
    lys = mu[:, 1]
    plt.subplot(212)
    plt.scatter(lxs, lys)
    plt.title('latent')
    for i, en in enumerate(n_labels):
        plt.annotate(en, (lxs[i], lys[i]))

    if fig_file_name is not None:
        plt.savefig(fig_file_name)
    else:
        plt.show()
    plt.close()
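
create_rotation_matrix and rotate_list_of_points are project helpers not shown in this example. A plausible reading, written as hypothetical 2D rotation utilities (the project's actual versions may differ):

import numpy as np

def create_rotation_matrix(angle):
    # hypothetical 2D rotation matrix matching the call sites above
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s], [s, c]])

def rotate_list_of_points(points, rm):
    # rotate each (x, y) latent point by the same matrix
    return np.array([rm @ p for p in points])

mu = np.array([[1.0, 0.0], [0.0, 1.0]])
mu = rotate_list_of_points(mu, create_rotation_matrix(np.pi / 2))
print(mu)  # [[0, 1], [-1, 0]] up to floating point error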