    def add_object(self, obj):
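        # merge the base environment XML with the object's XML into a single
        # model file that gym can load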

        integrated_xml = generate_integrated_xml(
            self.env_base_xml_file,
            obj.obj_xml_file,
            scale=obj.scale,
            add_bbox_indicator=self.bbox_indicator)

        env = gym.make(self.env_name,
                       xml_path=integrated_xml,
                       use_bbox_indicator=self.bbox_indicator,
                       n_actions=self.n_robot_dof)

        obs = env.reset()
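        # place the object on the table and let physics settle (up to max_run retries)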
        pcp_utils.env_utils.reset_everything_on_table(env, obj, max_run=100)

        images = self.render_images(env)
        images = self.convert_to_adam(images)

        # TODO: decide whether the visualization needs saving on every call
        # self.visualize(images, save=True, obj_name=obj.name)

        # convert the rendered views to a point cloud and preprocess it,
        # following 6dof_graspnet/demo/main.py
        obj_pc, pc, pc_colors = self.preprocess_points_for_6dgrasp(
            images, save=True, obj_name=obj.name)
        # the point cloud is now ready for the grasp network's forward pass

        # also save the environment state so the scene can be reloaded offline
        env_state = env.env.sim.get_state()

        return obj_pc, pc, pc_colors, env_state
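
    # usage sketch (hypothetical caller; grasp_net stands in for an assumed
    # 6-DoF grasp model consuming the preprocessed object cloud):
    #   obj_pc, pc, pc_colors, env_state = self.add_object(obj)
    #   grasps, scores = grasp_net.predict(obj_pc)

# Example 2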
    def add_object(self, obj):
        #share the env

        # make xml for the object:
        integrated_xml = generate_integrated_xml(
            self.env_base_xml_file,
            obj.obj_xml_file,
            scale=obj.scale,
            mass=obj.mass,
            euler=obj.euler,
            add_bbox_indicator=self.bbox_indicator,
            randomize_color=self.randomize_color,
            prefix=self.run_name)

        env = gym.make(self.env_name,
                       xml_path=integrated_xml,
                       use_bbox_indicator=self.bbox_indicator,
                       n_actions=self.n_robot_dof,
                       init_info=self.init_info)
        print(f'max env steps are: {env._max_episode_steps}')
        env.seed(RANDOM_SEED)
        env.action_space.seed(RANDOM_SEED)

        obs = env.reset()
        # this part will decide which cluster to try first
        current_vis_state = self.get_inputs_for_selector(env, obj)
        results = self.selector.predict_forward(current_vis_state)
        cluster_rank, cluster_score = self.selector.compute_nearest_cluster(
            results["object_tensor"][0])

        # env.render()
        # this ordering could itself be learned
        is_clustered = False
        cname = cluster_rank[0]
        cluster = self.clusters[cname]

        # load policy of the first mesh (parent mesh) in an existing cluster
        #print("Checking performance of {} on policy for {}".format(obj, cid))
        stats = cluster['expert'].run_forwards(
            env,
            obj=obj,
            num_rollouts=self.num_rollouts,
            path_length=self.max_path_length)

        success_rate = stats['success_rate']
        print("Success Rate ", success_rate, "using object: ", obj.name,
              "by running on cluster", cname)

        cluster['objects'].append((obj, success_rate))
        self.object_to_cluster.append((obj, cname))

        env.close()
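
# Example 3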
    def add_object(self, obj):
        #share the env

        # make xml for the object:
        integrated_xml = generate_integrated_xml(self.env_base_xml_file, obj.obj_xml_file, scale=obj.scale, mass=obj.mass,
                add_bbox_indicator=self.bbox_indicator, randomize_color=self.randomize_color, prefix=self.run_name)

        env = gym.make(self.env_name, xml_path=integrated_xml, use_bbox_indicator=self.bbox_indicator,
            n_actions=self.n_robot_dof, init_info=self.init_info)
        print(f'max env steps are: {env._max_episode_steps}')

        # env.render()
        # this ordering could itself be learned
        is_clustered = False
        for cid, cluster in self.clusters.items():
            # load policy of the first mesh (parent mesh) in an existing cluster
            print("Checking performance of {} on policy for {}: {}".format(obj.name, cid, cluster['expert'].policy_name))
            stats = cluster['expert'].run_forwards(env, obj=obj, num_rollouts=self.num_rollouts, path_length=self.max_path_length, render=self.config.render)

            success_rate = stats['success_rate']
            print("Success Rate ", success_rate)

            if success_rate >= self.accept_threshold:  # could also scale by a base success rate
                cluster['objects'].append((obj, success_rate))
                self.object_to_cluster.append((obj, cid))
                is_clustered = True
                break


        if not is_clustered:
            self.object_not_clustered.append(obj)
            self.object_to_cluster.append((obj, "not_assigned"))

        gym_xml_path = os.path.join(pcp_utils.utils.get_gym_dir(), 'gym/envs/robotics/assets')
        integrated_xml_full_path = os.path.join(gym_xml_path,  integrated_xml)
        os.remove(integrated_xml_full_path)


        env.close()
# Example 4
    def scene_to_npz(self, mesh_obj):

        integrated_xml = generate_integrated_xml(self.env_base_xml_file,
                                                 mesh_obj.obj_xml_file,
                                                 scale=mesh_obj.scale)
        abs_integrated_xml_path = os.path.join(self.env_xml_file_root,
                                               integrated_xml)
        visual_mjcf = mjcf.from_path(
            abs_integrated_xml_path
        )  # cannot have "include" in the xml when loading with mjcf

        # is the camera pose related to the object pose?
        # for visualization:
        if self.config.render:
            model = mujoco_py.load_model_from_path(abs_integrated_xml_path)
            sim = mujoco_py.MjSim(model)
            viewer = mujoco_py.MjViewer(sim)
            for _ in range(5000):
                sim.step()
            for _ in range(20):
                viewer.render()

        print(mesh_obj.name, self.config.camera_lookat_pos)

        # sample camera poses on a hemisphere around the lookat point
        camera_positions, camera_quats = pcp_utils.cameras.generate_new_cameras_hemisphere(
            radius=self.config.camera_radius,
            lookat_point=self.config.camera_lookat_pos,
            pitch=self.config.camera_pitch,
            yaw=self.config.camera_yaw)

        # add all the cameras to the environment
        for i, (pos, quat) in enumerate(zip(camera_positions, camera_quats)):
            visual_mjcf.worldbody.add('camera',
                                      name=f'vis_cam:{i}',
                                      pos=pos,
                                      quat=quat,
                                      fovy=self.config.camera_fov_y)
        visual_mjcf.worldbody.add('camera',
                                  name='ref_cam',
                                  pos=[0, -0.3, 0.5],
                                  zaxis=[0, -1, 0],
                                  fovy=self.config.camera_fov_y)

        # start the simulation
        physics = mjcf.Physics.from_mjcf_model(visual_mjcf)
        physics.forward()

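        # step the scene until the object comes to rest before rendering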
        for step_id in range(2000):
            physics.step()
        object_xpos = physics.data.xpos[physics.model.name2id(
            'object0', 'body')]
        #print(f'object final location', object_xpos)
        object_xmat = physics.data.xmat[physics.model.name2id(
            'object0', 'body')]
        #print(f'object final qpos', object_xmat)

        ep_imgs = list()
        ep_depths = list()
        ep_pix_T_cams = list()
        ep_camR_T_camXs = list()

        for i in range(len(camera_positions)):
            img = physics.render(self.config.camera_img_height,
                                 self.config.camera_img_width,
                                 camera_id=f'vis_cam:{i}')
            depth = physics.render(self.config.camera_img_height,
                                   self.config.camera_img_width,
                                   camera_id=f'vis_cam:{i}',
                                   depth=True)

            assert img.shape[0] == self.config.camera_img_height, "color img height is wrong"
            assert img.shape[1] == self.config.camera_img_width, "color img width is wrong"
            assert depth.shape[0] == self.config.camera_img_height, "depth img height is wrong"
            assert depth.shape[1] == self.config.camera_img_width, "depth img width is wrong"

            #if self.config.save_image:
            #    imageio.imwrite(f"tmp/img_{i}.png", img)
            #    imageio.imwrite(f"tmp/depth_{i}.png", np.minimum(depth, 5))

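            # intrinsics come from the vertical FOV and image size; extrinsics are
            # looked up per camera and re-expressed in the adam reference frame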
            pix_T_cams = pcp_utils.cameras.get_intrinsics(
                self.config.camera_fov_y, self.config.camera_img_width,
                self.config.camera_img_height)
            origin_T_camX = pcp_utils.cameras.dm_get_extrinsics(
                physics, physics.model.name2id(f'vis_cam:{i}', 'camera'))
            camR_T_camX = np.dot(self.adam_T_origin, origin_T_camX)

            # camR should be anchored at the top of the table

            ep_imgs.append(img)
            ep_depths.append(depth)
            ep_pix_T_cams.append(pix_T_cams)
            # this extrinsics is in mujoco frame
            ep_camR_T_camXs.append(camR_T_camX)

        img = physics.render(self.config.camera_img_height,
                             self.config.camera_img_width,
                             camera_id='ref_cam')
        depth = physics.render(self.config.camera_img_height,
                               self.config.camera_img_width,
                               camera_id='ref_cam',
                               depth=True)
        pix_T_cams = pcp_utils.cameras.get_intrinsics(
            self.config.camera_fov_y, self.config.camera_img_width,
            self.config.camera_img_height)
        origin_T_camX = pcp_utils.cameras.dm_get_extrinsics(
            physics, physics.model.name2id('ref_cam', 'camera'))
        camR_T_camX = np.dot(self.adam_T_origin, origin_T_camX)

        if self.config.save_image:
            imageio.imwrite(f"tmp/img_ref.png", img)

        ep_imgs.append(img)
        ep_depths.append(depth)
        ep_pix_T_cams.append(pix_T_cams)
        ep_camR_T_camXs.append(camR_T_camX)

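        # pack the object pose into a homogeneous 4x4 and re-express it in the adam frame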
        composed_xmat = np.eye(4, dtype=np.float32)
        composed_xmat[:3, 3] = object_xpos
        composed_xmat[:3, :3] = np.reshape(object_xmat, [3, 3])
        composed_xmat = np.dot(self.adam_T_origin, composed_xmat)
        object_xpos_adam = composed_xmat[:3, 3]
        object_xmat_adam = composed_xmat[:3, :3]

        bbox_points_from_mesh_adam = pcp_utils.np_vis.compute_bounding_box_from_obj_xml(
            mesh_obj.obj_xml_file,
            object_xpos_adam,
            object_xmat_adam,
            scale=mesh_obj.scale)

        if self.config.visualize_bbox:
            _, xyz_camRs, _ = pcp_utils.np_vis.unproject_depth(
                ep_depths,
                ep_pix_T_cams,
                ep_camR_T_camXs,
                camR_T_origin=None,  #np.linalg.inv(self.origin_T_adam),
                clip_radius=5.0,
                do_vis=False)

            all_xyz_camR = np.concatenate(xyz_camRs, axis=0)
            object_pcd = pcp_utils.np_vis.get_pcd_object(all_xyz_camR,
                                                         clip_radius=2.0)

            # transform object xpos and xmat to the adam coordinate (x right, y down)
            bbox_lineset_from_mesh_adam = pcp_utils.np_vis.make_lineset(
                bbox_points_from_mesh_adam)
            o3d.visualization.draw_geometries(
                [object_pcd, bbox_lineset_from_mesh_adam])

        # create a dictionary to save the data and ship it !!
        save_dict = AttrDict()
        """
        last view is reference frame
        rgb_camXs: nviews x height x width x 3
        depth_camXs: nviews x height x width
        pix_T_cams: nviews x 3 x 3
        camR_T_camXs: nviews x 4 x 4
        bbox_camR: 8 x 3
        cluster_id: 'string'
        """
        save_dict.rgb_camXs = np.stack(ep_imgs)
        save_dict.depth_camXs = np.stack(ep_depths)
        save_dict.pix_T_cams = np.stack(ep_pix_T_cams)
        save_dict.camR_T_camXs = np.stack(ep_camR_T_camXs)
        save_dict.bbox_camR = bbox_points_from_mesh_adam
        save_dict.cluster_id = mesh_obj.cluster_id

        scene_name = mesh_obj.name
        if self.config.save_image:
            imageio.imwrite(f"tmp/{scene_name}_nviews.png",
                            np.concatenate(ep_imgs, axis=1))
        vis_save_path = os.path.join(self.output_dir,
                                     f"visual_data_{scene_name}.npy")
        np.save(vis_save_path, save_dict)
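
        # loading sketch (assuming the dict round-trips through np.save's pickling):
        #   data = np.load(vis_save_path, allow_pickle=True).item()
        #   rgbs = data["rgb_camXs"]  # (nviews, H, W, 3); the last view is the reference camera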

        self.all_files.append(
            os.path.join(self.output_dir_exclud_root,
                         f"visual_data_{scene_name}.npy"))

        print('---- done ----')
# Example 5
    def scene_to_npz(self, mesh_obj, repeat_id):

        integrated_xml = generate_integrated_xml(
            self.env_base_xml_file,
            mesh_obj.obj_xml_file,
            randomize_color=self.randomize_color,
            scale=mesh_obj.scale,
            mass=mesh_obj.mass,
            euler=mesh_obj.euler,
            prefix=self.run_name,
            remove_site=self.config.remove_site)
        env = gym.make(self.env_name,
                       xml_path=integrated_xml,
                       use_bbox_indicator=self.bbox_indicator,
                       n_actions=self.n_robot_dof,
                       init_info=self.init_info)
        obs = env.reset()

        if self.config.render:
            env.render()
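        # step with zero actions so the object settles before capturing views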
        for _ in range(20):
            obsDataNew, reward, done, info = env.step(
                np.zeros(self.n_robot_dof))
            if self.config.render:
                env.render()

        # sample camera poses on a hemisphere around the lookat point
        camera_positions, camera_quats = pcp_utils.cameras.generate_new_cameras_hemisphere(
            radius=self.config.camera_radius,
            lookat_point=self.config.camera_lookat_pos,
            pitch=self.config.camera_pitch,
            yaw=self.config.camera_yaw)

        images = pcp_utils.cameras.render_images(env,
                                                 self.config.camera_img_height,
                                                 self.config.camera_img_width,
                                                 self.config.camera_fov,
                                                 camera_positions,
                                                 camera_quats,
                                                 camera_name="ext_camera_0")
        self.convert_to_adam(images)
        image_ref = pcp_utils.cameras.render_image_from_camX(
            env,
            self.config.camera_img_height,
            self.config.camera_img_width,
            self.config.camera_fov,
            camera_name="ref_cam")
        self.convert_to_adam(image_ref)

        scene_name = mesh_obj.name
        if self.config.save_image:
            imageio.imwrite(f"tmp/img_ref.png", image_ref["rgb_camXs"][0])
            n_images = images["rgb_camXs"].shape[0]
            imageio.imwrite(
                f"tmp/{scene_name}_nviews.png",
                np.squeeze(np.concatenate(np.split(images["rgb_camXs"],
                                                   n_images,
                                                   axis=0),
                                          axis=2),
                           axis=0))
            print(f"save image tmp/{scene_name}_nviews.png....")

        if not self.detector:
            object_xpos, object_xmat = env.get_object_pos("object0")
            composed_xmat = np.eye(4, dtype=np.float32)
            composed_xmat[:3, 3] = object_xpos
            composed_xmat[:3, :3] = np.reshape(object_xmat, [3, 3])
            composed_xmat = np.dot(self.adam_T_origin, composed_xmat)
            object_xpos_adam = composed_xmat[:3, 3]
            object_xmat_adam = composed_xmat[:3, :3]

            bbox_points_from_mesh_adam = pcp_utils.np_vis.compute_bounding_box_from_obj_xml(
                mesh_obj.obj_xml_file,
                object_xpos_adam,
                object_xmat_adam,
                scale=mesh_obj.scale,
                euler=mesh_obj.euler)

        else:
            object_xpos, object_xmat = env.get_object_pos("object0")
            bbox_points_from_mesh_gt = pcp_utils.np_vis.compute_bounding_box_from_obj_xml(
                mesh_obj.obj_xml_file,
                object_xpos,
                object_xmat,
                scale=mesh_obj.scale,
                euler=mesh_obj.euler)
            results, _ = self.detector.detect_objects(env)
            bounds, center, extents, xyz_origin, xyz_origin_cp = results
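            # bounds is a (2, 3) array holding the detected box's per-axis [min; max]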

            x_min = bounds[0, 0]
            x_max = bounds[1, 0]
            y_min = bounds[0, 1]
            y_max = bounds[1, 1]
            z_min = bounds[0, 2]
            z_max = bounds[1, 2]

            bbox_points_from_mesh = np.array([
                [x_min, y_min, z_min],
                [x_max, y_min, z_min],
                [x_min, y_min, z_max],
                [x_max, y_min, z_max],
                [x_min, y_max, z_min],
                [x_max, y_max, z_min],
                [x_min, y_max, z_max],
                [x_max, y_max, z_max],
            ])
            bbox_points_ones = np.concatenate(
                [bbox_points_from_mesh, np.ones([8, 1])], axis=1).T
            bbox_point_adam = np.dot(self.adam_T_origin,
                                     bbox_points_ones)[:3, :].T

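            # the adam transform flips axes (y points down), so per-axis extrema
            # come from opposite corners of the transformed box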
            x_min = bbox_point_adam[0, 0]
            x_max = bbox_point_adam[-1, 0]
            y_min = bbox_point_adam[-1, 1]
            y_max = bbox_point_adam[0, 1]
            z_min = bbox_point_adam[0, 2]
            z_max = bbox_point_adam[-1, 2]
            bbox_points_from_mesh_adam = np.array([
                [x_min, y_min, z_min],
                [x_max, y_min, z_min],
                [x_min, y_min, z_max],
                [x_max, y_min, z_max],
                [x_min, y_max, z_min],
                [x_max, y_max, z_min],
                [x_min, y_max, z_max],
                [x_max, y_max, z_max],
            ])

            diff = np.linalg.norm(bbox_points_from_mesh_gt -
                                  bbox_points_from_mesh)
            if diff > 0.1:
                print("distance to gt is too high", diff)

        if self.config.visualize_bbox:
            ep_depths = images["depth_camXs"]
            ep_pix_T_cams = images["pix_T_cams"]
            ep_camR_T_camXs = images["origin_T_camXs"]

            _, xyz_camRs, _ = pcp_utils.np_vis.unproject_depth(
                ep_depths,
                ep_pix_T_cams,
                ep_camR_T_camXs,
                camR_T_origin=None,  #np.linalg.inv(self.origin_T_adam),
                clip_radius=5.0,
                do_vis=False)

            all_xyz_camR = np.concatenate(xyz_camRs, axis=0)
            object_pcd = pcp_utils.np_vis.get_pcd_object(all_xyz_camR,
                                                         clip_radius=2.0)

            # transform object xpos and xmat to the adam coordinate (x right, y down)
            bbox_lineset_from_mesh_adam = pcp_utils.np_vis.make_lineset(
                bbox_points_from_mesh_adam)
            o3d.visualization.draw_geometries(
                [object_pcd, bbox_lineset_from_mesh_adam])

        # create a dictionary to save the data and ship it !!
        save_dict = AttrDict()
        """
        last view is reference frame
        rgb_camXs: nviews x height x width x 3
        depth_camXs: nviews x height x width
        pix_T_cams: nviews x 3 x 3
        camR_T_camXs: nviews x 4 x 4
        bbox_camR: 8 x 3
        cluster_id: 'string'
        """
        save_dict.rgb_camXs = np.concatenate(
            [images['rgb_camXs'], image_ref['rgb_camXs']], axis=0)
        save_dict.depth_camXs = np.concatenate(
            [images['depth_camXs'], image_ref['depth_camXs']], axis=0)
        save_dict.pix_T_cams = np.concatenate(
            [images['pix_T_cams'], image_ref['pix_T_cams']], axis=0)
        save_dict.camR_T_camXs = np.concatenate(
            [images['origin_T_camXs'], image_ref['origin_T_camXs']], axis=0)
        save_dict.bbox_camR = bbox_points_from_mesh_adam
        save_dict.cluster_id = mesh_obj.cluster_id
        save_dict.success_rates_over_class = np.array(
            [float(x) for x in mesh_obj.success_rates_over_class.split(" ")])

        if self.sr_from_single_trial:
            srs = np.array([
                float(x) for x in mesh_obj.success_rates_over_class.split(" ")
            ])
            # replace each mean rate with a single Bernoulli draw from it
            save_dict.success_rates_over_class = np.array(
                [np.random.binomial(1, p) for p in srs], dtype=np.float32)

        vis_save_path = os.path.join(
            self.output_dir, f"visual_data_{scene_name}_r{repeat_id}.npy")
        np.save(vis_save_path, save_dict)
        self.all_files.append(
            os.path.join(self.output_dir_exclud_root,
                         f"visual_data_{scene_name}_r{repeat_id}.npy"))

        print(f'---- done saving {vis_save_path} ----')
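
# Example 6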
    def add_object(self, obj):
        #share the env

        # make xml for the object:
        integrated_xml = generate_integrated_xml(
            self.env_base_xml_file,
            obj.obj_xml_file,
            scale=obj.scale,
            mass=obj.mass,
            euler=obj.euler,
            add_bbox_indicator=self.bbox_indicator,
            randomize_color=self.randomize_color,
            prefix=self.run_name)
        env = gym.make(self.env_name,
                       xml_path=integrated_xml,
                       use_bbox_indicator=self.bbox_indicator,
                       n_actions=self.n_robot_dof,
                       init_info=self.init_info)

        env.seed(RANDOM_SEED)
        env.action_space.seed(RANDOM_SEED)
        print(f'max env steps are: {env._max_episode_steps}')

        # env.render()
        # this ordering could itself be learned
        is_clustered = False
        success_rates = np.zeros(len(self.clusters.items()))

        for cid, cluster in self.clusters.items():
            env.seed(RANDOM_SEED)
            # load policy of the first mesh (parent mesh) in an existing cluster
            print("Checking performance of {} on policy for {}: {}".format(
                obj.name, cid, cluster['expert'].policy_name))
            stats = cluster['expert'].run_forwards(
                env,
                obj=obj,
                num_rollouts=self.num_rollouts,
                path_length=self.max_path_length,
                render=self.config.render,
                cluster_name=cluster['expert'].policy_name,
                place=self.config.place
            )

            success_rate = stats['success_rate']
            print("Success Rate ", success_rate)

            success_rates[int(cid[1:])] = success_rate

            if success_rate >= self.accept_threshold:  # could also scale by a base success rate

                self.object_to_cluster.append((obj, cid))

                cluster['objects'].append((obj, success_rate))
                is_clustered = True
            # if it fails everywhere: could randomly select from one of the top clusters

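        # record per-cluster success rates as a space-separated string (parsed back in scene_to_npz)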
        self.success_rates_over_class[obj.name] = " ".join(
            [str(success_rates[x]) for x in range(self.num_clusters)])

        self.success_rates.append(success_rates)

        if not is_clustered:
            self.object_not_clustered.append(obj)
            self.object_to_cluster.append((obj, "not_assigned"))

        gym_xml_path = os.path.join(pcp_utils.utils.get_gym_dir(),
                                    'gym/envs/robotics/assets')
        integrated_xml_full_path = os.path.join(gym_xml_path, integrated_xml)
        os.remove(integrated_xml_full_path)

        print("failed objects")
        print(self.failed_object)

        env.close()
# Example 7
    def add_object(self, obj):
        #share the env

        # make xml for the object:
        integrated_xml = generate_integrated_xml(
            self.env_base_xml_file,
            obj.obj_xml_file,
            scale=obj.scale,
            mass=obj.mass,
            euler=obj.euler,
            add_bbox_indicator=self.bbox_indicator,
            randomize_color=self.randomize_color,
            prefix=self.run_name)

        env = gym.make(self.env_name,
                       xml_path=integrated_xml,
                       use_bbox_indicator=self.bbox_indicator,
                       n_actions=self.n_robot_dof,
                       init_info=self.init_info)
        print(f'max env steps are: {env._max_episode_steps}')

        # env.render()
        # this ordering could itself be learned
        is_clustered = False
        success_rates = [
            float(item) for item in obj.success_rates_over_class.split(" ")
        ]

        for cid, cluster in self.clusters.items():

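            # expert names look like "c<idx>" (e.g. "c0"); strip the prefix to index the rates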
            cluster_id = int(cluster["expert_name"][1:])
            if success_rates[cluster_id] >= 0.8:
                for _ in range(self.num_rollouts):

                    obs = env.reset()
                    for _ in range(20):
                        obsDataNew, reward, done, info = env.step(
                            np.zeros(self.n_robot_dof))
                        if self.config.render:
                            env.render()

                    # if the object's success rate is high -> collect positive grasps
                    images = self.render_images(env)
                    images = self.convert_to_adam(images)
                    obj_pc, pc, pc_colors = self.preprocess_points_for_6dgrasp(
                        env,
                        obj,
                        images,
                        obj_name=obj.name,
                        save_dir="tmp",
                        save=True)

                    # collect pcs


                # collect grasp points: grasp_rt

        if not is_clustered:
            self.object_not_clustered.append(obj)
            self.object_to_cluster.append((obj, "not_assigned"))

        gym_xml_path = os.path.join(pcp_utils.utils.get_gym_dir(),
                                    'gym/envs/robotics/assets')
        integrated_xml_full_path = os.path.join(gym_xml_path, integrated_xml)
        os.remove(integrated_xml_full_path)

        env.close()