Code example #1
            if "obj_ids" in dataset.metadata.keys():
                # modify object ids
                dataset_obj_ids = dataset.metadata["obj_ids"]
                for k in range(datapoint["obj_ids"].shape[0]):
                    dataset_obj_id = datapoint["obj_ids"][k]
                    if dataset_obj_id != np.iinfo(np.uint32).max:
                        dataset_obj_key = dataset_obj_ids[str(dataset_obj_id)]
                        if dataset_obj_key not in obj_ids.keys():
                            obj_ids[dataset_obj_key] = obj_id
                            obj_id += 1
                        datapoint["obj_ids"][k] = obj_ids[dataset_obj_key]

                # modify grasped obj id
                dataset_grasped_obj_id = datapoint["grasped_obj_ids"]
                grasped_obj_key = dataset_obj_ids[str(dataset_grasped_obj_id)]
                datapoint["grasped_obj_ids"] = obj_ids[grasped_obj_key]

            # add datapoint
            output_dataset.add(datapoint)

    # set metadata
    obj_ids = utils.reverse_dictionary(obj_ids)
    output_dataset.add_metadata("obj_ids", obj_ids)
    for field_name, field_data in dataset.metadata.items():
        if field_name not in ["obj_ids"]:
            output_dataset.add_metadata(field_name, field_data)

    # flush to disk
    output_dataset.flush()
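
For reference, a minimal, self-contained sketch of the ID-remapping idea above, using plain dictionaries in place of TensorDataset metadata. It assumes utils.reverse_dictionary simply inverts a key/value mapping; the object names are made up for illustration.

import numpy as np

def reverse_dictionary(d):
    # assumed behavior of utils.reverse_dictionary: swap keys and values
    return {v: k for k, v in d.items()}

# per-dataset metadata maps local integer ids (as strings) to object keys
dataset_obj_ids = {'0': 'mug', '1': 'bowl'}

# global map from object key to a new, globally unique id
obj_ids = {}
obj_id = 0
local_ids = np.array([0, 1, 1], dtype=np.uint32)
for k in range(local_ids.shape[0]):
    obj_key = dataset_obj_ids[str(local_ids[k])]
    if obj_key not in obj_ids:
        obj_ids[obj_key] = obj_id
        obj_id += 1
    local_ids[k] = obj_ids[obj_key]

print(local_ids)                    # [0 1 1]
print(reverse_dictionary(obj_ids))  # {0: 'mug', 1: 'bowl'}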
Code example #2
def benchmark_bin_picking_policy(policy,
                                 # input_dataset_path,
                                 # heap_ids,
                                 # timesteps,
                                 # output_dataset_path,
                                 config,
                                 # excluded_heaps_file):
                                 ):
    """ Benchmark a bin picking policy.

    Parameters
    ----------
    policy : :obj:`Policy`
        policy to roll out
    input_dataset_path : str
        path to the input dataset
    heap_ids : list
        integer identifiers for the heaps to re-run
    timesteps : list
        integer timesteps to seed the simulation from
    output_dataset_path : str
        path to store the results
    config : dict
        dictionary-like object containing parameters of the simulator and visualization
    """
    # read subconfigs
    vis_config = config['vis']
    dataset_config = config['dataset']

    # read parameters
    fully_observed = config['fully_observed']
    steps_per_test_case = config['steps_per_test_case']
    rollouts_per_garbage_collect = config['rollouts_per_garbage_collect']
    debug = config['debug']
    im_height = config['state_space']['camera']['im_height']
    im_width = config['state_space']['camera']['im_width']
    max_obj_per_pile = config['state_space']['object']['max_obj_per_pile']

    if debug:
        random.seed(SEED)
        np.random.seed(SEED)

    # read ids
    # if len(heap_ids) != len(timesteps):
    #     raise ValueError('Must provide same number of heap ids and timesteps')
    # num_rollouts = len(heap_ids)
    num_rollouts = 1
        
    # set dataset params
    tensor_config = dataset_config['tensors']
    fields_config = tensor_config['fields']
    # fields_config['color_ims']['height'] = im_height
    # fields_config['color_ims']['width'] = im_width
    # fields_config['depth_ims']['height'] = im_height
    # fields_config['depth_ims']['width'] = im_width
    fields_config['obj_poses']['height'] = POSE_DIM * max_obj_per_pile
    fields_config['obj_coms']['height'] = POINT_DIM * max_obj_per_pile
    fields_config['obj_ids']['height'] = max_obj_per_pile
    fields_config['bin_distances']['height'] = max_obj_per_pile
    # matrix has (n choose 2) elements in it
    max_distance_matrix_length = int(comb(max_obj_per_pile, 2))
    fields_config['distance_matrix']['height'] = max_distance_matrix_length
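    # worked example (hedged): with max_obj_per_pile = 10, comb(10, 2) = 45,
    # so the condensed distance vector holds 45 entries, matching the layout
    # scipy.spatial.distance.pdist produces for 10 points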

    # sample a process id
    proc_id = utils.gen_experiment_id()
    # if not os.path.exists(output_dataset_path):
    #     try:
    #         os.mkdir(output_dataset_path)
    #     except:
    #         logging.warning('Failed to create %s. The dataset path may have been created simultaneously by another process' %(dataset_path))
    # proc_id = 'clustering_2'
    # output_dataset_path = os.path.join(output_dataset_path, 'dataset_%s' %(proc_id))

    # open input dataset
    # logging.info('Opening input dataset: %s' % input_dataset_path)
    # input_dataset = TensorDataset.open(input_dataset_path)
    
    # open output_dataset
    # logging.info('Opening output dataset: %s' % output_dataset_path)
    # dataset = TensorDataset(output_dataset_path, tensor_config)
    # datapoint = dataset.datapoint_template

    # setup logging
    # experiment_log_filename = os.path.join(output_dataset_path, 'dataset_generation.log')
    # formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    # hdlr = logging.FileHandler(experiment_log_filename)
    # hdlr.setFormatter(formatter)
    # logging.getLogger().addHandler(hdlr)
    # config.save(os.path.join(output_dataset_path, 'dataset_generation_params.yaml'))
    
    # key mappings
    # map the empty string to id 0 so that id lookups still succeed when dexnet is not evaluated on the 'before' state of the push
    obj_id = 1
    obj_ids = {'': 0}
    action_ids = {
        'ParallelJawGraspAction': 0,
        'SuctionGraspAction': 1,
        'LinearPushAction': 2
    }
    
    # add action ids
    reverse_action_ids = utils.reverse_dictionary(action_ids)
    # dataset.add_metadata('action_ids', reverse_action_ids)
    
    # perform rollouts
    n = 0
    rollout_start = time.time()
    current_heap_id = None
    while n < num_rollouts:
        # create env
        create_start = time.time()
        bin_picking_env = GraspingEnv(config, vis_config)
        create_stop = time.time()
        logging.info('Creating env took %.3f sec' %(create_stop-create_start))

        # perform rollouts
        rollouts_remaining = num_rollouts - n
        for i in range(min(rollouts_per_garbage_collect, rollouts_remaining)):
            # log current rollout
            logging.info('\n')
            if n % vis_config['log_rate'] == 0:
                logging.info('Rollout: %03d' %(n))

            try:    
                # mark rollout status
                data_saved = False
                num_steps = 0
                
                # read heap id
                # heap_id = heap_ids[n]
                # timestep = timesteps[n]
                # while heap_id == current_heap_id:# or heap_id < 81:#[226, 287, 325, 453, 469, 577, 601, 894, 921]: 26
                #     n += 1
                #     heap_id = heap_ids[n]
                #     timestep = timesteps[n]
                push_logger = logging.getLogger('push')
                # push_logger.info('~')
                # push_logger.info('Heap ID %d' % heap_id)
                # current_heap_id = heap_id
                
                # reset env
                reset_start = time.time()
                # bin_picking_env.reset_from_dataset(input_dataset,
                #                                    heap_id,
                #                                    timestep)
                bin_picking_env.reset()
                state = bin_picking_env.state
                environment = bin_picking_env.environment
                if fully_observed:
                    observation = None
                else:
                    observation = bin_picking_env.observation
                policy.set_environment(environment) 
                reset_stop = time.time()

                # add objects to mapping
                for obj_key in state.obj_keys:
                    if obj_key not in obj_ids:
                        obj_ids[obj_key] = obj_id
                        obj_id += 1
                    push_logger.info(obj_key)
                # save id mappings
                reverse_obj_ids = utils.reverse_dictionary(obj_ids)
                # dataset.add_metadata('obj_ids', reverse_obj_ids)
                        
                # store datapoint env params
                # datapoint['heap_ids'] = current_heap_id
                # datapoint['camera_poses'] = environment.camera.T_camera_world.vec
                # datapoint['camera_intrs'] = environment.camera.intrinsics.vec
                # datapoint['robot_poses'] = environment.robot.T_robot_world.vec
            
                # render
                if vis_config['initial_state']:
                    vis3d.figure()
                    bin_picking_env.render_3d_scene()
                    vis3d.pose(environment.robot.T_robot_world)
                    vis3d.show(starting_camera_pose=CAMERA_POSE)
            
                # observe
                if vis_config['initial_obs'] and observation is not None:
                    vis2d.figure()
                    vis2d.imshow(observation, auto_subplot=True)
                    vis2d.show()

                # rollout on current state
                done = False
                failed = False
                # if isinstance(policy, SingulationFullRolloutPolicy):
                #     policy.reset_num_failed_grasps()
                while not done:
                    if vis_config['step_stats']:
                        # logging.info('Heap ID: %s' % heap_id)  # heap ids are currently disabled
                        logging.info('Timestep: %s' % bin_picking_env.timestep)

                    # get action
                    policy_start = time.time()
                    if fully_observed:
                        action = policy.action(state)
                    else:
                        action = policy.action(observation)
                    policy_stop = time.time()
                    logging.info('Composite Policy took %.3f sec' %(policy_stop-policy_start))

                    # render scene before
                    if vis_config['action']:
                        gripper = bin_picking_env.gripper(action)  # needed by vis3d.gripper below
                        vis3d.figure()
                        # GRASPINGENV
                        # bin_picking_env.render_3d_scene(render_camera=False, workspace_objs_wireframe=False)
                        bin_picking_env.render_3d_scene()
                        if isinstance(action, GraspAction):
                            vis3d.gripper(gripper, action.grasp(gripper))
                        #if isinstance(action, LinearPushAction):
                        else:
                            # # T_start_world = action.T_begin_world * gripper.T_mesh_grasp
                            # # T_end_world = action.T_end_world * gripper.T_mesh_grasp
                            # #start_point = action.T_begin_world.translation
                            # start_point = action['start']
                            # #end_point = action.T_end_world.translation
                            # end_point = action['end']
                            # vec = (end_point - start_point) / np.linalg.norm(end_point-start_point) if np.linalg.norm(end_point-start_point) > 0 else end_point-start_point 
                            # #h1 = np.array([[0.7071,-0.7071,0],[0.7071,0.7071,0],[0,0,1]]).dot(vec)
                            # #h2 = np.array([[0.7071,0.7071,0],[-0.7071,0.7071,0],[0,0,1]]).dot(vec)
                            # arrow_len = np.linalg.norm(start_point - end_point)
                            # h1 = (end_point - start_point + np.array([0,0,arrow_len])) / (arrow_len*math.sqrt(2))
                            # h2 = (end_point - start_point - np.array([0,0,arrow_len])) / (arrow_len*math.sqrt(2))
                            # shaft_points = [start_point, end_point]
                            # head_points = [end_point - 0.03*h2, end_point, end_point - 0.03*h1]
                            # #vis3d.plot3d(shaft_points, color=[0,0,1])
                            # #vis3d.plot3d(head_points, color=[0,0,1])
                            
                            # Displaying all potential topple points
                            for vertex, prob in zip(action['vertices'], action['probabilities']):
                                color = np.array([min(1, 2*(1-prob)), min(2*prob, 1), 0])
                                vis3d.points(Point(vertex, 'world'), scale=.0005, color=color)

                            for vertex in action['bottom_points']:
                                color = np.array([0,0,1])
                                vis3d.points(Point(vertex, 'world'), scale=.0005, color=color)
                            vis3d.points(Point(action['com'], 'world'), scale=.005, color=np.array([0,0,1]))
                            vis3d.points(Point(np.array([0,0,0]), 'world'), scale=.005, color=np.array([0,1,0]))
                            
                            #set_of_lines = action['set_of_lines']
                            #for i, line in enumerate(set_of_lines):
                            #    color = str(bin(i+1))[2:].zfill(3)
                            #    color = np.array([color[2], color[1], color[0]])
                            #    vis3d.plot3d(line, color=color)
                        vis3d.show(starting_camera_pose=CAMERA_POSE)

                        # Show the predicted final poses
                        vis3d.figure()
                        bin_picking_env.render_3d_scene()
                        final_pose_probs = action['final_pose_ind'] / np.amax(action['final_pose_ind'])
                        for vertex, pose_prob in zip(action['vertices'], final_pose_probs):
                            color = np.array([0, min(1, 2*(1-pose_prob)), min(2*pose_prob, 1)])
                            vis3d.points(Point(vertex, 'world'), scale=.0005, color=color)
                        vis3d.show(starting_camera_pose=CAMERA_POSE)

                        color = np.array([0, 0, 1])
                        original_pose = state.obj.T_obj_world
                        pose_num = 0
                        for pose, edge_point1, edge_point2 in zip(action['final_poses'], action['bottom_points'], np.roll(action['bottom_points'],-1,axis=0)):
                            print('Pose:', pose_num)
                            pose_num += 1
                            pose = pose.T_obj_table
                            vis3d.figure()
                            state.obj.T_obj_world = original_pose
                            bin_picking_env.render_3d_scene()
                            vis3d.points(Point(edge_point1, 'world'), scale=.0005, color=color)
                            vis3d.points(Point(edge_point2, 'world'), scale=.0005, color=color)
                            vis3d.show(starting_camera_pose=CAMERA_POSE)

                            vis3d.figure()
                            state.obj.T_obj_world = pose
                            bin_picking_env.render_3d_scene()
                            vis3d.points(Point(edge_point1, 'world'), scale=.0005, color=color)
                            vis3d.points(Point(edge_point2, 'world'), scale=.0005, color=color)
                            vis3d.show(starting_camera_pose=CAMERA_POSE)
                         #vis3d.save('/home/mjd3/Pictures/weird_pics/%d_%d_before.png' % (heap_id, bin_picking_env.timestep), starting_camera_pose=CAMERA_POSE)
                    # store datapoint pre-step data
                    j = 0
                    obj_poses = np.zeros(fields_config['obj_poses']['height'])
                    obj_coms = np.zeros(fields_config['obj_coms']['height'])
                    obj_ids_vec = np.iinfo(np.uint32).max * np.ones(fields_config['obj_ids']['height'])
                    for obj_state in state.obj_states:
                        obj_poses[j*POSE_DIM:(j+1)*POSE_DIM] = obj_state.T_obj_world.vec
                        obj_coms[j*POINT_DIM:(j+1)*POINT_DIM] = obj_state.center_of_mass
                        obj_ids_vec[j] = obj_ids[obj_state.key]
                        j += 1
                    action_poses = np.zeros(fields_config['action_poses']['height'])
                    #if isinstance(action, GraspAction):
                    #    action_poses[:7] = action.T_grasp_world.vec
                    #else:
                    #    action_poses[:7] = action.T_begin_world.vec
                    #    action_poses[7:] = action.T_end_world.vec

                    # if isinstance(policy, SingulationMetricsCompositePolicy):
                    #     actual_distance_matrix_length = int(comb(len(state.objs), 2))
                    #     bin_distances = np.append(action.metadata['bin_distances'], 
                    #                               np.zeros(max_obj_per_pile-len(state.objs))
                    #                             )
                    #     distance_matrix = np.append(action.metadata['distance_matrix'], 
                    #                                 np.zeros(max_distance_matrix_length - actual_distance_matrix_length)
                    #                             )
                    #     datapoint['bin_distances'] = bin_distances
                    #     datapoint['distance_matrix'] = distance_matrix
                    #     datapoint['T_begin_world'] = action.T_begin_world.matrix
                    #     datapoint['T_end_world'] = action.T_end_world.matrix
                    #     datapoint['parallel_jaw_best_q_value'] = action.metadata['parallel_jaw_best_q_value']
                    #     # datapoint['parallel_jaw_mean_q_value'] = action.metadata['parallel_jaw_mean_q_value']
                    #     # datapoint['parallel_jaw_num_grasps'] = action.metadata['parallel_jaw_num_grasps']
                    #     datapoint['suction_best_q_value'] = action.metadata['suction_best_q_value']
                    #     # datapoint['suction_mean_q_value'] = action.metadata['suction_mean_q_value']
                    #     # datapoint['suction_num_grasps'] = action.metadata['suction_num_grasps']
                    #     # logging.info('Suction Q: %f, PJ Q: %f' % (action.metadata['suction_q_value'], action.metadata['parallel_jaw_q_value']))
                    #     # datapoint['obj_index'] = action.metadata['obj_index']

                    #     # datapoint['parallel_jaw_best_q_value_single'] = action.metadata['parallel_jaw_best_q_value_single']
                    #     # datapoint['suction_best_q_value_single'] = action.metadata['suction_best_q_value_single']
                    #     datapoint['singulated_obj_index'] = action.metadata['singulated_obj_index']
                    #     datapoint['parallel_jaw_grasped_obj_index'] = obj_ids[action.metadata['parallel_jaw_grasped_obj_key']]
                    #     datapoint['suction_grasped_obj_index'] = obj_ids[action.metadata['suction_grasped_obj_key']]
                    # else:
                    #     datapoint['bin_distances'] = np.zeros(max_obj_per_pile)
                    #     datapoint['distance_matrix'] = np.zeros(max_distance_matrix_length)
                    #     datapoint['T_begin_world'] = np.zeros((4,4))
                    #     datapoint['T_end_world'] = np.zeros((4,4))
                    #     datapoint['parallel_jaw_best_q_value'] = -1
                    #     datapoint['suction_best_q_value'] = -1
                    #     datapoint['singulated_obj_index'] = -1
                    #     datapoint['parallel_jaw_grasped_obj_index'] = -1
                    #     datapoint['suction_grasped_obj_index'] = -1

                    # policy_id = 0
                    # if 'policy_id' in action.metadata.keys():
                    #     policy_id = action.metadata['policy_id']
                    # greedy_q_value = 0
                    # if 'greedy_q_value' in action.metadata.keys():
                    #     greedy_q_value = action.metadata['greedy_q_value']
                        
                    # datapoint['timesteps'] = bin_picking_env.timestep
                    # datapoint['obj_poses'] = obj_poses
                    # datapoint['obj_coms'] = obj_coms
                    # datapoint['obj_ids'] = obj_ids_vec
                    # # if bin_picking_env.render_mode == RenderMode.RGBD:
                    # #     color_data = observation.color.raw_data
                    # #     depth_data = observation.depth.raw_data
                    # # elif bin_picking_env.render_mode == RenderMode.DEPTH:
                    # #     color_data = np.zeros(observation.shape).astype(np.uint8)
                    # #     depth_data = observation.raw_data
                    # # elif bin_picking_env.render_mode == RenderMode.COLOR:
                    # #     color_data = observation.raw_data
                    # #     depth_data = np.zeros(observation.shape)
                    # # datapoint['color_ims'] = color_data
                    # # datapoint['depth_ims'] = depth_data
                    # datapoint['action_ids'] = action_ids[type(action).__name__]
                    # datapoint['action_poses'] = action_poses
                    # datapoint['policy_ids'] = policy_id
                    # datapoint['greedy_q_values'] = greedy_q_value
                    # datapoint['pred_q_values'] = action.q_value
                    
                    # step the policy
                    #observation, reward, done, info = bin_picking_env.step(action)
                    #state = bin_picking_env.state
                    state.objs[0].T_obj_world = action['final_state']

                    # if isinstance(policy, SingulationFullRolloutPolicy):
                    #     policy.grasp_succeeds(info['grasp_succeeds'])
        
                    # debugging info
                    if vis_config['step_stats']:
                        logging.info('Action type: %s' %(type(action).__name__))
                        logging.info('Action Q-value: %.3f' %(action.q_value))
                        # logging.info('Reward: %d' %(reward))  # reward is unavailable while env.step() is commented out
                        logging.info('Policy took %.3f sec' %(policy_stop-policy_start))
                        logging.info('Num objects remaining: %d' %(bin_picking_env.num_objects))
                        # if info['cleared_pile']:  # info is unavailable while env.step() is commented out
                        #     logging.info('Cleared pile!')
                        
                    # # store datapoint post-step data
                    # datapoint['rewards'] = reward
                    # datapoint['grasp_metrics'] = info['grasp_metric']
                    # datapoint['collisions'] = 1 * info['collides']
                    # datapoint['collisions_with_env'] = 1 * info['collides_with_static_obstacles']
                    # datapoint['grasped_obj_ids'] = obj_ids[info['grasped_obj_key']]
                    # datapoint['cleared_pile'] = 1 * info['cleared_pile']

                    # # store datapoint
                    # # dataset.add(datapoint)
                    # data_saved = True    
                    
                    # render observation
                    if vis_config['obs'] and observation is not None:
                        vis2d.figure()
                        vis2d.imshow(observation, auto_subplot=True)
                        vis2d.show()
        
                    # render scene after
                    if vis_config['state']:
                        vis3d.figure()
                        bin_picking_env.render_3d_scene(render_camera=False)
                        vis3d.show(starting_camera_pose=CAMERA_POSE)
                        # vis3d.save('/home/mjd3/Pictures/weird_pics/%d_%d_after.png' % (heap_id, bin_picking_env.timestep), starting_camera_pose=CAMERA_POSE)
                    state.objs[0].T_obj_world = action['tmpR']
                    vis3d.figure()
                    bin_picking_env.render_3d_scene(render_camera=False)
                    vis3d.show(starting_camera_pose=CAMERA_POSE)
                    state.objs[0].T_obj_world = action['final_state']
                    # increment the number of steps
                    num_steps += 1
                    if num_steps >= steps_per_test_case:
                        done = True
                        
            except NoActionFoundException as e:
                logging.warning('The policy failed to plan an action!')
                done = True                    
            except Exception as e:
                # log an error
                logging.warning('Rollout failed!')
                logging.warning('%s' %(str(e)))
                logging.warning(traceback.format_exc())
                # if debug:
                #     raise
                
                # reset env
                del bin_picking_env
                gc.collect()
                bin_picking_env = GraspingEnv(config, vis_config)  # match the env class used above

                # terminate current rollout
                failed = True
                done = True

            # update test case id
            n += 1
            # dataset.flush()
            # logging.info("\n\nflushing")
            # logging.info("exiting")
            # sys.exit()
                
        # garbage collect
        del bin_picking_env
        gc.collect()

    # return the dataset 
    # dataset.flush()

    # log time
    rollout_stop = time.time()
    logging.info('Rollouts took %.3f sec' %(rollout_stop-rollout_start))

    # return dataset  # dataset creation is commented out above, so there is nothing to return
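
The topple visualization above maps each vertex's topple probability to an RGB color that sweeps from red (prob = 0) through yellow (prob = 0.5) to green (prob = 1). A minimal, self-contained sketch of that mapping (the helper name is ours, for illustration):

import numpy as np

def prob_to_color(prob):
    # red at prob = 0, yellow at prob = 0.5, green at prob = 1
    return np.array([min(1.0, 2 * (1 - prob)), min(2 * prob, 1.0), 0.0])

for p in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(p, prob_to_color(p))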
Code example #3
def generate_segmask_dataset(output_dataset_path,
                             config,
                             save_tensors=True,
                             warm_start=False):
    """ Generate a segmentation training dataset

    Parameters
    ----------
    dataset_path : str
        path to store the dataset
    config : dict
        dictionary-like object containing parameters of the simulator and visualization
    save_tensors : bool
        save tensor datasets (for recreating state)
    warm_start : bool
        restart dataset generation from a previous state
    """

    # read subconfigs
    dataset_config = config['dataset']
    image_config = config['images']
    vis_config = config['vis']

    # debugging
    debug = config['debug']
    if debug:
        np.random.seed(SEED)

    # read general parameters
    num_states = config['num_states']
    num_images_per_state = config['num_images_per_state']

    states_per_flush = config['states_per_flush']
    states_per_garbage_collect = config['states_per_garbage_collect']

    # set max obj per state
    max_objs_per_state = config['state_space']['heap']['max_objs']

    # read image parameters
    im_height = config['state_space']['camera']['im_height']
    im_width = config['state_space']['camera']['im_width']
    segmask_channels = max_objs_per_state + 1

    # create the dataset path and all subfolders if they don't exist
    if not os.path.exists(output_dataset_path):
        os.mkdir(output_dataset_path)

    image_dir = os.path.join(output_dataset_path, 'images')
    if not os.path.exists(image_dir):
        os.mkdir(image_dir)
    color_dir = os.path.join(image_dir, 'color_ims')
    if image_config['color'] and not os.path.exists(color_dir):
        os.mkdir(color_dir)
    depth_dir = os.path.join(image_dir, 'depth_ims')
    if image_config['depth'] and not os.path.exists(depth_dir):
        os.mkdir(depth_dir)
    amodal_dir = os.path.join(image_dir, 'amodal_masks')
    if image_config['amodal'] and not os.path.exists(amodal_dir):
        os.mkdir(amodal_dir)
    modal_dir = os.path.join(image_dir, 'modal_masks')
    if image_config['modal'] and not os.path.exists(modal_dir):
        os.mkdir(modal_dir)
    semantic_dir = os.path.join(image_dir, 'semantic_masks')
    if image_config['semantic'] and not os.path.exists(semantic_dir):
        os.mkdir(semantic_dir)

    # setup logging
    experiment_log_filename = os.path.join(output_dataset_path,
                                           'dataset_generation.log')
    if os.path.exists(experiment_log_filename) and not warm_start:
        os.remove(experiment_log_filename)
    Logger.add_log_file(logger, experiment_log_filename, global_log_file=True)
    config.save(
        os.path.join(output_dataset_path, 'dataset_generation_params.yaml'))
    metadata = {}
    num_prev_states = 0

    # set dataset params
    if save_tensors:

        # read dataset subconfigs
        state_dataset_config = dataset_config['states']
        image_dataset_config = dataset_config['images']
        state_tensor_config = state_dataset_config['tensors']
        image_tensor_config = image_dataset_config['tensors']

        obj_pose_dim = POSE_DIM * max_objs_per_state
        obj_com_dim = POINT_DIM * max_objs_per_state
        state_tensor_config['fields']['obj_poses']['height'] = obj_pose_dim
        state_tensor_config['fields']['obj_coms']['height'] = obj_com_dim
        state_tensor_config['fields']['obj_ids']['height'] = max_objs_per_state

        image_tensor_config['fields']['camera_pose']['height'] = POSE_DIM

        if image_config['color']:
            image_tensor_config['fields']['color_im'] = {
                'dtype': 'uint8',
                'channels': 3,
                'height': im_height,
                'width': im_width
            }

        if image_config['depth']:
            image_tensor_config['fields']['depth_im'] = {
                'dtype': 'float32',
                'channels': 1,
                'height': im_height,
                'width': im_width
            }

        if image_config['modal']:
            image_tensor_config['fields']['modal_segmasks'] = {
                'dtype': 'uint8',
                'channels': segmask_channels,
                'height': im_height,
                'width': im_width
            }

        if image_config['amodal']:
            image_tensor_config['fields']['amodal_segmasks'] = {
                'dtype': 'uint8',
                'channels': segmask_channels,
                'height': im_height,
                'width': im_width
            }

        if image_config['semantic']:
            image_tensor_config['fields']['semantic_segmasks'] = {
                'dtype': 'uint8',
                'channels': 1,
                'height': im_height,
                'width': im_width
            }

        # create dataset filenames
        state_dataset_path = os.path.join(output_dataset_path, 'state_tensors')
        image_dataset_path = os.path.join(output_dataset_path, 'image_tensors')

        if warm_start:

            if not os.path.exists(state_dataset_path) or not os.path.exists(
                    image_dataset_path):
                logger.error(
                    'Attempting to warm start without saved tensor dataset')
                exit(1)

            # open datasets
            logger.info('Opening state dataset')
            state_dataset = TensorDataset.open(state_dataset_path,
                                               access_mode='READ_WRITE')
            logger.info('Opening image dataset')
            image_dataset = TensorDataset.open(image_dataset_path,
                                               access_mode='READ_WRITE')

            # read configs
            state_tensor_config = state_dataset.config
            image_tensor_config = image_dataset.config

            # clean up datasets (there may be datapoints with indices corresponding to non-existent data)
            num_state_datapoints = state_dataset.num_datapoints
            num_image_datapoints = image_dataset.num_datapoints
            num_prev_states = num_state_datapoints

            # clean up images
            image_ind = num_image_datapoints - 1
            image_datapoint = image_dataset[image_ind]
            while image_ind > 0 and image_datapoint[
                    'state_ind'] >= num_state_datapoints:
                image_ind -= 1
                image_datapoint = image_dataset[image_ind]
            images_to_remove = num_image_datapoints - 1 - image_ind
            logger.info('Deleting last %d image tensors' % (images_to_remove))
            if images_to_remove > 0:
                image_dataset.delete_last(images_to_remove)
                num_image_datapoints = image_dataset.num_datapoints
        else:
            # create datasets from scratch
            logger.info('Creating datasets')

            state_dataset = TensorDataset(state_dataset_path,
                                          state_tensor_config)
            image_dataset = TensorDataset(image_dataset_path,
                                          image_tensor_config)

        # read templates
        state_datapoint = state_dataset.datapoint_template
        image_datapoint = image_dataset.datapoint_template

    if warm_start:

        if not os.path.exists(
                os.path.join(output_dataset_path, 'metadata.json')):
            logger.error(
                'Attempting to warm start without previously created dataset')
            exit(1)

        # Read metadata and indices
        metadata = json.load(
            open(os.path.join(output_dataset_path, 'metadata.json'), 'r'))
        test_inds = np.load(os.path.join(image_dir,
                                         'test_indices.npy')).tolist()
        train_inds = np.load(os.path.join(image_dir,
                                          'train_indices.npy')).tolist()

        # set obj ids and splits
        reverse_obj_ids = metadata['obj_ids']
        obj_id_map = utils.reverse_dictionary(reverse_obj_ids)
        obj_splits = metadata['obj_splits']
        obj_keys = obj_splits.keys()
        mesh_filenames = metadata['meshes']

        # Get list of images generated so far
        generated_images = sorted(
            os.listdir(color_dir)) if image_config['color'] else sorted(
                os.listdir(depth_dir))
        num_total_images = len(generated_images)

        # Do our own calculation if no saved tensors
        if num_prev_states == 0:
            num_prev_states = num_total_images // num_images_per_state

        # Find images to remove and remove them from all relevant places if they exist
        num_images_to_remove = num_total_images - (num_prev_states *
                                                   num_images_per_state)
        logger.info(
            'Deleting last {} invalid images'.format(num_images_to_remove))
        for k in range(num_images_to_remove):
            im_name = generated_images[-(k + 1)]
            im_basename = os.path.splitext(im_name)[0]
            im_ind = int(im_basename.split('_')[1])
            if os.path.exists(os.path.join(depth_dir, im_name)):
                os.remove(os.path.join(depth_dir, im_name))
            if os.path.exists(os.path.join(color_dir, im_name)):
                os.remove(os.path.join(color_dir, im_name))
            if os.path.exists(os.path.join(semantic_dir, im_name)):
                os.remove(os.path.join(semantic_dir, im_name))
            if os.path.exists(os.path.join(modal_dir, im_basename)):
                shutil.rmtree(os.path.join(modal_dir, im_basename))
            if os.path.exists(os.path.join(amodal_dir, im_basename)):
                shutil.rmtree(os.path.join(amodal_dir, im_basename))
            if im_ind in train_inds:
                train_inds.remove(im_ind)
            elif im_ind in test_inds:
                test_inds.remove(im_ind)
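        # worked example: with num_images_per_state = 3 and 10 fully saved
        # states, only the first 30 images are valid; any extras from a
        # partially written state are deleted above and their indices removed
        # from the train/test splits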

    else:

        # Create initial env to generate metadata
        env = BinHeapEnv(config)
        obj_id_map = env.state_space.obj_id_map
        obj_keys = env.state_space.obj_keys
        obj_splits = env.state_space.obj_splits
        mesh_filenames = env.state_space.mesh_filenames
        save_obj_id_map = obj_id_map.copy()
        save_obj_id_map[ENVIRONMENT_KEY] = np.iinfo(np.uint32).max
        reverse_obj_ids = utils.reverse_dictionary(save_obj_id_map)
        metadata['obj_ids'] = reverse_obj_ids
        metadata['obj_splits'] = obj_splits
        metadata['meshes'] = mesh_filenames
        json.dump(metadata,
                  open(os.path.join(output_dataset_path, 'metadata.json'),
                       'w'),
                  indent=JSON_INDENT,
                  sort_keys=True)
        train_inds = []
        test_inds = []

    # generate states and images
    state_id = num_prev_states
    while state_id < num_states:

        # create env and set objects
        create_start = time.time()
        env = BinHeapEnv(config)
        env.state_space.obj_id_map = obj_id_map
        env.state_space.obj_keys = obj_keys
        env.state_space.set_splits(obj_splits)
        env.state_space.mesh_filenames = mesh_filenames
        create_stop = time.time()
        logger.info('Creating env took %.3f sec' %
                    (create_stop - create_start))

        # sample states
        states_remaining = num_states - state_id
        for i in range(min(states_per_garbage_collect, states_remaining)):

            # log current rollout
            if state_id % config['log_rate'] == 0:
                logger.info('State: %06d' % (state_id))

            try:
                # reset env
                env.reset()
                state = env.state
                split = state.metadata['split']

                # render state
                if vis_config['state']:
                    env.view_3d_scene()

                # Save state if desired
                if save_tensors:

                    # set obj state variables
                    obj_pose_vec = np.zeros(obj_pose_dim)
                    obj_com_vec = np.zeros(obj_com_dim)
                    obj_id_vec = np.iinfo(
                        np.uint32).max * np.ones(max_objs_per_state)
                    j = 0
                    for obj_state in state.obj_states:
                        obj_pose_vec[j * POSE_DIM:(j + 1) *
                                     POSE_DIM] = obj_state.pose.vec
                        obj_com_vec[j * POINT_DIM:(j + 1) *
                                    POINT_DIM] = obj_state.center_of_mass
                        obj_id_vec[j] = int(obj_id_map[obj_state.key])
                        j += 1

                    # store datapoint env params
                    state_datapoint['state_id'] = state_id
                    state_datapoint['obj_poses'] = obj_pose_vec
                    state_datapoint['obj_coms'] = obj_com_vec
                    state_datapoint['obj_ids'] = obj_id_vec
                    state_datapoint['split'] = split

                    # store state datapoint
                    image_start_ind = image_dataset.num_datapoints
                    image_end_ind = image_start_ind + num_images_per_state
                    state_datapoint['image_start_ind'] = image_start_ind
                    state_datapoint['image_end_ind'] = image_end_ind

                    # clean up
                    del obj_pose_vec
                    del obj_com_vec
                    del obj_id_vec

                    # add state
                    state_dataset.add(state_datapoint)

                # render images
                for k in range(num_images_per_state):

                    # reset the camera
                    if num_images_per_state > 1:
                        env.reset_camera()

                    obs = env.render_camera_image(color=image_config['color'])
                    if image_config['color']:
                        color_obs, depth_obs = obs
                    else:
                        depth_obs = obs

                    # vis obs
                    if vis_config['obs']:
                        if image_config['depth']:
                            plt.figure()
                            plt.imshow(depth_obs)
                            plt.title('Depth Observation')
                        if image_config['color']:
                            plt.figure()
                            plt.imshow(color_obs)
                            plt.title('Color Observation')
                        plt.show()

                    if image_config['modal'] or image_config[
                            'amodal'] or image_config['semantic']:

                        # render segmasks
                        amodal_segmasks, modal_segmasks = env.render_segmentation_images(
                        )

                        # retrieve segmask data
                        modal_segmask_arr = np.iinfo(np.uint8).max * np.ones(
                            [im_height, im_width, segmask_channels],
                            dtype=np.uint8)
                        amodal_segmask_arr = np.iinfo(np.uint8).max * np.ones(
                            [im_height, im_width, segmask_channels],
                            dtype=np.uint8)
                        stacked_segmask_arr = np.zeros(
                            [im_height, im_width, 1], dtype=np.uint8)

                        modal_segmask_arr[:, :, :env.
                                          num_objects] = modal_segmasks
                        amodal_segmask_arr[:, :, :env.
                                           num_objects] = amodal_segmasks

                        if image_config['semantic']:
                            for j in range(env.num_objects):
                                this_obj_px = np.where(
                                    modal_segmasks[:, :, j] > 0)
                                stacked_segmask_arr[this_obj_px[0],
                                                    this_obj_px[1], 0] = j + 1

                    # visualize
                    if vis_config['semantic'] and image_config['semantic']:
                        plt.figure()
                        plt.imshow(stacked_segmask_arr.squeeze())
                        plt.show()

                    if save_tensors:
                        # save image data as tensors
                        if image_config['color']:
                            image_datapoint['color_im'] = color_obs
                        if image_config['depth']:
                            image_datapoint['depth_im'] = depth_obs[:, :, None]
                        if image_config['modal']:
                            image_datapoint[
                                'modal_segmasks'] = modal_segmask_arr
                        if image_config['amodal']:
                            image_datapoint[
                                'amodal_segmasks'] = amodal_segmask_arr
                        if image_config['semantic']:
                            image_datapoint[
                                'semantic_segmasks'] = stacked_segmask_arr

                        image_datapoint['camera_pose'] = env.camera.pose.vec
                        image_datapoint[
                            'camera_intrs'] = env.camera.intrinsics.vec
                        image_datapoint['state_ind'] = state_id
                        image_datapoint['split'] = split

                        # add image
                        image_dataset.add(image_datapoint)

                    # Save depth image and semantic masks
                    if image_config['color']:
                        ColorImage(color_obs).save(
                            os.path.join(
                                color_dir, 'image_{:06d}.png'.format(
                                    num_images_per_state * state_id + k)))
                    if image_config['depth']:
                        DepthImage(depth_obs).save(
                            os.path.join(
                                depth_dir, 'image_{:06d}.png'.format(
                                    num_images_per_state * state_id + k)))
                    if image_config['modal']:
                        modal_id_dir = os.path.join(
                            modal_dir,
                            'image_{:06d}'.format(num_images_per_state *
                                                  state_id + k))
                        if not os.path.exists(modal_id_dir):
                            os.mkdir(modal_id_dir)
                        for i in range(env.num_objects):
                            BinaryImage(modal_segmask_arr[:, :, i]).save(
                                os.path.join(modal_id_dir,
                                             'channel_{:03d}.png'.format(i)))
                    if image_config['amodal']:
                        amodal_id_dir = os.path.join(
                            amodal_dir,
                            'image_{:06d}'.format(num_images_per_state *
                                                  state_id + k))
                        if not os.path.exists(amodal_id_dir):
                            os.mkdir(amodal_id_dir)
                        for i in range(env.num_objects):
                            BinaryImage(amodal_segmask_arr[:, :, i]).save(
                                os.path.join(amodal_id_dir,
                                             'channel_{:03d}.png'.format(i)))
                    if image_config['semantic']:
                        GrayscaleImage(stacked_segmask_arr.squeeze()).save(
                            os.path.join(
                                semantic_dir, 'image_{:06d}.png'.format(
                                    num_images_per_state * state_id + k)))

                    # Save split
                    if split == TRAIN_ID:
                        train_inds.append(num_images_per_state * state_id + k)
                    else:
                        test_inds.append(num_images_per_state * state_id + k)

                # auto-flush after every so many timesteps
                if state_id % states_per_flush == 0:
                    np.save(os.path.join(image_dir, 'train_indices.npy'),
                            train_inds)
                    np.save(os.path.join(image_dir, 'test_indices.npy'),
                            test_inds)
                    if save_tensors:
                        state_dataset.flush()
                        image_dataset.flush()

                # delete action objects
                for obj_state in state.obj_states:
                    del obj_state
                del state
                gc.collect()

                # update state id
                state_id += 1

            except Exception as e:
                # log an error
                logger.warning('Heap failed!')
                logger.warning('%s' % (str(e)))
                logger.warning(traceback.format_exc())
                if debug:
                    raise

                del env
                gc.collect()
                env = BinHeapEnv(config)
                env.state_space.obj_id_map = obj_id_map
                env.state_space.obj_keys = obj_keys
                env.state_space.set_splits(obj_splits)
                env.state_space.mesh_filenames = mesh_filenames

        # garbage collect
        del env
        gc.collect()

    # write all datasets to file, save indices
    np.save(os.path.join(image_dir, 'train_indices.npy'), train_inds)
    np.save(os.path.join(image_dir, 'test_indices.npy'), test_inds)
    if save_tensors:
        state_dataset.flush()
        image_dataset.flush()

    logger.info('Generated %d image datapoints' %
                (state_id * num_images_per_state))
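
A hedged usage sketch for the generator above, assuming the config is an autolab_core YamlConfig whose keys match the ones the function reads ('dataset', 'images', 'vis', 'num_states', and so on); the config path is hypothetical:

from autolab_core import YamlConfig

config = YamlConfig('cfg/generate_mask_dataset.yaml')  # hypothetical path
generate_segmask_dataset('./datasets/segmask_example',
                         config,
                         save_tensors=True,
                         warm_start=False)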
Code example #4
def run_parallel_bin_picking_benchmark(input_dataset_path,
                                       heap_ids,
                                       timesteps,
                                       output_dataset_path,
                                       config_filename):
    raise NotImplementedError('Cannot run in parallel. Need to split up the heap ids and timesteps')

    # load config
    config = YamlConfig(config_filename)

    # init ray
    ray_config = config['ray']
    num_cpus = ray_config['num_cpus']
    ray.init(num_cpus=num_cpus,
             redirect_output=ray_config['redirect_output'])
    
    # rollouts
    num_rollouts = config['num_rollouts'] // num_cpus
    dataset_ids = [rollout_bin_picking_policy_in_parallel.remote(output_dataset_path, config_filename, num_rollouts) for _ in range(num_cpus)]
    dataset_filenames = ray.get(dataset_ids)
    if len(dataset_filenames) == 0:
        return
    
    # merge datasets    
    subproc_dataset = TensorDataset.open(dataset_filenames[0])
    tensor_config = subproc_dataset.config

    # open output dataset
    dataset = TensorDataset(output_dataset_path, tensor_config)
    dataset.add_metadata('action_ids', subproc_dataset.metadata['action_ids'])

    # add datapoints
    obj_id = 0
    heap_id = 0
    obj_ids = {}
    for dataset_filename in dataset_filenames:
        logging.info('Aggregating data from %s' %(dataset_filename))
        j = 0
        subproc_dataset = TensorDataset.open(dataset_filename)
        subproc_obj_ids = subproc_dataset.metadata['obj_ids']
        for datapoint in subproc_dataset:
            if j > 0 and datapoint['timesteps'] == 0:
                heap_id += 1
                
            # modify object ids
            for i in range(datapoint['obj_ids'].shape[0]):
                subproc_obj_id = datapoint['obj_ids'][i]
                if subproc_obj_id != np.iinfo(np.uint32).max:
                    subproc_obj_key = subproc_obj_ids[str(subproc_obj_id)]
                    if subproc_obj_key not in obj_ids:
                        obj_ids[subproc_obj_key] = obj_id
                        obj_id += 1
                    datapoint['obj_ids'][i] = obj_ids[subproc_obj_key]

            # modify grasped obj id
            subproc_grasped_obj_id = datapoint['grasped_obj_ids']
            grasped_obj_key = subproc_obj_ids[str(subproc_grasped_obj_id)]
            datapoint['grasped_obj_ids'] = obj_ids[grasped_obj_key]

            # modify heap id
            datapoint['heap_ids'] = heap_id
                
            # add datapoint to dataset
            dataset.add(datapoint)
            j += 1
            
    # write to disk        
    obj_ids = utils.reverse_dictionary(obj_ids)
    dataset.add_metadata('obj_ids', obj_ids)
    dataset.flush()
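
The NotImplementedError above calls for splitting the heap ids and timesteps across workers. A hedged sketch of what that chunking might look like, assuming rollout_bin_picking_policy_in_parallel were extended to accept explicit heap id and timestep lists (that signature is hypothetical):

import numpy as np

# split the work evenly across the Ray workers (hypothetical extension)
heap_id_chunks = [c.tolist() for c in np.array_split(heap_ids, num_cpus)]
timestep_chunks = [c.tolist() for c in np.array_split(timesteps, num_cpus)]
dataset_ids = [rollout_bin_picking_policy_in_parallel.remote(output_dataset_path,
                                                             config_filename,
                                                             heap_id_chunks[i],
                                                             timestep_chunks[i])
               for i in range(num_cpus)]
dataset_filenames = ray.get(dataset_ids)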