Example #1
def figure_0():
    action = policy.action(env.state)
    env.render_3d_scene()
    bottom_points = policy.toppling_model.bottom_points
    vis3d.plot3d(bottom_points[:2], color=[0, 0, 0], tube_radius=.001)

    mesh = env.state.mesh.copy().apply_transform(env.state.T_obj_world.matrix)
    mesh.fix_normals()
    direction = normalize([0, -.04, 0])
    origin = mesh.center_mass + np.array([0, .04, .09])
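    # Cast a ray at the object to find the push contact point and the surface
    # normal of the face it hits.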
    intersect, _, face_ind = mesh.ray.intersects_location([origin],
                                                          [direction],
                                                          multiple_hits=False)
    normal = mesh.face_normals[face_ind[0]]
    start_point = intersect[0] + .06 * normal
    end_point = intersect[0]
    shaft_points = [start_point, end_point]
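    # Arrowhead barbs: the shaft direction (-normal) rotated by +/-45 degrees
    # about the x-axis (0.7071 ~ cos 45 deg).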
    h1 = np.array([[1, 0, 0],
                   [0, 0.7071, -0.7071],
                   [0, 0.7071, 0.7071]]).dot(-normal)
    h2 = np.array([[1, 0, 0],
                   [0, 0.7071, 0.7071],
                   [0, -0.7071, 0.7071]]).dot(-normal)
    head_points = [end_point - 0.02 * h2, end_point, end_point - 0.02 * h1]
    vis3d.plot3d(shaft_points, color=[1, 0, 0], tube_radius=.002)
    vis3d.plot3d(head_points, color=[1, 0, 0], tube_radius=.002)
    vis3d.points(Point(end_point), scale=.004, color=[0, 0, 0])
    hand_pose = RigidTransform(
        rotation=policy.get_hand_pose(start_point, end_point)[0].rotation,
        translation=start_point,
        from_frame='grasp',
        to_frame='world')
    orig_pose = env.state.obj.T_obj_world.copy()
    env.state.obj.T_obj_world = policy.toppling_model.final_poses[1]
    action = policy.grasping_policy.action(env.state)
    env.state.obj.T_obj_world = orig_pose

    gripper = env.gripper(action)
    #vis3d.mesh(gripper.mesh, hand_pose * gripper.T_mesh_grasp, color=light_blue)
    # mesh = trimesh.load('~/Downloads/chris.stl')
    rot = np.array([[0, -1, 0],
                    [1, 0, 0],
                    [0, 0, 1]]).dot(np.array([[0, 0, 1],
                                              [0, 1, 0],
                                              [-1, 0, 0]]))
    T = RigidTransform(rotation=rot,
                       translation=np.array([0, -.09, .1]),
                       to_frame='mesh',
                       from_frame='mesh')
    # vis3d.mesh(mesh, hand_pose * gripper.T_mesh_grasp * T, color=light_blue)
    vis3d.mesh(gripper.mesh,
               hand_pose * gripper.T_mesh_grasp * T,
               color=light_blue)
    vis3d.show(starting_camera_pose=CAMERA_POSE)

    env.state.obj.T_obj_world = policy.toppling_model.final_poses[1]
    action = policy.grasping_policy.action(env.state)
    print 'q:', action.q_value
    env.render_3d_scene()
    vis3d.gripper(env.gripper(action),
                  action.grasp(env.gripper(action)),
                  color=light_blue)
    vis3d.show(starting_camera_pose=CAMERA_POSE)
def vis_topple_probs(vertices, topple_probs):
    vis3d.figure()
    env.render_3d_scene()
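    # Color vertices on a red -> yellow -> green gradient:
    # prob 0 gives [1, 0, 0] (red), prob 0.5 gives [1, 1, 0] (yellow),
    # prob 1 gives [0, 1, 0] (green).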
    for vertex, prob in zip(vertices, topple_probs):
        color = [min(1, 2 * (1 - prob)), min(2 * prob, 1), 0]
        vis3d.points(Point(vertex, 'world'), scale=.001, color=color)
    for bottom_point in policy.toppling_model.bottom_points:
        vis3d.points(Point(bottom_point, 'world'), scale=.001, color=[0, 0, 0])
    vis3d.show(starting_camera_pose=CAMERA_POSE)
Example #3
def sim_to_real_tf(sim_point_cloud_masked, vis=False):
    # Capture point cloud from physical depth camera
    _, depth_im, _ = phoxi_sensor.frames()
    phys_point_cloud = phoxi_tf*phoxi_sensor.ir_intrinsics.deproject(depth_im)
    phys_point_cloud_masked, _ = phys_point_cloud.box_mask(mask_box)
    if phys_point_cloud_masked.num_points == 0:
        logging.warning('Object not found! Skipping...')
        quit()
    
    pcs_config = {
        'overlap': 0.6,
        'accuracy': 0.001,
        'samples': 500,
        'timeout': 10,
        'cache_dir': '/home/chriscorrea14/Super4PCS/cache'
    }
    pcs_aligner = Super4PCSAligner(pcs_config)

    sim_to_real_tf = pcs_aligner.align(phys_point_cloud_masked, sim_point_cloud_masked)
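    # The returned transform maps points from the simulated frame into the
    # physical (world) frame, so sim_to_real_tf * sim_point_cloud_masked should
    # overlay the physical cloud (visualized below when vis=True).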
    if vis:
        vis3d.figure()
        vis3d.points(phys_point_cloud_masked.data.T, color=(1,0,0), scale=.001)
        vis3d.points((sim_to_real_tf*sim_point_cloud_masked).data.T, color=(0,1,0), scale=.001)
        vis3d.show(title='Predicted pose from Sim 2 Real')
    return sim_to_real_tf
Example #4
def figure_1():
    env.state.obj.T_obj_world.translation += np.array([-.01, -.05, .001])
    action = policy.action(env.state)
    env.render_3d_scene()
    bottom_points = policy.toppling_model.bottom_points
    vis3d.plot3d(bottom_points[:2], color=[0, 0, 0], tube_radius=.0005)
    vis3d.points(Point(bottom_points[0]), color=[0, 0, 0], scale=.001)
    vis3d.points(Point(bottom_points[1]), color=[0, 0, 0], scale=.001)
    y_dir = normalize(bottom_points[1] - bottom_points[0])
    origin = policy.toppling_model.com_projected_on_edges[0] - .005 * y_dir
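    # y_dir points along the topple edge; vis_axes draws a coordinate frame at
    # the center of mass projected onto that edge (offset slightly along it).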
    vis_axes(origin, y_dir)

    #mesh = env.state.mesh.copy().apply_transform(env.state.T_obj_world.matrix)
    #mesh.fix_normals()
    #direction = normalize([-.03, -.07, 0])
    #intersect, _, face_ind = mesh.ray.intersects_location([[.02, -.005, .09]], [direction], multiple_hits=False)
    #normal = mesh.face_normals[face_ind[0]]
    #start_point = intersect[0] + .03*normal
    #end_point = intersect[0]
    #shaft_points = [start_point, end_point]
    #h1 = np.array([[0.7071,-0.7071,0],[0.7071,0.7071,0],[0,0,1]]).dot(-normal)
    #h2 = np.array([[0.7071,0.7071,0],[-0.7071,0.7071,0],[0,0,1]]).dot(-normal)
    #head_points = [end_point - 0.01*h2, end_point, end_point - 0.01*h1]
    #vis3d.plot3d(shaft_points, color=[1,0,0], tube_radius=.001)
    #vis3d.plot3d(head_points, color=[1,0,0], tube_radius=.001)
    #vis3d.points(Point(end_point), scale=.002, color=[0,0,0])

    # Center of Mass
    #start_point = env.state.T_obj_world.translation - .0025*y_dir - np.array([0,0,.005])
    start_point = env.state.T_obj_world.translation
    end_point = start_point - np.array([0, 0, .03])
    vis3d.points(Point(start_point), scale=.002, color=[0, 0, 0])
    shaft_points = [start_point, end_point]
    h1 = np.array([[1, 0, 0],
                   [0, 0.7071, -0.7071],
                   [0, 0.7071, 0.7071]]).dot(-up)
    h2 = np.array([[1, 0, 0],
                   [0, 0.7071, 0.7071],
                   [0, -0.7071, 0.7071]]).dot(-up)
    head_points = [end_point - 0.01 * h2, end_point, end_point - 0.01 * h1]
    vis3d.plot3d(shaft_points, color=[1, 0, 0], tube_radius=.001)
    vis3d.plot3d(head_points, color=[1, 0, 0], tube_radius=.001)
    vis3d.show(starting_camera_pose=CAMERA_POSE)
Example #5
def benchmark_bin_picking_policy(policy,
                                 # input_dataset_path,
                                 # heap_ids,
                                 # timesteps,
                                 # output_dataset_path,
                                 config,
                                 # excluded_heaps_file):
                                 ):
    """ Benchmark a bin picking policy.

    Parameters
    ----------
    policy : :obj:`Policy`
        policy to roll out
    input_dataset_path : str
        path to the input dataset
    heap_ids : list
        integer identifiers for the heaps to re-run
    timesteps : list
        integer timesteps to seed the simulation from
    output_dataset_path : str
        path to store the results
    config : dict
        dictionary-like object containing parameters of the simulator and visualization

    Note: input_dataset_path, heap_ids, timesteps, output_dataset_path, and
    excluded_heaps_file are documented above but currently commented out of the
    signature, so dataset replay and result saving are disabled in the body.
    """
    # read subconfigs
    vis_config = config['vis']
    dataset_config = config['dataset']

    # read parameters
    fully_observed = config['fully_observed']
    steps_per_test_case = config['steps_per_test_case']
    rollouts_per_garbage_collect = config['rollouts_per_garbage_collect']
    debug = config['debug']
    im_height = config['state_space']['camera']['im_height']
    im_width = config['state_space']['camera']['im_width']
    max_obj_per_pile = config['state_space']['object']['max_obj_per_pile']

    if debug:
        random.seed(SEED)
        np.random.seed(SEED)

    # read ids
    # if len(heap_ids) != len(timesteps):
    #     raise ValueError('Must provide same number of heap ids and timesteps')
    # num_rollouts = len(heap_ids)
    num_rollouts = 1
        
    # set dataset params
    tensor_config = dataset_config['tensors']
    fields_config = tensor_config['fields']
    # fields_config['color_ims']['height'] = im_height
    # fields_config['color_ims']['width'] = im_width
    # fields_config['depth_ims']['height'] = im_height
    # fields_config['depth_ims']['width'] = im_width
    fields_config['obj_poses']['height'] = POSE_DIM * max_obj_per_pile
    fields_config['obj_coms']['height'] = POINT_DIM * max_obj_per_pile
    fields_config['obj_ids']['height'] = max_obj_per_pile
    fields_config['bin_distances']['height'] = max_obj_per_pile
    # the distance matrix has (n choose 2) entries (one per object pair)
    max_distance_matrix_length = int(comb(max_obj_per_pile, 2))
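    # e.g. with max_obj_per_pile = 10 this reserves comb(10, 2) = 45 entries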
    fields_config['distance_matrix']['height'] = max_distance_matrix_length

    # sample a process id
    proc_id = utils.gen_experiment_id()
    # if not os.path.exists(output_dataset_path):
    #     try:
    #         os.mkdir(output_dataset_path)
    #     except:
    #         logging.warning('Failed to create %s. The dataset path may have been created simultaneously by another process' %(dataset_path))
    # proc_id = 'clustering_2'
    # output_dataset_path = os.path.join(output_dataset_path, 'dataset_%s' %(proc_id))

    # open input dataset
    # logging.info('Opening input dataset: %s' % input_dataset_path)
    # input_dataset = TensorDataset.open(input_dataset_path)
    
    # open output_dataset
    # logging.info('Opening output dataset: %s' % output_dataset_path)
    # dataset = TensorDataset(output_dataset_path, tensor_config)
    # datapoint = dataset.datapoint_template

    # setup logging
    # experiment_log_filename = os.path.join(output_dataset_path, 'dataset_generation.log')
    # formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    # hdlr = logging.FileHandler(experiment_log_filename)
    # hdlr.setFormatter(formatter)
    # logging.getLogger().addHandler(hdlr)
    # config.save(os.path.join(output_dataset_path, 'dataset_generation_params.yaml'))
    
    # key mappings
    # map the empty string to id 0 because the grasped object key can be empty
    # when dex-net is not evaluated on the 'before' state of the push
    obj_id = 1
    obj_ids = {'': 0}
    action_ids = {
        'ParallelJawGraspAction': 0,
        'SuctionGraspAction': 1,
        'LinearPushAction': 2
    }
    
    # add action ids
    reverse_action_ids = utils.reverse_dictionary(action_ids)
    # dataset.add_metadata('action_ids', reverse_action_ids)
    
    # perform rollouts
    n = 0
    rollout_start = time.time()
    current_heap_id = None
    while n < num_rollouts:
        # create env
        create_start = time.time()
        bin_picking_env = GraspingEnv(config, vis_config)
        create_stop = time.time()
        logging.info('Creating env took %.3f sec' %(create_stop-create_start))

        # perform rollouts
        rollouts_remaining = num_rollouts - n
        for i in range(min(rollouts_per_garbage_collect, rollouts_remaining)):
            # log current rollout
            logging.info('\n')
            if n % vis_config['log_rate'] == 0:
                logging.info('Rollout: %03d' %(n))

            try:    
                # mark rollout status
                data_saved = False
                num_steps = 0
                
                # read heap id
                # heap_id = heap_ids[n]
                # timestep = timesteps[n]
                # while heap_id == current_heap_id:# or heap_id < 81:#[226, 287, 325, 453, 469, 577, 601, 894, 921]: 26
                #     n += 1
                #     heap_id = heap_ids[n]
                #     timestep = timesteps[n]
                push_logger = logging.getLogger('push')
                # push_logger.info('~')
                # push_logger.info('Heap ID %d' % heap_id)
                # current_heap_id = heap_id
                
                # reset env
                reset_start = time.time()
                # bin_picking_env.reset_from_dataset(input_dataset,
                #                                    heap_id,
                #                                    timestep)
                bin_picking_env.reset()
                state = bin_picking_env.state
                environment = bin_picking_env.environment
                if fully_observed:
                    observation = None
                else:
                    observation = bin_picking_env.observation
                policy.set_environment(environment) 
                reset_stop = time.time()

                # add objects to mapping
                for obj_key in state.obj_keys:
                    if obj_key not in obj_ids.keys():
                        obj_ids[obj_key] = obj_id
                        obj_id += 1
                    push_logger.info(obj_key)
                # save id mappings
                reverse_obj_ids = utils.reverse_dictionary(obj_ids)
                # dataset.add_metadata('obj_ids', reverse_obj_ids)
                        
                # store datapoint env params
                # datapoint['heap_ids'] = current_heap_id
                # datapoint['camera_poses'] = environment.camera.T_camera_world.vec
                # datapoint['camera_intrs'] = environment.camera.intrinsics.vec
                # datapoint['robot_poses'] = environment.robot.T_robot_world.vec
            
                # render
                if vis_config['initial_state']:
                    vis3d.figure()
                    bin_picking_env.render_3d_scene()
                    vis3d.pose(environment.robot.T_robot_world)
                    vis3d.show(starting_camera_pose=CAMERA_POSE)
            
                # observe
                if vis_config['initial_obs']:
                    vis2d.figure()
                    vis2d.imshow(observation, auto_subplot=True)
                    vis2d.show()

                # rollout on current state
                done = False
                failed = False
                # if isinstance(policy, SingulationFullRolloutPolicy):
                #     policy.reset_num_failed_grasps()
                while not done:
                    if vis_config['step_stats']:
                        # logging.info('Heap ID: %s' % heap_id)  # heap ids are disabled above
                        logging.info('Timestep: %s' % bin_picking_env.timestep)

                    # get action
                    policy_start = time.time()
                    if fully_observed:
                        action = policy.action(state)
                    else:
                        action = policy.action(observation)
                    policy_stop = time.time()
                    logging.info('Composite Policy took %.3f sec' %(policy_stop-policy_start))

                    # render scene before
                    if vis_config['action']:
                        vis3d.figure()
                        # GRASPINGENV
                        # bin_picking_env.render_3d_scene(render_camera=False, workspace_objs_wireframe=False)
                        bin_picking_env.render_3d_scene()
                        if isinstance(action, GraspAction):
                            gripper = bin_picking_env.gripper(action)
                            vis3d.gripper(gripper, action.grasp(gripper))
                        #if isinstance(action, LinearPushAction):
                        else:
                            # # T_start_world = action.T_begin_world * gripper.T_mesh_grasp
                            # # T_end_world = action.T_end_world * gripper.T_mesh_grasp
                            # #start_point = action.T_begin_world.translation
                            # start_point = action['start']
                            # #end_point = action.T_end_world.translation
                            # end_point = action['end']
                            # vec = (end_point - start_point) / np.linalg.norm(end_point-start_point) if np.linalg.norm(end_point-start_point) > 0 else end_point-start_point 
                            # #h1 = np.array([[0.7071,-0.7071,0],[0.7071,0.7071,0],[0,0,1]]).dot(vec)
                            # #h2 = np.array([[0.7071,0.7071,0],[-0.7071,0.7071,0],[0,0,1]]).dot(vec)
                            # arrow_len = np.linalg.norm(start_point - end_point)
                            # h1 = (end_point - start_point + np.array([0,0,arrow_len])) / (arrow_len*math.sqrt(2))
                            # h2 = (end_point - start_point - np.array([0,0,arrow_len])) / (arrow_len*math.sqrt(2))
                            # shaft_points = [start_point, end_point]
                            # head_points = [end_point - 0.03*h2, end_point, end_point - 0.03*h1]
                            # #vis3d.plot3d(shaft_points, color=[0,0,1])
                            # #vis3d.plot3d(head_points, color=[0,0,1])
                            
                            # Displaying all potential topple points
                            for vertex, prob in zip(action['vertices'], action['probabilities']):
                                color = np.array([min(1, 2*(1-prob)), min(2*prob, 1), 0])
                                vis3d.points(Point(vertex, 'world'), scale=.0005, color=color)

                            for vertex in action['bottom_points']:
                                color = np.array([0,0,1])
                                vis3d.points(Point(vertex, 'world'), scale=.0005, color=color)
                            vis3d.points(Point(action['com'], 'world'), scale=.005, color=np.array([0,0,1]))
                            vis3d.points(Point(np.array([0,0,0]), 'world'), scale=.005, color=np.array([0,1,0]))
                            
                            #set_of_lines = action['set_of_lines']
                            #for i, line in enumerate(set_of_lines):
                            #    color = str(bin(i+1))[2:].zfill(3)
                            #    color = np.array([color[2], color[1], color[0]])
                            #    vis3d.plot3d(line, color=color)
                        vis3d.show(starting_camera_pose=CAMERA_POSE)

                        # Show which final pose each vertex is predicted to topple into
                        vis3d.figure()
                        bin_picking_env.render_3d_scene()
                        final_pose_inds = action['final_pose_ind'] / np.amax(action['final_pose_ind'])
                        for vertex, final_pose_ind in zip(action['vertices'], final_pose_inds):
                            color = np.array([0, min(1, 2 * (1 - final_pose_ind)), min(2 * final_pose_ind, 1)])
                            vis3d.points(Point(vertex, 'world'), scale=.0005, color=color)
                        vis3d.show(starting_camera_pose=CAMERA_POSE)



                        color = np.array([0, 0, 1])
                        original_pose = state.obj.T_obj_world
                        pose_num = 0
                        for pose, edge_point1, edge_point2 in zip(
                                action['final_poses'], action['bottom_points'],
                                np.roll(action['bottom_points'], -1, axis=0)):
                            print 'Pose:', pose_num
                            pose_num += 1
                            pose = pose.T_obj_table
                            vis3d.figure()
                            state.obj.T_obj_world = original_pose
                            bin_picking_env.render_3d_scene()
                            vis3d.points(Point(edge_point1, 'world'), scale=.0005, color=color)
                            vis3d.points(Point(edge_point2, 'world'), scale=.0005, color=color)
                            vis3d.show(starting_camera_pose=CAMERA_POSE)

                            vis3d.figure()
                            state.obj.T_obj_world = pose
                            bin_picking_env.render_3d_scene()
                            vis3d.points(Point(edge_point1, 'world'), scale=.0005, color=color)
                            vis3d.points(Point(edge_point2, 'world'), scale=.0005, color=color)
                            vis3d.show(starting_camera_pose=CAMERA_POSE)
                         #vis3d.save('/home/mjd3/Pictures/weird_pics/%d_%d_before.png' % (heap_id, bin_picking_env.timestep), starting_camera_pose=CAMERA_POSE)
                    # store datapoint pre-step data
                    j = 0
                    obj_poses = np.zeros(fields_config['obj_poses']['height'])
                    obj_coms = np.zeros(fields_config['obj_coms']['height'])
                    obj_ids_vec = np.iinfo(np.uint32).max * np.ones(fields_config['obj_ids']['height'])
                    for obj_state in state.obj_states:
                        obj_poses[j*POSE_DIM:(j+1)*POSE_DIM] = obj_state.T_obj_world.vec
                        obj_coms[j*POINT_DIM:(j+1)*POINT_DIM] = obj_state.center_of_mass
                        obj_ids_vec[j] = obj_ids[obj_state.key]
                        j += 1
                    action_poses = np.zeros(fields_config['action_poses']['height'])
                    #if isinstance(action, GraspAction):
                    #    action_poses[:7] = action.T_grasp_world.vec
                    #else:
                    #    action_poses[:7] = action.T_begin_world.vec
                    #    action_poses[7:] = action.T_end_world.vec

                    # if isinstance(policy, SingulationMetricsCompositePolicy):
                    #     actual_distance_matrix_length = int(comb(len(state.objs), 2))
                    #     bin_distances = np.append(action.metadata['bin_distances'], 
                    #                               np.zeros(max_obj_per_pile-len(state.objs))
                    #                             )
                    #     distance_matrix = np.append(action.metadata['distance_matrix'], 
                    #                                 np.zeros(max_distance_matrix_length - actual_distance_matrix_length)
                    #                             )
                    #     datapoint['bin_distances'] = bin_distances
                    #     datapoint['distance_matrix'] = distance_matrix
                    #     datapoint['T_begin_world'] = action.T_begin_world.matrix
                    #     datapoint['T_end_world'] = action.T_end_world.matrix
                    #     datapoint['parallel_jaw_best_q_value'] = action.metadata['parallel_jaw_best_q_value']
                    #     # datapoint['parallel_jaw_mean_q_value'] = action.metadata['parallel_jaw_mean_q_value']
                    #     # datapoint['parallel_jaw_num_grasps'] = action.metadata['parallel_jaw_num_grasps']
                    #     datapoint['suction_best_q_value'] = action.metadata['suction_best_q_value']
                    #     # datapoint['suction_mean_q_value'] = action.metadata['suction_mean_q_value']
                    #     # datapoint['suction_num_grasps'] = action.metadata['suction_num_grasps']
                    #     # logging.info('Suction Q: %f, PJ Q: %f' % (action.metadata['suction_q_value'], action.metadata['parallel_jaw_q_value']))
                    #     # datapoint['obj_index'] = action.metadata['obj_index']

                    #     # datapoint['parallel_jaw_best_q_value_single'] = action.metadata['parallel_jaw_best_q_value_single']
                    #     # datapoint['suction_best_q_value_single'] = action.metadata['suction_best_q_value_single']
                    #     datapoint['singulated_obj_index'] = action.metadata['singulated_obj_index']
                    #     datapoint['parallel_jaw_grasped_obj_index'] = obj_ids[action.metadata['parallel_jaw_grasped_obj_key']]
                    #     datapoint['suction_grasped_obj_index'] = obj_ids[action.metadata['suction_grasped_obj_key']]
                    # else:
                    #     datapoint['bin_distances'] = np.zeros(max_obj_per_pile)
                    #     datapoint['distance_matrix'] = np.zeros(max_distance_matrix_length)
                    #     datapoint['T_begin_world'] = np.zeros((4,4))
                    #     datapoint['T_end_world'] = np.zeros((4,4))
                    #     datapoint['parallel_jaw_best_q_value'] = -1
                    #     datapoint['suction_best_q_value'] = -1
                    #     datapoint['singulated_obj_index'] = -1
                    #     datapoint['parallel_jaw_grasped_obj_index'] = -1
                    #     datapoint['suction_grasped_obj_index'] = -1

                    # policy_id = 0
                    # if 'policy_id' in action.metadata.keys():
                    #     policy_id = action.metadata['policy_id']
                    # greedy_q_value = 0
                    # if 'greedy_q_value' in action.metadata.keys():
                    #     greedy_q_value = action.metadata['greedy_q_value']
                        
                    # datapoint['timesteps'] = bin_picking_env.timestep
                    # datapoint['obj_poses'] = obj_poses
                    # datapoint['obj_coms'] = obj_coms
                    # datapoint['obj_ids'] = obj_ids_vec
                    # # if bin_picking_env.render_mode == RenderMode.RGBD:
                    # #     color_data = observation.color.raw_data
                    # #     depth_data = observation.depth.raw_data
                    # # elif bin_picking_env.render_mode == RenderMode.DEPTH:
                    # #     color_data = np.zeros(observation.shape).astype(np.uint8)
                    # #     depth_data = observation.raw_data
                    # # elif bin_picking_env.render_mode == RenderMode.COLOR:
                    # #     color_data = observation.raw_data
                    # #     depth_data = np.zeros(observation.shape)
                    # # datapoint['color_ims'] = color_data
                    # # datapoint['depth_ims'] = depth_data
                    # datapoint['action_ids'] = action_ids[type(action).__name__]
                    # datapoint['action_poses'] = action_poses
                    # datapoint['policy_ids'] = policy_id
                    # datapoint['greedy_q_values'] = greedy_q_value
                    # datapoint['pred_q_values'] = action.q_value
                    
                    # step the policy
                    #observation, reward, done, info = bin_picking_env.step(action)
                    #state = bin_picking_env.state
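                    # env.step is disabled above, so set the object pose
                    # directly to the policy's predicted final (post-toppling) pose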
                    state.objs[0].T_obj_world = action['final_state']

                    # if isinstance(policy, SingulationFullRolloutPolicy):
                    #     policy.grasp_succeeds(info['grasp_succeeds'])
        
                    # debugging info
                    if vis_config['step_stats']:
                        logging.info('Action type: %s' %(type(action).__name__))
                        logging.info('Action Q-value: %.3f' %(action.q_value))
                        # reward and info come from bin_picking_env.step, which is disabled above
                        # logging.info('Reward: %d' %(reward))
                        logging.info('Policy took %.3f sec' %(policy_stop-policy_start))
                        logging.info('Num objects remaining: %d' %(bin_picking_env.num_objects))
                        # if info['cleared_pile']:
                        #     logging.info('Cleared pile!')
                        
                    # # store datapoint post-step data
                    # datapoint['rewards'] = reward
                    # datapoint['grasp_metrics'] = info['grasp_metric']
                    # datapoint['collisions'] = 1 * info['collides']
                    # datapoint['collisions_with_env'] = 1 * info['collides_with_static_obstacles']
                    # datapoint['grasped_obj_ids'] = obj_ids[info['grasped_obj_key']]
                    # datapoint['cleared_pile'] = 1 * info['cleared_pile']

                    # # store datapoint
                    # # dataset.add(datapoint)
                    # data_saved = True    
                    
                    # render observation
                    if vis_config['obs']:
                        vis2d.figure()
                        vis2d.imshow(observation, auto_subplot=True)
                        vis2d.show()
        
                    # render scene after
                    if vis_config['state']:
                        vis3d.figure()
                        bin_picking_env.render_3d_scene(render_camera=False)
                        vis3d.show(starting_camera_pose=CAMERA_POSE)
                        # vis3d.save('/home/mjd3/Pictures/weird_pics/%d_%d_after.png' % (heap_id, bin_picking_env.timestep), starting_camera_pose=CAMERA_POSE)
                    state.objs[0].T_obj_world = action['tmpR']
                    vis3d.figure()
                    bin_picking_env.render_3d_scene(render_camera=False)
                    vis3d.show(starting_camera_pose=CAMERA_POSE)
                    state.objs[0].T_obj_world = action['final_state']
                    # increment the number of steps
                    num_steps += 1
                    if num_steps >= steps_per_test_case:
                        done = True
                        
            except NoActionFoundException as e:
                logging.warning('The policy failed to plan an action!')
                done = True                    
            except Exception as e:
                # log an error
                logging.warning('Rollout failed!')
                logging.warning('%s' %(str(e)))
                logging.warning(traceback.format_exc())
                # if debug:
                #     raise
                
                # reset env
                del bin_picking_env
                gc.collect()
                bin_picking_env = BinPickingEnv(config, vis_config)

                # terminate current rollout
                failed = True
                done = True

            # update test case id
            n += 1
            # dataset.flush()
            # logging.info("\n\nflushing")
            # logging.info("exiting")
            # sys.exit()
                
        # garbage collect
        del bin_picking_env
        gc.collect()

    # return the dataset 
    # dataset.flush()

    # log time
    rollout_stop = time.time()
    logging.info('Rollouts took %.3f sec' %(rollout_stop-rollout_start))

    # return dataset  # dataset creation is disabled above, so there is nothing to return
Example #6
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    config = YamlConfig(args.config_filename)
    policy = MultiTopplePolicy(config, use_sensitivity=True, num_samples=800)

    if config['debug']:
        random.seed(SEED)
        np.random.seed(SEED)

    env = GraspingEnv(config, config['vis'])
    env.reset()
    #env.state.material_props._color = np.array([0.86274509803] * 3)
    env.state.material_props._color = np.array([0.5] * 3)

    if args.before:
        env.render_3d_scene()
        color = np.array([0, 0, 0])
        vert = np.array([-0.01091172, 0.02806294, 0.06962403])
        normal_tip = vert + .01 * np.array([-0.84288757, -0.3828792, 0.37807943])
        vis3d.points(Point(vert), scale=.001, color=color)
        vis3d.points(Point(normal_tip), scale=.001, color=np.array([1, 0, 0]))
        vis3d.show(starting_camera_pose=CAMERA_POSE)

    policy.set_environment(env.environment)
    policy.action(env.state)
    #show_graph(policy.G, policy.edge_alphas)
    new_graph(policy.G)
Example #7
    env = GraspingEnv(config, config['vis'])
    env.reset()
    env.state.material_props._color = np.array([0.86274509803] * 3)
    obj_name = env.state.obj.key
    policy.set_environment(env.environment)
    if args.before:
        env.render_3d_scene()
        vis3d.show(starting_camera_pose=CAMERA_POSE)
    action = policy.action(env.state)

    vis3d.figure()
    env.render_3d_scene()
    num_vertices = len(action.metadata['vertices'])
    for i, vertex, q_increase in zip(np.arange(num_vertices),
                                     action.metadata['vertices'],
                                     action.metadata['quality_increases']):
        topples = action.metadata['final_pose_ind'][i] != 0
        if topples:
            color = np.array([min(1, 2 * (1 - q_increase)), min(2 * q_increase, 1), 0])
        else:
            color = np.array([0, 0, 0])
        # color = np.array([min(1, 2*(1-q_increase)), min(2*q_increase, 1), 0])
        scale = .001 if i != action.metadata['best_ind'] else .003
        vis3d.points(Point(vertex, 'world'), scale=scale, color=color)

    gripper = env.gripper(action)
    T_start_world = action.T_begin_world * gripper.T_mesh_grasp
    T_end_world = action.T_end_world * gripper.T_mesh_grasp
    vis3d.mesh(gripper.mesh, T_start_world, color=(0, 1, 0))
    vis3d.mesh(gripper.mesh, T_end_world, color=(1, 0, 0))
    display_or_save('{}_quality_increases.gif'.format(obj_name))
Example #8
def figure_3():
    #env.state.obj.T_obj_world.translation += np.array([-.01,-.05,.01])
    action = policy.action(env.state)
    mesh = env.state.obj.mesh.copy().apply_transform(
        env.state.T_obj_world.matrix)
    mesh = mesh.slice_plane([0, 0, .0005], -up)
    from dexnet.grasping import GraspableObject3D
    env.state.obj = GraspableObject3D(mesh)
    env.render_3d_scene()
    bottom_points = policy.toppling_model.bottom_points
    vis3d.plot3d(bottom_points[:2], color=[0, 0, 0], tube_radius=.0005)
    vis3d.points(Point(bottom_points[0]), color=[0, 0, 0], scale=.001)
    vis3d.points(Point(bottom_points[1]), color=[0, 0, 0], scale=.001)
    y_dir = normalize(bottom_points[1] - bottom_points[0])
    origin = policy.toppling_model.com_projected_on_edges[0] - .0025 * y_dir
    vis3d.points(Point(origin), color=[0, 0, 1], scale=.001)

    # t = .002
    # x = np.cross(y_dir, up)
    # while t < np.linalg.norm(origin - bottom_points[0]):
    #     start_point = origin - t*y_dir
    #     end_point = start_point + .0075*x
    #     shaft_points = [start_point, end_point]
    #     h1 = np.array([[0.7071,-0.7071,0],[0.7071,0.7071,0],[0,0,1]]).dot(x)
    #     h2 = np.array([[0.7071,0.7071,0],[-0.7071,0.7071,0],[0,0,1]]).dot(x)
    #     head_points = [end_point - 0.001*h2, end_point, end_point - 0.001*h1]
    #     vis3d.plot3d(shaft_points, color=purple, tube_radius=.0002)
    #     vis3d.plot3d(head_points, color=purple, tube_radius=.0002)
    #     t += .002

    ## try 2
    x = np.cross(y_dir, up)
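    # x is horizontal and perpendicular to the topple edge (y_dir); the two
    # short arrows below are drawn along +x and -x on either side of origin.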
    t = .004
    arrow_dir = x
    start_point = origin - t * y_dir
    end_point = start_point + .0075 * arrow_dir
    shaft_points = [start_point, end_point]
    h1 = np.array([[0.7071, -0.7071, 0],
                   [0.7071, 0.7071, 0],
                   [0, 0, 1]]).dot(x)
    h2 = np.array([[0.7071, 0.7071, 0],
                   [-0.7071, 0.7071, 0],
                   [0, 0, 1]]).dot(x)
    head_points = [end_point - 0.001 * h2, end_point, end_point - 0.001 * h1]
    vis3d.plot3d(shaft_points, color=purple, tube_radius=.0002)
    vis3d.plot3d(head_points, color=purple, tube_radius=.0002)

    # t = .000
    # while t < np.linalg.norm(origin - bottom_points[1]):
    #     arrow_dir = x
    #     start_point = origin + t*y_dir
    #     end_point = start_point + .0075*arrow_dir
    #     shaft_points = [start_point, end_point]
    #     h1 = np.array([[0.7071,-0.7071,0],[0.7071,0.7071,0],[0,0,1]]).dot(arrow_dir)
    #     h2 = np.array([[0.7071,0.7071,0],[-0.7071,0.7071,0],[0,0,1]]).dot(arrow_dir)
    #     head_points = [end_point - 0.001*h2, end_point, end_point - 0.001*h1]
    #     vis3d.plot3d(shaft_points, color=purple, tube_radius=.0002)
    #     vis3d.plot3d(head_points, color=purple, tube_radius=.0002)
    #     t += .002

    ## try 2
    t = .01
    arrow_dir = -x
    start_point = origin + t * y_dir
    end_point = start_point + .0075 * arrow_dir
    shaft_points = [start_point, end_point]
    h1 = np.array([[0.7071, -0.7071, 0],
                   [0.7071, 0.7071, 0],
                   [0, 0, 1]]).dot(arrow_dir)
    h2 = np.array([[0.7071, 0.7071, 0],
                   [-0.7071, 0.7071, 0],
                   [0, 0, 1]]).dot(arrow_dir)
    head_points = [end_point - 0.001 * h2, end_point, end_point - 0.001 * h1]
    vis3d.plot3d(shaft_points, color=purple, tube_radius=.0002)
    vis3d.plot3d(head_points, color=purple, tube_radius=.0002)

    #arrow_dir = np.cross(y_dir, up)
    #start_point = origin - .005*y_dir
    #end_point = start_point + .0075*arrow_dir
    #shaft_points = [start_point, end_point]
    #h1 = np.array([[0.7071,-0.7071,0],[0.7071,0.7071,0],[0,0,1]]).dot(arrow_dir)
    #h2 = np.array([[0.7071,0.7071,0],[-0.7071,0.7071,0],[0,0,1]]).dot(arrow_dir)
    #head_points = [end_point - 0.001*h2, end_point, end_point - 0.001*h1]
    #vis3d.plot3d(shaft_points, color=purple, tube_radius=.0002)
    #vis3d.plot3d(head_points, color=purple, tube_radius=.0002)

    #arrow_dir = -up
    #end_point = start_point + .0075*arrow_dir
    #shaft_points = [start_point, end_point]
    #h1 = np.array([[1,0,0],[0,0.7071,-0.7071],[0,0.7071,0.7071]]).dot(arrow_dir)
    #h2 = np.array([[1,0,0],[0,0.7071,0.7071],[0,-0.7071,0.7071]]).dot(arrow_dir)
    #head_points = [end_point - 0.001*h2, end_point, end_point - 0.001*h1]
    #vis3d.plot3d(shaft_points, color=blue, tube_radius=.0002)
    #vis3d.plot3d(head_points, color=blue, tube_radius=.0002)
    #
    #vis3d.points(Point(start_point), color=[0,1,0], scale=.001)
    #vis_axes(origin, y_dir)
    vis3d.show(starting_camera_pose=CAMERA_POSE)
    sys.exit()
Example #9
def figure_2():
    env.state.material_props._color = np.array([0.75] * 3)
    # env.state.obj.T_obj_world.translation += np.array([-.095,.025,.001])
    env.state.obj.T_obj_world.translation += np.array([-.01, -.04, .001])
    action = policy.action(env.state)
    env.render_3d_scene()
    bottom_points = policy.toppling_model.bottom_points
    edge = 1
    vis3d.plot3d(bottom_points[edge:edge + 2],
                 color=[0, 0, 0],
                 tube_radius=.0005)
    vis3d.points(Point(bottom_points[edge]), color=[0, 0, 0], scale=.001)
    vis3d.points(Point(bottom_points[edge + 1]), color=[0, 0, 0], scale=.001)
    y_dir = normalize(bottom_points[edge + 1] - bottom_points[edge])
    origin = policy.toppling_model.com_projected_on_edges[edge]  # - .0025*y_dir
    vis_axes(origin, y_dir)

    mesh = env.state.mesh.copy().apply_transform(env.state.T_obj_world.matrix)
    mesh.fix_normals()
    #direction = normalize([-.03, -.07, 0])
    #intersect, _, face_ind = mesh.ray.intersects_location([[.02, -.005, .09]], [direction], multiple_hits=False)
    ray_origin = mesh.center_mass + np.array([.1, -.1, .035])
    direction = normalize([-.1, .1, 0])
    intersect, _, face_ind = mesh.ray.intersects_location([ray_origin],
                                                          [direction],
                                                          multiple_hits=False)
    normal = mesh.face_normals[face_ind[0]]
    start_point = intersect[0] + .03 * normal
    end_point = intersect[0]
    shaft_points = [start_point, end_point]
    h1 = np.array([[0.7071, 0, -0.7071],
                   [0, 1, 0],
                   [0.7071, 0, 0.7071]]).dot(-normal)
    h2 = np.array([[0.7071, 0, 0.7071],
                   [0, 1, 0],
                   [-0.7071, 0, 0.7071]]).dot(-normal)
    head_points = [end_point - 0.01 * h2, end_point, end_point - 0.01 * h1]
    vis3d.plot3d(shaft_points, color=red, tube_radius=.001)
    vis3d.plot3d(head_points, color=red, tube_radius=.001)
    vis3d.points(Point(end_point), scale=.002, color=[0, 0, 0])

    # Center of Mass
    #start_point = env.state.T_obj_world.translation# - .0025*y_dir - np.array([0,0,.005])
    start_point = mesh.center_mass
    end_point = start_point - np.array([0, 0, .03])
    vis3d.points(Point(start_point), scale=.002, color=[0, 0, 0])
    shaft_points = [start_point, end_point]
    h1 = np.array([[1, 0, 0],
                   [0, 0.7071, -0.7071],
                   [0, 0.7071, 0.7071]]).dot(-up)
    h2 = np.array([[1, 0, 0],
                   [0, 0.7071, 0.7071],
                   [0, -0.7071, 0.7071]]).dot(-up)
    head_points = [end_point - 0.01 * h2, end_point, end_point - 0.01 * h1]
    vis3d.plot3d(shaft_points, color=red, tube_radius=.001)
    vis3d.plot3d(head_points, color=red, tube_radius=.001)

    # Dotted lines
    r_gs = dotted_line(start_point, origin)
    for r_g in r_gs:
        vis3d.plot3d(r_g, color=orange, tube_radius=.0006)
    s = normalize(bottom_points[edge + 1] - bottom_points[edge])
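    # Project the contact point onto the topple edge: (p - a).dot(s) * s + a is
    # the closest point to p on the line through a with unit direction s.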
    vertex_projected_on_edge = ((intersect[0] - bottom_points[edge]).dot(s) * s
                                + bottom_points[edge])
    r_fs = dotted_line(intersect[0], vertex_projected_on_edge)
    for r_f in r_fs:
        vis3d.plot3d(r_f, color=orange, tube_radius=.0006)
    vis3d.show(starting_camera_pose=CAMERA_POSE)
Example #10
    #figure_3()
    #figure_0()
    # figure_3()
    # figure_0()
    #failure_modes()
    action = policy.action(env.state)
    #noise_vis()

    if False:
        j = np.argmax(action.metadata['vertex_probs'][:, 2])
        R = policy.toppling_model.tipping_point_rotations()[1]
        mesh.apply_transform(R.matrix)
        vis3d.mesh(mesh, color=env.state.color)
        new_point = (R * Point(action.metadata['vertices'][j], 'world')).data
        new_normal = (R * Point(action.metadata['normals'][j], 'world')).data
        vis3d.points(new_point, radius=.001, color=[0, 1, 0])
        vis3d.plot([new_point, new_point + .01 * new_normal], radius=.0001)
        vis3d.show(starting_camera_pose=CAMERA_POSE)

    # state.T_obj_world.translation[:2] = np.array([0,0])

    # Visualize
    if args.topple_probs:
        vis3d.figure()
        env.render_3d_scene()
        for vertex, prob in zip(action.metadata['vertices'],
                                action.metadata['topple_probs']):
            color = [min(1, 2 * (1 - prob)), min(2 * prob, 1), 0]
            vis3d.points(Point(vertex, 'world'), scale=.001, color=color)
        # for bottom_point in policy.toppling_model.bottom_points:
        #    vis3d.points(Point(bottom_point, 'world'), scale=.001, color=[0,0,0])