Example #1
def are_visible(world):
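    """Return the set of movable object names that are visible from some camera,
    confirmed by batch ray casting from the camera to each candidate object."""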
    ray_names = []
    rays = []
    for name in world.movable:
        for camera, info in world.cameras.items():
            camera_pose = get_pose(info.body)
            camera_point = point_from_pose(camera_pose)
            point = point_from_pose(get_pose(world.get_body(name)))
            if is_visible_point(CAMERA_MATRIX,
                                KINECT_DEPTH,
                                point,
                                camera_pose=camera_pose):
                ray_names.append(name)
                rays.append(Ray(camera_point, point))
    ray_results = batch_ray_collision(rays)
    visible_indices = [
        idx for idx, (name, result) in enumerate(zip(ray_names, ray_results))
        if result.objectUniqueId == world.get_body(name)
    ]
    visible_names = {ray_names[idx] for idx in visible_indices}
    print('Detected:', sorted(visible_names))
    if has_gui():
        handles = [
            add_line(rays[idx].start, rays[idx].end, color=BLUE)
            for idx in visible_indices
        ]
        wait_for_duration(1.0)
        remove_handles(handles)
    # TODO: the object drop seems to happen around here
    return visible_names
Example #2
 def approach(self):
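     """Approach the pipe from above in two waypoints (0.2 then 0.13 above it),
     create a grasp attachment on the pipe, and squeeze the gripper."""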
     grasp = np.array([0, 0, 0.2])
     target_point_1 = np.array(get_pose(self.pw.pipe)[0]) + grasp
     grasp = np.array([0, 0, 0.13])
     target_point_2 = np.array(get_pose(self.pw.pipe)[0]) + grasp
     target_quat = (1, 0.5, 0, 0)  # whatever the default orientation is
     target_pose = (target_point_1, target_quat)
     obstacles = [self.pw.pipe, self.pw.hollow]
     self.go_to_pose(target_pose, obstacles=obstacles)
     target_pose = (target_point_2, target_quat)
     self.go_to_pose(target_pose, obstacles=obstacles)
     self.pipe_attach = create_attachment(self.pw.robot, self.ee_link,
                                          self.pw.pipe)
     self.squeeze(0, width=self.default_width)
Example #3
def lower_spoon(world, bowl_name, spoon_name, min_spoon_z, max_spoon_z):
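    """Lower the spoon over the bowl from max_spoon_z down toward min_spoon_z and
    return the lowest collision-free stirring height (None if all heights collide)."""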
    assert min_spoon_z <= max_spoon_z
    bowl_body = world.get_body(bowl_name)
    bowl_urdf_from_center = get_urdf_from_base(
        bowl_body)  # get_urdf_from_center

    spoon_body = world.get_body(spoon_name)
    spoon_quat = lookup_orientation(spoon_name, STIR_QUATS)
    spoon_urdf_from_center = get_urdf_from_base(
        spoon_body, reference_quat=spoon_quat)  # get_urdf_from_center
    # Keeping the orientation consistent for generalization purposes

    # TODO: what happens when the base isn't flat (e.g. bowl)
    bowl_pose = get_pose(bowl_body)
    stir_z = None
    for z in np.arange(max_spoon_z, min_spoon_z, step=-0.005):
        bowl_from_stirrer = multiply(bowl_urdf_from_center, Pose(Point(z=z)),
                                     invert(spoon_urdf_from_center))
        set_pose(spoon_body, multiply(bowl_pose, bowl_from_stirrer))
        #wait_for_user()
        if pairwise_collision(bowl_body, spoon_body):
            # Want to be careful not to make contact with the base
            break
        stir_z = z
    return stir_z
Example #4
def pick_problem(arm='left', grasp_type='side'):
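    """Construct a PR2 pick problem: a single box on a table, with the goal of
    holding the box in the given arm using the given grasp type."""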
    other_arm = get_other_arm(arm)
    initial_conf = get_carry_conf(arm, grasp_type)

    pr2 = create_pr2()
    set_base_values(pr2, (0, -1.2, np.pi / 2))
    set_arm_conf(pr2, arm, initial_conf)
    open_arm(pr2, arm)
    set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
    close_arm(pr2, other_arm)

    plane = create_floor()
    table = load_pybullet(TABLE_URDF)
    # table = create_table(height=0.8)
    # table = load_pybullet("table_square/table_square.urdf")
    box = create_box(.07, .05, .25)
    set_point(box, (-0.25, -0.3, TABLE_MAX_Z + .25 / 2))
    # set_point(box, (0.2, -0.2, 0.8 + .25/2 + 0.1))

    return Problem(robot=pr2,
                   movable=[box],
                   arms=[arm],
                   grasp_types=[grasp_type],
                   surfaces=[table],
                   goal_conf=get_pose(pr2),
                   goal_holding=[(arm, box)])
Example #5
def add_scales(task, ros_world):
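    """Spawn a fixed scale body beneath each candidate item and return a mapping
    from each scale name to the item stacked on top of it."""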
    scale_stackings = {}
    holding = {grasp.obj_name for grasp in task.init_holding.values()}
    with ClientSaver(ros_world.client):
        perception = ros_world.perception
        items = sorted(set(perception.get_items()) - holding,
                       key=lambda n: get_point(ros_world.get_body(n))[1],
                       reverse=False)  # Right to left
        for i, item in enumerate(items):
            if not POUR and (get_type(item) != SCOOP_CUP):
                continue
            item_body = ros_world.get_body(item)
            scale = create_name(SCALE_TYPE, i + 1)
            with HideOutput():
                scale_body = load_pybullet(get_body_urdf(get_type(scale)),
                                           fixed_base=True)
            ros_world.perception.sim_bodies[scale] = scale_body
            ros_world.perception.sim_items[scale] = None
            item_z = stable_z(item_body, scale_body)
            scale_pose_item = Pose(
                point=Point(z=-item_z))  # TODO: relies on origin in base
            set_pose(scale_body, multiply(get_pose(item_body),
                                          scale_pose_item))
            roll, pitch, _ = get_euler(scale_body)
            set_euler(scale_body, [roll, pitch, 0])
            scale_stackings[scale] = item
        #wait_for_user()
    return scale_stackings
Example #6
def label_elements(element_bodies):
    # +z points parallel to each element body
    for element, body in element_bodies.items():
        print(element)
        label_element(element_bodies, element)
        draw_pose(get_pose(body), length=0.02)
        wait_for_user()
Example #7
 def __init__(self,
              end_effector,
              element_bodies,
              node_points,
              ground_nodes,
              element,
              reverse,
              max_directions=INF):
     # TODO: more directions for ground collisions
     self.end_effector = end_effector
     self.element = element
     self.reverse = reverse
     self.max_directions = max_directions
     self.element_pose = get_pose(element_bodies[element])
     n1, n2 = element
     length = get_distance(node_points[n1], node_points[n2])
     self.translation_path = np.append(
         np.arange(-length / 2, length / 2, STEP_SIZE), [length / 2])
     if ORTHOGONAL_GROUND and is_ground(element, ground_nodes):
         # TODO: orthogonal to the ground
         self.direction_generator = cycle(
             [Pose(euler=Euler(roll=0, pitch=0))])
     else:
         self.direction_generator = get_direction_generator(
             self.end_effector, use_halton=False)
     self.trajectories = []
Example #8
def test_print(robot, node_points, elements):
    #elements = [elements[0]]
    elements = [elements[-1]]
    [element_body] = create_elements(node_points, elements)
    wait_for_user()

    phi = 0
    #grasp_rotations = [Pose(euler=Euler(roll=np.pi/2, pitch=phi, yaw=theta))
    #               for theta in np.linspace(0, 2*np.pi, 10, endpoint=False)]
    #grasp_rotations = [Pose(euler=Euler(roll=np.pi/2, pitch=theta, yaw=phi))
    #               for theta in np.linspace(0, 2*np.pi, 10, endpoint=False)]
    grasp_rotations = [sample_direction() for _ in range(25)]

    link = link_from_name(robot, TOOL_NAME)
    movable_joints = get_movable_joints(robot)
    sample_fn = get_sample_fn(robot, movable_joints)
    for grasp_rotation in grasp_rotations:
        n1, n2 = elements[0]
        length = np.linalg.norm(node_points[n2] - node_points[n1])
        set_joint_positions(robot, movable_joints, sample_fn())
        for t in np.linspace(-length / 2, length / 2, 10):
            #element_translation = Pose(point=Point(z=-t))
            #grasp_pose = multiply(grasp_rotation, element_translation)
            #reverse = Pose(euler=Euler())
            reverse = Pose(euler=Euler(roll=np.pi))
            grasp_pose = get_grasp_pose(t, grasp_rotation, 0)
            grasp_pose = multiply(grasp_pose, reverse)

            element_pose = get_pose(element_body)
            link_pose = multiply(element_pose, invert(grasp_pose))
            conf = inverse_kinematics(robot, link, link_pose)
            wait_for_user()
Example #9
 def fix_pose(self, name, pose=None, fraction=0.5):
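     """Snap the body upright (zero roll/pitch) and blend its z toward the stable
     height on its supporting surface; return (new_pose, surface_name)."""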
     body = self.get_body(name)
     if pose is None:
         pose = get_pose(body)
     else:
         set_pose(body, pose)
     # TODO: can also drop in simulation
     x, y, z = point_from_pose(pose)
     roll, pitch, yaw = euler_from_quat(quat_from_pose(pose))
     quat = quat_from_euler(Euler(yaw=yaw))
     set_quat(body, quat)
     surface_name = self.get_supporting(name)
     if surface_name is None:
         return None, None
     if fraction == 0:
         new_pose = (Point(x, y, z), quat)
         return new_pose, surface_name
     surface_aabb = compute_surface_aabb(self, surface_name)
     new_z = (1 - fraction) * z + fraction * stable_z_on_aabb(
         body, surface_aabb)
     point = Point(x, y, new_z)
     set_point(body, point)
     print('{} error: roll={:.3f}, pitch={:.3f}, z-delta: {:.3f}'.format(
         name, roll, pitch, new_z - z))
     new_pose = (point, quat)
     return new_pose, surface_name
Example #10
def observe_pybullet(world):
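    """Simulate noisy detections of the currently visible objects: drop each with
    false-negative probability OBS_P_FN and perturb poses with x/y/yaw noise."""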
    # TODO: randomize robot's pose
    # TODO: probabilities based on whether in viewcone or not
    # TODO: sample from poses on table
    # world_saver = WorldSaver()
    visible_entities = are_visible(world)
    detections = {}
    assert OBS_P_FP == 0
    for name in visible_entities:
        # TODO: false positives
        if random.random() < OBS_P_FN:
            continue
        body = world.get_body(name)
        pose = get_pose(body)
        dx, dy = np.random.multivariate_normal(mean=np.zeros(2),
                                               cov=math.pow(OBS_POS_STD, 2) *
                                               np.eye(2))
        dyaw, = np.random.multivariate_normal(mean=np.zeros(1),
                                              cov=math.pow(OBS_ORI_STD, 2) *
                                              np.eye(1))
        print('{}: dx={:.3f}, dy={:.3f}, dyaw={:.5f}'.format(
            name, dx, dy, dyaw))
        noise_pose = Pose(Point(x=dx, y=dy), Euler(yaw=dyaw))
        observed_pose = multiply(pose, noise_pose)
        #world.get_body_type(name)
        detections.setdefault(name, []).append(
            observed_pose)  # TODO: use type instead
    #world_saver.restore()
    return detections
Example #11
def create_bounding_mesh(printed_elements, element_bodies=None, node_points=None, buffer=0.):
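    """Create a translucent convex-hull mesh body enclosing the printed elements,
    optionally inflated by a cubic buffer around each point."""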
    # TODO: use bounding boxes instead of points
    # TODO: connected components
    assert printed_elements
    assert element_bodies or node_points
    printed_points = []
    if node_points is not None:
        printed_points.extend(node_points[n] for element in printed_elements for n in element)
    if element_bodies is not None:
        for element in printed_elements:
            body = element_bodies[element]
            printed_points.extend(apply_affine(get_pose(body), vertices_from_link(body, BASE_LINK)))

    if buffer != 0.:
        half_extents = buffer*np.ones(3)
        for point in list(printed_points):
            printed_points.extend(np.array(point) + np.array(corner)
                                  for corner in get_aabb_vertices(AABB(-half_extents, half_extents)))

    rgb = colorsys.hsv_to_rgb(h=random.random(), s=1, v=1)
    #rgb = RED
    try:
        mesh = convex_hull(printed_points)
        # handles = draw_mesh(mesh)
        return create_mesh(mesh, under=True, color=apply_alpha(rgb, 0.5))
    except QhullError as e:
        print(printed_elements)
        raise e
Example #12
def mirror_robot(robot1, node_points):
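    """Load a second robot facing the first (yaw rotated by pi) on the opposite
    side along the base-to-centroid direction, and set both robots to DUAL_CONF."""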
    # TODO: place robots side by side or diagonal across
    set_extrusion_camera(node_points, theta=-np.pi / 3)
    #draw_pose(Pose())
    centroid = np.average(node_points, axis=0)
    centroid_pose = Pose(point=centroid)
    #draw_pose(Pose(point=centroid))

    # print(centroid)
    scale = 0.  # 0.15
    vector = get_point(robot1) - centroid
    set_pose(robot1, Pose(point=Point(*(scale * vector[:2]))))
    # Inner product of end-effector z with base->centroid or perpendicular to this line
    # Partition by sides

    robot2 = load_robot()
    set_pose(
        robot2,
        Pose(point=Point(*(-(2 + scale) * vector[:2])),
             euler=Euler(yaw=np.pi)))

    # robots = [robot1]
    robots = [robot1, robot2]
    for robot in robots:
        set_configuration(robot, DUAL_CONF)
        # joint1 = get_movable_joints(robot)[0]
        # set_joint_position(robot, joint1, np.pi / 8)
        draw_pose(get_pose(robot), length=0.25)
    return robots
Example #13
def is_visible_by_camera(world, point):
    for camera_name in world.cameras:
        camera_body, camera_matrix, camera_depth = world.cameras[camera_name]
        camera_pose = get_pose(camera_body)
        #camera_point = point_from_pose(camera_pose)
        if is_visible_point(camera_matrix, camera_depth, point, camera_pose):
            return True
    return False
Example #14
def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)

    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))

        table1 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table1, Point(y=+0.5))

        table2 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table2, Point(y=-0.5))

        tables = [table1, table2]

        #set_euler(table1, Euler(yaw=np.pi/2))
        with HideOutput():
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table1)
        set_point(block1, Point(y=-0.5, z=block_z))

        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=-0.25, y=-0.5, z=block_z))

        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, y=+0.5, z=block_z))

        blocks = [block1, block2, block3]

        set_camera_pose(camera_point=Point(x=-1, z=block_z+1), target_point=Point(z=block_z))

    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    grasps = get_side_grasps(block1, tool_pose=Pose(euler=Euler(yaw=np.pi/2)),
                             top_offset=0.02, grasp_length=0.03, under=False)[1:2]
    for grasp in grasps:
        gripper_pose = multiply(block_pose, invert(grasp))
        set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
        wait_for_user()

    wait_for_user('Finish?')
    disconnect()
Example #15
File: pour.py Project: lyltc1/LTAMP
def bowl_path_collision(bowl_body, body, body_path_bowl):
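    """Return True if the body collides with the bowl at any pose along the path,
    where the path poses are expressed in the bowl's frame."""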
    bowl_pose = get_pose(bowl_body)
    with BodySaver(body):
        for cup_pose_bowl in body_path_bowl:
            cup_pose_world = multiply(bowl_pose, cup_pose_bowl)
            set_pose(body, cup_pose_world)
            if body_pair_collision(bowl_body, body):  #, collision_buffer=0.0):
                return True
    return False
Example #16
def main():
    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(
            "models/drake/pr2_description/urdf/pr2_simplified.urdf")
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']),
                        [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))

    # head_link = link_from_name(pr2, HEAD_LINK)
    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    head_link = head_joints[-1]

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_generator = visible_base_generator(pr2, target_point, distance_range)

    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)

        p.addUserDebugLine(point_from_pose(get_link_pose(pr2, head_link)),
                           target_point,
                           lineColorRGB=(1, 0, 0))  # addUserDebugText
        p.addUserDebugLine(point_from_pose(
            get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                           target_point,
                           lineColorRGB=(0, 0, 1))  # addUserDebugText

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2, target_point)
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_for_user()
        remove_body(detect_cone)
        remove_body(view_cone)

    disconnect()
Example #17
 def place(self, use_policy=False):
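     """Lift the grasped pipe straight up, then move it above the hollow part;
     return the concatenated Cartesian trajectories."""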
     grasp = np.array([0, 0, 0.3])
     target_point = np.array(get_pose(self.pw.hollow)[0]) + grasp
     self.hollow_pose = get_pose(self.pw.hollow)
     target_quat = (1, 0.5, 0, 0)
     target_pose = (target_point, target_quat)
     lift_point = np.array(
         p.getLinkState(self.pw.robot, self.finger_joints[0])[0]) + grasp
     traj1 = self.go_to_pose((lift_point, target_quat),
                             obstacles=[self.pw.hollow],
                             attachments=[self.pipe_attach],
                             cart_traj=True,
                             use_policy=use_policy)
     traj2 = self.go_to_pose(target_pose,
                             obstacles=[self.pw.hollow],
                             attachments=[self.pipe_attach],
                             cart_traj=True,
                             use_policy=use_policy)
     return traj1 + traj2
Example #18
def visualize_cartesian_path(body, pose_path):
    for i, pose in enumerate(pose_path):
        set_pose(body, pose)
        print('{}/{}) continue?'.format(i, len(pose_path)))
        wait_for_user()
    handles = draw_pose(get_pose(body))
    handles.extend(draw_aabb(get_aabb(body)))
    print('Finish?')
    wait_for_user()
    for h in handles:
        remove_debug(h)
Example #19
def get_direction_generator(end_effector, **kwargs):
    world_from_base = get_pose(end_effector.robot)
    lower = [-np.pi / 2, -np.pi / 2]
    upper = [+np.pi / 2, +np.pi / 2]
    for [roll, pitch] in interval_generator(lower, upper, **kwargs):
        ##roll = random.uniform(0, np.pi)
        #roll = np.pi/4
        #pitch = random.uniform(0, 2*np.pi)
        #return Pose(euler=Euler(roll=np.pi / 2 + roll, pitch=pitch))
        #roll = random.uniform(-np.pi/2, np.pi/2)
        #pitch = random.uniform(-np.pi/2, np.pi/2)
        pose = Pose(euler=Euler(roll=roll, pitch=pitch))
        yield pose
Example #20
def compute_forward_reachability(robot, **kwargs):
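    """Approximate the robot's planar forward-reachable region as the convex hull
    (vertex list) of learned reachable points."""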
    # TODO: compute wrt torso instead
    points = list(
        map(point_from_pose,
            learned_forward_generator(robot, get_pose(robot), **kwargs)))
    #for point in points:
    #    draw_point(point)
    #wait_for_interrupt()
    points2d = [point[:2] for point in points]
    #centroid = np.average(points, axis=0)
    from scipy.spatial import ConvexHull
    hull = ConvexHull(points2d)
    return [points2d[i] for i in hull.vertices]
Example #21
def get_contained_beads(body, beads, height_fraction=1.0, top_threshold=0.0):
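    """Return the beads whose AABBs lie entirely within the lower height_fraction
    of the body's bounding prism, extended upward by top_threshold."""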
    #aabb = get_aabb(body)
    center, extent = approximate_as_prism(body, body_pose=get_pose(body))
    lower = center - extent / 2.
    upper = center + extent / 2.
    _, _, z1 = lower
    x2, y2, z2 = upper
    z_upper = z1 + height_fraction * (z2 - z1) + top_threshold
    new_aabb = AABB(lower, np.array([x2, y2, z_upper]))
    #handles = draw_aabb(new_aabb)
    return {
        bead
        for bead in beads if aabb_contains_aabb(get_aabb(bead), new_aabb)
    }
Example #22
File: move.py Project: lyltc1/LTAMP
 def water_test(q):
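     """Return True (a violation) if configuration q tilts the attached body
     beyond MAX_ROTATION in roll or pitch."""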
     if attachment is None:
         return False
     set_joint_positions(body, joints, q)
     attachment.assign()
     attachment_pose = get_pose(attachment.child)
     pose = multiply(attachment_pose,
                     reference_pose)  # TODO: confirm not inverted
     roll, pitch, _ = euler_from_quat(quat_from_pose(pose))
     violation = (MAX_ROTATION < abs(roll)) or (MAX_ROTATION < abs(pitch))
     #if violation: # TODO: check whether different confs can be waypoints for each object
     #    print(q, violation)
     #    wait_for_user()
     return violation
Example #23
def visualize_learned_pours(learner, bowl_type='blue_bowl', cup_type='red_cup', num_steps=None):
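    """Step through pour parameters proposed by the learner for the bowl/cup pair,
    printing validity, prediction, and score while animating each pour path."""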
    learner.query_type = REJECTION # BEST | REJECTION | STRADDLE
    learner.validity_learner = None

    world = create_world()
    #add_obstacles()
    #environment = get_bodies()

    world.bodies[bowl_type] = load_cup_bowl(bowl_type)
    world.bodies[cup_type] = load_cup_bowl(cup_type)
    feature = get_pour_feature(world, bowl_type, cup_type)
    draw_pose(get_pose(world.get_body(bowl_type)), length=0.2)
    # TODO: sample different sizes

    print('Feature:', feature)
    for parameter in learner.parameter_generator(world, feature, valid=False):
        handles = []
        print('Parameter:', parameter)
        valid = is_valid_pour(world, feature, parameter)
        score = learner.score(feature, parameter)
        x = learner.func.x_from_feature_parameter(feature, parameter)
        [prediction] = learner.predict_x(np.array([x]), noise=True)
        print('Valid: {} | Mean: {:.3f} | Var: {:.3f} | Score: {:.3f}'.format(
            valid, prediction['mean'], prediction['variance'], score))

        #fraction = rescale(clip(prediction['mean'], *DEFAULT_INTERVAL), DEFAULT_INTERVAL, new_interval=(0, 1))
        #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN)
        #set_color(world.get_body(cup_type), apply_alpha(color, alpha=0.25))
        # TODO: overlay many cups at different poses
        bowl_from_pivot = get_bowl_from_pivot(world, feature, parameter)
        #handles.extend(draw_pose(bowl_from_pivot))
        handles.extend(draw_point(point_from_pose(bowl_from_pivot), color=BLACK))

        # TODO: could label bowl, cup dimensions
        path, _ = pour_path_from_parameter(world, feature, parameter, cup_yaw=0)
        for p1, p2 in safe_zip(path[:-1], path[1:]):
            handles.append(add_line(point_from_pose(p1), point_from_pose(p2), color=RED))

        if num_steps is not None:
            path = path[:num_steps]
        for i, pose in enumerate(path[::-1]):
            #step_handles = draw_pose(pose, length=0.1)
            #step_handles = draw_point(point_from_pose(pose), color=BLACK)
            set_pose(world.get_body(cup_type), pose)
            print('Step {}/{}'.format(i+1, len(path)))
            wait_for_user()
            #remove_handles(step_handles)
        remove_handles(handles)
Example #24
def is_robot_visible(world, links):
    for link in links:
        link_point = point_from_pose(get_link_pose(world.robot, link))
        visible = False
        for camera_body, camera_matrix, camera_depth in world.cameras.values():
            camera_pose = get_pose(camera_body)
            #camera_point = point_from_pose(camera_pose)
            #add_line(link_point, camera_point)
            if is_visible_point(camera_matrix, camera_depth, link_point,
                                camera_pose):
                visible = True
                break
        if not visible:
            return False
    #wait_for_user()
    return True
Example #25
 def _load_robot(self, real_world):
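     """Align the simulated robot so BASE_FRAME sits at the world origin, then
     copy the real robot's joint positions into simulation."""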
     with ClientSaver(self.client):
         # TODO: set the x,y,theta joints using the base pose
         pose = get_pose(self.robot)  # base_link is origin
         base_pose = get_link_pose(self.robot,
                                   link_from_name(self.robot, BASE_FRAME))
         set_pose(self.robot, multiply(invert(base_pose), pose))
         # base_pose = real_world.controller.return_cartesian_pose(arm='l')
         # Because the robot might have an xyz
         movable_names = real_world.controller.get_joint_names()
         movable_joints = [
             joint_from_name(self.robot, name) for name in movable_names
         ]
         movable_positions = real_world.controller.get_joint_positions(
             movable_names)
         set_joint_positions(self.robot, movable_joints, movable_positions)
         self.initial_config = get_configuration(self.robot)
         self.body_mapping[self.robot] = real_world.robot
Example #26
def compute_assignments(robots, elements, node_points, initial_confs):
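    """Assign each element to the robot that minimizes the yaw of the element's
    midpoint in that robot's base frame; return {robot name: element set}."""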
    # TODO: print direction might influence the assignment
    assignments = {name: set() for name in initial_confs}
    for element in elements:
        point = get_midpoint(node_points, element)  # min/max
        closest_robot, closest_distance = None, INF
        for i, robot in enumerate(robots):
            base_pose = get_pose(robot)
            base_point = point_from_pose(base_pose)
            point_base = tform_point(invert(base_pose), point)
            distance = get_yaw(point_base)  # which side it's on
            #distance = abs((base_point - point)[0]) # x distance
            #distance = get_length((base_point - point)[:2]) # xy distance
            if distance < closest_distance:
                closest_robot, closest_distance = ROBOT_TEMPLATE.format(
                    i), distance
        assert closest_robot is not None
        # TODO: assign to several robots if close to the best distance
        assignments[closest_robot].add(element)
    return assignments
Example #27
    def fn(obj_name, pose):
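        """Try each camera for a detection of obj_name at pose: require the scaled
        view AABB to be fully visible, then optionally ray trace for occluders."""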
        # TODO: incorporate probability mass
        # Either sample an observation (control) or a target belief (next state)
        body = world.get_body(obj_name)
        open_surface_joints(world, pose.support)
        for camera_name in world.cameras:
            camera_body, camera_matrix, camera_depth = world.cameras[
                camera_name]
            camera_pose = get_pose(camera_body)
            camera_point = point_from_pose(camera_pose)
            obj_point = point_from_pose(pose.get_world_from_body())

            aabb = get_view_aabb(body, camera_pose)
            center = get_aabb_center(aabb)
            extent = np.multiply([detect_scale, detect_scale, 1],
                                 get_aabb_extent(aabb))
            view_aabb = (center - extent / 2, center + extent / 2)
            # print(is_visible_aabb(view_aabb, camera_matrix=camera_matrix))
            obj_points = apply_affine(
                camera_pose, support_from_aabb(view_aabb)) + [obj_point]
            # obj_points = [obj_point]
            if not all(
                    is_visible_point(camera_matrix, camera_depth, point,
                                     camera_pose) for point in obj_points):
                continue
            rays = [Ray(camera_point, point) for point in obj_points]
            detect = Detect(world, camera_name, obj_name, pose, rays)
            if ray_trace:
                # TODO: how should doors be handled?
                move_occluding(world)
                open_surface_joints(world, pose.support)
                detect.pose.assign()
                if obstacles & detect.compute_occluding():
                    continue
            #detect.draw()
            #wait_for_user()
            return (detect, )
        return None
Example #28
def test_simulation(robot, target_x, video=None):
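    """Drive the robot base to x = target_x, via a pose constraint or (for the
    turtlebot) joint position control, then simulate until it arrives."""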
    use_turtlebot = (get_body_name(robot) == 'turtlebot')
    if not use_turtlebot:
        target_point, target_quat = map(list, get_pose(robot))
        target_point[0] = target_x
        add_pose_constraint(robot,
                            pose=(target_point, target_quat),
                            max_force=200)  # TODO: velocity constraint?
    else:
        # p.changeDynamics(robot, robot_joints[0], # Doesn't work
        #                  maxJointVelocity=1,
        #                  jointLimitForce=1,)
        robot_joints = get_movable_joints(robot)[:3]
        print('Max velocities:', get_max_velocities(robot, robot_joints))
        print('Max forces:', get_max_forces(robot, robot_joints))
        control_joint(robot,
                      joint=robot_joints[0],
                      position=target_x,
                      velocity=0,
                      position_gain=None,
                      velocity_scale=None,
                      max_velocity=100,
                      max_force=300)
        # control_joints(robot, robot_joints, positions=[target_x, 0, PI], max_force=300)
        # velocity_control_joints(robot, robot_joints, velocities=[-2., 0, 0]) #, max_force=300)

    robot_link = get_first_link(robot)
    if video is None:
        wait_if_gui('Begin?')
    simulate(controller=condition_controller(
        lambda *args: abs(target_x - point_from_pose(
            get_link_pose(robot, robot_link))[0]) < 1e-3),
             sleep=0.01)  # TODO: velocity condition
    # print('Velocities:', get_joint_velocities(robot, robot_joints))
    # print('Torques:', get_joint_torques(robot, robot_joints))
    if video is None:
        set_renderer(enable=True)
        wait_if_gui('Finish?')
Example #29
 def update_dist(self, observation, obstacles=[], verbose=False):
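     """Update the object's pose belief given an observation: either collapse to
     the detected pose or run a Bayesian update over visible candidate poses."""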
     # cfree_dist.conditionOnVar(index=1, has_detection=True)
     if not BAYESIAN and (self.name in observation):
         # TODO: convert into a Multivariate Gaussian
         [detected_pose] = observation[self.name]
         return DeltaDist(detected_pose)
     if not self.world.cameras:
         return self.dist.copy()
     body = self.world.get_body(self.name)
     all_poses = self.dist.support()
     cfree_poses = all_poses
     #cfree_poses = compute_cfree(body, all_poses, obstacles)
     #cfree_dist = self.cfree_dist
     cfree_dist = DDist(
         {pose: self.dist.prob(pose)
          for pose in cfree_poses})
     # TODO: do these updates simultaneously for each object
     # TODO: check all camera poses
     [camera] = self.world.cameras.keys()
     info = self.world.cameras[camera]
     camera_pose = get_pose(info.body)
     detectable_poses = compute_detectable(cfree_poses, camera_pose)
     visible_poses = compute_visible(body,
                                     detectable_poses,
                                     camera_pose,
                                     draw=False)
     if verbose:
         print(
             'Total: {} | CFree: {} | Detectable: {} | Visible: {}'.format(
                 len(all_poses), len(cfree_poses), len(detectable_poses),
                 len(visible_poses)))
     assert set(visible_poses) <= set(detectable_poses)
     # obs_fn = get_observation_fn(surface)
     #wait_for_user()
     return self.bayesian_belief_update(cfree_dist,
                                        visible_poses,
                                        observation,
                                        verbose=verbose)
Example #30
def create_table_bodies(world, item_ranges, randomize=True):
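    """Create the floor, table, and item bodies (optionally randomized), placing
    each item at a stable height within its sampled 2D pose range."""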
    perception = world.perception
    with HideOutput():
        add_data_path()
        floor_body = load_pybullet("plane.urdf")
        set_pose(floor_body, get_pose(world.robot))
        add_table(world)
        for name, limits in sorted(item_ranges.items()):
            perception.sim_items[name] = None
            if randomize:
                body = randomize_body(name,
                                      width_range=limits.width_range,
                                      height_range=limits.height_range,
                                      mass_range=limits.mass_range)
            else:
                body = load_body(name)
            perception.sim_bodies[name] = body
            # perception.add_item(name, unit_pose())
            x, y, yaw = np.random.uniform(*limits.pose2d_range)
            surface_body = perception.get_body(limits.surface)
            z = stable_z(body, surface_body) + Z_EPSILON
            pose = Pose((x, y, z), Euler(yaw=yaw))
            perception.set_pose(name, *pose)