Example #1
def fix_pose(self, name, pose=None, fraction=0.5):
    body = self.get_body(name)
    if pose is None:
        pose = get_pose(body)
    else:
        set_pose(body, pose)
    # TODO: can also drop in simulation
    x, y, z = point_from_pose(pose)
    roll, pitch, yaw = euler_from_quat(quat_from_pose(pose))
    quat = quat_from_euler(Euler(yaw=yaw))
    set_quat(body, quat)
    surface_name = self.get_supporting(name)
    if surface_name is None:
        return None, None
    if fraction == 0:
        new_pose = (Point(x, y, z), quat)
        return new_pose, surface_name
    surface_aabb = compute_surface_aabb(self, surface_name)
    new_z = (1 - fraction) * z + fraction * stable_z_on_aabb(
        body, surface_aabb)
    point = Point(x, y, new_z)
    set_point(body, point)
    print('{} error: roll={:.3f}, pitch={:.3f}, z-delta: {:.3f}'.format(
        name, roll, pitch, new_z - z))
    new_pose = (point, quat)
    return new_pose, surface_name
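
A minimal usage sketch for the method above, assuming fix_pose is a method of a world object (as the self parameter suggests) and that 'bowl' names a body it tracks; both names are illustrative.
# Hypothetical call: snap the body upright (yaw only) and move it halfway toward its supporting surface.
new_pose, surface_name = world.fix_pose('bowl', fraction=0.5)
if surface_name is None:
    print('No supporting surface found')  # the method returned (None, None)
else:
    print('Snapped onto {}: {}'.format(surface_name, new_pose))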
Example #2
def load_plane(z=-1e-3):
    add_data_path()
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    if z is not None:
        set_point(plane, Point(z=z))
    return plane
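
A minimal sketch of calling the helper above; connect, wait_if_gui, and disconnect are the same client utilities used throughout these examples.
connect(use_gui=True)     # start the physics client before loading any models
plane = load_plane()      # the plane sits slightly below z=0 (default z=-1e-3)
wait_if_gui('Continue?')
disconnect()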
Example #3
def test_push_pour(visualize):
    arms = ARMS
    cup_name = create_name('bluecup', 1)
    bowl_name = create_name('bowl', 1)

    items = [bowl_name, cup_name, bowl_name]
    world, table_body = create_world(items, visualize=visualize)
    set_point(table_body, np.array(TABLE_POSE[0]) + np.array([0, -0.1, 0]))

    with ClientSaver(world.perception.client):
        cup_z = stable_z(world.perception.sim_bodies[cup_name], table_body) + Z_EPSILON
        bowl_z = stable_z(world.perception.sim_bodies[bowl_name], table_body) + Z_EPSILON
    world.perception.set_pose(cup_name, Point(0.75, 0.4, cup_z), unit_quat())
    world.perception.set_pose(bowl_name, Point(0.5, -0.6, bowl_z), unit_quat())
    update_world(world, table_body)

    # TODO: can prevent the right arm from being able to pick
    init = [
        ('Contains', cup_name, COFFEE),
        #('Graspable', bowl_name),
        ('CanPush', bowl_name, LEFT_ARM), # TODO: intersection of these regions
    ]
    goal = [
        ('Contains', bowl_name, COFFEE),
        ('InRegion', bowl_name, LEFT_ARM),
    ]
    task = Task(init=init, goal=goal, arms=arms, pushable=[bowl_name])
    # Most of the time places the cup to exchange arms

    return world, task
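
A sketch of how this problem constructor would be consumed; the downstream planner call is omitted because it lies outside this snippet.
world, task = test_push_pour(visualize=True)
# The returned Task pairs the symbolic init/goal facts above with the simulated world;
# a task-and-motion planner (not shown here) would consume both.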
Example #4
def main():
    # TODO: move to pybullet-planning for now
    parser = argparse.ArgumentParser()
    parser.add_argument('-viewer',
                        action='store_true',
                        help='enable the viewer while planning')
    args = parser.parse_args()
    print(args)

    connect(use_gui=True)
    with LockRenderer():
        draw_pose(unit_pose(), length=1)
        floor = create_floor()
        with HideOutput():
            robot = load_pybullet(get_model_path(ROOMBA_URDF),
                                  fixed_base=True,
                                  scale=2.0)
            for link in get_all_links(robot):
                set_color(robot, link=link, color=ORANGE)
            robot_z = stable_z(robot, floor)
            set_point(robot, Point(z=robot_z))
        #set_base_conf(robot, rover_confs[i])

        data_path = add_data_path()
        shelf, = load_model(os.path.join(data_path, KIVA_SHELF_SDF),
                            fixed_base=True)  # TODO: returns a tuple
        dump_body(shelf)
        #draw_aabb(get_aabb(shelf))

    wait_for_user()
    disconnect()
Example #5
def pick_problem(arm='left', grasp_type='side'):
    other_arm = get_other_arm(arm)
    initial_conf = get_carry_conf(arm, grasp_type)

    pr2 = create_pr2()
    set_base_values(pr2, (0, -1.2, np.pi / 2))
    set_arm_conf(pr2, arm, initial_conf)
    open_arm(pr2, arm)
    set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
    close_arm(pr2, other_arm)

    plane = create_floor()
    table = load_pybullet(TABLE_URDF)
    # table = create_table(height=0.8)
    # table = load_pybullet("table_square/table_square.urdf")
    box = create_box(.07, .05, .25)
    set_point(box, (-0.25, -0.3, TABLE_MAX_Z + .25 / 2))
    # set_point(box, (0.2, -0.2, 0.8 + .25/2 + 0.1))

    return Problem(robot=pr2,
                   movable=[box],
                   arms=[arm],
                   grasp_types=[grasp_type],
                   surfaces=[table],
                   goal_conf=get_pose(pr2),
                   goal_holding=[(arm, box)])
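
A short sketch of building the problem above; the attribute names on the returned Problem (movable, surfaces, ...) are assumed to mirror the constructor keywords.
connect(use_gui=True)
problem = pick_problem(arm='left', grasp_type='side')
print('Movable bodies:', problem.movable)   # assumption: Problem stores its keyword arguments
print('Surfaces:', problem.surfaces)
wait_if_gui()
disconnect()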
Example #6
def pour_beads(world,
               cup_name,
               beads,
               reset_contained=False,
               fix_outside=True,
               cup_thickness=0.01,
               bead_offset=0.01,
               drop_rate=0.02,
               **kwargs):
    if not beads:
        return set()
    start_time = time.time()
    # TODO: compute the radius of each bead
    bead_radius = np.average(approximate_as_prism(beads[0])) / 2.

    masses = list(map(get_mass, beads))
    savers = list(map(BodySaver, beads))
    for body in beads:
        set_mass(body, 0)

    cup = world.get_body(cup_name)
    local_center, (diameter, height) = approximate_as_cylinder(cup)
    center = get_point(cup) + local_center
    interior_radius = max(0.0, diameter / 2. - bead_radius - cup_thickness)
    # TODO: fill up to a certain threshold

    ty = get_type(cup_name)
    if ty in SPOON_DIAMETERS:
        # TODO: do this in a more principled way
        interior_radius = 0
        center[1] += (SPOON_LENGTHS[ty] - SPOON_DIAMETERS[ty]) / 2.

    # TODO: some sneak out through the bottom
    # TODO: could reduce gravity while filling
    world.controller.set_gravity()
    for i, bead in enumerate(beads):
        # TODO: halton sequence
        x, y = center[:2] + np.random.uniform(
            0, interior_radius) * unit_from_theta(
                np.random.uniform(-np.pi, np.pi))
        new_z = center[2] + height / 2. + bead_radius + bead_offset
        set_point(bead, [x, y, new_z])
        set_mass(bead, masses[i])
        world.controller.rest_for_duration(drop_rate)
    world.controller.rest_for_duration(BEADS_REST)
    print('Simulated {} beads in {:.3f} seconds'.format(
        len(beads), elapsed_time(start_time)))
    contained_beads = get_contained_beads(cup, beads, **kwargs)
    #wait_for_user()

    for body in beads:
        if fix_outside and (body not in contained_beads):
            set_mass(body, 0)
    for saver in savers:
        if reset_contained or (saver.body not in contained_beads):
            saver.restore()
    #wait_for_user()
    return contained_beads
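
A sketch of exercising the routine above together with the create_beads helper defined in a later example; the cup name and bead count are illustrative assumptions.
beads = create_beads(50, radius=0.005)       # bead factory from a later example
cup_name = create_name('bluecup', 1)         # assumed to name a cup already tracked by world
contained = pour_beads(world, cup_name, beads)
print('{} of {} beads ended up inside the cup'.format(len(contained), len(beads)))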
Example #7
def main():
    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(
            "models/drake/pr2_description/urdf/pr2_simplified.urdf")
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']),
                        [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))

    # head_link = link_from_name(pr2, HEAD_LINK)
    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    head_link = head_joints[-1]

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_generator = visible_base_generator(pr2, target_point, distance_range)

    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)

        p.addUserDebugLine(point_from_pose(get_link_pose(pr2, head_link)),
                           target_point,
                           lineColorRGB=(1, 0, 0))  # addUserDebugText
        p.addUserDebugLine(point_from_pose(
            get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                           target_point,
                           lineColorRGB=(0, 0, 1))  # addUserDebugText

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2, target_point)
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_for_user()
        remove_body(detect_cone)
        remove_body(view_cone)

    disconnect()
Example #8
def main(use_pr2_drake=True):
    connect(use_gui=True)
    add_data_path()

    plane = p.loadURDF("plane.urdf")
    table_path = "models/table_collision/table.urdf"
    table = load_pybullet(table_path, fixed_base=True)
    set_quat(table, quat_from_euler(Euler(yaw=PI / 2)))
    # table/table.urdf, table_square/table_square.urdf, cube.urdf, block.urdf, door.urdf
    obstacles = [plane, table]

    pr2_urdf = DRAKE_PR2_URDF if use_pr2_drake else PR2_URDF
    with HideOutput():
        pr2 = load_model(pr2_urdf, fixed_base=True)  # TODO: suppress warnings?
    dump_body(pr2)

    z = base_aligned_z(pr2)
    print(z)
    #z = stable_z_on_aabb(pr2, AABB(np.zeros(3), np.zeros(3)))
    print(z)

    set_point(pr2, Point(z=z))
    print(get_aabb(pr2))
    wait_if_gui()

    base_start = (-2, -2, 0)
    base_goal = (2, 2, 0)
    arm_start = SIDE_HOLDING_LEFT_ARM
    #arm_start = TOP_HOLDING_LEFT_ARM
    #arm_start = REST_LEFT_ARM
    arm_goal = TOP_HOLDING_LEFT_ARM
    #arm_goal = SIDE_HOLDING_LEFT_ARM

    left_joints = joints_from_names(pr2, PR2_GROUPS['left_arm'])
    right_joints = joints_from_names(pr2, PR2_GROUPS['right_arm'])
    torso_joints = joints_from_names(pr2, PR2_GROUPS['torso'])
    set_joint_positions(pr2, left_joints, arm_start)
    set_joint_positions(pr2, right_joints,
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, torso_joints, [0.2])
    open_arm(pr2, 'left')
    # test_ikfast(pr2)

    add_line(base_start, base_goal, color=RED)
    print(base_start, base_goal)
    if use_pr2_drake:
        test_drake_base_motion(pr2, base_start, base_goal, obstacles=obstacles)
    else:
        test_base_motion(pr2, base_start, base_goal, obstacles=obstacles)

    test_arm_motion(pr2, left_joints, arm_goal)
    # test_arm_control(pr2, left_joints, arm_start)

    wait_if_gui('Finish?')
    disconnect()
Example #9
def create_elements_bodies(node_points,
                           elements,
                           color=apply_alpha(RED, alpha=1),
                           diameter=ELEMENT_DIAMETER,
                           shrink=ELEMENT_SHRINK):
    # TODO: could scale the whole environment
    # TODO: create a version without shrinking for transit planning
    # URDF_USE_IMPLICIT_CYLINDER
    element_bodies = []
    for (n1, n2) in elements:
        p1, p2 = node_points[n1], node_points[n2]
        height = max(np.linalg.norm(p2 - p1) - 2 * shrink, 0)
        #if height == 0: # Cannot keep this here
        #    continue
        center = (p1 + p2) / 2
        # extents = (p2 - p1) / 2

        delta = p2 - p1
        x, y, z = delta
        phi = np.math.atan2(y, x)
        theta = np.math.acos(z / np.linalg.norm(delta))
        quat = quat_from_euler(Euler(pitch=theta, yaw=phi))
        # p1 is z=-height/2, p2 is z=+height/2

        # Much smaller than cylinder
        # Also faster, C_shape 177.398 vs 400
        body = create_box(diameter,
                          diameter,
                          height,
                          color=color,
                          mass=STATIC_MASS)
        set_color(body, color)
        set_point(body, center)
        set_quat(body, quat)
        #dump_body(body, fixed=True)

        # Visually, smallest diameter is 2e-3
        # The geometries and bounding boxes seem correct though
        # TODO: create_cylinder takes in a radius not diameter
        #body = create_cylinder(ELEMENT_DIAMETER, height, color=color, mass=STATIC_MASS)
        #print('Diameter={:.5f} | Height={:.5f}'.format(ELEMENT_DIAMETER/2., height))
        #print(get_aabb_extent(get_aabb(body)).round(6).tolist())
        #print(get_visual_data(body))
        #print(get_collision_data(body))
        #set_point(body, center)
        #set_quat(body, quat)
        #draw_aabb(get_aabb(body))

        element_bodies.append(body)
        #wait_for_user()
    return element_bodies
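
A small sketch of the expected inputs: node_points is a sequence of 3D points and each element is a pair of indices into it; the tiny two-bar frame below is purely illustrative.
node_points = [np.array([0., 0., 0.]),
               np.array([0., 0., 1.]),
               np.array([1., 0., 1.])]
elements = [(0, 1), (1, 2)]                  # two bars sharing node 1
bodies = create_elements_bodies(node_points, elements)
print('Created {} element bodies'.format(len(bodies)))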
Example #10
def create_beads(num, radius, parameters={}, uniform_color=None, init_x=10.0):
    beads = []
    if num <= 0:
        return beads
    #timestep = 1/1200. # 1/350.
    #p.setTimeStep(timestep, physicsClientId=get_client())
    for i in range(num):
        color = apply_alpha(
            np.random.random(3)) if uniform_color is None else uniform_color
        body = create_sphere(radius, color=color)  # TODO: other shapes
        #set_color(droplet, color)
        #print(get_dynamics_info(droplet))
        set_dynamics(body, **parameters)
        x = init_x + i * (2 * radius + 1e-2)
        set_point(body, Point(x=x))
        beads.append(body)
    return beads
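
A minimal sketch of calling the factory above; the dynamics parameters shown are illustrative values forwarded to set_dynamics.
connect(use_gui=False)
beads = create_beads(20, radius=0.0025,
                     parameters={'lateralFriction': 0.5},    # illustrative; passed through to set_dynamics
                     uniform_color=apply_alpha(BLUE, alpha=1))
print('Created {} beads parked in a row near x=10'.format(len(beads)))
disconnect()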
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-shape',
                        default='box',
                        choices=['box', 'sphere', 'cylinder', 'capsule'])
    parser.add_argument('-video', action='store_true')
    args = parser.parse_args()
    video = 'video.mp4' if args.video else None

    connect(use_gui=True, mp4=video)
    if video is not None:
        set_renderer(enable=False)

    draw_global_system()
    set_camera_pose(camera_point=Point(+1.5, -1.5, +1.5),
                    target_point=Point(-1.5, +1.5, 0))

    add_data_path()
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    set_point(plane, Point(z=-1e-3))

    ramp = create_ramp()
    dump_body(ramp)

    obj = create_object(args.shape)
    set_euler(obj, np.random.uniform(-np.math.radians(1), np.math.radians(1),
                                     3))
    set_point(obj, np.random.uniform(-1e-3, +1e-3, 3))
    #set_velocity(obj, linear=Point(x=-1))
    set_position(obj, z=2.)
    #set_position(obj, z=base_aligned_z(obj))
    dump_body(obj)

    #add_pose_constraint(obj, pose=(target_point, target_quat), max_force=200)

    set_renderer(enable=True)
    if video is None:
        wait_if_gui('Begin?')
    simulate(controller=condition_controller(lambda step:
                                             (step != 0) and at_rest(obj)))
    if video is None:
        wait_if_gui('Finish?')
    disconnect()
Example #12
def estimate_spoon_capacity(world, spoon_name, beads, max_beads=100):
    beads = beads[:max_beads]
    if not beads:
        return 0
    bead_radius = np.average(approximate_as_prism(beads[0])) / 2.
    spoon_body = world.get_body(spoon_name)
    spoon_mass = get_mass(spoon_body)
    set_mass(spoon_body, STATIC_MASS)
    set_point(spoon_body, (1, 0, 1))
    set_quat(spoon_body, get_liquid_quat(spoon_name))
    capacity_beads = fill_with_beads(world,
                                     spoon_name,
                                     beads,
                                     reset_contained=True,
                                     fix_outside=False,
                                     height_fraction=1.0,
                                     top_threshold=bead_radius)
    #wait_for_user()
    set_mass(spoon_body, spoon_mass)
    return len(capacity_beads)
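
A sketch of how the estimate above might be invoked; world and spoon_name are assumed to come from the surrounding pipeline.
beads = create_beads(100, radius=0.004)                       # bead factory from an earlier example
capacity = estimate_spoon_capacity(world, spoon_name, beads)  # spoon_name assumed registered with world
print('Estimated capacity of {}: {} beads'.format(spoon_name, capacity))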
Example #13
def load_world(use_floor=True):
    obstacles = []
    #side, height = 10, 0.01
    robot = load_robot()
    with HideOutput():
        lower, _ = get_aabb(robot)
        if use_floor:
            #floor = load_model('models/short_floor.urdf', fixed_base=True)
            #add_data_path()
            #floor = load_pybullet('plane.urdf', fixed_base=True)
            #set_color(floor, TAN)
            #floor = create_box(w=side, l=side, h=height, color=apply_alpha(GROUND_COLOR))
            floor = create_plane(color=apply_alpha(GROUND_COLOR))
            obstacles.append(floor)
            #set_point(floor, Point(z=lower[2]))
            set_point(floor, Point(x=1.2, z=0.023 - 0.025))  # -0.02
        else:
            floor = None  # TODO: make this an empty list of obstacles
        #set_all_static()
    return obstacles, robot
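
A minimal sketch of driving the loader above; load_robot is assumed to be defined alongside it in the same module.
connect(use_gui=True)
obstacles, robot = load_world(use_floor=True)
print('Obstacles:', obstacles)   # just the ground plane when use_floor=True
wait_if_gui()
disconnect()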
Example #14
def main(floor_width=2.0):
    # Creates a pybullet world and a visualizer for it
    connect(use_gui=True)
    identity_pose = (unit_point(), unit_quat())
    origin_handles = draw_pose(
        identity_pose, length=1.0
    )  # Draws the origin coordinate system (x:RED, y:GREEN, z:BLUE)

    # Bodies are described by an integer index
    floor = create_box(w=floor_width, l=floor_width, h=0.001,
                       color=TAN)  # Creates a tan box object for the floor
    set_point(floor,
              [0, 0, -0.001 / 2.])  # Sets the [x,y,z] translation of the floor

    obstacle = create_box(w=0.5, l=0.5, h=0.1,
                          color=RED)  # Creates a red box obstacle
    set_point(
        obstacle,
        [0.5, 0.5, 0.1 / 2.])  # Sets the [x,y,z] position of the obstacle
    print('Position:', get_point(obstacle))
    set_euler(obstacle, [0, 0, np.pi / 4])  # Sets the [roll,pitch,yaw] orientation of the obstacle
    print('Orientation:', get_euler(obstacle))

    # Temporarily prevents the renderer from updating for improved loading efficiency
    with LockRenderer():
        with HideOutput():  # Temporarily suppresses pybullet output
            robot = load_model(ROOMBA_URDF)  # Loads a robot from a *.urdf file
            robot_z = stable_z(
                robot, floor
            )  # Returns the z offset required for robot to be placed on floor
            set_point(robot,
                      [0, 0, robot_z])  # Sets the z position of the robot
    dump_body(robot)  # Prints joint and link information about robot
    set_all_static()

    # Joints are also described by an integer index
    # The turtlebot has explicit joints representing x, y, theta
    x_joint = joint_from_name(robot, 'x')  # Looks up the robot joint named 'x'
    y_joint = joint_from_name(robot, 'y')  # Looks up the robot joint named 'y'
    theta_joint = joint_from_name(
        robot, 'theta')  # Looks up the robot joint named 'theta'
    joints = [x_joint, y_joint, theta_joint]

    base_link = link_from_name(
        robot, 'base_link')  # Looks up the robot link named 'base_link'
    world_from_obstacle = get_pose(
        obstacle
    )  # Returns the pose of the origin of obstacle wrt the world frame
    obstacle_aabb = get_subtree_aabb(obstacle)
    draw_aabb(obstacle_aabb)

    random.seed(0)  # Sets the random number generator state
    handles = []
    for i in range(10):
        for handle in handles:
            remove_debug(handle)
        print('\nIteration: {}'.format(i))
        x = random.uniform(-floor_width / 2., floor_width / 2.)
        set_joint_position(robot, x_joint,
                           x)  # Sets the current value of the x joint
        y = random.uniform(-floor_width / 2., floor_width / 2.)
        set_joint_position(robot, y_joint,
                           y)  # Sets the current value of the y joint
        yaw = random.uniform(-np.pi, np.pi)
        set_joint_position(robot, theta_joint,
                           yaw)  # Sets the current value of the theta joint
        values = get_joint_positions(
            robot,
            joints)  # Obtains the current values for the specified joints
        print('Joint values: [x={:.3f}, y={:.3f}, yaw={:.3f}]'.format(*values))

        world_from_robot = get_link_pose(
            robot,
            base_link)  # Returns the pose of base_link wrt the world frame
        position, quaternion = world_from_robot  # Decomposing pose into position and orientation (quaternion)
        x, y, z = position  # Decomposing position into x, y, z
        print('Base link position: [x={:.3f}, y={:.3f}, z={:.3f}]'.format(
            x, y, z))
        euler = euler_from_quat(
            quaternion)  # Converting from quaternion to euler angles
        roll, pitch, yaw = euler  # Decomposing orientation into roll, pitch, yaw
        print('Base link orientation: [roll={:.3f}, pitch={:.3f}, yaw={:.3f}]'.
              format(roll, pitch, yaw))
        handles.extend(
            draw_pose(world_from_robot, length=0.5)
        )  # Draws the base coordinate system (x:RED, y:GREEN, z:BLUE)
        obstacle_from_robot = multiply(
            invert(world_from_obstacle),
            world_from_robot)  # Relative transformation from robot to obstacle

        robot_aabb = get_subtree_aabb(
            robot,
            base_link)  # Computes the robot's axis-aligned bounding box (AABB)
        lower, upper = robot_aabb  # Decomposing the AABB into the lower and upper extrema
        center = (lower + upper) / 2.  # Computing the center of the AABB
        extent = upper - lower  # Computing the dimensions of the AABB
        handles.extend(draw_aabb(robot_aabb))

        collision = pairwise_collision(
            robot, obstacle
        )  # Checks whether robot is currently colliding with obstacle
        print('Collision: {}'.format(collision))
        wait_for_duration(1.0)  # Like sleep() but also updates the viewer
    wait_for_user()  # Like raw_input() but also updates the viewer

    # Destroys the pybullet world
    disconnect()
Example #15
def main():
    connect(use_gui=True)
    add_data_path()

    set_camera(0, -30, 1)
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    cup = load_model('models/cup.urdf', fixed_base=True)
    #set_point(cup, Point(z=stable_z(cup, plane)))
    set_point(cup, Point(z=.2))
    set_color(cup, (1, 0, 0, .4))

    num_droplets = 100
    #radius = 0.025
    #radius = 0.005
    radius = 0.0025
    # TODO: more efficient ways to make all of these
    droplets = [create_sphere(radius, mass=0.01)
                for _ in range(num_droplets)]  # kg
    cup_thickness = 0.001

    lower, upper = get_lower_upper(cup)
    print(lower, upper)
    buffer = cup_thickness + radius
    lower = np.array(lower) + buffer * np.ones(len(lower))
    upper = np.array(upper) - buffer * np.ones(len(upper))

    limits = list(zip(lower, upper))  # list() so the pairs can be sliced under Python 3
    x_range, y_range = limits[:2]
    z = upper[2] + 0.1
    #x_range = [-1, 1]
    #y_range = [-1, 1]
    #z = 1
    for droplet in droplets:
        x = np.random.uniform(*x_range)
        y = np.random.uniform(*y_range)
        set_point(droplet, Point(x, y, z))

    for i, droplet in enumerate(droplets):
        x, y = np.random.normal(0, 1e-3, 2)
        set_point(droplet, Point(x, y, z + i * (2 * radius + 1e-3)))

    #dump_world()
    wait_for_user()

    #user_input('Start?')
    enable_gravity()
    simulate_for_duration(5.0)

    # enable_real_time()
    # try:
    #     while True:
    #         enable_gravity() # enable_real_time requires a command
    #         #time.sleep(dt)
    # except KeyboardInterrupt:
    #     pass
    # print()

    #time.sleep(1.0)
    wait_for_user('Finish?')
    disconnect()
Example #16
def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)
    #ycb_path = os.path.join(DRAKE_PATH, DRAKE_YCB)
    #ycb_path = os.path.join(YCB_PATH, YCB_TEMPLATE.format('003_cracker_box'))
    #print(ycb_path)
    #load_pybullet(ycb_path)

    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))
        table = create_table(width=TABLE_WIDTH,
                             length=TABLE_WIDTH / 2,
                             height=TABLE_WIDTH / 2,
                             top_color=TAN,
                             cylinder=False)
        #set_euler(table, Euler(yaw=np.pi/2))
        with HideOutput(False):
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(
                WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            #robot_path = get_file_path(__file__, 'mit_arch_suction_gripper/mit_arch_suction_gripper.urdf')
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)
            #robot = create_cylinder(radius=0.5*BLOCK_SIDE, height=4*BLOCK_SIDE) # vacuum gripper

        block1 = create_box(w=BLOCK_SIDE,
                            l=BLOCK_SIDE,
                            h=BLOCK_SIDE,
                            color=RED)
        block_z = stable_z(block1, table)
        set_point(block1, Point(z=block_z))

        block2 = create_box(w=BLOCK_SIDE,
                            l=BLOCK_SIDE,
                            h=BLOCK_SIDE,
                            color=GREEN)
        set_point(block2, Point(x=+0.25, z=block_z))

        block3 = create_box(w=BLOCK_SIDE,
                            l=BLOCK_SIDE,
                            h=BLOCK_SIDE,
                            color=BLUE)
        set_point(block3, Point(x=-0.15, z=block_z))

        blocks = [block1, block2, block3]

        add_line(Point(x=-TABLE_WIDTH / 2,
                       z=block_z - BLOCK_SIDE / 2 + EPSILON),
                 Point(x=+TABLE_WIDTH / 2,
                       z=block_z - BLOCK_SIDE / 2 + EPSILON),
                 color=RED)
        set_camera_pose(camera_point=Point(y=-1, z=block_z + 1),
                        target_point=Point(z=block_z))

    wait_for_user()
    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)

    y_grasp, x_grasp = get_top_grasps(block1,
                                      tool_pose=unit_pose(),
                                      grasp_length=0.03,
                                      under=False)
    grasp = y_grasp  # fingers won't collide
    gripper_pose = multiply(block_pose, invert(grasp))
    set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
    wait_for_user('Finish?')
    disconnect()
Example #17
def main():
    # TODO: update this example

    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(DRAKE_PR2_URDF)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']),
                        [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))
    draw_point(target_point)

    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    #head_link = child_link_from_joint(head_joints[-1])
    #head_name = get_link_name(pr2, head_link)

    head_name = 'high_def_optical_frame'  # HEAD_LINK_NAME | high_def_optical_frame | high_def_frame
    head_link = link_from_name(pr2, head_name)

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_generator = visible_base_generator(pr2, target_point, distance_range)

    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)

        handles = [
            add_line(point_from_pose(get_link_pose(pr2, head_link)),
                     target_point,
                     color=RED),
            add_line(point_from_pose(
                get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                     target_point,
                     color=BLUE),
        ]

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2,
                                       target_point,
                                       head_name=head_name,
                                       head_joints=head_joints)
        assert head_conf is not None
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_if_gui()
        remove_body(detect_cone)
        remove_body(view_cone)

    disconnect()
Example #18
def problem1(n_obstacles=10, wall_side=0.1, obst_width=0.25, obst_height=0.5):
    floor_extent = 5.0
    base_limits = (-floor_extent/2.*np.ones(2),
                   +floor_extent/2.*np.ones(2))

    floor_height = 0.001
    floor = create_box(floor_extent, floor_extent, floor_height, color=TAN)
    set_point(floor, Point(z=-floor_height/2.))

    wall1 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
    set_point(wall1, Point(y=floor_extent/2., z=wall_side/2.))
    wall2 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
    set_point(wall2, Point(y=-floor_extent/2., z=wall_side/2.))
    wall3 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
    set_point(wall3, Point(x=floor_extent/2., z=wall_side/2.))
    wall4 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
    set_point(wall4, Point(x=-floor_extent/2., z=wall_side/2.))
    walls = [wall1, wall2, wall3, wall4]

    initial_surfaces = OrderedDict()
    for _ in range(n_obstacles):
        body = create_box(obst_width, obst_width, obst_height, color=GREY)
        initial_surfaces[body] = floor
    obstacles = walls + list(initial_surfaces)

    initial_conf = np.array([+floor_extent/3, -floor_extent/3, 3*PI/4])
    goal_conf = -initial_conf

    with HideOutput():
        robot = load_model(TURTLEBOT_URDF)
        base_joints = joints_from_names(robot, BASE_JOINTS)
        # base_link = child_link_from_joint(base_joints[-1])
        base_link = link_from_name(robot, BASE_LINK_NAME)
        set_all_color(robot, BLUE)
    dump_body(robot)
    set_point(robot, Point(z=stable_z(robot, floor)))
    draw_pose(Pose(), parent=robot, parent_link=base_link, length=0.5)
    set_joint_positions(robot, base_joints, initial_conf)

    sample_placements(initial_surfaces, obstacles=[robot] + walls,
                      savers=[BodySaver(robot, joints=base_joints, positions=goal_conf)],
                      min_distances=10e-2)

    return robot, base_limits, goal_conf, obstacles
Example #19
def visualize_vector_field(learner, bowl_type='blue_bowl', cup_type='red_cup',
                           num=500, delta=False, alpha=0.5):
    print(learner, len(learner.results))
    xx, yy, ww = learner.xx, learner.yy, learner.weights
    learner.hyperparameters = get_parameters(learner.model)
    print(learner.hyperparameters)
    learner.query_type = REJECTION  # BEST | REJECTION | STRADDLE

    world = create_world()
    world.bodies[bowl_type] = load_cup_bowl(bowl_type)
    world.bodies[cup_type] = load_cup_bowl(cup_type)
    feature = get_pour_feature(world, bowl_type, cup_type)
    set_point(world.bodies[cup_type], np.array([0.2, 0, 0]))

    # TODO: visualize training data as well
    # TODO: artificially limit training data to make a low-confidence region
    # TODO: visualize mean and var at the same time using intensity and hue
    print('Feature:', feature)
    with LockRenderer():
        #for parameter in islice(learner.parameter_generator(world, feature, valid=True), num):
        parameters = []
        while len(parameters) < num:
            parameter = learner.sample_parameter()
            # TODO: special color if invalid
            if is_valid_pour(world, feature, parameter):
                parameters.append(parameter)

    center, _ = approximate_as_prism(world.get_body(cup_type))
    bodies = []
    with LockRenderer():
        for parameter in parameters:
            path, _ = pour_path_from_parameter(world, feature, parameter)
            pose = path[0]
            body = create_cylinder(radius=0.001, height=0.02, color=apply_alpha(GREY, alpha))
            set_pose(body, multiply(pose, Pose(point=center)))
            bodies.append(body)

    #train_sizes = inclusive_range(10, 100, 1)
    #train_sizes = inclusive_range(10, 100, 10)
    #train_sizes = inclusive_range(100, 1000, 100)
    #train_sizes = [1000]
    train_sizes = [None]

    # training_poses = []
    # for result in learner.results[:train_sizes[-1]]:
    #     path, _ = pour_path_from_parameter(world, feature, result['parameter'])
    #     pose = path[0]
    #     training_poses.append(pose)

    # TODO: draw the example as well
    scores_per_size = []
    for train_size in train_sizes:
        if train_size is not None:
            learner.xx, learner.yy, learner.weights = xx[:train_size], yy[:train_size],  ww[:train_size]
            learner.retrain()
        X = np.array([learner.func.x_from_feature_parameter(feature, parameter) for parameter in parameters])
        predictions = learner.predict_x(X, noise=False) # Noise is just a constant
        scores_per_size.append([prediction['mean'] for prediction in predictions]) # mean | variance
        #scores_per_size.append([1./np.sqrt(prediction['variance']) for prediction in predictions])
        #scores_per_size.append([prediction['mean'] / np.sqrt(prediction['variance']) for prediction in predictions])
        #plt.hist(scores_per_size[-1])
        #plt.show()
        #scores_per_size.append([scipy.stats.norm.interval(alpha=0.95, loc=prediction['mean'],
        #                                                  scale=np.sqrt(prediction['variance']))[0]
        #                         for prediction in predictions]) # mean | variance
        # score = learner.score(feature, parameter)

    if delta:
        scores_per_size = [np.array(s2) - np.array(s1) for s1, s2 in zip(scores_per_size, scores_per_size[1:])]
        train_sizes = train_sizes[1:]

    all_scores = [score for scores in scores_per_size for score in scores]
    #interval = (min(all_scores), max(all_scores))
    interval = scipy.stats.norm.interval(alpha=0.95, loc=np.mean(all_scores), scale=np.std(all_scores))
    #interval = DEFAULT_INTERVAL
    print('Interval:', interval)
    print('Mean: {:.3f} | Stdev: {:.3f}'.format(np.mean(all_scores), np.std(all_scores)))

    #train_body = create_cylinder(radius=0.002, height=0.02, color=apply_alpha(BLACK, 1.0))
    for i, (train_size, scores) in enumerate(zip(train_sizes, scores_per_size)):
        print('Train size: {} | Average: {:.3f} | Min: {:.3f} | Max: {:.3f}'.format(
            train_size, np.mean(scores), min(scores), max(scores)))
        handles = []
        # TODO: visualize delta
        with LockRenderer():
            #if train_size is None:
            #    train_size = len(learner.results)
            #set_pose(train_body, multiply(training_poses[train_size-1], Pose(point=center)))
            #set_pose(world.bodies[cup_type], training_poses[train_size-1])
            for score, body in zip(scores, bodies):
                score = clip(score, *interval)
                fraction = rescale(score, interval, new_interval=(0, 1))
                color = interpolate_hue(fraction)
                #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN) # TODO: convex combination
                set_color(body, apply_alpha(color, alpha))
                #set_pose(world.bodies[cup_type], pose)
                #draw_pose(pose, length=0.05)
                #handles.extend(draw_point(tform_point(pose, center), color=color))
                #extent = np.array([0, 0, 0.01])
                #start = tform_point(pose, center - extent/2)
                #end = tform_point(pose, center + extent/2)
                #handles.append(add_line(start, end, color=color, width=1))
        wait_for_duration(0.5)
        # TODO: test on boundaries
    wait_for_user()
Example #20
def problem1(n_obstacles=10, wall_side=0.1, obst_width=0.25, obst_height=0.5):
    floor_extent = 5.0
    base_limits = (-floor_extent / 2. * np.ones(2),
                   +floor_extent / 2. * np.ones(2))

    floor_height = 0.001
    floor = create_box(floor_extent, floor_extent, floor_height, color=TAN)
    set_point(floor, Point(z=-floor_height / 2.))

    wall1 = create_box(floor_extent + wall_side,
                       wall_side,
                       wall_side,
                       color=GREY)
    set_point(wall1, Point(y=floor_extent / 2., z=wall_side / 2.))
    wall2 = create_box(floor_extent + wall_side,
                       wall_side,
                       wall_side,
                       color=GREY)
    set_point(wall2, Point(y=-floor_extent / 2., z=wall_side / 2.))
    wall3 = create_box(wall_side,
                       floor_extent + wall_side,
                       wall_side,
                       color=GREY)
    set_point(wall3, Point(x=floor_extent / 2., z=wall_side / 2.))
    wall4 = create_box(wall_side,
                       floor_extent + wall_side,
                       wall_side,
                       color=GREY)
    set_point(wall4, Point(x=-floor_extent / 2., z=wall_side / 2.))
    wall5 = create_box(obst_width, obst_width, obst_height, color=GREY)
    set_point(wall5, Point(z=obst_height / 2.))
    walls = [wall1, wall2, wall3, wall4, wall5]

    initial_surfaces = OrderedDict()
    for _ in range(n_obstacles - 1):
        body = create_box(obst_width, obst_width, obst_height, color=GREY)
        initial_surfaces[body] = floor
    pillars = list(initial_surfaces)
    obstacles = walls + pillars

    initial_conf = np.array([+floor_extent / 3, -floor_extent / 3, 3 * PI / 4])
    goal_conf = -initial_conf

    robot = load_pybullet(TURTLEBOT_URDF,
                          fixed_base=True,
                          merge=True,
                          sat=False)
    base_joints = joints_from_names(robot, BASE_JOINTS)
    # base_link = child_link_from_joint(base_joints[-1])
    base_link = link_from_name(robot, BASE_LINK_NAME)
    set_all_color(robot, BLUE)
    dump_body(robot)
    set_point(robot, Point(z=stable_z(robot, floor)))
    #set_point(robot, Point(z=base_aligned_z(robot)))
    draw_pose(Pose(), parent=robot, parent_link=base_link, length=0.5)
    set_joint_positions(robot, base_joints, initial_conf)

    sample_placements(
        initial_surfaces,
        obstacles=[robot] + walls,
        savers=[BodySaver(robot, joints=base_joints, positions=goal_conf)],
        min_distances=10e-2)

    # The first calls appear to be the slowest
    # times = []
    # for body1, body2 in combinations(pillars, r=2):
    #     start_time = time.time()
    #     colliding = pairwise_collision(body1, body2)
    #     runtime = elapsed_time(start_time)
    #     print(colliding, runtime)
    #     times.append(runtime)
    # print(times)

    return robot, base_limits, goal_conf, obstacles
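
A sketch of how either problem1 variant above is typically consumed; the actual base motion planning call is omitted.
connect(use_gui=True)
robot, base_limits, goal_conf, obstacles = problem1(n_obstacles=10)
base_joints = joints_from_names(robot, BASE_JOINTS)
print('Base limits:', base_limits)
print('Goal base configuration:', goal_conf)
# A planner would search from the current values of base_joints to goal_conf
# within base_limits while avoiding obstacles (not shown here).
wait_if_gui()
disconnect()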
Example #21
    def __init__(self,
                 robot_name=FRANKA_CARTER,
                 use_gui=True,
                 full_kitchen=False):
        self.task = None
        self.interface = None
        self.client = connect(use_gui=use_gui)
        set_real_time(False)
        #set_caching(False) # Seems to make things worse
        disable_gravity()
        add_data_path()
        set_camera_pose(camera_point=[2, -1.5, 1])
        if DEBUG:
            draw_pose(Pose(), length=3)

        #self.kitchen_yaml = load_yaml(KITCHEN_YAML)
        with HideOutput(enable=True):
            self.kitchen = load_pybullet(KITCHEN_PATH,
                                         fixed_base=True,
                                         cylinder=True)

        self.floor = load_pybullet('plane.urdf', fixed_base=True)
        z = stable_z(self.kitchen, self.floor) - get_point(self.floor)[2]
        point = np.array(get_point(self.kitchen)) - np.array([0, 0, z])
        set_point(self.floor, point)

        self.robot_name = robot_name
        if self.robot_name == FRANKA_CARTER:
            urdf_path, yaml_path = FRANKA_CARTER_PATH, None
            #urdf_path, yaml_path = FRANKA_CARTER_PATH, FRANKA_YAML
        #elif self.robot_name == EVE:
        #    urdf_path, yaml_path = EVE_PATH, None
        else:
            raise ValueError(self.robot_name)
        self.robot_yaml = yaml_path if yaml_path is None else load_yaml(
            yaml_path)
        with HideOutput(enable=True):
            self.robot = load_pybullet(urdf_path)
        #dump_body(self.robot)
        #chassis_pose = get_link_pose(self.robot, link_from_name(self.robot, 'chassis_link'))
        #wheel_pose = get_link_pose(self.robot, link_from_name(self.robot, 'left_wheel_link'))
        #wait_for_user()
        set_point(self.robot, Point(z=stable_z(self.robot, self.floor)))
        self.set_initial_conf()
        self.gripper = create_gripper(self.robot)

        self.environment_bodies = {}
        if full_kitchen:
            self._initialize_environment()
        self._initialize_ik(urdf_path)
        self.initial_saver = WorldSaver()

        self.body_from_name = {}
        # self.path_from_name = {}
        self.names_from_type = {}
        self.custom_limits = {}
        self.base_limits_handles = []
        self.cameras = {}

        self.disabled_collisions = set()
        if self.robot_name == FRANKA_CARTER:
            self.disabled_collisions.update(
                tuple(link_from_name(self.robot, link) for link in pair)
                for pair in DISABLED_FRANKA_COLLISIONS)

        self.carry_conf = FConf(self.robot, self.arm_joints, self.default_conf)
        #self.calibrate_conf = Conf(self.robot, self.arm_joints, load_calibrate_conf(side='left'))
        self.calibrate_conf = FConf(
            self.robot, self.arm_joints,
            self.default_conf)  # Must differ from carry_conf
        self.special_confs = [self.carry_conf]  #, self.calibrate_conf]
        self.open_gq = FConf(self.robot, self.gripper_joints,
                             get_max_limits(self.robot, self.gripper_joints))
        self.closed_gq = FConf(self.robot, self.gripper_joints,
                               get_min_limits(self.robot, self.gripper_joints))
        self.gripper_confs = [self.open_gq, self.closed_gq]
        self.open_kitchen_confs = {
            joint: FConf(self.kitchen, [joint], [self.open_conf(joint)])
            for joint in self.kitchen_joints
        }
        self.closed_kitchen_confs = {
            joint: FConf(self.kitchen, [joint], [self.closed_conf(joint)])
            for joint in self.kitchen_joints
        }
        self._update_custom_limits()
        self._update_initial()
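
A sketch of constructing this world object; the enclosing class name (World here) is an assumption, since only its __init__ is shown.
world = World(robot_name=FRANKA_CARTER, use_gui=True, full_kitchen=True)  # class name assumed
# world.robot, world.kitchen, world.carry_conf, etc. are now available for planning
disconnect()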
Example #22
def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)

    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))

        table1 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table1, Point(y=+0.5))

        table2 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table2, Point(y=-0.5))

        tables = [table1, table2]

        #set_euler(table1, Euler(yaw=np.pi/2))
        with HideOutput():
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table1)
        set_point(block1, Point(y=-0.5, z=block_z))

        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=-0.25, y=-0.5, z=block_z))

        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, y=+0.5, z=block_z))

        blocks = [block1, block2, block3]

        set_camera_pose(camera_point=Point(x=-1, z=block_z+1), target_point=Point(z=block_z))

    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    grasps = get_side_grasps(block1, tool_pose=Pose(euler=Euler(yaw=np.pi/2)),
                             top_offset=0.02, grasp_length=0.03, under=False)[1:2]
    for grasp in grasps:
        gripper_pose = multiply(block_pose, invert(grasp))
        set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
        wait_for_user()

    wait_for_user('Finish?')
    disconnect()
Example #23
def main(use_turtlebot=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('-sim', action='store_true')
    parser.add_argument('-video', action='store_true')
    args = parser.parse_args()
    video = 'video.mp4' if args.video else None

    connect(use_gui=True, mp4=video)
    #set_renderer(enable=False)
    # print(list_pybullet_data())
    # print(list_pybullet_robots())

    draw_global_system()
    set_camera_pose(camera_point=Point(+1.5, -1.5, +1.5),
                    target_point=Point(-1.5, +1.5, 0))

    plane = load_plane()
    #door = load_pybullet('models/door.urdf', fixed_base=True) # From drake
    #set_point(door, Point(z=-.1))
    door = create_door()
    #set_position(door, z=base_aligned_z(door))
    set_point(door, base_aligned(door))
    #set_collision_margin(door, link=0, margin=0.)
    set_configuration(door, [math.radians(-5)])
    dump_body(door)

    door_joint = get_movable_joints(door)[0]
    door_link = child_link_from_joint(door_joint)
    #draw_pose(get_com_pose(door, door_link), parent=door, parent_link=door_link)
    draw_pose(Pose(), parent=door, parent_link=door_link)
    wait_if_gui()

    ##########

    start_x = +2
    target_x = -start_x
    if not use_turtlebot:
        side = 0.25
        robot = create_box(w=side, l=side, h=side, mass=5., color=BLUE)
        set_position(robot, x=start_x)
        #set_velocity(robot, linear=Point(x=-1))
    else:
        turtlebot_urdf = os.path.abspath(TURTLEBOT_URDF)
        print(turtlebot_urdf)
        #print(read(turtlebot_urdf))
        robot = load_pybullet(turtlebot_urdf, merge=True, fixed_base=True)
        robot_joints = get_movable_joints(robot)[:3]
        set_joint_positions(robot, robot_joints, [start_x, 0, PI])
    set_all_color(robot, BLUE)
    set_position(robot, z=base_aligned_z(robot))
    dump_body(robot)

    ##########

    set_renderer(enable=True)
    #test_door(door)
    if args.sim:
        test_simulation(robot, target_x, video)
    else:
        assert use_turtlebot  # TODO: extend to the block
        test_kinematic(robot, door, target_x)
    disconnect()
Example #24
def add_obstacles():
    ceiling = create_box(w=0.2, l=0.2, h=0.01, color=RED)
    set_point(ceiling, Point(z=0.2))
    pole = create_cylinder(radius=0.025, height=0.1, color=RED)
    set_point(pole, Point(z=0.15))