Example 1
def create_bounding_mesh(printed_elements, element_bodies=None, node_points=None, buffer=0.):
    """Creates a translucent convex-hull body that bounds the printed elements,
    optionally inflating each point by a cube of half-extent `buffer`."""
    # TODO: use bounding boxes instead of points
    # TODO: connected components
    assert printed_elements
    assert element_bodies or node_points
    printed_points = []
    if node_points is not None:
        printed_points.extend(node_points[n] for element in printed_elements for n in element)
    if element_bodies is not None:
        for element in printed_elements:
            body = element_bodies[element]
            printed_points.extend(apply_affine(get_pose(body), vertices_from_link(body, BASE_LINK)))

    if buffer != 0.:
        half_extents = buffer*np.ones(3)
        for point in list(printed_points):
            printed_points.extend(np.array(point) + np.array(corner)
                                  for corner in get_aabb_vertices(AABB(-half_extents, half_extents)))

    rgb = colorsys.hsv_to_rgb(h=random.random(), s=1, v=1)
    #rgb = RED
    try:
        mesh = convex_hull(printed_points)
        # handles = draw_mesh(mesh)
        return create_mesh(mesh, under=True, color=apply_alpha(rgb, 0.5))
    except QhullError as e:
        print(printed_elements)
        raise e
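
A hedged usage sketch for the function above, assuming the extrusion-style setup its signature implies: each element is a pair of node indices and node_points maps those indices to 3D coordinates. The data below is made up, and the call itself needs a connected pybullet client, so it is left commented out.

import numpy as np

node_points = [np.array([0., 0., 0.]), np.array([0., 0., 1.]), np.array([1., 0., 1.])]
printed_elements = {(0, 1), (1, 2)}  # elements already printed, as pairs of node indices
# hull_body = create_bounding_mesh(printed_elements, node_points=node_points, buffer=0.01)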
Example 2
def test_lis(world):
    """Loads a point cloud for the selected model, reconstructs a mesh from it,
    and displays the resulting body."""
    #model = 'oil'
    model = 'soup'

    point_path = os.path.join(LIS_DIRECTORY, 'clouds', CLOUDS[model])
    mesh_path = os.path.join(LIS_DIRECTORY, 'meshes',
                             MESHES[model])  # off | ply | wrl
    #ycb_path = os.path.join(DATA_DIRECTORY, 'ycb', 'plastic_wine_cup', 'meshes', 'tsdf.stl') # stl | ply
    ycb_path = os.path.join(LIS_DIRECTORY, 'ycb', 'plastic_wine_cup',
                            'textured_meshes',
                            'optimized_tsdf_texture_mapped_mesh.obj')  # ply

    print(point_path)
    print(mesh_path)
    print(ycb_path)

    #mesh = read_mesh_off(mesh_path, scale=0.001)
    #print(mesh)
    points = read_pcd_file(point_path)
    #print(points)
    #print(convex_hull(points))
    mesh = mesh_from_points(points)
    #print(mesh)

    body = create_mesh(mesh, color=(1, 0, 0, 1))
    #print(get_num_links(body))
    #print(mesh_from_body(body))

    #set_point(body, (1, 1, 1))
    wait_if_gui()
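
mesh_from_points above presumably performs a convex-hull reconstruction of the loaded cloud; a rough standalone sketch of that step using scipy (the helper name and the random cloud are illustrative, not part of the snippet):

import numpy as np
from scipy.spatial import ConvexHull

def hull_mesh_from_points(points):
    # convex-hull surface of a raw point cloud, returned as (vertices, triangular faces)
    hull = ConvexHull(np.asarray(points, dtype=float))
    index = {v: i for i, v in enumerate(hull.vertices)}
    vertices = hull.points[hull.vertices]
    faces = [tuple(index[v] for v in simplex) for simplex in hull.simplices]
    return vertices, faces

cloud = np.random.rand(500, 3)  # stand-in for the contents of the PCD file
vertices, faces = hull_mesh_from_points(cloud)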
Example 3
def main():
    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(
            "models/drake/pr2_description/urdf/pr2_simplified.urdf")
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']),
                        [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))

    # head_link = link_from_name(pr2, HEAD_LINK)
    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    head_link = head_joints[-1]  # in pybullet, a joint index doubles as the index of its child link

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_generator = visible_base_generator(pr2, target_point, distance_range)

    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)

        p.addUserDebugLine(point_from_pose(get_link_pose(pr2, head_link)),
                           target_point,
                           lineColorRGB=(1, 0, 0))  # addUserDebugText
        p.addUserDebugLine(point_from_pose(
            get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                           target_point,
                           lineColorRGB=(0, 0, 1))  # addUserDebugText

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2, target_point)
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_for_user()
        remove_body(detect_cone)
        remove_body(view_cone)

    disconnect()
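
inverse_visibility is presumably solving for a head configuration that points the camera at the target; the underlying geometry reduces to a pan/tilt "look-at". A minimal sketch under that assumption (the function name is made up and the sign conventions depend on the actual head frame):

import numpy as np

def look_at_pan_tilt(head_position, target_point):
    # pan about the vertical axis toward the target, then tilt to meet it
    dx, dy, dz = np.asarray(target_point, dtype=float) - np.asarray(head_position, dtype=float)
    pan = np.arctan2(dy, dx)
    tilt = np.arctan2(dz, np.hypot(dx, dy))
    return pan, tilt

pan, tilt = look_at_pan_tilt([0., 0., 1.2], [2., 0.5, 1.])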
Example 4
def add_table(world, use_surface=False):
    # Use the table in simulation to ensure no penetration
    if use_surface:
        # TODO: note whether the convex hull image was clipped (only partial)
        table_mesh = rectangular_mesh(width=0.6, length=1.2)
        color = apply_alpha(COLORS['tan'])
        table_body = create_mesh(table_mesh, under=True, color=color)
        set_pose(table_body, TABLE_POSE)
        world.perception.sim_bodies[TABLE_NAME] = table_body
        world.perception.sim_surfaces[TABLE_NAME] = None
    else:
        table_body = world.perception.add_surface(TABLE_NAME, TABLE_POSE)
    return table_body
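
rectangular_mesh(width=0.6, length=1.2) above presumably yields a flat two-triangle rectangle centered at the origin; a guess at an equivalent construction (the names here are illustrative):

import numpy as np

def rectangle_mesh_sketch(width=0.6, length=1.2):
    # four corners of a width x length rectangle in the z = 0 plane, centered at the origin
    w, l = width / 2., length / 2.
    vertices = np.array([[-w, -l, 0.], [w, -l, 0.], [w, l, 0.], [-w, l, 0.]])
    faces = [(0, 1, 2), (0, 2, 3)]  # two triangles covering the rectangle
    return vertices, faces

vertices, faces = rectangle_mesh_sketch()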
Example 5
def main():
    """Samples PR2 base placements from which the block is visible, aims the head at it,
    and visualizes the corresponding detection and view cones."""
    # TODO: update this example

    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(DRAKE_PR2_URDF)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']),
                        [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))
    draw_point(target_point)

    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    #head_link = child_link_from_joint(head_joints[-1])
    #head_name = get_link_name(pr2, head_link)

    head_name = 'high_def_optical_frame'  # HEAD_LINK_NAME | high_def_optical_frame | high_def_frame
    head_link = link_from_name(pr2, head_name)

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_generator = visible_base_generator(pr2, target_point, distance_range)

    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)

        handles = [
            add_line(point_from_pose(get_link_pose(pr2, head_link)),
                     target_point,
                     color=RED),
            add_line(point_from_pose(
                get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                     target_point,
                     color=BLUE),
        ]

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2,
                                       target_point,
                                       head_name=head_name,
                                       head_joints=head_joints)
        assert head_conf is not None
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_if_gui()
        remove_body(detect_cone)
        remove_body(view_cone)

    disconnect()
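
visible_base_generator is presumably yielding (x, y, yaw) base configurations on an annulus around the target, oriented toward it; a minimal sketch of that sampling under those assumptions (the helper name is made up):

import numpy as np

def sample_facing_base(target_point, distance_range, rng=np.random):
    # place the base at a random distance and heading around the target, facing it
    radius = rng.uniform(*distance_range)
    angle = rng.uniform(0., 2*np.pi)
    x = target_point[0] + radius*np.cos(angle)
    y = target_point[1] + radius*np.sin(angle)
    yaw = np.arctan2(target_point[1] - y, target_point[0] - x)
    return np.array([x, y, yaw])

base_conf = sample_facing_base([2., 0.5, 1.], (0.5, 1.0))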