Example #1
def test_urdfpy(tmpdir):
    outfn = tmpdir.mkdir('urdf').join('ur5.urdf').strpath

    # Load
    u = URDF.load('tests/data/ur5/ur5.urdf')

    assert isinstance(u, URDF)
    for j in u.joints:
        assert isinstance(j, Joint)
    for l in u.links:
        assert isinstance(l, Link)
    for t in u.transmissions:
        assert isinstance(t, Transmission)
    for m in u.materials:
        assert isinstance(m, Material)

    # Test fk
    fk = u.link_fk()
    assert isinstance(fk, dict)
    for l in fk:
        assert isinstance(l, Link)
        assert isinstance(fk[l], np.ndarray)
        assert fk[l].shape == (4, 4)

    # Test save
    u.save(outfn)

    nu = URDF.load(outfn)
    assert len(u.links) == len(nu.links)
    assert len(u.joints) == len(nu.joints)
Example #2
    def register_object(self, body_id, urdf_path, global_scaling=1):
        link_id_map = dict()
        n = p.getNumJoints(body_id)
        link_id_map[p.getBodyInfo(body_id)[0].decode('gb2312')] = -1
        for link_id in range(0, n):
            link_id_map[p.getJointInfo(body_id, link_id)[
                12].decode('gb2312')] = link_id

        dir_path = dirname(abspath(urdf_path))
        file_name = splitext(basename(urdf_path))[0]
        robot = URDF.load(urdf_path)
        for link in robot.links:
            link_id = link_id_map[link.name]
            if len(link.visuals) > 0:
                for i, link_visual in enumerate(link.visuals):
                    mesh_scale = [global_scaling,
                                  global_scaling, global_scaling]\
                        if link_visual.geometry.mesh.scale is None \
                        else link_visual.geometry.mesh.scale * global_scaling
                    self.links.append(
                        PyBulletRecorder.LinkTracker(
                            name=file_name + f'_{body_id}_{link.name}_{i}',
                            body_id=body_id,
                            link_id=link_id,
                            # If link_id == -1 this is the base link; PyBullet
                            # reports its pose as inertial_origin @ visual_origin,
                            # so that inertial transform must be undone here.
                            link_origin=(np.linalg.inv(link.inertial.origin)
                                         if link_id == -1
                                         else np.identity(4))
                            @ link_visual.origin * global_scaling,
                            mesh_path=dir_path + '/' +
                            link_visual.geometry.mesh.filename,
                            mesh_scale=mesh_scale))
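The inline comment above explains the base-link correction: when link_id == -1, PyBullet reports the base pose as inertial_origin @ visual_origin, so the inertial part has to be inverted away before tracking the visual. A minimal sketch of just that correction, assuming `link` is a urdfpy Link with an inertial origin and at least one visual:

import numpy as np

def base_link_visual_origin(link):
    # Invert the 4x4 inertial origin before applying the visual origin;
    # urdfpy exposes both as homogeneous transform matrices.
    return np.linalg.inv(link.inertial.origin) @ link.visuals[0].origin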
Example #3
    def __init__(self, obstacles) -> None:
        self.robot = URDF.load('data/xarm6/xarm6.urdf')
        self.spaces = RealVectorSpace(6)
        self.robot_cm = CollisionManager()
        self.env_cm = CollisionManager()
        self.start_config = [0, 0, 0, 0, 0, 0]

        self.traj = []
        self.count_i = 0
        self.curr_q = [0, 0, 0, 0, 0, 0]

        cfg = self.get_config([0, 0, 0, 0, 0, 0])
        self.init_poses = [0 for i in range(6)]
        #print(cfg)
        fk = self.robot.collision_trimesh_fk(cfg=cfg)
        #fk = self.robot.visual_trimesh_fk(cfg=cfg)

        # adding robot to the scene
        for i, tm in enumerate(fk):
            pose = fk[tm]
            name = "link_" + str(i + 1)
            self.init_poses.append(pose)
            self.robot_cm.add_object(name, tm, pose)

        for i, ob in enumerate(obstacles):
            self.env_cm.add_object("obstacle_" + str(i), ob)
        for i, ob in enumerate(obstacles[:-1]):
            self.env_cm.add_object("prediction_" + str(i), ob)
Example #4
    def __init__(self,
                 path_to_urdf,
                 active_chain_definition,
                 use_collision=False,
                 cfg=None,
                 visualisation=True,
                 off_screen=False,
                 ego_view=True):

        self.off_screen = off_screen
        self.r_model = URDF.load(path_to_urdf)
        self.use_collision = use_collision
        self.cfg = cfg
        self.viewer = None
        self.active_chain_definition = active_chain_definition
        self.scene = None
        self.rend = None
        self.r_node_map = {}
        self.visualisation = visualisation
        if visualisation:
            logger.debug("robot visualisation scene created")
            self.create_scene(off_screen=off_screen, ego_view=ego_view)
        else:
            self.scene = None
        self.target_node = None
Example #5
    def _setup_urdf(self):
        # Parse the URDF
        # TODO: Need to fix this absolute path
        self.urdf = URDF.load(osp.join(os.environ['HOME'],
                                       # 'ws_moveit/src/odium/urdf/pr2.urdf'))
                                       'workspaces/odium_ws/src/odium/urdf/pr2.urdf'))
        self.link_names = [link.name for link in self.urdf.links]
        self.jmap = {}
        for j in self.robot.get_joint_names():
            if j != 'world_joint':
                self.jmap[j] = self.robot.get_joint(j).value()

        # self.group.get_end_effector_link()
        self.end_link = 'l_gripper_motor_slider_link'
        self.cache_fk = {}
        return True
Example #6
    def add_object(self, urdf_fn, obj_id, globalScaling=1.0):
        # Load urdf file by urdfpy
        robot = URDF.load(urdf_fn)

        for link_id, link in enumerate(robot.links):
            if len(link.visuals) == 0:
                continue
            link_id = link_id - 1
            # Get the first visual of each link
            visual = link.visuals[0]
            obj_trimesh = visual.geometry.meshes[0]

            # Set mesh color to default (remove texture)
            obj_trimesh.visual = trimesh.visual.ColorVisuals()

            # Set the initial origin (the PyBullet pose already accounts for the initial origin's position, but not its orientation)
            pose = visual.origin

            # Scale if it is mesh object (e.g. STL, OBJ file)
            mesh = visual.geometry.mesh
            if mesh is not None and mesh.scale is not None:
                S = np.eye(4, dtype=np.float64)
                S[:3, :3] = np.diag(mesh.scale)
                pose = pose.dot(S)

            # Apply inertial origin if applicable
            inertial = link.inertial
            if inertial is not None and inertial.origin is not None:
                pose = np.linalg.inv(inertial.origin).dot(pose)

            # Set global scaling
            pose = np.diag([globalScaling] * 3 + [1]).dot(pose)

            obj_trimesh = obj_trimesh.apply_transform(pose)
            obj_name = "{}_{}".format(obj_id, link_id)

            self.objects[obj_name] = Link(obj_id, link_id, self.cid)
            position, orientation = self.objects[obj_name].get_pose()

            # Add object in pyrender
            self.renderer.add_object(
                obj_trimesh,
                obj_name,
                position=position,  # [-0.015, 0, 0.0235],
                orientation=orientation,  # [0, 0, 0],
            )
Example #7
    def register_object(self, body_id, path):
        full_path = abspath(path)
        dir_path = dirname(full_path)
        file_name = splitext(basename(path))[0]
        robot = URDF.load(path)
        num_joints = p.getNumJoints(body_id)
        assert num_joints + 1 == len(robot.links)
        # The base link (link_id -1 in PyBullet) is tracked the same way
        # whether or not the body has further links.
        self.links.append(
            PybulletRecorder.LinkTracker(
                name=file_name + f'_{body_id}_root',
                body_id=body_id,
                link_id=-1,
                link_origin=robot.links[0].visuals[0].origin,
                mesh_path=dir_path + '/' +
                robot.links[0].visuals[0].geometry.mesh.filename,
                mesh_scale=robot.links[0].visuals[0].geometry.mesh.scale))
        for link_id, link in enumerate(robot.links):
            if link_id == 0:
                continue
            link_id -= 1
            if len(link.visuals) > 0:
                if p.getLinkState(body_id, link_id) is not None \
                        and link.visuals[0].geometry.mesh:
                    # hard-coded for Robotiq
                    self.links.append(
                        PybulletRecorder.LinkTracker(
                            name=file_name + f'_{body_id}_{link.name}',
                            body_id=body_id,
                            link_id=link_id,
                            link_origin=link.visuals[0].origin,
                            mesh_path=dir_path + '/' +
                            link.visuals[0].geometry.mesh.filename,
                            mesh_scale=link.visuals[0].geometry.mesh.scale))
Example #8
def urdf_from_path(path_to_urdf: str) -> URDF:

    for file in os.listdir(path_to_urdf):
        if is_urdf_file(file):
            with open(os.path.join(path_to_urdf, file)) as f:
                s = f.read()
            break
    else:
        raise Arcor2UrdfException("Failed to find urdf file.")

    s = s.replace("package://", "")

    buff = io.BytesIO(s.encode())
    buff.name = os.path.join(path_to_urdf, file)

    try:
        return URDF.load(buff)
    except (ValueError, TypeError) as e:
        raise Arcor2UrdfException(str(e)) from e
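A hedged usage sketch of urdf_from_path; the directory below is a hypothetical placeholder and must contain a .urdf file plus the meshes its package:// references resolve to:

robot = urdf_from_path('/path/to/robot_package')  # hypothetical directory
print(robot.name)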
Example #9
def import_urdf(name, path):
    # add '.urdf' extension if it was not added by the user
    if not name.endswith(".urdf"):
        name += ".urdf"

    urdf = URDF.load("{}/{}".format(path, name))
    root = urdf.base_link.name

    shapes = {}
    for link in urdf.links:
        if len(link.collisions) == 0:
            continue
        shapes[link.name] = parse_link(link)

    tfs = []
    final_shapes = []
    for joint in urdf.joints:
        if joint.parent == root:
            tfs.append(joint.origin)
            final_shapes.append(shapes[joint.child])
    return Scene(final_shapes, tfs)
Example #10
    def __init__(self, obstacles) -> None:
        self.robot = URDF.load('data/ur5/ur5.urdf')
        self.spaces = RealVectorSpace(6)
        self.robot_cm = CollisionManager()
        self.env_cm = CollisionManager()
        cfg = self.get_config([0, 0, 0, 0, 0, 0])
        self.init_poses = [0 for i in range(6)]
        #print(cfg)
        fk = self.robot.collision_trimesh_fk(cfg=cfg)
        #fk = self.robot.visual_trimesh_fk(cfg=cfg)

        # adding robot to the scene
        for i, tm in enumerate(fk):
            pose = fk[tm]
            name = "link_" + str(i + 1)
            self.init_poses.append(pose)
            self.robot_cm.add_object(name, tm, pose)

        table = cylinder(radius=0.7, height=0.02)
        table.apply_translation([0, 0, -0.015])
        obstacles.append(table)
        for i, ob in enumerate(obstacles):
            self.env_cm.add_object("obstacle_" + str(i), ob)
Example #11
    def __init__(self, obstacles) -> None:
        self.robot = URDF.load('data/two_planar/2dof_planar.urdf')
        self.spaces = RealVectorSpace(2)
        self.robot_cm = CollisionManager()
        self.env_cm = CollisionManager()
        self.start_config = [0, 0]

        self.traj = []
        self.count_i = 0
        self.curr_q = [0, 0]

        cfg = self.get_config([0, 0])
        fk = self.robot.link_fk(cfg=cfg)
        self.init_poses = []
        for i, tm in enumerate(fk):
            pose = fk[tm]
            init_pose, link_mesh = self.get_link_mesh(tm)
            self.init_poses.append(init_pose)
            self.robot_cm.add_object(
                tm.name, link_mesh, np.matmul(pose, init_pose))

        for i, ob in enumerate(obstacles):
            self.env_cm.add_object("obstacle_" + str(i), ob)
            self.env_cm.add_object("prediction_" + str(i), ob)
Example #12
from ikpy.chain import Chain
from ikpy.URDF_utils import get_chain_from_joints
from urdfpy import URDF
import math
import sys


def ang(offset_deg):
    abs_deg = 150 + offset_deg
    abs_rad = math.pi * abs_deg / 180
    return abs_rad


DARWIN_URDF = 'darwin.urdf'
robot = URDF.load(DARWIN_URDF)
# Use joint names (not Joint objects) as keys, since cfg is indexed by name below.
joint_names = [j.name for j in robot.joints if j.joint_type != 'fixed']
default_angle = math.pi * 150 / 180
cfg = dict(zip(joint_names, [default_angle] * len(joint_names)))
cfg['neck_joint'] = ang(-60)
cfg['l_shoulder_joint'] = ang(-60)
cfg['l_biceps_joint'] = ang(60)
cfg['l_elbow_joint'] = ang(-120)  # limit: 0-150
cfg['r_shoulder_joint'] = ang(-60)
cfg['r_biceps_joint'] = ang(-60)
cfg['r_elbow_joint'] = ang(120)  # limit:150-300
cfg['l_hip_joint'] = ang(30)
cfg['l_thigh_joint'] = ang(-60)
cfg['l_knee_joint'] = ang(30)
cfg['l_ankle_joint'] = ang(30)
cfg['l_foot_joint'] = ang(-30)
cfg['r_hip_joint'] = ang(-30)
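Once the configuration dict is complete, it can be fed to urdfpy's forward kinematics. A hedged follow-up sketch, reusing robot and cfg from the snippet above (link_fk accepts a {joint_name: value} mapping, as Example #15 also shows):

# Hedged sketch: compute forward kinematics for the configuration built above.
fk = robot.link_fk(cfg=cfg)
for link, pose in fk.items():
    print(link.name, pose[:3, 3])  # world-frame position of each link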
Example #13
            'joint_head_tilt': head_tilt_rad
        }

        return configuration

# #######################################################################################

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Python based URDF visualization')
    parser.add_argument("--motion", help="Turn motor on", action="store_true")
    parser.add_argument("--fake", help="Show fake robot", action="store_true")
    args = parser.parse_args()

    urdf_file = os.path.join(os.environ['HELLO_FLEET_PATH'], os.environ['HELLO_FLEET_ID'], 'exported_urdf/stretch.urdf')
    controller_parameters_filename = os.path.join(os.environ['HELLO_FLEET_PATH'], os.environ['HELLO_FLEET_ID'], 'exported_urdf/controller_calibration_head.yaml')
    robot_model = URDF.load(urdf_file)

    if args.motion:
        cfg_trajectory = {
            'joint_left_wheel': [0.0, math.pi],
            'joint_right_wheel': [0.0, math.pi],
            'joint_lift': [0.9, 0.7],
            'joint_arm_l0': [0.0, 0.1],
            'joint_arm_l1': [0.0, 0.1],
            'joint_arm_l2': [0.0, 0.1],
            'joint_arm_l3': [0.0, 0.1],
            'joint_wrist_yaw': [math.pi, 0.0],
            'joint_gripper_finger_left': [0.0, 0.25],
            'joint_gripper_finger_right': [0.0, 0.25],
            'joint_head_pan': [0.0, -(math.pi / 2.0)],
            'joint_head_tilt': [0.5, -0.5]
Example #14
    target_output = torch.from_numpy(right_hand_data.iloc[:WINDOW_SIZE].values)
    target_output = target_output.repeat(BATCH_SIZE, 1, 1)
    target_output.requires_grad = False
    true_cfg = joint_data_to_urdf_joint_state(data=target_output, data_keys=adl_right_hand_joint_names, hand_keys=RightHand)

    # Instantiation of loss module and configuration ________________________________________________________________
    # As some of the links in our URDF do not have an equivalent in the dataset, they have to be removed
    urdf_relevant_links = ['palm_base', 'palm_link', 'thumb_knuckle_link', 'thumb_proximal_link',
                           'thumb_middle_link', 'index_knuckle_link', 'index_proximal_link',
                           'index_middle_link', 'middle_knuckle_link', 'middle_proximal_link',
                           'middle_middle_link', 'ring_knuckle_link', 'ring_proximal_link',
                           'ring_middle_link', 'little_knuckle_link', 'little_proximal_link',
                           'little_middle_link']

    # An instance of the hand URDF is required
    urdf = URDF.load('robots/right_hand_relative.urdf')

    # Instantiation of the loss module, which supports two types of loss functions:
    # 1
    fk_loss = CartesianSpaceLoss(urdf, loss_type='frobenius', relevant_links=urdf_relevant_links)
    # 2
    # fk_loss = CartesianSpaceLoss(urdf, loss_type='rot_only', relevant_links=urdf_relevant_links)

    start = timeit.default_timer()
    # Loss forward pass example:
    loss = fk_loss(pred_cfg, true_cfg)
    stop = timeit.default_timer()
    print('\nLoss forward elapsed time (CPU) for batch size %d and window size %s: %.5fs' % (BATCH_SIZE, WINDOW_SIZE, stop - start))

    # Test that the gradients are being computed and propagated appropriately _____________________________________
    loss.backward()
Example #15
def test_urdfpy(tmpdir):
    outfn = tmpdir.mkdir('urdf').join('ur5.urdf').strpath

    # Load
    u = URDF.load('tests/data/ur5/ur5.urdf')

    assert isinstance(u, URDF)
    for j in u.joints:
        assert isinstance(j, Joint)
    for l in u.links:
        assert isinstance(l, Link)
    for t in u.transmissions:
        assert isinstance(t, Transmission)
    for m in u.materials:
        assert isinstance(m, Material)

    # Test fk
    fk = u.link_fk()
    assert isinstance(fk, dict)
    for l in fk:
        assert isinstance(l, Link)
        assert isinstance(fk[l], np.ndarray)
        assert fk[l].shape == (4, 4)

    fk = u.link_fk({'shoulder_pan_joint': 2.0})
    assert isinstance(fk, dict)
    for l in fk:
        assert isinstance(l, Link)
        assert isinstance(fk[l], np.ndarray)
        assert fk[l].shape == (4, 4)

    fk = u.link_fk(np.zeros(6))
    assert isinstance(fk, dict)
    for l in fk:
        assert isinstance(l, Link)
        assert isinstance(fk[l], np.ndarray)
        assert fk[l].shape == (4, 4)

    fk = u.link_fk(np.zeros(6), link='upper_arm_link')
    assert isinstance(fk, np.ndarray)
    assert fk.shape == (4, 4)

    fk = u.link_fk(links=['shoulder_link', 'upper_arm_link'])
    assert isinstance(fk, dict)
    assert len(fk) == 2
    for l in fk:
        assert isinstance(l, Link)
        assert isinstance(fk[l], np.ndarray)
        assert fk[l].shape == (4, 4)

    fk = u.link_fk(links=list(u.links)[:2])
    assert isinstance(fk, dict)
    assert len(fk) == 2
    for l in fk:
        assert isinstance(l, Link)
        assert isinstance(fk[l], np.ndarray)
        assert fk[l].shape == (4, 4)

    cfg = {j.name: 0.5 for j in u.actuated_joints}
    for _ in range(1000):
        fk = u.collision_trimesh_fk(cfg=cfg)
        for key in fk:
            assert isinstance(fk[key], np.ndarray)
            assert fk[key].shape == (4, 4)

    cfg = {j.name: np.random.uniform(size=1000) for j in u.actuated_joints}
    fk = u.link_fk_batch(cfgs=cfg)
    for key in fk:
        assert isinstance(fk[key], np.ndarray)
        assert fk[key].shape == (1000, 4, 4)

    cfg = {j.name: 0.5 for j in u.actuated_joints}
    for _ in range(1000):
        fk = u.collision_trimesh_fk(cfg=cfg)
        for key in fk:
            assert isinstance(key, trimesh.Trimesh)
            assert fk[key].shape == (4, 4)

    cfg = {j.name: np.random.uniform(size=1000) for j in u.actuated_joints}
    fk = u.collision_trimesh_fk_batch(cfgs=cfg)
    for key in fk:
        assert isinstance(key, trimesh.Trimesh)
        assert fk[key].shape == (1000, 4, 4)

    # Test save
    u.save(outfn)

    nu = URDF.load(outfn)
    assert len(u.links) == len(nu.links)
    assert len(u.joints) == len(nu.joints)

    # Test join
    with pytest.raises(ValueError):
        x = u.join(u, link=u.link_map['tool0'])
    x = u.join(u, link=u.link_map['tool0'], name='copy', prefix='prefix')
    assert isinstance(x, URDF)
    assert x.name == 'copy'
    assert len(x.joints) == 2 * len(u.joints) + 1
    assert len(x.links) == 2 * len(u.links)

    # Test scale
    x = u.copy(scale=3)
    assert isinstance(x, URDF)
    x = x.copy(scale=[1, 1, 3])
    assert isinstance(x, URDF)
Example #16
from urdfpy import URDF
robot = URDF.load(
    "/home/frank/Documents/can_motor/two_arm_neck/two_arm_neck.urdf")
for link in robot.links:
    print(link.name)
for joint in robot.joints:
    print('{} connects {} to {}'.format(joint.name, joint.parent, joint.child))
for joint in robot.actuated_joints:
    print(joint.name)

#fk = robot.link_fk(cfg={'shoulder_pan_joint' : 1.0})
#print(fk[robot.links[1]])
Example #17
def calibrate_robot(
    robot_joints: list[Joint],
    robot_pose: Pose,
    camera_pose: Pose,
    camera_parameters: CameraParameters,
    robot: URDF,
    depth_image: Image,
    draw_results: bool = False,
) -> Pose:

    logger.info("Creating robot model...")

    fk = robot.visual_trimesh_fk(cfg={joint.name: joint.value for joint in robot_joints})

    robot_tr_matrix = robot_pose.as_tr_matrix()

    res_mesh = o3d.geometry.TriangleMesh()

    for tm in fk:
        pose = fk[tm]
        mesh = o3d.geometry.TriangleMesh(
            vertices=o3d.utility.Vector3dVector(tm.vertices), triangles=o3d.utility.Vector3iVector(tm.faces)
        )
        mesh.transform(np.dot(robot_tr_matrix, pose))
        mesh.compute_vertex_normals()
        res_mesh += mesh

    mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1)

    sim_pcd = res_mesh.sample_points_uniformly(int(1e5))

    camera_tr_matrix = camera_pose.as_tr_matrix()
    camera_matrix = camera_parameters.as_camera_matrix()
    depth = depth_image_to_np(depth_image)

    wh = (depth_image.width, depth_image.height)

    dist = np.array(camera_parameters.dist_coefs)
    newcameramtx, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, dist, wh, 1, wh)
    depth = cv2.undistort(depth, camera_matrix, dist, None, newcameramtx)

    real_pcd = o3d.geometry.PointCloud.create_from_depth_image(
        o3d.geometry.Image(depth),
        o3d.camera.PinholeCameraIntrinsic(
            depth_image.width,
            depth_image.height,
            camera_parameters.fx,
            camera_parameters.fy,
            camera_parameters.cx,
            camera_parameters.cy,
        ),
    )

    real_pcd.transform(camera_tr_matrix)

    bb = sim_pcd.get_axis_aligned_bounding_box()
    real_pcd = real_pcd.crop(bb.scale(1.25, bb.get_center()))

    # logger.info("Outlier removal...")
    # real_pcd = real_pcd.remove_radius_outlier(nb_points=50, radius=0.01)[0]

    sim_pcd = sim_pcd.select_by_index(sim_pcd.hidden_point_removal(np.array(list(camera_pose.position)), 500)[1])

    print("Estimating normals...")
    real_pcd.estimate_normals()

    if draw_results:
        o3d.visualization.draw_geometries([sim_pcd, real_pcd, mesh_frame])

    threshold = 1.0
    trans_init = np.identity(4)

    """
    evaluation = o3d.pipelines.registration.evaluate_registration(
        sim_pcd,
        real_pcd,
        threshold,
        trans_init,
    )

    logger.info(evaluation)
    """

    logger.info("Applying point-to-plane robust ICP...")

    loss = o3d.pipelines.registration.TukeyLoss(k=0.25)

    p2l = o3d.pipelines.registration.TransformationEstimationPointToPlane(loss)
    reg_p2p = o3d.pipelines.registration.registration_icp(
        sim_pcd,
        real_pcd,
        threshold,
        trans_init,
        p2l,
        o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100),
    )

    logger.info(reg_p2p)
    logger.debug(reg_p2p.transformation)

    # TODO somehow check if calibration is ok

    if draw_results:
        draw_registration_result(sim_pcd, real_pcd, trans_init, reg_p2p.transformation)

    robot_tr_matrix = np.dot(reg_p2p.transformation, robot_tr_matrix)
    pose = Pose.from_tr_matrix(robot_tr_matrix)

    logger.info("Done")

    return pose
Example #18
from urdfpy import URDF

robot = URDF.load('olympian.urdf')

for link in robot.links:
    print(link.name)

print(robot.base_link.name)
Example #19
def setup_animation_scene(real_traj,
                          pred_traj,
                          urdf_path,
                          hand_offset,
                          loop_time,
                          real_color,
                          pred_color_cmap,
                          background_color,
                          reverse=False):
    pred_trajectories = pred_traj if isinstance(pred_traj,
                                                list) else [pred_traj]
    real_hand = URDF.load(urdf_path)
    pred_hands = []

    for i, traj in enumerate(pred_trajectories):
        pred_hands.append(URDF.load(urdf_path))
        pred_origin = pred_hands[i].joint_map[
            'palm_joint_abduction'].origin[:3, 3]
        pred_hands[i].joint_map[
            'palm_joint_abduction'].origin[:3, 3] = pred_origin + np.array(
                [hand_offset * (i + 1), 0.0, 0.0])

    traj_len = None  # Length of the trajectory in steps

    # Validate the real and predicted trajectories and extract their common length.
    for pred_traj in pred_trajectories:
        if isinstance(real_traj, dict) and isinstance(pred_traj, dict):
            if len(real_traj) > 0:
                for joint_name in real_traj:
                    if len(real_traj[joint_name]) != len(
                            pred_traj[joint_name]):
                        raise ValueError(
                            'Real and pred trajectories [%s] must be same length'
                            % joint_name)
                    elif traj_len is None:
                        traj_len = len(real_traj[joint_name])
                    elif traj_len != len(real_traj[joint_name]):
                        raise ValueError(
                            'All joint trajectories must be same length')
        else:
            raise TypeError(
                'Invalid type for trajectories real[%s], pred[%s]' %
                (type(real_traj), type(pred_traj)))

    # Create an array of times that loops from 0 to 1 and back to 0
    fps = 30.0

    # Create the new interpolated trajectory
    if reverse:
        n_steps = int(loop_time * 2 * fps)
        times = np.linspace(0.0, 1.0, n_steps)
        times = np.hstack((times, np.flip(times)))
        # Create bin edges in the range [0, 1] for each trajectory step
        bins = np.arange(traj_len) / (float(traj_len) - 1.0)

        # Compute alphas for each time
        right_inds = np.digitize(times, bins, right=True)
        right_inds[right_inds == 0] = 1
        alphas = ((bins[right_inds] - times) /
                  (bins[right_inds] - bins[right_inds - 1]))

        for joint_name in real_traj:
            real_traj[joint_name] = (
                alphas * real_traj[joint_name][right_inds - 1] +
                (1.0 - alphas) * real_traj[joint_name][right_inds])
            # Compute all
            for pred_traj in pred_trajectories:
                pred_traj[joint_name] = (
                    alphas * pred_traj[joint_name][right_inds - 1] +
                    (1.0 - alphas) * pred_traj[joint_name][right_inds])
    else:
        n_steps = int(loop_time * fps)
        times = np.linspace(0.0, 1.0, n_steps)

    # Create the scene
    fk_real = real_hand.visual_trimesh_fk()
    fk_preds = [pred_hand.visual_trimesh_fk() for pred_hand in pred_hands]

    node_map = {}
    scene = pyrender.Scene(bg_color=background_color)
    # Spawn ground truth hand
    for tm_real in fk_real:
        # Little hack to overcome the urdfpy bug of ignoring URDF materials for .stl meshes
        tm_real._visual.face_colors = tm_real._visual.face_colors * 0 + real_color

        # Real hand nodes
        pose = fk_real[tm_real]
        real_mesh = pyrender.Mesh.from_trimesh(tm_real, smooth=False)
        node = scene.add(real_mesh, pose=pose)
        node_map[tm_real] = node

    # Spawn prediction hands
    pred_color = []
    for color_code in np.linspace(0, 1, len(pred_trajectories)):
        pred_color.append(pred_color_cmap(color_code, bytes=True))

    for i, fk_pred in enumerate(fk_preds):
        for tm_pred in fk_pred:
            # Little hack to overcome the urdfpy bug of ignoring URDF materials for .stl meshes
            tm_pred._visual.face_colors = tm_pred._visual.face_colors * 0 + pred_color[
                i]

            # Pred hand nodes
            pose = fk_pred[tm_pred]
            pred_mesh = pyrender.Mesh.from_trimesh(tm_pred, smooth=False)
            node = scene.add(pred_mesh, pose=pose)
            node_map[tm_pred] = node

    # Get base pose to focus on
    origin = real_hand.link_fk(
        links=[real_hand.base_link])[real_hand.base_link]

    return scene, origin, node_map, real_hand, pred_hands, pred_trajectories, times, fps
Example #20
def yumi2graph(urdf_file, cfg):
    # load URDF
    robot = URDF.load(urdf_file)

    # parse joint params
    joints = {}
    for joint in robot.joints:
        # joint attributes
        joints[joint.name] = {
            'type': joint.joint_type,
            'axis': joint.axis,
            'parent': joint.parent,
            'child': joint.child,
            'origin': matrix_to_xyz_rpy(joint.origin),
            'lower': joint.limit.lower if joint.limit else -float('Inf'),
            'upper': joint.limit.upper if joint.limit else float('Inf')
        }

    # debug msg
    # for name, attr in joints.items():
    #     print(name, attr)

    # skeleton type & topology type
    skeleton_type = 0
    topology_type = 0

    # collect edge index & edge feature
    joints_name = cfg['joints_name']
    joints_index = {name: i for i, name in enumerate(joints_name)}
    edge_index = []
    edge_attr = []
    for edge in cfg['edges']:
        parent, child = edge
        # add edge index
        edge_index.append(
            torch.LongTensor([joints_index[parent], joints_index[child]]))
        # add edge attr
        edge_attr.append(torch.Tensor(joints[child]['origin']))
    edge_index = torch.stack(edge_index, dim=0)
    edge_index = edge_index.permute(1, 0)
    edge_attr = torch.stack(edge_attr, dim=0)
    # print(edge_index, edge_attr, edge_index.shape, edge_attr.shape)

    # number of nodes
    num_nodes = len(joints_name)

    # end effector mask
    ee_mask = torch.zeros(len(joints_name), 1).bool()
    for ee in cfg['end_effectors']:
        ee_mask[joints_index[ee]] = True

    # shoulder mask
    sh_mask = torch.zeros(len(joints_name), 1).bool()
    for sh in cfg['shoulders']:
        sh_mask[joints_index[sh]] = True

    # elbow mask
    el_mask = torch.zeros(len(joints_name), 1).bool()
    for el in cfg['elbows']:
        el_mask[joints_index[el]] = True

    # node parent
    parent = -torch.ones(len(joints_name)).long()
    for edge in edge_index.permute(1, 0):
        parent[edge[1]] = edge[0]

    # node offset
    offset = torch.stack(
        [torch.Tensor(joints[joint]['origin']) for joint in joints_name],
        dim=0)
    # change root offset to store init pose
    init_pose = {}
    fk = robot.link_fk()
    for link, matrix in fk.items():
        init_pose[link.name] = matrix_to_xyz_rpy(matrix)
    origin = torch.zeros(6)
    for root in cfg['root_name']:
        offset[joints_index[root]] = torch.Tensor(
            init_pose[joints[root]['child']])
        origin[:3] += offset[joints_index[root]][:3]
    origin /= 2
    # move relative to origin
    for root in cfg['root_name']:
        offset[joints_index[root]] -= origin
    # print(offset, offset.shape)

    # dist to root
    root_dist = torch.zeros(len(joints_name), 1)
    for node_idx in range(len(joints_name)):
        dist = 0
        current_idx = node_idx
        while current_idx != -1:
            origin = offset[current_idx]
            offsets_mod = math.sqrt(origin[0]**2 + origin[1]**2 + origin[2]**2)
            dist += offsets_mod
            current_idx = parent[current_idx]
        root_dist[node_idx] = dist
    # print(root_dist, root_dist.shape)

    # dist to shoulder
    shoulder_dist = torch.zeros(len(joints_name), 1)
    for node_idx in range(len(joints_name)):
        dist = 0
        current_idx = node_idx
        while current_idx != -1 and joints_name[current_idx] not in cfg[
                'shoulders']:
            origin = offset[current_idx]
            offsets_mod = math.sqrt(origin[0]**2 + origin[1]**2 + origin[2]**2)
            dist += offsets_mod
            current_idx = parent[current_idx]
        shoulder_dist[node_idx] = dist
    # print(shoulder_dist, shoulder_dist.shape)

    # dist to elbow
    elbow_dist = torch.zeros(len(joints_name), 1)
    for node_idx in range(len(joints_name)):
        dist = 0
        current_idx = node_idx
        while current_idx != -1 and joints_name[current_idx] not in cfg[
                'elbows']:
            origin = offset[current_idx]
            offsets_mod = math.sqrt(origin[0]**2 + origin[1]**2 + origin[2]**2)
            dist += offsets_mod
            current_idx = parent[current_idx]
        elbow_dist[node_idx] = dist
    # print(elbow_dist, elbow_dist.shape)

    # rotation axis
    axis = [torch.Tensor(joints[joint]['axis']) for joint in joints_name]
    axis = torch.stack(axis, dim=0)

    # joint limit
    lower = [torch.Tensor([joints[joint]['lower']]) for joint in joints_name]
    lower = torch.stack(lower, dim=0)
    upper = [torch.Tensor([joints[joint]['upper']]) for joint in joints_name]
    upper = torch.stack(upper, dim=0)
    # print(lower.shape, upper.shape)

    # skeleton
    data = Data(x=torch.zeros(num_nodes, 1),
                edge_index=edge_index,
                edge_attr=edge_attr,
                skeleton_type=skeleton_type,
                topology_type=topology_type,
                ee_mask=ee_mask,
                sh_mask=sh_mask,
                el_mask=el_mask,
                root_dist=root_dist,
                shoulder_dist=shoulder_dist,
                elbow_dist=elbow_dist,
                num_nodes=num_nodes,
                parent=parent,
                offset=offset,
                axis=axis,
                lower=lower,
                upper=upper)

    # test forward kinematics
    # print(joints_name)
    # result = robot.link_fk(cfg={joint:0.0 for joint in joints_name})
    # for link, matrix in result.items():
    #     print(link.name, matrix)
    # import os, sys, inspect
    # currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    # parentdir = os.path.dirname(currentdir)
    # sys.path.insert(0,parentdir)
    # from models.kinematics import ForwardKinematicsURDF
    # fk = ForwardKinematicsURDF()
    # pos = fk(data.x, data.parent, data.offset, 1)

    # # visualize
    # import matplotlib.pyplot as plt
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.set_xlabel('X')
    # ax.set_ylabel('Y')
    # ax.set_zlabel('Z')

    # # plot 3D lines
    # for edge in edge_index.permute(1, 0):
    #     line_x = [pos[edge[0]][0], pos[edge[1]][0]]
    #     line_y = [pos[edge[0]][1], pos[edge[1]][1]]
    #     line_z = [pos[edge[0]][2], pos[edge[1]][2]]
    #     plt.plot(line_x, line_y, line_z, 'royalblue', marker='o')
    # # plt.show()
    # plt.savefig('foo.png')

    return data
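The cfg dict that yumi2graph consumes is not shown here. Below is a minimal sketch inferred purely from the keys the function reads; every joint name is a hypothetical placeholder, not the real YuMi layout. Two roots are used because the function averages the root offsets (origin /= 2):

# Hypothetical cfg; all joint names are placeholders.
cfg = {
    'joints_name': ['left_shoulder', 'left_elbow',
                    'right_shoulder', 'right_elbow'],
    'edges': [('left_shoulder', 'left_elbow'),
              ('right_shoulder', 'right_elbow')],
    'end_effectors': ['left_elbow', 'right_elbow'],
    'shoulders': ['left_shoulder', 'right_shoulder'],
    'elbows': ['left_elbow', 'right_elbow'],
    'root_name': ['left_shoulder', 'right_shoulder'],
}
# data = yumi2graph('yumi.urdf', cfg)  # the URDF path is an assumption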
Example #21
def loadRobotURDF(fn):
    return URDF.load(fn)
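A hedged usage sketch of the wrapper above; the path reuses the UR5 test fixture from Example #1 and is an assumption here:

robot = loadRobotURDF('tests/data/ur5/ur5.urdf')  # assumed fixture path
print(robot.name, len(robot.links), len(robot.joints))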