Example #1
    def run(self):
        while not rospy.is_shutdown():
            # Wait until both an image and the camera calibration have arrived.
            if self.image is None:
                continue
            if not self.camera_info_received:
                continue

            if self.rectify_flag:
                self.rectify_image()
            list_pose_tag = self.detect_april_tag()
            # No AprilTag detected: fall back to the encoder-based pose estimate.
            if not list_pose_tag:
                if self.encoder_conf_flag:
                    trans_map_base_2D, theta_map_base_2D = translation_angle_from_SE2(
                        self.pose_map_base_SE2)
                    self.pose_map_base_SE2 = encoder_localization_client(
                        trans_map_base_2D[0], trans_map_base_2D[1],
                        theta_map_base_2D)
                self.tfs_msg_map_base.transform = self.pose2transform(
                    SE3_from_SE2(self.pose_map_base_SE2))
                self.tfs_msg_map_base.header.stamp = rospy.Time.now()
                self.pub_tf_fused_loc.publish(self.tfs_msg_map_base)
                self.br_map_base.sendTransformMessage(self.tfs_msg_map_base)
            else:
                # AprilTag detected: chain map -> tag -> camera -> base to get the robot pose.
                pose_dta_dtc = self.pose_inverse(list_pose_tag[0])
                pose_april_cam = self.T_a_dta @ pose_dta_dtc @ self.T_dtc_c
                pose_map_base = self.pose_map_at @ pose_april_cam @ self.pose_cam_base
                quat_map_base = quaternion_from_matrix(pose_map_base)
                euler = euler_from_quaternion(quat_map_base)
                theta = euler[2]
                rot_map_base, translation_map_base = rotation_translation_from_SE3(
                    pose_map_base)
                # Notify the encoder localization service of the vision-based pose
                # (the returned value is not used here).
                pose_map_base_SE2_enc = encoder_localization_client(
                    translation_map_base[0], translation_map_base[1], theta)
                self.encoder_conf_flag = True
                self.pose_map_base_SE2 = SE2_from_xytheta(
                    [translation_map_base[0], translation_map_base[1], theta])
                self.tfs_msg_map_base.transform = self.pose2transform(
                    SE3_from_SE2(self.pose_map_base_SE2))
                self.tfs_msg_map_base.header.stamp = rospy.Time.now()
                self.pub_tf_fused_loc.publish(self.tfs_msg_map_base)
                self.br_map_base.sendTransformMessage(self.tfs_msg_map_base)

                self.tfs_msg_map_april.header.stamp = self.image_timestamp
                self.static_tf_br_map_april.sendTransform(
                    self.tfs_msg_map_april)

                self.tfs_msg_cam_base.header.stamp = self.image_timestamp
                self.static_tf_br_cam_base.sendTransform(self.tfs_msg_cam_base)

                self.tfs_msg_april_cam.transform = self.pose2transform(
                    pose_april_cam)
                self.tfs_msg_april_cam.header.stamp = self.image_timestamp
                self.br_april_cam.sendTransformMessage(self.tfs_msg_april_cam)
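
The tag branch above chains three homogeneous transforms (map -> AprilTag, AprilTag -> camera, camera -> base) and then projects the result back to SE(2) before fusing it with the encoder estimate. A minimal, self-contained sketch of that pattern, using plain NumPy stand-ins for the SE2_from_xytheta / SE3_from_SE2 / translation_angle_from_SE2 helpers (assumed here to operate on homogeneous matrices; the calibration values below are made up for illustration):

import numpy as np

def se2_from_xytheta(x, y, theta):
    # Illustrative stand-in: 3x3 homogeneous SE(2) matrix.
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, x],
                     [s, c, y],
                     [0, 0, 1.0]])

def se3_from_se2(m2):
    # Embed a 3x3 SE(2) matrix into a 4x4 SE(3) matrix at z = 0.
    m3 = np.eye(4)
    m3[:2, :2] = m2[:2, :2]
    m3[:2, 3] = m2[:2, 2]
    return m3

def xytheta_from_se3(m3):
    # Project an SE(3) pose back onto the ground plane as (x, y, theta).
    return m3[0, 3], m3[1, 3], np.arctan2(m3[1, 0], m3[0, 0])

# Made-up calibration, purely for illustration.
pose_map_at = se3_from_se2(se2_from_xytheta(1.0, 0.5, np.pi / 2))    # map -> apriltag
pose_april_cam = se3_from_se2(se2_from_xytheta(0.2, 0.0, 0.0))       # apriltag -> camera
pose_cam_base = se3_from_se2(se2_from_xytheta(-0.05, 0.0, 0.0))      # camera -> base

# Same composition as in run(): map -> base.
pose_map_base = pose_map_at @ pose_april_cam @ pose_cam_base
print(xytheta_from_se3(pose_map_base))
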
Example #2
    def get_fused_pose(self, req):
        if req is None:
            pose_resp = None
        else:
            # Step the dead-reckoned pose from the requested (x, y, theta) and answer with it.
            pose = SE2_from_xytheta([req.x, req.y, req.theta])
            pose = self.db_kinematics.step(self.encoder_ticks_left_delta_t, self.encoder_ticks_right_delta_t, pose)
            trans, theta = translation_angle_from_SE2(pose)
            pose_resp = PoseResponse(trans[0], trans[1], theta)
            if not self.configure_flag:
                self.pose = pose
                self.configure_flag = True

        # Advance and broadcast the node's own dead-reckoned pose from the tick deltas.
        if self.configure_flag:
            self.pose = self.db_kinematics.step(self.encoder_ticks_left_delta_t, self.encoder_ticks_right_delta_t,
                                                self.pose)
            self.encoder_ticks_right_delta_t = 0
            self.encoder_ticks_left_delta_t = 0
            pose_SE3 = SE3_from_SE2(self.pose)
            rot, trans = rotation_translation_from_SE3(pose_SE3)
            quaternion_tf = quaternion_from_matrix(pose_SE3)
            quaternion = Quaternion(quaternion_tf[0], quaternion_tf[1], quaternion_tf[2], quaternion_tf[3])
            translation = Vector3(trans[0], trans[1], trans[2])
            trafo = Transform(translation, quaternion)
            self.tfs_msg.transform = trafo
            if self.vel_left == 0.0 and self.vel_right == 0.0:
                self.tfs_msg.header.stamp = rospy.Time.now()
            else:
                self.tfs_msg.header.stamp = self.encoder_timestamp
            self.br.sendTransformMessage(self.tfs_msg)
            self.pub_tf_enc_loc.publish(self.tfs_msg)
        return pose_resp
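
The run() loop in Example #1 calls encoder_localization_client(x, y, theta) and this handler answers with a PoseResponse. The client itself is not part of these examples; assuming it wraps a plain rospy service proxy and converts the response back into an SE(2) matrix, it could look roughly like the sketch below (the service name and the Pose srv module are hypothetical, not taken from the source):

import rospy
from geometry import SE2_from_xytheta          # geometry helpers as used throughout these examples
from fused_localization.srv import Pose        # hypothetical srv with x, y, theta in request and response

def encoder_localization_client(x, y, theta):
    # Ask the encoder node for its fused pose and return it as an SE(2) matrix.
    service_name = 'encoder_localization_node/get_fused_pose'  # assumed name
    rospy.wait_for_service(service_name)
    get_fused_pose = rospy.ServiceProxy(service_name, Pose)
    resp = get_fused_pose(x, y, theta)
    return SE2_from_xytheta([resp.x, resp.y, resp.theta])
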
Example #3
    def test_poses_SE2(self):
        from vehicles_dynamics import SE2Dynamics

        dynamics = SE2Dynamics(max_linear_velocity=[1, 1],
                               max_angular_velocity=1)

        dt = 0.1

        M = 1  # max
        Z = 0  # zero
        m = -1  # min

        tests = [
            # format   (  (start_xy, start_theta), commands,
            # (final_xy, final_theta))
            (([0, 0], 0), [Z, Z, Z], ([0, 0], 0)),
            (([1, 2], 3), [Z, Z, Z], ([1, 2], 3)),
            (([0, 0], 0), [M, Z, Z], ([dt, 0], 0)),
            (([0, 0], 0), [m, Z, Z], ([-dt, 0], 0)),
            (([0, 0], 0), [Z, M, Z], ([0, dt], 0)),
            (([0, 0], 0), [Z, m, Z], ([0, -dt], 0)),
            (([0, 0], 0), [Z, Z, M], ([0, 0], dt)),
            (([0, 0], 0), [Z, Z, m], ([0, 0], -dt)),
            (([0, 0], np.radians(90)), [M, Z, Z], ([0, dt], np.radians(90))),
            (([0, 0], np.radians(90)), [Z, M, Z], ([-dt, 0], np.radians(90)))

            # TODO: add some more tests with non-zero initial rotation
        ]

        for initial, commands, final in tests:
            start_pose = SE2_from_translation_angle(*initial)
            final_pose = SE2_from_translation_angle(*final)

            start_state = dynamics.pose2state(SE3_from_SE2(start_pose))
            final_state = dynamics.pose2state(SE3_from_SE2(final_pose))
            commands = np.array(commands)

            print(
                '%s -> [%s] -> %s' %
                (SE2.friendly(start_pose), commands, SE2.friendly(final_pose)))

            # Forward: integrating the commands for dt must reach the expected pose.
            actual, dummy = dynamics.integrate(start_state, +commands, dt)
            SE2.assert_close(actual, final_pose)

            # Backward: integrating the negated commands from the final state must return to the start.
            start2, dummy = dynamics.integrate(final_state, -commands, dt)
            SE2.assert_close(start_pose, start2)
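
The two assertions check that forward integration reproduces the expected pose and that integrating the negated commands from the final state returns to the start. Assuming SE2Dynamics integrates body-frame velocities with the SE(2) exponential map, the expected poses in the table can be reproduced standalone (a sketch, not the library's implementation):

import numpy as np
from scipy.linalg import expm

def se2(x, y, theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, x], [s, c, y], [0, 0, 1.0]])

def integrate_se2(pose, vx, vy, omega, dt):
    # Integrate a body-frame twist (vx, vy, omega) over dt on SE(2).
    twist = np.array([[0.0, -omega, vx],
                      [omega, 0.0, vy],
                      [0.0, 0.0, 0.0]])
    return pose @ expm(twist * dt)

dt = 0.1
# Start facing +y (theta = 90 deg) and drive forward at maximum linear velocity:
final = integrate_se2(se2(0, 0, np.radians(90)), 1, 0, 0, dt)
print(final[:2, 2])  # ~[0, dt], matching the corresponding test case above
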
Example #4
    def new_episode(self):
        ''' Returns an episode tuple, placing the robot at the
            configured start pose.
        '''
        p = self.start_pose
        x = p[0]
        y = p[1]
        th = np.deg2rad(p[2])
        vehicle_state = SE3_from_SE2(SE2_from_xytheta([x, y, th]))
        id_episode = unique_timestamp_string()
        return World.Episode(id_episode, vehicle_state)
Example #5
def instance_vehicle_spec(entry):
    from ..simulation import Vehicle
    from ..interfaces import VehicleSensor, Dynamics

    from vehicles.configuration.master import get_conftools_dynamics, \
        get_conftools_sensors


    check_valid_vehicle_config(entry)
    try:
        master = get_conftools_dynamics()
        if 'id_dynamics' in entry:
            id_dynamics = entry['id_dynamics']
            dynamics = master.instance(id_dynamics)
        else:
            id_dynamics = entry['dynamics']['id']
            dynamics = master.instance_spec(entry['dynamics'])

        assert isinstance(dynamics, Dynamics)

        sensors = entry['sensors']
        radius = entry['radius']
        extra = entry.get('extra', {})
        vehicle = Vehicle(radius=radius, extra=extra)
        vehicle.add_dynamics(id_dynamics, dynamics)
        master = get_conftools_sensors()

        for sensor in sensors:
            if 'id_sensor' in sensor:
                id_sensor = sensor['id_sensor']
                sensor_instance = master.instance(id_sensor)
            else:
                id_sensor = sensor['sensor']['id']
                sensor_instance = master.instance_spec(sensor['sensor'])
            assert isinstance(sensor_instance, VehicleSensor)

            # TODO: document this
            x, y, theta_deg = sensor.get('pose', [0, 0, 0])
            theta = np.radians(theta_deg)
            pose = SE2_from_translation_angle([x, y], theta)
            pose = SE3_from_SE2(pose)
            joint = sensor.get('joint', 0)
            extra = sensor.get('extra', {})
            vehicle.add_sensor(id_sensor=id_sensor,
                               sensor=sensor_instance,
                               pose=pose, joint=joint,
                               extra=extra)
        return vehicle
    except Exception:
        logger.error('Error while trying to instantiate vehicle. Entry:\n%s'
                     % (pformat(entry)))
        raise
Example #6
    def pose2state(self, pose):
        ''' 
            Returns the state that best approximates 
            the given pose (in SE3).
        '''
        # random_angle = SO2.convert_to(SE3, SO2.sample_uniform()) # XXX
        #        random_angle = SE3_from_SE2(SE2_from_SO2(SO2.sample_uniform()))

        # angle = BaseTopDynamics.last_turret_angle
        # Start the turret at a uniformly random angle in [0, 2*pi).
        angle = np.random.rand() * np.pi * 2
        turret_pose = SE3_from_SE2(SE2_from_translation_angle([0, 0], angle))
        # print('Starting at %s deg ' % np.rad2deg(angle))
        return self.compose_state(base=self.base.pose2state(pose),
                                  top=self.top.pose2state(turret_pose))
Example #7
    def run(self, event=None):
        # Timer callback: dead-reckon the pose from the accumulated wheel-tick
        # deltas, then broadcast and publish it as a TransformStamped.
        self.pose = self.db_kinematics.step(self.encoder_ticks_left_delta_t, self.encoder_ticks_right_delta_t, self.pose)
        self.encoder_ticks_right_delta_t = 0
        self.encoder_ticks_left_delta_t = 0
        pose_SE3 = SE3_from_SE2(self.pose)
        rot, trans = rotation_translation_from_SE3(pose_SE3)
        quaternion_tf = quaternion_from_matrix(pose_SE3)
        quaternion = Quaternion(quaternion_tf[0], quaternion_tf[1], quaternion_tf[2], quaternion_tf[3])
        translation = Vector3(trans[0], trans[1], trans[2])
        trafo = Transform(translation, quaternion)
        self.tfs_msg.transform = trafo
        # Use the encoder timestamp while moving; fall back to now() when standing still.
        if self.vel_left == 0.0 and self.vel_right == 0.0:
            self.tfs_msg.header.stamp = rospy.Time.now()
        else:
            self.tfs_msg.header.stamp = self.encoder_timestamp
        self.br.sendTransformMessage(self.tfs_msg)
        self.pub_tf_enc_loc.publish(self.tfs_msg)
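
DuckiebotKinematics.step(ticks_left, ticks_right, pose) itself is not shown in these examples. A minimal sketch of the standard differential-drive dead reckoning it presumably performs is given below; the tick resolution and the exact integration scheme are assumptions (the 135 mirrors max_number_ticks from Example #9, but its meaning is a guess):

import numpy as np

def diff_drive_step(ticks_left, ticks_right, pose, radius, baseline, ticks_per_rev=135):
    # Dead-reckon one pose update from wheel-encoder tick deltas.
    # `pose` is a 3x3 homogeneous SE(2) matrix, as returned by SE2_from_xytheta.
    d_left = 2 * np.pi * radius * ticks_left / ticks_per_rev     # left wheel arc length
    d_right = 2 * np.pi * radius * ticks_right / ticks_per_rev   # right wheel arc length
    d = (d_left + d_right) / 2.0             # displacement of the base midpoint
    dtheta = (d_right - d_left) / baseline   # change in heading
    c, s = np.cos(dtheta), np.sin(dtheta)
    delta = np.array([[c, -s, d],
                      [s, c, 0.0],
                      [0, 0, 1.0]])
    return pose @ delta
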
Example #8
    def joint_state(self, state, joint=0):
        car_state = self.car_state_from_big_state(state)
        steering = self.steering_from_big_state(state)

        car_position = self.car.joint_state(car_state, joint=0)

        if joint == 0:
            # Joint 0: the car body itself.
            return car_position
        elif joint == 1:
            # Joints 1 and 2: points offset by (axis_dist, +/- L) from the car frame,
            # rotated by the current steering angle.
            p = [self.car.axis_dist, self.car.L]
        elif joint == 2:
            p = [self.car.axis_dist, -self.car.L]
        else:
            raise ValueError('Invalid joint ID %r' % joint)

        pose, vel = car_position
        rel_pose = SE3_from_SE2(SE2_from_translation_angle(p, steering))
        wpose = SE3.multiply(pose, rel_pose)
        return wpose, vel  # XXX: vel
Example #9
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(EncoderLocalizationNode, self).__init__(node_name=node_name, node_type=NodeType.GENERIC)
        self.veh_name = rospy.get_namespace().strip("/")
        self.radius = rospy.get_param(f'/{self.veh_name}/kinematics_node/radius', 100)
        self.baseline = 0.0968
        x_init = rospy.get_param(f'/{self.veh_name}/{node_name}/x_init', 100)
        self.pose = SE2_from_xytheta([x_init, 0, np.pi])
        self.db_kinematics = DuckiebotKinematics(radius=self.radius, baseline=self.baseline)
        # Wheel velocity and encoder-tick bookkeeping.
        self.vel_left = 0.0
        self.vel_right = 0.0
        self.encoder_ticks_left_total = 0
        self.encoder_ticks_left_delta_t = 0
        self.encoder_ticks_right_total = 0
        self.encoder_ticks_right_delta_t = 0
        self.encoder_timestamp = rospy.Time.now()
        self.max_number_ticks = 135
        # Build the initial map -> encoder_baselink TransformStamped from the starting pose.
        init_pose_SE3 = SE3_from_SE2(self.pose)
        rot, trans = rotation_translation_from_SE3(init_pose_SE3)
        quaternion_tf = quaternion_from_matrix(init_pose_SE3)
        quaternion = Quaternion(quaternion_tf[0], quaternion_tf[1], quaternion_tf[2], quaternion_tf[3])
        translation = Vector3(trans[0], trans[1], trans[2])
        trafo = Transform(translation, quaternion)
        self.tfs_msg = TransformStamped()
        self.tfs_msg.header.frame_id = 'map'
        self.tfs_msg.header.stamp = self.encoder_timestamp
        self.tfs_msg.child_frame_id = 'encoder_baselink'
        self.tfs_msg.transform = trafo
        self.br = tf.TransformBroadcaster()

        # Wheel-command and encoder-tick subscribers, plus the transform publisher.
        self.sub_executed_commands = rospy.Subscriber(f'/{self.veh_name}/wheels_driver_node/wheels_cmd_executed',
                                                      WheelsCmdStamped, self.cb_executed_commands)
        self.sub_encoder_ticks_left = rospy.Subscriber(f'/{self.veh_name}/left_wheel_encoder_node/tick',
                                                       WheelEncoderStamped, self.callback_left)
        self.sub_encoder_ticks_right = rospy.Subscriber(f'/{self.veh_name}/right_wheel_encoder_node/tick',
                                                        WheelEncoderStamped, self.callback_right)

        self.pub_tf_enc_loc = rospy.Publisher(f'/{self.veh_name}/{node_name}/transform_stamped',
                                             TransformStamped, queue_size=10)
Example #10
    def new_episode(self):
        ''' Returns an episode tuple. By default it just
            samples a random pose for the robot.
        '''
        # Either sample a pose uniformly inside the bounds, or cycle through the
        # configured start poses.
        if self.start_poses is None:
            x = np.random.uniform(self.bounds[0][0], self.bounds[0][1])
            y = np.random.uniform(self.bounds[1][0], self.bounds[1][1])
            th = np.random.uniform(0, np.pi * 2)
        else:
            nposes = len(self.start_poses)
            if self.cur_pose is None:
                self.cur_pose = np.random.randint(nposes)
            else:
                self.cur_pose = (self.cur_pose + 1) % nposes
            p = self.start_poses[self.cur_pose]
            x = p[0]
            y = p[1]
            th = np.deg2rad(p[2])

        vehicle_state = SE3_from_SE2(SE2_from_xytheta([x, y, th]))
        id_episode = unique_timestamp_string()
        return World.Episode(id_episode, vehicle_state)
Example #11
def export_gltf(dm: DuckietownMap, outdir: str, background: bool = True):
    gltf = GLTF()
    # setattr(gltf, 'md52PV', {})

    scene_index = add_scene(gltf, Scene(nodes=[]))

    map_nodes = []
    it: IterateByTestResult

    tiles = list(iterate_by_class(dm, Tile))
    if not tiles:
        raise ZValueError("no tiles?")
    for it in tiles:
        tile = cast(Tile, it.object)
        name = it.fqn[-1]
        fn = tile.fn
        fn_normal = tile.fn_normal
        fn_emissive = tile.fn_emissive
        fn_metallic_roughness = tile.fn_metallic_roughness
        fn_occlusion = tile.fn_occlusion
        material_index = make_material(
            gltf,
            doubleSided=False,
            baseColorFactor=[1, 1, 1, 1.0],
            fn=fn,
            fn_normals=fn_normal,
            fn_emissive=fn_emissive,
            fn_metallic_roughness=fn_metallic_roughness,
            fn_occlusion=fn_occlusion,
        )
        mi = get_square()
        mesh_index = add_polygon(
            gltf,
            name + "-mesh",
            vertices=mi.vertices,
            texture=mi.textures,
            colors=mi.color,
            normals=mi.normals,
            material=material_index,
        )
        node1_index = add_node(gltf, Node(mesh=mesh_index))

        i, j = ij_from_tilename(name)
        c = (i + j) % 2
        color = [1, 0, 0, 1.0] if c else [0, 1, 0, 1.0]
        # Optionally add a colored back face slightly behind each tile (disabled).
        add_back = False
        if add_back:

            material_back = make_material(gltf, doubleSided=False, baseColorFactor=color)
            back_mesh_index = add_polygon(
                gltf,
                name + "-mesh",
                vertices=mi.vertices,
                texture=mi.textures,
                colors=mi.color,
                normals=mi.normals,
                material=material_back,
            )

            flip = np.diag([1.0, 1.0, -1.0, 1.0])
            flip[2, 3] = -0.01
            back_index = add_node(gltf, Node(mesh=back_mesh_index, matrix=gm(flip)))
        else:
            back_index = None

        tile_transform = it.transform_sequence
        tile_matrix2d = tile_transform.asmatrix2d().m
        s = dm.tile_size
        scale = np.diag([s, s, s, 1])
        tile_matrix = SE3_from_SE2(tile_matrix2d)
        tile_matrix = tile_matrix @ scale @ SE3_rotz(-np.pi / 2)

        # glTF matrices are stored column-major, hence the transpose before flattening.
        tile_matrix_float = list(tile_matrix.T.flatten())
        if back_index is not None:
            children = [node1_index, back_index]
        else:
            children = [node1_index]
        tile_node = Node(name=name, matrix=tile_matrix_float, children=children)
        tile_node_index = add_node(gltf, tile_node)

        map_nodes.append(tile_node_index)

    if background:
        bg_index = add_background(gltf)
        add_node_to_scene(gltf, scene_index, bg_index)

    exports = {
        "Sign": export_sign,
        # XXX: the tree model is screwed up
        "Tree": export_tree,
        # "Tree": None,
        "Duckie": export_duckie,
        "DB18": export_DB18,
        # "Bus": export_bus,
        # "Truck": export_truck,
        # "House": export_house,
        "TileMap": None,
        "TrafficLight": export_trafficlight,
        "LaneSegment": None,
        "PlacedObject": None,
        "DuckietownMap": None,
        "Tile": None,
    }

    for it in iterate_by_class(dm, PlacedObject):
        ob = it.object

        K = type(ob).__name__
        if isinstance(ob, Sign):
            K = "Sign"

        if K not in exports:
            logger.warn(f"cannot convert {type(ob).__name__}")
            continue

        f = exports[K]
        if f is None:
            continue
        thing_index = f(gltf, it.fqn[-1], ob)

        tile_transform = it.transform_sequence
        tile_matrix2d = tile_transform.asmatrix2d().m
        tile_matrix = SE3_from_SE2(tile_matrix2d) @ SE3_rotz(np.pi / 2)
        sign_node_index = add_node(gltf, Node(children=[thing_index]))
        tile_matrix_float = list(tile_matrix.T.flatten())
        tile_node = Node(name=it.fqn[-1], matrix=tile_matrix_float, children=[sign_node_index])
        tile_node_index = add_node(gltf, tile_node)
        map_nodes.append(tile_node_index)

    mapnode = Node(name="tiles", children=map_nodes)
    map_index = add_node(gltf, mapnode)
    add_node_to_scene(gltf, scene_index, map_index)

    # add_node_to_scene(model, scene_index, node1_index)
    yfov = np.deg2rad(60)
    camera = Camera(
        name="perpcamera",
        type="perspective",
        perspective=PerspectiveCameraInfo(aspectRatio=4 / 3, yfov=yfov, znear=0.01, zfar=1000),
    )
    gltf.model.cameras.append(camera)

    t = np.array([2, 2, 0.15])
    matrix = look_at(pos=t, target=np.array([0, 2, 0]))
    cam_index = add_node(gltf, Node(name="cameranode", camera=0, matrix=list(matrix.T.flatten())))
    add_node_to_scene(gltf, scene_index, cam_index)

    cleanup_model(gltf)

    fn = os.path.join(outdir, "main.gltf")
    make_sure_dir_exists(fn)
    logger.info(f"writing to {fn}")
    gltf.export(fn)
    if True:
        # Re-write the exported glTF with pretty-printed JSON.
        data = read_ustring_from_utf8_file(fn)

        j = json.loads(data)
        data2 = json.dumps(j, indent=2)
        write_ustring_to_utf8_file(data2, fn)

    fnb = os.path.join(outdir, "main.glb")
    logger.info(f"writing to {fnb}")
    gltf.export(fnb)
    verify_trimesh = False
    if verify_trimesh:
        res = trimesh.load(fn)
        # camera_pose, _ = res.graph['cameranode']
        # logger.info(res=res)
        import pyrender

        scene = pyrender.Scene.from_trimesh_scene(res)