Example #1
File: lora.py Project: robotika/osgar
    def update(self):
        self.recent_packets = []
        channel = super().update()  # define self.time
        if channel == 'raw':
            if self.verbose:
                print(self.time, "update", channel, self.raw)

            self.buf, packet = split_lora_buffer(self.buf + self.raw)
            while len(packet) > 0:
                self.recent_packets.append(packet)
                addr, data = parse_lora_packet(packet)
                if addr is not None and data.startswith(b'['):
                    arr = literal_eval(data.decode('ascii'))
                    if len(arr) == 3:
                        pose2d = arr
                        self.publish('robot_status',
                                     [addr[-1], pose2d, b'running'
                                      ])  # TODO read it from robot
                        if self.verbose:
                            self.debug_robot_poses.append(
                                (self.time.total_seconds(), addr[-1], pose2d))
                    else:
                        assert len(arr) == 4, arr  # artifact, x, y, z
                        self.publish('artf', [addr[-1], arr])
                cmd = parse_my_cmd(self.device_id, data)
                if cmd is not None:
                    self.publish('cmd', cmd)
                if addr is not None and self.device_id is not None and self.device_id not in addr:
                    if self.verbose:
                        print('re-transmit')
                    self.send_data(
                        packet.strip())  # re-transmit data from other robots
                self.buf, packet = split_lora_buffer(self.buf)
        elif channel == 'radio':
            self.on_radio(self.radio)
        elif channel == 'pose2d':
            if self.last_transmit is None or self.time > self.last_transmit + self.min_transmit_dt:
                self.send_data(bytes(str(self.pose2d), encoding='ascii'))
        elif channel == 'pose3d':
            if self.last_transmit is None or self.time > self.last_transmit + self.min_transmit_dt:
                xyz, quat = self.pose3d
                x, y, z = xyz
                pose2d = [
                    int(round(x * 1000)),
                    int(round(y * 1000)),
                    int(round(100 * math.degrees(quaternion.heading(quat))))
                ]
                self.send_data(bytes(str(pose2d), encoding='ascii'))
        elif channel == 'cmd':
            assert len(self.cmd) == 2, self.cmd
            self.send_data(
                b'%d:%s:%d' %
                (self.cmd[0], self.cmd[1], int(self.time.total_seconds())))
        elif channel == 'artf':
            # send data as they are, ignore transmit time, ignore transmit failure
            for artf_item in self.artf:
                self.send_data(bytes(str(artf_item), encoding='ascii'))
        else:
            assert False, channel  # not supported
        return channel
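
The pose packets above travel as plain ASCII Python literals, so `literal_eval` on the receiving side mirrors `str()` on the sender. A minimal round-trip sketch of that convention (the packet values are illustrative; the `parse_lora_packet` address framing is not reproduced here):

    from ast import literal_eval

    # sender side, as in the 'pose2d' branch above
    pose2d = [1500, -200, 4500]  # x [mm], y [mm], heading [centidegrees]
    data = bytes(str(pose2d), encoding='ascii')  # b'[1500, -200, 4500]'

    # receiver side, as in the 'raw' branch above
    assert data.startswith(b'[')
    arr = literal_eval(data.decode('ascii'))
    assert len(arr) == 3 and arr == pose2d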
Example #2
    def scan(self):
        """270 degrees -> np.linspace(-135, 135, 271)"""
        from shapely.affinity import translate, rotate
        from shapely.geometry import LineString
        ret = np.zeros((271,))

        # create lidar lines
        angles = np.linspace(math.radians(-135), math.radians(135), 271)
        acos = np.cos(angles)
        asin = np.sin(angles)
        max_dist_m = 10.0
        x = acos * max_dist_m
        y = asin * max_dist_m

        # transform world to robot coordinates
        world = self.world
        world = translate(world, *[-c for c in self.xyz])
        world = rotate(world, -quaternion.heading(self.orientation), origin=(0,0), use_radians=True)

        # intersect each lidar line with the world
        for i, point in enumerate(zip(x, y)):
            cross = world.intersection(LineString([(0,0), point]))
            if cross.is_empty:
                continue
            if cross.geom_type == 'Point':
                ret[i] = math.hypot(*cross.coords[0])
            elif cross.geom_type == 'MultiPoint':
                # find the closest of several wall intersections
                ret[i] = max_dist_m
                for item in cross.geoms:
                    dist = math.hypot(*item.coords[0])
                    if ret[i] > dist:
                        ret[i] = dist
            else:
                assert False
        return ret
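
The simulated lidar works in robot coordinates: the world geometry is shifted and rotated by the inverse robot pose, and each of the 271 beams is a 10 m LineString from the origin. A self-contained sketch of one beam against a box-shaped world (the 4x4 m room is an assumption for illustration):

    import math
    from shapely.geometry import LineString, box

    world = box(-2, -2, 2, 2).exterior      # walls of a 4x4 m room, robot at the center
    beam = LineString([(0, 0), (10.0, 0)])  # the 0-degree beam, 10 m max range
    cross = beam.intersection(world)
    assert cross.geom_type == 'Point'
    assert abs(math.hypot(*cross.coords[0]) - 2.0) < 1e-9  # wall 2 m ahead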
Example #3
 def on_desired_speed(self, dt, channel, data):
     forward = data[0]/1000.               # from millimeters per second to meters per second
     angular = math.radians(data[1]/100.)  # from centidegrees per second to radians per second
     dtime = 0.1                           # simulation step time in seconds
     max_acc_forward = 1                   # meters per sec**2
     max_acc_angular = 1                   # radians per sec**2
     # np.clip(value, lo, hi): limit the requested acceleration to the configured maximum
     acc_forward = np.clip((forward - self.speed_forward)/dtime, -max_acc_forward, max_acc_forward)
     acc_angular = np.clip((angular - self.speed_angular)/dtime, -max_acc_angular, max_acc_angular)
     self.speed_forward += acc_forward * dtime
     self.speed_angular += acc_angular * dtime
     log("desired", f"forward {forward:.2f} m/s", f"angular {math.degrees(angular):.2f} deg/s")
     log("current", f"forward {self.speed_forward:.2f} m/s", f"angular {math.degrees(self.speed_angular):.2f} deg/s")
     dist = dtime * self.speed_forward
     dist3d = quaternion.rotate_vector([dist, 0, 0], self.orientation)
     self.xyz = [a + b for a, b in zip(self.xyz, dist3d)]
     turn = quaternion.from_axis_angle((0,0,1), dtime * self.speed_angular)
     self.orientation = quaternion.multiply(self.orientation, turn)
     self.time += datetime.timedelta(seconds=dtime)
     heading_centidegrees = round(math.degrees(quaternion.heading(self.orientation))*100)
     x_mm, y_mm, z_mm = [round(c*1000) for c in self.xyz]
     self.bus.publish('orientation', self.orientation)
     self.bus.publish('rot', [heading_centidegrees, 0, 0])
     self.bus.publish('pose2d', [x_mm, y_mm, heading_centidegrees])
     self.bus.publish('pose3d', [[a - b for a, b in zip(self.xyz, self.origin)], self.orientation])
     self.bus.publish('sim_time_sec', self.time.total_seconds())
     self.bus.publish('scan', self.scan())
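
The speed update is a simple acceleration-limited first-order step: the requested change is divided by the step time and clipped to the maximum acceleration. A worked example with illustrative numbers:

    import numpy as np

    dtime = 0.1                  # s, simulation step
    max_acc_forward = 1.0        # m/s**2
    speed, desired = 0.0, 0.5    # m/s
    acc = np.clip((desired - speed) / dtime, -max_acc_forward, max_acc_forward)
    speed += acc * dtime         # 0.1 m/s -- reaching 0.5 m/s takes 5 steps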
Example #4
 def warmup(self):
     corrected = [rr - oo for rr, oo in zip(self.xyz, self.origin)]
     ROBOT_NAME = b'X100L'
     self.bus.publish('origin', [ROBOT_NAME, *corrected, *self.orientation])
     self.bus.publish('robot_name', ROBOT_NAME)
     self.bus.publish('scan', self.scan())
     heading_centidegrees = round(math.degrees(quaternion.heading(self.orientation))*100)
     self.bus.publish('rot', [heading_centidegrees, 0, 0])
     self.bus.publish('orientation', self.orientation)
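
The `origin` message flattens the corrected position and the orientation quaternion into one list. A sketch of the resulting layout (assuming the `[x, y, z, w]` quaternion convention used elsewhere in osgar):

    ROBOT_NAME = b'X100L'
    corrected = [1.0, 2.0, 0.5]          # m, position relative to the origin
    orientation = [0.0, 0.0, 0.0, 1.0]   # identity quaternion
    msg = [ROBOT_NAME, *corrected, *orientation]
    # [b'X100L', 1.0, 2.0, 0.5, 0.0, 0.0, 0.0, 1.0]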
Example #5
 def pose_callback(self, frame):
     try:
         pose_frame = frame.as_pose_frame()
         profile = pose_frame.get_profile()
         with self.origins_lock:
             try:
                 name = self.origins[profile.unique_id()]
             except KeyError:
                 print(
                     'Pose callback called before the stream origin was registered.'
                 )
                 return
         pose = pose_frame.get_pose_data()
         n = frame.get_frame_number()
         if n % self.pose_subsample != 0:
             return
         timestamp = frame.get_timestamp()
     except Exception as e:
         print(e)
         self.finished.set()
         return
     orientation = quaternion_multiply(
         self.tracking_sensor_pitch_quat_inv[name],
         t265_to_osgar_orientation(pose.rotation))
     self.publish(f'{name}_orientation', orientation)
     x, y, z = rotate_vector(t265_to_osgar_position(pose.translation),
                             self.tracking_sensor_yaw_quat[name])
     yaw = quaternion.heading(orientation)
     self.publish(
         f'{name}_pose2d',
         [int(x * 1000),
          int(y * 1000),
          int(math.degrees(yaw) * 100)])
     self.publish(f'{name}_pose3d', [[x, y, z], orientation])
     self.publish(f'{name}_pose_raw', [
         n,
         timestamp,
         [pose.translation.x, pose.translation.y, pose.translation.z],
         [
             pose.rotation.x, pose.rotation.y, pose.rotation.z,
             pose.rotation.w
         ],
         [pose.velocity.x, pose.velocity.y, pose.velocity.z],
         [
             pose.angular_velocity.x, pose.angular_velocity.y,
             pose.angular_velocity.z
         ],
         [pose.acceleration.x, pose.acceleration.y, pose.acceleration.z],
         [
             pose.angular_acceleration.x, pose.angular_acceleration.y,
             pose.angular_acceleration.z
         ],
         [pose.mapper_confidence, pose.tracker_confidence],
     ])  # raw RealSense2 Pose data
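
The KeyError guard exists because librealsense can invoke the callback before the stream origin is registered. A minimal sketch of the registration side that this guard assumes (function and argument names are hypothetical):

    import threading

    origins = {}
    origins_lock = threading.Lock()

    def register_stream_origin(profile_unique_id, name):
        # hypothetical counterpart: must hold the same lock the callback takes
        with origins_lock:
            origins[profile_unique_id] = name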
Example #6
 def _step(self, direction):
     self.bbox = None
     if (self.current + direction) >= len(self.log):
         self.log.grow()
     while self.current + direction >= 0 and self.current + direction < len(self.log):
         self.current += direction
         timestamp, stream_id, data = self.log[self.current]
         if stream_id == self.keyframes_id:
             self.keyframe = True
         if stream_id == self.title_id:
             self.title = deserialize(data)
         if stream_id == self.bbox_id:
             self.bbox = deserialize(data)
         if stream_id == self.lidar_id:
             self.scan = deserialize(data)
             keyframe = self.keyframe
             self.keyframe = False
             return timestamp, self.pose, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
         if stream_id == self.lidar2_id:
             self.scan2 = deserialize(data)
         elif stream_id == self.camera_id:
             self.image = get_image(deserialize(data))
             if self.lidar_id is None:
                 keyframe = self.keyframe
                 self.keyframe = False
                 return timestamp, self.pose, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
         elif stream_id == self.camera2_id:
             self.image2 = get_image(deserialize(data))
         elif stream_id == self.joint_id:
             self.joint = deserialize(data)
         elif stream_id == self.pose3d_id:
             pose3d, orientation = deserialize(data)
             assert len(pose3d) == 3
             assert len(orientation) == 4
             self.pose = [pose3d[0], pose3d[1], quaternion.heading(orientation)]
             self.pose3d = [pose3d, orientation]
         elif stream_id == self.pose2d_id:
             arr = deserialize(data)
             assert len(arr) == 3
             self.pose = (arr[0]/1000.0, arr[1]/1000.0, math.radians(arr[2]/100.0))
             x, y, heading = self.pose
             self.pose = (x * math.cos(g_rotation_offset_rad) - y * math.sin(g_rotation_offset_rad),
                          x * math.sin(g_rotation_offset_rad) + y * math.cos(g_rotation_offset_rad),
                          heading + g_rotation_offset_rad)
             if self.lidar_id is None and self.camera_id is None:
                 keyframe = self.keyframe
                 self.keyframe = False
                 return timestamp, self.pose, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
     return timedelta(), self.pose, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, self.keyframe, self.title, True
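
The `pose2d` branch applies a global view rotation offset to every replayed pose. The same transform isolated as a helper (a sketch of the math used above):

    import math

    def rotate_pose2d(pose, offset_rad):
        """Rotate an (x, y, heading) pose around the origin by offset_rad."""
        x, y, heading = pose
        return (x * math.cos(offset_rad) - y * math.sin(offset_rad),
                x * math.sin(offset_rad) + y * math.cos(offset_rad),
                heading + offset_rad)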
Example #7
 def pose_callback(self, frame):
     try:
         pose = frame.as_pose_frame().get_pose_data()
         n = frame.get_frame_number()
         if n % self.pose_subsample != 0:
             return
         timestamp = frame.get_timestamp()
     except Exception as e:
         print(e)
         self.finished.set()
         return
     orientation = quaternion_multiply(
         self.tracking_sensor_pitch_quat_inv,
         t265_to_osgar_orientation(pose.rotation))
     self.publish('orientation', orientation)
     x, y, z = rotate_vector(t265_to_osgar_position(pose.translation),
                             self.tracking_sensor_yaw_quat)
     yaw = quaternion.heading(orientation)
     self.publish(
         'pose2d',
         [int(x * 1000),
          int(y * 1000),
          int(math.degrees(yaw) * 100)])
     self.publish('pose3d', [[x, y, z], orientation])
     self.publish('pose_raw', [
         n,
         timestamp,
         [pose.translation.x, pose.translation.y, pose.translation.z],
         [
             pose.rotation.x, pose.rotation.y, pose.rotation.z,
             pose.rotation.w
         ],
         [pose.velocity.x, pose.velocity.y, pose.velocity.z],
         [
             pose.angular_velocity.x, pose.angular_velocity.y,
             pose.angular_velocity.z
         ],
         [pose.acceleration.x, pose.acceleration.y, pose.acceleration.z],
         [
             pose.angular_acceleration.x, pose.angular_acceleration.y,
             pose.angular_acceleration.z
         ],
         [pose.mapper_confidence, pose.tracker_confidence],
     ])  # raw RealSense2 Pose data
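
`pose_subsample` thins the pose stream by frame number, publishing only every n-th frame. A worked illustration (the roughly 200 Hz T265 pose rate and the subsample of 20 are assumptions):

    pose_subsample = 20   # ~200 Hz input -> ~10 Hz published
    published = [n for n in range(100) if n % pose_subsample == 0]
    # [0, 20, 40, 60, 80]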
Example #8
File: lidarview.py Project: tajgr/osgar
 def _step(self, direction):
     if (self.current + direction) >= len(self.log):
         self.log.grow()
     while self.current + direction >= 0 and self.current + direction < len(
             self.log):
         self.current += direction
         timestamp, stream_id, data = self.log[self.current]
         if stream_id == self.lidar_id:
             self.scan = deserialize(data)
             return timestamp, self.pose, self.scan, self.image, False
         elif stream_id == self.camera_id:
             jpeg = deserialize(data)
             self.image = pygame.image.load(io.BytesIO(jpeg),
                                            'JPG').convert()
             if self.lidar_id is None:
                 return timestamp, self.pose, self.scan, self.image, False
         elif stream_id == self.pose3d_id:
             pose3d, orientation = deserialize(data)
             assert len(pose3d) == 3
             assert len(orientation) == 4
             self.pose = [
                 pose3d[0], pose3d[1],
                 quaternion.heading(orientation)
             ]
             self.pose3d = [pose3d, orientation]
         elif stream_id == self.pose2d_id:
             arr = deserialize(data)
             assert len(arr) == 3
             self.pose = (arr[0] / 1000.0, arr[1] / 1000.0,
                          math.radians(arr[2] / 100.0))
             x, y, heading = self.pose
             self.pose = (x * math.cos(g_rotation_offset_rad) -
                          y * math.sin(g_rotation_offset_rad),
                          x * math.sin(g_rotation_offset_rad) +
                          y * math.cos(g_rotation_offset_rad),
                          heading + g_rotation_offset_rad)
             if self.lidar_id is None and self.camera_id is None:
                 return timestamp, self.pose, self.scan, self.image, False
     return timedelta(), self.pose, self.scan, self.image, True
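
The trailing boolean in the returned tuple flags that the log is exhausted. A hedged sketch of a playback loop built on `_step()` (`draw` is a hypothetical rendering hook):

    def play(viewer):
        # step forward until _step() reports the end of the log
        while True:
            timestamp, pose, scan, image, eof = viewer._step(1)
            if eof:
                break
            draw(timestamp, pose, scan, image)  # hypothetical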
Example #9
 def _step(self, direction):
     self.bbox = []
     self.title = []
     if (self.current + direction) >= len(self.log):
         self.log.grow()
     while self.current + direction >= 0 and self.current + direction < len(
             self.log):
         self.current += direction
         timestamp, stream_id, data = self.log[self.current]
         if stream_id == self.keyframes_id:
             self.keyframe = True
         if stream_id in self.title_id and stream_id != self.bbox_id:
             self.title.append(deserialize(data))
         if stream_id == self.lidar_id:
             self.scan = deserialize(data)
             keyframe = self.keyframe
             self.keyframe = False
             return timestamp, self.frame, self.pose, self.pose3d, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
         if stream_id == self.lidar2_id:
             self.scan2 = deserialize(data)
         if stream_id == self.lidar_up_id:
             self.frame.lidar_up = deserialize(data)
         if stream_id == self.lidar_down_id:
             self.frame.lidar_down = deserialize(data)
         elif stream_id == self.camera_id:
             self.image, _ = get_image(deserialize(data))
             # bounding boxes associated with an image are stored after the image in the log,
             # so we continue reading past the image to gather its bounding box data
             current = self.current
             while current + direction >= 0 and current + direction < len(
                     self.log):
                 current += direction
                 _, new_stream_id, new_data = self.log[current]
                 if new_stream_id == self.bbox_id:
                     self.bbox.append(deserialize(new_data))
                 if new_stream_id in self.title_id:
                     self.title.append(deserialize(new_data))
                 if new_stream_id == self.camera_id:
                     break
             if self.lidar_id is None:
                 keyframe = self.keyframe
                 self.keyframe = False
                 return timestamp, self.frame, self.pose, self.pose3d, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
         elif stream_id == self.camera2_id:
             self.image2, _ = get_image(deserialize(data))
         elif stream_id == self.rgbd_id:
             _, _, img_data, depth_data = deserialize(data)
             self.image, self.image2 = get_image((img_data, depth_data))
             if self.lidar_id is None:
                 keyframe = self.keyframe
                 self.keyframe = False
                 return timestamp, self.frame, self.pose, self.pose3d, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
         elif stream_id == self.joint_id:
             self.joint = deserialize(data)
         elif stream_id == self.pose3d_id:
             pose3d, orientation = deserialize(data)
             assert len(pose3d) == 3
             assert len(orientation) == 4
             self.pose = [
                 pose3d[0], pose3d[1],
                 quaternion.heading(orientation)
             ]
             self.pose3d = [pose3d, orientation]
         elif stream_id == self.pose2d_id:
             arr = deserialize(data)
             assert len(arr) == 3
             self.pose = (arr[0] / 1000.0, arr[1] / 1000.0,
                          math.radians(arr[2] / 100.0))
             x, y, heading = self.pose
             self.pose = (x * math.cos(g_rotation_offset_rad) -
                          y * math.sin(g_rotation_offset_rad),
                          x * math.sin(g_rotation_offset_rad) +
                          y * math.cos(g_rotation_offset_rad),
                          heading + g_rotation_offset_rad)
             if self.lidar_id is None and self.camera_id is None:
                 keyframe = self.keyframe
                 self.keyframe = False
                 return timestamp, self.frame, self.pose, self.pose3d, self.scan, self.scan2, self.image, self.image2, self.bbox, self.joint, keyframe, self.title, False
     return (timedelta(), self.frame, self.pose, self.pose3d, self.scan,
             self.scan2, self.image, self.image2, self.bbox, self.joint,
             self.keyframe, self.title, True)
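
Bounding boxes are written to the log after the image they belong to, which is why the camera branch scans ahead until the next camera frame. The same idea isolated over a plain list of `(stream_id, data)` records (the ids and record layout here are assumptions, and only forward stepping is shown):

    def collect_bboxes(records, start, camera_id, bbox_id):
        """Gather bbox records logged between one camera frame and the next."""
        bboxes = []
        for stream_id, data in records[start + 1:]:
            if stream_id == camera_id:
                break  # next image: its boxes belong to that frame
            if stream_id == bbox_id:
                bboxes.append(data)
        return bboxes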
Example #10
    def play_virtual_part(self):
        self.stdout("Waiting for origin ...")
        self.origin = None  # invalidate origin
        self.origin_error = False
        self.bus.publish('request_origin', True)
        while self.origin is None and not self.origin_error:
            self.update()
        self.stdout('Origin:', self.origin, self.robot_name)

        if self.origin is not None:
            x, y, z = self.origin
            x1, y1, z1 = self.xyz
            self.offset = x - x1, y - y1, z - z1
            self.stdout('Offset:', self.offset)
            heading = quaternion.heading(self.origin_quat)
            self.stdout('heading', math.degrees(heading), 'angle', math.degrees(math.atan2(-y, -x)), 'dist', math.hypot(x, y))

            self.go_to_entrance()
        else:
            # lost in tunnel
            self.stdout('Lost in tunnel:', self.origin_error, self.offset)

        start_time = self.sim_time_sec
        for loop in range(100):
            self.collision_detector_enabled = True
            if self.sim_time_sec - start_time > self.timeout.total_seconds():
                print('Total Timeout Reached', self.timeout.total_seconds())
                break
            timeout = timedelta(seconds=self.timeout.total_seconds() - (self.sim_time_sec - start_time))
            print('Current timeout', timeout)

            dist, reason = self.follow_wall(radius=self.walldist, right_wall=self.use_right_wall,  # was radius=0.9
                                timeout=timeout, pitch_limit=LIMIT_PITCH, roll_limit=None)
            self.collision_detector_enabled = False
            if reason == REASON_VIRTUAL_BUMPER:
                assert self.virtual_bumper is not None
                self.virtual_bumper.reset_counters()

                # the robot probably ended up in a cycle
                self.return_home(timedelta(seconds=10))

                # try something crazy if you do not have other ideas ...
                before_center = self.use_center
                self.use_center = True
                dist, reason = self.follow_wall(radius=self.walldist, right_wall=self.use_right_wall,  # was radius=0.9
                                    timeout=timedelta(seconds=60), pitch_limit=LIMIT_PITCH, roll_limit=None)
                self.use_center = before_center
                if reason is None or reason != REASON_PITCH_LIMIT:
                    continue

            if reason is None or reason != REASON_PITCH_LIMIT:
                break
            self.stdout(self.time, "Microstep HOME %d %.3f" % (loop, dist), reason)
            self.go_straight(-0.3, timeout=timedelta(seconds=10))
            self.return_home(timedelta(seconds=10))

        self.stdout("Artifacts:", self.artifacts)

        self.stdout(self.time, "Going HOME %.3f" % dist, reason)

        self.return_home(2 * self.timeout)
        self.send_speed_cmd(0, 0)

        if self.artifacts:
            self.bus.publish('artf_xyz', [[artifact_data, round(x*1000), round(y*1000), round(z*1000)] 
                                          for artifact_data, (x, y, z) in self.artifacts])

        self.wait(timedelta(seconds=10), use_sim_time=True)
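
The final `artf_xyz` report converts artifact coordinates from meters to integer millimeters. A worked sketch of that packing (the artifact name is illustrative):

    artifacts = [(b'TYPE_BACKPACK', (12.3456, -7.8901, 0.25))]
    artf_xyz = [[artifact_data, round(x*1000), round(y*1000), round(z*1000)]
                for artifact_data, (x, y, z) in artifacts]
    # [[b'TYPE_BACKPACK', 12346, -7890, 250]]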