Example #1
    def _get_earlist_index(self, index_list):
        # Return the index of the queue holding the earliest stamped message,
        # plus whether that message is the last one currently in its queue.
        is_last = False
        earlist_index = -1
        queue_list = list(self.fusion_operator.queues_for_input_ports.values())
        for index, queue in enumerate(queue_list):
            if index_list[index] is not None:
                if earlist_index < 0:
                    earlist_index = index
                elif len(queue) > 0:
                    new = queue[index_list[index]]
                    new_stamp = Time.from_msg(new.header.stamp)
                    cur_queue = queue_list[earlist_index]
                    cur = cur_queue[index_list[earlist_index]]
                    cur_stamp = Time.from_msg(cur.header.stamp)
                    if new_stamp < cur_stamp:
                        earlist_index = index

        if len(queue_list[earlist_index]) == index_list[earlist_index] + 1:
            is_last = True
        return earlist_index, is_last
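Example #1 and most of the snippets that follow rely on the same step: a builtin_interfaces/msg/Time stamp (usually msg.header.stamp) is converted with rclpy's Time.from_msg so it can be ordered and subtracted. A minimal, self-contained sketch of that comparison; the stamp values here are made up for illustration:

from builtin_interfaces.msg import Time as TimeMsg
from rclpy.time import Time

# two hypothetical header stamps (e.g. taken from msg.header.stamp)
stamp_a = TimeMsg(sec=10, nanosec=500000000)
stamp_b = TimeMsg(sec=11, nanosec=0)

# Time.from_msg yields an rclpy.time.Time, which supports ordering and
# subtraction (the difference is an rclpy.duration.Duration)
t_a = Time.from_msg(stamp_a)
t_b = Time.from_msg(stamp_b)

assert t_a < t_b
print((t_b - t_a).nanoseconds)  # 500000000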
Example #2
 def fusion_callback(self, channel, msg):
     try:
         start = time.time()
         self.fusion_operator.queues_for_input_ports[channel].append(msg)
         valid_input_data = self._find_valid_input_data()
         oldest_birthmark = None
         if valid_input_data:
             for c, data in valid_input_data.items():
                 if data is None:
                     continue
                 if oldest_birthmark is None or Time.from_msg(
                         data.header.stamp) < Time.from_msg(
                             oldest_birthmark):
                     oldest_birthmark = data.header.stamp
                     freshness_constraint = data.freshness_constraint
                 valid_input_data[c] = pickle.loads(data.body)
             splash_message = SplashMessage()
             header = Header()
             header.stamp = oldest_birthmark
             splash_message.header = header
             splash_message.body = array('B',
                                         pickle.dumps(valid_input_data))
             splash_message.freshness_constraint = freshness_constraint
             self.fusion_operator.stream_output_ports[
                 self.fusion_operator.output_channel].write(splash_message)
         else:
             pass
         print(f"time: {(time.time() - start) * 1000:.2f}ms")
     except Exception as e:
         print(traceback.format_exc())
         print(e)
Example #3
    def _heartbeat_callback(self, msg):
        self.get_logger().debug('Heartbeat received {}'.format(msg.stamp))

        current = Time.from_msg(self._clock.now().to_msg())
        if (current - Time.from_msg(msg.stamp)) > self._timeout_duration:
            return

        self._set_connected(True)
        self._timer.reset()
Example #4
    def test_time_message_conversions(self):
        time1 = Time(nanoseconds=1, clock_type=ClockType.ROS_TIME)
        builtins_msg = Builtins()
        builtins_msg.time_value = time1.to_msg()

        # Default clock type resulting from from_msg will be ROS time
        time2 = Time.from_msg(builtins_msg.time_value)
        assert isinstance(time2, Time)
        assert time1 == time2
        # Clock type can be specified if appropriate
        time3 = Time.from_msg(builtins_msg.time_value, clock_type=ClockType.SYSTEM_TIME)
        assert time3.clock_type == ClockType.SYSTEM_TIME
Example #5
    def write_estimated_pose_timer_callback(self):
        try:
            transform_msg = self.tf_buffer.lookup_transform(
                'map', 'base_link', Time())
            orientation = transform_msg.transform.rotation
            theta, _, _ = pyquaternion.Quaternion(
                x=orientation.x,
                y=orientation.y,
                z=orientation.z,
                w=orientation.w).yaw_pitch_roll

            self.estimated_poses_df = self.estimated_poses_df.append(
                {
                    't':
                    nanoseconds_to_seconds(
                        Time.from_msg(transform_msg.header.stamp).nanoseconds),
                    'x':
                    transform_msg.transform.translation.x,
                    'y':
                    transform_msg.transform.translation.y,
                    'theta':
                    theta
                },
                ignore_index=True)

        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            pass
Example #6
    def ground_truth_pose_callback(self, odometry_msg: nav_msgs.msg.Odometry):
        self.latest_ground_truth_pose_msg = odometry_msg
        if not self.run_started:
            return

        orientation = odometry_msg.pose.pose.orientation
        theta, _, _ = pyquaternion.Quaternion(x=orientation.x,
                                              y=orientation.y,
                                              z=orientation.z,
                                              w=orientation.w).yaw_pitch_roll

        self.ground_truth_poses_df = self.ground_truth_poses_df.append(
            {
                't':
                nanoseconds_to_seconds(
                    Time.from_msg(odometry_msg.header.stamp).nanoseconds),
                'x':
                odometry_msg.pose.pose.position.x,
                'y':
                odometry_msg.pose.pose.position.y,
                'theta':
                theta,
                'v_x':
                odometry_msg.twist.twist.linear.x,
                'v_y':
                odometry_msg.twist.twist.linear.y,
                'v_theta':
                odometry_msg.twist.twist.angular.z,
            },
            ignore_index=True)
Example #7
def read_data_frames(bag_view: BagView,
                     field_dict: Dict[str, List[str]],
                     auto_stamp=True):
    data = {}
    field_types = {}
    topics = dict(bag_view.topics())
    for (topic, fields) in field_dict.items():
        data[topic] = {}
        field_types[topic] = {}
        msg_type = topics[topic]
        fields_and_types = msg_type.get_fields_and_field_types()
        if ('header.stamp' not in fields and auto_stamp
                and 'header' in fields_and_types
                and fields_and_types['header'] == 'std_msgs/Header'):
            fields = fields.copy()  # ensure not to mutate the input
            fields.append('header.stamp')
        for field in fields:
            field_types[topic][field] = _field_type(msg_type, field)
            data[topic][field] = []
    for (topic, msg, _) in bag_view:
        for field, field_type in field_types[topic].items():
            value = _rgetattr(msg, field)
            if field_type == 'builtin_interfaces/Time':
                ns = Time.from_msg(value).nanoseconds
                value = pd.to_datetime(ns, origin='unix', unit='ns')
            data[topic][field].append(value)
    return {
        topic: pd.DataFrame(data_dict)
        for topic, data_dict in data.items()
    }
Example #8
    def add(self, msg):
        if not hasattr(msg, 'header') or not hasattr(msg.header, 'stamp'):
            if not self.allow_headerless:
                msg_filters_logger = rclpy.logging.get_logger(
                    'message_filters_cache')
                msg_filters_logger.set_level(LoggingSeverity.INFO)
                msg_filters_logger.warn("can not use message filters messages "
                                        "without timestamp infomation when "
                                        "'allow_headerless' is disabled. "
                                        "auto assign ROSTIME to headerless "
                                        "messages once enabling constructor "
                                        "option of 'allow_headerless'.")

                return

            stamp = ROSClock().now()
        else:
            stamp = msg.header.stamp
            if not hasattr(stamp, 'nanoseconds'):
                stamp = Time.from_msg(stamp)
        # Append the message and its stamp to the cache
        self.cache_times.append(stamp)
        self.cache_msgs.append(msg)

        # Implement a ring buffer, discard older if oversized
        if (len(self.cache_msgs) > self.cache_size):
            del self.cache_msgs[0]
            del self.cache_times[0]

        # Signal new input
        self.signalMessage(msg)
Example #9
 def cb(self, msg: JointState):
     """Save the latest published movement values with corresponding timestamp."""
     self._timestamp = Time.from_msg(msg.header.stamp)
     self._joint_names = msg.name
     self._position = msg.position
     self._velocity = msg.velocity
     self._effort = msg.effort
Example #10
 def _message_callback(self, message):
     """Log when a message is received."""
     now = self.get_clock().now()
     diff = now - Time.from_msg(message.header.stamp)
     self.get_logger().info(
         f'I heard an Image. Message single trip latency: [{diff.nanoseconds}]\n---'
     )
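The subtraction above yields an rclpy.duration.Duration, whose value is exposed in nanoseconds; several of the later examples turn that into milliseconds (dividing by 1e6) or seconds (dividing by 1e9). A small sketch of the conversion, again with made-up stamps:

from builtin_interfaces.msg import Time as TimeMsg
from rclpy.time import Time

# hypothetical stamps: when the message was created and when it was received
sent = TimeMsg(sec=10, nanosec=200000000)
received = TimeMsg(sec=10, nanosec=450000000)

latency = Time.from_msg(received) - Time.from_msg(sent)
print(latency.nanoseconds / 1e6)  # 250.0 ms
print(latency.nanoseconds / 1e9)  # 0.25 s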
Example #11
    def add(self, msg, my_queue, my_queue_index=None):
        if not hasattr(msg, 'header') or not hasattr(msg.header, 'stamp'):
            if not self.allow_headerless:
                msg_filters_logger = rclpy.logging.get_logger(
                    'message_filters_approx')
                msg_filters_logger.set_level(LoggingSeverity.INFO)
                msg_filters_logger.warn(
                    "cannot use message filters for "
                    "messages without timestamp information when "
                    "'allow_headerless' is disabled; enable the "
                    "'allow_headerless' constructor option to "
                    "automatically assign ROS time to headerless "
                    "messages.")
                return

            stamp = ROSClock().now()
        else:
            stamp = msg.header.stamp
            if not hasattr(stamp, 'nanoseconds'):
                stamp = Time.from_msg(stamp)
            # print(stamp)
        self.lock.acquire()
        my_queue[stamp.nanoseconds] = msg
        while len(my_queue) > self.queue_size:
            del my_queue[min(my_queue)]
        # self.queues = [topic_0 {stamp: msg}, topic_1 {stamp: msg}, ...]
        if my_queue_index is None:
            search_queues = self.queues
        else:
            search_queues = self.queues[:my_queue_index] + \
                self.queues[my_queue_index+1:]
        # sort and leave only reasonable stamps for synchronization
        stamps = []
        for queue in search_queues:
            topic_stamps = []
            for s in queue:
                stamp_delta = Duration(nanoseconds=abs(s - stamp.nanoseconds))
                if stamp_delta > self.slop:
                    continue  # far over the slop
                topic_stamps.append(
                    ((Time(nanoseconds=s,
                           clock_type=stamp.clock_type)), stamp_delta))
            if not topic_stamps:
                self.lock.release()
                return
            topic_stamps = sorted(topic_stamps, key=lambda x: x[1])
            stamps.append(topic_stamps)
        for vv in itertools.product(*[list(zip(*s))[0] for s in stamps]):
            vv = list(vv)
            # insert the new message
            if my_queue_index is not None:
                vv.insert(my_queue_index, stamp)
            qt = list(zip(self.queues, vv))
            if (((max(vv) - min(vv)) < self.slop) and
                (len([1 for q, t in qt if t.nanoseconds not in q]) == 0)):
                msgs = [q[t.nanoseconds] for q, t in qt]
                self.signalMessage(*msgs)
                for q, t in qt:
                    del q[t.nanoseconds]
                break  # fast finish after the synchronization
        self.lock.release()
Example #12
 def _task(self):
     if len(self._queue) > 0:
         # print("*"*5, len(self._queue))
         msg = self._queue.pop(0)
         # self._count_queue.append(msg)
         self.count += 1
         self.count_rate += 1
         time_diff_ms = (
             self._stream_output_port.parent.get_clock().now().nanoseconds -
             Time.from_msg(msg.header.stamp).nanoseconds) / 1000000
         if msg.freshness > 0 and msg.freshness < time_diff_ms:
             self.count_fresh += 1
             # raise self.exception('{}ms exceeded(constraint: {}ms, cur: {}ms'.format(time_diff_ms - msg.freshness, msg.freshness, time_diff_ms))
             self._task()
             return
         tlqkf1 = time.time()
         self.count_publish += 1
         # print("rate publish: ", tlqkf1 - self.tlqkf1_prev)
         self._publisher.publish(msg)
         self.tlqkf1_prev = tlqkf1
     elif self._last_sent_item:
         self.count += 1
         msg = self._last_sent_item
     else:
         return
     self._last_sent_item = msg
Example #13
    def callback_delay(self, msg):
        """
        Calculate delay time.

        :param msg: Message instance
        """
        if not hasattr(msg, 'header'):
            raise RuntimeError('msg does not have header')
        with self.lock:
            curr_rostime = self._clock.now()

            # time reset
            if curr_rostime.nanoseconds == 0:
                if len(self.delays) > 0:
                    print('time has reset, resetting counters')
                    self.delays = []
                return

            curr = curr_rostime.nanoseconds
            if self.msg_t0 < 0 or self.msg_t0 > curr:
                self.msg_t0 = curr
                self.msg_tn = curr
                self.delays = []
            else:
                # store the duration nanoseconds in self.delays
                duration = (curr_rostime - Time.from_msg(msg.header.stamp))
                self.delays.append(duration.nanoseconds)
                self.msg_tn = curr

            if len(self.delays) > self.window_size:
                self.delays.pop(0)
Example #14
 def _check_mode_and_execute_callback(self, msg):
     if self.parent.mode == self.parent.get_current_mode():
         # self.parent.get_logger().info(msg.body)
         # self.parent.get_logger().info(msg.header.frame_id)
         if msg.freshness:
             # self.parent.get_logger().info("freshness: {}".format(msg.freshness))
             time_exec_ms = (
                 self.parent.get_clock().now().nanoseconds -
                 Time.from_msg(msg.header.stamp).nanoseconds) / 1000000
             # self.parent.get_logger().info("time_exec: {}".format(time_exec_ms))
             if time_exec_ms > msg.freshness:
                 self.parent.get_logger().warn(
                     '{}ms exceeded (constraint: {}ms, cur: {}ms)'.format(
                         time_exec_ms - msg.freshness, msg.freshness,
                         time_exec_ms))
         self.msg_list.append(msg)
         msg_decoded = json.loads(msg.body)
         msg_converted = convert_dictionary_to_ros_message(
             self._msg_type, msg_decoded)
         if self._args:
             self._callback(msg, self._args[0])
         else:
             if self.from_fusion:
                 # self.parent.get_logger().info("{}".format(msg_converted))
                 msg_obj = self.FusionedObj(json.loads(msg_converted.data))
                 self._callback(msg_obj)
             else:
                 self._callback(msg_converted)
         self.msg_list.pop(0)
     else:
         pass
Example #15
    def update_state(self, msg: AsebaEvent) -> None:

        if self.state == CalibrationState.waiting_button:
            return

        if self.state == CalibrationState.waiting_ground:
            self.ths = [d - self.gap for d in msg.data]
            self.state = CalibrationState.running
            return

        if self.motor_speed == 0:
            return
        line = self.line
        if self.motor == 'right':
            v = msg.data[0]
            th = self.ths[0]
        else:
            v = msg.data[1]
            th = self.ths[1]
        self.get_logger().info(f'update_state {line.state} {v} {th} -> ?')
        stamp = Time.from_msg(msg.stamp).nanoseconds * 1e-9
        if v > th and line.state == LineState.UNKNOWN:
            line.state = LineState.OFF
        if v < th and line.state == LineState.OFF:
            line.state = LineState.IN
            line.in_at_time = stamp
        if v > th and line.state == LineState.IN:
            line.state = LineState.OFF
            pick = (stamp + line.in_at_time) / 2
            self.get_logger().info(f"Line after {pick} s")
            self.add_pick(pick)
Example #16
    def lookup_transform(
        self,
        target_frame: str,
        source_frame: str,
        time: Time,
        timeout: Duration = Duration()
    ) -> TransformStamped:
        """
        Get the transform from the source frame to the target frame.

        :param target_frame: Name of the frame to transform into.
        :param source_frame: Name of the input frame.
        :param time: The time at which to get the transform (0 will get the latest).
        :param timeout: Time to wait for the target frame to become available.
        :return: The transform between the frames.
        """
        if isinstance(time, builtin_interfaces.msg.Time):
            source_time = Time.from_msg(time)
            warnings.warn(
                'Passing a builtin_interfaces.msg.Time argument is deprecated, and will be removed in the near future. '
                'Use rclpy.time.Time instead.')
        elif isinstance(time, Time):
            source_time = time
        else:
            raise TypeError('Must pass a rclpy.time.Time object.')

        goal = LookupTransform.Goal()
        goal.target_frame = target_frame
        goal.source_frame = source_frame
        goal.source_time = source_time.to_msg()
        goal.timeout = timeout.to_msg()
        goal.advanced = False

        return self.__process_goal(goal)
Example #17
 def process(self, args, msgs):
     idx = 0
     with open(args.out, 'w') as f:
         for _, img_msg, stamp in msgs:
             if args.header:
                 stamp = Time.from_msg(img_msg.header.stamp).nanoseconds
             f.write(f'{str(idx).zfill(8)},{stamp}\n')
             idx += 1
Example #18
    def test_time_message_conversions_big_nanoseconds(self):
        time1 = Time(nanoseconds=1553575413247045598, clock_type=ClockType.ROS_TIME)
        builtins_msg = Builtins()
        builtins_msg.time_value = time1.to_msg()

        # Default clock type resulting from from_msg will be ROS time
        time2 = Time.from_msg(builtins_msg.time_value)
        assert isinstance(time2, Time)
        assert time1 == time2
Example #19
    def _is_valid_data(self, index_list):
        i = 0
        cur_data_list = []
        for queue in self.fusion_operator.queues_for_input_ports.values():
            if index_list[i] is not None:
                cur_data_list.append(queue[index_list[i]])
            i = i + 1
        if len(cur_data_list) < 2:
            return False

        # every pair of inputs must be within the correlation constraint
        for data in cur_data_list:
            for data2 in cur_data_list:
                if data == data2:
                    continue
                time_diff_ms = abs(
                    Time.from_msg(data.header.stamp).nanoseconds -
                    Time.from_msg(data2.header.stamp).nanoseconds) / 1000000
                if time_diff_ms > self.fusion_operator.fusion_rule.correlation_constraint:
                    return False
        return True
Example #20
    def _check_and_fusion(self, msg, channel):
        # print("=====================================")
        # print("check_and_fusion: ", channel)
        try:
            msg_decoded = json.loads(msg.body)
            msg_converted = convert_dictionary_to_ros_message(
                self._stream_input_ports[channel][0].get_msg_type(),
                msg_decoded)
            for c, queue in self._queues_for_each_input_port.items():
                index = 0
                new_queue = []
                for item in queue:
                    time_exec_ms = (self.get_clock().now().nanoseconds -
                                    item["time"]) / 1000000
                    if item["freshness"] == None or item[
                            "freshness"] == 0 or item[
                                "freshness"] > time_exec_ms:
                        new_queue.append(item)
                    else:
                        self.get_logger.warn(
                            '{}ms exceeded(constraint: {}ms, cur: {}ms'.format(
                                time_exec_ms - item["freshness"],
                                item["freshness"], time_exec_ms))
                    index = index + 1
                self._queues_for_each_input_port[c] = new_queue
            self._queues_for_each_input_port[channel].append({
                "message":
                msg_converted,
                "time":
                Time.from_msg(msg.header.stamp).nanoseconds,
                "freshness":
                msg.freshness
            })
            valid_input_data = self._find_valid_input_data(
                self._fusion_rule, self._queues_for_each_input_port)
            if valid_input_data:
                # print(valid_input_data)
                data_encoded = json.dumps(valid_input_data)

            else:
                # print("==============empty data===============")
                return
                # empty_input_data = {}
                # for c in self._queues_for_each_input_port.keys():
                #     empty_input_data[c] = None
                # data_encoded = json.dumps(empty_input_data)

            new_msg = String()
            new_msg.data = data_encoded
            for port in self._stream_output_ports:
                port.write(new_msg)
            # print("================{}================".format(valid_input_data))
        except Exception as e:
            pass
Example #21
    def _velocity(self, prev_pose, current_pose, prev_stamp, current_stamp):
        # Compute the velocity from the difference between consecutive poses

        velocity = Pose2D()
        diff_time_stamp = Time.from_msg(current_stamp) - Time.from_msg(
            prev_stamp)
        diff_time_secs = diff_time_stamp.nanoseconds / 1000000000

        if diff_time_secs > 0:
            diff_pose = Pose2D()
            diff_pose.x = current_pose.x - prev_pose.x
            diff_pose.y = current_pose.y - prev_pose.y
            diff_pose.theta = self._angle_normalize(current_pose.theta -
                                                    prev_pose.theta)

            velocity.x = diff_pose.x / diff_time_secs
            velocity.y = diff_pose.y / diff_time_secs
            velocity.theta = diff_pose.theta / diff_time_secs

        return velocity
Example #22
    def process(self, args, odometry_iter):
        with open(args.out, 'w') as f:
            fmt = '{0:.' + str(args.precision) + 'f}'

            # if the input is a geo pose it is UTM-projected, and we need to detect
            # (zone, band) changes from message to message
            previous_utm_zone_band = None

            for topic_name, msg, _ in odometry_iter:
                if isinstance(msg, Odometry):
                    pos = msg.pose.pose.position
                    ori = msg.pose.pose.orientation
                elif isinstance(msg, NavSatFix):
                    utm_point = fromLatLong(msg.latitude, msg.longitude,
                                            msg.altitude)
                    pos = Vector3()
                    pos.x = utm_point.easting
                    pos.y = utm_point.northing
                    pos.z = utm_point.altitude
                    utm_lattice_coordinate = (utm_point.zone, utm_point.band)
                    if previous_utm_zone_band and previous_utm_zone_band != utm_lattice_coordinate:
                        err_msg = f'UTM (zone, band) changes between messages in topic {topic_name}'
                        raise ExporterError(err_msg)
                    previous_utm_zone_band = utm_lattice_coordinate
                    # orientation is unit for NavSatFix
                    ori = Quaternion()
                else:
                    exp_clz = self.__class__.__name__
                    msg_clz = msg.__class__.__name__
                    raise TypeError(
                        f'{exp_clz} can not export messages of type {msg_clz}')

                t_ros = Time.from_msg(msg.header.stamp)
                t_sec = t_ros.nanoseconds / 1e9

                f.write(str(t_sec))
                f.write(' ')
                f.write(fmt.format(pos.x))
                f.write(' ')
                f.write(fmt.format(pos.y))
                f.write(' ')
                f.write(fmt.format(pos.z))
                f.write(' ')
                f.write(fmt.format(ori.x))
                f.write(' ')
                f.write(fmt.format(ori.y))
                f.write(' ')
                f.write(fmt.format(ori.z))
                f.write(' ')
                f.write(fmt.format(ori.w))
                f.write('\n')
Example #23
    def execute_callback(self, goal_handle):
        response = LookupTransform.Result()
        response.transform = TransformStamped()
        response.error = TF2Error()

        try:
            if not goal_handle.request.advanced:
                transform = self.buffer_core.lookup_transform_core(
                    target_frame=goal_handle.request.target_frame,
                    source_frame=goal_handle.request.source_frame,
                    time=Time.from_msg(goal_handle.request.source_time))
            else:
                transform = self.buffer_core.lookup_transform_full_core(
                    target_frame=goal_handle.request.target_frame,
                    target_time=Time.from_msg(goal_handle.request.target_time),
                    source_frame=goal_handle.request.source_frame,
                    source_time=Time.from_msg(goal_handle.request.source_time),
                    fixed_frame=goal_handle.request.fixed_frame)
            response.transform = transform
        except LookupException as e:
            response.error.error = TF2Error.LOOKUP_ERROR

        return response
Example #24
    def _extract_robot_pose(self, color, detection_robots, time_stamp):
        # Extract robot poses from the detections.
        # If a robot is detected multiple times, use the average pose.
        detections = [[] for i in range(len(self._robot_info[color]))]

        # Group detections into a list per robot ID
        for robot in detection_robots:
            robot_id = robot.robot_id
            detections[robot_id].append(robot)

        for robot_id, robots in enumerate(detections):
            if robots:
                average_pose = self._average_pose(robots)
                velocity = self._velocity(
                    self._robot_info[color][robot_id].pose, average_pose,
                    self._robot_info[color][robot_id].detection_stamp,
                    time_stamp)

                self._robot_info[color][robot_id].robot_id = robot_id
                self._robot_info[color][robot_id].pose = average_pose
                self._robot_info[color][
                    robot_id].last_detection_pose = average_pose
                self._robot_info[color][robot_id].velocity = velocity
                self._robot_info[color][robot_id].detected = True
                self._robot_info[color][robot_id].detection_stamp = time_stamp
                self._robot_info[color][robot_id].disappeared = False
            else:
                self._robot_info[color][robot_id].detected = False
                self._robot_info[color][robot_id].robot_id = robot_id

                # If no detection was received, extrapolate the pose linearly using the last velocity
                if self._robot_info[color][robot_id].disappeared is False:
                    duration = Clock(clock_type=ClockType.ROS_TIME).now() - \
                        Time.from_msg(self._robot_info[color][robot_id].detection_stamp)
                    diff_time_secs = duration.nanoseconds / 1000000000

                    self._robot_info[color][robot_id].pose = self._estimate(
                        self._robot_info[color][robot_id].last_detection_pose,
                        self._robot_info[color][robot_id].velocity,
                        diff_time_secs)

                    # If no detection has arrived for a while, mark the robot as disappeared
                    if diff_time_secs > self._DISAPPERED_TIME_THRESH:
                        self._robot_info[color][robot_id].disappeared = True

        if self._PUBLISH_ROBOT[color]:
            for robot_id in range(len(self._pubs_robot_info[color])):
                self._pubs_robot_info[color][robot_id].publish(
                    self._robot_info[color][robot_id])
Example #25
 def add(self, msg, my_queue, my_queue_index=None):
     self.lock.acquire()
     stamp = Time.from_msg(msg.header.stamp)
     my_queue[stamp.nanoseconds] = msg
     while len(my_queue) > self.queue_size:
         del my_queue[min(my_queue)]
     # common is the set of timestamps that occur in all queues
     common = reduce(set.intersection, [set(q) for q in self.queues])
     for t in sorted(common):
         # msgs is list of msgs (one from each queue) with stamp t
         msgs = [q[t] for q in self.queues]
         self.signalMessage(*msgs)
         for q in self.queues:
             del q[t]
     self.lock.release()
Example #26
 def _task(self):
     if len(self._queue) > 0:
         msg = self._queue.pop(0)
         time_diff_ms = (self._stream_output_port.component.get_clock().now(
         ).nanoseconds - Time.from_msg(
             msg.header.stamp).nanoseconds) / 1000000
         if msg.freshness > 0 and msg.freshness < time_diff_ms:
             raise self.exception(
                 '{}ms exceeded (constraint: {}ms, cur: {}ms)'.format(
                     time_diff_ms - msg.freshness, msg.freshness,
                     time_diff_ms))
             self._task()
             return
     elif self._last_sent_item:
         msg = self._last_sent_item
     else:
         return
     self._last_sent_item = msg
     self._publisher.publish(msg)
Example #27
    def estimated_pose_correction_callback(
            self, pose_with_covariance_msg: geometry_msgs.msg.
        PoseWithCovarianceStamped):
        if not self.run_started:
            return

        orientation = pose_with_covariance_msg.pose.pose.orientation
        theta, _, _ = pyquaternion.Quaternion(x=orientation.x,
                                              y=orientation.y,
                                              z=orientation.z,
                                              w=orientation.w).yaw_pitch_roll
        covariance_mat = np.array(
            pose_with_covariance_msg.pose.covariance).reshape(6, 6)

        self.estimated_correction_poses_df = self.estimated_correction_poses_df.append(
            {
                't':
                nanoseconds_to_seconds(
                    Time.from_msg(
                        pose_with_covariance_msg.header.stamp).nanoseconds),
                'x':
                pose_with_covariance_msg.pose.pose.position.x,
                'y':
                pose_with_covariance_msg.pose.pose.position.y,
                'theta':
                theta,
                'cov_x_x':
                covariance_mat[0, 0],
                'cov_x_y':
                covariance_mat[0, 1],
                'cov_y_y':
                covariance_mat[1, 1],
                'cov_theta_theta':
                covariance_mat[5, 5]
            },
            ignore_index=True)
Example #28
    def _extract_ball_pose(self, detection_balls, time_stamp):
        # Extract the ball position from the detections.
        # If the ball is detected multiple times, use the average position.
        if detection_balls:
            average_pose = self._average_pose(detection_balls)
            velocity = self._velocity(self._ball_info.pose, average_pose,
                                      self._ball_info.detection_stamp,
                                      time_stamp)

            self._ball_info.pose = average_pose
            self._ball_info.last_detection_pose = average_pose
            self._ball_info.velocity = velocity
            self._ball_info.detected = True
            self._ball_info.detection_stamp = time_stamp
            self._ball_info.disappeared = False
        else:
            self._ball_info.detected = False

            if self._ball_info.disappeared is False:
                # If no detection was received, extrapolate the position linearly using the last velocity

                diff_time_stamp = Clock(
                    clock_type=ClockType.ROS_TIME).now() - Time.from_msg(
                        self._ball_info.detection_stamp)
                diff_time_secs = diff_time_stamp.nanoseconds / 1000000000

                self._ball_info.pose = self._estimate(
                    self._ball_info.last_detection_pose,
                    self._ball_info.velocity, diff_time_secs)

                # If no detection has arrived for a while, mark the ball as disappeared
                if diff_time_secs > self._DISAPPERED_TIME_THRESH:
                    self._ball_info.disappeared = True

        if self._PUBLISH_BALL:
            self._pub_ball_info.publish(self._ball_info)
Example #29
 def clock_callback(self, msg):
     # Cache the last message in case a new clock is attached.
     time_from_msg = Time.from_msg(msg.clock)
     self._last_time_set = time_from_msg
     for clock in self._associated_clocks:
         clock.set_ros_time_override(time_from_msg)
Example #30
    def main(self, *, args):  # noqa: D102
        if not os.path.exists(args.bag_file):
            return print_error("bag file '{}' does not exist!".format(
                args.bag_file))

        if not args.topic:
            args.topic = []

        reader = SequentialReader()
        in_storage_options = StorageOptions(uri=args.bag_file,
                                            storage_id=args.storage)
        in_converter_options = ConverterOptions(
            input_serialization_format=args.serialization_format,
            output_serialization_format=args.serialization_format)
        reader.open(in_storage_options, in_converter_options)

        info = Info()
        metadata = info.read_metadata(args.bag_file, args.storage)
        message_counts = {}
        for entry in metadata.topics_with_message_count:
            message_counts[entry.topic_metadata.name] = entry.message_count
        bag_duration_s = metadata.duration.total_seconds()

        type_name_to_type_map = {}
        topic_to_type_map = {}
        summaries = {}

        for topic_metadata in reader.get_all_topics_and_types():
            if args.topic and topic_metadata.name not in args.topic:
                continue
            if topic_metadata.type not in type_name_to_type_map:
                try:
                    type_name_to_type_map[topic_metadata.type] = get_message(
                        topic_metadata.type)
                except (AttributeError, ModuleNotFoundError, ValueError):
                    raise RuntimeError(
                        f"Cannot load message type '{topic_metadata.type}'")
            topic_to_type_map[topic_metadata.name] = type_name_to_type_map[
                topic_metadata.type]
            summaries[topic_metadata.name] = {
                'frame_ids': set(),
                'write_delays_ns': [],
                'custom': default_summary_output(topic_metadata.type)
            }

        reader.set_filter(StorageFilter(topics=args.topic))

        progress = ProgressTracker()
        if args.progress:
            progress.add_estimated_work(metadata, 1.0)

        while reader.has_next():
            (topic, data, t) = reader.read_next()
            msg_type = topic_to_type_map[topic]
            msg = deserialize_message(data, msg_type)
            for custom in summaries[topic]['custom']:
                custom.update(msg)
            if hasattr(msg, 'header'):
                summaries[topic]['frame_ids'].add(msg.header.frame_id)
                delay = t - Time.from_msg(msg.header.stamp).nanoseconds
                summaries[topic]['write_delays_ns'].append(delay)
            if args.progress:
                progress.print_update(progress.update(topic), every=100)

        if args.progress:
            progress.print_finish()

        for topic, summary in summaries.items():
            print(topic)
            if not message_counts[topic]:
                print('\tNo messages')
                continue
            frame_id_str = ', '.join(summary['frame_ids'])
            print(f'\tframe_id: {frame_id_str}')
            freq = message_counts[topic] / bag_duration_s
            print(f'\tfrequency: {freq:.2f} hz')
            if summary['write_delays_ns']:
                # only messages with header.stamp have delays
                write_delays = np.array(summary['write_delays_ns'])
                delay_ms_mean = np.mean(write_delays) / 1000 / 1000
                delay_ms_stddev = np.std(write_delays) / 1000 / 1000
                print(
                    f'\twrite delay: {delay_ms_mean:.2f}ms (stddev {delay_ms_stddev:.2f})'
                )
                for custom in summaries[topic]['custom']:
                    custom.write()
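Taken together, most of the snippets reduce to one recurring pattern: convert msg.header.stamp with Time.from_msg and compare it against the node's clock. The following is a minimal, hypothetical node that puts the pattern together; the node name, topic and message type are placeholders, not taken from the examples above:

import rclpy
from rclpy.node import Node
from rclpy.time import Time
from sensor_msgs.msg import Imu  # placeholder message type with a std_msgs/Header


class StampAgeLogger(Node):

    def __init__(self):
        super().__init__('stamp_age_logger')
        self.create_subscription(Imu, 'imu', self._callback, 10)

    def _callback(self, msg):
        # message age according to this node's clock (both sides use ROS time)
        age = self.get_clock().now() - Time.from_msg(msg.header.stamp)
        self.get_logger().info(f'message age: {age.nanoseconds / 1e6:.2f} ms')


def main():
    rclpy.init()
    rclpy.spin(StampAgeLogger())
    rclpy.shutdown()


if __name__ == '__main__':
    main()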