Code example #1
0
File: bag_to_clock.py  Project: ganechean/exploration
def bag_to_clock(filename='clock.bag'):
    """Extract the /clock topic from a bag file.

    Args:
        filename: path to the rosbag file (default 'clock.bag').

    Returns:
        numpy array with one [receive_time_sec, clock_time_sec] row per
        /clock message.
    """
    bagfile = Bag(filename)

    clock = []

    try:
        for topic, msg, timestamp in bagfile.read_messages():
            if topic == '/clock':
                clock.append([timestamp.to_sec(), msg.clock.to_sec()])
    finally:
        # Fix: the original never closed the bag (cf. bag_to_power, which
        # does); close it even if message parsing raises.
        bagfile.close()

    return numpy.array(clock)
Code example #2
0
def bag_to_power(filename='power.bag'):
    """Build a total-power time series from per-topic energy readings.

    Every message on a topic whose name contains 'energy' updates that
    topic's latest reading; each update appends one
    [receive_time_sec, sum_of_latest_readings] sample.
    """
    bagfile = Bag(filename)

    latest_energy = {}
    samples = []

    for topic, msg, stamp in bagfile.read_messages():
        if 'energy' in topic:
            latest_energy[topic] = msg.data
            samples.append([stamp.to_sec(), sum(latest_energy.values())])

    bagfile.close()

    return numpy.array(samples)
Code example #3
0
def bag_to_csv(filename):
  """Write one CSV row per message for every topic known to init_topics().

  init_topics(filename) is expected to return a mapping
  {topic: {'formatter': callable, 'file': open file}}; every message on a
  mapped topic is run through its formatter and appended to that topic's
  file as a comma-separated line.
  """
  bagfile = Bag(filename)
  topics = init_topics(filename)

  try:
    start_time = bagfile.get_start_time()
    for topic, msg, timestamp in bagfile.read_messages(topics=topics.keys()):
      formatter = topics[topic]['formatter']
      data_line = formatter((topic, msg, timestamp, start_time))
      topics[topic]['file'].write(', '.join(map(str, data_line)) + '\n')
  finally:
    # Fix: close the per-topic files and the bag even when a formatter
    # raises; the original leaked all of them on error.
    for entry in topics.values():
      entry['file'].close()
    bagfile.close()
Code example #4
0
def bag_to_blobs(filename):
    """Pair up single-blob detections from two robots and collect /tf poses.

    Blob-list messages holding exactly one blob are buffered until a message
    with a header stamp within 1 ms arrives for the other robot index; each
    such pair becomes one row of *blobs*. Every transform seen on /tf becomes
    one row of *poses*.

    Returns:
        blobs: list of [stamp, x0, y0, x1, y1] rows for matched blob pairs.
        poses: list of [stamp, base_frame, child_frame, [x, y, z],
            [qx, qy, qz, qw]] rows, one per /tf transform.
    """
    bagfile = Bag(filename)

    blobs = []
    b_time = None  # header stamp of the buffered, still-unpaired blob message
    curr_blobs = [None, None]  # per-robot-index [x, y], awaiting a partner
    poses = []
    start_time = bagfile.get_start_time()  # NOTE(review): unused below

    for topic, msg, timestamp in bagfile.read_messages():
        if topic[-9:] == 'blob_list':
            # Only unambiguous (single-blob) detections are considered.
            if len(msg.blobs) == 1:
                m_time = msg.header.stamp.to_sec()
                # Robot index is taken from the character 20 places from the
                # end of the topic name -- assumes a fixed topic naming
                # scheme; TODO confirm against the publisher.
                r_idx = int(topic[-20])
                blob = [msg.blobs[0].x, msg.blobs[0].y]
                #blobs.append([m_time,r_idx] + blob)

                if b_time is None:
                    # Nothing buffered yet: open a new pairing window.
                    b_time = m_time
                    curr_blobs[r_idx] = blob

                elif abs(b_time - m_time) < 0.001:
                    # Stamps agree within 1 ms: emit the completed pair.
                    # NOTE(review): if the same robot reported twice inside
                    # the window, the other slot is still None and the
                    # concatenation below would raise TypeError -- confirm
                    # messages alternate between robot indices.
                    curr_blobs[r_idx] = blob
                    blobs.append([b_time] + curr_blobs[0] + curr_blobs[1])
                    b_time = None
                    curr_blobs = [None, None]

                else:
                    # Too far apart: discard the buffered blob and restart
                    # the window with the new message.
                    b_time = m_time
                    curr_blobs = [None, None]
                    curr_blobs[r_idx] = blob

        elif topic == '/tf':
            # Flatten every transform in the /tf message into a pose row.
            for tf_msg in msg.transforms:
                p_time = tf_msg.header.stamp.to_sec()
                base_frame = tf_msg.header.frame_id
                child_frame = tf_msg.child_frame_id
                pos = tf_msg.transform.translation
                pos = [pos.x, pos.y, pos.z]
                ori = tf_msg.transform.rotation
                ori = [ori.x, ori.y, ori.z, ori.w]
                poses.append([p_time, base_frame, child_frame, pos, ori])

    bagfile.close()

    return blobs, poses
Code example #5
0
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="save data from bag file")
    parser.add_argument("--bag", required=True, help="Path to bag file")
    parser.add_argument("--output", default="bag_output", help="directory for data")
    parser.add_argument("--blob-topic", dest="blob_topic",
                        default="/gatlin/objectscreencoords",
                        help="topic for blobs")
    parser.add_argument("--image-topic", dest="image_topic",
                        default="/camera/rgb/image_rect_color_throttled",
                        help="topic for images")
    parser.add_argument("--factor", default=10**7, help="Factor for decimal digits")
    parser.add_argument("--interactive", action="store_true", help="Show all images to mark blocks")
    args = parser.parse_args()

    bag = Bag(args.bag)
    messages = bag.read_messages()

    # Convert bag data to correct format
    groups = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
    blobs_to_group_later = []
    for topic, msg, time in messages:
        if topic == args.blob_topic:
            data = [msg.x, msg.y]
            blobs_to_group_later.append((data, time))
        else:
            data = np.fromstring(msg.data, np.uint8)
            data = np.reshape(data, (msg.height, msg.width, 3))
            groups[time.secs][int(time.nsecs / args.factor)][args.image_topic].append(data)

    # Match blobs with images
    for data, time in blobs_to_group_later:
Code example #6
0
class RosbagReader():
    """Export camera images, lidar frames and raw messages from a rosbag.

    Attributes:
        save_dir: pathlib.Path of the output directory (created on init).
        bag: the opened rosbag.Bag.
    """

    def __init__(self, input_bag, save_dir):
        """Open *input_bag* and ensure *save_dir* (a pathlib.Path) exists."""
        self.save_dir = save_dir
        Path.mkdir(self.save_dir, parents=True, exist_ok=True)
        self.bag = Bag(input_bag)

    def print_bag_info(self):
        """Print the bag's YAML summary (topics, types, counts, duration)."""
        # Fix: the original passed the already-open Bag object back into
        # Bag(self.bag, 'r'), which expects a filename and fails; query the
        # open bag directly. Also pass an explicit Loader, matching
        # readBagTopicList (yaml.load without a Loader is deprecated).
        info_dict = yaml.load(self.bag._get_yaml_info(), Loader=yaml.FullLoader)
        print(info_dict)

    def extract_camera_data(self):
        """Save every frame on the left color camera topic as a numbered PNG."""
        image_topic = '/camera_left/color/image_raw'
        # Fix: str.strip(".bag") removes any of the characters '.', 'b', 'a',
        # 'g' from BOTH ends of the string (e.g. "grab.bag" -> "r"); split
        # off the extension properly instead.
        bag_name = os.path.splitext(os.path.basename(self.bag.filename))[0]
        output_dir = os.path.join(self.save_dir, bag_name, 'camera')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        bridge = CvBridge()
        count = 0
        for topic, msg, t in self.bag.read_messages(topics=[image_topic]):
            cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
            cv2.imwrite(os.path.join(output_dir, "frame%06i.png" % count),
                        cv_img)
            print("Wrote image {}".format(count))
            count += 1

    def extract_lidar_frames(self, extension, topicList=None):
        """Parse front-lidar point clouds into per-frame point lists.

        NOTE(review): this version only builds pc_list and the target
        save_file path; it never writes the file -- looks truncated.

        Args:
            extension: file extension used to build each frame's file name.
            topicList: unused; kept for interface compatibility.
        """
        topic_name = '/front_lidar/velodyne_points'

        i = 0
        for topic, msg, t in self.bag.read_messages(topic_name):
            # Path where this frame would be written.
            save_file = self.save_dir / 'frame_{}.{}'.format(i, extension)
            i += 1
            if topic == '/front_lidar/velodyne_points':
                # x, y, z, intensity for every valid (non-NaN) point.
                pc_list = [[p[0], p[1], p[2], p[3]]
                           for p in pc2.read_points(msg,
                                                    skip_nans=True,
                                                    field_names=("x", "y", "z",
                                                                 "intensity"))]

    def read_bag(self, save_to_csv=True):
        """Return a dict of recorded messages keyed by topic.

        PointCloud2 messages are converted to an (N, 3) numpy array of
        x, y, z points; other messages are stored as-is.

        Args:
            save_to_csv (bool, optional): unused in this version; kept for
                interface compatibility.
        Returns:
            dict: topic name -> list of messages (or point arrays).
        """
        topics = self.readBagTopicList()
        messages = {}

        # Hard cap on the number of MESSAGES read (not topics, despite the
        # original comment) so the scan stays cheap.
        max_it_bag = 10
        it_bag = 0
        for topic, msg, _ in self.bag.read_messages(topics=topics):

            if type(msg).__name__ == '_sensor_msgs__PointCloud2':
                points = np.zeros((msg.height * msg.width, 3))
                for pp, ii in zip(
                        pc2.read_points(msg,
                                        skip_nans=True,
                                        field_names=("x", "y", "z")),
                        range(points.shape[0])):
                    points[ii, :] = [pp[0], pp[1], pp[2]]
                msg = points

            messages.setdefault(topic, []).append(msg)

            it_bag += 1
            if it_bag >= max_it_bag:
                break  # stop -- message cap reached

        return messages

    def readBagTopicList(self):
        """Return the list of topic names recorded in the bag."""
        print("[OK] Reading topics in this bag. Can take a while..")
        bag_info = yaml.load(self.bag._get_yaml_info(), Loader=yaml.FullLoader)
        topicList = [info['topic'] for info in bag_info['topics']]

        print('{0} topics found'.format(len(topicList)))
        return topicList
Code example #7
0
class RosbagReader():
    """Export camera images, lidar frames, detections and raw messages from
    a rosbag.

    Attributes:
        save_dir: pathlib.Path of the output directory (created on init).
        bag: the opened rosbag.Bag.
    """

    def __init__(self, input_bag, save_dir):
        """Open *input_bag* and ensure *save_dir* (a pathlib.Path) exists."""
        self.save_dir = save_dir
        Path.mkdir(self.save_dir, parents=True, exist_ok=True)
        self.bag = Bag(input_bag)

    def print_bag_info(self):
        """Print the bag's YAML summary (topics, types, counts, duration)."""
        # Fix: the original passed the already-open Bag object back into
        # Bag(self.bag, 'r'), which expects a filename and fails; query the
        # open bag directly. Also pass an explicit Loader, matching
        # readBagTopicList (yaml.load without a Loader is deprecated).
        info_dict = yaml.load(self.bag._get_yaml_info(), Loader=yaml.FullLoader)
        print(info_dict)

    def _bag_basename(self):
        """Return the bag's file name without directory or '.bag' extension.

        Fix for the original `filename.strip(".bag")`, which removes any of
        the characters '.', 'b', 'a', 'g' from both ends of the string
        (e.g. "grab.bag" -> "r").
        """
        return os.path.splitext(os.path.basename(self.bag.filename))[0]

    def extract_camera_data(self):
        """Save every frame on the left color camera topic as a numbered PNG."""
        image_topic = '/camera_left/color/image_raw'
        bag_name = self._bag_basename()
        output_dir = os.path.join(self.save_dir, bag_name, 'camera')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        bridge = CvBridge()
        count = 0
        for topic, msg, t in self.bag.read_messages(topics=[image_topic]):
            cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
            cv2.imwrite(os.path.join(output_dir, "frame%06i.png" % count),
                        cv_img)
            print("Wrote image {}".format(count))
            count += 1

    def extract_lidar_frames(self, extension, topicList=None):
        """Save each front-lidar point cloud to its own file.

        Args:
            extension: 'pcd' (via pcl) or 'h5' (via save_h5_basic); any
                other value parses the cloud but writes nothing.
            topicList: unused; kept for interface compatibility.
        """
        topic_name = '/front_lidar/velodyne_points'

        i = 0
        for topic, msg, t in self.bag.read_messages(topic_name):
            save_file = self.save_dir / 'frame_{}.{}'.format(i, extension)
            i += 1
            if topic == '/front_lidar/velodyne_points':
                # x, y, z, intensity for every valid (non-NaN) point.
                pc_list = [[p[0], p[1], p[2], p[3]]
                           for p in pc2.read_points(msg,
                                                    skip_nans=True,
                                                    field_names=("x", "y", "z",
                                                                 "intensity"))]

            if extension == 'pcd':
                p = pcl.PointCloud(np.array(pc_list, dtype=np.float32))
                p.to_file(save_file)
            elif extension == 'h5':
                save_h5_basic(save_file, np.array(pc_list, dtype=np.float32))
        print('---> Saved {} frames to files.'.format(i))

    def save_bag_to_csv(self, topicList=None):
        """Write one CSV file per topic with a timestamped row per point.

        Args:
            topicList: topics to export; defaults to every topic in the bag.
        """
        if not topicList:
            topicList = self.readBagTopicList()

        bag_name = self._bag_basename()
        filename = None
        for topic_name in topicList:
            # Fix: self.save_dir is a pathlib.Path, so the original
            # `self.save_dir + bag_name + ...` concatenation raised
            # TypeError; join the path components properly.
            filename = os.path.join(
                self.save_dir,
                bag_name + topic_name.replace('/', '-') + '.csv')

            # create csv file for topic data
            with open(filename, 'w+') as csvfile:
                print("Parsing data. This may take a while.")
                filewriter = csv.writer(csvfile, delimiter=',')
                flag = 1  # use flag to only write header once to csv file

                for topic, msg, t in self.bag.read_messages(topic_name):
                    # for each instance in time that has data for topicName
                    # parse data from this instance
                    row = [t]
                    if topic == '/front_lidar/velodyne_points':
                        header = [
                            "rosbagTimestamp", "x", "y", "z", "intensity",
                            "time"
                        ]
                        if flag:
                            filewriter.writerow(header)
                            flag = 0
                        for p in pc2.read_points(msg,
                                                 skip_nans=True,
                                                 field_names=("x", "y", "z",
                                                              "intensity",
                                                              "time")):
                            row += [p[0], p[1], p[2], p[3], p[4]]
                            filewriter.writerow(row)
                            row = [t]
                    else:
                        # Derive the header from the message's declared
                        # point fields.
                        field_names = [f.name for f in msg.fields]
                        header = ['rosbagTimestamp'] + field_names
                        if flag:
                            filewriter.writerow(header)
                            flag = 0
                        for p in pc2.read_points(
                                msg,
                                skip_nans=True,
                                field_names=tuple(field_names)):
                            row += list(p)
                            filewriter.writerow(row)
                            row = [t]

        print("Parsed data. Saved as {}".format(filename))

    def save_rwth_detections(self):
        """
        Saves Detections generated by RWTH Detector into separate csv files.
        """
        os.chdir(self.save_dir)
        topic_name = '/drow_detected_persons'
        # Fix: string.rstrip / string.replace are Python-2-only module
        # functions (removed from the string module in Python 3); use str
        # methods and os.path instead. Also join path components rather
        # than adding to the pathlib.Path.
        bag_name = self._bag_basename()
        filename = os.path.join(
            self.save_dir, bag_name + topic_name.replace('/', '-') + '.csv')

        # create csv file for topic data
        with open(filename, 'w+') as csvfile:
            print("Parsing data. This may take a while.")
            filewriter = csv.writer(csvfile, delimiter=',')

            header = [
                'rosbagTimestamp', 'detection_id', 'confidence', 'det_x',
                'det_y', 'det_z', 'height'
            ]
            filewriter.writerow(header)

            for _, msg, t in self.bag.read_messages(topic_name):
                for det in msg.detections:
                    row = [
                        t,
                        det.detection_id,
                        det.confidence,
                        det.pose.pose.position.x,
                        det.pose.pose.position.y,
                        det.pose.pose.position.z,
                        det.height,
                    ]
                    filewriter.writerow(row)
        print("Parsed data. Saved as {}".format(filename))

    def read_bag(self, save_to_csv=True):
        """Return a dict of recorded messages keyed by topic.

        PointCloud2 messages are converted to an (N, 3) numpy array of
        x, y, z points; other messages are stored as-is.

        Args:
            save_to_csv (bool, optional): unused in this version; kept for
                interface compatibility.
        Returns:
            dict: topic name -> list of messages (or point arrays).
        """
        topics = self.readBagTopicList()
        messages = {}

        # Hard cap on the number of MESSAGES read (not topics, despite the
        # original comment) so the scan stays cheap.
        max_it_bag = 10
        it_bag = 0
        for topic, msg, _ in self.bag.read_messages(topics=topics):

            if type(msg).__name__ == '_sensor_msgs__PointCloud2':
                points = np.zeros((msg.height * msg.width, 3))
                for pp, ii in zip(
                        pc2.read_points(msg,
                                        skip_nans=True,
                                        field_names=("x", "y", "z")),
                        range(points.shape[0])):
                    points[ii, :] = [pp[0], pp[1], pp[2]]
                msg = points

            messages.setdefault(topic, []).append(msg)

            it_bag += 1
            if it_bag >= max_it_bag:
                break  # stop -- message cap reached

        return messages

    def readBagTopicList(self):
        """Return the list of topic names recorded in the bag."""
        print("[OK] Reading topics in this bag. Can take a while..")
        bag_info = yaml.load(self.bag._get_yaml_info(), Loader=yaml.FullLoader)
        topicList = [info['topic'] for info in bag_info['topics']]

        print('{0} topics found'.format(len(topicList)))
        return topicList