Example #1
 def _assert_bag_valid(self,
                       filename,
                       topics=None,
                       start_time=None,
                       stop_time=None):
     '''
     Open the bagfile at the specified filename and read it to ensure topic limits were
     enforced and the optional topic list and start/stop times are also enforced.
     '''
     bag = Bag(filename)
     topics_dict = bag.get_type_and_topic_info()[1]
     bag_topics = set(topics_dict.keys())
     param_topics = set(self.topic_limits.keys())
     if topics:
         self.assertEqual(bag_topics, set(topics))
     self.assertTrue(bag_topics.issubset(param_topics))
     for topic in topics_dict:
         size = topics_dict[topic].message_count * 8  # stored size: each message is 8 bytes
         count = topics_dict[topic].message_count
         gen = bag.read_messages(topics=topic)
         _, _, first_time = next(gen)
         last_time = first_time  # in case the next for loop does not execute
         if start_time:
             self.assertGreaterEqual(first_time, start_time)
         for _, _, last_time in gen:  # Read through all messages so last_time is valid
             pass
         if stop_time:
             self.assertLessEqual(last_time, stop_time)
         duration = last_time - first_time
         self._assert_limits_enforced(topic, duration, size, count)
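
The first/last-timestamp pattern used above also works on its own; a minimal standalone sketch, assuming a hypothetical bag path and topic name:

from rosbag import Bag

bag = Bag('example.bag')  # hypothetical path
gen = bag.read_messages(topics='/odom')  # hypothetical topic
_, _, first_time = next(gen)
last_time = first_time  # in case the topic has a single message
for _, _, last_time in gen:
    pass
print('span: %.3f s' % (last_time - first_time).to_sec())
bag.close()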
Example #2
def stream(input_file: Bag, output_file: Bag, topics: list) -> None:
    """
    Stream data from an input bag to an output bag.

    Args:
        input_file: the input bag to stream from
        output_file: the output bag to write data to
        topics: a list of the topics to include

    Returns:
        None

    """
    included = 0
    skipped = 0
    # create a progress bar for iterating over the messages in the bag
    with tqdm(total=input_file.get_message_count(), unit='message') as prog:
        # iterate over the messages in this input bag
        for topic, msg, time in input_file:
            # check for matches between the topics filter and this topic
            if any(fnmatchcase(topic, pattern) for pattern in topics):
                # write this message to the output bag
                output_file.write(topic, msg, time)
                # increment the counter of included messages
                included += 1
            else:
                # increment the counter of excluded messages
                skipped += 1
            # update the progress bar with a single iteration
            prog.update(1)
            # update the progress bar post fix text with statistics
            prog.set_postfix(included=included, skipped=skipped)
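
A minimal driver for stream, assuming the hypothetical file names below; rosbag.Bag works as a context manager, and the patterns use the same fnmatchcase syntax as the filter inside stream:

from rosbag import Bag

with Bag('input.bag') as source, Bag('filtered.bag', 'w') as sink:
    stream(source, sink, ['/camera/*', '/imu'])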
Example #4
        def incoming_ask(ask_msg):
            rospy.loginfo('Incoming ask:\n' + str(ask_msg))
            if ask_msg.model == self.model and ask_msg.token == self.token:
                rospy.loginfo('Incoming ask with right model and token')

                with TemporaryDirectory() as tmpdir:
                    rospy.logdebug('Temporary directory created: %s', tmpdir)
                    os.chdir(tmpdir)
                    rospy.loginfo('Waiting for objective file from IPFS...')
                    try:
                        self.ipfs.get(ask_msg.objective, timeout=60)
                        rospy.logdebug('Objective is written to %s', tmpdir + '/' + ask_msg.objective)
                        bag = Bag(str(tmpdir + '/' + ask_msg.objective))
                        msg_type = list(bag.get_type_and_topic_info()[1].values())[0][0]
                        if msg_type != Mission()._type:
                            rospy.logwarn('Wrong message type in objective topic')
                            return
                    except ipfsapi.exceptions.TimeoutError:
                        rospy.logwarn('Timeout of waiting objective file: ' + str(ask_msg.objective))
                        rospy.logwarn('Skip incoming ask')
                        return

                self.make_bid(ask_msg)
            else:
                rospy.logwarn('Incoming ask with wrong model or token, skip')
Example #5
def main():

    parser = argparse.ArgumentParser(
        description=
        'Merge one or more bag files with the possibility of filtering topics.'
    )
    parser.add_argument('outputbag',
                        help='Output bag file with topics merged.')
    parser.add_argument('inputbag', nargs='+', help='Input bag files.')
    parser.add_argument('-v',
                        '--verbose',
                        action="store_true",
                        default=False,
                        help='Verbose output.')
    parser.add_argument(
        '-t',
        '--topics',
        default="*",
        help=
        'String interpreted as a list of topics (wildcards \'*\' and \'?\' allowed) to include in the merged bag file.'
    )

    args = parser.parse_args()

    topics = args.topics.split(' ')

    total_included_count = 0
    total_skipped_count = 0

    if (args.verbose):
        print("Writing bag file: " + args.outputbag)
        print("Matching topics against patters: '%s'" % ' '.join(topics))

    with Bag(args.outputbag, 'w') as o:
        for ifile in args.inputbag:
            matchedtopics = []
            included_count = 0
            skipped_count = 0
            if (args.verbose):
                print("> Reading bag file: " + ifile)
            with Bag(ifile, 'r') as ib:
                for topic, msg, t in ib:
                    if any(fnmatchcase(topic, pattern) for pattern in topics):
                        if not topic in matchedtopics:
                            matchedtopics.append(topic)
                            if (args.verbose):
                                print("Including matched topic '%s'" % topic)
                        o.write(topic, msg, t)
                        included_count += 1
                    else:
                        skipped_count += 1
            total_included_count += included_count
            total_skipped_count += skipped_count
            if (args.verbose):
                print("< Included %d messages and skipped %d" %
                      (included_count, skipped_count))

    if (args.verbose):
        print("Total: Included %d messages and skipped %d" %
              (total_included_count, total_skipped_count))
Example #6
def dump(input_file: Bag, topics: list, output_file: 'file' = None) -> None:
    """
    Dump messages from a bag.

    Args:
        input_file: the input bag to dump topics from
        topics: the topics to dump
        output_file: an optional file to dump to

    Returns:
        None

    """
    # create a progress bar for iterating over the messages in the bag
    with tqdm(total=input_file.get_message_count(
            topic_filters=topics)) as prog:
        # iterate over the messages in this input bag
        for topic, msg, _ in input_file.read_messages(topics=topics):
            # update the progress bar with a single iteration
            prog.update(1)
            # create the line to print
            line = '{} {}\n\n'.format(topic, msg)
            # print the line to the terminal
            print(line)
            # if there is an output file, write the line to it
            if output_file is not None:
                output_file.write(line)
Example #7
def get_info(bag_file, topic_filter=None):
    bag = Bag(bag_file)
    topics = bag.get_type_and_topic_info().topics
    for topic in topics:
        if topic_filter and topics[topic].msg_type not in topic_filter:
            continue
        print("{}: {} Hz".format(topic, round(topics[topic].frequency, 3)))
        print(topics[topic].message_count)
        times = np.ones(shape=bag.get_message_count(topic_filters=topic))
        n = 0
        for _, msg, t in bag.read_messages(topics=topic):
            times[n] = msg.header.stamp.to_sec()
            n += 1
        times = 1 / np.gradient(times)
        times = times[np.where((times > np.percentile(times, 10)) & (times < np.percentile(times, 90)))]
        print("mean: {}, median: {}".format(np.mean(times), np.median(times)))
        print("min: {}, max: {}".format(np.min(times), np.max(times)))
        # plt.scatter(times, np.gradient(times))
        plt.hist(times)
        plt.yscale("log")
        plt.title("{}: {}".format(os.path.basename(bag_file), topic))
        if not os.path.exists("images"):
            os.makedirs("images")
        plt.savefig(os.path.join("images/", "{}.{}.png".format(os.path.basename(bag_file), topic.replace("/", ""))))
        plt.cla()
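
get_info can be driven directly; a minimal sketch, assuming a hypothetical bag path and limiting the report to image topics (topic_filter matches on the full ROS message type name):

get_info('example.bag', topic_filter=['sensor_msgs/Image'])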
Example #8
def load_trajectory_from_bag(file, fast=True, fast_only=False):
    traj = []
    fast_traj = []

    bag = Bag(file)

    normal_topics = ['raven_state', 'raven_state/array']
    normal_topics = normal_topics + ['/' + topic for topic in normal_topics]

    fast_topic = ['raven_state/1000Hz']
    #fast_topic = fast_topic + ['/' + topic for topic in fast_topic]

    if fast_only:
        for topic, msg, t in bag.read_messages(topics=fast_topic):
            fast_traj.append(msg)
        return get_trajectory(fast_traj)

    for topic, msg, t in bag.read_messages(topics=normal_topics):
        traj.append(msg)

    if fast:
        for topic, msg, t in bag.read_messages(topics=fast_topic):
            fast_traj.append(msg)

    if fast_traj:
        return get_trajectory(fast_traj, traj)
    else:
        return get_trajectory(traj)
Example #9
def shift(
    input_file: Bag,
    output_file: Bag,
    shift_topic: str,
    shift_sec: float,
) -> None:
    """
    Shift a topic in a bag forward or backwards in time.

    Args:
        input_file: the input bag to stream from
        output_file: the output bag to write data to
        shift_topic: the topic to shift in time
        shift_sec: the amount of time to shift by

    Returns:
        None

    """
    # create a progress bar for iterating over the messages in the bag
    with tqdm(total=input_file.get_message_count()) as prog:
        # iterate over the messages in this input bag
        for topic, msg, time in input_file:
            # if the topic is the sentinel topic, increase the count
            if topic == shift_topic:
                # adjust time by the shift value
                time += rospy.Duration(shift_sec)
                # update the message header with the new time stamp
                msg.header.stamp = time
            # update the progress bar with a single iteration
            prog.update(1)
            # write the message
            output_file.write(topic, msg, time)
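
A minimal driver for shift, assuming hypothetical paths and topic; a negative shift_sec would move the topic earlier instead:

from rosbag import Bag

with Bag('input.bag') as source, Bag('shifted.bag', 'w') as sink:
    shift(source, sink, '/imu', 0.05)  # delay /imu by 50 ms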
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--bag",
                        dest="bag",
                        help="Path to ROS bag.",
                        required=True)
    parser.add_argument("--left_image_0",
                        help="Path to 'left_image_00000.png'")
    parser.add_argument("--right_image_0",
                        help="Path to 'right_image_00000.png'")
    parser.add_argument("--start_time",
                        help="A start time to check",
                        type=float,
                        default=None)
    args = parser.parse_args()

    bridge = CvBridge()
    bag = Bag(args.bag)
    t_start = bag.get_start_time()
    n_msgs = 0

    if args.left_image_0 is None and args.right_image_0 is None:
        raise ValueError(
            "You must provide either left_image_0 or right_image_0")
    isLeft = args.left_image_0 is not None
    ref_img = args.left_image_0 if isLeft else args.right_image_0
    print("Using ref image %s" % ref_img)
    ref_image_0 = cv2.imread(ref_img)[:, :, 0]
    assert ref_image_0 is not None

    topic = '/davis/left/image_raw' if isLeft else '/davis/right/image_raw'
    if args.start_time:
        topic, msg, t = next(
            bag.read_messages(topics=[topic],
                              start_time=rospy.Time(args.start_time +
                                                    t_start)))
        width = msg.width
        height = msg.height
        image = np.asarray(bridge.imgmsg_to_cv2(msg, msg.encoding))
        image = np.reshape(image, (height, width))

        print("Correct? {}".format(bool(np.all(np.isclose(image,
                                                          ref_image_0)))))

    else:
        for topic, msg, t in bag.read_messages(topics=[topic]):
            n_msgs += 1
            width = msg.width
            height = msg.height
            image = np.asarray(bridge.imgmsg_to_cv2(msg, msg.encoding))
            image = np.reshape(image, (height, width))

            if bool(np.all(np.isclose(image, ref_image_0))):
                print("Ref image found at {} ({} from start)".format(
                    t.to_sec(),
                    t.to_sec() - t_start))
                return

        print("Processed {} images, ref not found!".format(n_msgs))
Example #11
def write_bag(bag_origin, bag_write: rosbag.Bag, t_start):
    count = 0
    for topic, msg, t in bag_origin.read_messages("/tf", t_start):
        time_stamp = rospy.Time(0) + (t - t_start)
        msg.transforms[0].header.stamp = time_stamp
        bag_write.write(topic, msg, time_stamp)
        count += 1
    bag_write.close()
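
A minimal driver for write_bag, assuming hypothetical paths; t_start must be a rospy.Time, so the bag's float start time is converted first (write_bag closes the output bag itself):

import rospy
from rosbag import Bag

origin = Bag('input.bag')  # hypothetical path
t_start = rospy.Time.from_sec(origin.get_start_time())
write_bag(origin, Bag('rebased.bag', 'w'), t_start)
origin.close()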
Example #13
def merge():

	# Setting arguments
	parser = argparse.ArgumentParser(description='Merge one or more bag files with the possibility of filtering topics.')
	parser.add_argument('outputbag', help='output bag file with topics merged')
	parser.add_argument('inputbag', nargs='+', help='input bag files')
	parser.add_argument('-v', '--verbose', action="store_true", default=False, help='verbose output')
	parser.add_argument('-t', '--topics', default="*", help='string interpreted as a list of topics (wildcards \'*\' and \'?\' allowed) to include in the merged/filtered bag file')
	parser.add_argument('-nt', '--new_topic', default=False, help='store all the potentially found topics with this "new_topic" name in the merged/filtered bag file')
	args = parser.parse_args()

	# Variable initialisation
	topics = args.topics.split(' ')
	total_included_count = 0
	total_skipped_count = 0

	if (args.verbose):
		print("Writing bag file: " + args.outputbag)
		print("Matching topics against patters: '%s'" % ' '.join(topics))

	# Start the process
	with Bag(args.outputbag, 'w') as o:
		for ifile in args.inputbag:
			matchedtopics = []
			included_count = 0
			skipped_count = 0

			if (args.verbose):
				print("> Reading bag file: " + ifile)

			# Read each potentially available bag file
			with Bag(ifile, 'r') as ib:
				for topic, msg, t in ib:
					if any(fnmatchcase(topic, pattern) for pattern in topics):
						if not topic in matchedtopics:
							matchedtopics.append(topic)
							if (args.verbose):
								print("Including matched topic '%s'" % topic)

						if args.new_topic: # If required, store with new topic name
							o.write(args.new_topic, msg, t)
						else: # otherwise, continue as it is
							o.write(topic, msg, t)
						included_count += 1
					else:
						skipped_count += 1

			total_included_count += included_count
			total_skipped_count += skipped_count
			if (args.verbose):
				print("< Included %d messages and skipped %d" % (included_count, skipped_count))

	if (args.verbose):
		print("Total: Included %d messages and skipped %d" % (total_included_count, total_skipped_count))
Example #14
def RenameTopics(bag_file, location, collect_type, car_name):
    print("Renaming topics...")
    new_bag_file = bag_file + "_" + location + "_" + collect_type + "_" + car_name + ".bag"

    velodyne_name = '/vlp16_1/velodyne_points'
    new_velodyne_name = '/points_raw'

    with Bag(new_bag_file, 'w') as Y:
        for topic, msg, t in Bag(bag_file + ".bag"):
            if topic == velodyne_name:
                Y.write(new_velodyne_name, msg, t)
            else:
                Y.write(topic, msg, t)
Example #15
def remap(input_bag_path, output_bag_path):
    """
    Remaps the topics of a single bag
    :param input_bag_path: the bag with the topic that should be renamed
    :param output_bag_path: the path where the output bag should be created
    """
    with Bag(output_bag_path, 'w') as output_bag:  # create output bag
        # write all topics into the output bag and rename the specified one:
        for topic, msg, t in Bag(input_bag_path):
            if topic == '/image_raw':
                output_bag.write('/camera/image_proc', msg, t)
            else:
                output_bag.write(topic, msg, t)
Example #16
def decompress(
    input_file: Bag,
    output_file: Bag,
    camera_info: list,
    compressed_camera: list,
    raw_camera: list,
) -> None:
    """
    Decompress a compressed image into a raw image.

    Args:
        input_file: the input bag to stream from
        output_file: the output bag to write data to
        camera_info: a list of info topics for each compressed image topic
        compressed_camera: a list of compressed image topics
        raw_camera: a list of raw image topics to publish

    Returns:
        None

    """
    # get the dims for each compressed camera info topic
    dims = [get_camera_dimensions(input_file, info) for info in camera_info]
    # convert the list of dims to a dictionary of compressed topics to dims
    dims = dict(zip(compressed_camera, dims))
    # setup a dictionary mapping compressed topics to raw camera topics
    raw_camera = dict(zip(compressed_camera, raw_camera))
    # get the number of camera info topics to leave out
    camera_info_total = input_file.get_message_count(topic_filters=camera_info)
    # get the total number of output messages as total minus camera info topics
    total = input_file.get_message_count() - camera_info_total
    # create a progress bar for iterating over the messages in the output bag
    with tqdm(total=total) as prog:
        # iterate over the messages in this input bag
        for topic, msg, time in input_file:
            # don't write the camera info to the bag
            if topic in camera_info:
                continue
            # if the topic is the compress image topic, decompress the image
            if topic in compressed_camera:
                # get the image from the compressed topic
                img = get_camera_image(msg.data, dims[topic])
                # create a raw image message and replace the message
                msg = image_msg(img, time, dims[topic], 'rgb8')
                # reset the topic
                topic = raw_camera[topic]
            # update the progress bar with a single iteration
            prog.update(1)
            # write the message
            output_file.write(topic, msg, time)
Example #17
def bag_to_video(
    input_file: Bag,
    output_file: str,
    topic: str,
    fps: float = 30,
    codec: str = 'MJPG',
) -> None:
    """
    Convert a ROS bag with image topic to a video file.

    Args:
        input_file: the bag file to get image data from
        output_file: the path to an output video file to create
        topic: the topic to read image data from
        fps: the frame rate of the video
        codec: the codec to use when outputting to the video file

    Returns:
        None

    """
    # create an empty reference for the output video file
    video = None
    # get the total number of frames to write
    total = input_file.get_message_count(topic_filters=topic)
    # get an iterator for the topic with the frame data
    iterator = input_file.read_messages(topics=topic)
    # iterate over the image messages of the given topic
    for _, msg, _ in tqdm(iterator, total=total):
        # open the video file if it isn't open
        if video is None:
            # create the video codec
            codec = cv2.VideoWriter_fourcc(*codec)
            # open the output video file
            cv_dims = (msg.width, msg.height)
            video = cv2.VideoWriter(output_file, codec, fps, cv_dims)
        # read the image data into a NumPy tensor
        img = get_camera_image(msg.data, (msg.height, msg.width))
        # write the image to the video file
        video.write(img)

    # if the video file is open, close it
    if video is not None:
        video.release()

    # read the start time of the video from the bag file
    start = input_file.get_start_time()
    # set the access and modification times for the video file
    os.utime(output_file, (start, start))
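
A minimal driver for bag_to_video, assuming a hypothetical bag path and image topic; the default MJPG codec pairs well with an .avi container:

from rosbag import Bag

with Bag('drive.bag') as bag:
    bag_to_video(bag, 'drive.avi', '/camera/left/image_raw', fps=20)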
Example #18
def create_objective(period: str, stream_id: str, email: str) -> str:
    ipfs = connect()
    tempdir = gettempdir()
    os.chdir(tempdir)

    with NamedTemporaryFile(delete=False) as f:
        bag = Bag(f.name, 'w')
        bag.write('/period', String(period))
        bag.write('/stream_id', String(stream_id))
        bag.write('/email', String(email))
        bag.close()

        res = ipfs.add(f.name)
        rospy.loginfo("Hash of the objective is {}".format(res['Hash']))
        return res['Hash']
Example #19
def process_bag_file(file: Union[str, BinaryIO],
                     topic_to_cb: Mapping[str, Callable[[ROSMsgT], DataPoint]]) \
        -> DistTrace:
    """
    Process messages from bag files

    Notes
    -----
    Only the timestamp of messages from the same topic is guaranteed to be monotonically increasing.
    Be careful when merging messages from different topics.
    """
    ret_dist_trace = {topic: []
                      for topic in topic_to_cb.keys()
                      }  # type: Dict[str, List[StampedDataPoint]]
    with Bag(file) as bag:
        for topic, msg, t in bag.read_messages(topics=topic_to_cb.keys()):
            if hasattr(msg, "header"):
                t = msg.header.stamp  # Use sender's timestamp if available
            t_nsec = t.to_nsec()  # type: int
            quo, rem = divmod(t_nsec, (5**6))
            if t_nsec > MAX_PRECISE_INT or rem != 0:
                raise RuntimeWarning(
                    "Timestamp %s nano-sec may lose precision when converted to milli-sec"
                    "in floating point." % str(t))
            t_msec_f = float(quo) / (2**6)

            if topic in topic_to_cb:
                dp = topic_to_cb[topic](msg)
                ret_dist_trace[topic].append(StampedDataPoint(t_msec_f, dp))
            else:
                continue  # Ignore other topics

    return ret_dist_trace
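
A minimal driver for process_bag_file; the topic and extractor below are hypothetical and assume a plain float counts as a DataPoint:

# map each topic of interest to a callback that reduces a message to a data point
dist_trace = process_bag_file(
    'example.bag',  # hypothetical path
    {'/odom': lambda msg: msg.pose.pose.position.x},  # hypothetical extractor
)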
Example #20
def d8n_get_all_images_topic_bag(bag: rosbag.Bag, min_messages: int = 0) -> List[Tuple[str, type]]:
    """
    Returns the (name, type) of all topics that look like images
    and that have nonzero message count.
    """
    tat = bag.get_type_and_topic_info()
    consider_images = [
        "sensor_msgs/Image",
        "sensor_msgs/CompressedImage",
    ]
    all_types = set()
    found = []
    topics = tat.topics
    for t, v in list(topics.items()):
        msg_type = v.msg_type
        all_types.add(msg_type)
        message_count = v.message_count
        if msg_type in consider_images:

            # quick fix: ignore image_raw if we have image_compressed version
            if "raw" in t:
                other = t.replace("raw", "compressed")

                if other in topics:
                    continue

            if message_count < min_messages:
                # print('ignoring topic %r because message_count = 0' % t)
                continue

            found.append((t, msg_type))
    return found
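
A minimal driver, assuming a hypothetical bag path; min_messages filters out image topics that barely appear in the recording:

import rosbag

with rosbag.Bag('example.bag') as bag:
    for name, msg_type in d8n_get_all_images_topic_bag(bag, min_messages=10):
        print(name, msg_type)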
Example #21
def main():

    ofile = './mvsec_data_bags/outdoor_day2_events.bag'
    write_bag = Bag(ofile, 'w')
    print('write bag opened')

    ifile = './mvsec_data_bags/outdoor_day2_data.bag'
    read_bag = Bag(ifile, 'r')
    print('read bag opened')

    iteration = 1
    for topic, msg, t in read_bag:
        if ('events' in topic):
            write_bag.write(topic, msg, t)
            iteration = iteration + 1
            print(iteration, topic)

    # close the bags so the output bag index is written
    write_bag.close()
    read_bag.close()
Example #22
def get_start_stamp(bag: rosbag.Bag):
    pos_prev = []
    t_prev = 0
    count = 0
    t_start = 0
    for topic, msg, t in bag.read_messages('/tf'):
        if msg.transforms[0].child_frame_id == "Puck":
            pos = np.array([
                msg.transforms[0].transform.translation.x,
                msg.transforms[0].transform.translation.y,
                msg.transforms[0].transform.translation.z
            ])
            t_sec = msg.transforms[0].header.stamp.to_sec()
            if t_prev != 0:
                vel = np.linalg.norm((pos - pos_prev) / (t_sec - t_prev))
                if vel > 0.1:
                    if count == 0:
                        t_start = t
                    count += 1
                    if count > 10:
                        return t_start
                else:
                    count = 0
            pos_prev = pos
            t_prev = t_sec
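
A minimal driver pairing get_start_stamp with read_messages, assuming a hypothetical bag path; the function falls through and returns None if the puck never exceeds the velocity threshold:

from rosbag import Bag

bag = Bag('match.bag')  # hypothetical path
t_start = get_start_stamp(bag)
if t_start is not None:
    for topic, msg, t in bag.read_messages('/tf', start_time=t_start):
        pass  # process only the portion where the puck is moving
bag.close()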
Example #23
def ipfs_download(multihash: Multihash) -> Tuple[dict, Bag]:
    rospy.wait_for_service('/ipfs/get_file')
    download = rospy.ServiceProxy('/ipfs/get_file', IpfsDownloadFile)
    tmpfile = NamedTemporaryFile(delete=False)
    res = download(multihash, Filepath(tmpfile.name))
    tmpfile.close()
    if not res.success:
        raise Exception(res.error_msg)
    messages = {}
    bag = Bag(tmpfile.name, 'r')
    for topic, msg, timestamp in bag.read_messages():
        if topic not in messages:
            messages[topic] = [msg]
        else:
            messages[topic].append(msg)
    os.unlink(tmpfile.name)
    return (messages, bag)
Example #24
    def import_schema(self, schema, srv_options, options,
                      restriction_type, restricts):
        Bag = import_bag(srv_options)
        pcid_str = options.pop('pcid', srv_options.pop('pcid', 0))
        pcid = int(pcid_str)
        patch_column = options.pop('patch_column', srv_options.pop('patch_column', 'points'))
        patch_columns = options.pop('patch_columns', '*').strip()
        patch_columns = [col.strip() for col in patch_columns.split(',') if col.strip()]
        filename = srv_options.pop('rosbag_path', "") + options.pop('rosbag_path', "") + schema
        bag = Bag(filename, 'r')

        tablecols = []
        topics = bag.get_type_and_topic_info().topics
        pcid_for_topic = {k: pcid+1+i for i, k in enumerate(topics.keys())}
        pointcloud_formats = True
        if restriction_type == 'limit':
            topics = {k: v for k, v in topics.items() if k in restricts}
            pointcloud_formats = 'pointcloud_formats' in restricts
        elif restriction_type == 'except':
            topics = {k: v for k, v in topics.items() if k not in restricts}
            pointcloud_formats = 'pointcloud_formats' not in restricts

        tabledefs = []
        if pointcloud_formats:
            tablecols = [
                ColumnDefinition('pcid', type_name='integer'),
                ColumnDefinition('srid', type_name='integer'),
                ColumnDefinition('schema', type_name='text'),
                ColumnDefinition('format', type_name='text'),
                ColumnDefinition('rostype', type_name='text'),
                ColumnDefinition('columns', type_name='text[]'),
                ColumnDefinition('ply_header', type_name='text'),
            ]
            tableopts = {'metadata': 'true', 'rosbag': schema, 'pcid': pcid_str}
            tabledefs.append(TableDefinition("pointcloud_formats", columns=tablecols,
                                             options=tableopts))

        for topic, infos in topics.items():
            pcid = pcid_for_topic[topic]
            columns, _, _, _, _, _ = get_columns(bag, topic, infos, pcid,
                                                 patch_column, patch_columns)
            tablecols = [get_column_def(k, *v) for k, v in columns.items()]
            tableopts = {'topic': topic, 'rosbag': schema, 'pcid': str(pcid)}
            tabledefs.append(TableDefinition(topic, columns=tablecols, options=tableopts))
        return tabledefs
Example #25
def writepc2frombag2file(input_bagfile, pc2_topic, out_file_path, start_time=None, end_time=None, msg_count=None):
    try:
        output_file_name = get_output_file_name(input_bagfile) + '_xyz.txt'
        if out_file_path.endswith('/'):
            output_file = out_file_path + output_file_name
        else:
            output_file = out_file_path + '/' + output_file_name

        output_file_fh = open(output_file, 'w')
        print('writing output_file at', output_file)

        # check start and end time condition 
        if start_time is not None and end_time is not None:
            assert (end_time - start_time) > 0, "end_time should be greater than start_time"

        if msg_count is None:
            use_msg_count = False
        else:
            use_msg_count = True
            # msg_count was already cast in arg parse; check again for sanity and in case the function is reused individually
            msg_count = int(msg_count)
            assert msg_count > 0, "should have positive msg_count"
        
        input_bag = Bag(input_bagfile, 'r')
        print('bag load success')
        
        count = 0
        for topic, msg, ts in input_bag.read_messages(topics=[pc2_topic], start_time=start_time, end_time=end_time):

            output_file_fh.write('msg_timestamp: %f\n' % msg.header.stamp.to_sec())
            output_file_fh.write('%d\n' % msg.width)
            for data_pts in pc2.read_points(msg, field_names=("x", "y", "z", "intensity"), skip_nans=True):
                output_file_fh.write('%f %f %f %d\n' % (data_pts[0], data_pts[1], data_pts[2], data_pts[3]))
            
            count += 1
            print(count, msg_count)
            if use_msg_count and count >= msg_count:
                break

        input_bag.close()
        output_file_fh.close()

    except Exception:
        print(traceback.format_exc())
Example #26
    def _read_info(self):
        """Read some metadata from inside this rosbag."""
        from rosbag import Bag, ROSBagUnindexedException, ROSBagException
        try:
            b = Bag(self.path)
        except ROSBagUnindexedException:
            b = Bag(self.path, allow_unindexed=True)
            print('Reindexing', self.filename)
            b.reindex()

        try:
            duration = b.get_end_time() - b.get_start_time()
        except ROSBagException:
            duration = 0
        return {
            'n_messages': b.get_message_count(),
            'duration': duration,
            'topic_list': b.get_type_and_topic_info()[1].keys()
        }
Example #27
def clip(
    input_file: Bag,
    output_file: Bag,
    sentinel_topic: str,
    start: int,
    stop: int,
) -> None:
    """
    Clip a bag using a range of messages for a given topic.

    Args:
        input_file: the input bag to stream from
        output_file: the output bag to write data to
        sentinel_topic: the topic to watch for starting and stopping the clip
        start: the starting message for the clip
        stop: the stopping message for the clip

    Returns:
        None

    """
    # initialize a counter for the sentinel topic messages
    count = 0
    # create a progress bar for iterating over the messages in the bag
    with tqdm(total=stop - start, unit='sentinel') as prog:
        # iterate over the messages in this input bag
        for topic, msg, time in input_file:
            # if the topic is the sentinel topic, increase the count
            if topic == sentinel_topic:
                count += 1
            # if the count hasn't reached the starting point, continue
            if count < start:
                continue
            # if the count has exceeded the stopping point, break
            if count >= stop:
                break
            # update the progress bar with a single iteration
            if topic == sentinel_topic:
                prog.update(1)
            # write the message
            output_file.write(topic, msg, time)
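
A minimal driver for clip, assuming hypothetical paths; note that start and stop count messages on the sentinel topic, they are not timestamps:

from rosbag import Bag

with Bag('input.bag') as source, Bag('clipped.bag', 'w') as sink:
    clip(source, sink, '/camera/image_raw', 100, 200)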
Example #28
def ipfs_download(multihash):
    rospy.wait_for_service('/liability/infochan/ipfs/get_file')
    download = rospy.ServiceProxy('/liability/infochan/ipfs/get_file',
                                  IpfsDownloadFile)
    with NamedTemporaryFile(delete=False) as tmpfile:
        res = download(multihash, Filepath(tmpfile.name))
        if not res.success:
            raise Exception(res.error_msg)
        messages = {}
        for topic, msg, timestamp in Bag(tmpfile.name, 'r').read_messages():
            messages[topic] = msg
        return messages
Example #29
    def _get_objective(self, lia: Liability) -> dict:
        mhash = lia.objective.multihash
        tempdir = gettempdir()
        os.chdir(tempdir)

        # temp_obj = NamedTemporaryFile(delete=False)
        self.ipfs.get(mhash)

        messages = {}
        for topic, msg, timestamp in Bag(mhash, 'r').read_messages():
            messages[topic] = msg
        return messages
Example #30
def extract_depth(
    bag_file: Bag,
    rgb_directory: str,
    camera_info: str,
    depth: str
) -> None:
    """
    Extract depth data from a bag file.

    Args:
        bag_file: the bag file to play
        rgb_directory: the directory to find RGB image to match depths to
        camera_info: the topic to use to read metadata about the camera
        depth: the topic to use to read 32-bit floating point depth measures

    Returns:
        None

    """
    # extract the camera dimensions from the bag
    dims = get_camera_dimensions(bag_file, camera_info)
    # get the images from a glob
    images = glob.glob(os.path.join(rgb_directory, 'X', 'data', '*.png'))
    # convert the images to numbers
    path_to_int = lambda x: int(os.path.basename(x).replace('.png', ''))
    images = sorted([path_to_int(image) for image in images])
    # create the output directory
    output_dir = os.path.join(rgb_directory, 'D', 'data')
    try:
        os.makedirs(output_dir)
    except FileExistsError:
        pass
    # iterate over the messages
    progress = tqdm(total=len(images))
    for _, msg, time in bag_file.read_messages(topics=depth):
        # if there are no more images left, break out of the loop
        if not len(images):
            break
        # if the time is less than the current image, continue
        if int(str(time)) < images[0]:
            continue
        # update the progress bar
        progress.update(1)
        # get the depth image
        img = get_depth_image(msg.data, dims, as_rgb=False)
        # save the depth image to disk
        output_file = os.path.join(output_dir, '{}-{}.npz'.format(images[0], time))
        np.savez_compressed(output_file, y=img)
        # remove the first item from the list of times
        images.pop(0)
    # close the progress bar
    progress.close()
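
A minimal driver for extract_depth, assuming hypothetical paths/topics and an rgb_directory that follows the X/data layout the glob above expects:

from rosbag import Bag

with Bag('depth.bag') as bag:
    extract_depth(bag, 'dataset', '/camera/depth/camera_info', '/camera/depth/image_raw')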
Example #31
def ipfs_download(multihash):
    tempdir = gettempdir()
    os.chdir(tempdir)

    temp_obj = NamedTemporaryFile(delete=False)

    res = ipfs_download_file(connect(), multihash.multihash, temp_obj.name)
    if not res:
        raise Exception("Can't download objective")
    messages = {}
    for topic, msg, timestamp in Bag(temp_obj.name, 'r').read_messages():
        messages[topic] = msg
    return messages
Example #32
def pinata_rosbag(ipfs_hash: str) -> tuple:

    data = requests.get(f'https://gateway.pinata.cloud/ipfs/{ipfs_hash}')
    rospy.loginfo(data.status_code)
    if data.status_code != 200:
        rospy.loginfo(f'Error downloading rosbag! {data.content}')
        return

    else:
        tmpfile = NamedTemporaryFile(delete=False)
        tmpfile.write(data.content)
        bag = Bag(tmpfile.name)

        messages = {}
        for topic, msg, timestamp in bag.read_messages():
            if topic not in messages:
                messages[topic] = [msg]
            else:
                messages[topic].append(msg)

        os.unlink(tmpfile.name)
    return (messages, bag)
Example #34
class Rosbag(ForeignDataWrapper):
    def __init__(self, options, columns=None):
        super(Rosbag, self).__init__(options, columns)
        Bag = import_bag(options)
        self.filename = options.pop('rosbag_path', "") + options.pop('rosbag')
        self.topic = options.pop('topic', None)
        pointcloud_formats = strtobool(options.pop('metadata', 'false'))

        self.patch_column = options.pop('patch_column', 'points').strip()
        self.patch_columns = options.pop('patch_columns', '*').strip()
        self.patch_columns = [col.strip() for col in self.patch_columns.split(',') if col.strip()]

        self.patch_count_default = int(options.pop('patch_count_default', 1000))
        # 0 => 1 patch per message
        self.patch_count_pointcloud = int(options.pop('patch_count_pointcloud', 0))
        assert(self.patch_count_default > 0)
        assert(self.patch_count_pointcloud >= 0)
        self.pcid = int(options.pop('pcid', 0))
        self.bag = Bag(self.filename, 'r')
        self.topics = self.bag.get_type_and_topic_info().topics
        self.pointcloud_formats = None

        if pointcloud_formats:
            self.pointcloud_formats = []
            topics = self.topic.split(',') if self.topic else self.topics
            for i, topic in enumerate(self.topics):
                if topic not in topics:
                    continue
                infos = self.topics[topic]
                columns, patch_schema, patch_ply_header, _, patch_columns, patch_srid = \
                    get_columns(self.bag, topic, infos, self.pcid+i+1, self.patch_column,
                                self.patch_columns)
                self.pointcloud_formats.append({
                    'pcid': self.pcid+i+1,
                    'srid': patch_srid,
                    'schema': patch_schema,
                    'format': columns[self.patch_column][3],
                    'rostype': infos.msg_type,
                    'columns': patch_columns,
                    'ply_header': patch_ply_header,
                })
            return

        if not self.topic:
            log_to_postgres('"topic" option is required', ERROR)

        self.infos = self.topics[self.topic]
        (self.columns, self.patch_schema, self.patch_ply_header, self.endianness,
         self.patch_columns, self.patch_srid) = \
            get_columns(self.bag, self.topic, self.infos, self.pcid, self.patch_column,
                        self.patch_columns)

        if columns:
            missing = set(columns) - set(self.columns.keys())
            columns = list(c for c in self.columns.keys() if c in columns)
            if missing:
                missing = ", ".join(sorted(missing))
                support = ", ".join(sorted(self.columns.keys()))
                log_to_postgres(
                    "extra unsupported columns : {}".format(missing), WARNING,
                    hint="supported columns : {}".format(support))
            self.columns = {col: self.columns[col] for col in columns}

        if options:
            log_to_postgres("extra unsupported options : {}".format(
                options.keys()), WARNING)

    @classmethod
    def import_schema(cls, schema, srv_options, options,
                      restriction_type, restricts):
        Bag = import_bag(srv_options)
        pcid_str = options.pop('pcid', srv_options.pop('pcid', 0))
        pcid = int(pcid_str)
        patch_column = options.pop('patch_column', srv_options.pop('patch_column', 'points'))
        patch_columns = options.pop('patch_columns', '*').strip()
        patch_columns = [col.strip() for col in patch_columns.split(',') if col.strip()]
        filename = srv_options.pop('rosbag_path', "") + options.pop('rosbag_path', "") + schema
        bag = Bag(filename, 'r')

        tablecols = []
        topics = bag.get_type_and_topic_info().topics
        pcid_for_topic = {k: pcid+1+i for i, k in enumerate(topics.keys())}
        pointcloud_formats = True
        if restriction_type == 'limit':
            topics = {k: v for k, v in topics.items() if k in restricts}
            pointcloud_formats = 'pointcloud_formats' in restricts
        elif restriction_type == 'except':
            topics = {k: v for k, v in topics.items() if k not in restricts}
            pointcloud_formats = 'pointcloud_formats' not in restricts

        tabledefs = []
        if pointcloud_formats:
            tablecols = [
                ColumnDefinition('pcid', type_name='integer'),
                ColumnDefinition('srid', type_name='integer'),
                ColumnDefinition('schema', type_name='text'),
                ColumnDefinition('format', type_name='text'),
                ColumnDefinition('rostype', type_name='text'),
                ColumnDefinition('columns', type_name='text[]'),
                ColumnDefinition('ply_header', type_name='text'),
            ]
            tableopts = {'metadata': 'true', 'rosbag': schema, 'pcid': pcid_str}
            tabledefs.append(TableDefinition("pointcloud_formats", columns=tablecols,
                                             options=tableopts))

        for topic, infos in topics.items():
            pcid = pcid_for_topic[topic]
            columns, _, _, _, _, _ = get_columns(bag, topic, infos, pcid,
                                                 patch_column, patch_columns)
            tablecols = [get_column_def(k, *v) for k, v in columns.items()]
            tableopts = {'topic': topic, 'rosbag': schema, 'pcid': str(pcid)}
            tabledefs.append(TableDefinition(topic, columns=tablecols, options=tableopts))
        return tabledefs

    def execute(self, quals, columns):
        if self.pointcloud_formats is not None:
            for f in self.pointcloud_formats:
                yield f
            return
        self.patch_data = ''
        from rospy.rostime import Time
        tmin = None
        tmax = None
        for qual in quals:
            if qual.field_name == "time":
                t = int(qual.value)
                t = Time(t // 1000000000, t % 1000000000)
                if qual.operator in ['=', '>', '>=']:
                    tmin = t
                if qual.operator in ['=', '<', '<=']:
                    tmax = t
        for topic, msg, t in self.bag.read_messages(
                topics=self.topic, start_time=tmin, end_time=tmax):
            for row in self.get_rows(topic, msg, t, columns):
                yield row

        # flush leftover patch data
        if self.patch_data and self.last_row:
            count = int((len(self.patch_data) / self.point_size))
            # in replicating mode, a single leftover point must not be reported
            if count > 1 or self.patch_step_size == self.patch_size:
                res = self.last_row
                if self.patch_column in columns:
                    res[self.patch_column] = hexlify(
                            pack('=b3I', self.endianness, self.pcid, 0, count) + self.patch_data)
                if self.patch_ply_header and 'ply' in columns:
                    self.ply_info['count'] = count
                    res['ply'] = self.patch_ply_header.format(**self.ply_info) + self.patch_data
                yield res

    def get_rows(self, topic, msg, t, columns, toplevel=True):
        if toplevel and len(msg.__slots__) == 1:
            attr = getattr(msg, msg.__slots__[0])
            if isinstance(attr, list):
                for msg in attr:
                    for row in self.get_rows(topic, msg, t, columns, False):
                        yield row
                return
        res = {}
        data_columns = set(columns)
        if self.patch_column in columns:
            data_columns = data_columns.union(self.patch_columns) - set([self.patch_column])
        if "filename" in data_columns:
            res["filename"] = self.filename
        if "topic" in data_columns:
            res["topic"] = topic
        if "time" in data_columns:
            res["time"] = t.to_nsec()
        if self.infos.msg_type == 'sensor_msgs/PointCloud2':
            self.patch_count = self.patch_count_pointcloud or (msg.width*msg.height)
            self.point_size = msg.point_step
            self.patch_size = self.patch_count * self.point_size
            self.patch_step_size = self.patch_size
            self.endianness = 0 if msg.is_bigendian else 1
            data_columns = data_columns - set(['ply', self.patch_column])
            self.patch_data += msg.data

        data_columns = data_columns - set(res.keys())
        for column in data_columns:
            attr = msg
            for col in column.split('.'):
                if isinstance(attr, list):
                    attr = tuple(getattr(a, col) for a in attr)
                else:
                    attr = getattr(attr, col)
            if hasattr(attr, "to_nsec"):
                attr = attr.to_nsec()
            elif hasattr(attr, "x"):
                if hasattr(attr, "w"):
                    attr = (attr.x, attr.y, attr.z, attr.w)
                else:
                    attr = (attr.x, attr.y, attr.z)
            elif isinstance(attr, str):
                fmt = self.columns[column][3]
                if fmt:
                    attr = unpack(fmt, attr)
            res[column] = attr

        if self.patch_column in columns and not self.infos.msg_type == 'sensor_msgs/PointCloud2':
            fmt = self.columns[self.patch_column][3]
            self.patch_count = self.patch_count_default
            self.point_size = calcsize(fmt)
            self.patch_size = self.patch_count * self.point_size
            self.patch_step_size = self.patch_size - self.point_size
            self.patch_data += get_point_data(res, self.patch_columns, fmt)
            res = {k: v for k, v in res.items() if k not in self.patch_columns}

        if not self.patch_data:
            yield res
        else:
            # todo: ensure current res and previous res are equal if there is some leftover
            # patch_data
            while len(self.patch_data) >= self.patch_size:
                data = self.patch_data[0:self.patch_size]
                count = int(self.patch_size / self.point_size)
                res[self.patch_column] = hexlify(
                        pack('=b3I', self.endianness, self.pcid, 0, count) + data)
                if self.patch_ply_header and 'ply' in columns:
                    self.ply_info = {
                        'endianness': 'big' if self.endianness else 'little',
                        'filename': self.filename,
                        'topic': self.topic,
                        'count': count
                    }
                    res['ply'] = self.patch_ply_header.format(**self.ply_info) + data
                self.patch_data = self.patch_data[self.patch_step_size:]
                yield res
            self.last_row = res
Example #35
	def on_enter(self, userdata):
		self._failed = False
		try:
			# Initialization
		
			# userdata
			self._bag_filename = userdata.bag_filename
			self._trajectories_command = userdata.trajectories_command
			
			# joint definitions
			l_arm_range = range(16, 23)
			r_arm_range = range(23, 30)
			atlasJointNames = [
			    'back_bkz', 'back_bky', 'back_bkx', 'neck_ry',
			    'l_leg_hpz', 'l_leg_hpx', 'l_leg_hpy', 'l_leg_kny', 'l_leg_aky', 'l_leg_akx',
			    'r_leg_hpz', 'r_leg_hpx', 'r_leg_hpy', 'r_leg_kny', 'r_leg_aky', 'r_leg_akx',
			    'l_arm_shz', 'l_arm_shx', 'l_arm_ely', 'l_arm_elx', 'l_arm_wry', 'l_arm_wrx', 'l_arm_wry2',
			    'r_arm_shz', 'r_arm_shx', 'r_arm_ely', 'r_arm_elx', 'r_arm_wry', 'r_arm_wrx', 'r_arm_wry2']
			joint_position_cmd = [0]*len(l_arm_range)
			# starting and stopping time of measurement data to be taken into account for calibration
			time_start = 0
			time_end = 0
			time_tf = 0
			
			# gravity vector in world frame
			g_world = np.resize(np.array([0,0,-9.81]),(3,1))
		
			# take this number of data points for each pose.
			max_data_points_per_pose = 100
			
			# rotation of the force torque sensor in world frame (quaternions)
			ft_rotation = [0,0,0,0]
			
			# loop over all ft sensors to calibrate
			
			for chain in self._calibration_chain: 
				tf_chain=['/pelvis', 'ltorso', 'mtorso', 'utorso']
				if chain == 'left_arm':
					joint_range = l_arm_range
					tfname = 'l_hand'
					tf_chain.extend(['l_clav', 'l_scap', 'l_uarm', 'l_larm', 'l_ufarm', 'l_lfarm', 'l_hand'])
				elif chain == 'right_arm':
					joint_range = r_arm_range
					tf_chain.extend(['r_clav', 'r_scap', 'r_uarm', 'r_larm', 'r_ufarm', 'r_lfarm', 'r_hand'])
					
					tfname = 'r_hand'
				else:
					Logger.logwarn('CalculateForceTorqueCalibration: Undefined chain %s', chain)

				# initialize transformations
				tf_data = [] # frame to frame transformation with numerical indexes from the tf_chain
				tf_data_cum = [] # world to frame transformation with numerical indexes from the tf_chain

				for i in range(len(tf_chain)):
					tf_data.append([0,0,0,0])
					tf_data_cum.append([0,0,0,0])
									
				# get number of poses from the commanded trajectories. 2 Points per Pose
				number_of_poses = len(self._trajectories_command[chain].points) // 2
				# define information matrix for calibration
				InfMat = np.zeros((6*max_data_points_per_pose*number_of_poses, 10))
				InfMat_i = 0 # Index in this Matrix
				# define measurement vector for calibration
				MeasVec = np.zeros((6*max_data_points_per_pose*number_of_poses, 1))
				
				# commanded joint trajectory for current chain
				# all commanded positions in this trajectory
				current_traj_cmd = self._trajectories_command[chain] 
				
				# flag if the time interval was already set:
				timesetflag = False
				
				point_index_cmd = 0 # start with index 0
				transformation_available = False
				
				# read time series from bag file
				# check in desired input trajectory, at which time a position is reached
				# take the data in a defined time period after the commanded new position
				bag_from_robot = Bag(os.path.expanduser(self._bag_filename)) # open bag file
				Logger.loginfo('CalculateForceTorqueCalibration: Calibrate %s from %s. Using %d different positions from trajectory command. Expecting 2 commanded positions per pose' % (chain, self._bag_filename, len(current_traj_cmd.points)) )
				for topic, msg, t in bag_from_robot.read_messages(topics=['/flor/controller/atlas_state', '/tf', '/flor/controller/joint_command']):
					# Check if all desired poses have been reached
					if point_index_cmd > len(current_traj_cmd.points)-1:
						break
					current_position_cmd = current_traj_cmd.points[point_index_cmd].positions
					####################################################
					# Check tf message: remember last transformation from world to ft sensor
					if topic == '/tf':
						# loop over all transformations inside this tf message
						for j in range(len(msg.transforms)):
							data = msg.transforms[j]
							tr = data.transform # current transformation
							header = data.header
							# check if the frame matches one of the saved frames
							for i in range(len(tf_chain)):
								if data.child_frame_id == tf_chain[i]:
									tf_data[i] = np.array([tr.rotation.x, tr.rotation.y, tr.rotation.z, tr.rotation.w])

						# calculate the newest transformation to the force torque sensor
						tf_data_cum[0] = tf_data[0]
						for i in range(1,len(tf_chain)):
							tf_data_cum[i] = tf.transformations.quaternion_multiply(tf_data_cum[i-1], tf_data[i])
							if i == len(tf_chain)-1:
								time_tf = msg.transforms[0].header.stamp.to_sec()
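						# Assuming each tf entry stores the parent-to-child rotation,
						# quaternion_multiply chains them along the kinematic chain, so
						# the last entry holds the world-to-hand rotation used for the
						# gravity transform below.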
						# done with this tf message:
						# check if all transformations are available (up to the last link)
						if np.any(tf_data_cum[-1]): # quaternion differs from the all-zero initialization: data exists
							transformation_available = True
						continue
					else:
						time_msg = msg.header.stamp.to_sec()
					
					####################################################
					# check if timestamp is interesting. Then check the 
					if not (time_msg > time_start and time_msg < time_end) and not timesetflag:
						# the timestamp is not in the evaluation interval: look for the reaching of the pose
						if topic == '/flor/controller/joint_command':
							# get the commanded position
							for i in range(len(joint_range)):
								osrf_ndx = joint_range[i]
								joint_position_cmd[i] = msg.position[osrf_ndx]

							# check if the position command matches the currently expected commanded position
							pos_reached = True
							for i in range(len(joint_range)):
								if abs(joint_position_cmd[i] - current_position_cmd[i]) > 0.001:
									pos_reached = False
									break
							if pos_reached:
								# end of the evaluation interval for this pose: take the latest time possible
								time_end = msg.header.stamp.to_sec() + self._settlingtime
								# start of the evaluation interval: the last 100ms before time_end
								time_start = time_end - 0.1
								data_points_for_this_pose = 0
								timesetflag = True
								# each pose consists of two trajectory points (one for reaching,
								# one for settling), so advance the command index by two
								point_index_cmd = point_index_cmd + 2
						continue # data acquisition below is triggered only inside [time_start, time_end]

					if time_msg > time_end:
						timesetflag = False # prepare for a new evaluation interval
			  			
					####################################################
					# check if enough data points for this pose have been collected
					if data_points_for_this_pose >= max_data_points_per_pose:
						# already enough data points for this pose
						continue

					####################################################
					# only atlas_state messages carry the measured wrench;
					# everything else cannot fill the information matrix
					if topic != '/flor/controller/atlas_state':
						continue

					# Extract measured force and torque
					if chain == 'left_arm':
						FT = [msg.l_hand.force.x, msg.l_hand.force.y, msg.l_hand.force.z, msg.l_hand.torque.x, msg.l_hand.torque.y, msg.l_hand.torque.z ]
					elif chain == 'right_arm':
						FT = [msg.r_hand.force.x, msg.r_hand.force.y, msg.r_hand.force.z, msg.r_hand.torque.x, msg.r_hand.torque.y, msg.r_hand.torque.z ]
	
					# rotate the gravity vector into the force/torque sensor frame
					if not transformation_available:
						Logger.logwarn('CalculateForceTorqueCalibration: No tf messages available at time %1.4f.' % time_msg)
						continue
						
					R = tf.transformations.quaternion_matrix(tf_data_cum[-1])
					g = np.dot((R[0:3,0:3]).transpose(), g_world)
	
					gx = g[0]
					gy = g[1]
					gz = g[2]
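					# g now holds the gravity vector expressed in the force/torque
					# sensor frame; its components gx, gy, gz parameterize the
					# regressor matrix M below.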
				
					# fill information matrix for this data point. Source: [1], equ. (7)
					M = np.zeros((6, 10))
					M[0,0] = gx
					M[1,0] = gy
					M[2,0] = gz
					
					M[3,2] = gz
					M[3,3] = -gy 
					M[4,1] = -gz
					M[4,3] = gx
					M[5,1] = gy
					M[5,2] = -gx
					M[0,4] = 1.0
					M[1,5] = 1.0
					M[2,6] = 1.0
					M[3,7] = 1.0
					M[4,8] = 1.0
					M[5,9] = 1.0
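					# Row structure of M (cf. the model above):
					#   rows 0-2: F = m*g + F_offset        -> columns 0 and 4-6
					#   rows 3-5: M = (m*c) x g + M_offset  -> columns 1-3 and 7-9
					# e.g. row 3 reads Mx = gz*(m*cy) - gy*(m*cz) + Mx_offset.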
					
					# fill big information matrix and vector (stack the small information matrices)
					for i in range(6):
						for j in range(10):
							InfMat[InfMat_i*6+i,j] = M[i,j]
						MeasVec[InfMat_i*6+i,0] = FT[i]
						
					InfMat_i = InfMat_i + 1 # increase index
					data_points_for_this_pose = data_points_for_this_pose + 1
			
				# shorten the big information matrix to the rows actually filled
				if InfMat_i < max_data_points_per_pose*number_of_poses:
					InfMat_calc = InfMat[0:6*InfMat_i,:]
					MeasVec_calc = MeasVec[0:6*InfMat_i,:]
				else:
					InfMat_calc = InfMat
					MeasVec_calc = MeasVec

				# calculate calibration data
				if chain in self._static_calibration_data: # calculate calibration with given static parameters
					# bring the columns with the known parameters to the other side of the equation
					if len(self._static_calibration_data[chain]) != 4:
						Logger.logwarn( "CalculateForceTorqueCalibration: Given static calibration data for %s has length %d. Required: 4 entries." % (chain, len(self._static_calibration_data[chain])) )
						self._failed = True
						return
					# convert physical parameters (mass, center of mass) to
					# identification parameters (mass, first moment = mass*com)
					m = self._static_calibration_data[chain][0]
					if m == 0:
						mom_x = 0
						mom_y = 0
						mom_z = 0
					elif m > 0:
						mom_x = self._static_calibration_data[chain][1]*m
						mom_y = self._static_calibration_data[chain][2]*m
						mom_z = self._static_calibration_data[chain][3]*m
					else:
						Logger.logwarn( "CalculateForceTorqueCalibration: Negative mass (%f) for calibration requested. Abort." % mass )
						self._failed = True
						return
					k_fix = np.resize(np.array([m, mom_x, mom_y, mom_z]),(4,1))
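					# k_fix holds the known identification parameters; below, their
					# contribution InfMat_calc[:,0:4]*k_fix is moved to the measurement
					# side so that only the 6 offset parameters remain to be estimated.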
					Logger.loginfo( "CalculateForceTorqueCalibration:static calibration data for %s given: %s. Reduce equation system" % (chain, str(self._static_calibration_data[chain])) )
					MeasVec_calc_corr = np.subtract(np.array(MeasVec_calc), np.dot(InfMat_calc[:,0:4], k_fix))
					InfMat_calc_corr = InfMat_calc[:,4:10] # only the last 6 colums which correspond to the sensor offsets
					k = np.linalg.lstsq(InfMat_calc_corr, MeasVec_calc_corr)[0] # solve reduced equation system
					k_calibration = self._static_calibration_data[chain]
					k_calibration.extend(k)
				else: # calculate normally with all parameters unknown
					k = np.linalg.lstsq(InfMat_calc, MeasVec_calc)[0]
					# convert identification parameters to physical parameters (first moment -> center of mass)
					if k[0] > 0:
						k_calibration = [k[0], k[1]/k[0], k[2]/k[0], k[3]/k[0], k[4], k[5], k[6], k[7], k[8], k[9]]
					else:
						Logger.loginfo("CalculateForceTorqueCalibration: Calibration yielded a non-positive mass %f" % k[0])
						k_calibration = [0.0, 0.0, 0.0, 0.0, k[4], k[5], k[6], k[7], k[8], k[9]]
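				# Note: a non-positive mass estimate from the least-squares fit likely
				# indicates insufficient pose variety in the recorded data; in that
				# case only the sensor offsets are kept.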
					
				Logger.loginfo("CalculateForceTorqueCalibration:calibration data for %s" % chain)
				Logger.loginfo("CalculateForceTorqueCalibration:mass: %f" % float(k_calibration[0]))
				Logger.loginfo("CalculateForceTorqueCalibration:center of mass: %f %f %f" % (k_calibration[1], k_calibration[2], k_calibration[3]))
				Logger.loginfo("CalculateForceTorqueCalibration:F offset: %f %f %f" % (k_calibration[4], k_calibration[5], k_calibration[6]))
				Logger.loginfo("CalculateForceTorqueCalibration:M offset: %f %f %f" % (k_calibration[7], k_calibration[8], k_calibration[9]))
				self._ft_calib_data[chain] = k_calibration
			
				bag_from_robot.close()
	
			userdata.ft_calib_data = self._ft_calib_data
			Logger.loginfo('CalculateForceTorqueCalibration: Calibration finished')
			self._done = True

		except Exception as e:
			Logger.logwarn('CalculateForceTorqueCalibration: Unable to calculate calibration:\n%s' % str(e))
			self._failed = True
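		# Illustrative usage sketch (an assumption, not part of this state's flow):
		# with k_calibration = [m, cx, cy, cz, Fx0, Fy0, Fz0, Mx0, My0, Mz0] and the
		# rotation matrix R from tf (as above), a raw wrench could be compensated as
		#   g = np.dot(R[0:3,0:3].transpose(), g_world)
		#   F = F_raw - np.array([Fx0, Fy0, Fz0]) - m*g.flatten()
		#   M = M_raw - np.array([Mx0, My0, Mz0]) - np.cross(m*np.array([cx, cy, cz]), g.flatten())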