def publish(self, channel, data):
    assert channel in self.outputs.values(), (channel, tuple(self.outputs.values()))
    dt, stream_id, bytes_data = next(self.reader)
    while stream_id not in self.outputs:
        input_name = self.inputs[stream_id]
        if input_name.startswith("slot_"):
            # use a separate name so the published `data` is not overwritten
            slot_data = deserialize(bytes_data)
            getattr(self.node, input_name)(dt, slot_data)
        else:
            self.buffer_queue.append((dt, stream_id, bytes_data))
        dt, stream_id, bytes_data = next(self.reader)
    assert channel == self.outputs[stream_id], (channel, self.outputs[stream_id], dt)  # wrong channel
    if len(self.buffer_queue) > 0:
        delay = dt - self.buffer_queue[0][0]
        if delay > self.max_delay:
            self.max_delay = delay
            self.max_delay_timestamp = dt
        if delay > ASSERT_QUEUE_DELAY:
            print(dt, "maximum delay overshot:", delay)
    ref_data = deserialize(bytes_data)
    assert almost_equal(data, ref_data), (data, ref_data, dt)
    return dt
def main():
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('logfile', help='recorded log file', nargs='+')
    parser.add_argument('--out', help='map output file')
    parser.add_argument('--pose2d', help='stream ID for pose2d messages', default='app.pose2d')
    parser.add_argument('--artf', help='stream ID with artifacts XYZ')
    args = parser.parse_args()

    out_filename = args.out
    if out_filename is None:
        out_filename = os.path.splitext(args.logfile[0])[0] + '.jpg'

    pts = []
    artf = []
    for filename in args.logfile:
        pose_id = lookup_stream_id(filename, args.pose2d)
        streams = [pose_id]
        artf_id = None
        if args.artf is not None:
            artf_id = lookup_stream_id(filename, args.artf)
            streams.append(artf_id)
        for dt, channel, data in LogReader(filename, only_stream_id=streams):
            if channel == pose_id:
                pose = deserialize(data)
                pts.append(pose[:2])
            elif channel == artf_id:
                arr = deserialize(data)
                artf.extend(arr)
    pts2image(pts, artf, out_filename)
def test_packed_data(self):
    data = [0] * 1000
    packet = serialize(data)
    self.assertEqual(len(packet), 1003)
    self.assertEqual(deserialize(packet), data)

    compressed_packet = serialize(data, compress=True)
    self.assertEqual(len(compressed_packet), 22)
    self.assertEqual(deserialize(compressed_packet), data)
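# The sizes asserted above are consistent with a msgpack encoding plus
# optional zlib compression. This is an assumption sketched below, not a
# statement about serialize()'s actual internals:
import msgpack
import zlib

payload = msgpack.packb([0] * 1000)
assert len(payload) == 1003         # 3-byte array16 header + 1000 one-byte zeros
print(len(zlib.compress(payload)))  # a few dozen bytes: repetitive input compresses well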
def test_numpy(self):
    data = numpy.asarray(range(1, 100, 2), dtype=numpy.uint16)
    packet = serialize(data)
    self.assertEqual(len(packet), 163)
    dedata = deserialize(packet)
    self.assertTrue(numpy.array_equal(dedata, data))
    self.assertEqual(dedata.dtype, data.dtype)

    with unittest.mock.patch('osgar.lib.serialize.numpy', new=False):
        with self.assertRaises(TypeError):
            serialize(data)
        with self.assertRaises(TypeError):
            deserialize(packet)
def debug2dir(filename, out_dir):
    from osgar.logger import LogReader, lookup_stream_names
    from osgar.lib.serialize import deserialize

    names = lookup_stream_names(filename)
    assert 'detector.debug_artf' in names, names
    assert 'detector.artf' in names, names
    assert 'rosmsg.sim_time_sec' in names, names
    image_id = names.index('detector.debug_artf') + 1
    artf_id = names.index('detector.artf') + 1
    sim_sec_id = names.index('rosmsg.sim_time_sec') + 1
    sim_time_sec = None
    image = None
    artf = None
    for dt, channel, data in LogReader(filename,
                                       only_stream_id=[image_id, artf_id, sim_sec_id]):
        data = deserialize(data)
        if channel == sim_sec_id:
            sim_time_sec = data
        elif channel == image_id:
            image = data
            assert artf is not None
            time_sec = sim_time_sec if sim_time_sec is not None else int(dt.total_seconds())
            name = os.path.basename(filename)[:-4] + '-' + artf[0] + '-' + str(time_sec) + '.jpg'
            print(name)
            with open(os.path.join(out_dir, name), 'wb') as f:
                f.write(image)
        elif channel == artf_id:
            artf = data
def mapping_server(logfile, cpr, loop=False):
    try:
        name = u'Skiddy'
        stream_id = lookup_stream_id(logfile, 'fromros.points')
    except ValueError:
        name = u'Kloubak'
        stream_id = lookup_stream_id(logfile, 'from_jetson_front.points')
    with LogReader(logfile, only_stream_id=stream_id) as logreader:
        for timestamp, stream, raw_data in logreader:
            arr = deserialize(raw_data)
            # array of arrays -- or is that a mistake on the ROS serializer side?
            assert len(arr) == 1, arr
            if len(arr[0]) == 0:
                continue  # but maybe we should report at least one empty map?
            print('SENDING', len(arr[0]))
            cloud = create_map(arr)
            print(cloud)
            if cpr is not None:
                res = cpr.send_map_msg(u'PointCloud2', msg=cloud, name=name)
                print(res)
            if not loop:
                break
            time.sleep(1.0)
def create_video(logfile, stream, outfile, add_time=False,
                 start_time_sec=0, end_time_sec=None, fps=25):
    assert outfile.endswith(".avi"), outfile
    only_stream = lookup_stream_id(logfile, stream)
    with LogReader(logfile, only_stream_id=only_stream) as log:
        writer = None
        for timestamp, stream_id, data in log:
            buf = deserialize(data)
            img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), 1)
            if writer is None:
                height, width = img.shape[:2]
                writer = cv2.VideoWriter(outfile,
                                         cv2.VideoWriter_fourcc('F', 'M', 'P', '4'),
                                         fps, (width, height))
            if add_time:
                if (width, height) == (640, 480):
                    x, y = 350, 50
                    thickness = 3
                    size = 3.0
                else:
                    x, y = 800, 100
                    thickness = 5
                    size = 5.0
                # clip microseconds to milliseconds
                s = str(timestamp - timedelta(seconds=start_time_sec))[:-3]
                cv2.putText(img, s, (x, y), cv2.FONT_HERSHEY_PLAIN,
                            size, (255, 255, 255), thickness=thickness)
            if timestamp.total_seconds() > start_time_sec:
                writer.write(img)
            if end_time_sec is not None and timestamp.total_seconds() > end_time_sec:
                break
        writer.release()
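# A minimal usage sketch for create_video(); the log path is hypothetical and
# 'camera.raw' follows the stream naming seen in the apriltag scanner below:
create_video('run.log', 'camera.raw', 'run.avi', add_time=True,
             start_time_sec=10, end_time_sec=60)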
def main():
    import argparse
    import sys
    import io
    from pprint import pprint

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('log', help='filepaths', nargs='+')
    parser.add_argument('--threads', help='how many threads to use', type=int, default=1)
    parser.add_argument('--gui', help='show found tags', default=False, action='store_true')
    parser.add_argument('--margin', help='apriltag decision margin threshold',
                        default=30, type=int)
    args = parser.parse_args()

    detector = apriltag.apriltag('tag16h5', threads=args.threads)
    for filepath in args.log:
        print(filepath)
        streams = lookup_stream_names(filepath)
        processing = []
        for i, stream in enumerate(streams):
            if stream == "camera.raw" or stream.endswith('.jpg') or stream.endswith('.image'):
                print("processing stream {} => {}".format(i, stream))
                processing.append(i + 1)
        if len(processing) == 0:
            print("no jpeg found in streams:")
            pprint(streams)
            continue
        with LogReader(filepath, only_stream_id=processing) as log:
            for dt, channel, data in log:
                try:
                    jpeg = deserialize(data)
                except Exception as e:
                    print(e)
                    continue
                np_jpeg = numpy.frombuffer(jpeg, dtype='u1')
                gray = cv2.imdecode(np_jpeg, cv2.IMREAD_GRAYSCALE)
                found = detector.detect(gray)
                found = [tag for tag in found
                         if tag['margin'] > args.margin and tag['hamming'] == 0]
                if len(found) > 0:
                    ids = list(tag['id'] for tag in found)
                    print(dt, end=' ')
                    if args.gui:
                        img = cv2.imdecode(np_jpeg, cv2.IMREAD_COLOR)
                    for tag in found:
                        rect = tag['lb-rb-rt-lt'].astype('float32')
                        area = cv2.contourArea(rect)
                        print('{:2d}: {{margin: {:3d}, area: {:4d}}}'.format(
                            tag['id'], int(tag['margin']), int(area)), end=' ')
                        center = tuple(tag['center'].astype(int))
                        poly = rect.astype('int32')
                        if args.gui:
                            cv2.circle(img, center, 3, (0, 0, 255), -1)
                            cv2.polylines(img, [poly], True, (0, 255, 255), 3)
                    print()
                    if args.gui:
                        cv2.imshow('image', img)
                        key = cv2.waitKey(0)
                        if key == 27:
                            return
def main(logfile, streams):
    warning_event = False
    base_logname = os.path.basename(logfile)
    print("\n" + base_logname)
    print("-" * 60)
    relevant_streams_id = [lookup_stream_id(logfile, name) for name in streams.keys()]
    stat_list = [np.array([])] * len(relevant_streams_id)
    encoder_stream_id = lookup_stream_id(logfile, "kloubak.encoders")
    prev_time_in_sec = None
    time_diff = None
    with LogReader(logfile, only_stream_id=relevant_streams_id) as log:
        for timestamp, stream_id, data in log:
            time_in_sec = timestamp.total_seconds()
            item_id = relevant_streams_id.index(stream_id)
            stat_list[item_id] = np.append(stat_list[item_id], time_in_sec)
            if stream_id == encoder_stream_id:
                encoders = deserialize(data)
                for enc in encoders:
                    if abs(enc) > 100:  # original `enc > abs(100)` ignored large negative values
                        if prev_time_in_sec is not None:
                            time_diff = time_in_sec - prev_time_in_sec
                        print(f"\nEncoders: {encoders}, time diff: {time_diff}, at {timestamp}")
                prev_time_in_sec = time_in_sec

    for arr, name in zip(stat_list, streams):
        gaps_event = False
        if len(arr) <= 1:
            print(f'\n{name:>{20}}\t{"No data or only one msg received!!!"}')
            warning_event = True
        else:
            # average frequency: number of messages per second
            average_freq = len(arr) / (arr[-1] - arr[0])
            expected_freq = streams[name][0]
            acceptable_gap = streams[name][1]
            num_gaps = len(np.where(np.diff(arr) > acceptable_gap)[0])
            if num_gaps > 0:
                max_gap = max(np.diff(arr))
                print(f'\n{name:>{20}}\t{"number of gaps %d" % num_gaps}\t'
                      f'{"max gap %.2f s" % max_gap}\t{"acceptable gap %.2f s" % acceptable_gap}')
                gaps_event = True
                warning_event = True
            if average_freq < expected_freq:
                if not gaps_event:
                    print("")
                print(f'{name:>{20}}\t{"received %.1f Hz" % average_freq}\t'
                      f'{"expected %.1f Hz" % expected_freq}')
                warning_event = True

    if not warning_event:
        print("log OK")
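# The `streams` argument maps stream names to (expected_freq, acceptable_gap)
# pairs, matching the streams[name][0] / streams[name][1] reads above. A
# hypothetical call (names and thresholds illustrative):
STREAMS = {
    'kloubak.encoders': (20.0, 0.5),  # expect 20 Hz, warn on gaps over 0.5 s
    'lidar.scan': (10.0, 0.3),
}
main('kloubak-test.log', STREAMS)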
def listen(self):
    dt, stream_id, bytes_data = next(self.reader)
    self.time = dt
    channel = self.inputs[stream_id]
    data = deserialize(bytes_data)
    if channel.startswith("slot_"):
        channel = channel[len('slot_'):]  # workaround for non-slot run
    return dt, channel, data
def listen(self):
    if self.buffer_queue.empty():
        dt, stream_id, bytes_data = next(self.reader)
    else:
        dt, stream_id, bytes_data = self.buffer_queue.get()
    channel = self.inputs[stream_id]
    data = deserialize(bytes_data)
    return dt, channel, data
def _step(self, direction):
    self.bbox = None
    if (self.current + direction) >= len(self.log):
        self.log.grow()
    while 0 <= self.current + direction < len(self.log):
        self.current += direction
        timestamp, stream_id, data = self.log[self.current]
        if stream_id == self.keyframes_id:
            self.keyframe = True
        if stream_id == self.title_id:
            self.title = deserialize(data)
        if stream_id == self.bbox_id:
            self.bbox = deserialize(data)
        if stream_id == self.lidar_id:
            self.scan = deserialize(data)
            keyframe = self.keyframe
            self.keyframe = False
            return (timestamp, self.pose, self.scan, self.scan2, self.image,
                    self.image2, self.bbox, self.joint, keyframe, self.title, False)
        if stream_id == self.lidar2_id:
            self.scan2 = deserialize(data)
        elif stream_id == self.camera_id:
            self.image = get_image(deserialize(data))
            if self.lidar_id is None:
                keyframe = self.keyframe
                self.keyframe = False
                return (timestamp, self.pose, self.scan, self.scan2, self.image,
                        self.image2, self.bbox, self.joint, keyframe, self.title, False)
        elif stream_id == self.camera2_id:
            self.image2 = get_image(deserialize(data))
        elif stream_id == self.joint_id:
            self.joint = deserialize(data)
        elif stream_id == self.pose3d_id:
            pose3d, orientation = deserialize(data)
            assert len(pose3d) == 3
            assert len(orientation) == 4
            self.pose = [pose3d[0], pose3d[1], quaternion.heading(orientation)]
            self.pose3d = [pose3d, orientation]
        elif stream_id == self.pose2d_id:
            arr = deserialize(data)
            assert len(arr) == 3
            self.pose = (arr[0] / 1000.0, arr[1] / 1000.0, math.radians(arr[2] / 100.0))
            x, y, heading = self.pose
            self.pose = (x * math.cos(g_rotation_offset_rad) - y * math.sin(g_rotation_offset_rad),
                         x * math.sin(g_rotation_offset_rad) + y * math.cos(g_rotation_offset_rad),
                         heading + g_rotation_offset_rad)
            if self.lidar_id is None and self.camera_id is None:
                keyframe = self.keyframe
                self.keyframe = False
                return (timestamp, self.pose, self.scan, self.scan2, self.image,
                        self.image2, self.bbox, self.joint, keyframe, self.title, False)
    return (timedelta(), self.pose, self.scan, self.scan2, self.image,
            self.image2, self.bbox, self.joint, self.keyframe, self.title, True)
def scans_gen(logfile, lidar_name=None, poses_name=None, camera_name=None):
    """
    Generator of (timestamp, pose, lidar, image) tuples. The frequency is
    defined by the lidar; for pose and image the most recent values are used.
    """
    names = lookup_stream_names(logfile)
    assert not (lidar_name is None and poses_name is None and camera_name is None), names

    lidar_id, poses_id, camera_id = None, None, None
    if lidar_name is not None:
        lidar_id = names.index(lidar_name) + 1
    if poses_name is not None:
        poses_id = names.index(poses_name) + 1
    if camera_name is not None:
        camera_id = names.index(camera_name) + 1

    pose = (0, 0, 0)
    image = None
    scan = []
    eof = False
    streams = [s for s in [lidar_id, poses_id, camera_id] if s is not None]
    with LogReader(logfile, only_stream_id=streams) as log:
        for timestamp, stream_id, data in log:
            if stream_id == lidar_id:
                scan = deserialize(data)
                yield timestamp, pose, scan, image, eof
            elif stream_id == camera_id:
                jpeg = deserialize(data)
                image = pygame.image.load(io.BytesIO(jpeg), 'JPG').convert()
                if lidar_id is None:
                    yield timestamp, pose, scan, image, eof
            elif stream_id == poses_id:
                arr = deserialize(data)
                assert len(arr) == 3
                pose = (arr[0] / 1000.0, arr[1] / 1000.0, math.radians(arr[2] / 100.0))
                if lidar_id is None and camera_id is None:
                    yield timestamp, pose, scan, image, eof

    # generate the last message with the EOF flag set
    eof = True
    yield timestamp, pose, scan, image, eof
def decompress(data):
    # RTABMap compresses float32 depth data into the RGBA channels of a PNG
    # image. It does not, however, store information about endianness. All we
    # can do is hope that the current machine and the data-origin machine use
    # the same one.
    depth_img = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_UNCHANGED)
    if depth_img is not None:
        return depth_img.view(dtype=np.float32)[:, :, 0]
    return deserialize(data)
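# A round-trip sketch of the packing scheme handled above, using only OpenCV
# and NumPy (the depth values are illustrative):
import cv2
import numpy as np

depth = np.random.rand(4, 4).astype(np.float32)  # fake float32 depth map
rgba = depth.view(np.uint8).reshape(4, 4, 4)     # reinterpret each float as 4 byte channels
ok, png = cv2.imencode('.png', rgba)             # lossless PNG keeps the bytes intact
assert ok
restored = cv2.imdecode(png, cv2.IMREAD_UNCHANGED).view(np.float32)[:, :, 0]
assert np.array_equal(restored, depth)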
def listen(self):
    while True:
        if len(self.buffer_queue) == 0:
            dt, stream_id, bytes_data = next(self.reader)
        else:
            dt, stream_id, bytes_data = self.buffer_queue.popleft()
        channel = self.inputs[stream_id]
        data = deserialize(bytes_data)
        if channel.startswith("slot_"):
            getattr(self.node, channel)(data)
        else:
            break
    return dt, channel, data
def publish(self, channel, data):
    assert channel in self.outputs.values(), (channel, self.outputs.values())
    dt, stream_id, bytes_data = next(self.reader)
    while stream_id not in self.outputs:
        assert stream_id in self.inputs, stream_id
        self.buffer_queue.put((dt, stream_id, bytes_data))
        dt, stream_id, bytes_data = next(self.reader)
    assert channel == self.outputs[stream_id], (channel, self.outputs[stream_id])  # wrong channel
    ref_data = deserialize(bytes_data)
    assert data == ref_data, (data, ref_data)
def publish(self, channel, data):
    assert channel in self.outputs.values(), (channel, self.outputs.values())
    dt, stream_id, bytes_data = next(self.reader)
    while stream_id not in self.outputs:
        input_name = self.inputs[stream_id]
        if input_name.startswith("slot_"):
            # use a separate name so the published `data` is not overwritten
            slot_data = deserialize(bytes_data)
            getattr(self.node, input_name)(slot_data)
        else:
            self.buffer_queue.append((dt, stream_id, bytes_data))
        dt, stream_id, bytes_data = next(self.reader)
    assert channel == self.outputs[stream_id], (channel, self.outputs[stream_id], dt)  # wrong channel
    if len(self.buffer_queue) > 0:
        delay = dt - self.buffer_queue[0][0]
        if delay > self.max_delay:
            self.max_delay = delay
            self.max_delay_timestamp = dt
        if delay > ASSERT_QUEUE_DELAY:
            print("maximum delay overshot:", delay)
    ref_data = deserialize(bytes_data)
    assert data == ref_data, (data, ref_data, dt)
    return dt
def _step(self, direction):
    if (self.current + direction) >= len(self.log):
        self.log.grow()
    while 0 <= self.current + direction < len(self.log):
        self.current += direction
        timestamp, stream_id, data = self.log[self.current]
        if stream_id == self.lidar_id:
            self.scan = deserialize(data)
            return timestamp, self.pose, self.scan, self.image, False
        elif stream_id == self.camera_id:
            jpeg = deserialize(data)
            self.image = pygame.image.load(io.BytesIO(jpeg), 'JPG').convert()
            if self.lidar_id is None:
                return timestamp, self.pose, self.scan, self.image, False
        elif stream_id == self.pose3d_id:
            pose3d, orientation = deserialize(data)
            assert len(pose3d) == 3
            assert len(orientation) == 4
            self.pose = [pose3d[0], pose3d[1], quaternion.heading(orientation)]
            self.pose3d = [pose3d, orientation]
        elif stream_id == self.pose2d_id:
            arr = deserialize(data)
            assert len(arr) == 3
            self.pose = (arr[0] / 1000.0, arr[1] / 1000.0, math.radians(arr[2] / 100.0))
            x, y, heading = self.pose
            self.pose = (x * math.cos(g_rotation_offset_rad) - y * math.sin(g_rotation_offset_rad),
                         x * math.sin(g_rotation_offset_rad) + y * math.cos(g_rotation_offset_rad),
                         heading + g_rotation_offset_rad)
            if self.lidar_id is None and self.camera_id is None:
                return timestamp, self.pose, self.scan, self.image, False
    return timedelta(), self.pose, self.scan, self.image, True
def autodetect_name(logfile):
    stream_id = lookup_stream_id(logfile, ROBOT_NAME_STREAM)
    for dt, channel, data in LogReader(logfile, only_stream_id=stream_id):
        robot_name = deserialize(data).decode('ascii')
        return robot_name

    # fallback: parse the robot name from the recorded command-line arguments
    _, _, data = next(LogReader(logfile))
    data = ast.literal_eval(str(data, 'ascii'))
    while data:
        if data[0] != "--robot-name":
            del data[0]
            continue
        return data[1]
    assert False, "Robot name autodetection failed!"
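# Hypothetical content of the very first log record (stream 0), which the
# fallback above scans: a Python-literal list of launch arguments.
import ast

raw = b"['run', 'config.json', '--robot-name', 'K2']"
launch_args = ast.literal_eval(str(raw, 'ascii'))
assert launch_args[launch_args.index('--robot-name') + 1] == 'K2'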
def on_radio(self, data):
    src, packet = data
    name = src.decode('ascii')
    __, channel, msg_data = deserialize(packet)
    if channel == 'artf':
        # topic rename - beware of 1500 byte limit!
        self.publish('artf_xyz', msg_data)
    elif channel in ['breadcrumb', 'trace_info', 'robot_trace']:
        self.publish(channel, msg_data)
    elif channel == 'xyz':
        self.publish('robot_xyz', [name, msg_data])
        if self.verbose:
            self.debug_robot_poses.append((self.time, name, msg_data))
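# From the unpacking above, each radio message is a (src, packet) pair whose
# packet deserializes to a (time, channel, msg_data) triple. A hypothetical
# sender-side counterpart; the payload shape and robot name are illustrative
# only:
from datetime import timedelta
from osgar.lib.serialize import serialize

packet = serialize((timedelta(seconds=12), 'artf', [['backpack', 12000, -3000, 0]]))
radio_msg = (b'A100L', packet)  # (source name, serialized (time, channel, data))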
def run_input(self):
    names = lookup_stream_names(self.filename)
    print(names)
    ids = [i + 1 for i, name in enumerate(names) if name in self.pins]
    print(ids)
    for timestamp, channel_index, data_raw in LogReader(self.filename, only_stream_id=ids):
        if not self.bus.is_alive():
            break
        channel = names[channel_index - 1]
        assert channel in self.pins
        data = deserialize(data_raw)
        # TODO reuse timestamp
        self.bus.publish(self.pins[channel], data)
    print('Replay completed!')
def deserialize(self):
    from osgar.logger import LogReader, lookup_stream_names, lookup_stream_id
    from osgar.lib.serialize import deserialize

    streams = lookup_stream_names(self.log_file)
    with LogReader(self.log_file,
                   only_stream_id=lookup_stream_id(self.log_file, 'can.can')) as log:
        log_list = []
        for timestamp, stream_id, data in log:
            sec = timestamp.total_seconds()
            data = deserialize(data)
            log_list.append([stream_id, sec, data[0], data[1].hex(), len(data[1])])
    return streams, log_list
def listen(self):
    while True:
        if self.finished.is_set():
            raise BusShutdownException
        if len(self.buffer_queue) == 0:
            dt, stream_id, bytes_data = next(self.reader)
        else:
            dt, stream_id, bytes_data = self.buffer_queue.popleft()
        if stream_id not in self.inputs:
            error = f"unexpected input (stream {stream_id}, known inputs {list(self.inputs.keys())})"
            raise RuntimeError(error)
        channel = self.inputs[stream_id]
        data = deserialize(bytes_data)
        if channel.startswith("slot_"):
            getattr(self.node, channel)(dt, data)
        else:
            break
    return dt, channel, data
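# A minimal sketch of a consumer loop driving this handler during replay; the
# node class, bus attribute, and 'scan' channel are hypothetical:
def run(node):
    try:
        while True:
            dt, channel, data = node.bus.listen()
            if channel == 'scan':
                node.on_scan(dt, data)  # hypothetical handler
    except BusShutdownException:
        pass  # replay finished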
def read_pose3d(filename, pose3d_name, seconds=MAX_SIMULATION_DURATION):
    stream_id = lookup_stream_id(filename, pose3d_name)
    sim_time_id = lookup_stream_id(filename, SIM_TIME_STREAM)
    poses = []
    sim_time = None
    pose_sim_time = None  # sim_time, but valid only for the first pose after a time change
    for dt, channel, data in LogReader(filename, only_stream_id=[stream_id, sim_time_id]):
        value = deserialize(data)
        if len(poses) > seconds:
            break
        if channel == sim_time_id:
            if sim_time != value:
                sim_time = value
                pose_sim_time = sim_time
        else:  # pose3d
            if pose_sim_time is not None:
                poses.append((pose_sim_time, value))
                pose_sim_time = None
    return poses
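# A hypothetical call (stream name and values illustrative). Each entry pairs
# a simulation-time value with a pose, at most one pose per sim-time change:
poses = read_pose3d('sim.log', 'rosmsg.pose3d')
# e.g. poses[0] == (1, [[0.0, 0.0, 0.5], [0.0, 0.0, 0.0, 1.0]])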
def log2pcap(input_filepath, output_dir):
    """
    Extract Velodyne UDP packets from a logfile and save them in the 'pcap'
    format understandable by Wireshark and VeloView.
    """
    output_filepath = os.path.join(output_dir, os.path.basename(input_filepath))
    assert output_filepath.endswith('.log'), output_filepath
    output_filepath = output_filepath[:-4] + '.pcap'
    print(output_filepath)
    only_stream = lookup_stream_id(input_filepath, 'velodyne.raw')
    with LogReader(input_filepath, only_stream_id=only_stream) as log, \
            open(output_filepath, 'wb') as out:
        out.write(unhexlify(FILE_HEADER))
        for timestamp, stream_id, data in log:
            packet = deserialize(data)
            assert len(packet) == 1206, len(packet)
            out.write(unhexlify(PACKET_HEADER))  # TODO revise timestamps
            out.write(unhexlify(IP_HEADER))
            out.write(packet)
def deserialize(self):
    from osgar.logger import LogReader, lookup_stream_names, lookup_stream_id
    from osgar.lib.serialize import deserialize

    streams = lookup_stream_names(self.log_file)  # was a bare `log_file`, undefined in the method
    with LogReader(self.log_file,
                   only_stream_id=lookup_stream_id(self.log_file, 'can.can')) as log:
        log_list = []
        for timestamp, stream_id, data in log:
            sec = timestamp.total_seconds()
            data = deserialize(data)
            log_list.append([stream_id, sec, data[0], data[1].hex(), len(data[1])])
    return log_list
def listen(self):
    dt, stream_id, bytes_data = next(self.reader)
    self.time = dt
    channel = self.inputs[stream_id]
    data = deserialize(bytes_data)
    return dt, channel, data
def listen(self):
    dt, stream_id, bytes_data = next(self.reader)
    channel = self.inputs[stream_id]
    data = deserialize(bytes_data)
    return dt, channel, data
def main():
    import argparse
    import sys
    from osgar.lib.serialize import deserialize

    parser = argparse.ArgumentParser(description='Extract data from log')
    parser.add_argument('logfile', help='filename of stored file')
    parser.add_argument('--stream', help='stream ID or name', default=None, nargs='*')
    parser.add_argument('--list-names', '-l', help='list stream names', action='store_true')
    parser.add_argument('--times', help='display timestamps', action='store_true')
    parser.add_argument('--sec', help='display timestamps in seconds', action='store_true')
    parser.add_argument('--stat', help='output only message statistics', action='store_true')
    parser.add_argument('--raw', help='skip data deserialization', action='store_true')
    args = parser.parse_args()

    if args.list_names:
        print(lookup_stream_names(args.logfile))
        sys.exit()

    if args.stat:
        stat, count, timestamp = calculate_stat(args.logfile)
        seconds = timestamp.total_seconds()
        names = ['sys'] + lookup_stream_names(args.logfile)
        column_width = max([len(x) for x in names])
        for k, name in enumerate(names):
            print('%2d' % k, name.rjust(column_width),
                  '%10d | %5d | %5.1fHz' % (stat.get(k, 0), count[k], count[k] / seconds))
        print('\nTotal time', timestamp)
        sys.exit()

    if args.stream is None:
        only_stream = None
    else:
        only_stream = []
        for name in args.stream:
            only_stream.append(lookup_stream_id(args.logfile, name))

    with LogReader(args.logfile, only_stream_id=only_stream) as log:
        for timestamp, stream_id, data in log:
            if not args.raw and stream_id != 0:
                data = deserialize(data)
            if args.times:
                print(timestamp, stream_id, data)
            elif args.sec:
                print(timestamp.total_seconds(), stream_id, data)
            else:
                sys.stdout.buffer.write(data)
fx = config['robot']['modules'][args.module_name]['init']['fx']
max_depth = config['robot']['modules'][args.module_name]['init'].get('max_depth', 10.0)

last_artf = None  # reported before debug_rgbd
last_result = None
last_cv_result = None
with LogReader(args.logfile,
               only_stream_id=[artf_stream_id, rgbd_stream_id,
                               result_id, cv_result_id]) as logreader:
    for time, stream, msg_data in logreader:
        if args.time_limit_sec is not None and time.total_seconds() > args.time_limit_sec:
            break
        data = deserialize(msg_data)
        if stream == rgbd_stream_id:
            robot_pose, camera_pose, __rgb, depth = data
            # debug_rgbd is stored ONLY when both detectors detect something
            # and the result is fused
            assert last_result is not None
            assert last_cv_result is not None
            checked_result = check_results(last_result, last_cv_result)
            assert checked_result  # the debug rgbd is stored, so there should be a valid report
            report = result2report(checked_result, decompress_depth(depth), fx,
                                   robot_pose, camera_pose, max_depth)
            if args.verbose:
                print(report)
            assert last_artf == report, (last_artf, report)
# log_file = r"C:\Users\xkadj\OneDrive\PROJEKTY\Projekt_ROBOTIKA\logs\K2_200204\test-pcan-200204_142841.log"
streams = lookup_stream_names(log_file)
for list_id in range(len(streams)):
    if 'can.can' == streams[list_id]:
        break

# stream IDs are 1-based; was hard-coded `only_stream_id=20`, ignoring the lookup above
with LogReader(log_file, only_stream_id=list_id + 1) as log:
    log_list = []
    for timestamp, stream_id, data in log:
        sec = timestamp.total_seconds()
        data_des = deserialize(data)
        log_list.append([stream_id, sec, data_des[0], data_des[1].hex(), len(data_des[1])])

# log = pd.DataFrame(log_list)

# sample records:
# 0:00:06.076378  19  [0, 654]
# 0:00:06.634475  19  [0, 656]
# 0:00:07.176152  19  [0, 654]
# 0:00:07.575947  20  [36, b'\x00\x00\xfa\x00', 0]
parser.add_argument('--stream', help='stream ID or name', default='can.can')
parser.add_argument('--dbc', help='interpretation of raw data', choices=['spider', 'eduro'])
args = parser.parse_args()

dbc = {}
if args.dbc == 'spider':
    dbc = {
        0x200: 'H',     # status
        0x201: 'HHHH',  # wheels
        0x202: 'HHHH',  # drive status
        0x203: 'HHHH',  # zero steering
        0x204: 'HHBBH'  # user input
    }
elif args.dbc == 'eduro':
    dbc = {
        # 0x80:  # SYNC, no data
        0x181: '<i',  # encoders L
        0x182: '<i',  # encoders R
        # 0x187, 0x387, 0x487  # compass, acc
        0x28A: '<H',  # buttons
        0x18B: '<H',  # battery(V)/100.0
    }

stream_id = lookup_stream_id(args.logfile, args.stream)
with LogReader(args.logfile, only_stream_id=stream_id) as log:
    for timestamp, stream_id, data in log:
        print(timestamp, print_packet(deserialize(data), dbc))

# vim: expandtab sw=4 ts=4
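# print_packet() is not shown in this section. A minimal sketch of what it
# might do, assuming each deserialized CAN record is a (msg_id, payload, ...)
# list as in the samples elsewhere in this file, with dbc mapping message IDs
# to struct format strings:
import struct

def print_packet_sketch(packet, dbc):
    msg_id, payload = packet[0], packet[1]
    if msg_id in dbc:
        return hex(msg_id), struct.unpack(dbc[msg_id], payload)
    return hex(msg_id), payload.hex()  # unknown ID: dump raw bytes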
parser.add_argument('--draw', '-d', help="draw result", action='store_true')
parser.add_argument('--index', '-i', help="scan index", type=int)
parser.add_argument('--transporter', help="detect_transporter", action='store_true')
args = parser.parse_args()

filename = args.filename
only_stream = lookup_stream_id(filename, 'lidar.scan')
index = args.index
offset_y = 0
with LogReader(filename, only_stream_id=only_stream) as log:
    for ind, row in enumerate(log):
        if index is not None and ind < index:
            continue
        timestamp, stream_id, data = row
        scan = deserialize(data)
        if args.transporter:
            trans = detect_transporter(scan, offset_y)
            if trans is None:
                print('%d\tNone' % ind)
            else:
                print('%d\t%.1f\t%.3f' % (ind, math.degrees(trans[0]), trans[1]))
        else:
            pairs = scan_split(scan[135:-135], max_diff=20)
            if args.verbose:
                for f, t in pairs:
                    print(f, t)
                    print(scan[135 + f:135 + t])
            pairs = [(f + 135, t + 135) for f, t in pairs]
def test_array(self):
    # USB read returns array('B', [...]), which fails to serialize
    arr = array('B', [1, 2, 3])
    b_arr = serialize(bytes(arr))
    self.assertEqual(deserialize(b_arr), bytes([1, 2, 3]))
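# Why the bytes() conversion is needed: assuming a msgpack-based serializer
# (consistent with the sizes in test_packed_data above), array.array is not a
# packable type while bytes is:
import msgpack
from array import array

msgpack.packb(bytes(array('B', [1, 2, 3])))  # fine: bytes packs as a msgpack bin
try:
    msgpack.packb(array('B', [1, 2, 3]))     # raises: array is not packable
except TypeError as e:
    print(e)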