def render(name, gps, orientation):
    """Render a GNSS overview figure and return the id of the saved plot file.

    Args:
        name: File name passed to marv.make_file.
        gps: Sequence of gps rows (column 1 filter, 4 easting, 5 northing,
            6 height, 7 fix status, 0 timestamp).
        orientation: Sequence of (timestamp, heading) rows; may be empty.

    Returns:
        Id of the plot file created via marv.make_file.
    """
    fig = plt.figure()
    fig.subplots_adjust(wspace=0.3)
    ax1 = fig.add_subplot(1, 3, 1)  # easting/northing scatter
    ax2 = fig.add_subplot(2, 3, 2)  # heading over time
    ax3 = fig.add_subplot(2, 3, 3)  # easting over time
    ax4 = fig.add_subplot(2, 3, 5)  # height over time
    ax5 = fig.add_subplot(2, 3, 6)  # northing over time

    # Keep only rows whose column 1 is finite
    gps = np.array(gps)
    gps = gps[np.isfinite(gps[:, 1])]

    # Color-code the fix status column via the prism colormap
    c = cm.prism(gps[:, 7] / 2)
    ax1.scatter(gps[:, 4], gps[:, 5], c=c, edgecolor='none', s=3,
                label="green: RTK\nyellow: DGPS\nred: Single")

    xfmt = md.DateFormatter('%H:%M:%S')
    for time_axis in (ax2, ax3, ax4, ax5):
        time_axis.xaxis.set_major_formatter(xfmt)

    if orientation:
        orientation = np.array(orientation)
        ax2.plot([datetime.fromtimestamp(x) for x in orientation[:, 0]],
                 orientation[:, 1])

    # All three time plots share the same x values
    stamps = [datetime.fromtimestamp(x) for x in gps[:, 0]]
    ax3.plot(stamps, gps[:, 4])
    ax4.plot(stamps, gps[:, 6])
    ax5.plot(stamps, gps[:, 5])
    fig.autofmt_xdate()

    # add the legends
    ax1.legend(loc="best")
    ax1.set_ylabel('GNSS northing [m]')
    ax1.set_xlabel('GNSS easting [m]')
    ax2.set_ylabel('Heading over time [rad]')
    ax3.set_ylabel('GNSS easting over time [m]')
    ax4.set_ylabel('GNSS height over time [m]')
    ax5.set_ylabel('GNSS northing over time [m]')

    fig.set_size_inches(16, 9)
    plt_path, plt_id = marv.make_file(name)
    try:
        fig.savefig(plt_path)
    finally:
        plt.close()
    return plt_id
def image(cam):
    """Extract first image of input stream to jpg file.

    Args:
        cam: Input stream of raw rosbag messages.

    Returns:
        File instance for first image of input stream.
    """
    # Announce the stream and grab the first raw message, if any
    yield marv.set_header(title=cam.topic)
    msg = yield marv.pull(cam)
    if msg is None:
        return

    # Turn the raw payload back into a typed ROS message
    rosmsg = get_message_type(cam)()
    rosmsg.deserialize(msg.data)

    # Convert to an OpenCV image, store as jpeg, publish the file
    name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
    imgfile = yield marv.make_file(name)
    img = imgmsg_to_cv2(rosmsg, "rgb8")
    cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))
    yield marv.push(imgfile)
def images(cam):
    """Extract images from input stream to jpg files.

    Args:
        cam: Input stream of raw rosbag messages.

    Returns:
        File instances for images of input stream.
    """
    # Set output stream title and pull first message
    yield marv.set_header(title=cam.topic)

    # BUGFIX/perf: message type lookup and message instance are
    # loop-invariant; create them once instead of per pulled message.
    pytype = get_message_type(cam)
    rosmsg = pytype()

    # Fetch and process first 20 image messages
    name_template = '%s-{}.jpg' % cam.topic.replace('/', ':')[1:]
    while True:
        idx, msg = yield marv.pull(cam, enumerate=True)
        if msg is None or idx >= 20:
            break

        # Deserialize raw ros message
        rosmsg.deserialize(msg.data)

        # Write image to jpeg and push it to output stream
        img = imgmsg_to_cv2(rosmsg, "rgb8")
        name = name_template.format(idx)
        imgfile = yield marv.make_file(name)
        cv2.imwrite(imgfile.path, img)
        yield marv.push(imgfile)
def filesize_plot_fixed(filesizes):
    """Plot all filesizes from the input stream into an mpld3 widget."""
    # set_header() helps marv to schedule nodes
    yield marv.set_header()

    # Drain the input stream into a list
    sizes = []
    while True:
        size = yield marv.pull(filesizes)
        if size is None:
            break
        sizes.append(size)

    # Render the sizes as blue dots
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    axis.plot(sizes, 'bo')

    # Serialize the figure via mpld3 into a json file
    plotfile = yield marv.make_file('filesizes.json')
    with open(plotfile.path, 'w') as f:
        json.dump(mpld3.fig_to_dict(fig), f)

    # Widget referencing the json file for client-side rendering
    widget = {
        'title': 'Filesizes',
        'mpld3': 'marv-partial:{}'.format(plotfile.relpath),
    }
    yield marv.push(widget)
def filesize_plot(filesizes):
    """Plot all filesizes from the input stream into an mpld3 widget."""
    # Collect every filesize from the input stream
    sizes = []
    while True:
        value = yield marv.pull(filesizes)
        if value is None:
            break
        sizes.append(value)

    # Plot the collected sizes as blue dots
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    axis.plot(sizes, 'bo')

    # EE: save figure to file
    plotfile = yield marv.make_file('filesizes.json')
    with open(plotfile.path, 'w') as fobj:
        json.dump(mpld3.fig_to_dict(fig), fobj)

    # EE: create plot widget referencing file
    widget = {
        'title': 'Filesizes',
        'mpld3': 'marv-partial:{}'.format(plotfile.relpath),
    }

    # Alternative code for community edition
    #plotfile = yield marv.make_file('filesizes.jpg')
    #fig.savefig(plotfile.path)
    #widget = {
    #    'title': 'Filesizes',
    #    'image': {'src': plotfile.relpath},
    #}

    yield marv.push(widget)
def ffmpeg(stream, speed, convert_32FC1_scale, convert_32FC1_offset):
    """Create video for each sensor_msgs/Image topic with ffmpeg.

    Args:
        stream: sensor_msgs/Image message stream.
        speed: Value for ffmpeg/libvpx ``-speed``.
        convert_32FC1_scale: Scale used by ros2cv for 32FC1 images.
        convert_32FC1_offset: Offset used by ros2cv for 32FC1 images.

    Yields:
        File instance of the created webm video.
    """
    yield marv.set_header(title=stream.topic)
    name = '{}.webm'.format(stream.topic.replace('/', '_')[1:])
    video = yield marv.make_file(name)
    duration = (stream.end_time - stream.start_time) * 1e-9
    framerate = stream.msg_count / duration
    pytype = get_message_type(stream)
    rosmsg = pytype()

    encoder = None
    while True:
        msg = yield marv.pull(stream)
        if msg is None:
            break
        rosmsg.deserialize(msg.data)
        try:
            mono, img = ros2cv(rosmsg, convert_32FC1_scale, convert_32FC1_offset)
        except cv_bridge.CvBridgeError as e:
            log = yield marv.get_logger()
            log.error('could not convert image %s', e)
            # BUGFIX: shut down an already started encoder before aborting
            # instead of leaking the ffmpeg subprocess and its pipe.
            if encoder:
                encoder.stdin.close()
                encoder.wait()
            return
        if not encoder:
            # Start ffmpeg lazily once the first frame fixes size/format
            ffargs = [
                'ffmpeg',
                '-f', 'rawvideo',
                # BUGFIX: the original "'%s' % 'gray' if mono else 'bgr24'"
                # only worked via operator precedence; state the choice
                # directly.
                '-pixel_format', 'gray' if mono else 'bgr24',
                '-video_size', '%dx%d' % (rosmsg.width, rosmsg.height),
                '-framerate', '%s' % framerate,
                '-i', '-',
                '-c:v', 'libvpx-vp9',
                '-pix_fmt', 'yuv420p',
                '-loglevel', 'error',
                '-threads', '8',
                '-speed', str(speed),
                '-y',
                video.path,
            ]
            encoder = subprocess.Popen(ffargs, stdin=subprocess.PIPE)
        encoder.stdin.write(img)

    # BUGFIX: an empty stream never starts the encoder; the unconditional
    # encoder.stdin.close() raised AttributeError on None.
    if encoder:
        encoder.stdin.close()
        encoder.wait()
    yield video
def camera_frames(bagmeta, messages, image_width=320, max_frames=50):
    """Extract camera frames from sensor_msgs/Image streams.

    Images are scaled to image_width while keeping the aspect ratio and
    a maximum of max_frames equidistantly spread frames is extracted
    from each stream.

    Args:
        bagmeta: Bag metadata with per-topic ``message_count`` entries.
        messages: Iterable of (topic, msg, timestamp) tuples.
        image_width (int): Target width; height keeps the aspect ratio.
        max_frames (int): Maximum number of frames extracted per topic.

    Returns:
        Mapping of topic name to list of image file ids.
    """
    # Message counts and desired intervals for all topics. All topics,
    # as we cannot know which topics a user configured.
    message_counts = {k: v['message_count'] for k, v in bagmeta.topics.items()}
    # BUGFIX: force true division so the ceiling acts on the real ratio;
    # under Python 2 integer division the interval came out too small and
    # far more than max_frames frames were extracted. Also clamp to >= 1
    # so a zero count cannot cause a modulo-by-zero below.
    intervals = {
        topic: max(int(math.ceil(message_count / float(max_frames))), 1)
        for topic, message_count in message_counts.items()
    }
    # Keep track of per-topic message indices and images generated
    msg_indices = defaultdict(int)
    images = defaultdict(list)
    for topic, msg, _ in messages:
        # Only use messages in desired per-topic intervals
        idx = msg_indices[topic]
        msg_indices[topic] = idx + 1
        if idx % intervals[topic]:
            continue
        try:
            img = imgmsg_to_cv2(msg, "rgb8")
        except Exception:
            # BUGFIX: bare "except:" also swallowed KeyboardInterrupt and
            # SystemExit; best-effort conversion failure is still logged.
            import traceback
            marv.log_error('On topic %r: %s', topic, traceback.format_exc())
            continue
        # Instruct marv to create a file for the current image:
        # image_path is for us to work with the file, image_id we
        # return so marv knows what we are talking about. The
        # location we are writing to is not the same as the one the
        # image will be served from later on.
        image_name = '{name}_{idx:03d}.jpg'.format(
            name=topic.replace('/', '_')[1:],
            idx=idx // intervals[topic])
        image_path, image_id = marv.make_file(image_name)
        # We return a mapping of topics to image lists for
        # e.g. per-topic galleries to be rendered
        images[topic].append(image_id)
        # scale image and write to disk
        height = int(round(image_width * img.shape[0] / img.shape[1]))
        scaled_img = cv2.resize(img, (image_width, height),
                                interpolation=cv2.INTER_AREA)
        cv2.imwrite(image_path, scaled_img, (cv2.IMWRITE_JPEG_QUALITY, 60))
    # return mapping of topics to image lists
    return images
def ffmpeg(stream, speed, convert_32FC1_scale, convert_32FC1_offset):
    """Create video for each sensor_msgs/Image topic with ffmpeg.

    Args:
        stream: sensor_msgs/Image message stream.
        speed: Value for ffmpeg/libvpx ``-speed``.
        convert_32FC1_scale: Scale applied when converting 32FC1 images.
        convert_32FC1_offset: Offset applied when converting 32FC1 images.

    Yields:
        File instance of the created webm video.
    """
    yield marv.set_header(title=stream.topic)
    name = '{}.webm'.format(stream.topic.replace('/', '_')[1:])
    video = yield marv.make_file(name)
    duration = (stream.end_time - stream.start_time) * 1e-9
    framerate = stream.msg_count / duration
    pytype = get_message_type(stream)
    rosmsg = pytype()

    # Encodings fed to ffmpeg as single-channel grayscale raw video
    gray_encodings = {'mono8': 'gray', '32FC1': 'gray', '8UC1': 'gray'}

    encoder = None
    while True:
        msg = yield marv.pull(stream)
        if msg is None:
            break
        rosmsg.deserialize(msg.data)
        if not encoder:
            # Start ffmpeg lazily once the first frame fixes size/format
            ffargs = [
                'ffmpeg',
                '-f', 'rawvideo',
                # BUGFIX: dropped the redundant "'%s' %" around the lookup;
                # .get() already returns the format string.
                '-pixel_format', gray_encodings.get(rosmsg.encoding, 'rgb24'),
                '-video_size', '%dx%d' % (rosmsg.width, rosmsg.height),
                '-framerate', '%s' % framerate,
                '-i', '-',
                '-c:v', 'libvpx-vp9',
                '-pix_fmt', 'yuv420p',
                '-loglevel', 'error',
                '-threads', '8',
                '-speed', str(speed),
                '-y',
                video.path,
            ]
            encoder = subprocess.Popen(ffargs, stdin=subprocess.PIPE)
        if rosmsg.encoding == 'mono8':
            data = rosmsg.data
        elif rosmsg.encoding == '32FC1':
            # Replace NaNs, then map floats to 8-bit gray via scale/offset
            data = cv2.convertScaleAbs(
                numpy.nan_to_num(imgmsg_to_cv2(rosmsg, 'passthrough')),
                None, convert_32FC1_scale, convert_32FC1_offset)
        elif rosmsg.encoding == '8UC1':
            data = imgmsg_to_cv2(rosmsg).tobytes()
        else:
            data = imgmsg_to_cv2(rosmsg, 'rgb8').tobytes()
        encoder.stdin.write(data)

    # BUGFIX: an empty stream never starts the encoder; the unconditional
    # encoder.stdin.close() raised AttributeError on None.
    if encoder:
        encoder.stdin.close()
        encoder.wait()
    yield video
def images(stream, image_width, max_frames, convert_32FC1_scale, convert_32FC1_offset):
    """
    Extract max_frames equidistantly spread images from each sensor_msgs/Image stream.

    Args:
        stream: sensor_msgs/Image stream
        image_width (int): Scale to image_width, keeping aspect ratio.
        max_frames (int): Maximum number of frames to extract.
        convert_32FC1_scale: Scale used by ros2cv for 32FC1 images.
        convert_32FC1_offset: Offset used by ros2cv for 32FC1 images.
    """
    yield marv.set_header(title=stream.topic)
    pytype = get_message_type(stream)
    rosmsg = pytype()
    # BUGFIX: force true division; under Python 2 integer division the
    # ceiling was a no-op and far more than max_frames images were
    # extracted. Clamp to >= 1 against modulo-by-zero and guard log()
    # against msg_count < 2 (log(0) raises, log(1) gives width 0 --
    # '{:01d}' formats single digits identically).
    interval = max(int(math.ceil(stream.msg_count / float(max_frames))), 1)
    digits = int(math.ceil(math.log(max(stream.msg_count, 2)) / math.log(10)))
    name_template = '%s-{:0%sd}.jpg' % (stream.topic.replace('/', ':')[1:],
                                        digits)
    counter = count()
    while True:
        msg = yield marv.pull(stream)
        if msg is None:
            break
        idx = counter.next()
        # Skip all but every interval-th message
        if idx % interval:
            continue
        rosmsg.deserialize(msg.data)
        try:
            mono, img = ros2cv(rosmsg, convert_32FC1_scale, convert_32FC1_offset)
        except cv_bridge.CvBridgeError as e:
            log = yield marv.get_logger()
            log.error('could not convert image %s', e)
            return
        # Scale to image_width, keeping the aspect ratio
        height = int(round(image_width * img.shape[0] / img.shape[1]))
        scaled_img = cv2.resize(img, (image_width, height),
                                interpolation=cv2.INTER_AREA)
        name = name_template.format(idx)
        imgfile = yield marv.make_file(name)
        cv2.imwrite(imgfile.path, scaled_img, (cv2.IMWRITE_JPEG_QUALITY, 60))
        yield imgfile
def images(stream, image_width, max_frames, convert_32FC1_scale, convert_32FC1_offset):
    """
    Extract max_frames equidistantly spread images from each sensor_msgs/Image stream.

    Args:
        stream: sensor_msgs/Image stream
        image_width (int): Scale to image_width, keeping aspect ratio.
        max_frames (int): Maximum number of frames to extract.
        convert_32FC1_scale: Scale applied when converting 32FC1 images.
        convert_32FC1_offset: Offset applied when converting 32FC1 images.
    """
    yield marv.set_header(title=stream.topic)
    pytype = get_message_type(stream)
    rosmsg = pytype()
    # BUGFIX: force true division; under Python 2 integer division the
    # ceiling was a no-op and far more than max_frames images were
    # extracted. Clamp to >= 1 against modulo-by-zero and guard log()
    # against msg_count < 2 (log(0) raises, log(1) gives width 0 --
    # '{:01d}' formats single digits identically).
    interval = max(int(math.ceil(stream.msg_count / float(max_frames))), 1)
    digits = int(math.ceil(math.log(max(stream.msg_count, 2)) / math.log(10)))
    name_template = '%s-{:0%sd}.jpg' % (stream.topic.replace('/', ':')[1:],
                                        digits)
    counter = count()
    while True:
        msg = yield marv.pull(stream)
        if msg is None:
            break
        idx = counter.next()
        # Skip all but every interval-th message
        if idx % interval:
            continue
        rosmsg.deserialize(msg.data)
        # Convert depending on the wire encoding
        if rosmsg.encoding == '32FC1':
            img = cv2.convertScaleAbs(
                numpy.nan_to_num(imgmsg_to_cv2(rosmsg, 'passthrough')),
                None, convert_32FC1_scale, convert_32FC1_offset)
        elif rosmsg.encoding == '8UC1':
            img = imgmsg_to_cv2(rosmsg)
        else:
            img = imgmsg_to_cv2(rosmsg, "rgb8")
        # Scale to image_width, keeping the aspect ratio
        height = int(round(image_width * img.shape[0] / img.shape[1]))
        scaled_img = cv2.resize(img, (image_width, height),
                                interpolation=cv2.INTER_AREA)
        name = name_template.format(idx)
        imgfile = yield marv.make_file(name)
        cv2.imwrite(imgfile.path, scaled_img, (cv2.IMWRITE_JPEG_QUALITY, 60))
        yield imgfile
def trajectory_section(geojson, title, minzoom, maxzoom, tile_server_protocol):
    """Section displaying trajectory on a map.

    Args:
        geojson: Stream from which one geojson object is pulled.
        title (str): Title of the pushed section.
        minzoom (int): Minimum map zoom level.
        maxzoom (int): Maximum map zoom level.
        tile_server_protocol (str): Set to ``https:`` if you host marv
            behind http and prefer the tile requests to be secured.
    """
    geojson = yield marv.pull(geojson)
    if not geojson:
        # No trajectory data -> no section to render
        raise marv.Abort()
    # Two background tile layers plus the trajectory overlay
    layers = [
        {'title': 'Background',
         'tiles': [
             {'title': 'Roadmap',
              'url': '%s//[abc].osm.ternaris.com/styles/osm-bright/rendered/{z}/{x}/{y}.png' % tile_server_protocol,
              'attribution': '© <a href="http://openstreetmap.org/copyright">OpenStreetMap</a> contributors',
              'retina': 3,
              'zoom': {'min': 0, 'max': 20}},
             {'title': 'Satellite',
              'url': '%s//server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}.png' % tile_server_protocol,
              'attribution': 'Sources: Esri, DigitalGlobe, GeoEye, Earthstar Geographics, CNES/Airbus DS, USDA, USGS, AeroGRID, IGN, and the GIS User Community',
              'zoom': {'min': 0, 'max': 18}},
         ]},
        {'title': 'Trajectory',
         'color': (0., 1., 0., 1.),  # RGBA green
         'geojson': geojson},
    ]
    dct = make_map_dict({
        'layers': layers,
        'zoom': {'min': minzoom, 'max': maxzoom},
    })
    # Store map data as json; the widget references it for lazy loading
    jsonfile = yield marv.make_file('data.json')
    with open(jsonfile.path, 'w') as f:
        json.dump(dct, f, sort_keys=True)
    yield marv.push({'title': title,
                     'widgets': [{'map_partial': 'marv-partial:{}'.format(jsonfile.relpath)}]})
def gnss_plots(gps, orientation):
    """Plot GNSS positions and orientation into a single jpg figure.

    Args:
        gps: Stream holding one gps values message.
        orientation: Stream holding one orientation values message; may
            be None or empty.

    Yields:
        File instance of the rendered jpg plot.
    """
    # TODO: framework does not yet support multiple foreach
    # pick only first combination for now
    log = yield marv.get_logger()
    gps, orientation = yield marv.pull_all(gps, orientation)
    if gps is None:
        log.error('No gps messages')
        raise marv.Abort()
    gtitle = gps.title

    gps = yield marv.pull(gps)  # There is only one message
    # Check whether there are any valid messages left
    if gps is None:
        log.error('No valid gps messages')
        raise marv.Abort()
    gps = gps['values']

    # BUGFIX: otitle was only assigned inside the branch below, raising a
    # NameError at the name join when the orientation stream itself was None.
    if orientation is not None:
        otitle = orientation.title
        orientation = yield marv.pull(orientation)
        if orientation is None:
            log.warn('No orientations found')
            otitle = 'none'
            orientation = []
        else:
            orientation = orientation['values']
    else:
        otitle = 'none'
        orientation = []

    name = '__'.join(x.replace('/', ':')[1:] for x in [gtitle, otitle]) + '.jpg'
    title = '{} with {}'.format(gtitle, otitle)
    yield marv.set_header(title=title)
    plotfile = yield marv.make_file(name)

    fig = plt.figure()
    fig.subplots_adjust(wspace=0.3)
    ax1 = fig.add_subplot(1, 3, 1)  # e-n plot
    ax2 = fig.add_subplot(2, 3, 2)  # orientation plot
    ax3 = fig.add_subplot(2, 3, 3)  # e-time plot
    ax4 = fig.add_subplot(2, 3, 5)  # up plot
    ax5 = fig.add_subplot(2, 3, 6)  # n-time plot

    # masking for finite values
    gps = np.array(gps)
    gps = gps[np.isfinite(gps[:, 1])]

    # precompute plot vars
    c = cm.prism(gps[:, 7] / 2)
    ax1.scatter(gps[:, 4], gps[:, 5], c=c, edgecolor='none', s=3,
                label="green: RTK\nyellow: DGPS\nred: Single")

    xfmt = md.DateFormatter('%H:%M:%S')
    ax3.xaxis.set_major_formatter(xfmt)
    ax4.xaxis.set_major_formatter(xfmt)
    ax5.xaxis.set_major_formatter(xfmt)
    if orientation:
        ax2.xaxis.set_major_formatter(xfmt)
        orientation = np.array(orientation)
        ax2.plot([datetime.fromtimestamp(x) for x in orientation[:, 0]],
                 orientation[:, 1])

    ax3.plot([datetime.fromtimestamp(x) for x in gps[:, 0]], gps[:, 4])
    ax4.plot([datetime.fromtimestamp(x) for x in gps[:, 0]], gps[:, 6])
    ax5.plot([datetime.fromtimestamp(x) for x in gps[:, 0]], gps[:, 5])

    fig.autofmt_xdate()

    # BUGFIX: the legend location literal was broken across a physical line
    # ('upper \nright'), which is a syntax error; restore 'upper right'.
    ax1.legend(loc='upper right', title='')
    ax1.set_ylabel('GNSS northing [m]')
    ax1.set_xlabel('GNSS easting [m]')
    ax2.set_ylabel('Heading over time [rad]')
    ax3.set_ylabel('GNSS easting over time [m]')
    ax4.set_ylabel('GNSS height over time [m]')
    ax5.set_ylabel('GNSS northing over time [m]')
    fig.set_size_inches(16, 9)

    # BUGFIX: the figure was only closed when savefig raised (and the error
    # was silently swallowed); close it in all cases and let errors surface.
    try:
        fig.savefig(plotfile.path)
    finally:
        plt.close(fig)
    yield plotfile