def look_at(log, output, anti_instagram, line_detector, image_prep, lane_filter, all_details):
    """Run the lane-detection pipeline on the first frame of a log and write the debug images.

    Reads one image from the log's camera topic, runs the configured pipeline
    stages on it, and dumps the (resized) intermediate images as JPEGs to `output`.
    """
    bag_filename = get_local_bag_file(log)
    bag = rosbag.Bag(bag_filename)

    robot = dtu.which_robot(bag)
    dtu.logger.info('Vehicle name: %s' % robot)
    projector = GroundProjection(robot)

    camera_topic = dtu.get_image_topic(bag)
    frames = dtu.d8n_read_all_images_from_bag(bag, camera_topic, max_images=1)
    first_bgr = dtu.bgr_from_rgb(frames[0]['rgb'])

    # Enable timing output for this pipeline run.
    dtu.DuckietownConstants.show_timeit_benchmarks = True
    images, _stats = run_pipeline(first_bgr,
                                  gp=projector,
                                  anti_instagram_name=anti_instagram,
                                  line_detector_name=line_detector,
                                  image_prep_name=image_prep,
                                  lane_filter_name=lane_filter,
                                  all_details=all_details)

    images = dtu.resize_small_images(images)
    dtu.write_bgr_images_as_jpgs(images, output)
def jobs_videos(context, log, name, outd, only_camera):
    """Schedule compmake jobs that render each image topic of a log as an MP4.

    For every image topic with enough frames in the slice [log.t0, log.t1],
    schedules a `d8n_make_video_from_bag` job. If `only_camera` is true, only
    the main camera topic is rendered, directly to `outd + '-video.mp4'`;
    otherwise each topic gets its own file under `outd` and the main camera's
    video is additionally linked to the predictable `-video.mp4` path.
    """
    filename = get_local_bag_file(log)
    bag = rosbag.Bag(filename)
    main_camera_topic = dtu.get_image_topic(bag)
    min_messages = 5  # need at least 5 frames to make a video
    topics = [_ for _, __ in dtu.d8n_get_all_images_topic_bag(
        bag, min_messages=min_messages)]
    bag.close()

    only_camera_fn = outd + '-video.mp4'

    for topic in topics:
        # Stop counting shortly past the threshold: we only need to know
        # whether the slice has enough frames, not the exact total.
        stop_at = min_messages + 2
        actual_count, count, _stopped_early = \
            count_messages_in_slice(filename, topic, log.t0, log.t1,
                                    stop_at=stop_at)
        assert count >= min_messages
        if actual_count < min_messages:
            msg = 'There are only %d (out of %d) in the slice [%s, %s]' % (
                actual_count, count, log.t0, log.t1)
            msg += '\n topic: %s' % topic
            # BUG FIX: this message was previously built and silently
            # discarded; emit it so skipped topics are visible in the logs.
            dtu.logger.warning(msg)
            continue

        d = topic.replace('/', '_')
        if d.startswith('_'):
            d = d[1:]

        if only_camera:
            if topic != main_camera_topic:
                continue
            out = only_camera_fn
            j = context.comp(dtu.d8n_make_video_from_bag,
                             filename, topic, out,
                             t0=log.t0, t1=log.t1,
                             job_id='%s-%s' % (name, topic))
        else:
            out = os.path.join(outd, name + '-' + d + '.mp4')
            j = context.comp(dtu.d8n_make_video_from_bag,
                             filename, topic, out,
                             job_id='%s-%s' % (name, topic))
            # create link so the main camera video is also reachable at the
            # predictable `-video.mp4` path
            if topic == main_camera_topic:
                context.comp(link, j, out, only_camera_fn)
def run(self, cis):
    """Render the camera topic of every requested bag to MP4 and register the outputs.

    Reads the list of bags from the challenge parameters, produces one video
    per bag in the temporary directory, and declares each as a solution
    output file plus a summary dict of everything processed.
    """
    assert isinstance(cis, ChallengeInterfaceSolution)

    challenge_params = cis.get_challenge_parameters()
    print('parameters: %s' % challenge_params)
    to_process = challenge_params['to_process']

    for basename in to_process:
        bag_path = cis.get_challenge_file(basename)
        print('processing %s' % bag_path)

        bag = rosbag.Bag(bag_path)
        camera_topic = dtu.get_image_topic(bag)

        video_path = os.path.join(cis.get_tmp_dir(), 'tmp.mp4')
        print('creating %s' % video_path)
        dtu.d8n_make_video_from_bag(bag_path, camera_topic, video_path)

        cis.set_solution_output_file(basename + '.mp4', video_path, 'processed video')

    cis.set_solution_output_dict({'processed': to_process})
def work(log, outd, max_images, only_camera, write_frames):
    """Extract frames from the image topics of a log and write thumbnail grids.

    For each image topic (or only the main camera if `only_camera`): read up
    to `max_images` frames from the slice [log.t0, log.t1], upscale frames
    shorter than 480 px, stamp each with its relative timestamp, optionally
    dump the raw frames as individual JPEGs, and write a labelled grid image.
    """
    filename = get_local_bag_file(log)
    t0 = log.t0
    t1 = log.t1
    MIN_HEIGHT = 480  # frames shorter than this get integer-upscaled

    bag = rosbag.Bag(filename)
    main = dtu.get_image_topic(bag)
    topics = [_ for _, __ in dtu.d8n_get_all_images_topic_bag(bag)]
    bag.close()

    dtu.logger.debug('%s - topics: %s' % (filename, topics))

    for topic in topics:
        if only_camera and topic != main:
            continue

        try:
            bag = rosbag.Bag(filename)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; we still log context before re-raising.
            msg = 'Cannot read Bag file %s' % filename
            dtu.logger.error(msg)
            raise

        bag_proxy = dtu.BagReadProxy(bag, t0, t1)
        res = dtu.d8n_read_all_images_from_bag(bag_proxy, topic,
                                               max_images=max_images,
                                               use_relative_time=True)
        bag.close()

        d = topic.replace('/', '_')
        if d.startswith('_'):
            d = d[1:]
        d0 = os.path.join(outd, d)

        images_with_label = []
        for frame in res:
            rgb = frame['rgb']
            H, _W = rgb.shape[:2]
            if H < MIN_HEIGHT:
                # float() guards against integer floor division under
                # Python 2, which would compute a too-small zoom factor.
                zoom = int(np.ceil(MIN_HEIGHT / float(H)))
                rgb = dtu.zoom_image(rgb, zoom)
            s = 't = %.2f' % (frame['timestamp'] - t0)
            images_with_label.append(dtu.add_header_to_rgb(rgb, s))

        if write_frames:
            # Writes the *raw* frames (not the zoomed/labelled versions);
            # the original loop shadowed its own variable to the same effect.
            for i in range(len(res)):
                rgb = res[i]['rgb']
                fn = os.path.join(d0, ('image-%05d' % i) + '.jpg')
                dtu.write_bgr_as_jpg(dtu.bgr_from_rgb(rgb), fn)

        grid = dtu.make_images_grid(
            images_with_label,
            pad=4,
            bgcolor=dtu.ColorConstants.RGB_DUCKIETOWN_YELLOW)
        grid = dtu.add_header_to_rgb(grid, log.log_name, max_height=32)

        # Per-topic grid, unless this is the only topic and it is the camera.
        if (topic != main) or len(topics) > 1:
            dtu.write_rgb_as_jpg(grid, d0 + '.jpg')
        # The main camera's grid is also written at a predictable path.
        if topic == main:
            dtu.write_rgb_as_jpg(grid, outd + '.thumbnails.jpg')
def process_log(self, bag_in, prefix_in, bag_out, prefix_out, utils):  # @UnusedVariable
    """Run the lane-detection pipeline on every camera frame of a log.

    For each image message on the input bag's camera topic: run the pipeline
    configured on this instance, compose the debug images into one annotated
    frame with header lines, and write it (plus per-step statistics) to
    `bag_out` under `prefix_out`, preserving the original timestamps.
    """
    log_name = utils.get_log().log_name

    vehicle_name = dtu.which_robot(bag_in)
    dtu.logger.info('Vehicle name: %s' % vehicle_name)
    gp = GroundProjection(vehicle_name)

    topic = dtu.get_image_topic(bag_in)
    bgcolor = dtu.ColorConstants.BGR_DUCKIETOWN_YELLOW

    sequence = bag_in.read_messages_plus(topics=[topic])
    for _i, mp in enumerate(sequence):
        bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(mp.msg))

        res, stats = run_pipeline(bgr,
                                  gp=gp,
                                  line_detector_name=self.line_detector,
                                  image_prep_name=self.image_prep,
                                  lane_filter_name=self.lane_filter,
                                  anti_instagram_name=self.anti_instagram,
                                  all_details=self.all_details)

        # Smaller tiles when all intermediate details are rendered.
        rect = (480, 640) if not self.all_details else (240, 320)
        res = dtu.resize_images_to_fit_in_rect(res, rect, bgcolor=bgcolor)

        # BUG FIX: message previously read 'fron log'.
        print('abs: %s window: %s from log: %s' % (mp.time_absolute,
                                                   mp.time_window,
                                                   mp.time_from_physical_log_start))
        headers = [
            "Robot: %s log: %s time: %.2f s" % (vehicle_name, log_name,
                                                mp.time_from_physical_log_start),
            "Algorithms | color correction: %s | preparation: %s | detector: %s | filter: %s" % (
                self.anti_instagram,
                self.image_prep,
                self.line_detector,
                self.lane_filter,
            )
        ]

        res = dtu.write_bgr_images_as_jpgs(res, dirname=None, bgcolor=bgcolor)

        cv_image = res['all']
        # Stack headers on top; reversed so the first header ends up topmost.
        for head in reversed(headers):
            max_height = 35
            cv_image = dtu.add_header_to_bgr(cv_image, head, max_height=max_height)

        otopic = "all"
        omsg = dtu.d8_compressed_image_from_cv_image(cv_image, same_timestamp_as=mp.msg)
        t = rospy.Time.from_sec(mp.time_absolute)  # @UndefinedVariable
        print('written %r at t = %s' % (otopic, t.to_sec()))
        bag_out.write(prefix_out + '/' + otopic, omsg, t=t)

        for name, value in stats.items():
            utils.write_stat(prefix_out + '/' + name, value, t=t)