def write_images(bag_filename: str, topic: str, basename: str) -> str:
    """
    Extract all images for `topic` from a bag file and write them as PNG files.

    Files are named ``basename + ".png"`` when there is a single image,
    otherwise ``basename + "-NN.png"`` (zero-padded index).

    :param bag_filename: path to the rosbag file to read
    :param topic: image topic to extract
    :param basename: output filename prefix (without extension)
    :return: the name of the first created file
    :raises ValueError: if the bag contains no messages for `topic`
    """
    dtu.logger.info(f"reading topic {topic!r} from {bag_filename!r}")
    bag = rosbag.Bag(bag_filename)
    try:
        nfound = bag.get_message_count(topic)
        if nfound == 0:
            msg = f"Found 0 messages for topic {topic}"
            msg += "\n\n" + dtu.indent(dbu.get_summary_of_bag_messages(bag), " ")
            raise ValueError(msg)
        res = dbu.d8n_read_all_images_from_bag(bag, topic)
        n = len(res)
        filenames = []
        for i in range(n):
            rgb = res[i]["rgb"]
            # images come back as RGB; convert before PNG-encoding as BGR
            data: bytes = dtu.png_from_bgr(dtu.bgr_from_rgb(rgb))
            if n == 1:
                fn = basename + ".png"
            else:
                fn = basename + f"-{i:02d}.png"
            filenames.append(fn)
            dtu.write_data_to_file(data, fn)
    finally:
        # Fix: previously the bag handle leaked when the ValueError above
        # (or any read error) was raised; close it on every path.
        bag.close()
    return filenames[0]
def go(self):
    """
    Run the image pipeline once on a single frame.

    The frame comes either from ``--image`` (a local path or an http URL)
    or, when no image is given, from one message captured off the robot's
    compressed-camera ROS topic. Resulting images are written as JPEGs
    under the output directory.
    """
    robot = dtu.get_current_robot_name()

    # noinspection PyUnresolvedReferences
    output = self.options.output
    if output is None:
        output = "out/pipeline"  # + dtu.get_md5(self.options.image)[:6]
        self.info(f"No --output given, using {output}")

    # noinspection PyUnresolvedReferences
    image_option = self.options.image

    if image_option is None:
        # No file supplied: grab a single frame from the live camera topic.
        self.info("Validating using the ROS image stream...")
        import rospy
        from sensor_msgs.msg import CompressedImage

        topic = os.path.join("/", robot, "camera_node/image/compressed")
        self.info("Let's wait for an image. Say cheese!")
        # Dummy to get ROS message
        rospy.init_node("single_image")
        try:
            message = cast(
                CompressedImage,
                rospy.wait_for_message(topic, CompressedImage, timeout=10))
        except rospy.ROSException as e:
            self.info(
                f"\n\n\nDidn't get any message: {e}\n MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION "
                "4.1.9 OR HIGHER!\n\n\n")
            raise
        self.info("Image captured")
        bgr = dtu.bgr_from_rgb(dru.rgb_from_ros(message))
        self.info(f"Picture taken: {str(bgr.shape)} ")
    else:
        fn = image_option
        if fn.startswith("http"):
            fn = dtu.get_file_from_url(fn)
        bgr = dtu.bgr_from_jpg_fn(fn)

    dtu.DuckietownConstants.show_timeit_benchmarks = True
    res, _stats = run_pipeline(bgr)

    self.info("Resizing images..")
    res = dtu.resize_small_images(res)

    self.info("Writing images..")
    dtu.write_bgr_images_as_jpgs(res, output)
def look_at(
    log,
    output: str,
    anti_instagram: str,
    line_detector: str,
    image_prep: str,
    lane_filter: str,
    all_details: bool,
) -> None:
    """
    Run the full image pipeline on the first camera frame of `log` and
    write the resulting images as JPEGs under `output`.

    The anti-instagram / line-detector / image-prep / lane-filter names
    select which configured algorithm instances the pipeline uses.
    """
    bag_fn = get_local_bag_file(log)
    bag = rosbag.Bag(bag_fn)

    robot = dbu.which_robot(bag)
    dtu.logger.info(f"Vehicle name: {robot}")

    proxy = dbu.BagReadProxy(bag)
    geometry = get_robot_camera_geometry_from_log(proxy)

    topic = dbu.get_image_topic(bag)
    frames = dbu.d8n_read_all_images_from_bag(bag, topic, max_images=1)
    first_rgb = frames[0]["rgb"]
    bgr = dtu.bgr_from_rgb(first_rgb)

    dtu.DuckietownConstants.show_timeit_benchmarks = True
    results, _stats = run_pipeline(
        bgr,
        gpg=geometry.gpg,
        rectifier=geometry.rectifier,
        anti_instagram_name=anti_instagram,
        line_detector_name=line_detector,
        image_prep_name=image_prep,
        lane_filter_name=lane_filter,
        all_details=all_details,
    )

    results = dtu.resize_small_images(results)
    dtu.write_bgr_images_as_jpgs(results, output)
def on_received_observations(self, context: Context, data: DB20Observations):
    """Decode the latest camera JPEG from `data` and store it in `self.rgb`."""
    first_frame = self.rgb is None
    if first_frame:
        context.info("received first observations")
    jpg_bytes = data.camera.jpg_data
    # decode the JPEG, then swap channels before storing
    self.rgb = dcu.bgr_from_rgb(dcu.bgr_from_jpg(jpg_bytes))
def work(log, outd, max_images, only_camera, write_frames):
    """
    Render a labeled thumbnail grid (and optionally individual frames) for
    each image topic of a log.

    :param log: log descriptor; must expose `t0`, `t1`, and `log_name`
    :param outd: output directory prefix for the generated JPEGs
    :param max_images: cap on the number of frames read per topic
    :param only_camera: if true, process only the main camera topic
    :param write_frames: if true, also write each frame as its own JPEG
    """
    filename = get_local_bag_file(log)
    t0 = log.t0
    t1 = log.t1
    # small images are integer-zoomed up to at least this height for legibility
    MIN_HEIGHT = 480
    # noinspection PyUnboundLocalVariable
    bag = rosbag.Bag(filename)
    main = dbu.get_image_topic(bag)
    topics = [_ for _, __ in dbu.d8n_get_all_images_topic_bag(bag)]
    # Close and reopen per topic below; a fresh Bag is used for each read pass.
    bag.close()
    # NOTE(review): "(unknown)" here and in the error message below looks like a
    # stubbed-out placeholder (probably once the filename); verify against history.
    dtu.logger.debug(f"(unknown) - topics: {topics}")
    for topic in topics:
        if only_camera and topic != main:
            continue
        try:
            bag = rosbag.Bag(filename)
        except:
            msg = f"Cannot read Bag file (unknown)"
            dtu.logger.error(msg)
            raise
        # restrict reads to the [t0, t1] window of the log
        bag_proxy = dbu.BagReadProxy(bag, t0, t1)
        res = dbu.d8n_read_all_images_from_bag(bag_proxy, topic,
                                               max_images=max_images,
                                               use_relative_time=True)
        bag.close()
        # derive a filesystem-safe name from the topic ("/a/b" -> "a_b")
        d = topic.replace("/", "_")
        if d.startswith("_"):
            d = d[1:]
        d0 = os.path.join(outd, d)
        images_with_label = []
        for i in range(len(res)):
            rgb = res[i]["rgb"]
            H, _W = rgb.shape[:2]
            if H < MIN_HEIGHT:
                # integer zoom factor so the frame reaches MIN_HEIGHT
                zoom = int(np.ceil(MIN_HEIGHT / H))
                rgb = dtu.zoom_image(rgb, zoom)
            timestamp = res[i]["timestamp"]
            # label each frame with its time offset from the log start
            s = f"t = {timestamp - t0:.2f}"
            with_label = dtu.add_header_to_rgb(rgb, s)
            images_with_label.append(with_label)
        if write_frames:
            for i, rgb in enumerate(images_with_label):
                # NOTE(review): the loop variable is immediately overwritten, so the
                # ORIGINAL (un-zoomed, un-labeled) frame from `res` is written here.
                # Possibly intentional (raw frames on disk) — confirm before "fixing".
                rgb = res[i]["rgb"]
                fn = os.path.join(d0, f"image-{i:05d}" + ".jpg")
                dtu.write_bgr_as_jpg(dtu.bgr_from_rgb(rgb), fn)
        grid = dtu.make_images_grid(images_with_label, pad=4,
                                    bgcolor=dtu.ColorConstants.RGB_DUCKIETOWN_YELLOW)
        s = log.log_name
        grid = dtu.add_header_to_rgb(grid, s, max_height=32)
        # per-topic grid file, unless this is the only (main) topic
        if (topic != main) or len(topics) > 1:
            fn = d0 + ".jpg"
            dtu.write_rgb_as_jpg(grid, fn)
        # the main topic's grid additionally becomes the log-level thumbnail file
        if topic == main:
            fn = outd + ".thumbnails.jpg"
            dtu.write_rgb_as_jpg(grid, fn)
def go(self):
    """
    Extrinsics calibration: acquire one image (from ``--input`` or the
    robot's live camera stream), estimate the ground-projection homography
    from the rectified image, save it, and write all intermediate images
    to the output directory.
    """
    robot_name = dtu.get_current_robot_name()
    # noinspection PyUnresolvedReferences
    output = self.options.output
    # noinspection PyUnresolvedReferences
    the_input = self.options.input
    if output is None:
        output = "out/calibrate-extrinsics"  # + dtu.get_md5(self.options.image)[:6]
        self.info(f"No --output given, using {output}")
    if the_input is None:
        # No input file: capture a single frame from the live camera topic.
        self.info(
            ("{}\nCalibrating using the ROS image stream...\n".format("*" * 20)))
        import rospy
        from sensor_msgs.msg import CompressedImage

        topic_name = os.path.join("/", robot_name, "camera_node/image/compressed")
        logger.info(f"Topic to listen to is: {topic_name}")
        self.info("Let's wait for an image. Say cheese!")
        # Dummy for getting a ROS message
        rospy.init_node("calibrate_extrinsics")
        try:
            img_msg = rospy.wait_for_message(topic_name, CompressedImage, timeout=10)
            self.info("Image captured")
        except rospy.ROSException as e:
            print((
                "\n\n\n"
                f"Didn't get any message: {e}\n "
                "MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER."
            ))
            raise
        img_msg = cast(CompressedImage, img_msg)
        bgr = dtu.bgr_from_rgb(dru.rgb_from_ros(img_msg))
        self.info(f"Picture taken: {str(bgr.shape)} ")
    else:
        self.info(f"Loading input image {the_input}")
        bgr = dtu.bgr_from_jpg_fn(the_input)
    # normalize to the expected 640-pixel width before calibration
    if bgr.shape[1] != 640:
        interpolation = cv2.INTER_CUBIC
        bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
        self.info(f"Resized to: {str(bgr.shape)} ")
    # Disable the old calibration file
    # NOTE(review): "Disableing" typo is in a user-visible runtime string;
    # fixing it would change program output, so it is left as-is here.
    self.info("Disableing old homography")
    disable_old_homography(robot_name)
    self.info("Obtaining camera info")
    try:
        camera_info = get_camera_info_for_robot(robot_name)
    except Exception as e:
        msg = "Error on obtaining camera info."
        raise Exception(msg) from e
    self.info("Get default homography")
    try:
        homography_dummy = get_homography_default()
    except Exception as e:
        msg = "Error on getting homography."
        raise Exception(msg) from e
    self.info("Rectify image")
    try:
        rect = Rectify(camera_info)
    except Exception as e:
        msg = "Error rectifying image."
        raise Exception(msg) from e
    self.info("Calculate GPG")
    try:
        # GPG built with the placeholder homography; the real one is estimated below
        gpg = GroundProjectionGeometry(camera_info.width, camera_info.height,
                                       homography_dummy.reshape((3, 3)))
    except Exception as e:
        msg = "Error calculating GroundProjectionGeometry."
        raise Exception(msg) from e
    self.info("Ordered Dict")
    # collects every intermediate image; written out in `finally` regardless of outcome
    res = {}
    try:
        bgr_rectified = rect.rectify(bgr, interpolation=cv2.INTER_CUBIC)
        res["bgr"] = bgr
        res["bgr_rectified"] = bgr_rectified
        _new_matrix, res["rectified_full_ratio_auto"] = rect.rectify_full(
            bgr, ratio=1.65)
        (result_gpg, status) = gpg.estimate_homography(bgr_rectified)
        if status is not None:
            # estimation failed; `status` carries the human-readable reason
            raise Exception(status)
        save_homography(result_gpg.H, robot_name)
        msg = """
To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.
"""
        self.info(msg)
    except Exception as E:
        # deliberate best-effort: log the failure but still fall through to
        # `finally` so the intermediate images collected so far are written
        self.error(E)
    finally:
        dtu.write_bgr_images_as_jpgs(res, output)
def on_received_observations(self, context: Context, data: DB20Observations):
    """Log the incoming observations and keep the decoded camera frame."""
    logger.info("received", data=data)
    jpg_bytes = data.camera.jpg_data
    # decode the JPEG, then swap channels before storing
    self.rgb = dcu.bgr_from_rgb(dcu.bgr_from_jpg(jpg_bytes))
def process_log(
    self,
    bag_in: dbu.BagReadProxy,
    prefix_in: str,
    bag_out,
    prefix_out: str,
    utils: ProcessorUtilsInterface,
):
    """
    Run the configured image pipeline on every camera frame of `bag_in`
    and write an annotated composite image (topic "all") plus per-frame
    statistics into `bag_out` under `prefix_out`.

    :param bag_in: read proxy for the input bag
    :param prefix_in: input topic prefix (unused in this body)
    :param bag_out: writable bag receiving composite images and stats
    :param prefix_out: prefix for all topics written to `bag_out`
    :param utils: processor utilities (log metadata, stat writing)
    """
    log_name = utils.get_log().log_name
    vehicle_name = dbu.which_robot(bag_in)
    logger.info(f"Vehicle name: {vehicle_name}")
    topic = dbu.get_image_topic(bag_in)
    # camera geometry (rectifier + ground projection) recovered from the log
    rcg = get_robot_camera_geometry_from_log(bag_in)
    bgcolor = dtu.ColorConstants.BGR_DUCKIETOWN_YELLOW
    sequence = bag_in.read_messages_plus(topics=[topic])
    for _i, mp in enumerate(sequence):
        bgr = dtu.bgr_from_rgb(dru.rgb_from_ros(mp.msg))
        res, stats = run_pipeline(
            bgr,
            gpg=rcg.gpg,
            rectifier=rcg.rectifier,
            line_detector_name=self.line_detector,
            image_prep_name=self.image_prep,
            lane_filter_name=self.lane_filter,
            anti_instagram_name=self.anti_instagram,
            all_details=self.all_details,
        )
        # smaller tiles when all details are shown, so the grid stays manageable
        rect = (480, 640) if not self.all_details else (240, 320)
        res = dtu.resize_images_to_fit_in_rect(res, rect, bgcolor=bgcolor)
        msg = (
            f"abs: {mp.time_absolute} window: {mp.time_window} from log: "
            f"{mp.time_from_physical_log_start}")
        dtu.logger.info(msg)
        # two header lines stamped onto the composite: provenance + algorithms
        headers = [
            f"Robot: {vehicle_name} log: {log_name} time: {mp.time_from_physical_log_start:.2f} s",
            f"Algorithms | color correction: {self.anti_instagram} | preparation: {self.image_prep} | "
            f"detector: {self.line_detector} | filter: {self.lane_filter}",
        ]
        # dirname=None: returns the composite images instead of writing files
        res = dtu.write_bgr_images_as_jpgs(res, dirname=None, bgcolor=bgcolor)
        cv_image = res["all"]
        # reversed so the first header ends up topmost after repeated prepending
        for head in reversed(headers):
            max_height = 35
            cv_image = dtu.add_header_to_bgr(cv_image, head, max_height=max_height)
        otopic = "all"
        # keep the original message timestamp on the outgoing compressed image
        omsg = dru.d8_compressed_image_from_cv_image(
            cv_image, same_timestamp_as=mp.msg)
        t = rospy.Time.from_sec(mp.time_absolute)
        dtu.logger.info(f"written {otopic!r} at t = {t.to_sec()}")
        bag_out.write(prefix_out + "/" + otopic, omsg, t=t)
        for name, value in list(stats.items()):
            utils.write_stat(prefix_out + "/" + name, value, t=t)