Example #1
    def go(self):
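        """
        Run the image pipeline on a single image (from --image or the live
        camera stream) and write out the resulting debug images.
        """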
        vehicle_name = dtu.get_current_robot_name()

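        # Resolve the output directory, falling back to a default when --output is not given.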
        # noinspection PyUnresolvedReferences
        output = self.options.output
        if output is None:
            output = "out/pipeline"  # + dtu.get_md5(self.options.image)[:6]
            self.info(f"No --output given, using {output}")

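        # Load the image from a file or URL when --image is given; otherwise capture a single frame from the robot's ROS camera stream.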
        # noinspection PyUnresolvedReferences
        opt_image = self.options.image
        if opt_image is not None:
            image_filename = opt_image
            if image_filename.startswith("http"):
                image_filename = dtu.get_file_from_url(image_filename)

            bgr = dtu.bgr_from_jpg_fn(image_filename)
        else:
            self.info("Validating using the ROS image stream...")
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join("/", vehicle_name,
                                      "camera_node/image/compressed")

            self.info("Let's wait for an image. Say cheese!")

            # Initialize a throwaway ROS node just so we can receive a single message
            rospy.init_node("single_image")

            try:
                img_msg = cast(
                    CompressedImage,
                    rospy.wait_for_message(topic_name,
                                           CompressedImage,
                                           timeout=10))
                self.info("Image captured")
            except rospy.ROSException as e:
                self.info(
                    f"\n\n\nDidn't get any message: {e}\n MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION "
                    "4.1.9 OR HIGHER!\n\n\n")
                raise

            bgr = dtu.bgr_from_rgb(dru.rgb_from_ros(img_msg))
            self.info(f"Picture taken: {str(bgr.shape)} ")

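        # Enable timing benchmarks, run the pipeline on the BGR image, then resize the debug images and write them out as JPEGs.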
        dtu.DuckietownConstants.show_timeit_benchmarks = True
        res, _stats = run_pipeline(bgr)

        self.info("Resizing images..")
        res = dtu.resize_small_images(res)
        self.info("Writing images..")
        dtu.write_bgr_images_as_jpgs(res, output)
Example #2
def look_at(
    log,
    output: str,
    anti_instagram: str,
    line_detector: str,
    image_prep: str,
    lane_filter: str,
    all_details: bool,
) -> None:
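    """
    Run the image pipeline on the first camera frame of the given log and
    write the resulting debug images to `output`.
    """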
    filename = get_local_bag_file(log)

    bag = rosbag.Bag(filename)

    vehicle_name = dbu.which_robot(bag)

    dtu.logger.info(f"Vehicle name: {vehicle_name}")

    brp = dbu.BagReadProxy(bag)
    rcg = get_robot_camera_geometry_from_log(brp)

    topic = dbu.get_image_topic(bag)
    res = dbu.d8n_read_all_images_from_bag(bag, topic, max_images=1)

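    # Take the single frame that was read and convert it to BGR, which run_pipeline expects.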
    image_cv = res[0]["rgb"]

    #     dtu.logger.debug(dtu.describe_value(image_cv))

    image_cv_bgr = dtu.bgr_from_rgb(image_cv)

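    # Enable timing benchmarks and run the pipeline with the requested component names.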
    dtu.DuckietownConstants.show_timeit_benchmarks = True
    res, _stats = run_pipeline(
        image_cv_bgr,
        gpg=rcg.gpg,
        rectifier=rcg.rectifier,
        anti_instagram_name=anti_instagram,
        line_detector_name=line_detector,
        image_prep_name=image_prep,
        lane_filter_name=lane_filter,
        all_details=all_details,
    )

    res = dtu.resize_small_images(res)

    dtu.write_bgr_images_as_jpgs(res, output)
Example #3
def test_synthetic(
    actual_map_name: str,
    template,
    robot_name: str,
    line_detector_name: str,
    image_prep_name: str,
    lane_filter_name: str,
    pose_or_location,
    outd: str,
    seed: int = 42,
) -> Tuple[object, object]:
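    """
    Render a synthetic camera image of `actual_map_name` as seen from the
    given pose, run the pipeline on it, and return the pipeline results
    together with the estimation error against the ground-truth location.
    """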
    np.random.seed(seed)
    db = get_easy_algo_db()
    actual_map = db.create_instance(FAMILY_SEGMAPS, actual_map_name)

    # first, load calibration for robot
    easy_algo_db = get_easy_algo_db()
    dtu.logger.debug(f"looking for localization template {template!r}")
    localization_template = easy_algo_db.create_instance(
        FAMILY_LOC_TEMPLATES, template)

    rcg = get_robot_camera_geometry(robot_name)

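    # pose_or_location may be either an SE(2) pose matrix or template coordinates; derive the missing representation via the localization template.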
    if pose_or_location.shape == (3, 3):  # SE(2)
        pose = pose_or_location
        location = localization_template.coords_from_pose(pose)
    else:
        location = pose_or_location
        pose = localization_template.pose_from_coords(location)

    simulation_data = simulate_image(actual_map,
                                     pose,
                                     gpg=rcg.gpg,
                                     rectifier=rcg.rectifier,
                                     blur_sigma=0.3)

    image = simulation_data.distorted_synthetic_bgr

    #     anti_instagram_name='identity' # skip
    anti_instagram_name = "baseline"

    all_details = False
    res, stats = run_pipeline(
        image,
        gpg=rcg.gpg,
        rectifier=rcg.rectifier,
        line_detector_name=line_detector_name,
        image_prep_name=image_prep_name,
        lane_filter_name=lane_filter_name,
        anti_instagram_name=anti_instagram_name,
        all_details=all_details,
        ground_truth=pose,
        actual_map=actual_map,
    )

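    # Per-field error of the estimated coordinates with respect to the ground-truth location.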
    error = np.empty_like(location)
    for k in error.dtype.fields:
        error[k] = stats["estimate"][k] - location[k]
    stats["error"] = error

    res = dtu.resize_small_images(res)

    dtu.write_bgr_images_as_jpgs(res, outd, extra_string=outd.split("/")[-1])
    return res, stats