Code example #1
File: image_simulation.py  Project: light5551/dt-core
def simulate_image(sm_orig: SegmentsMap, pose: SE2value,
                   gpg: GroundProjectionGeometry, rectifier: Rectify,
                   blur_sigma: float) -> SimulationData:
    H, W = gpg.get_shape()
    #     H_pad = int(0.3*H)
    #     W_pad = int(0.3*W)
    #     H_padded = H + H_pad
    #     W_padded = W + W_pad
    blank = np.zeros(dtype="uint8", shape=(H, W, 3))
    blank.fill(128)

    tinfo = TransformationsInfo()

    frames = list(set(_.id_frame for _ in sm_orig.points.values()))
    id_frame = frames[0]
    dtu.logger.debug(f"frames: {frames} choose {id_frame}")

    tinfo.add_transformation(frame1=id_frame, frame2=FRAME_AXLE, g=pose)

    sm_axle = tinfo.transform_map_to_frame(sm_orig, FRAME_AXLE)

    rectified_synthetic = plot_map(blank, sm_axle, gpg, do_segments=False)
    rectified_segments = plot_map(blank,
                                  sm_axle,
                                  gpg,
                                  do_segments=True,
                                  do_ground=True,
                                  do_faces=False,
                                  do_horizon=False)

    distorted_synthetic = rectifier.distort(rectified_synthetic)

    distorted_synthetic = add_noise(distorted_synthetic)
    #     distorted_synthetic = cv2.medianBlur(distorted_synthetic, 3)
    distorted_synthetic = cv2.GaussianBlur(distorted_synthetic, (0, 0),
                                           blur_sigma)

    return SimulationData(
        distorted_synthetic_bgr=distorted_synthetic,
        rectified_synthetic_bgr=rectified_synthetic,
        rectified_segments_bgr=rectified_segments,
    )
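
A minimal driver sketch for the example above, assuming the segment map, ground-projection geometry, and rectifier are constructed elsewhere; geometry.SE2_from_translation_angle comes from the PyGeometry package that defines SE2value.

import geometry as geo
import numpy as np

def demo_simulate(sm: SegmentsMap, gpg: GroundProjectionGeometry,
                  rectifier: Rectify) -> SimulationData:
    # Axle pose in the map frame: 10 cm forward, turned 5 degrees left.
    pose = geo.SE2_from_translation_angle(np.array([0.10, 0.0]),
                                          np.deg2rad(5))
    # distorted_synthetic_bgr approximates the raw camera view; the
    # rectified_* images are undistorted debug views.
    return simulate_image(sm, pose, gpg, rectifier, blur_sigma=1.0)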
Code example #2
def simulate_image(sm_orig, pose, gpg, blur_sigma):
    camera_info = gpg.get_camera_info()
    H = camera_info.height
    W = camera_info.width
    #     H_pad = int(0.3*H)
    #     W_pad = int(0.3*W)
    #     H_padded = H + H_pad
    #     W_padded = W + W_pad
    blank = np.zeros(dtype='uint8', shape=(H, W, 3))
    blank.fill(128)

    tinfo = TransformationsInfo()

    frames = list(set(_.id_frame for _ in sm_orig.points.values()))
    id_frame = frames[0]
    dtu.logger.debug('frames: %s choose %s' % (frames, id_frame))

    tinfo.add_transformation(frame1=id_frame, frame2=FRAME_AXLE, g=pose)

    sm_axle = tinfo.transform_map_to_frame(sm_orig, FRAME_AXLE)

    rectified_synthetic = plot_map(blank, sm_axle, gpg, do_segments=False)
    rectified_segments = plot_map(blank,
                                  sm_axle,
                                  gpg,
                                  do_segments=True,
                                  do_ground=True,
                                  do_faces=False,
                                  do_horizon=False)

    distorted_synthetic = gpg.distort(rectified_synthetic)

    distorted_synthetic = add_noise(distorted_synthetic)
    #     distorted_synthetic = cv2.medianBlur(distorted_synthetic, 3)
    distorted_synthetic = cv2.GaussianBlur(distorted_synthetic, (0, 0),
                                           blur_sigma)

    return SimulationData(distorted_synthetic_bgr=distorted_synthetic,
                          rectified_synthetic_bgr=rectified_synthetic,
                          rectified_segments_bgr=rectified_segments)
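
Example #2 is an earlier revision of the same function: the image size is read from gpg.get_camera_info() rather than gpg.get_shape(), and lens distortion is applied by the GroundProjectionGeometry itself (gpg.distort) instead of by a separate Rectify object as in example #1.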
Code example #3
def run_pipeline(
    image: dtu.NPImageBGR,
    gpg: GroundProjectionGeometry,
    rectifier: Rectify,
    line_detector_name: str,
    image_prep_name: str,
    lane_filter_name: str,
    anti_instagram_name: str,
    all_details: bool = False,
    ground_truth=None,
    actual_map=None,
) -> Tuple[Dict[str, dtu.NPImageBGR], Dict[str, float]]:
    """
        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res with the following fields:

            res['input_image']

        ground_truth = pose
    """

    logger.debug(f"backend: {matplotlib.get_backend()}")
    logger.debug(f"fname: {matplotlib.matplotlib_fname()}")

    quick: bool = False

    dtu.check_isinstance(image, np.ndarray)

    res: Dict[str, dtu.NPImageBGR] = {}
    stats = {}

    res["Raw input image"] = image
    algo_db = get_easy_algo_db()
    line_detector = algo_db.create_instance(FAMILY_LINE_DETECTOR,
                                            line_detector_name)
    lane_filter = algo_db.create_instance(FAMILY_LANE_FILTER, lane_filter_name)
    image_prep = algo_db.create_instance(ImagePrep.FAMILY, image_prep_name)
    ai = algo_db.create_instance(AntiInstagramInterface.FAMILY,
                                 anti_instagram_name)

    pts = ProcessingTimingStats()
    pts.reset()
    pts.received_message()
    pts.decided_to_process()

    if all_details:
        segment_list = image_prep.process(FakeContext(),
                                          image,
                                          line_detector,
                                          transform=None)

        res["segments_on_image_input"] = vs_fancy_display(
            image_prep.image_cv, segment_list)
        res["segments_on_image_resized"] = vs_fancy_display(
            image_prep.image_resized, segment_list)

    with pts.phase("calculate AI transform"):
        ai.calculateTransform(image)

    with pts.phase("apply AI transform"):

        transformed = ai.applyTransform(image)

        if all_details:
            res["image_input_transformed"] = transformed

    with pts.phase("edge detection"):
        # note: do not apply transform twice!
        segment_list2 = image_prep.process(pts,
                                           image,
                                           line_detector,
                                           transform=ai.applyTransform)

        if all_details:
            res["resized and corrected"] = image_prep.image_corrected

    logger.debug(f"segment_list2: {len(segment_list2.segments)}")

    if all_details:
        res["segments_on_image_input_transformed"] = vs_fancy_display(
            image_prep.image_cv, segment_list2)

    if all_details:
        res["segments_on_image_input_transformed_resized"] = vs_fancy_display(
            image_prep.image_resized, segment_list2)

    if all_details:
        grid = get_grid(image.shape[:2])
        res["grid"] = grid
        res["grid_remapped"] = rectifier.rectify(grid)

    #     res['difference between the two'] = res['image_input']*0.5 + res['image_input_rect']*0.5

    with pts.phase("rectify_segments"):
        segment_list2_rect = rectify_segments(rectifier, gpg, segment_list2)

    # Project to ground
    with pts.phase("find_ground_coordinates"):
        sg = find_ground_coordinates(gpg, segment_list2_rect)

    lane_filter.initialize()

    # lane_filter.get_plot_phi_d()
    if all_details:
        res["prior"] = lane_filter.get_plot_phi_d()

    with pts.phase("lane filter update"):
        logger.info(type(lane_filter).__name__)
        if type(lane_filter).__name__ == "LaneFilterHistogram":
            # XXX merging pain
            _likelihood = lane_filter.update(sg.segments)
        else:
            _likelihood = lane_filter.update(sg)

    if not quick:
        with pts.phase("lane filter plot"):
            res["likelihood"] = lane_filter.get_plot_phi_d(
                ground_truth=ground_truth)
    easy_algo_db = get_easy_algo_db()

    if isinstance(lane_filter, LaneFilterMoreGeneric):
        template_name = lane_filter.localization_template
    else:
        template_name = "DT17_template_straight_straight"
        dtu.logger.debug(
            f"Using default template {template_name!r} for visualization")

    localization_template = easy_algo_db.create_instance(
        FAMILY_LOC_TEMPLATES, template_name)

    with pts.phase("lane filter get_estimate()"):
        est = lane_filter.get_estimate()

    # Coordinates in TILE frame
    g = localization_template.pose_from_coords(est)
    tinfo = TransformationsInfo()
    tinfo.add_transformation(frame1=FRAME_GLOBAL, frame2=FRAME_AXLE, g=g)

    if all_details:
        if actual_map is not None:
            #         sm_axle = tinfo.transform_map_to_frame(actual_map, FRAME_AXLE)
            res["real"] = plot_map_and_segments(actual_map,
                                                tinfo,
                                                sg.segments,
                                                dpi=120,
                                                ground_truth=ground_truth)

    with pts.phase("rectify"):
        rectified0 = rectifier.rectify(image)

    rectified = ai.applyTransform(rectified0)

    if all_details:
        res["image_input_rect"] = rectified

    res["segments rectified on image rectified"] = vs_fancy_display(
        rectified, segment_list2_rect)

    assumed = localization_template.get_map()

    if not quick:
        with pts.phase("plot_map_and_segments"):
            res["model assumed for localization"] = plot_map_and_segments(
                assumed,
                tinfo,
                sg.segments,
                dpi=120,
                ground_truth=ground_truth)

    assumed_axle = tinfo.transform_map_to_frame(assumed, FRAME_AXLE)

    with pts.phase("plot_map reprojected"):
        res["map reprojected on image"] = plot_map(
            rectified,
            assumed_axle,
            gpg,
            do_ground=False,
            do_horizon=True,
            do_faces=False,
            do_faces_outline=True,
            do_segments=False,
        )

    with pts.phase("quality computation"):
        predicted_segment_list_rectified = predict_segments(sm=assumed_axle,
                                                            gpg=gpg)
        quality_res, quality_stats = judge_quality(
            image, segment_list2_rect, predicted_segment_list_rectified)
        res.update(quality_res)

    #     res['blurred']= cv2.medianBlur(image, 11)
    stats["estimate"] = est
    stats.update(quality_stats)

    dtu.logger.info(pts.get_stats())
    return res, stats
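
A hypothetical invocation of run_pipeline; the algorithm names below are illustrative placeholders for entries in the easy algo DB, and gpg/rectifier are assumed to be constructed already.

import cv2

image = cv2.imread('frame.jpg')  # run_pipeline expects a BGR uint8 array
res, stats = run_pipeline(
    image,
    gpg=gpg,
    rectifier=rectifier,
    line_detector_name='baseline',
    image_prep_name='baseline',
    lane_filter_name='baseline',
    anti_instagram_name='baseline',
    all_details=True,
)
print(sorted(res))        # names of the debug images produced
print(stats['estimate'])  # the lane filter's pose estimate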
Code example #4
File: pipeline.py  Project: frank-qcd-qk/dt-core
def run_pipeline(image, all_details=False, ground_truth=None, actual_map=None):
    """
        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res with the following fields:

            res['input_image']

        ground_truth = pose
    """

    print('backend: %s' % matplotlib.get_backend())
    print('fname: %s' % matplotlib.matplotlib_fname())

    quick = False

    dtu.check_isinstance(image, np.ndarray)

    res = OrderedDict()
    stats = OrderedDict()
    vehicle_name = dtu.get_current_robot_name()

    res['Raw input image'] = image
    gp = get_ground_projection(vehicle_name)
    gpg = gp.get_ground_projection_geometry()
    line_detector = LineDetector()
    lane_filter = LaneFilterHistogram(None)
    print("pass lane_filter")
    ai = AntiInstagram()
    pts = ProcessingTimingStats()
    pts.reset()
    pts.received_message()
    pts.decided_to_process()

    with pts.phase('calculate AI transform'):
        [lower, upper] = ai.calculate_color_balance_thresholds(image)

    with pts.phase('apply AI transform'):
        transformed = ai.apply_color_balance(lower, upper, image)

    # `image_prep` was undefined in this snippet; it is assumed here to be
    # created via the easy algo DB, as in the newer pipeline above (the
    # instance name is hypothetical):
    image_prep = get_easy_algo_db().create_instance(ImagePrep.FAMILY,
                                                    'image_prep')

    with pts.phase('edge detection'):
        # note: do not apply transform twice!
        segment_list2 = image_prep.process(pts,
                                           image,
                                           line_detector,
                                           transform=ai.apply_color_balance)

        if all_details:

            res['resized and corrected'] = image_prep.image_corrected

    dtu.logger.debug('segment_list2: %s' % len(segment_list2.segments))

    if all_details:
        res['segments_on_image_input_transformed'] = \
            vs_fancy_display(image_prep.image_cv, segment_list2)

    if all_details:
        res['segments_on_image_input_transformed_resized'] = \
            vs_fancy_display(image_prep.image_resized, segment_list2)

    if all_details:
        grid = get_grid(image.shape[:2])
        res['grid'] = grid
        res['grid_remapped'] = gpg.rectify(grid)

#     res['difference between the two'] = res['image_input']*0.5 + res['image_input_rect']*0.5

    with pts.phase('rectify_segments'):
        segment_list2_rect = rectify_segments(gpg, segment_list2)

    # Project to ground
    with pts.phase('find_ground_coordinates'):
        sg = find_ground_coordinates(gpg, segment_list2_rect)

    lane_filter.initialize()
    if all_details:
        res['prior'] = lane_filter.get_plot_phi_d()

    with pts.phase('lane filter update'):
        print(type(lane_filter).__name__)
        if type(lane_filter).__name__ == 'LaneFilterHistogram':
            # XXX merging pain
            _likelihood = lane_filter.update(sg.segments)
        else:
            _likelihood = lane_filter.update(sg)

    if not quick:
        with pts.phase('lane filter plot'):
            res['likelihood'] = lane_filter.get_plot_phi_d(
                ground_truth=ground_truth)
    easy_algo_db = get_easy_algo_db()

    if isinstance(lane_filter, LaneFilterMoreGeneric):
        template_name = lane_filter.localization_template
    else:
        template_name = 'DT17_template_straight_straight'
        dtu.logger.debug('Using default template %r for visualization' %
                         template_name)

    localization_template = \
        easy_algo_db.create_instance(FAMILY_LOC_TEMPLATES, template_name)

    with pts.phase('lane filter get_estimate()'):
        est = lane_filter.get_estimate()

    # Coordinates in TILE frame
    g = localization_template.pose_from_coords(est)
    tinfo = TransformationsInfo()
    tinfo.add_transformation(frame1=FRAME_GLOBAL, frame2=FRAME_AXLE, g=g)

    if all_details:
        if actual_map is not None:
            #         sm_axle = tinfo.transform_map_to_frame(actual_map, FRAME_AXLE)
            res['real'] = plot_map_and_segments(actual_map,
                                                tinfo,
                                                sg.segments,
                                                dpi=120,
                                                ground_truth=ground_truth)

    with pts.phase('rectify'):
        rectified0 = gpg.rectify(image)

    # reuse the color-balance thresholds computed above
    rectified = ai.apply_color_balance(lower, upper, rectified0)

    if all_details:
        res['image_input_rect'] = rectified

    res['segments rectified on image rectified'] = \
        vs_fancy_display(rectified, segment_list2_rect)

    assumed = localization_template.get_map()

    if not quick:
        with pts.phase('plot_map_and_segments'):
            res['model assumed for localization'] = plot_map_and_segments(
                assumed,
                tinfo,
                sg.segments,
                dpi=120,
                ground_truth=ground_truth)

    assumed_axle = tinfo.transform_map_to_frame(assumed, FRAME_AXLE)

    with pts.phase('plot_map reprojected'):
        res['map reprojected on image'] = plot_map(rectified,
                                                   assumed_axle,
                                                   gpg,
                                                   do_ground=False,
                                                   do_horizon=True,
                                                   do_faces=False,
                                                   do_faces_outline=True,
                                                   do_segments=False)

    with pts.phase('quality computation'):
        predicted_segment_list_rectified = predict_segments(sm=assumed_axle,
                                                            gpg=gpg)
        quality_res, quality_stats = judge_quality(
            image, segment_list2_rect, predicted_segment_list_rectified)
        res.update(quality_res)


#     res['blurred']= cv2.medianBlur(image, 11)
    stats['estimate'] = est
    stats.update(quality_stats)

    dtu.logger.info(pts.get_stats())
    return res, stats
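
This earlier revision of run_pipeline constructs LineDetector, LaneFilterHistogram, and AntiInstagram directly instead of looking them up in the easy algo DB, and its anti-instagram step computes explicit color-balance thresholds (lower, upper) once, reusing them when correcting the rectified image.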