Example #1
def name_from_spec(x: Union[str, dict]) -> str:
    dtu.check_isinstance(x, (str, dict))
    if isinstance(x, str):
        return x
    elif isinstance(x, dict):
        name, _ = _parse_inline_spec(x)
        return name
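
A minimal usage sketch for name_from_spec, assuming the single-key inline-spec format that _parse_inline_spec (Example #4) parses; the names and constructor below are hypothetical:

# a plain string is returned unchanged
name_from_spec("baseline")                      # -> "baseline"

# for an inline spec, the single top-level key is the name (hypothetical values)
spec = {"my_detector": {"constructor": "my_pkg.MyDetector", "parameters": None}}
name_from_spec(spec)                            # -> "my_detector"
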
Example #2
    def create_instance(self, family_name: str,
                        instance_name_or_spec: Union[dict, str]):
        dtu.check_isinstance(instance_name_or_spec, (dict, str))

        family = self.get_family(family_name)
        if not family.valid:
            msg = f"Cannot instantiate {instance_name_or_spec!r} because its family {family_name!r} is " \
                  f"invalid."
            msg += "\n\n" + dtu.indent(family.error_if_invalid, "  > ")
            raise dtu.DTConfigException(msg)

        if isinstance(instance_name_or_spec, str):
            instance_name = instance_name_or_spec
            dtu.check_is_in("instance", instance_name, family.instances)
            instance = family.instances[instance_name]
            if not instance.valid:
                msg = f"Cannot instantiate {instance_name!r} because it is invalid:\n" \
                      f"{dtu.indent(instance.error_if_invalid, '> ')}"
                raise dtu.DTConfigException(msg)
            res = dtu.instantiate(instance.constructor, instance.parameters)
        elif isinstance(instance_name_or_spec, dict):
            _name, spec = _parse_inline_spec(instance_name_or_spec)
            res = dtu.instantiate(spec.constructor, spec.parameters)
        else:
            assert False

        interface = dtu.import_name(family.interface)
        if not isinstance(res, interface):
            msg = f"I expected that {instance_name_or_spec!r} would be a {interface.__name__} but it is a " \
                  f"{type(res).__name__}."
            raise dtu.DTConfigException(msg)

        return res
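
As a hedged sketch, create_instance accepts either a registered instance name or an inline spec dict; the family and instance names below are hypothetical, and the algo DB is assumed to come from get_easy_algo_db() as in Example #14:

db = get_easy_algo_db()

# by registered instance name (must appear in the family's instances)
detector = db.create_instance("line_detector", "baseline")

# by inline spec, parsed by _parse_inline_spec (Example #4)
detector = db.create_instance("line_detector", {
    "custom": {"constructor": "my_pkg.MyDetector",
               "parameters": {"threshold": 0.5}},
})
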
Example #3
def read_data_for_signals(bag_in, prefix_in, signals):
    topic2messages = defaultdict(lambda: dict(timestamp=[], data=[]))
    topics = []
    for signal_spec in signals:
        dtu.check_isinstance(signal_spec, PlotSignalSpec)
        topic_fqn = prefix_in + signal_spec.topic
        topics.append(topic_fqn)

    for _j, mp in enumerate(bag_in.read_messages_plus(topics=topics)):
        data = interpret_ros(mp.msg)
        topic2messages[mp.topic]["data"].append(data)
        topic2messages[mp.topic]["timestamp"].append(
            mp.time_from_physical_log_start)

    for v in list(topic2messages.values()):
        v["data"] = np.array(v["data"])
        v["timestamp"] = np.array(v["timestamp"])

    for signal_spec in signals:
        topic_fqn = prefix_in + signal_spec.topic
        if topic_fqn not in topic2messages:
            msg = f"Could not find any value for topic {topic_fqn!r}."
            raise ValueError(msg)

    return topic2messages
Example #4
def _parse_inline_spec(x: dict) -> Tuple[str, SpecValues]:
    dtu.check_isinstance(x, dict)
    if len(x) != 1:
        msg = f"Invalid spec: length is {len(x)}"
        dtu.raise_desc(ValueError, msg, x=x)
    key = list(x)[0]
    value = x[key]

    value = copy.deepcopy(value)
    if "description" in value:
        description = value.pop("description")
        dtu.dt_check_isinstance("description", description, str)
    else:
        description = None

    constructor = value.pop("constructor")
    dtu.dt_check_isinstance("constructor", constructor, str)

    parameters = value.pop("parameters")
    dtu.dt_check_isinstance("parameters", parameters, (dict, type(None)))

    if parameters is None:
        parameters = {}

    res = SpecValues(constructor=constructor,
                     parameters=parameters,
                     description=description)
    return key, res
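
For reference, a hedged sketch of the inline-spec dict this function expects: a single-key mapping from the name to a block with constructor, parameters, and an optional description (the field values here are made up):

spec = {
    "my_instance": {                          # exactly one top-level key: the name
        "description": "an optional string",  # optional
        "constructor": "my_pkg.MyClass",      # required, a string
        "parameters": {"gain": 1.0},          # dict or None (None becomes {})
    }
}
name, values = _parse_inline_spec(spec)
# name == "my_instance"; values.constructor == "my_pkg.MyClass"
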
Example #5
def yaml_representation_of_phy_logs(logs: Dict[str, PhysicalLog]) -> str:
    dtu.check_isinstance(logs, dict)
    res = {}
    for k, log in list(logs.items()):
        res[k] = yaml_from_physical_log(log)
    s = dtu.yaml_dump_pretty(res)
    return s
Example #6
def logs_from_yaml(data: dict) -> Dict[str, PhysicalLog]:
    dtu.check_isinstance(data, dict)

    res = {}
    for k, d in list(data.items()):
        res[k] = physical_log_from_yaml(d)
    return res
Example #7
    def __init__(self, current: ResultDBEntry,
                 entries: Sequence[ResultDBEntry]):
        for e in entries:
            dtu.check_isinstance(e, ResultDBEntry)
        self.entries = entries
        self.current = current
        dtu.check_isinstance(current, ResultDBEntry)
Example #8
    def __str__(self):
        n = len(self.entries)
        s = f"ResultsDB with {n} entries"
        s += "\n" + dtu.indent(str(self.current), "", "  current: ")
        for i, p in enumerate(self.entries):
            dtu.check_isinstance(p, ResultDBEntry)
            s += "\n" + dtu.indent(str(p), "", f" {i + 1:2d} of {n}: ")
        return s
Example #9
    def __init__(self, variables: Dict[str, Dict], precision="float32"):
        self.precision = precision
        dtu.check_isinstance(variables, dict)
        if len(variables) != 2:
            msg = f"I can only deal with 2 variables, obtained {variables}"
            raise ValueError(msg)

        self._names = list(variables)
        self._specs = [spec_from_yaml(variables[_]) for _ in self._names]

        s0 = self._specs[0]
        s1 = self._specs[1]
        self._mgrids = list(np.mgrid[s0.min:s0.max:s0.resolution,
                                     s1.min:s1.max:s1.resolution])

        for a in [0, 1]:
            new_max = (self._specs[a].min
                       + self._mgrids[a].shape[a] * self._specs[a].resolution)
            self._specs[a] = replace(self._specs[a], max=new_max)

        H, W = self.shape = self._mgrids[0].shape
        self._centers = [np.zeros(shape=(H, W)), np.zeros(shape=(H, W))]

        for i, j in itertools.product(list(range(H)), list(range(W))):
            self._centers[0][i, j] = s0.min + (i + 0.5) * s0.resolution
            self._centers[1][i, j] = s1.min + (j + 0.5) * s1.resolution

        # these are the bounds you would give to pcolor
        # there is one row and one column more
        # self.d, self.phi are the lower corners

        # Each cell captures this area:
        #         (X[i,   j],   Y[i,   j]),
        #         (X[i,   j+1], Y[i,   j+1]),
        #         (X[i+1, j],   Y[i+1, j]),
        #         (X[i+1, j+1], Y[i+1, j+1])
        self._mgrids_plus = list(
            np.mgrid[s0.min:s0.max + s0.resolution:s0.resolution,
                     s1.min:s1.max + s1.resolution:s1.resolution, ])

        k0_o_sigma = 1.0 / self._specs[0].resolution

        def K0(x):
            d = x * k0_o_sigma
            d2 = d * d
            return np.exp(-d2)

        k1_o_sigma = 1.0 / self._specs[1].resolution

        def K1(x):
            d = x * k1_o_sigma
            d2 = d * d
            return np.exp(-d2)

        self.K0 = K0
        self.K1 = K1
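
K0 and K1 are Gaussian-shaped kernels whose width is tied to each variable's grid resolution; a small self-contained sketch of the weights they produce (the resolution value is hypothetical):

import numpy as np

resolution = 0.1                  # hypothetical grid resolution
k_over_sigma = 1.0 / resolution

def kernel(x):
    d = x * k_over_sigma
    return np.exp(-d * d)

kernel(0.0)              # 1.0 at the cell center
kernel(resolution)       # exp(-1) ~= 0.37 one cell away
kernel(2 * resolution)   # exp(-4) ~= 0.018 two cells away
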
Example #10
def get_homography_for_robot(robot_name: str) -> np.ndarray:
    dtu.check_isinstance(robot_name, str)
    # find the file

    fn = get_homography_info_config_file(robot_name)

    # load the YAML
    data = dtu.yaml_load_file(fn, plain_yaml=True)  # True means "plain"

    # convert the YAML
    homography = homography_from_yaml(data)

    return homography
Example #11
    def validate(self):
        for k, p in list(self.points.items()):
            dtu.check_isinstance(p, SegMapPoint)
            dtu.check_isinstance(k, str)

            coords = p.coords
            if not isinstance(coords, np.ndarray):
                self.points[k] = replace(p, coords=np.array(coords))

        for S in self.segments:
            dtu.check_isinstance(S, SegMapSegment)
            for p in S.points:
                if p not in self.points:
                    msg = f"Invalid point {p!r}"
                    raise ValueError(msg)
            assert len(S.points) == 2

            w1 = self.points[S.points[0]].coords
            w2 = self.points[S.points[1]].coords

            dist = np.linalg.norm(w1 - w2)
            if dist == 0:
                msg = f"This is a degenerate segment (points: {w1} {w2}) "
                msg += f"names: {S.points}"
                raise ValueError(msg)

        for F in self.faces:
            dtu.check_isinstance(F, SegMapFace)
            for p in F.points:
                if p not in self.points:
                    msg = f"Invalid point {p!r}"
                    raise ValueError(msg)
Example #12
def download_if_necessary(log: PhysicalLog) -> PhysicalLog:
    """
    Downloads the log if necessary.

    Use like this:

        log = ...

        log2 = download_if_necessary(log)


    """
    # dtu.logger.info('Log:\n%s' % log.resources)
    dtu.check_isinstance(log, PhysicalLog)

    resource_name = "bag"
    filename = get_log_if_not_exists(log, resource_name=resource_name)

    local_uri = _create_file_uri(filename)
    resource: PhysicalLogResource = log.resources[resource_name]
    urls: List[str] = resource["urls"]
    urls.append(local_uri)

    return log
Example #13
    def query_results_one(self, branch, date, commit):
        possible = self.query_results(branch, date, commit)
        from easy_regression.conditions.eval import DataNotFound

        if len(possible) == 0:
            msg = "Could not find any match for the query."
            msg += f"\n branch: {branch}"
            msg += f"\n   date: {date}"
            msg += f"\n commit: {commit}"
            raise DataNotFound(msg)

        if len(possible) > 1:
            n = len(possible)
            msg = f"Found {n} matches for this query."
            msg += f"\n   branch: {branch}"
            msg += f"\n     date: {date}"
            msg += f"\n   commit: {commit}"
            msg += "\nThese are the matches:"
            for i, p in enumerate(possible):
                dtu.check_isinstance(p, ResultDBEntry)
                msg += "\n" + dtu.indent(str(p), f" {i + 1:2d} of {n}: ")
            raise AmbiguousQuery(msg)

        return possible[0]
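
A hedged sketch of the three possible outcomes, assuming db is the ResultsDB object from Examples #7 and #8; the branch, date, and commit values are hypothetical, and AmbiguousQuery is assumed importable from the same module as query_results_one:

from easy_regression.conditions.eval import DataNotFound

try:
    entry = db.query_results_one("master", "2017-11-24", "abc123")
except DataNotFound:
    pass      # no entry matched the query
except AmbiguousQuery:
    pass      # more than one entry matched; the message lists them
# otherwise, entry is the single matching ResultDBEntry
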
Example #14
def run_pipeline(
    image: dtu.NPImageBGR,
    gpg: GroundProjectionGeometry,
    rectifier: Rectify,
    line_detector_name: str,
    image_prep_name: str,
    lane_filter_name: str,
    anti_instagram_name: str,
    all_details: bool = False,
    ground_truth=None,
    actual_map=None,
) -> Tuple[Dict[str, dtu.NPImageBGR], Dict[str, float]]:
    """
        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res with the following fields:

            res['input_image']

        ground_truth = pose
    """

    logger.debug(f"backend: {matplotlib.get_backend()}")
    logger.debug(f"fname: {matplotlib.matplotlib_fname()}")

    quick: bool = False

    dtu.check_isinstance(image, np.ndarray)

    res: Dict[str, dtu.NPImageBGR] = {}
    stats = {}

    res["Raw input image"] = image
    algo_db = get_easy_algo_db()
    line_detector = algo_db.create_instance(FAMILY_LINE_DETECTOR,
                                            line_detector_name)
    lane_filter = algo_db.create_instance(FAMILY_LANE_FILTER, lane_filter_name)
    image_prep = algo_db.create_instance(ImagePrep.FAMILY, image_prep_name)
    ai = algo_db.create_instance(AntiInstagramInterface.FAMILY,
                                 anti_instagram_name)

    pts = ProcessingTimingStats()
    pts.reset()
    pts.received_message()
    pts.decided_to_process()

    if all_details:
        segment_list = image_prep.process(FakeContext(),
                                          image,
                                          line_detector,
                                          transform=None)

        res["segments_on_image_input"] = vs_fancy_display(
            image_prep.image_cv, segment_list)
        res["segments_on_image_resized"] = vs_fancy_display(
            image_prep.image_resized, segment_list)

    with pts.phase("calculate AI transform"):
        ai.calculateTransform(image)

    with pts.phase("apply AI transform"):

        transformed = ai.applyTransform(image)

        if all_details:
            res["image_input_transformed"] = transformed

    with pts.phase("edge detection"):
        # note: do not apply transform twice!
        segment_list2 = image_prep.process(pts,
                                           image,
                                           line_detector,
                                           transform=ai.applyTransform)

        if all_details:
            res["resized and corrected"] = image_prep.image_corrected

    logger.debug(f"segment_list2: {len(segment_list2.segments)}")

    if all_details:
        res["segments_on_image_input_transformed"] = vs_fancy_display(
            image_prep.image_cv, segment_list2)

    if all_details:
        res["segments_on_image_input_transformed_resized"] = vs_fancy_display(
            image_prep.image_resized, segment_list2)

    if all_details:
        grid = get_grid(image.shape[:2])
        res["grid"] = grid
        res["grid_remapped"] = rectifier.rectify(grid)

    #     res['difference between the two'] = res['image_input']*0.5 + res['image_input_rect']*0.5

    with pts.phase("rectify_segments"):
        segment_list2_rect = rectify_segments(rectifier, gpg, segment_list2)

    # Project to ground
    with pts.phase("find_ground_coordinates"):
        sg = find_ground_coordinates(gpg, segment_list2_rect)

    lane_filter.initialize()

    # lane_filter.get_plot_phi_d()
    if all_details:
        res["prior"] = lane_filter.get_plot_phi_d()

    with pts.phase("lane filter update"):
        logger.info(type(lane_filter).__name__)
        if type(lane_filter).__name__ == "LaneFilterHistogram":
            # XXX merging pain
            _likelihood = lane_filter.update(sg.segments)
        else:
            _likelihood = lane_filter.update(sg)

    if not quick:
        with pts.phase("lane filter plot"):
            res["likelihood"] = lane_filter.get_plot_phi_d(
                ground_truth=ground_truth)
    easy_algo_db = get_easy_algo_db()

    if isinstance(lane_filter, LaneFilterMoreGeneric):
        template_name = lane_filter.localization_template
    else:
        template_name = "DT17_template_straight_straight"
        dtu.logger.debug(
            f"Using default template {template_name!r} for visualization")

    localization_template = easy_algo_db.create_instance(
        FAMILY_LOC_TEMPLATES, template_name)

    with pts.phase("lane filter get_estimate()"):
        est = lane_filter.get_estimate()

    # Coordinates in TILE frame
    g = localization_template.pose_from_coords(est)
    tinfo = TransformationsInfo()
    tinfo.add_transformation(frame1=FRAME_GLOBAL, frame2=FRAME_AXLE, g=g)

    if all_details:
        if actual_map is not None:
            #         sm_axle = tinfo.transform_map_to_frame(actual_map, FRAME_AXLE)
            res["real"] = plot_map_and_segments(actual_map,
                                                tinfo,
                                                sg.segments,
                                                dpi=120,
                                                ground_truth=ground_truth)

    with pts.phase("rectify"):
        rectified0 = rectifier.rectify(image)

    rectified = ai.applyTransform(rectified0)

    if all_details:
        res["image_input_rect"] = rectified

    res["segments rectified on image rectified"] = vs_fancy_display(
        rectified, segment_list2_rect)

    assumed = localization_template.get_map()

    if not quick:
        with pts.phase("plot_map_and_segments"):
            res["model assumed for localization"] = plot_map_and_segments(
                assumed,
                tinfo,
                sg.segments,
                dpi=120,
                ground_truth=ground_truth)

    assumed_axle = tinfo.transform_map_to_frame(assumed, FRAME_AXLE)

    with pts.phase("plot_map reprojected"):
        res["map reprojected on image"] = plot_map(
            rectified,
            assumed_axle,
            gpg,
            do_ground=False,
            do_horizon=True,
            do_faces=False,
            do_faces_outline=True,
            do_segments=False,
        )

    with pts.phase("quality computation"):
        predicted_segment_list_rectified = predict_segments(sm=assumed_axle,
                                                            gpg=gpg)
        quality_res, quality_stats = judge_quality(
            image, segment_list2_rect, predicted_segment_list_rectified)
        res.update(quality_res)

    #     res['blurred']= cv2.medianBlur(image, 11)
    stats["estimate"] = est
    stats.update(quality_stats)

    dtu.logger.info(pts.get_stats())
    return res, stats
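
A hedged sketch of calling run_pipeline; the geometry objects are assumed to come from the usual calibration helpers, and the algorithm names are hypothetical:

image = ...      # BGR numpy array of shape (H, W, 3)
gpg = ...        # GroundProjectionGeometry, assumed available
rectifier = ...  # Rectify built from the camera calibration

res, stats = run_pipeline(
    image,
    gpg=gpg,
    rectifier=rectifier,
    line_detector_name="baseline",
    image_prep_name="baseline",
    lane_filter_name="baseline",
    anti_instagram_name="baseline",
    all_details=False,
)
# res maps image names (str) to BGR images; stats includes stats["estimate"]
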
Example #15
def do_plot(bag_in, prefix_in, bag_out, prefix_out, signals, plot_name):
    topic2messages = defaultdict(lambda: dict(timestamp=[], data=[]))

    topics = []
    for signal_spec in signals:
        dtu.check_isinstance(signal_spec, PlotSignalSpec)
        topic_fqn = prefix_in + signal_spec.topic
        topics.append(topic_fqn)

    for _j, mp in enumerate(bag_in.read_messages_plus(topics=topics)):
        data = interpret_ros(mp.msg)
        topic2messages[mp.topic]["data"].append(data)
        topic2messages[mp.topic]["timestamp"].append(
            mp.time_from_physical_log_start)

    for signal_spec in signals:

        topic_fqn = prefix_in + signal_spec.topic
        if topic_fqn not in topic2messages:
            msg = f"Could not find any value for topic {topic_fqn!r}."
            raise ValueError(msg)

    bgcolor = dtu.ColorConstants.RGB_DUCKIETOWN_YELLOW
    dpi = 100
    figure_args = dict(facecolor=dtu.matplotlib_01_from_rgb(bgcolor))
    a = dtu.CreateImageFromPylab(dpi=dpi, figure_args=figure_args)

    use_legend = len(signals) >= 3
    # todo: check same units

    with a as pylab:
        axes = []
        _fig, ax0 = pylab.subplots()
        ax0.set_xlabel("time (s)")
        axes.append(ax0)
        if use_legend:
            for i in range(len(signals) - 1):
                axes.append(ax0)
        else:
            for i in range(len(signals) - 1):
                axes.append(ax0.twinx())

        for i, signal_spec in enumerate(signals):
            ax = axes[i]

            topic_fqn = prefix_in + signal_spec.topic
            recorded = topic2messages[topic_fqn]
            data = np.array(recorded["data"])
            t = np.array(recorded["timestamp"])

            color = signal_spec.color
            markersize = 5

            data_converted = convert_unit(data, signal_spec.units,
                                          signal_spec.units_display)

            ax.plot(
                t,
                data_converted,
                "o",
                color=color,
                label=signal_spec.label,
                markersize=markersize,
                clip_on=False,
            )

            if not use_legend:
                label = f"{signal_spec.label} [{signal_spec.units_display}]"
                ax.set_ylabel(label, color=signal_spec.color)
                ax.tick_params("y", colors=color)

            ax.set_ylim(signal_spec.min, signal_spec.max)

            outward_offset = 20
            ax.xaxis.set_tick_params(direction="out")
            ax.yaxis.set_tick_params(direction="out")
            ax.spines["left"].set_position(("outward", outward_offset))
            ax.spines["top"].set_color("none")  # don't draw spine
            ax.spines["bottom"].set_position(("outward", outward_offset))
            ax.spines["right"].set_position(("outward", outward_offset))

            pos = {0: "left", 1: "right", 2: "right"}[i]
            ax.spines[pos].set_color(color)

            ax.xaxis.set_ticks_position("bottom")

        if use_legend:
            label = f"[{signal_spec.units_display}]"
            ax.set_ylabel(label)

            pylab.legend()

    bgr = a.get_bgr()
    plot_name = plot_name.replace("/", "")
    # output_filename = os.path.join('tmp', plot_name +'.png')
    # dtu.write_bgr_as_jpg(bgr, output_filename)

    t_inf = rospy.Time.from_sec(bag_in.get_end_time())
    omsg = dru.d8_compressed_image_from_cv_image(bgr, timestamp=t_inf)
    otopic = prefix_out + "/" + plot_name
    bag_out.write(otopic, omsg, t=t_inf)
Example #16
def query_logs(logs: Dict[str, PhysicalLog],
               query: QueryType,
               raise_if_no_matches=True) -> Dict[str, PhysicalLog]:
    """
    query: a string or a list of strings

    Returns a dict str -> PhysicalLog.

    The query can also be a filename.

    """
    if isinstance(query, list):
        res = {}
        for q in query:
            res.update(query_logs(logs, q, raise_if_no_matches=False))
        if raise_if_no_matches and not res:
            msg = "Could not find any match for the queries:"
            for q in query:
                msg += f"\n- {q}"
            raise dtu.DTNoMatches(msg)
        return res
    else:
        dtu.check_isinstance(query, str)

        filters = {}
        filters.update(filters_slice)
        filters.update(dtu.filters0)
        aliases = {}
        aliases.update(logs)
        # adding aliases unless we are asking for everything
        if query != "*":
            # print('adding more (query = %s)' % query)
            for _, log in list(logs.items()):
                dtr = DTR.from_yaml(log.resources["bag"])

                original_name = dtr.name

                # print ('alias: %s %s' % (original_name, dtr.name))
                aliases[original_name] = log
                original_name = original_name.replace(".bag", "")
                aliases[original_name] = log

        result = dtu.fuzzy_match(query,
                                 aliases,
                                 filters=filters,
                                 raise_if_no_matches=raise_if_no_matches)
        # remove doubles after
        # XXX: this still has bugs
        present = defaultdict(set)
        for k, v in list(result.items()):
            present[id(v)].add(k)

        def choose(options):
            if len(options) == 1:
                return list(options)[0]
            else:
                options = sorted(options, key=len)
                return options[0]

        c = {}
        for k, v in list(result.items()):
            chosen = choose(present[id(v)])
            if k == chosen:
                c[k] = v

        return c
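
A hedged usage sketch for query_logs; the query strings are hypothetical, and logs is a dict such as the one returned by logs_from_yaml (Example #6):

logs = ...  # Dict[str, PhysicalLog]

# a single string is fuzzy-matched against log names and bag-name aliases
matched = query_logs(logs, "20171124*")

# a list of queries is merged; dtu.DTNoMatches is raised only if
# none of the queries matches anything
matched = query_logs(logs, ["20171124*", "udem1*"])

# "*" asks for everything, so the per-log aliases are not added
all_logs = query_logs(logs, "*")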