Example #1
    def test_load_dicts(self):
        with open(annotations_path, "r") as f:
            annotations = Annotation.from_dicts(json.load(f))

        for annotation in annotations:
            assert isinstance(
                annotation, Annotation
            ), f"Expected component annotation but found: {type(annotation)}"
Example #2
    def test_copy(self, multi_level_annotation):
        circuit = list(
            filter(
                lambda anno: anno.body[0]["value"] == "Circuit", multi_level_annotation
            )
        ).pop()
        circuit_copy = Annotation.from_annotation(circuit)
        port = circuit.children[0]
        port_copy = circuit_copy.children[0]
        name = port.children[0]
        name_copy = port_copy.children[0]

        assert port_copy.parent_id == circuit_copy.id
        assert name_copy.parent_id == port_copy.id
        assert len(Annotation.to_dicts([circuit_copy])) == 3
        assert circuit_copy.canonical == circuit.id
        assert port_copy.id != port.id
        assert port_copy.canonical == port.id
        assert name_copy.id != name.id
        assert name_copy.canonical == name.id
Example #3
    def test_rotate(self):
        with open(annotations_path, "r") as f:
            components = Annotation.from_dicts(json.load(f))

        c1 = next((c for c in components if c.id == "C1"))
        rotated = deepcopy(c1)

        expected = [(253, 32), (249, 36)]
        for pt in expected:
            rotated.rotate()
            x, y, _, _ = rotated.bounding_box()
            assert pt[0] == x and pt[1] == y, f"Expected {pt} but found ({x}, {y})"
Example #4
File: line.py Project: symbench/scm
    def detect_intersection(self, image, offset=0):
        margin = 4
        top, bottom, left, right = self.snippet(image, margin, offset=offset)
        region = image[top:bottom, left:right]

        # This next portion is written as if we were working with a line running north
        # (in terms of variable names, etc.), but the code is generic.
        idx = 1 if self.is_vertical() else 0
        sum_axis = 1 if self.is_vertical() else 0
        center = min(self.start[idx], margin)
        l_region_idx = [range(0, region.shape[0]), range(0, region.shape[1])]
        l_region_idx[idx] = range(0, center + 1)

        r_region_idx = [range(0, region.shape[0]), range(0, region.shape[1])]
        r_region_idx[idx] = range(center, region.shape[idx])
        l_side = region[np.ix_(*l_region_idx)]
        r_side = region[np.ix_(*r_region_idx)]

        j_regions = []
        for side in [l_side, r_side]:
            perp_edges = np.where(side.sum(axis=sum_axis) == margin + 1)
            for idx in perp_edges[0]:
                last_region = j_regions[-1] if len(j_regions) > 0 else None
                if last_region and (idx - last_region[-1] == 1):
                    last_region.append(idx)
                else:
                    j_regions.append([idx])

        if len(j_regions):
            j_regions.sort(key=lambda vals: vals[0])
            closest_region = (j_regions[0] if self.direction.is_positive() else
                              j_regions[-1])

            oidx = 0 if self.is_vertical() else 1
            ref_point = (self.start.copy() if self.direction.is_positive()
                         else self.end.copy())
            ref_point += offset * self.direction.value()

            start = ref_point.copy()
            start[oidx] += closest_region[0]
            end = ref_point.copy()
            end[oidx] += closest_region[-1]
            direction = (self.direction if self.direction.is_positive() else
                         self.direction.reverse())
            region_line = DirectedLine(start, end, direction, True)

            ((top, left),
             (bottom, right)) = detect_intersection_rect(image, region_line)
            return Annotation.from_component("Intersection",
                                             ((left, top), (right, bottom)))

        return None
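The slicing above relies on numpy's np.ix_ to carve the region into the two sides of the line; a minimal sketch of that behavior, using a hypothetical 4x5 region:

import numpy as np

region = np.arange(20).reshape(4, 5)
# np.ix_(rows, cols) builds an open mesh, so this selects the first two rows
# across every column, mirroring how l_side/r_side are cut out above.
l_side = region[np.ix_(range(0, 2), range(0, 5))]
assert l_side.shape == (2, 5)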
Example #5
    def test_move_to(self):
        with open(annotations_path, "r") as f:
            components = Annotation.from_dicts(json.load(f))

        c1 = next((c for c in components if c.id == "C1"))

        _, _, w, h = c1.bounding_box()
        c1.move_to(0, 0)
        new_x, new_y, new_w, new_h = c1.bounding_box()
        assert new_x == 0
        assert new_y == 0
        assert w == new_w, f"Width was changed from {w} to {new_w}"
        assert h == new_h, f"Height was changed from {h} to {new_h}"
Example #6
async def detect_connections(annotation_dicts: List[Dict[str, Any]]):
    try:
        annotations = Annotation.from_dicts(annotation_dicts)
    except Exception as e:
        raise HTTPException(status_code=400, detail=e.args[0])

    get_image_url = lambda ann: ann.target["source"]
    annotations.sort(key=get_image_url)
    results = []
    for image_url, group in groupby(annotations, get_image_url):
        group = list(group)
        image = clean_image(cv2.imread(get_image_path(image_url), cv2.IMREAD_UNCHANGED))
        edges = connect.collect_edges(image, group)
        edges = connect.unique_edges(edges)
        results.append(
            {
                "components": Annotation.to_dicts(group),
                "edges": [edge.to_dict() for edge in edges],
                "image_url": image_url,
            }
        )

    return results
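Note that itertools.groupby only groups consecutive items, which is why the annotations are sorted by image URL before grouping; a minimal sketch with hypothetical URLs:

from itertools import groupby

urls = ["b.png", "a.png", "a.png"]  # hypothetical image URLs
urls.sort()
groups = [(url, len(list(group))) for url, group in groupby(urls)]
assert groups == [("a.png", 2), ("b.png", 1)]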
Example #7
    def test_move_ports(self):
        with open(annotations_path, "r") as f:
            components = Annotation.from_dicts(json.load(f))

        c1 = next((c for c in components if c.id == "C1"))

        c1x, c1y, _, _ = c1.bounding_box()
        port = c1.children[0]
        x, y, _, _ = port.bounding_box()
        expected = (x - c1x + 10, y - c1y + 10)
        c1.move_to(10, 10)
        x, y, _, _ = port.bounding_box()
        assert (
            expected[0] == x and expected[1] == y
        ), f"Expected {expected} but found ({x}, {y})"
Example #8
def from_line(line, width, height, components, source=None):
    chunks = line.split(" ")
    if len(chunks) == 5:
        chunks.append(None)
    label_index, cx, cy, box_w, box_h, confidence = chunks
    label = components[int(label_index)]
    cx = float(cx) * width
    box_w = float(box_w) * width
    cy = float(cy) * height
    box_h = float(box_h) * height
    x = cx - box_w / 2
    y = cy - box_h / 2
    right = x + box_w
    bottom = y + box_h
    return Annotation.from_component(label, ((x, y), (right, bottom)), source)
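A YOLO label line stores "class cx cy w h [confidence]" with coordinates normalized to the image size, so from_line scales them back to pixels and converts the center/size pair into corners; a minimal usage sketch with hypothetical values:

components = ["Capacitor", "Resistor"]  # hypothetical label map
line = "1 0.5 0.5 0.1 0.2 0.97"
annotation = from_line(line, width=640, height=480, components=components)
# For a 640x480 image this yields a "Resistor" box from (288.0, 192.0) to (352.0, 288.0).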
Example #9
    def test_rotate_ports(self):
        with open(annotations_path, "r") as f:
            components = Annotation.from_dicts(json.load(f))

        c1 = next((c for c in components if c.id == "C1"))
        c1.children = c1.children[0:1]

        expected = [(0, -14), (-14, -4)]
        c1.move_to(0, 0)
        port = c1.children[0]
        for pt in expected:
            c1.rotate(90, np.array([0, 0]))
            x, y, _, _ = port.bounding_box()
            assert (
                abs(pt[0] - x) < 0.00001 and abs(pt[1] - y) < 0.00001
            ), f"Expected {pt} but found ({x}, {y})"
Example #10
    def test_move_to_ports_size(self):
        with open(annotations_path, "r") as f:
            components = Annotation.from_dicts(json.load(f))

        c1 = next((c for c in components if c.id == "C1"))
        port = c1.children[0]

        x, y, _, _ = c1.bounding_box()
        px, py, w, h = port.bounding_box()
        offset_x = px - x
        offset_y = py - y

        c1.move_to(0, 0)
        x, y, new_w, new_h = port.bounding_box()
        assert offset_x == x, f"Port x offset changed from {offset_x} to {x}"
        assert offset_y == y, f"Port y offset changed from {offset_y} to {y}"
        assert w == new_w, f"Width was changed from {w} to {new_w}"
        assert h == new_h, f"Height was changed from {h} to {new_h}"
Example #11
    def test_isclose(self):
        with open(annotations_path, "r") as f:
            annotation = Annotation.from_dicts(json.load(f))[0]
        other_anno = Annotation.from_annotation(annotation)
        assert annotation.isclose(other_anno)
Example #12
async def invoke_model(model_name, body: DetectComponentsRequest):
    ensure_yolo_enabled()
    yolo_path = os.environ["YOLO_PATH"]
    run_dir = os.path.join(yolo_path, "runs")
    model_path = os.path.join(run_dir, "train", model_name, "weights", "best.pt")
    if not os.path.exists(model_path):
        raise HTTPException(status_code=404, detail="Model not found.")

    component_file = os.path.join(run_dir, "train", model_name, "labels.json")
    with open(component_file, "r") as f:
        components = json.load(f)

    detect_dir = os.path.join(run_dir, "detect")
    annotations = []
    for image_url in body.image_urls:

        with tempfile.NamedTemporaryFile(dir=detect_dir) as f:
            tmp_name = os.path.basename(f.name)
            x, y = 0, 0
            image_path = get_image_path(image_url)
            image_file = None
            if body.parent:  # Crop the image if needed
                x, y, width, height = body.parent.bounding_box().astype(int)
                image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
                snippet = image[y : y + height, x : x + width]
                ext = "." + image_url.split(".")[-1]
                image_file = tempfile.NamedTemporaryFile(suffix=ext)
                image_path = image_file.name
                cv2.imwrite(image_path, snippet)

            detect_file = os.path.join(yolo_path, "detect.py")
            cmd = [  # TODO: optimize this
                "python3",
                detect_file,
                "--weights",
                model_path,
                "--source",
                image_path,
                "--save-txt",
                "--save-conf",
                "--nosave",
                "--name",
                tmp_name,
                "--exist-ok",
            ]
            subprocess.run(cmd)
            ext = image_path.split(os.path.extsep)[-1]
            image_basename = os.path.basename(image_path).replace(
                os.path.extsep + ext, ""
            )
            labels_file = os.path.join(
                "runs", "detect", tmp_name, "labels", image_basename + ".txt"
            )
            with open(labels_file, "r") as lbl_f:
                annos = yolo.load(image_path, lbl_f, components, source=image_url)
                for a in annos:
                    a.parent_id = body.parent.id if body.parent else None

                annotations.extend([a.move_by(x, y) for a in annos])
            if image_file:
                image_file.close()

    # TODO: remove duplicates
    # TODO: remove existing
    # TODO: include confidence?
    return Annotation.to_dicts(annotations)
Example #13
async def detect_components(body: ComponentRecognitionRequest):
    """
    Given a list of annotations and a set of image URLs, find approximate matches using
    template matching. The annotations are represented as lists of IDs. The first ID
    is the annotation to match; the rest are the children to copy onto the new match (i.e., ports).

    If an annotation is a child annotation, restrict matches to the parent's bounding box.
    """

    try:
        existing = Annotation.from_dicts(body.annotations)
        all_annotations = [
            anno for parent_anno in existing for anno in parent_anno.flat()
        ]
        anno_dict = {anno.id: anno for anno in all_annotations}
    except Exception as e:
        raise HTTPException(status_code=400, detail=e.args[0])

    target_images = (
        AnnotationImage(
            source=url,
            content=cv2.imread(get_image_path(url), cv2.IMREAD_UNCHANGED),
        )
        for url in body.image_urls
    )

    annotations = []
    for anno_ids in body.match_ids:
        parent_id, children_ids = anno_ids[0], anno_ids[1:]
        parent = anno_dict[parent_id]
        annotations.append(parent.with_descendents(children_ids))

    annotations, child_annos = partition(lambda a: a.parent_id is None, annotations)

    # Match top-level annotation in the target images
    annotated_snippets = [
        (annotation, image_snippet(annotation)) for annotation in annotations
    ]
    matches = chain.from_iterable(
        (
            recognition.find_unique_matches(image, annotated_snippets)
            for image in target_images
        )
    )

    # If the annotation has a parent, search within the parent's bbox
    child_matches = chain.from_iterable(
        (
            detect_child_components(anno_dict[anno.parent_id], anno)
            for anno in child_annos
        )
    )

    new_annotations = []
    for match in chain(matches, child_matches):
        exists = any((match.isclose(ann) for ann in all_annotations))

        if not exists:  # Keep the closest match
            index = next(
                (i for (i, a) in enumerate(new_annotations) if match.isclose(a)), -1
            )
            if index > -1:
                existing_match = new_annotations[index]
                new_annotations[index] = (
                    match
                    if match.confidence > existing_match.confidence
                    else existing_match
                )
            else:
                new_annotations.append(match)

    return Annotation.to_dicts(new_annotations)
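To illustrate the match_ids shape described in the docstring, a hypothetical request body might look like the sketch below (IDs and the image URL are invented for illustration):

existing_annotation_dicts = [...]  # the annotation dicts already on the sheet (elided)
request_body = {
    "annotations": existing_annotation_dicts,
    "image_urls": ["schematic-2.png"],  # hypothetical target image
    "match_ids": [
        # first ID is the annotation to match; the rest are its children (ports) to copy
        ["C1", "C1-port-1", "C1-port-2"],
    ],
}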
Example #14
    def test_missing_parent(self, missing_parent_dicts):
        with pytest.raises(ValueError):
            Annotation.from_dicts(missing_parent_dicts)
Example #15
    def multi_level_annotation(self):
        with open(get_test_asset_path("bq25700a.json"), "r") as f:
            return Annotation.from_dicts(json.load(f))
Example #16
    def output_annotations(self, name, image, annotations):
        print(json.dumps(Annotation.to_dicts(annotations)))
Example #17
    def test_multi_levels(self, multi_level_annotation):
        assert len(multi_level_annotation) == 2
        assert len(Annotation.to_dicts(multi_level_annotation)) == 6
Example #18
    def collpitts_r1_annotation(self):
        return Annotation.from_component("Resistor",
                                         ((152.0, 182.0), (178.0, 220.0)))
Example #19
    def test_isclose_diff_images(self):
        with open(annotations_path, "r") as f:
            annotation = Annotation.from_dicts(json.load(f))[0]
        other_anno = Annotation.from_annotation(annotation)
        other_anno.target["source"] += "2"
        assert not annotation.isclose(other_anno)
Example #20
parser = ArgumentParser()
parser.add_argument("annotations", nargs="+")
parser.add_argument("--components", required=True)
parser.add_argument("--outdir", default=".")
args = parser.parse_args()

with open(args.components, "r") as f:
    components = json.load(f)
    if type(components) is dict:
        components = sorted(components["components"].keys())

for annotations_path in args.annotations:
    print(annotations_path)
    with open(annotations_path, "r") as f:
        annotations = Annotation.from_dicts(json.load(f))

    if len(annotations) == 0:
        continue

    url = annotations[0].target["source"]
    name = url.split("/").pop()

    image_path = get_image_path(url)
    out_image = f"{args.outdir}/images/{name}"
    os.makedirs(os.path.dirname(out_image), exist_ok=True)

    if image_path != out_image:
        shutil.copyfile(image_path, out_image)

    ext_regex = re.compile(r"\.[^.]+$")
Example #21
    def test_from_annotation_port_parent_id(self):
        with open(annotations_path, "r") as f:
            annotation = Annotation.from_dicts(json.load(f))[0]
        other_anno = Annotation.from_annotation(annotation)
        for child in other_anno.children:
            assert child.parent_id == other_anno.id