def __equivalencia(afd: Automata) -> list:
    """Compute the pairs of distinct, equivalent states of a DFA.

    Implements the classic table-filling (pair-marking) algorithm: a pair
    of states is marked ``True`` once it is known to be distinguishable;
    pairs left unmarked at the fixed point are equivalent.

    Parameters
    ----------
    afd : Automata
        Deterministic finite automaton exposing ``getStates()``,
        ``getFinals()``, ``getTransitionsFrom()`` and ``getAlphabet()``.

    Returns
    -------
    list
        De-duplicated ``(state_a, state_b)`` tuples (each sorted
        internally) of equivalent state pairs, excluding the trivial
        ``(q, q)`` diagonal.
    """
    # equivalencia[(i, j)] is True when states i and j are distinguishable.
    # annotations[(i, j)] holds successor pairs whose distinguishability
    # must be propagated back to (i, j) in the fixed-point loop below.
    equivalencia = dict()
    annotations = dict()
    for i in afd.getStates():
        for j in afd.getStates():
            equivalencia[(i, j)] = False
            annotations[(i, j)] = []
            if i == j:
                # A state is always equivalent to itself; no successor
                # dependencies are needed for the diagonal.
                annotations.pop((i, j))
    # Base case: a final state is never equivalent to a non-final state.
    for i in list(equivalencia.keys()):
        if (i[0] in afd.getFinals() and i[1] not in afd.getFinals()) or (
                i[1] in afd.getFinals() and i[0] not in afd.getFinals()):
            equivalencia[i] = True
    # Inductive step: mark a pair if some input symbol leads it to an
    # already-marked pair; otherwise record the successor pair so a later
    # marking can still propagate back here.
    for i in list(equivalencia.keys()):
        if not equivalencia[i]:
            t1 = afd.getTransitionsFrom(i[0])
            t2 = afd.getTransitionsFrom(i[1])
            if t1 != t2:
                for j in range(0, len(afd.getAlphabet())):
                    # t[j][1][0] is the destination state reached on the
                    # j-th alphabet symbol.
                    if equivalencia[(t1[j][1][0], t2[j][1][0])]:
                        equivalencia[i] = True
                        equivalencia[(i[1], i[0])] = True
                        break
                    else:
                        annotations[i].append((t1[j][1][0], t2[j][1][0]))
                        annotations[(i[1], i[0])].append(
                            (t1[j][1][0], t2[j][1][0]))
    # Propagate markings through the recorded dependencies until no new
    # pair gets marked (fixed point).
    flagW = False
    while not flagW:
        flagW = True
        for i in list(annotations.keys()):
            if not equivalencia[i]:
                for j in annotations[i]:
                    if equivalencia[j]:
                        equivalencia[i] = True
                        flagW = False
                        break
    # Collect unmarked (equivalent) pairs, drop the diagonal and normalise
    # each pair to sorted order so (a, b) and (b, a) collapse into one.
    estados = [i for i in equivalencia if not equivalencia[i]]
    estados_eq = list()
    for i in estados:
        if i[0] != i[1]:
            estados_eq.append(tuple(sorted(i)))
    return list(set(estados_eq))
def __dask_distributed_annotations_unpack__(
    annotations: MutableMapping[str, Any],
    new_annotations: Mapping[str, Any] | None,
    keys: Iterable[Hashable],
) -> None:
    """Expand layer annotations over *keys* and merge them into *annotations*.

    A plain shallow merge is not enough because annotations such as
    retries, priority, or workers may carry per-key entries contributed by
    several layers; per-key dicts must therefore be merged, with entries
    already present in *annotations* taking precedence.

    Parameters
    ----------
    annotations: MutableMapping[str, Any], input/output
        Previously unpacked annotations, updated in place with the newly
        expanded ones.
    new_annotations: Mapping[str, Any], optional
        Annotations to unpack into *annotations*; ``None`` is a no-op.
    keys: Iterable
        All keys in the layer.
    """
    if new_annotations is None:
        return

    per_key: dict = {}
    stringified_keys = None  # computed lazily, at most once

    for name, value in new_annotations.items():
        if type(value) is dict and "__expanded_annotations__" in value:
            # Already expanded per key; copy and strip the marker entry.
            value = value.copy()
            del value["__expanded_annotations__"]
            per_key[name] = value
        else:
            # Scalar annotation: broadcast it across every key of the layer.
            if stringified_keys is None:
                stringified_keys = [stringify(k) for k in keys]
            per_key[name] = dict.fromkeys(stringified_keys, value)

    # Merge: pre-existing per-key entries win over the freshly expanded ones.
    for name, mapping in per_key.items():
        mapping.update(annotations.get(name, {}))
    annotations.update(per_key)
def _get_class_tree_properties(self, class_: typing.Any, properties: dict = None,
                               annotations: dict = None) -> Tuple[dict, dict]:
    """Collect class attributes and type hints of *class_* and all its bases.

    Recursively walks ``__bases__``, accumulating each class's ``__dict__``
    into *properties* and its resolved type hints into *annotations*.

    Parameters
    ----------
    class_ : typing.Any
        The class (or class-like object) to inspect.
    properties : dict, optional
        Accumulator of attribute names to values; created when ``None``.
    annotations : dict, optional
        Accumulator of attribute names to type hints; created when ``None``.

    Returns
    -------
    Tuple[dict, dict]
        The (properties, annotations) accumulators.
    """
    # Initialise each accumulator independently: the original combined
    # `and` check left one of them as None (and crashing on .update)
    # whenever a caller supplied only the other.
    if properties is None:
        properties = {}
    if annotations is None:
        annotations = {}
    properties.update(class_.__dict__)
    try:
        annotations.update(typing.get_type_hints(class_))
    except AttributeError:
        # Some objects can make get_type_hints fail; skip their hints but
        # keep traversing the rest of the class tree.
        pass
    if hasattr(class_, '__bases__'):
        for base in class_.__bases__:
            properties, annotations = self._get_class_tree_properties(
                base, properties, annotations)
    return properties, annotations
def merge_coco(a: CocoDataset, b: CocoDataset) -> CocoDataset:
    """Combine two COCO datasets into a single dataset.

    Whenever the same image id, category id, or annotation id appears in
    both inputs, the entry from *b* is authoritative. This is useful for
    merging keypoint data into the main dataset (e.g., for COCO 2017).
    """
    # Build id -> record mappings per section; unpacking b second lets its
    # entries overwrite a's on id collisions.
    merged_images = {**get_image_mapping(a), **get_image_mapping(b)}
    merged_categories = {**get_category_mapping(a), **get_category_mapping(b)}
    merged_annotations = {**get_annotation_mapping(a), **get_annotation_mapping(b)}
    return {
        "images": list(merged_images.values()),
        "categories": list(merged_categories.values()),
        "annotations": list(merged_annotations.values())
    }