Example #1
    def test_annotation_comparison(self):
        a = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[
                Caption('hello'), # unmatched
                Caption('world', group=5),
                Label(2, attributes={ 'x': 1, 'y': '2', }),
                Bbox(1, 2, 3, 4, label=4, z_order=1, attributes={
                    'score': 1.0,
                }),
                Bbox(5, 6, 7, 8, group=5),
                Points([1, 2, 2, 0, 1, 1], label=0, z_order=4),
                Mask(label=3, z_order=2, image=np.ones((2, 3))),
            ]),
        ], categories=['a', 'b', 'c', 'd'])

        b = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[
                Caption('world', group=5),
                Label(2, attributes={ 'x': 1, 'y': '2', }),
                Bbox(1, 2, 3, 4, label=4, z_order=1, attributes={
                    'score': 1.0,
                }),
                Bbox(5, 6, 7, 8, group=5),
                Bbox(5, 6, 7, 8, group=5), # unmatched
                Points([1, 2, 2, 0, 1, 1], label=0, z_order=4),
                Mask(label=3, z_order=2, image=np.ones((2, 3))),
            ]),
        ], categories=['a', 'b', 'c', 'd'])

        comp = ExactComparator()
        matched, unmatched, _, _, errors = comp.compare_datasets(a, b)

        self.assertEqual(6, len(matched), matched)
        self.assertEqual(2, len(unmatched), unmatched)
        self.assertEqual(0, len(errors), errors)
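
The test snippets in these examples assume a common set of Datumaro imports. A sketch of that import block is shown below; the exact module paths are an assumption, since the annotation types have moved between Datumaro releases (from datumaro.components.extractor to datumaro.components.annotation in later versions):

# Import sketch for the test snippets; module paths are assumptions and
# vary by Datumaro version (annotation types later moved to
# datumaro.components.annotation).
import numpy as np

from datumaro.components.dataset import Dataset
from datumaro.components.extractor import (
    Bbox, Caption, DatasetItem, Label, Mask, Points,
)
from datumaro.components.operations import ExactComparator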
Example #2
    def test_class_comparison(self):
        a = Dataset.from_iterable([], categories=['a', 'b', 'c'])
        b = Dataset.from_iterable([], categories=['b', 'c'])

        comp = ExactComparator()
        _, _, _, _, errors = comp.compare_datasets(a, b)

        self.assertEqual(1, len(errors), errors)
Example #3
    def test_image_comparison(self):
        a = Dataset.from_iterable([
            DatasetItem(id=11, image=np.ones((5, 4, 3)), annotations=[
                Bbox(5, 6, 7, 8),
            ]),
            DatasetItem(id=12, image=np.ones((5, 4, 3)), annotations=[
                Bbox(1, 2, 3, 4),
                Bbox(5, 6, 7, 8),
            ]),
            DatasetItem(id=13, image=np.ones((5, 4, 3)), annotations=[
                Bbox(9, 10, 11, 12), # mismatch
            ]),

            DatasetItem(id=14, image=np.zeros((5, 4, 3)), annotations=[
                Bbox(1, 2, 3, 4),
                Bbox(5, 6, 7, 8),
            ], attributes={ 'a': 1 }),

            DatasetItem(id=15, image=np.zeros((5, 5, 3)), annotations=[
                Bbox(1, 2, 3, 4),
                Bbox(5, 6, 7, 8),
            ]),
        ], categories=['a', 'b', 'c', 'd'])

        b = Dataset.from_iterable([
            DatasetItem(id=21, image=np.ones((5, 4, 3)), annotations=[
                Bbox(5, 6, 7, 8),
            ]),
            DatasetItem(id=22, image=np.ones((5, 4, 3)), annotations=[
                Bbox(1, 2, 3, 4),
                Bbox(5, 6, 7, 8),
            ]),
            DatasetItem(id=23, image=np.ones((5, 4, 3)), annotations=[
                Bbox(10, 10, 11, 12), # mismatch
            ]),

            DatasetItem(id=24, image=np.zeros((5, 4, 3)), annotations=[
                Bbox(6, 6, 7, 8), # 1 ann missing, mismatch
            ], attributes={ 'a': 2 }),

            DatasetItem(id=25, image=np.zeros((4, 4, 3)), annotations=[
                Bbox(6, 6, 7, 8),
            ]),
        ], categories=['a', 'b', 'c', 'd'])

        comp = ExactComparator(match_images=True)
        matched_ann, unmatched_ann, a_unmatched, b_unmatched, errors = \
            comp.compare_datasets(a, b)

        self.assertEqual(3, len(matched_ann), matched_ann)
        self.assertEqual(5, len(unmatched_ann), unmatched_ann)
        self.assertEqual(1, len(a_unmatched), a_unmatched)
        self.assertEqual(1, len(b_unmatched), b_unmatched)
        self.assertEqual(1, len(errors), errors)
Example #4
    def test_item_comparison(self):
        a = Dataset.from_iterable([
            DatasetItem(id=1, subset='train'),
            DatasetItem(id=2, subset='test', attributes={'x': 1}),
        ], categories=['a', 'b', 'c'])

        b = Dataset.from_iterable([
            DatasetItem(id=2, subset='test'),
            DatasetItem(id=3),
        ], categories=['a', 'b', 'c'])

        comp = ExactComparator()
        _, _, a_extra_items, b_extra_items, errors = comp.compare_datasets(a, b)

        self.assertEqual({('1', 'train')}, a_extra_items)
        self.assertEqual({('3', '')}, b_extra_items)
        self.assertEqual(1, len(errors), errors)
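
Across these four tests, ExactComparator.compare_datasets consistently returns a 5-tuple: matched annotations, unmatched annotations, items present only in the first dataset, items present only in the second, and a list of errors. A minimal standalone sketch of that call, with hypothetical dataset contents:

# Minimal sketch of a comparison outside unittest; the datasets here are
# hypothetical, but the 5-tuple unpacking mirrors the tests above.
a = Dataset.from_iterable([
    DatasetItem(id=1, annotations=[Label(0)]),
], categories=['a', 'b'])
b = Dataset.from_iterable([
    DatasetItem(id=1, annotations=[Label(1)]),
], categories=['a', 'b'])

matched, unmatched, a_extra, b_extra, errors = \
    ExactComparator().compare_datasets(a, b)
print(len(matched), len(unmatched), a_extra, b_extra, errors)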
Example #5
def ediff_command(args):
    first_project = load_project(args.project_dir)
    second_project = load_project(args.other_project_dir)

    # Fall back to the default ignored fields when none were given
    if not args.ignore_field:
        args.ignore_field = _ediff_default_if
    comparator = ExactComparator(
        match_images=args.match_images,
        ignored_fields=args.ignore_field,
        ignored_attrs=args.ignore_attr,
        ignored_item_attrs=args.ignore_item_attr)
    matches, mismatches, a_extra, b_extra, errors = \
        comparator.compare_datasets(
            first_project.make_dataset(), second_project.make_dataset())
    output = {
        "mismatches": mismatches,
        "a_extra_items": sorted(a_extra),
        "b_extra_items": sorted(b_extra),
        "errors": errors,
    }
    if args.all:
        output["matches"] = matches

    output_file = generate_next_file_name('diff', ext='.json')
    with open(output_file, 'w') as f:
        json.dump(output, f, indent=4, sort_keys=True)

    print("Found:")
    print("The first project has %s unmatched items" % len(a_extra))
    print("The second project has %s unmatched items" % len(b_extra))
    print("%s item conflicts" % len(errors))
    print("%s matching annotations" % len(matches))
    print("%s mismatching annotations" % len(mismatches))

    log.info("Output has been saved to '%s'" % output_file)

    return 0
Example #6
def diff_command(args):
    dst_dir = args.dst_dir
    if dst_dir:
        if not args.overwrite and osp.isdir(dst_dir) and os.listdir(dst_dir):
            raise CliException("Directory '%s' already exists "
                               "(pass --overwrite to overwrite)" % dst_dir)
    else:
        dst_dir = generate_next_file_name('diff')
    dst_dir = osp.abspath(dst_dir)

    if not osp.exists(dst_dir):
        # Remove the directory we are about to create if the command fails
        on_error_do(rmtree, dst_dir, ignore_errors=True)
        os.makedirs(dst_dir)

    # A project context is optional; fail only if one was explicitly requested
    project = None
    try:
        project = scope_add(load_project(args.project_dir))
    except ProjectNotFoundError:
        if args.project_dir:
            raise

    try:
        if not args.second_target:
            first_dataset = project.working_tree.make_dataset()
            second_dataset, target_project = \
                parse_full_revpath(args.first_target, project)
            if target_project:
                scope_add(target_project)
        else:
            first_dataset, target_project = \
                parse_full_revpath(args.first_target, project)
            if target_project:
                scope_add(target_project)

            second_dataset, target_project = \
                parse_full_revpath(args.second_target, project)
            if target_project:
                scope_add(target_project)
    except Exception as e:
        raise CliException(str(e))

    if args.method is ComparisonMethod.equality:
        # Fall back to the default ignored fields when none were given
        if not args.ignore_field:
            args.ignore_field = eq_default_if
        comparator = ExactComparator(match_images=args.match_images,
                                     ignored_fields=args.ignore_field,
                                     ignored_attrs=args.ignore_attr,
                                     ignored_item_attrs=args.ignore_item_attr)
        matches, mismatches, a_extra, b_extra, errors = \
            comparator.compare_datasets(first_dataset, second_dataset)

        output = {
            "mismatches": mismatches,
            "a_extra_items": sorted(a_extra),
            "b_extra_items": sorted(b_extra),
            "errors": errors,
        }
        if args.all:
            output["matches"] = matches

        output_file = osp.join(
            dst_dir,
            generate_next_file_name('diff', ext='.json', basedir=dst_dir))
        log.info("Saving diff to '%s'" % output_file)
        dump_json_file(output_file, output, indent=True)

        print("Found:")
        print("The first project has %s unmatched items" % len(a_extra))
        print("The second project has %s unmatched items" % len(b_extra))
        print("%s item conflicts" % len(errors))
        print("%s matching annotations" % len(matches))
        print("%s mismatching annotations" % len(mismatches))
    elif args.method is ComparisonMethod.distance:
        comparator = DistanceComparator(iou_threshold=args.iou_thresh)

        with DiffVisualizer(save_dir=dst_dir,
                            comparator=comparator,
                            output_format=args.format) as visualizer:
            log.info("Saving diff to '%s'" % dst_dir)
            visualizer.save(first_dataset, second_dataset)

    return 0
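
For completeness, the distance-based branch of diff_command can be reduced to a short standalone sketch. It uses only the calls visible above; the import paths, the dataset locations and formats, and the 'simple' output format are assumptions, since DiffVisualizer in particular has lived in different modules across Datumaro releases:

# Condensed sketch of the distance-based diff path; import paths and the
# dataset locations/formats below are assumptions.
from datumaro.components.dataset import Dataset
from datumaro.components.operations import DistanceComparator
from datumaro.cli.contexts.project.diff import DiffVisualizer

first_dataset = Dataset.import_from('path/to/first', 'datumaro')
second_dataset = Dataset.import_from('path/to/second', 'datumaro')

comparator = DistanceComparator(iou_threshold=0.5)
with DiffVisualizer(save_dir='diff_out', comparator=comparator,
                    output_format='simple') as visualizer:
    visualizer.save(first_dataset, second_dataset)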