Example #1
def _show(self, n, bins=None, ax=None, color=None, label=None):
    """ plot samples monte-carlo style """
    if ax is None:
        import kwplot
        kwplot.autompl()
        from matplotlib import pyplot as plt
        ax = plt.gca()
    data = self.sample(n)
    ax.hist(data, bins=bins, color=color, label=label)
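
Note: a minimal standalone sketch of the same monte-carlo histogram pattern, assuming only numpy and matplotlib (kwplot.autompl just selects a usable matplotlib backend before plotting):

import numpy as np
from matplotlib import pyplot as plt

data = np.random.randn(1000)  # stand-in for self.sample(n)
ax = plt.gca()
ax.hist(data, bins=50, color='blue', label='samples')
ax.legend()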
Example #2
def main():
    print('PARSE')
    import xinspect
    parser = xinspect.auto_argparse(setup_harn)

    parser.add_argument('--lrtest',
                        action='store_true',
                        help='Run Leslie Smith\'s LR range test')
    parser.add_argument('--interact',
                        action='store_true',
                        help='Interact with the range test')
    args, unknown = parser.parse_known_args()
    ns = args.__dict__.copy()

    args.interact |= args.lr == 'interact'
    args.lrtest |= (args.lr == 'test' or args.interact)

    if args.lrtest or args.interact:
        # TODO:
        # - [ ] tweak setup_harn so running the lr-range-test isn't awkward
        from netharn.prefit.lr_tests import lr_range_test
        ns['lr'] = 1e-99

        if args.interact:
            import kwplot
            kwplot.autompl()
            import matplotlib.pyplot as plt

        harn = setup_harn(**ns)
        harn.initialize()
        # TODO: We could cache the result based on the netharn
        # hyperparameters. This would let us integrate with the
        # default fit harness.
        result = lr_range_test(harn)
        print('result.recommended_lr = {!r}'.format(result.recommended_lr))

        if args.interact:
            result.draw()
            plt.show()

        # Seed value with test result
        ns['lr'] = result.recommended_lr
        # Re-create the harness with the recommended lr; initialized below
        harn = setup_harn(**ns)
    else:
        harn = setup_harn(**ns)

    harn.initialize()
    harn.run()
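
Note: parse_known_args returns a (namespace, unknown) pair instead of erroring on unrecognized flags, which is what lets the script above tolerate extra arguments. A minimal plain-argparse sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lrtest', action='store_true')
args, unknown = parser.parse_known_args(['--lrtest', '--not-a-flag'])
print(args.lrtest)  # True
print(unknown)      # ['--not-a-flag']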
Example #3
    def _debug_index(self):
        from shapely.ops import cascaded_union

        def _to_shapely(boxes):
            from shapely.geometry import Polygon
            from kwimage.structs.boxes import _cat
            x1, y1, x2, y2 = boxes.to_tlbr(copy=False).components
            a = _cat([x1, y1]).tolist()
            b = _cat([x1, y2]).tolist()
            c = _cat([x2, y2]).tolist()
            d = _cat([x2, y1]).tolist()
            polygons = [Polygon(points) for points in zip(a, b, c, d, a)]
            return polygons

        for gid, qtree in self.qtrees.items():
            boxes = kwimage.Boxes(np.array(list(qtree.aid_to_tlbr.values())),
                                  'tlbr')
            polygons = _to_shapely(boxes)

            bounds = kwimage.Boxes([[0, 0, qtree.width, qtree.height]], 'tlbr')
            bounds = _to_shapely(bounds)[0]
            merged_polygon = cascaded_union(polygons)
            uncovered = (bounds - merged_polygon)
            print('uncovered.area = {!r}'.format(uncovered.area))

            # plot these two polygons separately
            if 1:
                from descartes import PolygonPatch
                from matplotlib import pyplot as plt
                import kwplot
                kwplot.autompl()
                fig = plt.figure(gid)
                ax = fig.add_subplot(111)
                ax.cla()
                # ax.add_patch(
                #     PolygonPatch(bounds, alpha=0.5, zorder=2, fc='blue')
                # )
                # ax.add_patch(
                #     PolygonPatch(merged_polygon, alpha=0.5, zorder=2, fc='red')
                # )
                ax.add_patch(
                    PolygonPatch(uncovered, alpha=0.5, zorder=2, fc='green'))
                ax.set_xlim(0, qtree.width)
                ax.set_ylim(0, qtree.height)
                ax.set_aspect(1)
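
Note: shapely 2.x removed cascaded_union in favor of unary_union. A minimal sketch of the same merge step with the newer API (toy boxes stand in for the annotation polygons):

from shapely.geometry import box
from shapely.ops import unary_union

polygons = [box(0, 0, 2, 2), box(1, 1, 3, 3)]
merged_polygon = unary_union(polygons)  # drop-in replacement for cascaded_union
print(merged_polygon.area)  # 7.0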
Example #4
def draw():
    import kwplot
    kwplot.autompl()
    ylimits = records['loss'] + (6 * records['loss_std'])
    ymax = np.percentile(ylimits, 96.9) / .9
    kwplot.multi_plot(
        xdata=records['lr'],
        ydata=records['loss'],
        spread=records['loss_std'],
        xlabel='learning-rate',
        ylabel='smoothed-loss',
        xscale='log',
        ymin=0,
        ymax=ymax,
        xmin='data',
        xmax='data',
        # xmin=min(records['lr']),
        # xmax=max(records['lr']),
        doclf=True,
        fnum=1,
    )
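
Note: the percentile trick above chooses a y-limit robust to a few extreme loss spikes (the / .9 adds headroom). A minimal pure-matplotlib sketch of the same idea with illustrative values:

import numpy as np
from matplotlib import pyplot as plt

loss = np.abs(np.random.randn(100))
loss[5] = 100.0                         # one extreme spike
ymax = np.percentile(loss, 96.9) / 0.9  # robust upper limit with headroom
plt.plot(loss)
plt.ylim(0, ymax)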
Example #5
def _redump_measures(dpath):
    """
    """
    import json
    from os.path import join

    import kwplot
    kwplot.autompl(force='agg')

    try:
        import seaborn as sns
        sns.set()
    except ImportError:
        pass

    fpath = join(dpath, 'tb_data.json')
    tb_data = json.load(open(fpath, 'r'))

    out_dpath = dpath
    mode = 'epoch'
    _dump_measures(tb_data, out_dpath, mode)
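
Note: kwplot.autompl(force='agg') forces a non-interactive backend so figures render without a display. A minimal pure-matplotlib sketch of the same effect:

import matplotlib
matplotlib.use('Agg')  # select the headless backend before importing pyplot
from matplotlib import pyplot as plt

fig = plt.figure()
fig.gca().plot([1, 2, 3])
fig.savefig('out.png')  # works on a machine with no display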
Example #6
def fastfill_multipolygon():
    import kwimage
    import kwplot
    import numpy as np
    import ubelt as ub
    kwplot.autompl()
    shape = (1208, 1208)
    self = kwimage.MultiPolygon.random(10).scale(shape)

    ti = ub.Timerit(3, bestof=1, verbose=2, unit='us')
    for timer in ti.reset('draw_on'):
        with timer:
            mask = np.zeros(shape, dtype=np.uint8)
            mask = self.draw_on(mask)

    for timer in ti.reset('custom'):
        with timer:
            mask = np.zeros(shape, dtype=np.uint8)
            for p in self.data:
                if p is not None:
                    p.fill(mask, value=255)

    for timer in ti.reset('to_mask'):
        with timer:
            self.to_mask(shape)

    kwplot.imshow(mask)
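
Note: ub.Timerit is a convenience wrapper around repeated timing. For reference, a rough stdlib equivalent of one of the timing loops above using timeit (semantics differ slightly from Timerit's best-of reporting):

import timeit

def fill_custom():
    # stand-in body for the benchmarked operation
    sum(range(1000))

# 3 repeats, best-of semantics via min(), like Timerit(3, bestof=1)
times = timeit.repeat(fill_custom, repeat=3, number=1)
print('best = {:.6f} s'.format(min(times)))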
Example #7
def benchmark_hash_file():
    """
    CommandLine:
        python ~/code/ubelt/dev/bench_hash.py --show
        python ~/code/ubelt/dev/bench_hash.py --show
    """
    import ubelt as ub
    import random

    # dpath = ub.ensuredir(ub.expandpath('$HOME/raid/data/tmp'))
    dpath = ub.ensuredir(ub.expandpath('$HOME/tmp'))

    rng = random.Random(0)
    # Create a pool of random chunks of data
    chunksize = int(2 ** 20)
    pool_size = 8
    part_pool = [_random_data(rng, chunksize) for _ in range(pool_size)]

    #ITEM = 'JUST A STRING' * 100
    HASHERS = ['sha1', 'sha512', 'xxh32', 'xxh64', 'blake3']

    scales = list(range(5, 10))
    import os

    results = ub.AutoDict()
    # xxhash is significantly faster than sha512
    ti = ub.Timerit(9, bestof=3, verbose=1, unit='ms')
    for s in ub.ProgIter(scales, desc='benchmark', verbose=3):
        N = 2 ** s
        print(' --- s={s}, N={N} --- '.format(s=s, N=N))
        # Write a big file
        size_pool = [N]
        fpath = _write_random_file(dpath, part_pool, size_pool, rng)

        megabytes = os.stat(fpath).st_size / (2 ** 20)
        print('megabytes = {!r}'.format(megabytes))

        for hasher in HASHERS:
            for timer in ti.reset(hasher):
                ub.hash_file(fpath, hasher=hasher)
            results[hasher].update({N: ti.mean()})
        col = {h: results[h][N] for h in HASHERS}
        sortx = ub.argsort(col)
        ranking = ub.dict_subset(col, sortx)
        print('walltime: ' + ub.repr2(ranking, precision=9, nl=0))
        best = next(iter(ranking))
        #pairs = list(ub.iter_window( 2))
        pairs = [(k, best) for k in ranking]
        ratios = [ranking[k1] / ranking[k2] for k1, k2 in pairs]
        nicekeys = ['{}/{}'.format(k1, k2) for k1, k2 in pairs]
        relratios = ub.odict(zip(nicekeys, ratios))
        print('speedup: ' + ub.repr2(relratios, precision=4, nl=0))
    # xdoc +REQUIRES(--show)
    # import pytest
    # pytest.skip()
    import pandas as pd
    df = pd.DataFrame.from_dict(results)
    df.columns.name = 'hasher'
    df.index.name = 'N'
    ratios = df.copy().drop(columns=df.columns)
    for k1, k2 in [('sha512', 'xxh64'), ('sha1', 'xxh64'), ('xxh32', 'xxh64'), ('blake3', 'xxh64')]:
        ratios['{}/{}'.format(k1, k2)] = df[k1] / df[k2]
    print()
    print('Seconds per iteration')
    print(df.to_string(float_format='%.9f'))
    print()
    print('Ratios of seconds')
    print(ratios.to_string(float_format='%.2f'))
    print()
    print('Average Ratio (over all N)')
    print(ratios.mean().sort_values())
    if ub.argflag('--show'):
        import kwplot
        kwplot.autompl()
        xdata = sorted(ub.peek(results.values()).keys())
        ydata = ub.map_vals(lambda d: [d[x] for x in xdata], results)
        kwplot.multi_plot(xdata, ydata, xlabel='N', ylabel='seconds')
        kwplot.show_if_requested()
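
Note: conceptually, ub.hash_file streams the file through a hasher in fixed-size chunks. A minimal hashlib sketch of that pattern (hashlib only covers sha1/sha512 here; xxhash and blake3 come from third-party packages):

import hashlib

def hash_file_sketch(fpath, algo='sha512', blocksize=2 ** 20):
    # stream the file through the hasher one block at a time
    hasher = hashlib.new(algo)
    with open(fpath, 'rb') as file:
        while True:
            chunk = file.read(blocksize)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()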
Example #8
def _devcheck_load_sub_image():
    import kwimage
    import numpy as np
    import ubelt as ub

    sampler = grab_camvid_sampler()

    cid_to_cidx = sampler.catgraph.id_to_idx
    classes = sampler.catgraph

    # Try loading a subregion of an image
    sample = sampler.load_positive(2)
    imdata = sample['im']
    annots = sample['annots']
    aids = annots['aids']
    cids = annots['cids']
    boxes = annots['rel_boxes']
    class_idxs = np.array([cid_to_cidx[cid] for cid in cids])
    segmentations = annots['rel_ssegs']

    raw_dets = kwimage.Detections(
        aids=aids,
        boxes=boxes,
        class_idxs=class_idxs,
        segmentations=segmentations,
        classes=classes,
        datakeys=['aids'],
    )

    # Clip boxes to the image boundary
    input_dims = imdata.shape[0:2]
    raw_dets.data['boxes'] = raw_dets.boxes.clip(0, 0, input_dims[1],
                                                 input_dims[0])

    keep = []
    for i, s in enumerate(raw_dets.data['segmentations']):
        # TODO: clip polygons
        m = s.to_mask(input_dims)
        if m.area > 0:
            keep.append(i)
    dets = raw_dets.take(keep)

    heatmap = dets.rasterize(bg_size=(1, 1), input_dims=input_dims)

    if 1:
        print('dets = {!r}'.format(dets))
        print('dets.data = {!r}'.format(dets.data))
        print('dets.meta = {!r}'.format(dets.meta))

    if ub.argflag('--show'):
        import kwplot

        kwplot.autompl()
        heatmap.draw()

        draw_boxes = 1

        kwplot.figure(doclf=True)
        with ub.Timer('dets.draw_on'):
            canvas = imdata.copy()
            # TODO: add logic to color by class
            canvas = dets.draw_on(canvas, boxes=draw_boxes, color='random')
            kwplot.imshow(canvas, pnum=(1, 2, 1), title='dets.draw_on')

        with ub.Timer('dets.draw'):
            kwplot.imshow(imdata,
                          pnum=(1, 2, 2),
                          docla=True,
                          title='dets.draw')
            dets.draw(boxes=draw_boxes, color='random')
Example #9
def _define_camvid_class_hierarcy(dset):
    # add extra supercategories
    # NOTE: life-conscious and life-inanimate are disjoint in this
    # formulation because we are restricted to a tree structure. If
    # this changes, then we can try re-encoding with multiple parents.
    extra_structure = {
        # Break down the image into things that are part of the system, and
        # things that aren't
        'background': 'root',
        'system': 'root',

        # The system is made up of environmental components and actor
        # components.
        'environment': 'system',
        'actor': 'system',

        # Break actors (things with complex movement) into subtypes
        'life-conscious': 'actor',
        'vehicle-land': 'actor',
        'actor-other': 'actor',

        # Break the environment (things with simple movement) into subtypes
        'life-inanimate': 'environment',
        'civil-structure': 'environment',
        'civil-notice': 'environment',
        'transport-way': 'environment',

        # Subclassify transport mediums
        'drive-way': 'transport-way',
        'walk-way': 'transport-way',
    }

    for child, parent in extra_structure.items():
        if child in dset.name_to_cat:
            dset.name_to_cat[child]['supercategory'] = parent
        else:
            dset.add_category(name=child, supercategory=parent)

    dset.name_to_cat['background']['supercategory'] = 'root'

    dset.name_to_cat['Sky']['supercategory'] = 'environment'

    dset.name_to_cat['Animal']['supercategory'] = 'life-conscious'
    dset.name_to_cat['Bicyclist']['supercategory'] = 'life-conscious'
    dset.name_to_cat['Pedestrian']['supercategory'] = 'life-conscious'
    dset.name_to_cat['Child']['supercategory'] = 'life-conscious'

    dset.name_to_cat['OtherMoving']['supercategory'] = 'actor-other'
    dset.name_to_cat['CartLuggagePram']['supercategory'] = 'actor-other'

    dset.name_to_cat['Car']['supercategory'] = 'vehicle-land'
    dset.name_to_cat['Train']['supercategory'] = 'vehicle-land'
    dset.name_to_cat['Truck_Bus']['supercategory'] = 'vehicle-land'
    dset.name_to_cat['SUVPickupTruck']['supercategory'] = 'vehicle-land'
    dset.name_to_cat['MotorcycleScooter']['supercategory'] = 'vehicle-land'

    dset.name_to_cat['VegetationMisc']['supercategory'] = 'life-inanimate'
    dset.name_to_cat['Tree']['supercategory'] = 'life-inanimate'

    dset.name_to_cat['Column_Pole']['supercategory'] = 'civil-structure'
    dset.name_to_cat['Fence']['supercategory'] = 'civil-structure'
    dset.name_to_cat['Wall']['supercategory'] = 'civil-structure'
    dset.name_to_cat['Building']['supercategory'] = 'civil-structure'
    dset.name_to_cat['Archway']['supercategory'] = 'civil-structure'
    dset.name_to_cat['Bridge']['supercategory'] = 'civil-structure'
    dset.name_to_cat['Tunnel']['supercategory'] = 'civil-structure'

    dset.name_to_cat['TrafficCone']['supercategory'] = 'civil-notice'
    dset.name_to_cat['TrafficLight']['supercategory'] = 'civil-notice'
    dset.name_to_cat['LaneMkgsDriv']['supercategory'] = 'civil-notice'
    dset.name_to_cat['LaneMkgsNonDriv']['supercategory'] = 'civil-notice'
    dset.name_to_cat['SignSymbol']['supercategory'] = 'civil-notice'
    dset.name_to_cat['ParkingBlock']['supercategory'] = 'civil-notice'
    dset.name_to_cat['Misc_Text']['supercategory'] = 'civil-notice'

    dset.name_to_cat['Road']['supercategory'] = 'drive-way'
    dset.name_to_cat['RoadShoulder']['supercategory'] = 'drive-way'
    dset.name_to_cat['Sidewalk']['supercategory'] = 'walk-way'

    for cat in list(dset.cats.values()):
        parent = cat.get('supercategory', None)
        if parent is not None:
            if parent not in dset.name_to_cat:
                print('Missing parent = {!r}'.format(parent))
                dset.add_category(name=parent, supercategory=parent)

    if 0:
        graph = dset.category_graph()
        import graphid
        graphid.util.show_nx(graph)

    # Add in some hierarchy information
    if 0:
        for x in dset.name_to_cat:
            print(
                "dset.name_to_cat[{!r}]['supercategory'] = 'object'".format(x))

    if 0:
        example_cat_aids = []
        for cat in dset.cats.values():
            cname = cat['name']
            aids = dset.index.cid_to_aids[dset.name_to_cat[cname]['id']]
            if len(aids):
                aid = ub.peek(aids)
                example_cat_aids.append(aid)
            else:
                print('No examples of cat = {!r}'.format(cat))

        import xdev
        import kwplot
        kwplot.autompl()
        for aid in xdev.InteractiveIter(example_cat_aids):
            print('aid = {!r}'.format(aid))
            ann = dset.anns[aid]
            cat = dset.cats[ann['category_id']]
            print('cat = {!r}'.format(cat))
            dset.show_image(aid=aid)
            xdev.InteractiveIter.draw()

        if 0:
            cname = 'CartLuggagePram'
            cname = 'ParkingBlock'
            cname = 'LaneMkgsDriv'
            aids = dset.index.cid_to_aids[dset.name_to_cat[cname]['id']]
            if len(aids):
                aid = ub.peek(aids)
                print('aid = {!r}'.format(aid))
                ann = dset.anns[aid]
                cat = dset.cats[ann['category_id']]
                print('cat = {!r}'.format(cat))
                dset.show_image(aid=aid)
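
Note: a small sketch for sanity-checking a supercategory mapping like extra_structure above; it walks each node to the root and fails on cycles, treating unknown parents as direct children of the root (mirroring the fixup loop in the function above):

def validate_hierarchy(child_to_parent, root='root'):
    # walk each node up to the root; a revisited node indicates a cycle
    for start in child_to_parent:
        node, seen = start, set()
        while node != root:
            assert node not in seen, 'cycle at {!r}'.format(node)
            seen.add(node)
            node = child_to_parent.get(node, root)

validate_hierarchy({'background': 'root', 'system': 'root',
                    'environment': 'system', 'actor': 'system'})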
Example #10
def convert_camvid_raw_to_coco(camvid_raw_info):
    """
    Converts the raw camvid format to an MSCOCO-based format (which lets us
    use kwcoco's COCO backend).

    Example:
        >>> # xdoctest: +REQUIRES(--download)
        >>> camvid_raw_info = grab_raw_camvid()
        >>> # test with a reduced set of data
        >>> del camvid_raw_info['img_paths'][2:]
        >>> del camvid_raw_info['mask_paths'][2:]
        >>> dset = convert_camvid_raw_to_coco(camvid_raw_info)
        >>> # xdoctest: +REQUIRES(--show)
        >>> import kwplot
        >>> plt = kwplot.autoplt()
        >>> kwplot.figure(fnum=1, pnum=(1, 2, 1))
        >>> dset.show_image(gid=1)
        >>> kwplot.figure(fnum=1, pnum=(1, 2, 2))
        >>> dset.show_image(gid=2)
    """
    import re
    import kwimage
    import kwcoco
    print('Converting CamVid to MS-COCO format')

    dset_root, img_paths, label_path, mask_paths = ub.take(
        camvid_raw_info,
        'dset_root, img_paths, label_path, mask_paths'.split(', '))

    img_infos = {
        'img_fname': img_paths,
        'mask_fname': mask_paths,
    }
    keys = list(img_infos.keys())
    next_vals = list(zip(*img_infos.values()))
    image_items = [{k: v for k, v in zip(keys, vals)} for vals in next_vals]

    dataset = {
        'img_root': dset_root,
        'images': [],
        'categories': [],
        'annotations': [],
    }

    lines = ub.readfrom(label_path).split('\n')
    lines = [line for line in lines if line]
    for line in lines:
        color_text, name = re.split('\t+', line)
        r, g, b = map(int, color_text.split(' '))
        color = (r, g, b)

        # Parse the special camvid format
        cid = (r << 16) + (g << 8) + (b << 0)
        cat = {
            'id': cid,
            'name': name,
            'color': color,
        }
        dataset['categories'].append(cat)

    for gid, img_item in enumerate(image_items, start=1):
        img = {
            'id': gid,
            'file_name': img_item['img_fname'],
            # nonstandard image field
            'segmentation': img_item['mask_fname'],
        }
        dataset['images'].append(img)

    dset = kwcoco.CocoDataset(dataset)
    dset.rename_categories({'Void': 'background'})

    assert dset.name_to_cat['background']['id'] == 0
    dset.name_to_cat['background'].setdefault('alias', []).append('Void')

    if False:
        _define_camvid_class_hierarcy(dset)

    if 1:
        # TODO: Binarize CCs (and efficiently encode if possible)
        import numpy as np

        bad_info = []
        once = False

        # Add images
        dset.remove_annotations(list(dset.index.anns.keys()))
        for gid, img in ub.ProgIter(dset.imgs.items(),
                                    desc='parse label masks'):
            mask_fpath = join(dset_root, img['segmentation'])

            rgb_mask = kwimage.imread(mask_fpath, space='rgb')
            r, g, b = rgb_mask.T.astype(np.int64)
            cid_mask = np.ascontiguousarray(rgb_to_cid(r, g, b).T)

            cids = set(np.unique(cid_mask)) - {0}

            for cid in cids:
                if cid not in dset.cats:
                    if gid == 618:
                        # Handle a known issue with image 618
                        c_mask = (cid == cid_mask).astype(np.uint8)
                        total_bad = c_mask.sum()
                        if total_bad < 32:
                            if not once:
                                print(
                                    'gid 618 has a few known bad pixels, ignoring them'
                                )
                                once = True
                            continue
                        else:
                            raise Exception('more bad pixels than expected')
                    else:
                        raise Exception(
                            'UNKNOWN cid = {!r} in gid={!r}'.format(cid, gid))

                    # bad_rgb = cid_to_rgb(cid)
                    # print('bad_rgb = {!r}'.format(bad_rgb))
                    # print('WARNING UNKNOWN cid = {!r} in gid={!r}'.format(cid, gid))
                    # bad_info.append({
                    #     'gid': gid,
                    #     'cid': cid,
                    # })
                else:
                    ann = {
                        'category_id': cid,
                        'image_id': gid
                        # 'segmentation': mask.to_coco()
                    }
                    assert cid in dset.cats
                    c_mask = (cid == cid_mask).astype(np.uint8)
                    mask = kwimage.Mask(c_mask, 'c_mask')

                    box = kwimage.Boxes([mask.get_xywh()], 'xywh')
                    # box = mask.to_boxes()

                    ann['bbox'] = ub.peek(box.to_coco())
                    ann['segmentation'] = mask.to_coco()
                    dset.add_annotation(**ann)

        if 0:
            bad_cids = [i['cid'] for i in bad_info]
            print(sorted([c['color'] for c in dataset['categories']]))
            print(sorted(set([cid_to_rgb(i['cid']) for i in bad_info])))

            gid = 618
            img = dset.imgs[gid]
            mask_fpath = join(dset_root, img['segmentation'])
            rgb_mask = kwimage.imread(mask_fpath, space='rgb')
            r, g, b = rgb_mask.T.astype(np.int64)
            cid_mask = np.ascontiguousarray(rgb_to_cid(r, g, b).T)
            cid_hist = ub.dict_hist(cid_mask.ravel())

            bad_cid_hist = {}
            for cid in bad_cids:
                bad_cid_hist[cid] = cid_hist.pop(cid)

            import kwplot
            kwplot.autompl()
            kwplot.imshow(rgb_mask)

    if 0:
        import kwplot
        plt = kwplot.autoplt()
        plt.clf()
        dset.show_image(1)

        import xdev
        gid_list = list(dset.imgs)
        for gid in xdev.InteractiveIter(gid_list):
            dset.show_image(gid)
            xdev.InteractiveIter.draw()

    dset._build_index()
    dset._build_hashid()
    return dset
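
Note: the helpers rgb_to_cid / cid_to_rgb referenced above are not shown in this snippet. A sketch consistent with the packing used for the category ids (cid = (r << 16) + (g << 8) + b) would be:

def rgb_to_cid(r, g, b):
    # pack an (r, g, b) color into a single integer category id
    return (r << 16) + (g << 8) + (b << 0)

def cid_to_rgb(cid):
    # unpack the integer id back into its color components
    return ((cid >> 16) & 0xFF, (cid >> 8) & 0xFF, cid & 0xFF)

assert cid_to_rgb(rgb_to_cid(64, 128, 64)) == (64, 128, 64)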
Example #11
def test_yolo_lr():
    if 0:
        datasets = {
            'train': nh.data.ToyData2d(size=3, border=1, n=18, rng=0),
            # 'vali': nh.data.ToyData2d(size=3, border=1, n=16, rng=1),
        }
        burn_in = 2.5
        lr = 0.1
        bstep = 2
        bsize = 2
        decay = 0.0005
        simulated_bsize = bstep * bsize
        max_epoch = 4
        points = {
            0: lr * 1.0,
            3: lr * 1.0,
            4: lr * 0.1,
        }
    else:
        datasets = {
            'train': nh.data.ToyData2d(size=3, border=1, n=16551 // 100,
                                       rng=0),
            'vali': nh.data.ToyData2d(size=3, border=1, n=4952 // 100, rng=1),
        }
        # number of epochs to burn_in for. approx 1000 batches?
        burn_in = 3.86683584
        lr = 0.001
        bstep = 2
        bsize = 32
        decay = 0.0005
        simulated_bsize = bstep * bsize
        max_epoch = 311
        points = {
            0: lr * 1.0 / simulated_bsize,
            154: lr * 1.0 / simulated_bsize,  # 1.5625e-05
            155: lr * 0.1 / simulated_bsize,  # 1.5625e-06
            232: lr * 0.1 / simulated_bsize,
            233: lr * 0.01 / simulated_bsize,  # 1.5625e-07
        }

    hyper = {
        # --- data first
        'datasets':
        datasets,
        'name':
        'restart_lr',
        'workdir':
        ub.ensure_app_cache_dir('netharn/test/restart_lr'),
        'loaders': {
            'batch_size': bsize
        },
        'xpu':
        nh.XPU.coerce('cpu'),
        # --- algorithm second
        'model': (nh.models.ToyNet2d, {}),
        'optimizer': (nh.optimizers.SGD, {
            'lr': points[0],
            'weight_decay': decay * simulated_bsize
        }),
        'criterion': (nh.criterions.FocalLoss, {}),
        'initializer': (nh.initializers.NoOp, {}),
        'scheduler': (nh.schedulers.YOLOScheduler, {
            'points': points,
            'burn_in': burn_in,
            'dset_size': len(datasets['train']),
            'batch_size': bsize,
            'interpolate': False,
        }),
        'dynamics': {
            'batch_step': bstep
        },
        'monitor': (nh.Monitor, {
            'max_epoch': max_epoch
        }),
    }
    harn = MyHarn(hyper=hyper)
    harn.preferences['prog_backend'] = 'progiter'
    harn.preferences['use_tensorboard'] = False
    # Delete previous data
    harn.initialize(reset='delete')

    # Cause the harness to fail
    try:
        harn.failpoint = 100
        harn.run()
    except Failpoint:
        pass
    print('\nFAILPOINT REACHED\n')
    failpoint_lrs = set(harn._current_lrs())

    old_harn = harn

    # Restarting the harness should begin at the same point
    harn = MyHarn(hyper=hyper)
    harn.preferences['prog_backend'] = 'progiter'
    harn.preferences['use_tensorboard'] = False
    harn.initialize()
    harn.xdata = old_harn.xdata
    harn.ydata = old_harn.ydata

    restart_lrs = set(harn._current_lrs())
    print('failpoint_lrs = {!r}'.format(failpoint_lrs))
    print('restart_lrs   = {!r}'.format(restart_lrs))

    harn.failpoint = None
    harn.run()

    if ub.argflag('--show'):
        import kwplot
        kwplot.autompl()
        kwplot.multi_plot(harn.xdata, harn.ydata)
        from matplotlib import pyplot as plt
        plt.show()

    assert restart_lrs == failpoint_lrs
Example #12
def benchamrk_det_nms():
    """
    Benchmarks different implementations of non-max-suppression on the CPU, GPU,
    and using cython / numpy / torch.

    CommandLine:
        xdoctest -m ~/code/kwimage/dev/bench_nms.py benchamrk_det_nms --show

    SeeAlso:
        PJR Darknet NonMax suppression
        https://github.com/pjreddie/darknet/blob/master/src/box.c

        Lightnet NMS
        https://gitlab.com/EAVISE/lightnet/blob/master/lightnet/data/transform/_postprocess.py#L116
    """

    # N = 200
    # bestof = 50
    N = 1
    bestof = 1

    # xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, 2000]

    # max number of boxes yolo will spit out at a time
    max_boxes = 19 * 19 * 5

    xdata = [
        10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500,
        max_boxes
    ]
    # xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500]

    # Demo values
    xdata = [0, 1, 2, 3, 10, 100, 200, 300, 500]

    if ub.argflag('--small'):
        xdata = [10, 100, 500, 1000, 1500, 2000, 5000, 10000]

    if ub.argflag('--medium'):
        xdata = [
            1000,
            5000,
            10000,
            20000,
            50000,
        ]

    if ub.argflag('--large'):
        xdata = [
            1000,
            5000,
            10000,
            20000,
            50000,
            100000,
        ]

    if ub.argflag('--extra-large'):
        xdata = [
            1000,
            2000,
            10000,
            20000,
            40000,
            100000,
            200000,
        ]

    title_parts = []

    SMALL_BOXES = ub.argflag('--small-boxes')
    if SMALL_BOXES:
        title_parts.append('small boxes')
    else:
        title_parts.append('large boxes')

    # NOTE: for large images we may have up to 21,850,753 detections!

    thresh = float(ub.argval('--thresh', default=0.4))
    title_parts.append('thresh={:.2f}'.format(thresh))

    from kwimage.algo.algo_nms import available_nms_impls
    valid_impls = available_nms_impls()
    print('valid_impls = {!r}'.format(valid_impls))

    basis = {
        'type': ['ndarray', 'tensor', 'tensor0'],
        # 'daq': [True, False],
        # 'daq': [False],
        # 'device': [None],
        # 'impl': valid_impls,
        'impl': valid_impls + ['auto'],
    }

    if ub.argflag('--daq'):
        basis['daq'] = [True, False]

    # if torch.cuda.is_available():
    #     basis['device'].append(0)

    combos = [
        ub.dzip(basis.keys(), vals) for vals in it.product(*basis.values())
    ]

    def is_valid_combo(combo):
        # if combo['impl'] in {'py', 'cython_cpu'} and combo['device'] is not None:
        #     return False
        # if combo['type'] == 'ndarray' and combo['impl'] == 'cython_gpu':
        #     if combo['device'] is None:
        #         return False
        # if combo['type'] == 'ndarray' and combo['impl'] != 'cython_gpu':
        #     if combo['device'] is not None:
        #         return False

        # if combo['type'].endswith('0'):
        #     if combo['impl'] in {'numpy', 'cython_gpu', 'cython_cpu'}:
        #         return False

        # if combo['type'] == 'ndarray':
        #     if combo['impl'] in {'torch'}:
        #         return False

        REMOVE_SLOW = True
        if REMOVE_SLOW:
            known_bad = [
                {
                    'impl': 'torch',
                    'type': 'tensor'
                },
                {
                    'impl': 'numpy',
                    'type': 'tensor'
                },
                # {'impl': 'cython_gpu', 'type': 'tensor'},
                {
                    'impl': 'cython_cpu',
                    'type': 'tensor'
                },

                # {'impl': 'torch', 'type': 'tensor0'},
                {
                    'impl': 'numpy',
                    'type': 'tensor0'
                },
                # {'impl': 'cython_gpu', 'type': 'tensor0'},
                # {'impl': 'cython_cpu', 'type': 'tensor0'},
                {
                    'impl': 'torchvision',
                    'type': 'ndarray'
                },
            ]
            for known in known_bad:
                if all(combo[key] == val for key, val in known.items()):
                    return False

        return True

    combos = list(filter(is_valid_combo, combos))

    times = ub.ddict(list)
    for num in xdata:

        if num > 10000:
            N = 1
            bestof = 1
        elif num > 1000:
            N = 3
            bestof = 1
        elif num > 100:
            N = 10
            bestof = 3
        elif num > 10:
            N = 100
            bestof = 10
        else:
            N = 1000
            bestof = 10
        print('\n\n---- number of boxes = {} ----\n'.format(num))

        outputs = {}

        ti = ub.Timerit(N, bestof=bestof, verbose=1)

        # Build random test boxes and scores
        np_dets1 = kwimage.Detections.random(num // 2, scale=1000.0, rng=0)
        np_dets1.data['boxes'] = np_dets1.boxes.to_xywh()

        if SMALL_BOXES:
            max_dim = 100
            np_dets1.boxes.data[..., 2] = np.minimum(np_dets1.boxes.width,
                                                     max_dim).ravel()
            np_dets1.boxes.data[..., 3] = np.minimum(np_dets1.boxes.height,
                                                     max_dim).ravel()

        np_dets2 = copy.deepcopy(np_dets1)
        np_dets2.boxes.translate(10, inplace=True)
        # add boxes that will definitely be removed
        np_dets = kwimage.Detections.concatenate([np_dets1, np_dets2])

        # make all scores unique to ensure comparability
        np_dets.scores[:] = np.linspace(0, 1, np_dets.num_boxes())

        np_dets.data['scores'] = np_dets.scores.astype(np.float32)
        np_dets.boxes.data = np_dets.boxes.data.astype(np.float32)

        typed_data = {}
        # ----------------------------------

        import netharn as nh
        for combo in combos:
            print('combo = {}'.format(ub.repr2(combo, nl=0)))

            label = nh.util.make_idstr(combo)
            mode = combo.copy()

            # if mode['impl'] == 'cython_gpu':
            #     mode['device_id'] = mode['device']

            mode_type = mode.pop('type')

            if mode_type in typed_data:
                dets = typed_data[mode_type]
            else:
                if mode_type == 'ndarray':
                    dets = np_dets.numpy()
                elif mode_type == 'tensor':
                    dets = np_dets.tensor(None)
                elif mode_type == 'tensor0':
                    dets = np_dets.tensor(0)
                else:
                    raise KeyError
                typed_data[mode_type] = dets

            for timer in ti.reset(label):
                with timer:
                    keep = dets.non_max_supression(thresh=thresh, **mode)
                    torch.cuda.synchronize()
            times[ti.label].append(ti.min())
            outputs[ti.label] = ensure_numpy_indices(keep)

        # ----------------------------------

        # Check that all kept boxes do not have more than `threshold` ious
        if 0:
            for key, keep_idxs in outputs.items():
                kept = np_dets.take(keep_idxs).boxes
                ious = kept.ious(kept)
                max_iou = (np.tril(ious) - np.eye(len(ious))).max()
                if max_iou > thresh:
                    print('{} produced a bad result with max_iou={}'.format(
                        key, max_iou))

        # Check result consistency:
        print('\nResult stats:')
        for key in sorted(outputs.keys()):
            print('    * {:<20}: num={}'.format(key, len(outputs[key])))

        print('\nResult overlaps (method1, method2: jaccard):')
        datas = []
        for k1, k2 in it.combinations(sorted(outputs.keys()), 2):
            idxs1 = set(outputs[k1])
            idxs2 = set(outputs[k2])
            jaccard = len(idxs1 & idxs2) / max(len(idxs1 | idxs2), 1)
            datas.append((k1, k2, jaccard))

        datas = sorted(datas, key=lambda x: -x[2])
        for k1, k2, jaccard in datas:
            print('    * {:<20}, {:<20}: {:0.4f}'.format(k1, k2, jaccard))

    if True:
        ydata = {key: 1.0 / np.array(vals) for key, vals in times.items()}
        ylabel = 'Hz'
        reverse = True
        yscale = 'symlog'
    else:
        ydata = {key: np.array(vals) for key, vals in times.items()}
        ylabel = 'seconds'
        reverse = False
        yscale = 'linear'
    scores = {key: vals[-1] for key, vals in ydata.items()}
    ydata = ub.dict_subset(ydata, ub.argsort(scores, reverse=reverse))

    ###
    times_of_interest = [0, 10, 100, 200, 1000]
    times_of_interest = xdata

    lines = []
    record = lines.append
    record('### times_of_interest = {!r}'.format(times_of_interest))
    for x in times_of_interest:

        if times_of_interest[-1] == x:
            record('else:')
        elif times_of_interest[0] == x:
            record('if num <= {}:'.format(x))
        else:
            record('elif num <= {}:'.format(x))

        if x in xdata:
            pos = xdata.index(x)
            score_wrt_x = {}
            for key, vals in ydata.items():
                score_wrt_x[key] = vals[pos]

            typekeys = ['tensor0', 'tensor', 'ndarray']
            type_groups = dict([(b,
                                 ub.group_items(score_wrt_x,
                                                lambda y: y.endswith(b))[True])
                                for b in typekeys])
            # print('\n=========')
            # print('x = {!r}'.format(x))
            record('    if code not in {!r}:'.format(set(typekeys)))
            record('        raise KeyError(code)')
            for typekey, group in type_groups.items():
                # print('-------')
                record('    if code == {!r}:'.format(typekey))
                # print('typekey = {!r}'.format(typekey))
                # print('group = {!r}'.format(group))
                group_x = ub.dict_isect(score_wrt_x, group)
                valid_keys = ub.argsort(group_x, reverse=True)
                valid_x = ub.dict_subset(group_x, valid_keys)
                # parts = [','.split(k) for k in valid_keys]
                ordered_impls = []
                ordered_impls2 = ub.odict()
                for k in valid_keys:
                    vals = valid_x[k]
                    p = k.split(',')
                    d = dict(i.split('=') for i in p)
                    ordered_impls2[d['impl']] = vals
                    ordered_impls.append(d['impl'])

                ordered_impls = list(ub.oset(ordered_impls) - {'auto'})
                ordered_impls2.pop('auto')
                record('        # {}'.format(
                    ub.repr2(ordered_impls2, precision=1, nl=0,
                             explicit=True)))
                record('        preference = {}'.format(
                    ub.repr2(ordered_impls, nl=0)))
    record('### end times of interest ')
    print(ub.indent('\n'.join(lines), ' ' * 8))
    ###

    markers = {
        key: 'o' if 'auto' in key else ''
        for key, score in scores.items()
    }

    if ub.argflag('--daq'):
        markers = {
            key: '+' if 'daq=True' in key else ''
            for key, score in scores.items()
        }

    labels = {
        key: '{:.2f} {} - {}'.format(score, ylabel[0:3], key)
        for key, score in scores.items()
    }

    title = 'NMS-impl speed: ' + ', '.join(title_parts)

    import kwplot
    kwplot.autompl()
    kwplot.multi_plot(
        xdata,
        ydata,
        xlabel='num boxes',
        ylabel=ylabel,
        label=labels,
        yscale=yscale,
        title=title,
        marker=markers,
        # xscale='symlog',
    )

    kwplot.show_if_requested()
Example #13
import kwarray
import kwimage
import kwplot
import numpy as np
import ubelt as ub
from scipy import integrate

fpath = ub.grabdata('https://i.redd.it/ywip9sbwysy71.jpg')
data = kwimage.imread(fpath)

subdata = data[242:-22, 22:300]

img = subdata

inty = integrate.cumtrapz(img, axis=0)
intx = integrate.cumtrapz(img, axis=1)

dery = np.gradient(img, axis=0)
derx = np.gradient(img, axis=1)

der_canvas = kwarray.normalize(kwimage.stack_images([dery, derx], axis=0))
int_canvas = kwarray.normalize(kwimage.stack_images([inty, intx], axis=0))
der_canvas = kwimage.ensure_uint255(der_canvas)
int_canvas = kwimage.ensure_uint255(int_canvas)

der_canvas = kwimage.draw_header_text(der_canvas, 'derivative', color='white')
int_canvas = kwimage.draw_header_text(int_canvas,
                                      'antiderivative',
                                      color='white')

canvas = kwimage.stack_images([der_canvas, int_canvas], axis=1)

# kwimage.imwrite('ftfy.jpg', canvas)

kwplot.autompl()
kwplot.imshow(canvas)
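
Note: the derivative and antiderivative panels invert each other up to a constant. A tiny 1-D sketch of that relationship using the same numpy/scipy calls (in scipy >= 1.6 cumtrapz is also available as cumulative_trapezoid):

import numpy as np
from scipy import integrate

x = np.linspace(0, 2 * np.pi, 100)
signal = np.sin(x)
dsig = np.gradient(signal, x)                      # derivative
sig_back = integrate.cumtrapz(dsig, x, initial=0)  # antiderivative
# recovers the signal up to the integration constant signal[0]
assert np.allclose(sig_back + signal[0], signal, atol=1e-2)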
Example #14
def _devcheck_voc_consistency():
    """
    # CHECK FOR ISSUES WITH MY MAP COMPUTATION

    TODO:
        Check how cocoeval works
        https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py
    """
    import pandas as pd
    import kwcoco
    # method = 'voc2012'
    method = 'voc2007'

    bias = 0

    # classes = [0, 1, 2]
    classes = [0]

    classname = 0
    # nimgs = 5
    # nboxes = 2
    nimgs = 5
    nboxes = 5
    nbad = 1

    bg_weight = 1.0
    iou_thresh = 0.5
    bg_cls = -1

    xdata = []
    ydatas = ub.ddict(list)
    for noise in np.linspace(0, 5, 10):
        recs = {}
        lines = []
        confusions = []
        rng = np.random.RandomState(0)

        detmetrics = DetectionMetrics()

        true_coco = kwcoco.CocoDataset()
        pred_coco = kwcoco.CocoDataset()
        cid = true_coco.add_category('cat1')
        cid = pred_coco.add_category('cat1')
        for imgname in range(nimgs):

            # Create voc style data
            imgname = str(imgname)
            import kwimage
            true_boxes = kwimage.Boxes.random(num=nboxes,
                                              scale=100.,
                                              rng=rng,
                                              format='cxywh')
            pred_boxes = true_boxes.copy()
            pred_boxes.data = pred_boxes.data.astype(
                float) + (rng.rand() * noise)
            if nbad:
                pred_boxes.data = np.vstack([
                    pred_boxes.data,
                    kwimage.Boxes.random(num=nbad,
                                         scale=100.,
                                         rng=rng,
                                         format='cxywh').data
                ])

            true_cxs = rng.choice(classes, size=len(true_boxes))
            pred_cxs = true_cxs.copy()

            change = rng.rand(len(true_cxs)) < (noise / 5)
            pred_cxs_swap = rng.choice(classes, size=len(pred_cxs))
            pred_cxs[change] = pred_cxs_swap[change]
            if nbad:
                pred_cxs = np.hstack(
                    [pred_cxs, rng.choice(classes, size=nbad)])

            # force all classes to 0 for this single-class check (note: this
            # overrides the label-noise perturbation above)
            pred_cxs = np.array([0] * len(pred_boxes))

            recs[imgname] = []
            for bbox in true_boxes.to_tlbr().data:
                recs[imgname].append({
                    'bbox': bbox,
                    'difficult': False,
                    'name': classname
                })

            for bbox, score in zip(pred_boxes.to_tlbr().data,
                                   np.arange(len(pred_boxes))):
                lines.append([imgname, score] + list(bbox))
                # lines.append('{} {} {} {} {} {}'.format(imgname, score, *bbox))

            # Create MS-COCO style data
            gid = true_coco.add_image(imgname)
            gid = pred_coco.add_image(imgname)
            for bbox in true_boxes.to_xywh():
                true_coco.add_annotation(gid,
                                         cid,
                                         bbox=bbox,
                                         iscrowd=False,
                                         ignore=0,
                                         area=bbox.area[0])
            for bbox, score in zip(pred_boxes.to_xywh(),
                                   np.arange(len(pred_boxes))):
                pred_coco.add_annotation(gid,
                                         cid,
                                         bbox=bbox,
                                         iscrowd=False,
                                         ignore=0,
                                         score=score,
                                         area=bbox.area[0])

            # Create kwcoco style confusion data
            true_weights = np.array([1] * len(true_boxes))
            pred_scores = np.arange(len(pred_boxes))

            y = pd.DataFrame(
                detection_confusions(true_boxes,
                                     true_cxs,
                                     true_weights,
                                     pred_boxes,
                                     pred_scores,
                                     pred_cxs,
                                     bg_weight=1.0,
                                     iou_thresh=0.5,
                                     bg_cls=-1,
                                     bias=bias))
            y['gx'] = int(imgname)
            confusions.append(y)

        from pycocotools import cocoeval as coco_score
        cocoGt = true_coco._aspycoco()
        cocoDt = pred_coco._aspycoco()

        evaler = coco_score.COCOeval(cocoGt, cocoDt, iouType='bbox')
        evaler.evaluate()
        evaler.accumulate()
        evaler.summarize()
        coco_ap = evaler.stats[1]

        y = pd.concat(confusions)

        mine_ap = score_detection_assignment(y, method=method)['ap']
        voc_rec, voc_prec, voc_ap = voc_eval(lines,
                                             recs,
                                             classname,
                                             iou_thresh=0.5,
                                             method=method,
                                             bias=bias)
        eav_prec, eav_rec, eav_ap1 = _multiclass_ap(y)

        eav_ap2 = _ave_precision(eav_rec, eav_prec, method=method)
        voc_ap2 = _ave_precision(voc_rec, voc_prec, method=method)

        eav_ap = eav_ap2

        print('noise = {!r}'.format(noise))
        print('mine_ap = {!r}'.format(mine_ap.values.mean()))
        print('voc_ap = {!r}'.format(voc_ap))
        print('eav_ap = {!r}'.format(eav_ap))
        print('---')
        xdata.append(noise)
        ydatas['voc'].append(voc_ap)
        ydatas['eav'].append(eav_ap)
        ydatas['kwcoco'].append(mine_ap.values.mean())
        ydatas['coco'].append(coco_ap)

    ydf = pd.DataFrame(ydatas)
    print(ydf)

    import kwplot
    kwplot.autompl()
    kwplot.multi_plot(xdata=xdata, ydata=ydatas, fnum=1, doclf=True)
Example #15
def _devcheck_voc_consistency2():
    """
    # CHECK FOR ISSUES WITH MY MAP COMPUTATION

    TODO:
        Check how cocoeval works
        https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py
    """
    import pandas as pd
    from kwcoco.metrics.detections import DetectionMetrics
    xdata = []
    ydatas = ub.ddict(list)

    dmets = []

    for box_noise in np.linspace(0, 8, 20):
        dmet = DetectionMetrics.demo(
            nimgs=20,
            nboxes=(0, 20),
            classes=3,
            rng=0,
            # anchors=np.array([[.5, .5], [.3, .3], [.1, .3], [.2, .1]]),
            box_noise=box_noise,
            # n_fp=0 if box_noise == 0 else (0, 3),
            # n_fn=0 if box_noise == 0 else (0, 3),
            # cls_noise=0 if box_noise == 0 else .3,
        )
        dmets.append(dmet)

        nh_scores = dmet.score_kwcoco(bias=0)
        voc_scores = dmet.score_voc(bias=0)
        coco_scores = dmet.score_coco()
        nh_map = nh_scores['mAP']
        voc_map = voc_scores['mAP']
        coco_map = coco_scores['mAP']
        print('nh_map = {!r}'.format(nh_map))
        print('voc_map = {!r}'.format(voc_map))
        print('coco_map = {!r}'.format(coco_map))

        xdata.append(box_noise)
        ydatas['voc'].append(voc_map)
        ydatas['kwcoco'].append(nh_map)
        ydatas['coco'].append(coco_map)

    ydf = pd.DataFrame(ydatas)
    print(ydf)

    import kwplot
    kwplot.autompl()
    kwplot.multi_plot(xdata=xdata, ydata=ydatas, fnum=1, doclf=True)

    if False:
        dmet_ = dmets[-1]
        dmet_ = dmets[0]
        print('true = ' + ub.repr2(dmet_.true.dataset, nl=2, precision=2))
        print('pred = ' + ub.repr2(dmet_.pred.dataset, nl=2, precision=2))

        dmet = DetectionMetrics()
        for gid in range(0, 5):
            print('----')
            print('gid = {!r}'.format(gid))
            dmet.true = dmet_.true.subset([gid])
            dmet.pred = dmet_.pred.subset([gid])

            nh_scores = dmet.score_kwcoco(bias=0)
            voc_scores = dmet.score_voc(bias=0)
            coco_scores = dmet.score_coco()
            nh_map = nh_scores['mAP']
            voc_map = voc_scores['mAP']
            coco_map = coco_scores['mAP']
            print('nh_map = {!r}'.format(nh_map))
            print('voc_map = {!r}'.format(voc_map))
            print('coco_map = {!r}'.format(coco_map))

            print('true = ' + ub.repr2(dmet.true.dataset, nl=2))
            print('pred = ' + ub.repr2(dmet.pred.dataset, nl=2))
Example #16
def check_fill_poly_properties():
    """
    Notes:
        It seems as if cv2.fillPoly will draw multiple polygons, but it will
        toggle between drawing holes and filling depending on if the next
        polygon is inside of a previous one.

        skimage.draw.polygon is very slow

        PIL is very slow for floats, but ints aren't too bad. cv2 is better.

    """
    import cv2
    import kwimage
    import kwplot
    import numpy as np
    import shapely.geometry
    import skimage.draw
    import ubelt as ub
    kwplot.autompl()

    shape = (1208, 1208)
    self = kwimage.Polygon.random(n=10, n_holes=1, convex=False).scale(1208)

    cv_contours = self._to_cv_countours()

    value = 1
    line_type = cv2.LINE_8

    mask = np.zeros((128, 128), dtype=np.uint8)
    # Modification happens inplace
    cv2.fillPoly(mask, cv_contours, value, line_type, shift=0)

    kwplot.autompl()
    kwplot.imshow(mask)

    extra = cv_contours[1] + 40
    cv_contours3 = cv_contours + [extra, extra + 2]
    mask = np.zeros((128, 128), dtype=np.uint8)
    cv2.fillPoly(mask, cv_contours3, value, line_type, shift=0)

    kwplot.imshow(mask)

    geom = shapely.geometry.Polygon(
        shell=self.data['exterior'].data,
        holes=[c.data for c in self.data['interiors']])

    xs, ys = self.data['exterior'].data.T
    rr, cc = skimage.draw.polygon(xs, ys)

    mask = np.zeros(shape, dtype=np.uint8)

    ti = ub.Timerit(10, bestof=3, verbose=2, unit='us')
    if False:
        # Not general enough
        for timer in ti.reset('fillConvexPoly'):
            mask[:, :] = 0
            with timer:
                cv_contours = self._to_cv_countours()
                cv2.fillConvexPoly(mask, cv_contours[0], value)

    for timer in ti.reset('fillPoly'):
        mask[:, :] = 0
        with timer:
            cv_contours = self._to_cv_countours()
            cv2.fillPoly(mask, cv_contours[0:1], value)

    for timer in ti.reset('skimage.draw.polygon'):
        mask = np.zeros(shape, dtype=np.uint8)
        with timer:
            xs, ys = self.data['exterior'].data.T
            rr, cc = skimage.draw.polygon(xs, ys)

    from PIL import Image, ImageDraw
    for timer in ti.reset('PIL'):
        height, width = shape
        pil_img = Image.new('L', (width, height), 0)
        with timer:
            draw_obj = ImageDraw.Draw(pil_img)
            pil_poly = self.data['exterior'].data.astype(
                int).ravel().tolist()
            pil_poly = pil_poly + pil_poly[0:2]
            draw_obj.polygon(pil_poly, outline=0, fill=255)
            mask = np.array(pil_img)
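
Note: a minimal sketch of the hole-toggling behavior described in the Notes above; passing a nested contour to cv2.fillPoly carves a hole in the outer polygon:

import cv2
import numpy as np

outer = np.array([[10, 10], [90, 10], [90, 90], [10, 90]], dtype=np.int32)
inner = np.array([[30, 30], [70, 30], [70, 70], [30, 70]], dtype=np.int32)
mask = np.zeros((100, 100), dtype=np.uint8)
cv2.fillPoly(mask, [outer, inner], 1)
assert mask[50, 50] == 0  # the nested contour became a hole
assert mask[20, 20] == 1  # the ring between the contours is filled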
Example #17
def plot_convolutional_features(conv,
                                limit=144,
                                colorspace='rgb',
                                fnum=None,
                                nCols=None,
                                voxels=False,
                                alpha=.2,
                                labels=False,
                                normaxis=None,
                                _hack_2drows=False):
    """Plots the convolutional layers to a matplotlib pyplot.

    The convolutional filters (kernels) are stored into a grid and saved to disk
    as a Maplotlib figure.  The convolutional filters, if it has one channel,
    will be stored as an intensity imgage.  If a colorspace is specified and
    there are three input channels, the convolutional filters will be
    represented as an RGB image.

    In the event that 2 or 4+ filters are
    displayed, the different channels will be flattened and showed as distinct
    outputs in the grid.

    TODO:
        - [ ] refactor to use make_conv_images

    Args:
        conv (torch.nn.ConvNd): torch convolutional layer with weights to draw
        limit (int, optional): the limit on the number of filters drawn in the
            figure; filters beyond the limit are simply dropped. Defaults to
            144.

        colorspace (str): the colorspace seen by the convolutional filter
            (if applicable), so we can convert to rgb for display.

        voxels (bool): if True, and we have a 3d conv, show the voxels
        alpha (float): only applicable if voxels=True
        stride (list): only applicable if voxels=True

    Returns:
        matplotlib.figure.Figure: fig - a Matplotlib figure

    References:
        https://matplotlib.org/devdocs/gallery/mplot3d/voxels.html

    Example:
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> conv = torch.nn.Conv2d(3, 9, (5, 7))
        >>> plot_convolutional_features(conv, colorspace=None, fnum=None, limit=2)

    Example:
        >>> # xdoctest: +REQUIRES(--comprehensive)
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> import torchvision
        >>> # 2d uncolored gray-images
        >>> conv = torch.nn.Conv3d(1, 2, (3, 4, 5))
        >>> plot_convolutional_features(conv, colorspace=None, fnum=1, limit=2)

        >>> # 2d colored rgb-images
        >>> conv = torch.nn.Conv3d(3, 2, (6, 4, 5))
        >>> plot_convolutional_features(conv, colorspace='rgb', fnum=1, limit=2)

        >>> # 2d uncolored rgb-images
        >>> conv = torch.nn.Conv3d(3, 2, (6, 4, 5))
        >>> plot_convolutional_features(conv, colorspace=None, fnum=1, limit=2)

        >>> # 3d gray voxels
        >>> conv = torch.nn.Conv3d(1, 2, (6, 4, 5))
        >>> plot_convolutional_features(conv, colorspace=None, fnum=1, voxels=True,
        >>>                             limit=2)

        >>> # 3d color voxels
        >>> conv = torch.nn.Conv3d(3, 2, (6, 4, 5))
        >>> plot_convolutional_features(conv, colorspace='rgb', fnum=1,
        >>>                             voxels=True, alpha=1, limit=3)

        >>> # hack the nice resnet weights into 3d-space
        >>> # xdoctest: +REQUIRES(--network)
        >>> import torchvision
        >>> model = torchvision.models.resnet50(pretrained=True)
        >>> conv = torch.nn.Conv3d(3, 1, (7, 7, 7))
        >>> weights_tohack = model.conv1.weight[0:7].data.numpy()
        >>> # normalize each weight for nice colors, then place in the conv3d
        >>> for w in weights_tohack:
        ...     w[:] = (w - w.min()) / (w.max() - w.min())
        >>> weights_hacked = weights_tohack.transpose(1, 0, 2, 3)[None, :]
        >>> conv.weight.data[:] = torch.FloatTensor(weights_hacked)

        >>> plot_convolutional_features(conv, colorspace='rgb', fnum=1, voxels=True, alpha=.6)

        >>> plot_convolutional_features(conv, colorspace='rgb', fnum=2, voxels=False, alpha=.9)

    Example:
        >>> # xdoctest: +REQUIRES(--network)
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> import torchvision
        >>> model = torchvision.models.resnet50(pretrained=True)
        >>> conv = model.conv1
        >>> plot_convolutional_features(conv, colorspace='rgb', fnum=None)

    """
    import kwplot
    kwplot.autompl()
    import matplotlib.pyplot as plt

    # get relevant data out of the pytorch module
    weights = conv.weight.data.cpu().numpy()
    in_channels = conv.in_channels
    # out_channels = conv.out_channels
    kernel_size = conv.kernel_size
    conv_dim = len(kernel_size)

    # TODO: use make_conv_images in the 2d case here

    if voxels:
        # use up to 3 spatial dimensions
        spatial_axes = list(kernel_size[-3:])
    else:
        # use only 2 spatial dimensions
        spatial_axes = list(kernel_size[-2:])
    color_axes = []

    output_axis = 0

    # If there are 3 input channels, we can visualize features in a colorspace
    if colorspace is not None and in_channels == 3:
        # Move colorable channels to the end (handle 1, 2 and 3d convolution)
        axes = [0] + list(range(2, 2 + conv_dim)) + [1]
        weights = weights.transpose(*axes)
        color_axes = [in_channels]
        output_axis = 0
    else:
        pass

    # Normalize layer weights between 0 and 1
    if normaxis is None:
        minval = weights.min()
        maxval = weights.max()
    else:
        # if normaxis=0 norm over output channels
        minval = weights.min(axis=output_axis, keepdims=True)
        maxval = weights.max(axis=output_axis, keepdims=True)

    weights_norm = (weights - minval) / (maxval - minval)

    if _hack_2drows:
        # To agree with jason's visualization for a paper figure
        if not voxels:
            weights_norm = weights_norm.transpose(1, 0, 2, 3)

    # flatten everything but the spatial and requested color dims
    weights_flat = weights_norm.reshape(-1, *(spatial_axes + color_axes))

    num_plots = min(weights_flat.shape[0], limit)
    dim = int(np.ceil(np.sqrt(num_plots)))

    if voxels:
        from mpl_toolkits.mplot3d import Axes3D  # NOQA
        filled = np.ones(spatial_axes, dtype=bool)
        # np.ones(spatial_axes)
        # d, h, w = np.indices(spatial_axes)

    fnum = kwplot.ensure_fnum(fnum)
    fig = kwplot.figure(fnum=fnum)
    fig.clf()
    if nCols is None:
        nCols = dim
    pnum_ = kwplot.PlotNums(nCols=nCols, nSubplots=num_plots)

    def plot_kernel3d(i):
        img = weights_flat[i]

        # fig = kwplot.figure(fnum=fnum, pnum=pnum_[i])
        ax = fig.add_subplot(*pnum_[i], projection='3d')
        # ax = fig.gca(projection='3d')

        alpha_ = (filled * alpha)[..., None]
        colors = img

        if not color_axes:
            import kwimage
            # transform grays into colors
            grays = kwimage.atleast_nd(img, 4)
            colors = np.concatenate([grays, grays, grays], axis=3)

        if colorspace and color_axes:
            import kwimage
            # convert into RGB
            for d in range(len(colors)):
                colors[d] = kwimage.convert_colorspace(colors[d],
                                                       src_space=colorspace,
                                                       dst_space='rgb')
        facecolors = np.concatenate([colors, alpha_], axis=3)

        # shuffle dims so height is upwards and depth move away from us.
        dim_labels = ['d', 'h', 'w']
        axes = [2, 0, 1]

        dim_labels = list(ub.take(dim_labels, axes))
        facecolors = facecolors.transpose(*(axes + [3]))
        filled_ = filled.transpose(*axes)
        spatial_axes_ = list(ub.take(spatial_axes, axes))

        # ax.voxels(filled_, facecolors=facecolors, edgecolors=facecolors)
        if False:
            ax.voxels(filled_, facecolors=facecolors, edgecolors='k')
        else:
            # hack to show "occluded" voxels
            # stride = [1, 3, 1]
            stride = [2, 2, 2]
            slices = tuple(slice(None, None, s) for s in stride)
            spatial_axes2 = list(np.array(spatial_axes_) * stride)
            filled2 = np.zeros(spatial_axes2, dtype=bool)
            facecolors2 = np.empty(spatial_axes2 + [4], dtype=np.float32)
            filled2[slices] = filled_
            facecolors2[slices] = facecolors
            edgecolors2 = [0, 0, 0, alpha]
            # 'k'
            # edgecolors2 = facecolors2

            # Shrink the gaps, which let you see occluded voxels
            x, y, z = np.indices(np.array(filled2.shape) +
                                 1).astype(float) // 2
            x[0::2, :, :] += 0.05
            y[:, 0::2, :] += 0.05
            z[:, :, 0::2] += 0.05
            x[1::2, :, :] += 0.95
            y[:, 1::2, :] += 0.95
            z[:, :, 1::2] += 0.95

            ax.voxels(x,
                      y,
                      z,
                      filled2,
                      facecolors=facecolors2,
                      edgecolors=edgecolors2)

        for xyz, dlbl in zip(['x', 'y', 'z'], dim_labels):
            getattr(ax, 'set_' + xyz + 'label')(dlbl)

        for xyz in ['x', 'y', 'z']:
            getattr(ax, 'set_' + xyz + 'ticks')([])

        try:
            ax.set_aspect('equal')
        except NotImplementedError:
            # 3d axes did not support aspect='equal' in older matplotlib
            pass
        if not labels or i < num_plots - 1:
            # show axis only on the last plot
            ax.grid(False)
            plt.axis('off')

    for i in ub.ProgIter(range(num_plots),
                         desc='plot conv layer',
                         enabled=False):
        if voxels:
            plot_kernel3d(i)
        else:
            img = weights_flat[i]
            kwplot.imshow(img,
                          fnum=fnum,
                          pnum=pnum_[i],
                          interpolation='nearest',
                          colorspace=colorspace)
    return fig
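
A minimal usage sketch for the voxel path of the function above; the Conv3d shape, limit, and fnum are illustrative assumptions chosen to keep the plot small, not values from the original example.

import torch
# Any Conv3d with 3 input channels exercises the colorspace + voxel path
conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=3)
fig = plot_convolutional_features(conv3d, colorspace='rgb', voxels=True,
                                  alpha=0.5, limit=4, fnum=3)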
Example #18
def warp_image_test(image=None, transform=None, dsize=None):
    """
    Benchmark several strategies for warping an image with a strong
    downscale component.

    Example setup::

        from kwimage.transform import Affine
        import kwimage
        image = kwimage.grab_test_image('checkerboard', dsize=(2048, 2048)).astype(np.float32)
        transform = Affine.random() @ Affine.scale(0.01)

    """
    from kwimage.transform import Affine
    import kwimage
    import numpy as np
    import ubelt as ub

    # Default to a random affine that probably has a small scale, and to a
    # test image, when the caller does not provide them.
    # Other transforms to try: Affine.scale((0.3, 2)), Affine.scale(0.05)
    if transform is None:
        transform = Affine.random() @ Affine.scale(0.01)
    if image is None:
        image = kwimage.grab_test_image('checkerboard')

    image = kwimage.ensure_float01(image)

    from kwimage import im_cv2
    import kwarray
    import cv2
    transform = Affine.coerce(transform)

    if dsize is None:
        h, w = image.shape[0:2]

        boxes = kwimage.Boxes(np.array([[0, 0, w, h]]), 'xywh')
        poly = boxes.to_polygons()[0]
        warped_poly = poly.warp(transform.matrix)
        warped_box = warped_poly.to_boxes().to_ltrb().quantize()
        dsize = tuple(map(int, warped_box.data[0, 2:4]))

    import timerit
    ti = timerit.Timerit(10, bestof=3, verbose=2)

    def _full_gauss_kernel(k0, sigma0, scale):
        # Number of 2x downsamples needed to reach ``scale``
        num_downscales = np.log2(1 / scale)
        if num_downscales < 0:
            return 1, 0

        # The kernel size and sigma double for each 2x downsample, so one
        # big Gaussian can stand in for the repeated blur-and-decimate.
        # (k0 and sigma0 are the size / sigma for a single downsample.)
        k = int(np.ceil(k0 * (2 ** (num_downscales - 1))))
        sigma = sigma0 * (2 ** (num_downscales - 1))

        # GaussianBlur requires an odd kernel size
        if k % 2 == 0:
            k += 1
        return k, sigma
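
    # Worked example of the kernel logic above (an illustrative check, not
    # from the original): scale=0.25 implies log2(1 / 0.25) = 2 downscales,
    # so with k0=5, sigma0=1: k = ceil(5 * 2 ** 1) = 10 -> odd size 11,
    # and sigma = 1 * 2 ** 1 = 2.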

    def pyrDownK(a, k=1):
        assert k >= 0
        for _ in range(k):
            a = cv2.pyrDown(a)
        return a

    for timer in ti.reset('naive'):
        with timer:
            interpolation = 'nearest'
            flags = im_cv2._coerce_interpolation(interpolation)
            final_v5 = cv2.warpAffine(image, transform.matrix[0:2], dsize=dsize, flags=flags)

    # --------------------
    # METHOD 1
    #
    for timer in ti.reset('resize+warp'):
        with timer:
            params = transform.decompose()

            sx, sy = params['scale']
            noscale_params = ub.dict_diff(params, {'scale'})
            noscale_warp = Affine.affine(**noscale_params)

            h, w = image.shape[0:2]
            resize_dsize = (int(np.ceil(sx * w)), int(np.ceil(sy * h)))

            # fx/fy are ignored when an explicit dsize is given
            downsampled = cv2.resize(image, dsize=resize_dsize,
                                     interpolation=cv2.INTER_AREA)

            interpolation = 'linear'
            flags = im_cv2._coerce_interpolation(interpolation)
            final_v1 = cv2.warpAffine(downsampled, noscale_warp.matrix[0:2], dsize=dsize, flags=flags)

    # --------------------
    # METHOD 2
    for timer in ti.reset('fullblur+warp'):
        with timer:
            k_x, sigma_x = _full_gauss_kernel(k0=5, sigma0=1, scale=sx)
            k_y, sigma_y = _full_gauss_kernel(k0=5, sigma0=1, scale=sy)
            image_ = image.copy()
            image_ = cv2.GaussianBlur(image_, (k_x, k_y), sigmaX=sigma_x, sigmaY=sigma_y)
            image_ = kwarray.atleast_nd(image_, 3)
            # image_ = image_.clip(0, 1)

            interpolation = 'linear'
            flags = im_cv2._coerce_interpolation(interpolation)
            final_v2 = cv2.warpAffine(image_, transform.matrix[0:2], dsize=dsize, flags=flags)

    # --------------------
    # METHOD 3

    for timer in ti.reset('pyrDown+blur+warp'):
        with timer:
            temp = image.copy()
            params = transform.decompose()
            sx, sy = params['scale']

            biggest_scale = max(sx, sy)
            # The -2 allows the gaussian to be a little bigger. This
            # seems to help with border effects at only a small runtime cost
            num_downscales = max(int(np.log2(1 / biggest_scale)) - 2, 0)
            pyr_scale = 1 / (2 ** num_downscales)

            # Does the gaussian downsampling
            temp = pyrDownK(image, num_downscales)

            rest_sx = sx / pyr_scale
            rest_sy = sy / pyr_scale

            partial_scale = Affine.scale((rest_sx, rest_sy))
            rest_warp = noscale_warp @ partial_scale

            k_x, sigma_x = _full_gauss_kernel(k0=5, sigma0=1, scale=rest_sx)
            k_y, sigma_y = _full_gauss_kernel(k0=5, sigma0=1, scale=rest_sy)
            temp = cv2.GaussianBlur(temp, (k_x, k_y), sigmaX=sigma_x, sigmaY=sigma_y)
            temp = kwarray.atleast_nd(temp, 3)

            interpolation = 'cubic'
            flags = im_cv2._coerce_interpolation(interpolation)
            final_v3 = cv2.warpAffine(temp, rest_warp.matrix[0:2], dsize=dsize,
                                      flags=flags)

    # --------------------
    # METHOD 4 - don't do the final blur

    for timer in ti.reset('pyrDown+warp'):
        with timer:
            temp = image.copy()
            params = transform.decompose()
            sx, sy = params['scale']

            biggest_scale = max(sx, sy)
            num_downscales = max(int(np.log2(1 / biggest_scale)), 0)
            pyr_scale = 1 / (2 ** num_downscales)

            # Does the gaussian downsampling
            temp = pyrDownK(image, num_downscales)

            rest_sx = sx / pyr_scale
            rest_sy = sy / pyr_scale

            partial_scale = Affine.scale((rest_sx, rest_sy))
            rest_warp = noscale_warp @ partial_scale

            interpolation = 'linear'
            flags = im_cv2._coerce_interpolation(interpolation)
            final_v4 = cv2.warpAffine(temp, rest_warp.matrix[0:2], dsize=dsize, flags=flags)

    if 1:

        def get_title(key):
            # Choose a readable time unit inline, which avoids depending on
            # the private ubelt.timerit._choose_unit helper
            value = ti.measures['mean'][key]
            for suffix, mag in [('s', 1.0), ('ms', 1e-3), ('us', 1e-6), ('ns', 1e-9)]:
                if value >= mag:
                    break
            return '{} {:.2f} {}'.format(key, value / mag, suffix)

        final_v2 = final_v2.clip(0, 1)
        final_v1 = final_v1.clip(0, 1)
        final_v3 = final_v3.clip(0, 1)
        final_v4 = final_v4.clip(0, 1)
        final_v5 = final_v5.clip(0, 1)
        import kwplot
        kwplot.autompl()
        kwplot.imshow(final_v5, pnum=(1, 5, 1), title=get_title('naive'))
        kwplot.imshow(final_v2, pnum=(1, 5, 2), title=get_title('fullblur+warp'))
        kwplot.imshow(final_v1, pnum=(1, 5, 3), title=get_title('resize+warp'))
        kwplot.imshow(final_v3, pnum=(1, 5, 4), title=get_title('pyrDown+blur+warp'))
        kwplot.imshow(final_v4, pnum=(1, 5, 5), title=get_title('pyrDown+warp'))
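
A minimal standalone sketch of METHOD 1 above (resize with INTER_AREA, then a scale-free warp), assuming only the kwimage and cv2 APIs already used in the benchmark; the helper name is illustrative and not part of either library.

def warp_affine_antialiased(image, transform, dsize):
    """ Sketch: anti-aliased affine warp via resize + scale-free warp """
    import cv2
    import numpy as np
    import ubelt as ub
    from kwimage.transform import Affine
    transform = Affine.coerce(transform)
    # Split the transform into its scale part and everything else
    params = transform.decompose()
    sx, sy = params['scale']
    noscale_warp = Affine.affine(**ub.dict_diff(params, {'scale'}))
    # Apply the scale with INTER_AREA, which anti-aliases on downsample
    h, w = image.shape[0:2]
    resize_dsize = (max(int(np.ceil(sx * w)), 1), max(int(np.ceil(sy * h)), 1))
    downsampled = cv2.resize(image, dsize=resize_dsize,
                             interpolation=cv2.INTER_AREA)
    # Apply the remaining rotation / shear / translation
    return cv2.warpAffine(downsampled, noscale_warp.matrix[0:2],
                          dsize=dsize, flags=cv2.INTER_LINEAR)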
Example #19
def convert_voc_to_coco(dpath=None):
    # TODO: convert segmentation information
    from os.path import dirname, join, relpath

    classes = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
    devkit_dpath = ensure_voc_data(dpath=dpath)
    root = out_dpath = dirname(devkit_dpath)

    dsets = []

    d = _convert_voc_split(devkit_dpath, classes, 'train', 2012, root)
    dsets.append(d)

    d = _convert_voc_split(devkit_dpath, classes, 'train', 2007, root)
    dsets.append(d)

    d = _convert_voc_split(devkit_dpath, classes, 'val', 2012, root)
    dsets.append(d)

    d = _convert_voc_split(devkit_dpath, classes, 'val', 2007, root)
    dsets.append(d)

    d = _convert_voc_split(devkit_dpath, classes, 'test', 2007, root)
    dsets.append(d)

    if 0:
        import xdev
        xdev.view_directory(out_dpath)

    def reroot_imgs(dset, root):
        for img in dset.imgs.values():
            img['file_name'] = relpath(img['file_name'], root)

    import kwcoco
    t1 = kwcoco.CocoDataset(join(out_dpath, 'voc-train-2007.mscoco.json'))
    t2 = kwcoco.CocoDataset(join(out_dpath, 'voc-train-2012.mscoco.json'))

    v1 = kwcoco.CocoDataset(join(out_dpath, 'voc-val-2007.mscoco.json'))
    v2 = kwcoco.CocoDataset(join(out_dpath, 'voc-val-2012.mscoco.json'))

    t = kwcoco.CocoDataset.union(t1, t2)
    t.tag = 'voc-train'
    t.fpath = join(root, t.tag + '.mscoco.json')

    v = kwcoco.CocoDataset.union(v1, v2)
    v.tag = 'voc-val'
    v.fpath = join(root, v.tag + '.mscoco.json')

    tv = kwcoco.CocoDataset.union(t1, t2, v1, v2)
    tv.tag = 'voc-trainval'
    tv.fpath = join(root, tv.tag + '.mscoco.json')

    print('t.fpath = {!r}'.format(t.fpath))
    t.dump(t.fpath, newlines=True)
    print('v.fpath = {!r}'.format(v.fpath))
    v.dump(v.fpath, newlines=True)
    print('tv.fpath = {!r}'.format(tv.fpath))
    tv.dump(tv.fpath, newlines=True)
    if 0:
        tv.img_root = root
        import kwplot
        kwplot.autompl()
        tv.show_image(2)

    dsets = {
        'train': t,
        'vali': v,
        'trainval': tv,
    }
    return dsets
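
A hedged usage sketch for the converter above; it assumes ensure_voc_data can fetch (or has already fetched) the VOC devkit, and uses kwcoco's basic_stats for the summary.

# Build the merged COCO-style datasets and print a quick summary
dsets = convert_voc_to_coco()
for tag, dset in dsets.items():
    print(tag, dset.basic_stats())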
Example #20
def demo_flip_orientations():
    import ubelt as ub
    from kwcoco.demo.toypatterns import Rasters
    img = Rasters.eff()[0]

    import kwplot
    kwplot.autompl()
    kwplot.imshow(img)

    flips = [
        [],
        [0],
        [1],
        [0, 1],
    ]
    import numpy as np

    toshow = []

    for axis in flips:
        flipped = np.flip(img, axis=axis)
        for k in [0, 1, 2, 3]:
            # Rotate the freshly flipped image each time so the label
            # matches the actual transform (no accumulated rotations)
            img_ = np.rot90(flipped, k=k)
            row = {'img': img_, 'label': f'rot(flip({axis}), k={k})'}
            row['params'] = [f'axis={axis}, k={k}']
            toshow.append(row)

    # for k in [0, 1, 2, 3]:
    #     img_ = img
    #     img_ = np.rot90(img_, k=k)
    #     for axis in flips:
    #         img_ = np.flip(img_, axis=axis)
    #         row = {'img': img_, 'label': f'flip(rot(k={k}), {axis})'}
    #         row['params'] = [f'k={k}, axis={axis}']
    #         toshow.append(row)

    for row in toshow:
        row['hash'] = ub.hash_data(row['img'])

    pnum_ = kwplot.PlotNums(nSubplots=len(toshow))
    groups = ub.group_items(toshow, lambda x: x['hash'])
    # Only 8 possibilities
    for k, group in groups.items():
        print('\n\nk = {!r}'.format(k))
        for g in group:
            print(g['params'])
            row = g
            kwplot.imshow(row['img'], pnum=pnum_(), fnum=1, title=row['label'])

    unique_fliprots = [
        {'k': 0, 'axis': []},
        {'k': 1, 'axis': []},
        {'k': 2, 'axis': []},
        {'k': 3, 'axis': []},
        {'k': 0, 'axis': [0]},
        {'k': 1, 'axis': [0]},
        {'k': 2, 'axis': [0]},
        {'k': 3, 'axis': [0]},
    ]
    s = []
    for params in unique_fliprots:
        k = params['k']
        axis = params['axis']
        img_ = np.rot90(np.flip(img, axis=axis), k=k)
        s.append(ub.hash_data(img_))

    assert len(set(s)) == len(s)
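
A standalone check of the fact the demo above illustrates: flips and 90-degree rotations of a 2d array produce exactly 8 distinct images (the dihedral group of the square). This is an illustrative sketch using only numpy, not code from the original demo.

import numpy as np
img = np.arange(12).reshape(3, 4)  # any array with no symmetries works
variants = set()
for axis in ([], [0], [1], [0, 1]):
    for k in range(4):
        img_ = np.rot90(np.flip(img, axis=axis), k=k)
        # include the shape so differently-shaped results never collide
        variants.add(img_.tobytes() + str(img_.shape).encode())
assert len(variants) == 8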
Example #21
def benchmark_hash_data():
    """
    CommandLine:
        python ~/code/ubelt/dev/bench_hash.py --convert=True --show
        python ~/code/ubelt/dev/bench_hash.py --convert=False --show
    """
    import ubelt as ub
    #ITEM = 'JUST A STRING' * 100
    ITEM = [0, 1, 'a', 'b', ['JUST A STRING'] * 4]
    HASHERS = ['sha1', 'sha512', 'xxh32', 'xxh64', 'blake3']
    scales = list(range(5, 13))
    results = ub.AutoDict()
    # Using convert=True (json) is faster, or at least as fast, in most cases.
    # xxhash is also significantly faster than sha512.
    convert = ub.argval('--convert', default='True').lower() == 'true'
    print('convert = {!r}'.format(convert))
    ti = ub.Timerit(9, bestof=3, verbose=1, unit='ms')
    for s in ub.ProgIter(scales, desc='benchmark', verbose=3):
        N = 2**s
        print(' --- s={s}, N={N} --- '.format(s=s, N=N))
        data = [ITEM] * N
        for hasher in HASHERS:
            for timer in ti.reset(hasher):
                ub.hash_data(data, hasher=hasher, convert=convert)
            results[hasher].update({N: ti.mean()})
        col = {h: results[h][N] for h in HASHERS}
        sortx = ub.argsort(col)
        ranking = ub.dict_subset(col, sortx)
        print('walltime: ' + ub.repr2(ranking, precision=9, nl=0))
        best = next(iter(ranking))
        #pairs = list(ub.iter_window( 2))
        pairs = [(k, best) for k in ranking]
        ratios = [ranking[k1] / ranking[k2] for k1, k2 in pairs]
        nicekeys = ['{}/{}'.format(k1, k2) for k1, k2 in pairs]
        relratios = ub.odict(zip(nicekeys, ratios))
        print('speedup: ' + ub.repr2(relratios, precision=4, nl=0))
    # xdoc +REQUIRES(--show)
    # import pytest
    # pytest.skip()
    import pandas as pd
    df = pd.DataFrame.from_dict(results)
    df.columns.name = 'hasher'
    df.index.name = 'N'
    ratios = df.copy().drop(columns=df.columns)
    for k1, k2 in [('sha512', 'xxh32'), ('sha1', 'xxh32'), ('xxh64', 'xxh32')]:
        ratios['{}/{}'.format(k1, k2)] = df[k1] / df[k2]
    print()
    print('Seconds per iteration')
    print(df.to_string(float_format='%.9f'))
    print()
    print('Ratios of seconds')
    print(ratios.to_string(float_format='%.2f'))
    print()
    print('Average Ratio (over all N)')
    print('convert = {!r}'.format(convert))
    print(ratios.mean().sort_values())
    if ub.argflag('--show'):
        import kwplot
        kwplot.autompl()
        xdata = sorted(ub.peek(results.values()).keys())
        ydata = ub.map_vals(lambda d: [d[x] for x in xdata], results)
        kwplot.multi_plot(xdata,
                          ydata,
                          xlabel='N',
                          ylabel='seconds',
                          title='convert = {}'.format(convert))
        kwplot.show_if_requested()
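
A minimal sketch of the timing pattern used above, reduced to a single hasher; the data size and hasher choice are illustrative assumptions (xxh64 requires the xxhash package).

import ubelt as ub
data = [0, 1, 'a', 'b', ['JUST A STRING'] * 4] * 1024
ti = ub.Timerit(9, bestof=3, verbose=1)
for timer in ti.reset('xxh64'):
    ub.hash_data(data, hasher='xxh64')
print('mean seconds = {!r}'.format(ti.mean()))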
Example #22
def main():
    """
    CommandLine:
        python examples/ggr_matching.py --help

        # Test Runs:
            # use a very small input dimension to test things out
            python examples/ggr_matching.py --dbname PZ_MTEST --workers=0 --dim=32 --xpu=cpu

            # test that GPU works
            python examples/ggr_matching.py --dbname PZ_MTEST --workers=0 --dim=32 --xpu=gpu0

            # test that running at a large size works
            python examples/ggr_matching.py --dbname PZ_MTEST --workers=6 --dim=416 --xpu=gpu0

        # Real Run:
        python examples/ggr_matching.py --dbname GZ_Master1 --workers=6 --dim=512 --xpu=gpu0 --batch_size=10 --lr=0.00001 --nice=gzrun
        python examples/ggr_matching.py --dbname GZ_Master1 --workers=6 --dim=512 --xpu=gpu0 --batch_size=6 --lr=0.001 --nice=gzrun

    Notes:
        # Some database names
        PZ_Master1
        GZ_Master1
        RotanTurtles
        humpbacks_fb
    """
    print('PARSE')
    import xinspect
    parser = xinspect.auto_argparse(setup_harn)

    parser.add_argument('--lrtest',
                        action='store_true',
                        help='Run Leslie Smith\'s LR range test')
    parser.add_argument('--interact',
                        action='store_true',
                        help='Interact with the range test')
    args, unknown = parser.parse_known_args()
    ns = args.__dict__.copy()

    args.interact |= args.lr == 'interact'
    args.lrtest |= (args.lr == 'test' or args.interact)

    if args.lrtest or args.interact:
        # TODO:
        # - [ ] tweak setup_harn so running the lr-range-test isn't awkward
        from netharn.prefit.lr_tests import lr_range_test
        ns['lr'] = 1e-99

        if args.interact:
            import kwplot
            kwplot.autompl()
            import matplotlib.pyplot as plt

        harn = setup_harn(**ns)
        harn.initialize()
        # TODO: We could cache the result based on the netharn
        # hyperparameters. This would let us integrate with the
        # default fit harness.
        result = lr_range_test(harn)
        print('result.recommended_lr = {!r}'.format(result.recommended_lr))

        if args.interact:
            result.draw()
            plt.show()

        # Seed value with test result
        ns['lr'] = result.recommended_lr
        harn = setup_harn(**ns).initialize()
    else:
        harn = setup_harn(**ns)

    print('ABOUT TO INIT')
    harn.initialize()
    print('ABOUT TO RUN')
    harn.run()
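
A conventional entry point is the only piece needed to run this as a script; a minimal sketch, assuming the module is invoked directly as in the CommandLine examples above:

if __name__ == '__main__':
    main()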