Example 1
def show_function_usage(fname, funcname_list, dpath_list):
    # Check for usages of each function
    funcname_list = [r'\b%s\b' % (funcname.strip(),) for funcname in funcname_list if len(funcname) > 0]
    flagged_funcnames = []
    for funcname in funcname_list:
        found_filestr_list, found_lines_list, found_lxs_list = ut.grep([funcname], dpath_list=dpath_list)
        total = 0
        for lines in found_lines_list:
            total += len(lines)
        funcname_ = funcname.replace('\\b', '')
        print(funcname_ + ' ' + str(total))
        if total == 1:
            flagged_funcnames.append(funcname_)
        # Find external usages (matches outside the defining file)
        isexternal_list = [fname != fname_ for fname_ in found_filestr_list]
        external_filestr_list = ut.filter_items(found_filestr_list, isexternal_list)
        external_lines_list = ut.filter_items(found_lines_list, isexternal_list)
        #external_lxs_list = ut.filter_items(found_lxs_list, isexternal_list)
        if len(external_filestr_list) == 0:
            print(' no external usage')
        else:
            for filename, lines in zip(external_filestr_list, external_lines_list):
                print(' * filename=%r' % (filename,))
                print(ut.list_str(lines))
            #print(ut.list_str(list(zip(external_filestr_list, external_lines_list))))
    print('----------')
    print('flagged:')
    print('\n'.join(flagged_funcnames))
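Every example on this page revolves around utool's filter_items. Judging by how the snippets use it, it behaves like itertools.compress: keep the items whose parallel boolean flag is truthy. A minimal sketch under that assumption (not the actual utool source):

def filter_items(item_list, flag_list):
    # Keep items whose corresponding flag is truthy
    return [item for item, flag in zip(item_list, flag_list) if flag]

def filterfalse_items(item_list, flag_list):
    # Complement of filter_items: keep items whose flag is falsy
    return [item for item, flag in zip(item_list, flag_list) if not flag]

assert filter_items(['a', 'b', 'c'], [True, False, True]) == ['a', 'c']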
Example 2
def _where_ranks_lt(orgres, num):
    """ get new orgres where all the ranks are less or equal to """
    # Remove None ranks
    isvalid = [rank is not None and rank <= num and rank != -1
                for rank in orgres.ranks]
    orgres2 = OrganizedResult(orgres.orgtype + ' <= %d' % num)
    orgres2.qaids  = utool.filter_items(orgres.qaids, isvalid)
    orgres2.aids   = utool.filter_items(orgres.aids, isvalid)
    orgres2.scores = utool.filter_items(orgres.scores, isvalid)
    orgres2.ranks  = utool.filter_items(orgres.ranks, isvalid)
    return orgres2
Example 3
def fix_nulled_yaws(ibs):
    aid_list = ibs.get_valid_aids()
    yaw_list = ibs.get_annot_yaws(aid_list)
    # Yaws that were incorrectly stored as 0.0 need to be nulled
    isdirty_list = [yaw == 0.0 for yaw in yaw_list]
    dirty_aid_list = ut.filter_items(aid_list, isdirty_list)
    print("[duct_tape] Nulling %d annotation yaws" % len(dirty_aid_list))
    ibs.set_annot_yaws(dirty_aid_list, [None] * len(dirty_aid_list))
Example 4
def parse_and_update_image_exif_orientations(ibs, verbose=False):
    from PIL import Image  # NOQA
    from ibeis.algo.preproc.preproc_image import parse_exif
    from os.path import exists

    def _parse_orient(gpath):
        if verbose:
            print('[db_update (1.5.2)]     Parsing: %r' % (gpath, ))
        pil_img = Image.open(gpath, 'r')  # NOQA
        time, lat, lon, orient = parse_exif(pil_img)  # Read exif tags
        return orient

    # Keep only images whose paths still exist, then find ones missing an orientation
    gid_list_all = ibs.get_valid_gids()
    gpath_list = ibs.get_image_paths(gid_list_all)
    valid_list = [exists(gpath) for gpath in gpath_list]
    gid_list = ut.filter_items(gid_list_all, valid_list)

    orient_list = ibs.get_image_orientation(gid_list)
    zipped = zip(gid_list, orient_list)
    gid_list_ = [gid for gid, orient in zipped if orient in [0, None]]
    args = (len(gid_list_), len(gid_list_all), valid_list.count(False))
    print(
        '[db_update (1.5.2)] Parsing Exif orientations for %d / %d images (skipping %d)'
        % args)
    gpath_list_ = ibs.get_image_paths(gid_list_)
    orient_list_ = [_parse_orient(gpath) for gpath in gpath_list_]
    ibs.set_image_orientation(gid_list_, orient_list_)
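The snippet above goes through ibeis' parse_exif helper. For reference, a plain-Pillow sketch of reading just the orientation tag (0x0112); this assumes Pillow 6+ where Image.getexif is available, and is not the ibeis implementation:

from PIL import Image

def read_exif_orientation(gpath):
    # EXIF tag 0x0112 holds the orientation; returns None when the tag is absent
    with Image.open(gpath) as pil_img:
        return pil_img.getexif().get(0x0112, None)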
Example 5
def TEST_DETECT(ibs):
    # Run the random-forest detector on a small set of test images
    print('get_valid_ANNOTATIONS')
    gid_list = ibs.get_valid_gids()[0:1]
    if SPECIAL:
        gid_list = utool.safe_slice(ibs.get_valid_gids(), 3)
    #gid_list.extend(ibs.add_images([utool.unixpath('~/Dropbox/Chuck/detect_testimg/testgrevy.jpg')]))
    species = 'zebra_grevys'
    detectkw = {
        'quick': True,
        'save_detection_images': SPECIAL,
        'save_scales': SPECIAL,
    }
    detect_gen = randomforest.generate_detections(ibs, gid_list, species, **detectkw)
    gid_list2 = []
    bbox_list2 = []
    for gid, bboxes, confidences, img_conf in detect_gen:
        for bbox in bboxes:
            gid_list2.append(gid)
            bbox_list2.append(bbox)
            # not using confidence nor img_conf here

    if SPECIAL:
        from plottool import viz_image2, fig_presenter
        #from plottool import draw_func2 as df2
        for gid in gid_list:
            isthisgid = [gid == gid2 for gid2 in gid_list2]
            bbox_list = utool.filter_items(bbox_list2, isthisgid)
            img = ibs.get_images(gid)
            fig = viz_image2.show_image(img, bbox_list=bbox_list)
        fig_presenter.present()
    #fig_presenter.all_figures_bring_to_front()
    #ibs.detect_random_forest(gid_list, 'zebra_grevys')
    return locals()
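The double loop above flattens the generator's (gid, bboxes) pairs into the parallel lists gid_list2 and bbox_list2; the SPECIAL branch then re-filters per gid. A dict-based regrouping (a sketch, not ibeis API) does the same inversion in one pass:

from collections import defaultdict

def group_bboxes_by_gid(gid_list2, bbox_list2):
    # Invert the flattened parallel lists back into {gid: [bbox, ...]}
    grouped = defaultdict(list)
    for gid, bbox in zip(gid_list2, bbox_list2):
        grouped[gid].append(bbox)
    return dict(grouped)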
Example 6
 def wrp_getter_cacher(self, rowid_list, *args, **kwargs):
     # the class must have a table_cache property
     cache_ = self.table_cache[tblname][colname]
     # Get cached values for each rowid
     vals_list = [cache_.get(rowid, None) for rowid in rowid_list]
     # Compute any cache misses
     miss_list = [val is None for val in vals_list]
     #DEBUG_CACHE_HITS = False
     #if DEBUG_CACHE_HITS:
     #    num_miss  = sum(miss_list)
     #    num_total = len(rowid_list)
     #    num_hit   = num_total - num_miss
     #    print('\n[get] %s.%s %d / %d cache hits' % (tblname, colname, num_hit, num_total))
     if not any(miss_list):
         return vals_list
     miss_rowid_list = utool.filter_items(rowid_list, miss_list)
     miss_vals = getter_func(self, miss_rowid_list, *args, **kwargs)
     # Write the misses to the cache
     miss_iter_ = enumerate(miss_vals)
     for index, flag in enumerate(miss_list):
         if flag:
             miss_index, miss_val = next(miss_iter_)
             rowid = rowid_list[index]
             vals_list[index] = miss_val  # Output write
             cache_[rowid] = miss_val  # Cache write
     return vals_list
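The wrapper implements a read-through cache: fetch everything from the cache, recompute only the misses, then write them back. A self-contained sketch of the same pattern with a plain dict (names here are illustrative, not utool API):

def cached_get(cache, rowid_list, compute_func):
    vals = [cache.get(rowid) for rowid in rowid_list]
    miss_rowids = [rowid for rowid, val in zip(rowid_list, vals) if val is None]
    if miss_rowids:
        # Compute all misses in one batched call, then re-read in order
        cache.update(zip(miss_rowids, compute_func(miss_rowids)))
        vals = [cache[rowid] for rowid in rowid_list]
    return vals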
Example 7
def get_test_qaids(ibs):
    """ Gets test annotation_rowids based on command line arguments """
    #print('[main_helpers]')
    test_qaids = []
    valid_aids = ibs.get_valid_aids()
    printDBG('1. valid_aids = %r' % valid_aids[0:5])
    #print(utool.dict_str(vars(params.args)))

    if params.args.qaid is not None:
        printDBG('Testing qaid=%r' % params.args.qaid)
        test_qaids.extend(params.args.qaid)

    if params.args.all_cases:
        printDBG('Testing all %d cases' % (len(valid_aids),))
        printDBG('1. test_qaids = %r' % test_qaids[0:5])
        test_qaids.extend(valid_aids)
        printDBG('2. test_qaids = %r' % test_qaids[0:5])
    else:
        is_hard_list = ibsfuncs.get_annot_is_hard(ibs, valid_aids)
        hard_aids = utool.filter_items(valid_aids, is_hard_list)
        printDBG('Testing %d known hard cases' % len(hard_aids))
        test_qaids.extend(hard_aids)

    if params.args.all_gt_cases:
        has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)
        hasgt_aids = utool.filter_items(valid_aids, has_gt_list)
        print('Testing all %d ground-truthed cases' % len(hasgt_aids))
        test_qaids.extend(hasgt_aids)

    # Sample a large pool of query indexes
    # Filter only the ones you want from the large pool
    if params.args.index is not None:
        indexes = utool.ensure_iterable(params.args.index)
        #printDBG('Chosen indexes=%r' % (indexes,))
        #printDBG('test_qaids = %r' % test_qaids[0:5])
        _test_qaids = [test_qaids[xx] for xx in indexes]
        test_qaids = _test_qaids
        #printDBG('test_qaids = %r' % test_qaids)
    elif len(test_qaids) == 0 and len(valid_aids) > 0:
        #printDBG('no hard or gt aids. Defaulting to the first ANNOTATION')
        test_qaids = valid_aids[0:1]

    #print('test_qaids = %r' % test_qaids)
    test_qaids = utool.unique_keep_order2(test_qaids)
    return test_qaids
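utool.unique_keep_order2 presumably deduplicates while preserving first-seen order. On Python 3.7+ the same behavior (an assumption about the utool function, not its source) falls out of ordered dicts:

def unique_keep_order(items):
    # dict.fromkeys keeps the first occurrence of each item, in order
    return list(dict.fromkeys(items))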
Example 8
    def postingest_tesdb1_func(ibs):
        print('postingest_tesdb1_func')
        # Adjust data as we see fit
        import numpy as np
        gid_list = np.array(ibs.get_valid_gids())
        unixtimes_even = (gid_list[0::2] + 100).tolist()
        unixtimes_odd  = (gid_list[1::2] + 9001).tolist()
        unixtime_list = unixtimes_even + unixtimes_odd
        ibs.set_image_unixtime(gid_list, unixtime_list)
        # Unname first aid in every name
        aid_list = ibs.get_valid_aids()
        nid_list = ibs.get_annot_nids(aid_list)
        nid_list = [ (nid if nid > 0 else None) for nid in nid_list]
        unique_flag = utool.flag_unique_items(nid_list)
        unique_nids = utool.filter_items(nid_list, unique_flag)
        isnotnone_list = [nid is not None for nid in nid_list]
        flagged_nids = [nid for nid in unique_nids if nid_list.count(nid) > 1]
        plural_flag = [nid in flagged_nids for nid in nid_list]
        flag_list = [all(tup) for tup in zip(plural_flag, unique_flag, isnotnone_list)]
        flagged_aids = utool.filter_items(aid_list, flag_list)
        if utool.VERYVERBOSE:
            def print2(*args):
                print('[post_testdb1] ' + ', '.join(args))
            print2('aid_list=%r' % aid_list)
            print2('nid_list=%r' % nid_list)
            print2('unique_flag=%r' % unique_flag)
            print2('plural_flag=%r' % plural_flag)
            print2('unique_nids=%r' % unique_nids)
            print2('isnotnone_list=%r' % isnotnone_list)
            print2('flag_list=%r' % flag_list)
            print2('flagged_nids=%r' % flagged_nids)
            print2('flagged_aids=%r' % flagged_aids)
            # print2('new_nids=%r' % new_nids)
        # Unname some annotations for testing
        delete_aids = flagged_aids  # same filter as flagged_aids above
        ibs.delete_annot_nids(delete_aids)
        # Add all annotations with names as exemplars
        from ibeis.control.IBEISControl import IBEISController
        assert isinstance(ibs, IBEISController)
        unflagged_aids = utool.get_dirty_items(aid_list, flag_list)
        exemplar_flags = [True] * len(unflagged_aids)
        ibs.set_annot_exemplar_flag(unflagged_aids, exemplar_flags)

        return None
Example 9
def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET,
                           strict=False, print_caller=PRINT_CALLER, lbl='files'):
    """ checks existance before removing. then tries to remove exisint paths """
    import utool as ut
    if print_caller:
        print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_existing_fpaths')
    fpath_list_ = ut.filter_Nones(fpath_list)
    exists_list = list(map(exists, fpath_list_))
    if verbose:
        nTotal = len(fpath_list)
        nValid = len(fpath_list_)
        nExist = sum(exists_list)
        print('[util_path.remove_existing_fpaths] requesting delete of %d %s' % (nTotal, lbl))
        if nValid != nTotal:
            print('[util_path.remove_existing_fpaths] trying to delete %d/%d non None %s ' % (nValid, nTotal, lbl))
        print('[util_path.remove_existing_fpaths] %d/%d exist and need to be deleted' % (nExist, nValid))
    existing_fpath_list = ut.filter_items(fpath_list_, exists_list)
    return remove_fpaths(existing_fpath_list, verbose=verbose, quiet=quiet,
                            strict=strict, print_caller=False, lbl=lbl)
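ut.filter_Nones, as used here, presumably just drops the None entries before the existence check; a one-line sketch under that assumption:

def filter_Nones(items):
    # Drop None entries, keeping order
    return [item for item in items if item is not None]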
Example 10
 def add_cleanly(db, tblname, colnames, params_iter, get_rowid_from_superkey, superkey_paramx=(0,)):
     """ ADDER Extra input:
         the first item of params_iter must be a superkey (like a uuid), """
     # ADD_CLEANLY_1: PREPROCESS INPUT
     params_list = list(params_iter)  # eagerly evaluate for superkeys
     # Extract superkeys from the params list (requires eager eval)
     superkey_lists = [[None if params is None else params[x]
                        for params in params_list]
                       for x in superkey_paramx]
     # ADD_CLEANLY_2: PERFORM INPUT CHECKS
     # check which parameters are valid
     isvalid_list = [params is not None for params in params_list]
     # Check for duplicate inputs
     isunique_list = utool.flag_unique_items(list(zip(*superkey_lists)))
     # Check to see if this already exists in the database
     rowid_list_ = get_rowid_from_superkey(*superkey_lists)
     isnew_list  = [rowid is None for rowid in rowid_list_]
     if VERBOSE and not all(isunique_list):
         print('[WARNING]: duplicate inputs to db.add_cleanly')
     # Flag each item that needs to be added to the database
     isdirty_list = list(map(all, zip(isvalid_list, isunique_list, isnew_list)))
     # ADD_CLEANLY_3.1: EXIT IF CLEAN
     if not any(isdirty_list):
         return rowid_list_  # There is nothing to add. Return the rowids
     # ADD_CLEANLY_3.2: PERFORM DIRTY ADDITIONS
     dirty_params = utool.filter_items(params_list, isdirty_list)
     if utool.VERBOSE:
         print('[sql] adding %r/%r new %s' % (len(dirty_params), len(params_list), tblname))
     # Add any unadded parameters to the database
     try:
         db._add(tblname, colnames, dirty_params)
     except Exception as ex:
         utool.printex(ex, key_list=['isdirty_list', 'superkey_lists', 'rowid_list_'])
         raise
     # TODO: We should only have to perform a subset of adds here
     # (at the positions where rowid_list was None in the getter check)
     rowid_list = get_rowid_from_superkey(*superkey_lists)
     # ADD_CLEANLY_4: SANITY CHECK AND RETURN
     assert len(rowid_list) == len(params_list), 'failed sanity check'
     return rowid_list
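utool.flag_unique_items marks the first occurrence of each superkey so that duplicate inputs are only added once. A sketch of that assumed behavior:

def flag_unique_items(items):
    # True for the first occurrence of each item, False for repeats
    seen = set()
    flags = []
    for item in items:
        flags.append(item not in seen)
        seen.add(item)
    return flags

assert flag_unique_items([1, 2, 1, 3]) == [True, True, False, True]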
Example 11
    def __init__(split_index, ibs, daid_list, num_forests=8):
        print('[nnsindex] make NNSplitIndex over %d annots' % (len(daid_list),))
        aid_list = daid_list
        nid_list = ibs.get_annot_nids(aid_list)
        #flag_list = ibs.get_annot_exemplar_flag(aid_list)
        nid2_aids = utool.group_items(aid_list, nid_list)
        key_list = list(nid2_aids.keys())
        aids_list = list(nid2_aids.values())
        isunknown_list = ibs.is_nid_unknown(key_list)

        known_aids   = utool.filterfalse_items(aids_list, isunknown_list)
        unknown_aids = utool.flatten(utool.filter_items(aids_list, isunknown_list))

        num_forests_ = min(max(map(len, aids_list)), num_forests)

        # Put one name per forest
        forest_aids, overflow_aids = utool.sample_zip(known_aids, num_forests_,
                                                      allow_overflow=True,
                                                      per_bin=1)

        forest_indexes = []
        extra_indexes = []
        for tx, aids in enumerate(forest_aids):
            print('[nnsindex] building forest %d/%d with %d aids' % (tx + 1, num_forests_, len(aids)))
            if len(aids) > 0:
                nn_index = NNIndex(ibs, aids)
                forest_indexes.append(nn_index)

        if len(overflow_aids) > 0:
            print('[nnsindex] building overflow forest')
            overflow_index = NNIndex(ibs, overflow_aids)
            extra_indexes.append(overflow_index)
        if len(unknown_aids) > 0:
            print('[nnsindex] building unknown forest')
            unknown_index = NNIndex(ibs, unknown_aids)
            extra_indexes.append(unknown_index)
        #print('[nnsindex] building normalizer forest')  # TODO

        split_index.forest_indexes = forest_indexes
        split_index.extra_indexes = extra_indexes
Example 12
def fix_annotation_orientation(ibs, min_percentage=0.95):
    """
    Fixes the annotations that are outside the bounds of the image due to a
    changed image orientation flag in the database

    CommandLine:
        python -m ibeis.scripts.fix_annotation_orientation_issue fix_annotation_orientation

    Example:
        >>> # ENABLE_DOCTEST
        >>> import ibeis
        >>> from ibeis.scripts.fix_annotation_orientation_issue import *  # NOQA
        >>> ibs = ibeis.opendb()
        >>> unfixable_gid_list = fix_annotation_orientation(ibs)
        >>> assert len(unfixable_gid_list) == 0
    """
    from vtool import exif
    import numpy as np   # np, ut, vt are assumed module-level imports in the original file
    import utool as ut
    import vtool as vt

    def bbox_overlap(bbox1, bbox2):
        ax1 = bbox1[0]
        ay1 = bbox1[1]
        ax2 = bbox1[0] + bbox1[2]
        ay2 = bbox1[1] + bbox1[3]
        bx1 = bbox2[0]
        by1 = bbox2[1]
        bx2 = bbox2[0] + bbox2[2]
        by2 = bbox2[1] + bbox2[3]
        overlap_x = max(0, min(ax2, bx2) - max(ax1, bx1))
        overlap_y = max(0, min(ay2, by2) - max(ay1, by1))
        return overlap_x * overlap_y

    orient_dict = exif.ORIENTATION_DICT_INVERSE
    good_orient_list = [
        exif.ORIENTATION_UNDEFINED,
        exif.ORIENTATION_000,
    ]
    good_orient_key_list = [
        orient_dict.get(good_orient) for good_orient in good_orient_list
    ]
    assert None not in good_orient_key_list

    gid_list = ibs.get_valid_gids()
    orient_list = ibs.get_image_orientation(gid_list)
    flag_list = [orient not in good_orient_key_list for orient in orient_list]

    # Keep only the images flagged with non-standard orientations
    unfixable_gid_list = []
    gid_list = ut.filter_items(gid_list, flag_list)
    orient_list = ut.filter_items(orient_list, flag_list)
    if len(gid_list) > 0:
        args = (len(gid_list), )
        print('Found %d images with non-standard orientations' % args)
        aids_list = ibs.get_image_aids(gid_list,
                                       is_staged=None,
                                       __check_staged__=False)
        size_list = ibs.get_image_sizes(gid_list)
        invalid_gid_list = []
        zipped = zip(gid_list, orient_list, aids_list, size_list)
        for gid, orient, aid_list, (w, h) in zipped:
            image = ibs.get_images(gid)
            h_, w_ = image.shape[:2]
            if h != h_ or w != w_:
                ibs._set_image_sizes([gid], [w_], [h_])
            orient_str = exif.ORIENTATION_DICT[orient]
            image_bbox = (0, 0, w, h)
            verts_list = ibs.get_annot_rotated_verts(aid_list)
            invalid = False
            for aid, vert_list in zip(aid_list, verts_list):
                annot_bbox = vt.bbox_from_verts(vert_list)
                overlap = bbox_overlap(image_bbox, annot_bbox)
                area = annot_bbox[2] * annot_bbox[3]
                percentage = overlap / area
                if percentage < min_percentage:
                    args = (gid, orient_str, aid, overlap, area, percentage)
                    print(
                        '\tInvalid GID %r, Orient %r, AID %r: Overlap %0.2f, Area %0.2f (%0.2f %%)'
                        % args)
                    invalid = True
                    # break
            if invalid:
                invalid_gid_list.append(gid)

        invalid_gid_list = list(set(invalid_gid_list))
        if len(invalid_gid_list) > 0:
            args = (
                len(invalid_gid_list),
                len(gid_list),
                invalid_gid_list,
            )
            print('Found %d / %d images with invalid annotations = %r' % args)
            orient_list = ibs.get_image_orientation(invalid_gid_list)
            aids_list = ibs.get_image_aids(invalid_gid_list,
                                           is_staged=None,
                                           __check_staged__=False)
            size_list = ibs.get_image_sizes(invalid_gid_list)
            zipped = zip(invalid_gid_list, orient_list, aids_list, size_list)
            for invalid_gid, orient, aid_list, (w, h) in zipped:
                orient_str = exif.ORIENTATION_DICT[orient]
                image_bbox = (0, 0, w, h)
                args = (
                    invalid_gid,
                    len(aid_list),
                )
                print('Fixing GID %r with %d annotations' % args)
                theta = np.pi / 2.0
                tx = 0.0
                ty = 0.0
                if orient == orient_dict.get(exif.ORIENTATION_090):
                    theta *= 1.0
                    tx = w
                elif orient == orient_dict.get(exif.ORIENTATION_180):
                    theta *= 2.0
                    tx = w
                    ty = h
                elif orient == orient_dict.get(exif.ORIENTATION_270):
                    theta *= -1.0
                    ty = h
                else:
                    raise ValueError('Unrecognized invalid orientation')
                H = np.array([[np.cos(theta), -np.sin(theta), tx],
                              [np.sin(theta), np.cos(theta), ty],
                              [0.0, 0.0, 1.0]])
                # print(H)
                verts_list = ibs.get_annot_rotated_verts(aid_list)
                for aid, vert_list in zip(aid_list, verts_list):
                    vert_list = np.array(vert_list)
                    # print(vert_list)
                    vert_list = vert_list.T
                    transformed_vert_list = vt.transform_points_with_homography(
                        H, vert_list)
                    transformed_vert_list = transformed_vert_list.T
                    # print(transformed_vert_list)

                    ibs.set_annot_verts([aid], [transformed_vert_list],
                                        update_visual_uuids=False)
                    current_theta = ibs.get_annot_thetas(aid)
                    new_theta = current_theta + theta
                    ibs.set_annot_thetas(aid,
                                         new_theta,
                                         update_visual_uuids=False)

                    fixed_vert_list = ibs.get_annot_rotated_verts(aid)
                    fixed_annot_bbox = vt.bbox_from_verts(fixed_vert_list)
                    fixed_overlap = bbox_overlap(image_bbox, fixed_annot_bbox)
                    fixed_area = fixed_annot_bbox[2] * fixed_annot_bbox[3]
                    fixed_percentage = fixed_overlap / fixed_area
                    args = (invalid_gid, orient_str, aid, fixed_overlap,
                            fixed_area, fixed_percentage)
                    print(
                        '\tFixing GID %r, Orient %r, AID %r: Overlap %0.2f, Area %0.2f (%0.2f %%)'
                        % args)
                    if fixed_percentage < min_percentage:
                        print('\tWARNING: FIXING DID NOT CORRECT AID %r' %
                              (aid, ))
                        unfixable_gid_list.append(invalid_gid)
    print('Un-fixable gid_list = %r' % (unfixable_gid_list, ))
    return unfixable_gid_list
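The fix builds a 3x3 homogeneous matrix H that rotates the verts by theta and then translates them back into the image frame. A minimal numpy sketch of constructing and applying such a homography (the row-vector convention here is for readability; the code above transposes its verts to columns instead):

import numpy as np

def rotation_homography(theta, tx=0.0, ty=0.0):
    # Rotate by theta about the origin, then translate by (tx, ty)
    return np.array([[np.cos(theta), -np.sin(theta), tx],
                     [np.sin(theta),  np.cos(theta), ty],
                     [0.0,            0.0,           1.0]])

def transform_points(H, pts):
    # pts is an (N, 2) array of (x, y) points; returns the transformed (N, 2) array
    pts_h = np.hstack([pts, np.ones((len(pts), 1))])
    out = pts_h @ H.T
    return out[:, :2] / out[:, 2:3]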
Example 13
def sort_module_functions():
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]

    tmp = ['def' if isfunc else indent for isfunc, indent in zip(isfunc_list, indent_list)]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)

    #for block in block_list:
    #    print('#====')
    #    print(block)

    isfunc_list = [re.search('^def ', block, re.MULTILINE) is not None for block in block_list]

    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [findfuncname(block) if isfunc else None
                          for isfunc, block in zip(isfunc_list, block_list)]

    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)

    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)

    ismain_list = [re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
                   for nonfunc in nonfunc_list]

    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)

    newtext_list = []

    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')

    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')

    for mainblock in mainblock_list:
        newtext_list.append(mainblock)

    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext,))
    print('len(newtext) = %r' % (len(newtext),))
    print('len(text) = %r' % (len(text),))

    backup_fpath = ut.augpath(fpath, augext='.bak', augdir='_backup', ensure=True)

    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
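ut.sortedby orders one list by a parallel list of keys (here, function blocks by function name); a short sketch assuming that behavior:

def sortedby(items, keys):
    # Sort items according to the parallel list of keys
    return [item for _, item in sorted(zip(keys, items), key=lambda pair: pair[0])]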
Example 14
def analyze(ibsmap, qreq_dict, species_dict, path_to_file_list, params):
    print('[analyze] Beginning Analyze')
    print('[analyze] Received %d file paths' % (len(path_to_file_list)))
    # decompose the filename to get the car/person to whom this image belongs
    info_tup_list = [preprocess_fpath(ibsmap, species_dict, path_to_file, params) for path_to_file in path_to_file_list]
    is_valid_list = [tup_ is not None for tup_ in info_tup_list]

    # get the ungrouped tuples that were not None
    valid_tup_list_ug = ut.filter_items(info_tup_list, is_valid_list)
    valid_path_list_ug = ut.filter_items(path_to_file_list, is_valid_list)

    # group by species
    valid_species_list_ug = ut.get_list_column(valid_tup_list_ug, 3)
    seen_species = {}
    def get_species_tmpid(txt):
        if txt in seen_species:
            return seen_species[txt]
        else:
            seen_species[txt] = len(seen_species)
            return get_species_tmpid(txt)
    species_tmpid_list = np.array([get_species_tmpid(txt) for txt in valid_species_list_ug])
    #ibs.get_species_rowids_from_text(valid_species_list_ug)
    unique_species_rowids, groupxs = vt.group_indices(np.array(species_tmpid_list))

    grouped_valid_tup_list = vt.apply_grouping(np.array(valid_tup_list_ug, dtype=object), groupxs)
    grouped_path_list = vt.apply_grouping(np.array(valid_path_list_ug, dtype=object), groupxs)

    print('[analyze] Created  %d species groups' % (len(grouped_valid_tup_list)))
    print('[analyze] grouped_valid_tup_list = ' + ut.list_str(grouped_valid_tup_list))
    print('[analyze] grouped_path_list      = ' + ut.list_str(grouped_path_list))

    assert len(grouped_valid_tup_list) == len(grouped_path_list), 'lengths must match for zip'
    for groupx, (tup, valid_path_list) in enumerate(zip(grouped_valid_tup_list, grouped_path_list)):
        car_list, person_list, animal_list, species_list, offset_list, contributor_row_id_list = zip(*tup)

        assert ut.list_allsame(species_list)

        animal = animal_list[0]
        species = species_list[0]
        ibs = ibsmap[animal]
        with ut.Indenter('[GROUP-%d-%s]' % (groupx, species)):
            assert ((animal == 'zebra' and species == species_dict['zebra']) or
                    (animal == 'giraffe' and species == species_dict['giraffe'])), 'animal/species mismatch!'
            # Add image to database
            gid_list = ibs.add_images(valid_path_list, auto_localize=False)

            reported_time_list = list(map(vt.parse_exif_unixtime, valid_path_list))
            actual_unixtime_list = [
                reported_unixtime + offset
                for reported_unixtime, offset in
                zip(reported_time_list, offset_list)
            ]
            ibs.set_image_unixtime(gid_list, actual_unixtime_list, duplicate_behavior='filter')
            ibs.set_image_contributor_rowid(gid_list, contributor_row_id_list, duplicate_behavior='filter')

            print('[analyze] starting detection for %d images and species %s...' % (len(valid_path_list), species))
            qaids_list = ibs.detect_random_forest(gid_list, species=species)
            qaid_list, reverse_list = ut.invertible_flatten2(qaids_list)
            print('\n[analyze] detected %d animals of species %s' % (len(qaid_list), species))

            # if there were no detections, don't bother
            if not qaid_list:
                continue

            # because qreq_ is persistent we need only to update the qaid_list
            qreq_ = qreq_dict[animal]  # there is a qreq_ for each species
            qaid_list_unique, unique_inverse = np.unique(qaid_list, return_inverse=True)
            qreq_.set_external_qaids(qaid_list_unique)
            qres_list_unique = ibs.query_chips(qreq_=qreq_, verbose=False)
            qres_list = ut.list_take(qres_list_unique, unique_inverse)

            # so that we can draw a new bounding box for each detection
            detection_bbox_list = ibs.get_annot_verts(qaid_list)
            detection_bboxes_list = ut.unflatten2(detection_bbox_list, reverse_list)
            qreses_list = ut.unflatten2(qres_list, reverse_list)

            with ut.Indenter('[POSTPROCESS]'):
                for _tup in zip(valid_path_list, detection_bboxes_list, qreses_list,
                                car_list, person_list, animal_list, gid_list, qaids_list):
                    postprocess_result(ibs, _tup, params)

            with ut.Indenter('[REVIEW_CHECK]'):
                for car, person in zip(car_list, person_list):
                    check_if_need_review(person, car, params)
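ut.invertible_flatten2 and ut.unflatten2 flatten a list of lists while remembering the sublist boundaries, so per-annotation results can be regrouped per image. A sketch of that assumed round-trip:

def invertible_flatten2(items_list):
    # Flatten, recording the cumulative end offset of each sublist
    flat, offsets, total = [], [], 0
    for sub in items_list:
        flat.extend(sub)
        total += len(sub)
        offsets.append(total)
    return flat, offsets

def unflatten2(flat, offsets):
    # Regroup a flat list using the recorded offsets
    starts = [0] + offsets[:-1]
    return [flat[lo:hi] for lo, hi in zip(starts, offsets)]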
Example 15
def add_annot_chips(ibs, aid_list, qreq_=None):
    """ annot.chip.add(aid_list)

    CRITICAL FUNCTION MUST EXIST FOR ALL DEPENDENTS
    Adds / ensures / computes a dependent property

    Args:
         aid_list

    Returns:
        returns chip_rowid_list of added (or already existing chips)

    TemplateInfo:
        Tadder_pl_dependant
        parent = annot
        leaf = chip

    CommandLine:
        python -m ibeis.control.manual_chip_funcs --test-add_annot_chips

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_chip_funcs import *  # NOQA
        >>> ibs, qreq_ = testdata_ibs()
        >>> aid_list = ibs._get_all_aids()[::3]
        >>> chip_rowid_list = ibs.add_annot_chips(aid_list, qreq_=qreq_)
        >>> assert len(chip_rowid_list) == len(aid_list)
        >>> ut.assert_all_not_None(chip_rowid_list)

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_chip_funcs import *  # NOQA
        >>> ibs, qreq_ = testdata_ibs()
        >>> aid_list = ibs._get_all_aids()[0:10]
        >>> sub_aid_list1 = aid_list[0:6]
        >>> sub_aid_list2 = aid_list[5:7]
        >>> sub_aid_list3 = aid_list[0:7]
        >>> sub_cid_list1 = ibs.get_annot_chip_rowids(sub_aid_list1, qreq_=qreq_, ensure=True)
        >>> ut.assert_all_not_None(sub_cid_list1)
        >>> ibs.delete_annot_chips(sub_aid_list2)
        >>> sub_cid_list3 = ibs.get_annot_chip_rowids(sub_aid_list3, qreq_=qreq_, ensure=False)
        >>> # Only the last two should be None
        >>> ut.assert_all_not_None(sub_cid_list3)
        >>> assert sub_cid_list3[5:7] == [None, None]
        >>> sub_cid_list3_ensured = ibs.get_annot_chip_rowids(sub_aid_list3, qreq_=qreq_, ensure=True)
        >>> # Only two params should have been computed here
        >>> ut.assert_all_not_None(sub_cid_list3_ensured)

    """
    from ibeis.model.preproc import preproc_chip
    ut.assert_all_not_None(aid_list, 'aid_list')
    # Get requested configuration id
    config_rowid = ibs.get_chip_config_rowid(qreq_=qreq_)
    # Find leaf rowids that need to be computed
    initial_chip_rowid_list = get_annot_chip_rowids_(ibs, aid_list, qreq_=qreq_)
    # Get corresponding "dirty" parent rowids
    isdirty_list = ut.flag_None_items(initial_chip_rowid_list)
    dirty_aid_list = ut.filter_items(aid_list, isdirty_list)
    num_dirty = len(dirty_aid_list)
    if num_dirty > 0:
        #if ut.VERBOSE:
        print('[add_annot_chips] adding %d / %d new chips' % (len(dirty_aid_list), len(aid_list)))
        # Dependent columns do not need true from_superkey getters.
        # We can use the Tgetter_pl_dependant_rowids_ instead
        get_rowid_from_superkey = functools.partial(
            ibs.get_annot_chip_rowids_, qreq_=qreq_)
        proptup_gen = preproc_chip.generate_chip_properties(ibs, dirty_aid_list)
        dirty_params_iter = (
            (aid, config_rowid, chip_uri, chip_width, chip_height)
            for aid, (chip_uri, chip_width, chip_height,) in
            zip(dirty_aid_list, proptup_gen)
        )
        dirty_params_iter = list(dirty_params_iter)
        colnames = ['annot_rowid', 'config_rowid',
                    'chip_uri', 'chip_width', 'chip_height']
        #chip_rowid_list = ibs.dbcache.add_cleanly(
        #    const.CHIP_TABLE, colnames, params_iter, get_rowid_from_superkey)
        ibs.dbcache._add(const.CHIP_TABLE, colnames, dirty_params_iter)
        # Now that the dirty params are added get the correct order of rowids
        chip_rowid_list = get_rowid_from_superkey(aid_list)
    else:
        chip_rowid_list = initial_chip_rowid_list
    return chip_rowid_list