def thumb_getter(id_, thumbsize=128):
    """ Thumb getters must conform to thumbtup structure.

    Args:
        id_ (str): test image name (e.g. 'carl.jpg'), or an id not in
            ``imgname_list``.
        thumbsize (int): requested thumb size; part of the getter
            interface, unused here. Defaults to 128.

    Returns:
        dict delegating to ``thread_func`` for unknown ids, None for the
        known-missing image, otherwise a thumbtup:
        (thumb_path, img_path, img_size, bbox_list, theta_list,
        interest_list)
    """
    # Unknown ids are resolved asynchronously via thread_func.
    if id_ not in imgname_list:
        return {
            'fpath': id_ + '.jpg',
            'thread_func': thread_func,
            'main_func': lambda: (id_, ),
        }
    if id_ == 'doesnotexist.jpg':
        # Simulate a missing image.
        # (Removed unreachable assignments that followed this return.)
        return None
    img_path = ut.grab_test_imgpath(id_, verbose=False)
    img_size = vt.open_image_size(img_path)
    # Thumb cache file is keyed by a hash of the source image path.
    thumb_path = join(guitool_test_thumbdir,
                      ut.hashstr(str(img_path)) + '.jpg')
    if id_ == 'carl.jpg':
        bbox_list = [(10, 10, 200, 200)]
        theta_list = [0]
    elif id_ == 'lena.png':
        bbox_list = [None]
        theta_list = [None]
    else:
        bbox_list = []
        theta_list = []
    interest_list = [False]
    thumbtup = (thumb_path, img_path, img_size, bbox_list, theta_list,
                interest_list)
    return thumbtup
Example #2
0
def query_result_fpath(qreq, qaid, cfgstr):
    """Return the .npz result path for a query, hashing overlong cfgstrs."""
    basename = 'res_%s_qaid=%d.npz' % (cfgstr, qaid)
    if len(basename) > 64:
        # Filename would be too long; substitute a hash of the cfgstr.
        basename = 'res_%s_qaid=%d.npz' % (utool.hashstr(cfgstr), qaid)
    return join(qreq.qresdir, basename)
Example #3
0
def remove_corrupted_queries(qresdir, qres, dryrun=True):
    """Delete cached result files matching a corrupted qres's cfgstr/hash."""
    # This qres must be corrupted!
    cfgstr = qres.cfgstr
    hash_id = ut.hashstr(cfgstr)
    # Harness results live one directory above the query result dir.
    testres_dir = join(qresdir, '..', 'harness_results')
    ut.remove_files_in_dir(testres_dir, dryrun=dryrun)
    for pattern in ('*' + cfgstr + '*', '*' + hash_id + '*'):
        ut.remove_files_in_dir(qresdir, pattern, dryrun=dryrun)
Example #4
0
def remove_corrupted_queries(qresdir, qres, dryrun=True):
    """Delete cached result files matching a corrupted qres's cfgstr/hash."""
    # This qres must be corrupted!
    corrupt_cfgstr = qres.cfgstr
    corrupt_hash = ut.hashstr(corrupt_cfgstr)
    # Experiment harness results live one directory above the result dir.
    harness_dir = join(qresdir, '..', 'experiment_harness_results')
    ut.remove_files_in_dir(harness_dir, dryrun=dryrun)
    ut.remove_files_in_dir(qresdir, '*' + corrupt_cfgstr + '*', dryrun=dryrun)
    ut.remove_files_in_dir(qresdir, '*' + corrupt_hash + '*', dryrun=dryrun)
Example #5
0
def assert_testdb_annot_consistency(ibs_gt, ibs2, aid_list1, aid_list2):
    """
    Asserts that two databases agree on annotation visual uuids.

    just tests uuids

    if anything goes wrong this should fix it:
        from ibeis.other import ibsfuncs
        aid_list1 = ibs_gt.get_valid_aids()
        ibs_gt.update_annot_visual_uuids(aid_list1)
        ibs2.update_annot_visual_uuids(aid_list2)
        ibsfuncs.fix_remove_visual_dupliate_annotations(ibs_gt)
    """
    assert len(aid_list2) == len(aid_list1)
    visualtup1 = ibs_gt.get_annot_visual_uuid_info(aid_list1)
    visualtup2 = ibs2.get_annot_visual_uuid_info(aid_list2)

    # Recompute uuids directly from the visual-info tuples for cross-checking.
    _visual_uuid_list1 = [ut.augment_uuid(*tup) for tup in zip(*visualtup1)]
    _visual_uuid_list2 = [ut.augment_uuid(*tup) for tup in zip(*visualtup2)]

    # Raw visual info must hash identically across both databases.
    assert ut.hashstr(visualtup1) == ut.hashstr(visualtup2)
    ut.assert_lists_eq(visualtup1[0], visualtup2[0])
    ut.assert_lists_eq(visualtup1[1], visualtup2[1])
    ut.assert_lists_eq(visualtup1[2], visualtup2[2])
    #semantic_uuid_list1 = ibs_gt.get_annot_semantic_uuids(aid_list1)
    #semantic_uuid_list2 = ibs2.get_annot_semantic_uuids(aid_list2)

    visual_uuid_list1 = ibs_gt.get_annot_visual_uuids(aid_list1)
    visual_uuid_list2 = ibs2.get_annot_visual_uuids(aid_list2)

    # make sure visual uuids are still deterministic
    ut.assert_lists_eq(visual_uuid_list1, visual_uuid_list2)
    ut.assert_lists_eq(_visual_uuid_list1, visual_uuid_list1)
    ut.assert_lists_eq(_visual_uuid_list2, visual_uuid_list2)

    # Verbose mode prints duplicate diagnostics as a side effect.
    if ut.VERBOSE:
        ibs1_dup_annots = ut.debug_duplicate_items(visual_uuid_list1)
        ibs2_dup_annots = ut.debug_duplicate_items(visual_uuid_list2)
    else:
        ibs1_dup_annots = ut.find_duplicate_items(visual_uuid_list1)
        ibs2_dup_annots = ut.find_duplicate_items(visual_uuid_list2)

    # if these fail try ibsfuncs.fix_remove_visual_dupliate_annotations
    assert len(ibs1_dup_annots) == 0
    assert len(ibs2_dup_annots) == 0
Example #6
0
def assert_testdb_annot_consistency(ibs_gt, ibs2, aid_list1, aid_list2):
    """
    Asserts that two databases agree on annotation visual uuids.

    just tests uuids

    if anything goes wrong this should fix it:
        from ibeis.other import ibsfuncs
        aid_list1 = ibs_gt.get_valid_aids()
        ibs_gt.update_annot_visual_uuids(aid_list1)
        ibs2.update_annot_visual_uuids(aid_list2)
        ibsfuncs.fix_remove_visual_dupliate_annotations(ibs_gt)
    """
    assert len(aid_list2) == len(aid_list1)
    visualtup1 = ibs_gt.get_annot_visual_uuid_info(aid_list1)
    visualtup2 = ibs2.get_annot_visual_uuid_info(aid_list2)

    # Recompute uuids directly from the visual-info tuples for cross-checking.
    _visual_uuid_list1 = [ut.augment_uuid(*tup) for tup in zip(*visualtup1)]
    _visual_uuid_list2 = [ut.augment_uuid(*tup) for tup in zip(*visualtup2)]

    # Raw visual info must hash identically across both databases.
    assert ut.hashstr(visualtup1) == ut.hashstr(visualtup2)
    ut.assert_lists_eq(visualtup1[0], visualtup2[0])
    ut.assert_lists_eq(visualtup1[1], visualtup2[1])
    ut.assert_lists_eq(visualtup1[2], visualtup2[2])
    #semantic_uuid_list1 = ibs_gt.get_annot_semantic_uuids(aid_list1)
    #semantic_uuid_list2 = ibs2.get_annot_semantic_uuids(aid_list2)

    visual_uuid_list1 = ibs_gt.get_annot_visual_uuids(aid_list1)
    visual_uuid_list2 = ibs2.get_annot_visual_uuids(aid_list2)

    # make sure visual uuids are still deterministic
    ut.assert_lists_eq(visual_uuid_list1, visual_uuid_list2)
    ut.assert_lists_eq(_visual_uuid_list1, visual_uuid_list1)
    ut.assert_lists_eq(_visual_uuid_list2, visual_uuid_list2)

    # Verbose mode prints duplicate diagnostics as a side effect.
    if ut.VERBOSE:
        ibs1_dup_annots = ut.debug_duplicate_items(visual_uuid_list1)
        ibs2_dup_annots = ut.debug_duplicate_items(visual_uuid_list2)
    else:
        ibs1_dup_annots = ut.find_duplicate_items(visual_uuid_list1)
        ibs2_dup_annots = ut.find_duplicate_items(visual_uuid_list2)

    # if these fail try ibsfuncs.fix_remove_visual_dupliate_annotations
    assert len(ibs1_dup_annots) == 0
    assert len(ibs2_dup_annots) == 0
Example #7
0
def write_to(fpath, to_write, aslines=False, verbose=False,
             onlyifdiff=False, mode='w'):
    """ Writes text to a file

    Args:
        fpath (str): file path
        to_write (str): text to write
        aslines (bool): if True to_write is assumed to be a list of lines
        verbose (bool): verbosity flag
        onlyifdiff (bool): only writes if needed!
            checks hash of to_write vs the hash of the contents of fpath
        mode (str): file open mode (default 'w')
    """
    if onlyifdiff:
        import utool as ut
        # Skip the write entirely when content is unchanged.
        if ut.hashstr(read_from(fpath)) == ut.hashstr(to_write):
            print('[util_io] * no difference')
            return
    # NOTE(review): with the default verbose=False the `verbose is None`
    # branch is unreachable; callers must pass verbose=None explicitly to
    # defer to __PRINT_WRITES__ -- confirm this is intended.
    if verbose or (verbose is None and __PRINT_WRITES__):
        print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath))
    with open(fpath, mode) as file_:
        if aslines:
            file_.writelines(to_write)
        else:
            file_.write(to_write)
Example #8
0
File: smk1.py Project: whaozl/ibeis
def compute_data_gamma_(invindex, use_cache=True):
    """
    Computes the per-annotation gamma term for the database side of SMK.

    >>> from ibeis.model.hots.smk.smk import *  # NOQA
    >>> ibs, annots_df, taids, daids, qaids, nWords = testdata()
    >>> words = learn_visual_words(annots_df, taids, nWords)
    >>> with_internals = True
    >>> invindex = index_data_annots(annots_df, daids, words, with_internals)
    >>> daid2_gamma = compute_data_gamma_(invindex, use_cache=True)
    """
    # Cache key derives from the inverted-index configuration string.
    cache_key = utool.hashstr(invindex.get_cfgstr())
    if use_cache:
        try:
            daid2_gamma = utool.global_cache_read(cache_key, appname='smk')
            #print('gamma_dbg cache hit')
            return daid2_gamma
        except Exception:
            # Cache miss or unreadable cache entry; fall through and recompute.
            pass

    # Grouping by aid and words

    mark, end_ = utool.log_progress(('gamma grouping %s ' % (cache_key, )),
                                    invindex.wx2_drvecs.shape[0],
                                    flushfreq=100)
    daid2_wx2_drvecs = utool.ddict(dict)
    for count, wx in enumerate(invindex.wx2_drvecs.index):
        if count % 100 == 0:
            mark(wx)
        group = invindex.wx2_drvecs[wx].groupby(invindex.idx2_daid)
        for daid, vecs in group:
            daid2_wx2_drvecs[daid][wx] = vecs.values
    end_()

    # Summation over words for each aid
    mark, end_ = utool.log_progress('gamma summation ',
                                    len(daid2_wx2_drvecs),
                                    flushfreq=100)
    daid2_gamma = pd.Series(np.zeros(invindex.daids.shape[0]),
                            index=invindex.daids,
                            name='gamma')
    wx2_weight = invindex.wx2_weight
    for count, (daid,
                wx2_drvecs) in enumerate(six.iteritems(daid2_wx2_drvecs)):
        if count % 100 == 0:
            mark(count)
        wx2_rvecs = wx2_drvecs
        daid2_gamma[daid] = gamma_summation(wx2_rvecs, wx2_weight)
    # Persist for the next call with use_cache=True.
    utool.global_cache_write(cache_key, daid2_gamma, appname='smk')
    return daid2_gamma
Example #9
0
def compute_data_gamma_(invindex, use_cache=True):
    """
    Computes the per-annotation gamma term for the database side of SMK.

    >>> from ibeis.model.hots.smk.smk import *  # NOQA
    >>> ibs, annots_df, taids, daids, qaids, nWords = testdata()
    >>> words = learn_visual_words(annots_df, taids, nWords)
    >>> with_internals = True
    >>> invindex = index_data_annots(annots_df, daids, words, with_internals)
    >>> daid2_gamma = compute_data_gamma_(invindex, use_cache=True)
    """
    # Cache key derives from the inverted-index configuration string.
    cache_key = utool.hashstr(invindex.get_cfgstr())
    if use_cache:
        try:
            daid2_gamma = utool.global_cache_read(cache_key, appname='smk')
            #print('gamma_dbg cache hit')
            return daid2_gamma
        except Exception:
            # Cache miss or unreadable cache entry; fall through and recompute.
            pass

    # Grouping by aid and words

    mark, end_ = utool.log_progress(('gamma grouping %s ' % (cache_key,)),
                                    invindex.wx2_drvecs.shape[0],
                                    flushfreq=100)
    daid2_wx2_drvecs = utool.ddict(dict)
    for count, wx in enumerate(invindex.wx2_drvecs.index):
        if count % 100 == 0:
            mark(wx)
        group  = invindex.wx2_drvecs[wx].groupby(invindex.idx2_daid)
        for daid, vecs in group:
            daid2_wx2_drvecs[daid][wx] = vecs.values
    end_()

    # Summation over words for each aid
    mark, end_ = utool.log_progress('gamma summation ', len(daid2_wx2_drvecs),
                                    flushfreq=100)
    daid2_gamma = pd.Series(
        np.zeros(invindex.daids.shape[0]),
        index=invindex.daids,
        name='gamma')
    wx2_weight = invindex.wx2_weight
    for count, (daid, wx2_drvecs) in enumerate(six.iteritems(daid2_wx2_drvecs)):
        if count % 100 == 0:
            mark(count)
        wx2_rvecs = wx2_drvecs
        daid2_gamma[daid] = gamma_summation(wx2_rvecs, wx2_weight)
    # Persist for the next call with use_cache=True.
    utool.global_cache_write(cache_key, daid2_gamma, appname='smk')
    return daid2_gamma
Example #10
0
def make_dev_rebased_mixin(repo, master, branch):
    """
    Rebase `branch` onto `master` on a throwaway dev branch and squash
    the rebased commits into a single commit.

    Args:
        repo: repository wrapper exposing issue() and chdir_context().
        master (str): name of the base branch to rebase onto.
        branch (str): name of the feature branch to rebase.

    Returns:
        str: name of the dev rebase branch that was created.
    """
    # Clear dev rebase branch
    rebase_branch = 'dev_rebase_' + branch
    dev_branch = rebase_branch

    if branch == 'missing_values_rf':
        # This branch requires a custom rebase procedure.
        reset_dev_branch(repo, branch, rebase_branch)
        missing_values_rf_rebase(repo, master, branch, rebase_branch)
    else:
        reset_dev_branch(repo, branch, rebase_branch)
        repo.issue('git rebase ' + master)
    # # squash everything into a commit
    # Count the commits being squashed so the message can record them.
    with repo.chdir_context():
        out = ut.cmd2('git --no-pager log ' + master +
                      '..HEAD --pretty=oneline')['out']
    n_commits = len(out.split('\n'))
    # Soft-reset to master, then commit everything as one squashed commit
    # whose message embeds a hash of the squashed commit log.
    repo.issue('git reset ' + master)
    msg = 'hash is: ' + ut.hashstr(out)
    repo.issue('git commit -am "Combination of %d commits\n%s"' %
               (n_commits, msg))
    # repo.issue('git reset ' + master)
    return rebase_branch
Example #11
0
 def thumb_getter(id_, thumbsize=128):
     """ Thumb getters must conform to thumbtup structure.

     Args:
         id_ (str): test image name (e.g. 'carl.jpg').
         thumbsize (int): requested thumb size; part of the getter
             interface, unused here. Defaults to 128.

     Returns:
         None for the known-missing image, otherwise a thumbtup:
         (thumb_path, img_path, img_size, bbox_list, theta_list)
     """
     if id_ == 'doesnotexist.jpg':
         # Simulate a missing image.
         # (Removed unreachable assignments that followed this return.)
         return None
     img_path = ut.grab_test_imgpath(id_, verbose=False)
     img_size = vt.open_image_size(img_path)
     # Thumb cache file is keyed by a hash of the source image path.
     thumb_path = join(guitool_test_thumbdir, ut.hashstr(str(img_path)) + '.jpg')
     if id_ == 'carl.jpg':
         bbox_list = [(10, 10, 200, 200)]
         theta_list = [0]
     elif id_ == 'lena.png':
         bbox_list = [None]
         theta_list = [None]
     else:
         bbox_list = []
         theta_list = []
     thumbtup = (thumb_path, img_path, img_size, bbox_list, theta_list)
     return thumbtup
Example #12
0
def write_to(fpath,
             to_write,
             aslines=False,
             verbose=None,
             onlyifdiff=False,
             mode='w',
             n=None):
    """ Writes text to a file. Automatically encodes text as utf8.

    Args:
        fpath (str): file path
        to_write (str): text to write (must be unicode text)
        aslines (bool): if True to_write is assumed to be a list of lines
        verbose (bool): verbosity flag
        onlyifdiff (bool): only writes if needed!
                checks hash of to_write vs the hash of the contents of fpath
        mode (unicode): (default = u'w')
        n (int): path-tail length for the verbose message (default = 2)

    CommandLine:
        python -m utool.util_io --exec-write_to --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_io import *  # NOQA
        >>> import utool as ut
        >>> fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'testwrite.txt')
        >>> ut.delete(fpath)
        >>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
        >>> aslines = False
        >>> verbose = True
        >>> onlyifdiff = False
        >>> mode = u'w'
        >>> n = 2
        >>> write_to(fpath, to_write, aslines, verbose, onlyifdiff, mode, n)
        >>> read_ = ut.read_from(fpath)
        >>> print('read_    = ' + read_)
        >>> print('to_write = ' + to_write)
        >>> assert read_ == to_write
    """
    if onlyifdiff:
        import utool as ut
        # Skip the write entirely when content is unchanged.
        if ut.hashstr(read_from(fpath)) == ut.hashstr(to_write):
            print('[util_io] * no difference')
            return
    verbose = _rectify_verb_write(verbose)
    if verbose:
        # Higher verbosity prints the full path instead of a 2-segment tail.
        n = None if verbose > 1 else 2
        print('[util_io] * Writing to text file: %r ' %
              util_path.tail(fpath, n=n))

    # NOTE(review): backup is hard-disabled by the leading False -- confirm
    # this is intentional before relying on the restore path below.
    backup = False and exists(fpath)
    if backup:
        util_path.copy(fpath, fpath + '.backup')

    if not isinstance(fpath, six.string_types):
        # Assuming a file object with a name attribute
        # Should just read from the file
        fpath = fpath.name

    with open(fpath, mode) as file_:
        if aslines:
            file_.writelines(to_write)
        else:
            # Ensure python2 writes in bytes
            if six.PY2 and isinstance(to_write, unicode):
                to_write = to_write.encode('utf8')
            try:
                file_.write(to_write)
            except UnicodeEncodeError as ex:
                # Print ~10 chars of context around the offending character.
                start = max(ex.args[2] - 10, 0)
                end = ex.args[3] + 10
                context = to_write[start:end]
                print(repr(context))
                print(context)
                from utool import util_dbg
                util_dbg.printex(ex, keys=[(type, 'to_write')])
                file_.close()
                if backup:
                    # restore
                    util_path.copy(fpath + '.backup', fpath)
                # import utool
                # utool.embed()
                raise
Example #13
0
File: smk1.py Project: whaozl/ibeis
 def get_cfgstr(invindex):
     """Return a config string identifying this inverted index by content."""
     # Hash the repr of the word-to-descriptor-vector mapping.
     content_hash = utool.hashstr(repr(invindex.wx2_drvecs))
     return '_{lbl}({hashstr})'.format(lbl='InvIndex', hashstr=content_hash)
Example #14
0
def write_to(fpath, to_write, aslines=False, verbose=None,
             onlyifdiff=False, mode='w', n=None):
    """ Writes text to a file. Automatically encodes text as utf8.

    Args:
        fpath (str): file path
        to_write (str): text to write (must be unicode text)
        aslines (bool): if True to_write is assumed to be a list of lines
        verbose (bool): verbosity flag
        onlyifdiff (bool): only writes if needed!
                checks hash of to_write vs the hash of the contents of fpath
        mode (unicode): (default = u'w')
        n (int): unused in this version (default = 2)

    CommandLine:
        python -m utool.util_io --exec-write_to --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_io import *  # NOQA
        >>> import utool as ut
        >>> fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'testwrite.txt')
        >>> ut.delete(fpath)
        >>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
        >>> aslines = False
        >>> verbose = True
        >>> onlyifdiff = False
        >>> mode = u'w'
        >>> n = 2
        >>> write_to(fpath, to_write, aslines, verbose, onlyifdiff, mode, n)
        >>> read_ = ut.read_from(fpath)
        >>> print('read_    = ' + read_)
        >>> print('to_write = ' + to_write)
        >>> assert read_ == to_write
    """
    if onlyifdiff:
        import utool as ut
        # Skip the write entirely when content is unchanged.
        if ut.hashstr(read_from(fpath)) == ut.hashstr(to_write):
            print('[util_io] * no difference')
            return
    verbose = _rectify_verb_write(verbose)
    if verbose:
        # n = None if verbose > 1 else 2
        # print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath, n=n))
        print('[util_io] * Writing to text file: {}'.format(fpath))

    # NOTE(review): backup is hard-disabled by the leading False -- confirm
    # this is intentional before relying on the restore path below.
    backup = False and exists(fpath)
    if backup:
        util_path.copy(fpath, fpath + '.backup')

    if not isinstance(fpath, six.string_types):
        # Assuming a file object with a name attribute
        # Should just read from the file
        fpath = fpath.name

    with open(fpath, mode) as file_:
        if aslines:
            file_.writelines(to_write)
        else:
            # Ensure python2 writes in bytes
            if six.PY2 and isinstance(to_write, unicode):
                to_write = to_write.encode('utf8')
            try:
                file_.write(to_write)
            except UnicodeEncodeError as ex:
                # Print ~10 chars of context around the offending character.
                start = max(ex.args[2] - 10, 0)
                end = ex.args[3] + 10
                context = to_write[start:end]
                print(repr(context))
                print(context)
                from utool import util_dbg
                util_dbg.printex(ex, keys=[(type, 'to_write')])
                file_.close()
                if backup:
                    # restore
                    util_path.copy(fpath + '.backup', fpath)
                # import utool
                # utool.embed()
                raise
Example #15
0
def test_hashstr():
    """Regression test: hashstr of the lorem text must stay stable."""
    target_hashstr = 'yy7@rnyuhe&zhj0k'
    computed = utool.hashstr(lorium_text)
    print(repr(computed))
    print(repr(target_hashstr))
    assert computed == target_hashstr
Example #16
0
def test_hashstr():
    """Regression test: hashstr of the lorem text must stay stable."""
    actual = utool.hashstr(lorium_text)
    expected = 'yy7@rnyuhe&zhj0k'
    for value in (actual, expected):
        print(repr(value))
    assert actual == expected
Example #17
0
 def get_cfgstr(invindex):
     """Config string for the inverted index, keyed on its drvecs repr."""
     return '_{lbl}({hashstr})'.format(
         lbl='InvIndex',
         hashstr=utool.hashstr(repr(invindex.wx2_drvecs)),
     )