Example #1
def test():
    print('enter test')
    log_fpath1 = utool.get_app_resource_dir('utool', 'test_logfile1.txt')
    log_fpath2 = utool.get_app_resource_dir('utool', 'test_logfile2.txt')

    utool.start_logging(log_fpath1, 'w')
    func1()
    func2()
    utool.stop_logging()

    print('\n\n')
    print('This line is NOT logged')
    print('\n\n')

    utool.start_logging(log_fpath2, 'w')
    print('This line is logged')
    utool.stop_logging()

    log1 = utool.read_from(log_fpath1, verbose=False)
    log2 = utool.read_from(log_fpath2, verbose=False)

    target1 = utool.unindent('''
    <__LOG_START__>
    logging to log_fpath=%r
    [test][func1]enter func1
    [test][func1]exit  func1
    [test][func2]enter func2
    [test][func2][func1]enter func1
    [test][func2][func1]exit  func1
    [test][func2]exit  func2
    <__LOG_STOP__>''' % log_fpath1).strip()

    target2 = utool.unindent('''
    <__LOG_START__>
    logging to log_fpath=%r
    [test]This line is logged
    <__LOG_STOP__>''' % log_fpath2).strip()

    output1 = remove_timestamp(log1).strip()
    output2 = remove_timestamp(log2).strip()

    try:
        assert target1 == output1, 'target1 failed'
        assert target2 == output2, 'target2 failed'
        builtins.print('TEST PASSED')
    except AssertionError:
        builtins.print('\n<!!! TEST FAILED !!!>')

        builtins.print('\ntarget1:')
        builtins.print(target1)
        builtins.print('\noutput1:')
        builtins.print(output1)

        builtins.print('\ntarget2:')
        builtins.print(target2)
        builtins.print('\noutput2:')
        builtins.print(output2)

        builtins.print('</!!! TEST FAILED !!!>\n')
        raise
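
For reference, the start/stop pattern this test exercises reduces to a few lines. A minimal sketch using only the calls shown above; the log filename is hypothetical:

import utool

log_fpath = utool.get_app_resource_dir('utool', 'demo_logfile.txt')  # hypothetical name
utool.start_logging(log_fpath, 'w')  # from here on, printed output is mirrored to the file
print('this line is logged')
utool.stop_logging()                 # printing returns to normal
print('this line is NOT logged')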
Example #2
def get_flann_fpath(dpts,
                    cache_dir='default',
                    cfgstr='',
                    flann_params={},
                    use_params_hash=True,
                    use_data_hash=True,
                    appname='vtool',
                    verbose=True):
    """ returns filepath for flann index """
    if cache_dir == 'default':
        if verbose:
            print('[flann] using default cache dir')
        cache_dir = utool.get_app_resource_dir(appname)
        utool.ensuredir(cache_dir)
    flann_cfgstr = get_flann_cfgstr(dpts,
                                    flann_params,
                                    cfgstr,
                                    use_params_hash=use_params_hash,
                                    use_data_hash=use_data_hash)
    if verbose:
        print('...flann_cache cfgstr = %r: ' % flann_cfgstr)
    # Append any user labels
    flann_fname = 'flann_index' + flann_cfgstr + '.flann'
    flann_fpath = normpath(join(cache_dir, flann_fname))
    return flann_fpath
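
A minimal usage sketch, assuming get_flann_cfgstr is importable alongside this function; the descriptors are hypothetical random data:

import numpy as np

# hypothetical 128-dimensional uint8 descriptors
dpts = np.random.randint(0, 255, (1000, 128)).astype(np.uint8)
flann_fpath = get_flann_fpath(dpts, cfgstr='_demo', appname='vtool')
print(flann_fpath)  # a 'flann_index<...>.flann' path under the vtool resource dir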
Example #3
def test_akmeans(full_test=False,
                 plot_test=False,
                 num_pca_dims=2,
                 data_dim=2,
                 nump=1000):
    import numpy as np
    from vtool import clustering
    nump = nump
    dims = data_dim  # 128
    dtype = np.uint8
    print('Make %d random %d-dimensional %s points.' % (nump, dims, dtype))
    # Seed for a deterministic test
    np.random.seed(42)
    data = np.array(np.random.randint(0, 255, (nump, dims)), dtype=dtype)

    num_clusters = 10
    max_iters = 2
    ave_unchanged_thresh = 0
    ave_unchanged_iterwin = 10
    flann_params = {}

    cache_dir = utool.get_app_resource_dir('vtool', 'test_cache')
    utool.ensuredir(cache_dir)

    # Test precomputing
    dx2_label, centers = clustering.precompute_akmeans(data,
                                                       num_clusters,
                                                       max_iters=max_iters,
                                                       cache_dir=cache_dir)
    # internal names
    datax2_clusterx, centroids = dx2_label, centers

    if plot_test:
        clustering.plot_clusters(data,
                                 datax2_clusterx,
                                 centroids,
                                 num_pca_dims=num_pca_dims)

    assert centers.shape == (num_clusters, dims), 'sanity check'
    assert dx2_label.shape == (nump, ), 'sanity check'

    # Test regular computing
    if full_test:
        dx2_label, centers = clustering.akmeans(data,
                                                num_clusters,
                                                max_iters=max_iters)
        assert centers.shape == (num_clusters, dims), 'sanity check'
        assert dx2_label.shape == (nump, ), 'sanity check'

    if False:
        # other test (development)
        import pyflann
        flann_lib_inst = pyflann.flann
        flann_class_inst = pyflann.FLANN()
        flann_class_inst.build_index(data)
    return locals()
Example #4
def test_file_hash():
    resdir = utool.get_app_resource_dir('utool')
    test_fpath = join(resdir, 'lorium_ipsum.txt')
    if not utool.checkpath(test_fpath, verbose=True, n=100):
        utool.write_to(test_fpath, lorium_text)
    hash_ = utool.get_file_hash(test_fpath)
    target_hash_ = b'\xd1Y\xe5\xa2\xc1\xd8\xb8\nS\xb1?\x16\xfe\xc5\x88\xbd\x9e\xb4\xe3\xda'
    print(repr(hash_))
    print(repr(target_hash_))
    assert hash_ == target_hash_
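
The same check in isolation; a sketch assuming a throwaway file (the digest depends entirely on the file contents, and the 20-byte length is consistent with SHA-1):

import utool
from os.path import join

fpath = join(utool.get_app_resource_dir('utool'), 'hash_demo.txt')  # hypothetical file
utool.write_to(fpath, 'hello world')
print(repr(utool.get_file_hash(fpath)))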
Example #5
def render_latex_text(input_text, nest_in_doc=False, appname='utool', verbose=None):
    """ testing function """
    import utool as ut
    if verbose is None:
        verbose = ut.VERBOSE
    dpath = ut.get_app_resource_dir(appname)
    # put a latex fragment in a full document
    print(input_text)
    pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath, verbose=verbose)
    ut.startfile(pdf_fpath)
    return pdf_fpath
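
Invoking it is a one-liner; a sketch with a hypothetical LaTeX fragment (this compiles a PDF into the utool resource dir and opens it):

pdf_fpath = render_latex_text(r'$a^2 + b^2 = c^2$', verbose=True)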
Example #6
def ensure_model(model, redownload=False):
    try:
        url = MODEL_DOMAIN + MODEL_URLS[model]
        extracted_fpath = ut.grab_file_url(url, appname='ibeis_cnn',
                                           redownload=redownload,
                                           check_hash=True)
    except KeyError as ex:
        ut.printex(ex, 'model is not uploaded', iswarning=True)
        extracted_fpath = ut.unixjoin(ut.get_app_resource_dir('ibeis_cnn'), model)
        ut.assert_exists(extracted_fpath)
    return extracted_fpath
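
A usage sketch; the model key is hypothetical and would need to exist in MODEL_URLS for the download branch, otherwise the function falls back to a previously extracted copy in the ibeis_cnn resource dir:

model_fpath = ensure_model('some_model_key')  # hypothetical key into MODEL_URLS
print(model_fpath)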
Example #7
def compute_distinctivness(vecs_list, species="zebra_plains"):
    """
    hack in distinctivness
    """
    from ibeis.algo.hots import distinctiveness_normalizer

    cachedir = ut.get_app_resource_dir("ibeis", "distinctiveness_model")
    dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(species, cachedir=cachedir)
    dstcnvs_normer.load(cachedir)
    dstncvs_list = [dstcnvs_normer.get_distinctiveness(vecs) for vecs in vecs_list]
    return dstncvs_list
Example #8
def compute_distinctivness(vecs_list, species='zebra_plains'):
    """
    hack in distinctivness
    """
    from ibeis.algo.hots import distinctiveness_normalizer
    cachedir = ut.get_app_resource_dir('ibeis', 'distinctiveness_model')
    dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(
        species, cachedir=cachedir)
    dstcnvs_normer.load(cachedir)
    dstncvs_list = [
        dstcnvs_normer.get_distinctiveness(vecs) for vecs in vecs_list
    ]
    return dstncvs_list
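
A sketch of calling it with hypothetical descriptor arrays, one per annotation; each returned entry holds per-descriptor distinctiveness scores:

import numpy as np

# hypothetical SIFT-like descriptors for two annotations
vecs_list = [np.random.randint(0, 255, (n, 128)).astype(np.uint8) for n in (50, 75)]
dstncvs_list = compute_distinctivness(vecs_list, species='zebra_plains')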
Example #9
def get_cached_vocabs():
    import parse
    # Parse some of the training data from fname
    parse_str = '{}nC={num_cent},{}_DPTS(({num_dpts},{dim}){}'
    smkdir = ut.get_app_resource_dir('smk')
    fname_list = ut.glob(smkdir, 'akmeans*')
    fpath_list = [join(smkdir, fname) for fname in fname_list]
    result_list = [parse.parse(parse_str, fpath) for fpath in fpath_list]
    nCent_list = [int(res['num_cent']) for res in result_list]
    nDpts_list = [int(res['num_dpts']) for res in result_list]
    key_list = zip(nCent_list, nDpts_list)
    fpath_sorted = ut.sortedby(fpath_list, key_list, reverse=True)
    return fpath_sorted
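
The parse pattern pulls the centroid and data-point counts out of cached vocab filenames. A self-contained sketch with a hypothetical filename that follows the expected 'nC=...,..._DPTS((...,...))' shape:

import parse

parse_str = '{}nC={num_cent},{}_DPTS(({num_dpts},{dim}){}'
res = parse.parse(parse_str, 'akmeans_nC=8000,uint8_DPTS((120000,128))_cache.npy')
print(res['num_cent'], res['num_dpts'], res['dim'])  # 8000 120000 128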
Example #10
def find_tomcat(verbose=ut.NOT_QUIET):
    r"""
    Searches likely places for tomcat to be installed

    Returns:
        str: tomcat_dpath

    Ignore:
        locate --regex "tomcat/webapps$"

    CommandLine:
        python -m ibeis find_tomcat

    Example:
        >>> # SCRIPT
        >>> from ibeis.control.wildbook_manager import *  # NOQA
        >>> tomcat_dpath = find_tomcat()
        >>> result = ('tomcat_dpath = %s' % (str(tomcat_dpath),))
        >>> print(result)
    """
    # Tomcat folder must be named one of these and contain specific files
    fname_list = ['Tomcat', 'tomcat']
    #required_subpaths = ['webapps', 'bin', 'bin/catalina.sh']
    required_subpaths = ['webapps']

    # Places for local install of tomcat
    priority_paths = [
        # Number one preference is the CATALINA_HOME directory
        os.environ.get('CATALINA_HOME', None),
        # We put tomcat here if we can't find it
        ut.get_app_resource_dir('ibeis', 'tomcat')
    ]
    if ut.is_developer():
        # For my machine to use local catalina
        dpath_list = []
    else:
        # Places for system install of tomcat
        if ut.WIN32:
            dpath_list = ['C:/Program Files (x86)', 'C:/Program Files']
        elif ut.DARWIN:
            dpath_list = ['/Library']  # + dpath_list
        else:
            dpath_list = ['/var/lib', '/usr/share', '/opt', '/lib']
    return_path = ut.search_candidate_paths(dpath_list,
                                            fname_list,
                                            priority_paths,
                                            required_subpaths,
                                            verbose=verbose)
    tomcat_dpath = return_path
    print('tomcat_dpath = %r ' % (tomcat_dpath, ))
    return tomcat_dpath
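
Because CATALINA_HOME is the first priority path, the search can be steered through the environment; a sketch with a hypothetical install location:

import os

os.environ['CATALINA_HOME'] = '/opt/tomcat'  # hypothetical install location
tomcat_dpath = find_tomcat(verbose=True)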
Example #11
def render_latex_text(input_text,
                      nest_in_doc=False,
                      appname='utool',
                      verbose=None):
    """ testing function """
    import utool as ut
    if verbose is None:
        verbose = ut.VERBOSE
    dpath = ut.get_app_resource_dir(appname)
    # put a latex fragment in a full document
    print(input_text)
    pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath, verbose=verbose)
    ut.startfile(pdf_fpath)
    return pdf_fpath
Example #12
def find_tomcat(verbose=ut.NOT_QUIET):
    r"""
    Searches likely places for tomcat to be installed

    Returns:
        str: tomcat_dpath

    Ignore:
        locate --regex "tomcat/webapps$"

    CommandLine:
        python -m ibeis find_tomcat

    Example:
        >>> # SCRIPT
        >>> from ibeis.control.wildbook_manager import *  # NOQA
        >>> tomcat_dpath = find_tomcat()
        >>> result = ('tomcat_dpath = %s' % (str(tomcat_dpath),))
        >>> print(result)
    """
    # Tomcat folder must be named one of these and contain specific files
    fname_list = ['Tomcat', 'tomcat']
    #required_subpaths = ['webapps', 'bin', 'bin/catalina.sh']
    required_subpaths = ['webapps']

    # Places for local install of tomcat
    priority_paths = [
        # Number one preference is the CATALINA_HOME directory
        os.environ.get('CATALINA_HOME', None),
        # We put tomcat here if we can't find it
        ut.get_app_resource_dir('ibeis', 'tomcat')
    ]
    if ut.is_developer():
        # For my machine to use local catalina
        dpath_list = []
    else:
        # Places for system install of tomcat
        if ut.WIN32:
            dpath_list = ['C:/Program Files (x86)', 'C:/Program Files']
        elif ut.DARWIN:
            dpath_list = ['/Library']  # dpath_list is not yet defined on this branch, so do not append it
        else:
            dpath_list = ['/var/lib', '/usr/share', '/opt', '/lib']
    return_path = ut.search_candidate_paths(
        dpath_list, fname_list, priority_paths, required_subpaths,
        verbose=verbose)
    tomcat_dpath = return_path
    print('tomcat_dpath = %r ' % (tomcat_dpath,))
    return tomcat_dpath
Example #13
def find_tomcat(verbose=ut.NOT_QUIET):
    r"""
    Returns:
        str: tomcat_dpath

    CommandLine:
        python -m ibeis.control.manual_wildbook_funcs --test-find_tomcat
        python -m ibeis --tf find_tomcat

    Example:
        >>> # SCRIPT
        >>> from ibeis.control.manual_wildbook_funcs import *  # NOQA
        >>> tomcat_dpath = find_tomcat()
        >>> result = ('tomcat_dpath = %s' % (str(tomcat_dpath),))
        >>> print(result)
    """
    import utool as ut
    import os
    fname_list = ['Tomcat', 'tomcat']
    if ALLOW_SYSTEM_TOMCAT:
        # Places for system install of tomcat
        if ut.WIN32:
            dpath_list = ['C:/Program Files (x86)', 'C:/Program Files']
        else:
            dpath_list = ['/var/lib', '/usr/share', '/opt', '/lib']
        if ut.DARWIN:
            dpath_list = ['/Library'] + dpath_list
    else:
        dpath_list = []

    priority_paths = [
        # Number one preference is the CATALINA_HOME directory
        os.environ.get('CATALINA_HOME', None),
        # We put tomcat here if we can't find it
        ut.get_app_resource_dir('ibeis', 'tomcat')
    ]

    required_subpaths = [
        'webapps',
        'bin',
        'bin/catalina.sh',
    ]

    return_path = ut.search_candidate_paths(dpath_list, fname_list,
                                            priority_paths, required_subpaths,
                                            verbose=verbose)
    tomcat_dpath = return_path
    print('tomcat_dpath = %r ' % (tomcat_dpath,))
    return tomcat_dpath
Example #14
def learn_visual_words(annots_df, taids, nWords, use_cache=True):
    """
    Computes visual words
    >>> from ibeis.model.hots.smk.smk import *  # NOQA
    >>> ibs, annots_df, taids, daids, qaids, nWords = testdata()
    >>> words = learn_visual_words(annots_df, taids, nWords)
    """
    vecs_list = annots_df['vecs'][taids].as_matrix()
    train_vecs = np.vstack(vecs_list)
    print('Training %d word vocabulary with %d annots and %d descriptors' %
          (nWords, len(taids), len(train_vecs)))
    cache_dir = utool.get_app_resource_dir('smk')
    words = clustertool.cached_akmeans(train_vecs, nWords, max_iters=100,
                                       use_cache=use_cache, cache_dir=cache_dir)
    return words
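
The training matrix is just the row-wise stack of per-annotation descriptor arrays (note that Series.as_matrix() is the old pandas spelling; current pandas uses to_numpy()). A minimal sketch of that stacking step with hypothetical shapes:

import numpy as np

# hypothetical stand-in for annots_df['vecs'][taids]: one (n_i, 128) array per annot
vecs_list = [np.random.randint(0, 255, (n, 128)).astype(np.uint8) for n in (50, 75)]
train_vecs = np.vstack(vecs_list)
print(train_vecs.shape)  # (125, 128)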
Example #15
def reset_local_wildbook():
    r"""
    CommandLine:
        python -m ibeis.control.manual_wildbook_funcs --test-reset_local_wildbook

    Example:
        >>> # SCRIPT
        >>> from ibeis.control.manual_wildbook_funcs import *  # NOQA
        >>> reset_local_wildbook()
    """
    import utool as ut
    try:
        shutdown_wildbook_server()
    except ImportError:
        pass
    ut.delete(ut.unixjoin(ut.get_app_resource_dir('ibeis'), 'tomcat'))
Example #16
def test_akmeans(full_test=False, plot_test=False, num_pca_dims=2, data_dim=2,
                 nump=1000):
    import numpy as np
    from vtool import clustering
    nump = nump
    dims = data_dim  # 128
    dtype = np.uint8
    print('Make %d random %d-dimensional %s points.' % (nump, dims, dtype))
    # Seed for a deterministic test
    np.random.seed(42)
    data = np.array(np.random.randint(0, 255, (nump, dims)), dtype=dtype)

    num_clusters = 10
    max_iters = 2
    ave_unchanged_thresh = 0
    ave_unchanged_iterwin = 10
    flann_params = {}

    cache_dir = utool.get_app_resource_dir('vtool', 'test_cache')
    utool.ensuredir(cache_dir)

    # Test precomputing
    dx2_label, centers = clustering.precompute_akmeans(data, num_clusters,
                                                       max_iters=max_iters,
                                                       cache_dir=cache_dir)
    # internal names
    datax2_clusterx, centroids = dx2_label, centers

    if plot_test:
        clustering.plot_clusters(data, datax2_clusterx, centroids, num_pca_dims=num_pca_dims)

    assert centers.shape == (num_clusters, dims), 'sanity check'
    assert dx2_label.shape == (nump,), 'sanity check'

    # Test regular computing
    if full_test:
        dx2_label, centers = clustering.akmeans(data, num_clusters, max_iters=max_iters)
        assert centers.shape == (num_clusters, dims), 'sanity check'
        assert dx2_label.shape == (nump,), 'sanity check'

    if False:
        # other test (development)
        import pyflann
        flann_lib_inst = pyflann.flann
        flann_class_inst = pyflann.FLANN()
        flann_class_inst.build_index(data)
    return locals()
Example #17
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile       = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths       = utool.list_images(test_image_dir, fullpath=True)   # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results,))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Example #18
def compute_forgroundness(fpath1, kpts1, species='zebra_plains'):
    """
    hack in foregroundness
    """
    import pyrf
    import vtool as vt
    from os.path import exists
    # hack for getting a model (not entirely ibeis independent)
    trees_path = ut.get_app_resource_dir('ibeis', 'detectmodels', 'rf',
                                         species)
    tree_fpath_list = ut.glob(trees_path, '*.txt')
    detector = pyrf.Random_Forest_Detector()
    # TODO; might need to downsample
    forest = detector.forest(tree_fpath_list, verbose=False)
    gpath_list = [fpath1]
    output_gpath_list = [
        gpath + '.' + species + '.probchip.png' for gpath in gpath_list
    ]
    detectkw = {
        'scale_list': [1.15, 1.0, 0.85, 0.7, 0.55, 0.4, 0.25, 0.1],
        'output_gpath_list': output_gpath_list,
        'mode': 1,  # mode one outputs probimage
    }
    results_iter = detector.detect(forest, gpath_list, **detectkw)
    results_list = list(results_iter)  # NOQA
    probchip_list = [
        vt.imread(gpath, grayscale=True) if exists(gpath) else None
        for gpath in output_gpath_list
    ]
    #vtpatch.get_warped_patches()
    fgweights_list = []
    kpts_list = [kpts1]
    for probchip, kpts in zip(probchip_list, kpts_list):
        patch_list = [
            vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0
            for kp in kpts
        ]
        weight_list = [
            vt.gaussian_average_patch(patch) for patch in patch_list
        ]
        #weight_list = [patch.sum() / (patch.size) for patch in patch_list]
        weights = np.array(weight_list, dtype=np.float32)
        fgweights_list.append(weights)
    fgweights = fgweights_list[0]
    detector.free_forest(forest)
    return fgweights
Example #19
def vim_grep_project(pat, hashid=None):
    import vim
    import utool as ut
    ut.ENABLE_COLORS = False
    ut.util_str.ENABLE_COLORS = False
    if hashid is None:
        hashid = ut.hashstr27(pat)
    print('Grepping for pattern = %r' % (pat,))
    msg_list = ut.grep_projects([pat], verbose=False, colored=False)
    fname = 'tmp_grep_' + hashid + '.txt'
    dpath = ut.get_app_resource_dir('utool')
    fpath = ut.unixjoin(dpath, fname)
    #pyvim_funcs.vim_fpath_cmd('split', fpath)
    vim_fpath_cmd('new', fpath)
    text = '\n'.join(msg_list)
    overwrite_text(text)
    vim.command(":exec ':w'")
Example #20
def get_flann_fpath(dpts, cache_dir='default', cfgstr='', flann_params={},
                    use_params_hash=True, use_data_hash=True, appname='vtool',
                    verbose=True):
    """ returns filepath for flann index """
    if cache_dir == 'default':
        if verbose:
            print('[flann] using default cache dir')
        cache_dir = utool.get_app_resource_dir(appname)
        utool.ensuredir(cache_dir)
    flann_cfgstr = get_flann_cfgstr(dpts, flann_params, cfgstr,
                                    use_params_hash=use_params_hash,
                                    use_data_hash=use_data_hash)
    if verbose:
        print('...flann_cache cfgstr = %r: ' % flann_cfgstr)
    # Append any user labels
    flann_fname = 'flann_index' + flann_cfgstr + '.flann'
    flann_fpath = normpath(join(cache_dir, flann_fname))
    return flann_fpath
Example #21
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir,
                                 fullpath=True)  # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results, ))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Example #22
def learn_visual_words(annots_df, taids, nWords, use_cache=True):
    """
    Computes visual words
    >>> from ibeis.model.hots.smk.smk import *  # NOQA
    >>> ibs, annots_df, taids, daids, qaids, nWords = testdata()
    >>> words = learn_visual_words(annots_df, taids, nWords)
    """
    vecs_list = annots_df['vecs'][taids].as_matrix()
    train_vecs = np.vstack(vecs_list)
    print('Training %d word vocabulary with %d annots and %d descriptors' %
          (nWords, len(taids), len(train_vecs)))
    cache_dir = utool.get_app_resource_dir('smk')
    words = clustertool.cached_akmeans(train_vecs,
                                       nWords,
                                       max_iters=100,
                                       use_cache=use_cache,
                                       cache_dir=cache_dir)
    return words
Example #23
def index_data_annots(annots_df, daids, words, with_internals=True):
    """
    Create inverted index for database annotations
    >>> from ibeis.model.hots.smk.smk import *  # NOQA
    >>> ibs, annots_df, taids, daids, qaids, nWords = testdata()
    >>> words = learn_visual_words(annots_df, taids, nWords)
    >>> with_internals = True
    >>> invindex = index_data_annots(annots_df, daids, words, with_internals)
    """
    vecs_list = ensure_values(annots_df['vecs'][daids])
    flann_params = {}
    cache_dir = utool.get_app_resource_dir('smk')
    wordflann = nntool.flann_cache(words, flann_params=flann_params, cache_dir=cache_dir)
    _daids = ensure_values(daids)
    idx2_dvec, idx2_daid, idx2_dfx = nntool.invertable_stack(vecs_list, _daids)
    invindex = InvertedIndex(words, wordflann, idx2_dvec, idx2_daid, idx2_dfx, _daids)
    if with_internals:
        invindex.compute_internals()
    return invindex
Example #24
def testdata_coverage(fname=None):
    """ testing function """
    import vtool as vt
    # build test data
    kpts, vecs = vt.dummy.get_testdata_kpts(fname, with_vecs=True)
    # HACK IN DISTINCTIVENESS
    if fname is not None:
        from ibeis.algo.hots import distinctiveness_normalizer
        cachedir = ut.get_app_resource_dir('ibeis', 'distinctiveness_model')
        species = 'zebra_plains'
        dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(species, cachedir=cachedir)
        dstcnvs_normer.load(cachedir)
        weights = dstcnvs_normer.get_distinctiveness(vecs)
    else:
        kpts = np.vstack((kpts, [0, 0, 1, 1, 1, 0]))
        kpts = np.vstack((kpts, [0.01, 10, 1, 1, 1, 0]))
        kpts = np.vstack((kpts, [0.94, 11.5, 1, 1, 1, 0]))
        weights = np.ones(len(kpts))
    chipsize = tuple(vt.iceil(vt.get_kpts_image_extent(kpts)).tolist())
    return kpts, chipsize, weights
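
The chip size is the integer ceiling of the keypoint extent; a sketch of that last step on a hypothetical keypoint array (rows appear to be 6-component (x, y, iv11, iv21, iv22, ori) keypoints, matching the rows stacked above):

import numpy as np
import vtool as vt

kpts = np.array([[10.0, 20.0, 5.0, 0.0, 5.0, 0.0]])  # one hypothetical keypoint
chipsize = tuple(vt.iceil(vt.get_kpts_image_extent(kpts)).tolist())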
Example #25
def purge_local_wildbook():
    r"""
    Shuts down the server and then purges the server on disk

    CommandLine:
        python -m ibeis purge_local_wildbook
        python -m ibeis purge_local_wildbook --purge-war

    Example:
        >>> # SCRIPT
        >>> from ibeis.control.wildbook_manager import *  # NOQA
        >>> purge_local_wildbook()
    """
    try:
        shutdown_wildbook_server()
    except ImportError:
        pass
    ut.delete(ut.unixjoin(ut.get_app_resource_dir('ibeis'), 'tomcat'))
    if ut.get_argflag('--purge-war'):
        war_fpath = find_or_download_wilbook_warfile(ensure=False)
        ut.delete(war_fpath)
Example #26
def testdata_coverage(fname=None):
    """ testing function """
    import vtool as vt
    # build test data
    kpts, vecs = vt.dummy.get_testdata_kpts(fname, with_vecs=True)
    # HACK IN DISTINCTIVENESS
    if fname is not None:
        from ibeis.algo.hots import distinctiveness_normalizer
        cachedir = ut.get_app_resource_dir('ibeis', 'distinctiveness_model')
        species = 'zebra_plains'
        dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(
            species, cachedir=cachedir)
        dstcnvs_normer.load(cachedir)
        weights = dstcnvs_normer.get_distinctiveness(vecs)
    else:
        kpts = np.vstack((kpts, [0, 0, 1, 1, 1, 0]))
        kpts = np.vstack((kpts, [0.01, 10, 1, 1, 1, 0]))
        kpts = np.vstack((kpts, [0.94, 11.5, 1, 1, 1, 0]))
        weights = np.ones(len(kpts))
    chipsize = tuple(vt.iceil(vt.get_kpts_image_extent(kpts)).tolist())
    return kpts, chipsize, weights
Example #27
def index_data_annots(annots_df, daids, words, with_internals=True):
    """
    Create inverted index for database annotations
    >>> from ibeis.model.hots.smk.smk import *  # NOQA
    >>> ibs, annots_df, taids, daids, qaids, nWords = testdata()
    >>> words = learn_visual_words(annots_df, taids, nWords)
    >>> with_internals = True
    >>> invindex = index_data_annots(annots_df, daids, words, with_internals)
    """
    vecs_list = ensure_values(annots_df['vecs'][daids])
    flann_params = {}
    cache_dir = utool.get_app_resource_dir('smk')
    wordflann = nntool.flann_cache(words,
                                   flann_params=flann_params,
                                   cache_dir=cache_dir)
    _daids = ensure_values(daids)
    idx2_dvec, idx2_daid, idx2_dfx = nntool.invertable_stack(vecs_list, _daids)
    invindex = InvertedIndex(words, wordflann, idx2_dvec, idx2_daid, idx2_dfx,
                             _daids)
    if with_internals:
        invindex.compute_internals()
    return invindex
Example #28
def compute_forgroundness(fpath1, kpts1, species="zebra_plains"):
    """
    hack in foregroundness
    """
    import pyrf
    import vtool as vt
    from os.path import exists

    # hack for getting a model (not entirely ibeis independent)
    trees_path = ut.get_app_resource_dir("ibeis", "detectmodels", "rf", species)
    tree_fpath_list = ut.glob(trees_path, "*.txt")
    detector = pyrf.Random_Forest_Detector()
    # TODO; might need to downsample
    forest = detector.forest(tree_fpath_list, verbose=False)
    gpath_list = [fpath1]
    output_gpath_list = [gpath + "." + species + ".probchip.png" for gpath in gpath_list]
    detectkw = {
        "scale_list": [1.15, 1.0, 0.85, 0.7, 0.55, 0.4, 0.25, 0.1],
        "output_gpath_list": output_gpath_list,
        "mode": 1,  # mode one outputs probimage
    }
    results_iter = detector.detect(forest, gpath_list, **detectkw)
    results_list = list(results_iter)  # NOQA
    probchip_list = [vt.imread(gpath, grayscale=True) if exists(gpath) else None for gpath in output_gpath_list]
    # vtpatch.get_warped_patches()
    fgweights_list = []
    kpts_list = [kpts1]
    for probchip, kpts in zip(probchip_list, kpts_list):
        patch_list = [vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0 for kp in kpts]
        weight_list = [vt.gaussian_average_patch(patch) for patch in patch_list]
        # weight_list = [patch.sum() / (patch.size) for patch in patch_list]
        weights = np.array(weight_list, dtype=np.float32)
        fgweights_list.append(weights)
    fgweights = fgweights_list[0]
    detector.free_forest(forest)
    return fgweights
Example #29
def get_unofficial_package_hrefs(nocache=None):
    """
    Downloads the entire webpage of available hrefs, or returns a cached copy
    """
    if nocache is None:
        nocache = FORCE

    cachedir = ut.get_app_resource_dir('utool')
    try:
        if nocache:
            raise Exception('cachemiss')
        all_href_list = ut.load_cache(cachedir, 'win32_hrefs', 'all_href_list')
        page_str = ut.load_cache(cachedir, 'win32_hrefs', 'page_str')
        print('all_href_list cache hit')
        return all_href_list, page_str
    except Exception:
        print('all_href_list cache miss')
        pass
    # Read page html
    headers = {'User-Agent': 'Mozilla/5.0'}
    print('Sending request to %r' % (UNOFFICIAL_WEBURL, ))
    req = urllib2.Request(UNOFFICIAL_WEBURL, None, headers)
    page = urllib2.urlopen(req)
    page_str = page.read()
    encrypted_lines = list(
        filter(lambda x: x.find('onclick') > -1, page_str.split('\n')))

    print('Read %d encrypted lines' % (len(encrypted_lines),))

    # List of all download links, now choose wisely, because we don't want
    # to hack for evil
    #line = encrypted_lines[0]
    def parse_encrypted(line):
        """
        <script type="text/javascript">
        // <![CDATA[
        if (top.location!=location) top.location.href=location.href;
        function dc(ml,mi){
            var ot="";
            for(var j=0;j<mi.length;j++)
                ot+=String.fromCharCode(ml[mi.charCodeAt(j)-48]);
            document.write(ot);
            }
        function dl1(ml,mi){
            var ot="";
            for(var j=0;j<mi.length;j++)
                ot+=String.fromCharCode(ml[mi.charCodeAt(j)-48]);
            location.href=ot;
            }
        function dl(ml,mi){
        mi=mi.replace('&lt;','<');
        mi=mi.replace('&gt;','>');
        mi=mi.replace('&amp;','&');
        setTimeout(function(){ dl1(ml,mi) }, 1500);}
        // ]]>
        </script>
        #start = line.find('javascript:dl') + len('javascript:dl') + 2
        #end   = line.find('title') - 4
        #code = line[start: end]
        #mid = code.find(']')
        #left = code[0:mid]
        #right = code[mid + 4:]
        #ml = left
        #mi = right
        """
        _, ml, mi, _ = parse.parse('{}javascript:dl([{}], "{}"){}', line)
        mi_ = mi.replace('&lt;', '<').replace('&gt;',
                                              '>').replace('&amp;', '&')

        #ml_ = eval('[' + ml + ']')
        ml_ = eval(ml)
        href_ = ''.join([chr(ml_[ord(michar) - 48]) for michar in mi_])
        href = ''.join([UNOFFICIAL_WEBURL, href_])
        return href

    all_href_list = list(map(parse_encrypted, encrypted_lines))
    print('decrypted %d lines' % (len(all_href_list)))
    ut.save_cache(cachedir, 'win32_hrefs', 'all_href_list', all_href_list)
    ut.save_cache(cachedir, 'win32_hrefs', 'page_str', page_str)
    return all_href_list, page_str
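
The 'encryption' is just an index table: each character of mi selects an entry of ml by its char code minus 48. A self-contained sketch of the decode step with toy inputs:

def decode(ml, mi):
    # each character of mi indexes into ml via its char code minus 48
    return ''.join(chr(ml[ord(ch) - 48]) for ch in mi)

print(decode([104, 105], '01'))  # -> 'hi' (ml holds ord('h'), ord('i'))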
Example #30
from __future__ import absolute_import, division, print_function
import utool
from os.path import exists, join, realpath
(print, print_, printDBG, rrr, profile) = utool.inject(
    __name__, '[grabmodels]', DEBUG=False)


#DETECTMODELS_DIR = realpath(join(dirname(__file__), 'rf'))
DETECTMODELS_DIR = utool.get_app_resource_dir('ibeis', 'detectmodels')

MODEL_DIRS = {
    'rf': join(DETECTMODELS_DIR, 'rf'),
}

MODEL_URLS = {
    'rf': 'https://dl.dropboxusercontent.com/s/9814r3d2rkiq5t3/rf.zip'
}


def assert_models():
    for model_dir in MODEL_DIRS.values():
        assert exists(model_dir), ('model_dir=%r does not exist' % model_dir)


def ensure_models():
    utool.ensuredir(DETECTMODELS_DIR)
    for algo, model_dir in MODEL_DIRS.items():
        if not exists(model_dir):
            download_model(algo, model_dir)
    assert_models()
Example #31
def get_unofficial_package_hrefs(nocache=None):
    """
    Downloads the entire webpage of available hrefs, or returns a cached copy
    """
    if nocache is None:
        nocache = FORCE

    cachedir = ut.get_app_resource_dir('utool')
    try:
        if nocache:
            raise Exception('cachemiss')
        all_href_list = ut.load_cache(cachedir, 'win32_hrefs', 'all_href_list')
        page_str      = ut.load_cache(cachedir, 'win32_hrefs', 'page_str')
        print('all_href_list cache hit')
        return all_href_list, page_str
    except Exception:
        print('all_href_list cache miss')
        pass
    # Read page html
    headers = { 'User-Agent' : 'Mozilla/5.0' }
    print('Sending request to %r' % (UNOFFICIAL_WEBURL,))
    req = urllib2.Request(UNOFFICIAL_WEBURL, None, headers)
    page = urllib2.urlopen(req)
    page_str = page.read()
    encrypted_lines = list(filter(lambda x: x.find('onclick') > -1, page_str.split('\n')))

    print('Read %d encrypted lines' % (len(encrypted_lines),))
    # List of all download links, now choose wisely, because we don't want
    # to hack for evil
    #line = encrypted_lines[0]
    def parse_encrypted(line):
        """
        <script type="text/javascript">
        // <![CDATA[
        if (top.location!=location) top.location.href=location.href;
        function dc(ml,mi){
            var ot="";
            for(var j=0;j<mi.length;j++)
                ot+=String.fromCharCode(ml[mi.charCodeAt(j)-48]);
            document.write(ot);
            }
        function dl1(ml,mi){
            var ot="";
            for(var j=0;j<mi.length;j++)
                ot+=String.fromCharCode(ml[mi.charCodeAt(j)-48]);
            location.href=ot;
            }
        function dl(ml,mi){
        mi=mi.replace('&lt;','<');
        mi=mi.replace('&gt;','>');
        mi=mi.replace('&amp;','&');
        setTimeout(function(){ dl1(ml,mi) }, 1500);}
        // ]]>
        </script>
        #start = line.find('javascript:dl') + len('javascript:dl') + 2
        #end   = line.find('title') - 4
        #code = line[start: end]
        #mid = code.find(']')
        #left = code[0:mid]
        #right = code[mid + 4:]
        #ml = left
        #mi = right
        """
        _, ml, mi, _ = parse.parse('{}javascript:dl([{}], "{}"){}', line)
        mi_ = mi.replace('&lt;', '<').replace('&gt;', '>').replace('&amp;', '&')

        #ml_ = eval('[' + ml + ']')
        ml_ = eval(ml)
        href_ = ''.join([chr(ml_[ord(michar) - 48]) for michar in mi_])
        href  = ''.join([UNOFFICIAL_WEBURL, href_])
        return href
    all_href_list = list(map(parse_encrypted, encrypted_lines))
    print('decrypted %d lines' % (len(all_href_list)))
    ut.save_cache(cachedir, 'win32_hrefs', 'all_href_list', all_href_list)
    ut.save_cache(cachedir, 'win32_hrefs', 'page_str', page_str)
    return all_href_list, page_str
Example #32
def view_app_files_dir(back):
    print('[back] view_model_dir')
    utool.view_directory(utool.get_app_resource_dir('ibeis'))
    pass
Example #33
# -*- coding: utf-8 -*-
import logging
import utool as ut
import six
from os.path import exists, join, realpath

(print, rrr, profile) = ut.inject2(__name__, '[grabmodels]')
logger = logging.getLogger('wbia')

# DETECTMODELS_DIR = realpath(join(dirname(__file__), 'rf'))
DEFAULT_DETECTMODELS_DIR = ut.get_app_resource_dir('wbia', 'detectmodels')

DETECTOR_KEY_RF = 'rf'

MODEL_ALGO_SUBDIRS = {
    DETECTOR_KEY_RF: 'rf',
}

MODEL_URLS = {
    DETECTOR_KEY_RF:
    'https://wildbookiarepository.azureedge.net/models/rf.v3.zip',
}


def _expand_modeldir(modeldir='default'):
    """ returns default unless another path is specified """
    if modeldir == 'default':
        modeldir = DEFAULT_DETECTMODELS_DIR
    return modeldir
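
_expand_modeldir is a plain default/passthrough helper:

print(_expand_modeldir())               # the wbia 'detectmodels' resource dir
print(_expand_modeldir('/tmp/models'))  # explicit paths pass through unchanged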

Example #34
    browser = webdriver.Firefox()

    fp = browser.firefox_profile
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/exe")
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/exe")
    fp.set_preference("browser.download.manager.showWhenStarting", False)
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/binary")
    fp.set_preference('browser.download.folderList', 2)
    fp.set_preference('browser.download.dir', dldir)
else:
    #install32 = normpath(r'C:\Program Files (x86)')
    #chromepath = normpath(r'\Google\Chrome\Application')
    #chromeexe = join(install32, chromepath, 'chrome.exe')
    import utool
    DRIVER_URL = 'http://chromedriver.storage.googleapis.com/2.9/chromedriver_win32.zip'
    chromedriverexe = utool.get_app_resource_dir('utool') + '/chromedriver.exe'
    if not utool.checkpath(chromedriverexe):
        utool.grab_zipped_url(DRIVER_URL, appname='utool')
    print(type(chromedriverexe))
    print(chromedriverexe)
    #chromedriverexe = normpath(r'C:\Users\joncrall\local\PATH\chromedriver.exe')
    browser = webdriver.Chrome(executable_path=chromedriverexe)

weburl = 'http://www.lfd.uci.edu/~gohlke/pythonlibs'
browser.get(weburl)

source = browser.page_source


def clean_package_names(pkgname_list):
    clean_list = []
Example #35
def delete_detection_models(back):
    print('[back] delete_detection_models')
    utool.delete(utool.get_app_resource_dir('ibeis', 'detectmodels'))
    pass
Example #36
def delete_global_prefs(back):
    print('[back] delete_global_prefs')
    # TODO: Add are you sure dialog?
    utool.delete(utool.get_app_resource_dir('ibeis', 'global_cache'))
    pass