Example #1
def grab_selenium_chromedriver(redownload=False):
    r"""
    Automatically download selenium chrome driver if needed

    CommandLine:
        python -m utool.util_grabdata --test-grab_selenium_chromedriver:1

    Example:
        >>> # DISABLE_DOCTEST
        >>> ut.grab_selenium_chromedriver()
        >>> import selenium.webdriver
        >>> driver = selenium.webdriver.Chrome()
        >>> driver.get('http://www.google.com')
        >>> search_field = driver.find_element_by_name('q')
        >>> search_field.send_keys('puppies')
        >>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)

    Example1:
        >>> # DISABLE_DOCTEST
        >>> import selenium.webdriver
        >>> driver = selenium.webdriver.Firefox()
        >>> driver.get('http://www.google.com')
        >>> search_field = driver.find_element_by_name('q')
        >>> search_field.send_keys('puppies')
        >>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
    """
    import utool as ut
    import os
    import stat
    from os.path import join
    # TODO: use a better download dir (but it must be in the PATH or selenium freaks out)
    chromedriver_dpath = ut.ensuredir(ut.truepath('~/bin'))
    chromedriver_fpath = join(chromedriver_dpath, 'chromedriver')
    if not ut.checkpath(chromedriver_fpath) or redownload:
        assert chromedriver_dpath in os.environ['PATH'].split(os.pathsep)
        # TODO: make this work for windows as well
        if ut.LINUX and ut.util_cplat.is64bit_python():
            import requests
            rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE')
            assert rsp.status_code == 200
            url = 'http://chromedriver.storage.googleapis.com/' + rsp.text.strip() + '/chromedriver_linux64.zip'
            ut.grab_zipped_url(url, download_dir=chromedriver_dpath, redownload=True)
        else:
            raise AssertionError('unsupported chrome driver getter script')
        if not ut.WIN32:
            st = os.stat(chromedriver_fpath)
            os.chmod(chromedriver_fpath, st.st_mode | stat.S_IEXEC)
    ut.assert_exists(chromedriver_fpath)
    os.environ['webdriver.chrome.driver'] = chromedriver_fpath
    return chromedriver_fpath
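The Windows TODO above is straightforward to fill in, since the same Google Storage bucket publishes a chromedriver_win32.zip asset for each legacy release. A minimal standalone sketch (the helper name is hypothetical, not part of utool):

def grab_chromedriver_win32(download_dir):
    # Hedged sketch of the missing Windows branch, mirroring the Linux
    # logic in grab_selenium_chromedriver above.
    import requests
    import utool as ut
    from os.path import join
    rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE')
    assert rsp.status_code == 200
    url = ('http://chromedriver.storage.googleapis.com/' +
           rsp.text.strip() + '/chromedriver_win32.zip')
    ut.grab_zipped_url(url, download_dir=download_dir, redownload=True)
    return join(download_dir, 'chromedriver.exe')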
Example #2
def ensure_ctags_win32():
    import utool as ut
    from os.path import join
    dpath = ut.grab_zipped_url('http://prdownloads.sourceforge.net/ctags/ctags58.zip')
    """
    TODO: Download the zipfile, then unzip and take ONLY the
    file ctags58/ctags58/ctags.exe and move it somewhere in the path
    the best place might be C:\Program Files\Git\mingw64\bin

    ALSO:
    make a win setup file

    Downloads fonts from https://www.dropbox.com/sh/49h1ht1e2t7dlbj/AACzVIDrfn1GkImP5l_C3Vtia?dl=1
    """

    ctags_fname = 'ctags.exe'
    ctags_src = join(dpath, ctags_fname)
    def find_mingw_bin():
        pathdirs = ut.get_path_dirs()
        copydir = None
        # heuristic for finding mingw bin
        for pathdir in pathdirs:
            pathdir_ = pathdir.lower()
            ismingwbin = (pathdir_.find('mingw') > -1 and pathdir_.endswith('bin'))
            if ismingwbin:
                issmaller = (copydir is None or len(pathdir) < len(copydir))
                if issmaller:
                    copydir = pathdir
        return copydir
    copydir = find_mingw_bin()
    ctags_dst = join(copydir, ctags_fname)
    ut.copy(ctags_src, ctags_dst, overwrite=False)
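The docstring TODO (take ONLY ctags.exe out of the archive instead of unpacking everything) needs nothing beyond the standard library. A minimal sketch, where the member path is an assumption taken from the docstring:

def extract_single_member(zip_fpath, member, dst_dpath):
    # Hedged sketch for the docstring TODO: pull one member out of a
    # downloaded archive, dropping its internal directory prefix.
    import zipfile
    from os.path import basename, join
    with zipfile.ZipFile(zip_fpath) as zfile:
        data = zfile.read(member)
    dst_fpath = join(dst_dpath, basename(member))
    with open(dst_fpath, 'wb') as file_:
        file_.write(data)
    return dst_fpath

For instance, extract_single_member(zip_fpath, 'ctags58/ctags58/ctags.exe', find_mingw_bin()) would land ctags.exe directly in the mingw bin directory.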
Example #3
def ensure_db_from_url(zipped_db_url):
    """ SeeAlso ibeis.init.sysres """
    from ibeis import sysres
    workdir = sysres.get_workdir()
    dbdir = ut.grab_zipped_url(zipped_url=zipped_db_url, ensure=True, download_dir=workdir)
    print('have %s=%r' % (zipped_db_url, dbdir,))
    return dbdir
Example #4
def init_console2():
    assert ut.WIN32, 'win32 only script'
    url = 'http://downloads.sourceforge.net/project/console/console-devel/2.00/Console-2.00b148-Beta_32bit.zip'
    unzipped_fpath = ut.grab_zipped_url(url)
    # FIXME: bugged
    unzipped_fpath2 = join(dirname(unzipped_fpath), 'Console2')
    win32_bin = ut.truepath('~/local/PATH')
    ut.copy(ut.ls(unzipped_fpath2), win32_bin)
Example #5
def ensure_db_from_url(zipped_db_url):
    """ SeeAlso ibeis.init.sysres """
    from ibeis import sysres
    dbname = 'testdb2'
    workdir = sysres.get_workdir()
    zipped_db_url = 'https://dl.dropboxusercontent.com/s/or2ngpaodrb42gd/testdb2.tar.gz'
    dbdir = ut.grab_zipped_url(zipped_db_url, ensure=True, download_dir=workdir)
    print('have %s=%r' % (dbname, dbdir,))
Example #6
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m ibeis.init.sysres --exec-ensure_pz_mtest
        python -m ibeis --tf ensure_pz_mtest

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    print('ensure_pz_mtest')
    from ibeis import sysres
    workdir = sysres.get_workdir()
    mtest_zipped_url = const.ZIPPED_URLS.PZ_MTEST
    mtest_dir = ut.grab_zipped_url(mtest_zipped_url, ensure=True, download_dir=workdir)
    print('have mtest_dir=%r' % (mtest_dir,))
    # update to the newest database version
    import ibeis
    ibs = ibeis.opendb('PZ_MTEST')
    print('cleaning up old database and ensuring everything is properly computed')
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # Remove old imagesets and update to new special ones
    all_imgset_ids = ibs.get_valid_imgsetids()
    special_imgset_ids = ibs.get_special_imgsetids()
    other_imgset_ids = ut.setdiff(all_imgset_ids, special_imgset_ids)
    ibs.delete_imagesets(other_imgset_ids)
    ibs.set_exemplars_from_quality_and_viewpoint()
    ibs.update_all_image_special_imageset()

    occurrence_gids = [2, 9, 12, 16, 25, 26, 29, 30, 32, 33, 35, 46, 47, 52,
                       57, 61, 66, 70, 71, 73, 74, 76, 77, 78, 79, 87, 88, 90,
                       96, 97, 103, 106, 108, 110, 112, 113]

    other_gids = ut.setdiff(ibs.get_valid_gids(), occurrence_gids)
    other_gids1 = other_gids[0::2]
    other_gids2 = other_gids[1::2]
    ibs.set_image_imagesettext(occurrence_gids, ['Occurrence 1'] * len(occurrence_gids))
    ibs.set_image_imagesettext(other_gids1, ['Occurrence 2'] * len(other_gids1))
    ibs.set_image_imagesettext(other_gids2, ['Occurrence 3'] * len(other_gids2))

    # hack in some tags
    print('Hacking in some tags')
    foal_aids = [4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83, 91, 97, 103, 107, 109, 119]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))
Example #7
def grab_mnist1():
    # This is the same mnist data used in the lasagne script
    train_imgs_fpath = ut.grab_zipped_url(
        'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')
    train_lbls_fpath = ut.grab_zipped_url(
        'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz')
    test_imgs_fpath = ut.grab_zipped_url(
        'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz')
    test_lbls_fpath = ut.grab_zipped_url(
        'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz')

    train_images, train_labels = open_mnist_files(train_imgs_fpath,
                                                  train_lbls_fpath)
    test_images, test_labels = open_mnist_files(test_imgs_fpath,
                                                test_lbls_fpath)
    data = np.vstack((train_images, test_images))
    labels = np.append(train_labels, test_labels)
    metadata = None
    return data, labels, metadata
Example #8
def ensure_db_from_url(zipped_db_url):
    """ SeeAlso wbia.init.sysres """
    from wbia import sysres

    workdir = sysres.get_workdir()
    dbdir = ut.grab_zipped_url(
        zipped_url=zipped_db_url, ensure=True, download_dir=workdir
    )
    logger.info('have %s=%r' % (zipped_db_url, dbdir,))
    return dbdir
Example #9
def get_testdata_dir(ensure=True, key='testdb1'):
    """
    Gets the test image directory and downloads it if it doesn't exist
    """
    testdata_map = {
        'testdb1': 'https://dl.dropboxusercontent.com/s/of2s82ed4xf86m6/testdata.zip',
    }
    zipped_testdata_url = testdata_map[key]
    testdata_dir = ut.grab_zipped_url(zipped_testdata_url, ensure=ensure)
    return testdata_dir
Example #10
def get_testdata_dir(ensure=True, key='testdb1'):
    """
    Gets the test image directory and downloads it if it doesn't exist
    """
    testdata_map = {
        'testdb1': 'https://cthulhu.dyn.wildme.io/public/data/testdata.zip'
    }
    zipped_testdata_url = testdata_map[key]
    testdata_dir = utool.grab_zipped_url(zipped_testdata_url, ensure=ensure)
    return testdata_dir
Example #11
def get_testdata_dir(ensure=True, key='testdb1'):
    """
    Gets the test image directory and downloads it if it doesn't exist
    """
    testdata_map = {
        'testdb1': 'https://lev.cs.rpi.edu/public/data/testdata.zip'
    }
    zipped_testdata_url = testdata_map[key]
    testdata_dir = utool.grab_zipped_url(zipped_testdata_url, ensure=ensure)
    return testdata_dir
Example #12
def get_testdata_dir(ensure=True, key='testdb1'):
    """
    Gets the test image directory and downloads it if it doesn't exist
    """
    testdata_map = {
        'testdb1': 'https://lev.cs.rpi.edu/public/data/testdata.zip',
    }
    zipped_testdata_url = testdata_map[key]
    testdata_dir = ut.grab_zipped_url(zipped_testdata_url, ensure=ensure)
    return testdata_dir
Example #13
    def __init__(bing,
                 default=True,
                 verbose=VERBOSE_BING,
                 quiet=QUIET_BING,
                 **kwargs):
        '''
            Create the C object for the PyBING detector.

            Args:
                verbose (bool, optional): verbose flag; defaults to --verbbing flag

            Kwargs:
                base (int)
                W (int)
                NNS (int)

            Returns:
                detector (object): the BING Detector object
        '''
        bing.verbose = verbose
        bing.quiet = quiet

        # Default values
        params = odict([
            ('base', 2.0),
            ('W', 8),
            ('NNS', 2),
            ('verbose', verbose),
            ('quiet', quiet),
        ])
        params.update(kwargs)
        params_list = list(params.values())

        if bing.verbose and not bing.quiet:
            """ debug with dmesg | tail -n 200 """
            print('[pybing.py] Start Create New BING Object')
            ut.print_dict(params)
            print('[pybing.py] params_list = %r' % (params_list, ))
            print('[pybing.py] type of params = %r' %
                  (list(map(type, params_list)), ))
            pass

        bing.detector_c_obj = BING_CLIB.init(*params_list)

        if bing.verbose and not bing.quiet:
            print('[pybing.py] Finished Create New BING Object')

        if default:
            model_path = ut.grab_zipped_url(VOC2007_MODEL_URL,
                                            appname='pybing')
            model_path = join(model_path, 'model')
            print('Loading models: %r' % (model_path, ))
            bing.model(model_path)
Example #14
def ensure_nongit_plugins():
    import utool as ut
    import REPOS1

    BUNDLE_DPATH = util_git1.BUNDLE_DPATH
    for url in REPOS1.VIM_NONGIT_PLUGINS:
        fpath = ut.grab_zipped_url(url, download_dir=BUNDLE_DPATH)
        if fpath.endswith(".vba"):
            cmd_ = "vim " + fpath + ' -c "so % | q"'
            ut.cmd(cmd_)
        print("url = %r" % (url,))
        pass
Example #15
def build_command():
    """ Build command run by utool.util_setup """
    if util_cplat.WIN32:
        fftw_win32_url = 'ftp://ftp.fftw.org/pub/fftw/fftw-3.3.4-dll32.zip'
        #fftw_win64_url = 'ftp://ftp.fftw.org/pub/fftw/fftw-3.3.4-dll64.zip'
        # Ensure you have everything to build on windows
        setup_dir = utool.dirname(__file__)
        fftw_dir = utool.grab_zipped_url(fftw_win32_url, download_dir=setup_dir)
        print(fftw_dir)
        util_cplat.shell('mingw_build.bat')
    else:
        util_cplat.shell('./unix_build.sh')
Example #16
def ensure_nongit_plugins():
    try:
        import utool as ut
        import REPOS1
        BUNDLE_DPATH = util_git1.BUNDLE_DPATH
        for url in REPOS1.VIM_NONGIT_PLUGINS:
            fpath = ut.grab_zipped_url(url, download_dir=BUNDLE_DPATH)
            if fpath.endswith('.vba'):
                cmd_ = 'vim ' + fpath + ' -c "so % | q"'
                ut.cmd(cmd_)
            print('url = %r' % (url,))
    except ImportError:
        print("Can't do nongit plugins without utool")
Example #17
    def __init__(bing, default=True, verbose=VERBOSE_BING, quiet=QUIET_BING, **kwargs):
        '''
            Create the C object for the PyBING detector.

            Args:
                verbose (bool, optional): verbose flag; defaults to --verbbing flag

            Kwargs:
                base (int)
                W (int)
                NNS (int)

            Returns:
                detector (object): the BING Detector object
        '''
        bing.verbose = verbose
        bing.quiet = quiet

        # Default values
        params = odict([
            ('base',       2.0),
            ('W',          8),
            ('NNS',        2),
            ('verbose',    verbose),
            ('quiet',      quiet),
        ])
        params.update(kwargs)
        params_list = list(params.values())

        if bing.verbose and not bing.quiet:
            """ debug with dmesg | tail -n 200 """
            print('[pybing.py] Start Create New BING Object')
            ut.print_dict(params)
            print('[pybing.py] params_list = %r' % (params_list,))
            print('[pybing.py] type of params = %r' % (list(map(type, params_list)),))
            pass

        bing.detector_c_obj = BING_CLIB.init(*params_list)

        if bing.verbose and not bing.quiet:
            print('[pybing.py] Finished Create New BING Object')

        if default:
            model_path = ut.grab_zipped_url(VOC2007_MODEL_URL, appname='pybing')
            model_path = join(model_path, 'model')
            print('Loading models: %r' % (model_path, ))
            bing.model(model_path)
Example #18
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile       = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths       = utool.list_images(test_image_dir, fullpath=True)   # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results,))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Example #19
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir,
                                 fullpath=True)  # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results, ))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Example #20
def ensure_db_from_url(zipped_db_url):
    """ SeeAlso wbia.init.sysres """
    from wbia import sysres

    workdir = sysres.get_workdir()
    dbdir = ut.grab_zipped_url(
        zipped_url=zipped_db_url, ensure=True, download_dir=workdir
    )

    # Determine if the implementation is using a URI for database connection.
    # This is confusing, sorry. If the URI is set we are using a non-sqlite
    # database connection. As such, we must translate the sqlite db.
    uri = get_wbia_db_uri(dbdir)
    if uri:
        logger.info(f"Copying '{dbdir}' databases to the database at: {uri}")
        for _, future, _, _ in copy_sqlite_to_postgres(Path(dbdir), uri):
            future.result()  # will raise if there is a problem

    logger.info('have %s=%r' % (zipped_db_url, dbdir))
    return dbdir
Example #21
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m ibeis.init.sysres --exec-ensure_pz_mtest
        python -m ibeis --tf ensure_pz_mtest

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    print('ensure_pz_mtest')
    from ibeis import sysres
    workdir = sysres.get_workdir()
    mtest_zipped_url = const.ZIPPED_URLS.PZ_MTEST
    mtest_dir = ut.grab_zipped_url(mtest_zipped_url, ensure=True, download_dir=workdir)
    print('have mtest_dir=%r' % (mtest_dir,))
    # update to the newest database version
    import ibeis
    ibs = ibeis.opendb('PZ_MTEST')
    print('cleaning up old database and ensuring everything is properly computed')
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # hack in some tags
    print('Hacking in some tags')
    foal_aids = [4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83, 91, 97, 103, 107, 109, 119]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))
Example #22
def ensure_ctags_win32():
    import utool as ut
    from os.path import join

    dpath = ut.grab_zipped_url("http://prdownloads.sourceforge.net/ctags/ctags58.zip")
    ctags_fname = "ctags.exe"
    ctags_src = join(dpath, ctags_fname)

    def find_mingw_bin():
        pathdirs = ut.get_path_dirs()
        copydir = None
        # heuristic for finding mingw bin
        for pathdir in pathdirs:
            pathdir_ = pathdir.lower()
            ismingwbin = pathdir_.find("mingw") > -1 and pathdir_.endswith("bin")
            if ismingwbin:
                issmaller = copydir is None or len(pathdir) < len(copydir)
                if issmaller:
                    copydir = pathdir
        return copydir

    copydir = find_mingw_bin()
    ctags_dst = join(copydir, ctags_fname)
    ut.copy(ctags_src, ctags_dst, overwrite=False)
Example #23
def download_baseline_distinctiveness_normalizer(cachedir, species):
    zipped_url = BASELINE_DISTINCTIVNESS_URLS[species]
    ut.grab_zipped_url(zipped_url, ensure=True, download_dir=cachedir)
Example #24
#!/usr/bin/env python
from six.moves import input
import sys
import random
import numpy
from yael import ynumpy

# ensure the test data
import utool
utool.ensuredir('selective_match_kernel_v289')
url = 'https://gforge.inria.fr/frs/download.php/file/33650/yael_mini_demo_v0.2.tgz'
utool.grab_zipped_url(url, download_dir='.')

# change directory to test data
import os
os.chdir('yael_mini_demo_v0.2')

# prepare for plotting
try:
    from plottool.__MPL_INIT__ import init_matplotlib
    init_matplotlib()
    #import matplotlib as mpl
    #mpl.use('Qt4Agg')
    import matplotlib.pyplot as plt
    # Python Imaging Library (used to load images)
    from PIL import Image
    show = True
    plt.ion()
except (ImportError, RuntimeError):
    print("Cannot import matplotlib or PIL. Not showing graphics!")
    show = False
Example #25
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
downloads an archive file and then unzips it into a directory with the same name
as the archive (sans the .zip or .tar.gz)

grabzippedurl.py "https://download.zotero.org/standalone/4.0.26.3/Zotero-4.0.26.3_linux-x86_64.tar.bz2"

"""
from __future__ import absolute_import, division, print_function
import sys
import utool

if __name__ == '__main__':
    url = sys.argv[1]
    download_dir = '.'
    utool.grab_zipped_url(url,
                          download_dir=download_dir,
                          cleanup=False,
                          spoof=True)
Example #26
    # Create a directory for the demo database
    workdir = ibeis.get_workdir()
    demodir = join(workdir, 'demo')

    if utool.get_arg('--reset'):
        # Remove the previous demo if it exists
        utool.delete(demodir)

    # Start a new database there
    main_locals = ibeis.main(dbdir=demodir)

    # Get a handle to the GUIBackend Control
    back = main_locals['back']

    # Get a directory with some images in it

    testurl = 'https://www.dropbox.com/s/s4gkjyxjgghr18c/testdata_detect.zip'
    testdir = utool.grab_zipped_url(testurl)

    execstr = ibeis.main_loop(main_locals)
    exec(execstr)

    script = """
    back.import_images_from_dir(testdir)
    back.detect_grevys_quick()
    back.compute_encounters()
    """

    #execstr = ibeis.main_loop(main_locals)
    #exec(execstr)
Example #27
def ensure_db_from_url(zipped_db_url):
    """ SeeAlso ibeis.init.sysres """
    from ibeis import sysres
    workdir = sysres.get_workdir()
    dbdir = ut.grab_zipped_url(zipped_url=zipped_db_url, ensure=True, download_dir=workdir)
    print('have %s=%r' % (zipped_db_url, dbdir,))
    return dbdir
Example #28
def grab_liberty_siam_dataset(pairs=250000):
    """
    References:
        http://www.cs.ubc.ca/~mbrown/patchdata/patchdata.html
        https://github.com/osdf/datasets/blob/master/patchdata/dataset.py

    Notes:
        "info.txt" contains the match information Each row of info.txt
        corresponds corresponds to a separate patch, with the patches ordered
        from left to right and top to bottom in each bitmap image.

        3 types of metadata files

        info.txt - contains patch ids that correspond with the order of patches
          in the bmp images
          In the format:
              pointid, unused

        interest.txt -
            interest points corresponding to patches with patchids
            has same number of rows as info.txt
            In the format:
                reference image id, x, y, orientation, scale (in log2 units)

        m50_<d>_<d>_0.txt -
             matches files
             patchID1  3DpointID1  unused1  patchID2  3DpointID2  unused2

    CommandLine:
        python -m ibeis_cnn.ingest_data --test-grab_liberty_siam_dataset --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.ingest_data import *  # NOQA
        >>> pairs = 500
        >>> dataset = grab_liberty_siam_dataset(pairs)
        >>> ut.quit_if_noshow()
        >>> from ibeis_cnn import draw_results
        >>> #ibsplugin.rrr()
        >>> flat_metadata = {}
        >>> data, labels = dataset.subset('full')
        >>> ut.quit_if_noshow()
        >>> warped_patch1_list = data[::2]
        >>> warped_patch2_list = data[1::2]
        >>> dataset.interact()
        >>> ut.show_if_requested()
    """
    datakw = {
        'detector': 'dog',
        'pairs': pairs,
    }

    assert datakw['detector'] in ['dog', 'harris']
    assert pairs in [500, 50000, 100000, 250000]

    liberty_urls = {
        'dog': 'http://www.cs.ubc.ca/~mbrown/patchdata/liberty.zip',
        'harris': 'http://www.cs.ubc.ca/~mbrown/patchdata/liberty_harris.zip',
    }
    url = liberty_urls[datakw['detector']]
    ds_path = ut.grab_zipped_url(url)

    ds_name = splitext(basename(ds_path))[0]
    alias_key = 'liberty;' + ut.dict_str(datakw, nl=False, explicit=True)
    cfgstr = ','.join([str(val) for key, val in ut.iteritems_sorted(datakw)])

    # TODO: allow a move of the base data prefix

    training_dpath = ut.ensure_app_resource_dir('ibeis_cnn', 'training',
                                                ds_name)
    if ut.get_argflag('--vtd'):
        ut.vd(training_dpath)
    ut.ensuredir(training_dpath)

    data_fpath = join(training_dpath, 'liberty_data_' + cfgstr + '.pkl')
    labels_fpath = join(training_dpath, 'liberty_labels_' + cfgstr + '.pkl')

    if not ut.checkpath(data_fpath, verbose=True):
        data, labels = ingest_helpers.extract_liberty_style_patches(
            ds_path, pairs)
        ut.save_data(data_fpath, data)
        ut.save_data(labels_fpath, labels)

    # hack for caching num_labels
    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=None,
        training_dpath=training_dpath,
        data_shape=(64, 64, 1),
        data_per_label=2,
        output_dims=1,
        num_labels=num_labels,
    )
    return dataset
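The metadata layout documented in the docstring above (info.txt and interest.txt are whitespace-separated with one row per patch, in the same order as the patches appear in the bitmap images) can be loaded with numpy alone. A hedged sketch, assuming the files sit in ds_path as described:

def load_liberty_metadata(ds_path):
    # Hedged sketch based only on the docstring's format description.
    import numpy as np
    from os.path import join
    # info.txt rows: pointid, unused
    info = np.loadtxt(join(ds_path, 'info.txt'), dtype=np.int64)
    point_ids = info[:, 0]
    # interest.txt rows: reference image id, x, y, orientation, log2 scale
    interest = np.loadtxt(join(ds_path, 'interest.txt'))
    assert len(interest) == len(point_ids)
    return point_ids, interest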
Example #29
# DUPLICATE CODE, DELETE
from __future__ import absolute_import, division, print_function
from plottool import interact_multi_image
from plottool import draw_func2 as df2
import utool
#import ibeis


def test_interact_multimage(imgpaths):
    print("len: ", len(imgpaths))
    bboxes_list = [[]] * len(imgpaths)

    bboxes_list[0] = [(-200, -100, 400, 400)]
    print(bboxes_list)
    iteract_obj = interact_multi_image.MultiImageInteraction(imgpaths, nPerPage=4, bboxes_list=bboxes_list)
# def test_interact_multimage(imgpaths, gid_list=None, aids_list=None, bboxes_list=None):
#     img_list = imread_many(imgpaths)
#     iteract_obj = interact_multi_image.MultiImageInteraction(img_list +
#                                                              img_list,
#                                                              gid_list, aids_list, bboxes_list,
#                                                              nPerPage=6)
    return iteract_obj

if __name__ == '__main__':
    TEST_IMAGES_URL = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths       = utool.list_images(test_image_dir, fullpath=True, recursive=False)   # test image paths
    iteract_obj = test_interact_multimage(imgpaths)
    exec(df2.present())
Example #30
page = urllib2.urlopen(req)
page_str = page.read()
page_str = ut.ensure_unicode(page_str)

next = False
lines = page_str.split('\n')
for index, x in enumerate(lines):
    if next:
        print(x)
        import parse
        url_suffix = parse.parse('{foo}href="{href}"{other}', x)['href']
        url = r'https://cmake.org' + url_suffix
        break
    if 'Linux x86_64' in x:
        next = True
url = url.replace('.sh', '.tar.gz')
cmake_unzipped_fpath = ut.grab_zipped_url(url)
install_prefix = ut.unixpath('~')
for dname in ['bin', 'doc', 'man', 'share']:
    install_dst = join(install_prefix, dname)
    install_src = join(cmake_unzipped_fpath, dname)
    # FIXME: this broke
    #ut.util_path.copy(install_src, install_dst)
    # HACK AROUND IT
    from os.path import dirname
    cmd = str('cp -r "' + install_src + '" "' + dirname(install_dst) + '"')
    print(cmd)
    ut.cmd(cmd)
    #os.system(cmd)
print(cmake_unzipped_fpath)
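The line-scanning loop above depends on the 'Linux x86_64' label appearing on the line just before the href. A hedged regex alternative that searches the whole page at once, under the same assumption that the asset filename contains 'Linux-x86_64':

import re

def find_linux_x86_64_url(page_str):
    # Hedged sketch: take the first href naming a Linux x86_64 asset.
    match = re.search(r'href="([^"]*Linux-x86_64[^"]*)"', page_str)
    return None if match is None else r'https://cmake.org' + match.group(1)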
Example #31
#!/usr/bin/env python
from six.moves import input
import sys
import random
import numpy
from yael import ynumpy

# ensure the test data
import utool
utool.ensuredir('selective_match_kernel_v289')
url = 'https://gforge.inria.fr/frs/download.php/file/33650/yael_mini_demo_v0.2.tgz'
utool.grab_zipped_url(url, download_dir='.')

# change directory to test data
import os
os.chdir('yael_mini_demo_v0.2')

# prepare for plotting
try:
    from plottool.__MPL_INIT__ import init_matplotlib
    init_matplotlib()
    #import matplotlib as mpl
    #mpl.use('Qt4Agg')
    import matplotlib.pyplot as plt
    # Python Imaging Library (used to load images)
    from PIL import Image
    show = True
    plt.ion()
except (ImportError, RuntimeError):
    print("Cannot import matplotlib or PIL. Not showing graphics!")
    show = False
Example #32
    fp = browser.firefox_profile
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/exe")
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/exe")
    fp.set_preference("browser.download.manager.showWhenStarting", False)
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/binary")
    fp.set_preference('browser.download.folderList', 2)
    fp.set_preference('browser.download.dir', dldir)
else:
    #install32 = normpath(r'C:\Program Files (x86)')
    #chromepath = normpath(r'\Google\Chrome\Application')
    #chromeexe = join(install32, chromepath, 'chrome.exe')
    import utool
    DRIVER_URL = 'http://chromedriver.storage.googleapis.com/2.9/chromedriver_win32.zip'
    chromedriverexe = utool.get_app_resource_dir('utool') + '/chromedriver.exe'
    if not utool.checkpath(chromedriverexe):
        utool.grab_zipped_url(DRIVER_URL, appname='utool')
    print(type(chromedriverexe))
    print(chromedriverexe)
    #chromedriverexe = normpath(r'C:\Users\joncrall\local\PATH\chromedriver.exe')
    browser = webdriver.Chrome(executable_path=chromedriverexe)

weburl = 'http://www.lfd.uci.edu/~gohlke/pythonlibs'
browser.get(weburl)

source = browser.page_source


def clean_package_names(pkgname_list):
    clean_list = []
    unsuported_list = []
    for pkgname in pkgname_list:
Example #33
def download_baseline_distinctiveness_normalizer(cachedir, species):
    zipped_url = BASELINE_DISTINCTIVNESS_URLS[species]
    ut.grab_zipped_url(zipped_url, ensure=True, download_dir=cachedir)
Example #34
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m ibeis.init.sysres --exec-ensure_pz_mtest
        python -m ibeis --tf ensure_pz_mtest

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    print('ensure_pz_mtest')
    from ibeis import sysres
    workdir = sysres.get_workdir()
    mtest_zipped_url = const.ZIPPED_URLS.PZ_MTEST
    mtest_dir = ut.grab_zipped_url(mtest_zipped_url,
                                   ensure=True,
                                   download_dir=workdir)
    print('have mtest_dir=%r' % (mtest_dir, ))
    # update to the newest database version
    import ibeis
    ibs = ibeis.opendb('PZ_MTEST')
    print(
        'cleaning up old database and ensuring everything is properly computed'
    )
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # Remove old imagesets and update to new special ones
    all_imgset_ids = ibs.get_valid_imgsetids()
    special_imgset_ids = ibs.get_special_imgsetids()
    other_imgset_ids = ut.setdiff(all_imgset_ids, special_imgset_ids)
    ibs.delete_imagesets(other_imgset_ids)
    ibs.set_exemplars_from_quality_and_viewpoint()
    ibs.update_all_image_special_imageset()

    occurrence_gids = [
        2, 9, 12, 16, 25, 26, 29, 30, 32, 33, 35, 46, 47, 52, 57, 61, 66, 70,
        71, 73, 74, 76, 77, 78, 79, 87, 88, 90, 96, 97, 103, 106, 108, 110,
        112, 113
    ]

    other_gids = ut.setdiff(ibs.get_valid_gids(), occurrence_gids)
    other_gids1 = other_gids[0::2]
    other_gids2 = other_gids[1::2]
    ibs.set_image_imagesettext(occurrence_gids,
                               ['Occurrence 1'] * len(occurrence_gids))
    ibs.set_image_imagesettext(other_gids1,
                               ['Occurrence 2'] * len(other_gids1))
    ibs.set_image_imagesettext(other_gids2,
                               ['Occurrence 3'] * len(other_gids2))

    # hack in some tags
    print('Hacking in some tags')
    foal_aids = [
        4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83,
        91, 97, 103, 107, 109, 119
    ]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))
Example #35
def test_pyrf():
    category = 'zebra_plains'

    detect_config = {
        'save_detection_images':        True,
        'save_scales':                  True,
        'percentage_top':               0.40,
    }

    #=================================
    # Train / Detect Initialization
    #=================================

    testdata_dir = utool.unixpath('~/code/pyrf/results')
    # assert utool.checkpath(testdata_dir)
    if utool.get_argflag('--vd'):
        print(utool.ls(testdata_dir))

    # Create detector
    detector = Random_Forest_Detector()

    test_path = utool.grab_zipped_url(TEST_DATA_DETECT_URL, appname='utool')
    models_path = utool.grab_zipped_url(TEST_DATA_MODEL_URL, appname='utool')
    trees_path = join(models_path, category)

    results_path  = join(utool.unixpath('~/code/pyrf/results'), category)
    # detect_path   = join(results_path, 'detect')
    trees_path    = join(results_path, 'trees')

    detect_path = join(test_path, category, 'detect')
    utool.ensuredir(detect_path)
    utool.ensuredir(test_path)
    utool.ensuredir(trees_path)

    #=================================
    # Detect using Random Forest
    #=================================

    # Get input images
    from vtool import image
    big_gpath_list = utool.list_images(test_path, fullpath=True, recursive=False)
    print(big_gpath_list)
    # Resize images to standard size
    if utool.get_argflag('--small'):
        big_gpath_list = big_gpath_list[0:8]
    #big_gpath_list = big_gpath_list[0:8]
    output_dir = join(test_path, 'resized')
    std_gpath_list = image.resize_imagelist_to_sqrtarea(big_gpath_list,
                                                        sqrt_area=800,
                                                        output_dir=output_dir,
                                                        checkexists=True)
    dst_gpath_list = [join(detect_path, split(gpath)[1]) for gpath in std_gpath_list]
    #utool.view_directory(test_path)
    #utool.view_directory('.')
    print(std_gpath_list)
    num_images = len(std_gpath_list)
    #assert num_images == 16, 'the test has diverged!'
    print('Testing on %r images' % num_images)

    # Load forest, so we don't have to reload every time
    forest = detector.load(trees_path, category + '-', num_trees=25)
    detector.set_detect_params(**detect_config)
    results_list1 = []
    with utool.Timer('[test_pyrf] for loop detector.detect') as t1:
        if not utool.get_argflag('--skip1'):
            for ix, (img_fpath, dst_fpath) in enumerate(zip(std_gpath_list, dst_gpath_list)):
                #img_fname = split(img_fpath)[1]
                #dst_fpath = join(detect_path, img_fname)
                #print('  * img_fpath = %r' % img_fpath)
                #print('  * dst_fpath = %r' % dst_fpath)
                with utool.Timer('[test_pyrf] detector.detect ix=%r' % (ix,)):
                    results = detector.detect(forest, img_fpath, dst_fpath)
                results_list1.append(results)
                print('num results = %r' % len(results))
        else:
            print('...skipped')

    # with utool.Timer('[test_pyrf] detector.detect_many') as t2:
    #     results_list2 = detector.detect_many(forest, std_gpath_list,
    #                                          dst_gpath_list, use_openmp=True)

    print('')
    print('+ --------------')
    print('| total time1: %r' % t1.ellapsed)
    # print('| total time2: %r' % t2.ellapsed)
    print('|')
    print('| num results1 = %r' % (list(map(len, results_list1))))
    # print('| num results2 = %r' % (list(map(len, results_list2))))
    #assert results_list2 == results_list1
    return locals()
Example #36
#!/usr/bin/env python2.7
"""
downloads an archive file and then unzips it into a directory with the same name
as the archive (sans the .zip or .tar.gz)

grabzippedurl.py "https://download.zotero.org/standalone/4.0.26.3/Zotero-4.0.26.3_linux-x86_64.tar.bz2"

"""
from __future__ import absolute_import, division, print_function
import sys
import utool

if __name__ == '__main__':
    url = sys.argv[1]
    download_dir = '.'
    utool.grab_zipped_url(url, download_dir=download_dir, cleanup=False, spoof=True)
Example #37
def test_pyrf():
    r"""
    CommandLine:
        python run_tests.py --test-test_pyrf

    Example:
        >>> # ENABLE_DOCTEST
        >>> from run_tests import *  # NOQA
        >>> result = test_pyrf()
        >>> print(result)
    """

    #=================================
    # Initialization
    #=================================

    category = 'zebra_plains'

    #detect_config = {
    #    'save_detection_images':        True,
    #    'percentage_top':               0.40,
    #}

    testdata_dir = ut.unixpath('~/code/pyrf/results')
    # assert ut.checkpath(testdata_dir)
    if ut.get_argflag('--vd'):
        print(ut.ls(testdata_dir))

    # Create detector
    detector = Random_Forest_Detector()

    test_path = ut.grab_zipped_url(TEST_DATA_DETECT_URL, appname='utool')
    models_path = ut.grab_zipped_url(TEST_DATA_MODEL_URL, appname='utool')
    trees_path = join(models_path, category)
    detect_path = join(test_path, category, 'detect')
    ut.ensuredir(detect_path)
    ut.ensuredir(test_path)
    ut.ensuredir(trees_path)

    #=================================
    # Load Input Images
    #=================================

    # Get input images
    big_gpath_list = ut.list_images(test_path, fullpath=True, recursive=False)
    print(big_gpath_list)
    # Resize images to standard size
    if ut.get_argflag('--small'):
        big_gpath_list = big_gpath_list[0:8]
    #big_gpath_list = big_gpath_list[0:8]
    output_dir = join(test_path, 'resized')
    std_gpath_list = resize_imagelist_to_sqrtarea(big_gpath_list,
                                                  sqrt_area=800,
                                                  output_dir=output_dir,
                                                  checkexists=True)
    dst_gpath_list = [join(detect_path, split(gpath)[1]) for gpath in std_gpath_list]
    #ut.view_directory(test_path)
    #ut.view_directory('.')
    print(std_gpath_list)
    num_images = len(std_gpath_list)
    #assert num_images == 16, 'the test has diverged!'
    print('Testing on %r images' % num_images)

    #=================================
    # Load Pretrained Forests
    #=================================

    # Load forest, so we don't have to reload every time
    trees_fpath_list = ut.ls(trees_path, '*.txt')
    #forest = detector.load(trees_path, category + '-')
    forest = detector.forest(trees_fpath_list)
    #detector.set_detect_params(**detect_config)
    results_list1 = []

    #=================================
    # Detect using Random Forest
    #=================================

    with ut.Timer('[test_pyrf] for loop detector.detect') as t1:
        if not ut.get_argflag('--skip1'):
            results_list1 = detector.detect(forest, std_gpath_list, output_gpath_list=dst_gpath_list)
            #for ix, (img_fpath, dst_fpath) in enumerate(zip(std_gpath_list, dst_gpath_list)):
            #    #img_fname = split(img_fpath)[1]
            #    #dst_fpath = join(detect_path, img_fname)
            #    #print('  * img_fpath = %r' % img_fpath)
            #    #print('  * dst_fpath = %r' % dst_fpath)
            #    with ut.Timer('[test_pyrf] detector.detect ix=%r' % (ix,)):
            #        results = detector.detect(forest, img_fpath, dst_fpath)
            #    results_list1.append(results)
            #    print('num results = %r' % len(results))
            #else:
            #    print('...skipped')

    #with ut.Timer('[test_pyrf] detector.detect_many') as t2:
    #    results_list2 = detector.detect_many(forest, std_gpath_list,
    #                                         dst_gpath_list, use_openmp=True)
    detector.free_forest(forest)

    print('')
    print('+ --------------')
    print('| total time1: %r' % t1.ellapsed)
    #print('| total time2: %r' % t2.ellapsed)
    print('|')
    print('| num results1 = %r' % (list(map(len, results_list1))))
    #print('| num results2 = %r' % (list(map(len, results_list2))))
    #assert results_list2 == results_list1
    return locals()
Example #38
def classify(vector_list, weight_filepath, verbose=VERBOSE_SVM, **kwargs):
    """
    Args:
        vector_list (list): the list of feature vectors that need classifying

    Returns:
        iter
    """
    import multiprocessing
    import numpy as np

    # Get correct weight if specified with shorthand
    if weight_filepath in CONFIG_URL_DICT:
        weight_url = CONFIG_URL_DICT[weight_filepath]
        if weight_url.endswith('.zip'):
            weight_filepath = ut.grab_zipped_url(weight_url, appname='ibeis')
        else:
            weight_filepath = ut.grab_file_url(weight_url, appname='ibeis',
                                               check_hash=True)

    # Get ensemble
    is_ensemble = isdir(weight_filepath)
    if is_ensemble:
        weight_filepath_list = sorted([
            join(weight_filepath, filename) for filename in listdir(weight_filepath)
            if isfile(join(weight_filepath, filename))
        ])
    else:
        weight_filepath_list = [weight_filepath]
    num_weights = len(weight_filepath_list)
    assert num_weights > 0

    # Form dictionaries
    num_vectors = len(vector_list)
    index_list = list(range(num_vectors))

    # Generate parallelized wrapper
    OLD = False
    if is_ensemble and OLD:
        vectors_list = [ vector_list for _ in range(num_weights) ]
        args_list = zip(weight_filepath_list, vectors_list)
        nTasks = num_weights
        print('Processing ensembles in parallel using %d ensembles' % (num_weights, ))
    else:
        num_cpus = multiprocessing.cpu_count()
        vector_batch = int(np.ceil(float(num_vectors) / num_cpus))
        vector_rounds = int(np.ceil(float(num_vectors) / vector_batch))

        args_list = []
        for vector_round in range(vector_rounds):
            start_index = vector_round * vector_batch
            stop_index = (vector_round + 1) * vector_batch
            assert start_index < num_vectors
            stop_index = min(stop_index, num_vectors)
            # print('Slicing index range: [%r, %r)' % (start_index, stop_index, ))

            # Slice gids and get feature data
            index_list_ = list(range(start_index, stop_index))
            vector_list_ = vector_list[start_index: stop_index]
            assert len(index_list_) == len(vector_list_)
            for weight_filepath in weight_filepath_list:
                args = (weight_filepath, vector_list_, index_list_)
                args_list.append(args)

        nTasks = len(args_list)
        print('Processing vectors in parallel using vector_batch = %r' % (vector_batch, ))

    # Perform inference
    classify_iter = ut.generate2(classify_helper, args_list, nTasks=nTasks,
                                 ordered=True, force_serial=False)

    # Classify with SVM for each image vector
    score_dict = { index: [] for index in index_list }
    class_dict = { index: [] for index in index_list }
    for score_dict_, class_dict_ in classify_iter:
        for index in index_list:
            if index in score_dict_:
                score_dict[index] += score_dict_[index]
            if index in class_dict_:
                class_dict[index] += class_dict_[index]

    # Organize and compute mode and average for class and score
    for index in index_list:
        score_list_ = score_dict[index]
        class_list_ = class_dict[index]
        score_ = sum(score_list_) / len(score_list_)
        class_ = max(set(class_list_), key=class_list_.count)
        class_ = 'positive' if int(class_) == 1 else 'negative'
        yield score_, class_
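The batching arithmetic above (ceil-divide the vectors across CPUs, then ceil-divide again to count rounds) is easy to sanity-check in isolation. A minimal sketch with a hypothetical workload size:

import multiprocessing
import numpy as np

num_vectors = 103  # hypothetical workload
num_cpus = multiprocessing.cpu_count()
vector_batch = int(np.ceil(float(num_vectors) / num_cpus))
vector_rounds = int(np.ceil(float(num_vectors) / vector_batch))
slices = [(r * vector_batch, min((r + 1) * vector_batch, num_vectors))
          for r in range(vector_rounds)]
# slices are contiguous by construction; the endpoints cover the full range
assert slices[0][0] == 0 and slices[-1][1] == num_vectors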
Example #39
def _download_training_kit():
    training_kit_url = CONFIG_URL_DICT['training_kit']
    training_kit_path = ut.grab_zipped_url(training_kit_url,
                                           appname='lightnet')
    return training_kit_path
Example #40
def train_pyrf():
    boosting = 3
    num_trees = 5
    category = 'giraffe'

    #=================================
    # Train / Detect Configurations
    #=================================

    train_config = {
        'object_min_width':        32,
        'object_min_height':       32,
        'mine_negatives':          True,
        'mine_max_keep':           1,
        'mine_exclude_categories': [category],
        'mine_width_min':          128,
        'mine_width_max':          512,
        'mine_height_min':         128,
        'mine_height_max':         512,

        'neg_exclude_categories':  [category],
        'max_rois_pos':            325,
        'max_rois_neg':            600,
        'num_trees':               num_trees,
    }

    detect_config = {
        'save_detection_images':   True,
        'detection_height':        208,
    }

    #=================================
    # Train / Detect Initialization
    #=================================

    # Create detector
    detector = Random_Forest_Detector()

    # Gather Dataset
    dataset_path = utool.unixpath('~/code/IBEIS2014/')
    dataset = IBEIS_Data(dataset_path, **train_config)

    results_path  = join(utool.unixpath('~/code/pyrf/results'), category)
    pos_path      = join(results_path, 'train-positives')
    neg_path      = join(results_path, 'train-negatives')
    val_path      = join(results_path, 'val')
    test_path     = join(results_path, 'test')
    test_pos_path = join(results_path, 'test-positives')
    test_neg_path = join(results_path, 'test-negatives')
    detect_path   = join(results_path, 'detect')
    trees_path    = join(results_path, 'trees')

    # Ensure result path for the category
    # rmtreedir(results_path)
    ensuredir(results_path)

    for phase in range(1, boosting + 1):
        print("*********************")
        print("Phase: %s" % phase)
        print("*********************")
        # raw_input()
        # =================================
        # Train Random Forest
        #=================================
        detector.train(dataset, category, pos_path, neg_path, val_path,
                        test_path, test_pos_path, test_neg_path,
                        trees_path, reshuffle=(phase == 1), **train_config)

        if phase < boosting:
            #=================================
            # Detect using Random Forest
            #=================================
            # Load forest, so we don't have to reload every time
            forest = detector.load(trees_path, category + '-', num_trees=(phase * num_trees))
            detector.set_detect_params(**detect_config)

            # Ensure output detection paths
            rmtreedir(detect_path)
            ensuredir(detect_path)

            # Calculate error on test set
            direct = Directory(test_path, include_file_extensions=["jpg"])
            accuracy_list = []
            image_filepath_list = direct.files()
            dst_filepath_list   = [ join(detect_path, split(image_filepath)[1]) for image_filepath in image_filepath_list ]
            predictions_list = detector.detect_many(forest, image_filepath_list, dst_filepath_list, use_openmp=True)
            for index, (predictions, image_filepath) in enumerate(zip(predictions_list, image_filepath_list)):
                image_path, image_filename = split(image_filepath)
                image = dataset[image_filename]
                accuracy, true_pos, false_pos, false_neg = image.accuracy(predictions, category)
                accuracy_list.append(accuracy)
                progress = "%0.2f" % (float(index) / len(image_filepath_list))
                print("TEST %s %0.4f %s" % (image, accuracy, progress), end='\r')
                sys.stdout.flush()
                # image.show(prediction_list=predictions, category=category)
            print(' ' * 100, end='\r')
            print("TEST ERROR: %0.4f" % (1.0 - (float(sum(accuracy_list)) / len(accuracy_list))))

            #=================================
            # Eval and prep boosting train set
            #=================================
            detector.boosting(phase, forest, dataset, category, pos_path, neg_path,
                              test_pos_path, test_neg_path, detect_path)

    ####################################
    # New FAST
    ####################################

    detector = Random_Forest_Detector(
        scales='6 1.0 0.75 0.55 0.40 0.30 0.20'
    )

    # Ensure output detection paths
    detect_path_temp = detect_path + "_1"
    rmtreedir(detect_path_temp)
    ensuredir(detect_path_temp)

    # Load forest, so we don't have to reload every time
    forest = detector.load(trees_path, category + '-', num_trees=15)
    detector.set_detect_params(**detect_config)

    # Calculate error on test set
    direct = Directory(test_path, include_file_extensions=["jpg"])
    accuracy_list  = []
    true_pos_list  = []
    false_pos_list = []
    false_neg_list = []
    image_filepath_list = direct.files()
    dst_filepath_list   = [ join(detect_path_temp, split(image_filepath)[1]) for image_filepath in image_filepath_list ]
    predictions_list = detector.detect_many(forest, image_filepath_list, dst_filepath_list, use_openmp=True)
    for index, (predictions, image_filepath) in enumerate(zip(predictions_list, image_filepath_list)):
        image_path, image_filename = split(image_filepath)
        image = dataset[image_filename]
        accuracy, true_pos, false_pos, false_neg = image.accuracy(predictions, category)
        accuracy_list.append(accuracy)
        true_pos_list.append(true_pos)
        false_pos_list.append(false_pos)
        false_neg_list.append(false_neg)
        progress = "%0.2f" % (float(index) / len(image_filepath_list))
        print("TEST %s %0.4f %s" % (image, accuracy, progress), end='\r')
        sys.stdout.flush()
        # image.show(prediction_list=predictions, category=category)
    print(' ' * 100, end='\r')
    print("1 TEST ERROR     : %0.4f" % (1.0 - (float(sum(accuracy_list)) / len(accuracy_list))))
    print("1 TEST TRUE POS  : %d" % (sum(true_pos_list)))
    print("1 TEST FALSE POS : %d" % (sum(false_pos_list)))
    print("1 TEST FALSE NEG : %d" % (sum(false_neg_list)))

    ####################################
    # New SLOW
    ####################################

    detector = Random_Forest_Detector(
        scales='11 1.5 1.25 1.0 0.8 0.64 0.51 0.41 0.33 0.26 0.21 0.17'
    )

    # Ensure output detection paths
    detect_path_temp = detect_path + "_2"
    rmtreedir(detect_path_temp)
    ensuredir(detect_path_temp)

    # Load forest, so we don't have to reload every time
    forest = detector.load(trees_path, category + '-', num_trees=15)
    detector.set_detect_params(**detect_config)

    # Calculate error on test set
    direct = Directory(test_path, include_file_extensions=["jpg"])
    accuracy_list  = []
    true_pos_list  = []
    false_pos_list = []
    false_neg_list = []
    image_filepath_list = direct.files()
    dst_filepath_list   = [ join(detect_path_temp, split(image_filepath)[1]) for image_filepath in image_filepath_list ]
    predictions_list = detector.detect_many(forest, image_filepath_list, dst_filepath_list, use_openmp=True)
    for index, (predictions, image_filepath) in enumerate(zip(predictions_list, image_filepath_list)):
        image_path, image_filename = split(image_filepath)
        image = dataset[image_filename]
        accuracy, true_pos, false_pos, false_neg = image.accuracy(predictions, category)
        accuracy_list.append(accuracy)
        true_pos_list.append(true_pos)
        false_pos_list.append(false_pos)
        false_neg_list.append(false_neg)
        progress = "%0.2f" % (float(index) / len(image_filepath_list))
        print("TEST %s %0.4f %s" % (image, accuracy, progress), end='\r')
        sys.stdout.flush()
        # image.show(prediction_list=predictions, category=category)
    print(' ' * 100, end='\r')
    print("2 TEST ERROR     : %0.4f" % (1.0 - (float(sum(accuracy_list)) / len(accuracy_list))))
    print("2 TEST TRUE POS  : %d" % (sum(true_pos_list)))
    print("2 TEST FALSE POS : %d" % (sum(false_pos_list)))
    print("2 TEST FALSE NEG : %d" % (sum(false_neg_list)))

    ####################################
    # Current FAST
    ####################################

    detector = Random_Forest_Detector(
        scales='6 1.0 0.75 0.55 0.40 0.30 0.20'
    )

    # Use pre-trained trees?
    TEST_DATA_MODEL_URL = 'https://lev.cs.rpi.edu/public/models/rf.zip'
    models_path = utool.grab_zipped_url(TEST_DATA_MODEL_URL, appname='utool')
    trees_path = join(models_path, category)

    # Ensure output detection paths
    detect_path_temp = detect_path + "_3"
    rmtreedir(detect_path_temp)
    ensuredir(detect_path_temp)

    # Load forest, so we don't have to reload every time
    forest = detector.load(trees_path, category + '-', num_trees=10)
    detector.set_detect_params(**detect_config)

    # Calculate error on test set
    direct = Directory(test_path, include_file_extensions=["jpg"])
    accuracy_list  = []
    true_pos_list  = []
    false_pos_list = []
    false_neg_list = []
    image_filepath_list = direct.files()
    dst_filepath_list   = [ join(detect_path_temp, split(image_filepath)[1]) for image_filepath in image_filepath_list ]
    predictions_list = detector.detect_many(forest, image_filepath_list, dst_filepath_list, use_openmp=True)
    for index, (predictions, image_filepath) in enumerate(zip(predictions_list, image_filepath_list)):
        image_path, image_filename = split(image_filepath)
        image = dataset[image_filename]
        accuracy, true_pos, false_pos, false_neg = image.accuracy(predictions, category)
        accuracy_list.append(accuracy)
        true_pos_list.append(true_pos)
        false_pos_list.append(false_pos)
        false_neg_list.append(false_neg)
        progress = "%0.2f" % (float(index) / len(image_filepath_list))
        print("TEST %s %0.4f %s" % (image, accuracy, progress), end='\r')
        sys.stdout.flush()
        # image.show(prediction_list=predictions, category=category)
    print(' ' * 100, end='\r')
    print("3 TEST ERROR     : %0.4f" % (1.0 - (float(sum(accuracy_list)) / len(accuracy_list))))
    print("3 TEST TRUE POS  : %d" % (sum(true_pos_list)))
    print("3 TEST FALSE POS : %d" % (sum(false_pos_list)))
    print("3 TEST FALSE NEG : %d" % (sum(false_neg_list)))

    ####################################
    # Current SLOW
    ####################################

    detector = Random_Forest_Detector(
        scales='11 1.5 1.25 1.0 0.8 0.64 0.51 0.41 0.33 0.26 0.21 0.17'
    )

    # Ensure output detection paths
    detect_path_temp = detect_path + "_4"
    rmtreedir(detect_path_temp)
    ensuredir(detect_path_temp)

    # Load forest, so we don't have to reload every time
    forest = detector.load(trees_path, category + '-', num_trees=10)
    detector.set_detect_params(**detect_config)

    # Calculate error on test set
    direct = Directory(test_path, include_file_extensions=["jpg"])
    accuracy_list  = []
    true_pos_list  = []
    false_pos_list = []
    false_neg_list = []
    image_filepath_list = direct.files()
    dst_filepath_list   = [ join(detect_path_temp, split(image_filepath)[1]) for image_filepath in image_filepath_list ]
    predictions_list = detector.detect_many(forest, image_filepath_list, dst_filepath_list, use_openmp=True)
    for index, (predictions, image_filepath) in enumerate(zip(predictions_list, image_filepath_list)):
        image_path, image_filename = split(image_filepath)
        image = dataset[image_filename]
        accuracy, true_pos, false_pos, false_neg = image.accuracy(predictions, category)
        accuracy_list.append(accuracy)
        true_pos_list.append(true_pos)
        false_pos_list.append(false_pos)
        false_neg_list.append(false_neg)
        progress = "%0.2f" % (float(index) / len(image_filepath_list))
        print("TEST %s %0.4f %s" % (image, accuracy, progress), end='\r')
        sys.stdout.flush()
        # image.show(prediction_list=predictions, category=category)
    print(' ' * 100, end='\r')
    print("4 TEST ERROR     : %0.4f" % (1.0 - (float(sum(accuracy_list)) / len(accuracy_list))))
    print("4 TEST TRUE POS  : %d" % (sum(true_pos_list)))
    print("4 TEST FALSE POS : %d" % (sum(false_pos_list)))
    print("4 TEST FALSE NEG : %d" % (sum(false_neg_list)))
Example #41
    # Build parameters
    bbox_list = [dummy_bbox(img), dummy_bbox(img, (-.25, -.25), .1)]
    showkw = {
        'title': 'test axis title',
        # The list of bounding boxes to be drawn on the image
        'bbox_list': bbox_list,
        'theta_list': [tau * .7, tau * .9],
        'sel_list': [True, False],
        'label_list': ['test label', 'lbl2'],
    }
    # Print the keyword arguments to illustrate their format
    print('showkw = ' + utool.dict_str(showkw))
    # Display the image in figure-num 42, using a 1x1 axis grid in the first
    # axis. Pass showkw as keyword arguments.
    viz_image2.show_image(img, fnum=42, pnum=(1, 1, 1), **showkw)
    df2.set_figtitle('Test figure title')


if __name__ == '__main__':
    TEST_IMAGES_URL = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir,
                                 fullpath=True,
                                 recursive=False)  # test image paths
    # Get one image filepath to load and display
    img_fpath = imgpaths[0]
    # Run Test
    test_viz_image(img_fpath)
    # Magic exec which displays or puts you into IPython with --cmd flag
    exec(df2.present())