Code Example #1
    def __init__(dark, config_filepath=None, weight_filepath=None, verbose=VERBOSE_DARK, quiet=QUIET_DARK):
        """
            Create the C object for the PyDarknet YOLO detector.

            Args:
                config_filepath (str, optional): shorthand ('default'/'v2'/None or 'v1'/'old'/'original') or an explicit path
                weight_filepath (str, optional): shorthand ('default'/'v2'/None or 'v1'/'old'/'original') or an explicit path
                verbose (bool, optional): verbose flag; defaults to --verbdark flag
                quiet (bool, optional): quiet flag

            Returns:
                detector (object): the Darknet YOLO Detector object
        """
        dark.CLASS_LIST = None
        if config_filepath in ["default", "v2", None]:
            config_filepath = ut.grab_file_url(DEFAULT_CONFIG_URL, appname="pydarknet")
            dark.CLASS_LIST = CLASS_LIST
        elif config_filepath in ["v1", "old", "original"]:
            dark.CLASS_LIST = OLD_CLASS_LIST
            config_filepath = ut.grab_file_url(OLD_DEFAULT_CONFIG_URL, appname="pydarknet")

        if weight_filepath in ["default", "v2", None]:
            weight_filepath = ut.grab_file_url(DEFAULT_WEIGHTS_URL, appname="pydarknet")
        elif weight_filepath in ["v1", "old", "original"]:
            weight_filepath = ut.grab_file_url(OLD_DEFAULT_WEIGHTS_URL, appname="pydarknet")

        dark.verbose = verbose
        dark.quiet = quiet

        dark._load(config_filepath, weight_filepath)

        if dark.verbose and not dark.quiet:
            print("[pydarknet py] New Darknet_YOLO Object Created")
Code Example #2
def models_cnn(
    ibs,
    config_dict,
    parse_classes_func,
    parse_line_func,
    check_hash=False,
    hidden_models=(),  # immutable default avoids the mutable-default-argument pitfall
    **kwargs,
):
    import urllib.error  # imported explicitly so urllib.error.HTTPError resolves below

    model_dict = {}
    for config_tag in config_dict:
        if config_tag in hidden_models:
            continue

        try:
            config_url = config_dict[config_tag]
            classes_url = parse_classes_func(config_url)
            try:
                classes_filepath = ut.grab_file_url(
                    classes_url, appname='wbia', check_hash=check_hash
                )
                assert exists(classes_filepath)
            except (urllib.error.HTTPError, AssertionError):
                continue

            classes_filepath = ut.truepath(classes_filepath)
            line_list = parse_line_func(classes_filepath)
            model_dict[config_tag] = line_list
        except Exception:
            pass

    return model_dict
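A hypothetical invocation of models_cnn with illustrative URLs and parsers; note that the ibs argument is never referenced inside the function body above, so None suffices for a standalone sketch:

config_dict = {'v1': 'https://example.com/models/classifier.v1.cfg'}

def parse_classes_func(config_url):
    # Derive the sibling classes-file URL from the config URL (illustrative)
    return config_url.replace('.cfg', '.classes')

def parse_line_func(classes_filepath):
    with open(classes_filepath) as f:
        return [line.strip() for line in f]

model_dict = models_cnn(None, config_dict, parse_classes_func, parse_line_func)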
Code Example #3
def download_tomcat():
    """
    Put tomcat into a directory controlled by ibeis

    CommandLine:
        # Reset
        python -c "import utool as ut; ut.delete(ut.unixjoin(ut.get_app_resource_dir('ibeis'), 'tomcat'))"
    """
    from os.path import splitext, dirname, join  # join is used for the paths below
    print('Grabbing tomcat')
    # FIXME: need to make a stable link
    if ut.WIN32:
        tomcat_binary_url = 'http://mirrors.advancedhosters.com/apache/tomcat/tomcat-8/v8.0.24/bin/apache-tomcat-8.0.24-windows-x86.zip'
    else:
        tomcat_binary_url = 'http://mirrors.advancedhosters.com/apache/tomcat/tomcat-8/v8.0.24/bin/apache-tomcat-8.0.24.zip'
    zip_fpath = ut.grab_file_url(tomcat_binary_url, appname='ibeis')
    # Download tomcat into the IBEIS resource directory
    tomcat_dpath = join(dirname(zip_fpath), 'tomcat')
    if not ut.checkpath(tomcat_dpath, verbose=True):
        # hack because unzipping is still weird
        ut.unzip_file(zip_fpath)
        tomcat_dpath_tmp = splitext(zip_fpath)[0]
        ut.move(tomcat_dpath_tmp, tomcat_dpath)
    if ut.checkpath(join(tomcat_dpath, 'bin'), verbose=True):
        scriptnames = ['catalina.sh', 'startup.sh', 'shutdown.sh']
        for fname in scriptnames:
            fpath = join(tomcat_dpath, 'bin', fname)
            if not ut.is_file_executable(fpath):
                print('Adding executable bits to script %r' % (fpath,))
                ut.chmod_add_executable(fpath)
    return tomcat_dpath
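The executable-bit fix at the end goes through the utool helper ut.chmod_add_executable; a stdlib-only sketch that is equivalent in intent:

import os
import stat

def add_executable_bits(fpath):
    # OR the exec bits into the existing mode, like `chmod +x`
    st = os.stat(fpath)
    os.chmod(fpath, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)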
Code Example #4
def pretrained_camvid_segnet_basic():
    import utool as ut
    from os.path import expanduser  # needed for the workdir path below
    from pysseg.harness import Harness
    from pysseg.tasks import CamVid

    workdir = expanduser('~/data/work/camvid/')
    arch = 'segnet_basic'

    harn = Harness(workdir=workdir, arch=arch)
    harn.task = CamVid()
    harn.test_batch_size = 1
    harn.test_imdir  = '~/sseg/SegNet/CamVid/test'
    harn.test_gtdir  = '~/sseg/SegNet/CamVid/testannot'
    harn.gpu_num = gpu_util.find_unused_gpu()
    pretrained_weights_fpath = ut.grab_file_url(
        'http://mi.eng.cam.ac.uk/~agk34/resources/SegNet/segnet_basic_camvid.caffemodel')
    harn.test_weights_fpath = pretrained_weights_fpath
    harn.prepare_test_model()
    harn.prepare_test_predict_dpath()

    """
    diff -u --ignore-space-change \
        /data/jon.crall/segnet-exact/SegNet-Tutorial/Models/segnet_basic_inference.prototxt \
        /home/local/KHQ/jon.crall/data/work/camvid/models/segnet_basic_predict_hmkcclslvxtlpgwozhiaonfvfmvhigxc.prototext
    """

    # harn.prepare_test_input()
    harn.evaluate()
Code Example #5
def download_tomcat():
    """
    Put tomcat into a directory controlled by ibeis

    CommandLine:
        # Reset
        python -c "import utool as ut; ut.delete(ut.unixjoin(ut.get_app_resource_dir('ibeis'), 'tomcat'))"
    """
    from os.path import join, dirname, splitext  # path helpers used below
    print('Grabbing tomcat')
    # FIXME: need to make a stable link
    if ut.WIN32:
        tomcat_binary_url = 'http://mirrors.advancedhosters.com/apache/tomcat/tomcat-8/v8.0.36/bin/apache-tomcat-8.0.36-windows-x86.zip'
    else:
        tomcat_binary_url = 'http://mirrors.advancedhosters.com/apache/tomcat/tomcat-8/v8.0.36/bin/apache-tomcat-8.0.36.zip'
    zip_fpath = ut.grab_file_url(tomcat_binary_url, appname='ibeis')
    # Download tomcat into the IBEIS resource directory
    tomcat_dpath = join(dirname(zip_fpath), 'tomcat')
    if not ut.checkpath(tomcat_dpath, verbose=True):
        # hack because unzipping is still weird
        ut.unzip_file(zip_fpath)
        tomcat_dpath_tmp = splitext(zip_fpath)[0]
        ut.move(tomcat_dpath_tmp, tomcat_dpath)
    if ut.checkpath(join(tomcat_dpath, 'bin'), verbose=True):
        scriptnames = ['catalina.sh', 'startup.sh', 'shutdown.sh']
        for fname in scriptnames:
            fpath = join(tomcat_dpath, 'bin', fname)
            if not ut.is_file_executable(fpath):
                print('Adding executable bits to script %r' % (fpath, ))
                ut.chmod_add_executable(fpath)
    return tomcat_dpath
Code Example #6
File: classifier.py (Project: Erotemic/ibeis)
def classify_thumbnail_list(thumbnail_list, model='v1'):
    print('[classifier] Loading the classifier training data')
    data_list = np.array(thumbnail_list, dtype=np.uint8)

    print('[mnist] Loading the data into a JPCNN_Data')
    data = JPCNN_Data()
    data.set_data_list(data_list)

    print('[classifier] Create the JPCNN_Model used for testing')
    url = MODEL_DOMAIN + MODEL_URLS[model]
    model_path = ut.grab_file_url(url, appname='ibeis')
    model = Classifier_Model(model_path)

    print('[mnist] Create the JPCNN_network and start testing')
    net = JPCNN_Network(model, data)
    test_results = net.test('.', best_weights=True)
    prediction_list = test_results['label_list']
    confidence_list = test_results['confidence_list']

    result_list = list(zip(confidence_list, prediction_list))

    # Release memory
    data = None
    model = None
    net = None

    return result_list
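A hypothetical call, assuming the thumbnails are equally sized HxWx3 uint8 arrays (np.array(thumbnail_list, ...) above requires uniform shapes; the exact input size the model expects is not shown in the snippet):

import numpy as np

thumbnail_list = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(4)]
for confidence, prediction in classify_thumbnail_list(thumbnail_list, model='v1'):
    print(confidence, prediction)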
Code Example #7
def get_flukebook_image_uuids(ibs):
    from datetime import datetime
    import uuid
    import pytz

    PST = pytz.timezone('US/Pacific')

    url = 'https://www.flukebook.org/acmIdSync.jsp'

    now = datetime.now(tz=PST)
    timestamp = now.strftime('%Y-%m-%d-%H-00-00')
    filename = 'flukebook.image.admid.%s.json' % (timestamp, )
    filepath = ut.grab_file_url(url, appname='wbia', fname=filename)

    with open(filepath, 'r') as file:
        file_content = file.read()
        file_json = ut.from_json(file_content)
    logger.info('Loaded %d Image ACM string UUIDs from Flukebook' %
                (len(file_json), ))

    uuid_list = []
    for uuid_str in file_json:
        try:
            uuid_ = uuid.UUID(uuid_str)
            uuid_list.append(uuid_)
        except ValueError:
            continue

    logger.info('Validated %d Image UUIDs from Flukebook' % (len(uuid_list), ))
    flukebook_image_uuid_list = list(set(uuid_list))
    logger.info('Validated %d de-duplicated Image UUIDs from Flukebook' %
                (len(flukebook_image_uuid_list), ))

    return flukebook_image_uuid_list
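The hour-truncated timestamp in the filename acts as a cache key: ut.grab_file_url stores the download under the generated fname, so a fresh copy of the sync file is fetched when the hour (and hence the name) rolls over. The same idiom in isolation:

from datetime import datetime
import pytz
import utool as ut

now = datetime.now(tz=pytz.timezone('US/Pacific'))
fname = 'flukebook.image.admid.%s.json' % (now.strftime('%Y-%m-%d-%H-00-00'), )
# Cached under the 'wbia' app dir; a new fname forces a fresh download
filepath = ut.grab_file_url('https://www.flukebook.org/acmIdSync.jsp',
                            appname='wbia', fname=fname)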
Code Example #8
File: ingest_database.py (Project: heroinlin/ibeis)
def download_image_urls(image_url_info_list):
    # NOTE: image_dir comes from the enclosing scope in the original file
    # Find ones that we already have
    print('Requested %d downloaded images' % (len(image_url_info_list)))
    full_gpath_list = [join(image_dir, basename(gpath)) for gpath in image_url_info_list]
    exists_list = [ut.checkpath(gpath) for gpath in full_gpath_list]
    image_url_info_list_ = ut.compress(image_url_info_list, ut.not_list(exists_list))
    print('Already have %d/%d downloaded images' % (
        len(image_url_info_list) - len(image_url_info_list_), len(image_url_info_list)))
    print('Need to download %d images' % (len(image_url_info_list_)))
    # Download the rest
    imgurl_prefix = 'https://snapshotserengeti.s3.msi.umn.edu/'
    image_url_list = [imgurl_prefix + suffix for suffix in image_url_info_list_]
    for img_url in ut.ProgressIter(image_url_list, lbl='Downloading image'):
        ut.grab_file_url(img_url, download_dir=image_dir)
    return full_gpath_list
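Note the destination: passing download_dir sends the file to an explicit directory, whereas the appname keyword used in most other examples stores it under the per-application resource directory. A minimal contrast with a placeholder URL:

import utool as ut

# Explicit destination directory
fpath_a = ut.grab_file_url('https://example.com/img/0001.jpg',
                           download_dir='/tmp/images')
# Per-application resource directory (under ut.get_app_resource_dir('ibeis'))
fpath_b = ut.grab_file_url('https://example.com/img/0001.jpg', appname='ibeis')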
Code Example #9
File: labeler.py (Project: whaozl/ibeis)
def label_chip_list(chip_list, model='v1'):
    print('[classifier] Loading the classifier training data')

    data_list = np.array(chip_list, dtype=np.uint8)

    print('[mnist] Loading the data into a JPCNN_Data')
    data = JPCNN_Data()
    data.set_data_list(data_list)

    print('[classifier] Create the JPCNN_Model used for testing')
    url = MODEL_DOMAIN + MODEL_URLS[model]
    model_path = ut.grab_file_url(url, appname='ibeis')
    model = Labeler_Model(model_path)

    print('[mnist] Create the JPCNN_network and start testing')
    net = JPCNN_Network(model, data)
    test_results = net.test('.', best_weights=True)

    class_list = list(net.config['data_label_encoder'].classes_)
    prediction_list = test_results['label_list']
    confidence_list = test_results['confidence_list']
    probability_list = test_results['probability_list']

    species_list = []
    viewpoint_list = []
    for prediction in prediction_list:
        prediction = prediction.strip()
        if ':' in prediction:
            prediction = prediction.split(':')
            species, viewpoint = prediction
        else:
            species = prediction
            viewpoint = None
        if species.lower() == 'ignore':
            species = const.UNKNOWN
        species_list.append(species)
        viewpoint_list.append(viewpoint)

    quality_list = [const.QUAL_UNKNOWN] * len(prediction_list)
    orientation_list = [0.0] * len(prediction_list)

    probability_dict_list = []
    for probability in probability_list:
        probability_dict = {
            class_ : prob
            for class_, prob in zip(class_list, probability)
        }
        probability_dict_list.append(probability_dict)

    result_list = list(zip(confidence_list, species_list, viewpoint_list,
                       quality_list, orientation_list, probability_dict_list))

    # Release memory
    data = None
    model = None
    net = None

    return result_list
Code Example #10
    def get_database_icon(ibs, max_dsize=(None, 192), aid=None):
        r"""
        Args:
            max_dsize (tuple): (default = (None, 192))

        Returns:
            ndarray: icon image

        CommandLine:
            python -m wbia.control.IBEISControl --exec-get_database_icon --show
            python -m wbia.control.IBEISControl --exec-get_database_icon --show --db Oxford

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.control.IBEISControl import *  # NOQA
            >>> import wbia
            >>> ibs = wbia.opendb(defaultdb='testdb1')
            >>> icon = ibs.get_database_icon()
            >>> ut.quit_if_noshow()
            >>> import wbia.plottool as pt
            >>> pt.imshow(icon)
            >>> ut.show_if_requested()
        """
        # if ibs.get_dbname() == 'Oxford':
        #    pass
        # else:
        import vtool as vt

        if hasattr(ibs, 'force_icon_aid'):
            aid = ibs.force_icon_aid
        if aid is None:
            species = ibs.get_primary_database_species()
            # Use a url to get the icon
            url = {
                ibs.const.TEST_SPECIES.GIR_MASAI:
                'http://i.imgur.com/tGDVaKC.png',
                ibs.const.TEST_SPECIES.ZEB_PLAIN:
                'http://i.imgur.com/2Ge1PRg.png',
                ibs.const.TEST_SPECIES.ZEB_GREVY:
                'http://i.imgur.com/PaUT45f.png',
            }.get(species, None)
            if url is not None:
                icon = vt.imread(ut.grab_file_url(url), orient='auto')
            else:
                # HACK: (this should probably be a db setting)
                # use an specific aid to get the icon
                aid = {
                    'Oxford': 73,
                    'seaturtles': 37
                }.get(ibs.get_dbname(), None)
                if aid is None:
                    # otherwise just grab a random aid
                    aid = ibs.get_valid_aids()[0]
        if aid is not None:
            icon = ibs.get_annot_chips(aid)
        icon = vt.resize_to_maxdims(icon, max_dsize)
        return icon
Code Example #11
def find_or_download_wilbook_warfile():
    r"""
    scp [email protected]:/var/lib/tomcat/webapps/ibeis.war \
            ~/Downloads/pachy_ibeis.war
    wget http://dev.wildme.org/ibeis_data_dir/ibeis.war
    """
    war_url = 'http://dev.wildme.org/ibeis_data_dir/ibeis.war'
    war_fpath = ut.grab_file_url(war_url, appname='ibeis')
    return war_fpath
Code Example #12
def test(gpath_list, canonical_weight_filepath=None, **kwargs):
    from wbia.detecttools.directory import Directory

    # Get correct weight if specified with shorthand
    archive_url = None

    ensemble_index = None
    if canonical_weight_filepath is not None and ':' in canonical_weight_filepath:
        assert canonical_weight_filepath.count(':') == 1
        canonical_weight_filepath, ensemble_index = canonical_weight_filepath.split(
            ':')
        ensemble_index = int(ensemble_index)

    if canonical_weight_filepath in ARCHIVE_URL_DICT:
        archive_url = ARCHIVE_URL_DICT[canonical_weight_filepath]
        archive_path = ut.grab_file_url(archive_url,
                                        appname='wbia',
                                        check_hash=True)
    else:
        raise RuntimeError('canonical_weight_filepath %r not recognized' %
                           (canonical_weight_filepath, ))

    assert os.path.exists(archive_path)
    archive_path = ut.truepath(archive_path)

    # NOTE: str.strip removes a set of characters, not a suffix; use splitext to drop '.zip'
    ensemble_path = os.path.splitext(archive_path)[0]
    if not os.path.exists(ensemble_path):
        ut.unarchive_file(archive_path, output_dir=ensemble_path)

    assert os.path.exists(ensemble_path)
    direct = Directory(ensemble_path,
                       include_file_extensions=['weights'],
                       recursive=True)
    weights_path_list = direct.files()
    weights_path_list = sorted(weights_path_list)
    assert len(weights_path_list) > 0

    if ensemble_index is not None:
        assert 0 <= ensemble_index < len(weights_path_list)
        weights_path_list = [weights_path_list[ensemble_index]]
        assert len(weights_path_list) > 0

    logger.info('Using weights in the ensemble: %s ' %
                (ut.repr3(weights_path_list), ))
    result_list = test_ensemble(gpath_list, weights_path_list, **kwargs)
    for result in result_list:
        x0 = max(result['x0'], 0.0)
        y0 = max(result['y0'], 0.0)
        x1 = max(result['x1'], 0.0)
        y1 = max(result['y1'], 0.0)
        yield (
            x0,
            y0,
            x1,
            y1,
        )
Code Example #13
def get_cls_net(config, **kwargs):
    model = HighResolutionNet(config, **kwargs)
    model_url = HRNET_PRETRAINED_URL
    model_fname = model_url.split('/')[-1]
    model_path = ut.grab_file_url(model_url,
                                  appname='wbia_orientation',
                                  check_hash=True,
                                  fname=model_fname)
    model.init_weights(model_path)
    return model
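Several of these examples pass check_hash=True so that utool can verify the cached file and re-download it when the published content changes behind a stable URL. The call shape in isolation (placeholder URL; fname pins the local filename to the URL basename, as above):

import utool as ut

model_url = 'https://example.com/models/hrnet_w32.pth'
model_path = ut.grab_file_url(model_url,
                              appname='wbia_orientation',
                              check_hash=True,
                              fname=model_url.split('/')[-1])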
Code Example #14
def ensure_model(model, redownload=False):
    try:
        url = MODEL_DOMAIN + MODEL_URLS[model]
        extracted_fpath = ut.grab_file_url(url, appname='ibeis_cnn',
                                           redownload=redownload,
                                           check_hash=True)
    except KeyError as ex:
        ut.printex(ex, 'model is not uploaded', iswarning=True)
        extracted_fpath = ut.unixjoin(ut.get_app_resource_dir('ibeis_cnn'), model)
        ut.assert_exists(extracted_fpath)
    return extracted_fpath
Code Example #15
File: IBEISControl.py (Project: Erotemic/ibeis)
    def get_database_icon(ibs, max_dsize=(None, 192), aid=None):
        r"""
        Args:
            max_dsize (tuple): (default = (None, 192))

        Returns:
            ndarray: icon image

        CommandLine:
            python -m ibeis.control.IBEISControl --exec-get_database_icon --show
            python -m ibeis.control.IBEISControl --exec-get_database_icon --show --db Oxford

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.control.IBEISControl import *  # NOQA
            >>> import ibeis
            >>> ibs = ibeis.opendb(defaultdb='testdb1')
            >>> icon = ibs.get_database_icon()
            >>> ut.quit_if_noshow()
            >>> import plottool as pt
            >>> pt.imshow(icon)
            >>> ut.show_if_requested()
        """
        #if ibs.get_dbname() == 'Oxford':
        #    pass
        #else:
        import vtool as vt
        if hasattr(ibs, 'force_icon_aid'):
            aid = ibs.force_icon_aid
        if aid is None:
            species = ibs.get_primary_database_species()
            # Use a url to get the icon
            url = {
                ibs.const.TEST_SPECIES.GIR_MASAI: 'http://i.imgur.com/tGDVaKC.png',
                ibs.const.TEST_SPECIES.ZEB_PLAIN: 'http://i.imgur.com/2Ge1PRg.png',
                ibs.const.TEST_SPECIES.ZEB_GREVY: 'http://i.imgur.com/PaUT45f.png',
            }.get(species, None)
            if url is not None:
                icon = vt.imread(ut.grab_file_url(url), orient='auto')
            else:
                # HACK: (this should probably be a db setting)
                # use an specific aid to get the icon
                aid = {
                    'Oxford': 73,
                    'seaturtles': 37,
                }.get(ibs.get_dbname(), None)
                if aid is None:
                    # otherwise just grab a random aid
                    aid = ibs.get_valid_aids()[0]
        if aid is not None:
            icon = ibs.get_annot_chips(aid)
        icon = vt.resize_to_maxdims(icon, max_dsize)
        return icon
Code Example #16
File: _plugin.py (Project: karenc/wbia-plugin-pie)
def _ensure_model_exists(ibs, aid_list, config_path):

    species = ibs.get_annot_species_texts(aid_list[0])
    model_url = MODEL_URLS[species]

    # Get the expected model location from the config file (a couple of lines copied from Olga's compute_db.py)

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    exp_folder = os.path.join(
        _PLUGIN_FOLDER, config['train']['exp_dir'], config['train']['exp_id']
    )
    local_fpath = os.path.join(exp_folder, 'best_weights.h5')

    if os.path.isfile(local_fpath):
        return True

    # download the model and put it in the model_folder
    os.makedirs(exp_folder, exist_ok=True)
    ut.grab_file_url(model_url, download_dir=exp_folder, fname=local_fpath)
    return True
Code Example #17
File: wildbook_manager.py (Project: Erotemic/ibeis)
def find_or_download_wilbook_warfile(ensure=True, redownload=False):
    r"""
    scp [email protected]:/var/lib/tomcat/webapps/ibeis.war \
            ~/Downloads/pachy_ibeis.war
    wget http://dev.wildme.org/ibeis_data_dir/ibeis.war
    """
    #war_url = 'http://dev.wildme.org/ibeis_data_dir/ibeis.war'
    war_url = 'http://springbreak.wildbook.org/tools/latest.war'
    war_fpath = ut.grab_file_url(war_url, appname='ibeis',
                                 ensure=ensure, redownload=redownload,
                                 fname='ibeis.war')
    return war_fpath
Code Example #18
def setup_kp_network(network_str):
    fn = KP_NETWORK_OPTIONS[network_str]['url']
    file_url = join('https://lev.cs.rpi.edu/public/models/', fn)
    network_params_path = ut.grab_file_url(file_url, appname='ibeis')
    network_params = ut.load_cPkl(network_params_path)
    # network_params also includes normalization constants needed for the dataset, and is assumed to be a dictionary
    # with keys mean, std, and params
    network_exp = KP_NETWORK_OPTIONS[network_str]['exp']()
    ll.set_all_param_values(network_exp, network_params['params'])
    X = T.tensor4()
    network_fn = tfn([X], ll.get_output(network_exp, X, deterministic=True))
    return {'mean': network_params['mean'], 'std': network_params['std'], 'networkfn': network_fn,
            'input_size': KP_NETWORK_OPTIONS[network_str]['size']}
Code Example #19
def grab_mnist2():
    """ Follows lasange example """
    train_data_gz = ut.grab_file_url(
        'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')
    train_labels_gz = ut.grab_file_url(
        'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz')
    test_data_gz = ut.grab_file_url(
        'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz')
    test_labels_gz = ut.grab_file_url(
        'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz')

    train_data = load_mnist_images(train_data_gz)
    test_data = load_mnist_images(test_data_gz)

    train_labels = load_mnist_labels(train_labels_gz)
    test_labels = load_mnist_labels(test_labels_gz)

    data = np.vstack((train_data, test_data))
    labels = np.append(train_labels, test_labels)
    metadata = {}
    metadata['splitset'] = ['train'] * len(train_data) + ['test'] * len(test_labels)
    return data, labels, metadata
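load_mnist_images and load_mnist_labels are helpers defined elsewhere in the file; since the docstring points at the Lasagne example, they presumably read the standard gzipped IDX layout. A sketch under that assumption (16-byte header for image files, 8-byte for label files):

import gzip
import numpy as np

def load_mnist_images(gz_fpath):
    with gzip.open(gz_fpath, 'rb') as file_:
        data = np.frombuffer(file_.read(), np.uint8, offset=16)
    # MNIST images are 28x28; reshape to (N, 1, 28, 28) as the Lasagne tutorial does
    return data.reshape(-1, 1, 28, 28)

def load_mnist_labels(gz_fpath):
    with gzip.open(gz_fpath, 'rb') as file_:
        return np.frombuffer(file_.read(), np.uint8, offset=8)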
Code Example #20
def find_or_download_wilbook_warfile(ensure=True, redownload=False):
    r"""
    scp [email protected]:/var/lib/tomcat/webapps/ibeis.war \
            ~/Downloads/pachy_ibeis.war
    wget http://dev.wildme.org/ibeis_data_dir/ibeis.war
    """
    #war_url = 'http://dev.wildme.org/ibeis_data_dir/ibeis.war'
    war_url = 'http://springbreak.wildbook.org/tools/latest.war'
    war_fpath = ut.grab_file_url(war_url,
                                 appname='ibeis',
                                 ensure=ensure,
                                 redownload=redownload,
                                 fname='ibeis.war')
    return war_fpath
Code Example #21
File: install_scripts.py (Project: Erotemic/local)
def install_cockatrice():
    cockatrice_url = 'http://www.woogerworks.com/files/cockatrice.weeklybuilds/Cockatrice-WindowsClient.exe'
    import utool as ut
    fpath = ut.grab_file_url(cockatrice_url)
    # run setup script
    ut.cmd(fpath)
    # press enter a few times
    import win32com.client as w32
    shell = w32.Dispatch("WScript.Shell")
    shell.AppActivate('Cockatrice Setup')
    shell.SendKeys("{ENTER}")
    shell.SendKeys("{ENTER}")
    shell.SendKeys("{ENTER}")
    shell.SendKeys("{ENTER}")
Code Example #22
    def _load_published(self, ibs, species, task_key):
        """
        >>> from wbia.algo.verif.vsone import *  # NOQA
        >>> self = Deployer()
        >>> species = 'zebra_plains'
        >>> task_key = 'match_state'
        """

        base_url = 'https://{remote}/public/models/pairclf'.format(
            **self.publish_info)

        task_fnames = self.published[species]
        fname = task_fnames[task_key]

        grabkw = dict(appname='wbia', check_hash=False, verbose=0)

        meta_url = base_url + '/' + fname + self.meta_suffix
        meta_fpath = ut.grab_file_url(meta_url, **grabkw)  # NOQA

        deploy_url = base_url + '/' + fname
        deploy_fpath = ut.grab_file_url(deploy_url, **grabkw)

        verif = self._make_verifier(ibs, deploy_fpath, task_key)
        return verif
Code Example #23
def setup_te_network(network_str):
    fn = TE_NETWORK_OPTIONS[network_str]['url']
    file_url = join('https://lev.cs.rpi.edu/public/models/', fn)
    network_params_path = ut.grab_file_url(file_url, appname='ibeis')
    network_params = ut.load_cPkl(network_params_path)
    # network_params also includes normalization constants needed for the dataset, and is assumed to be a dictionary
    # with keys mean, std, and params
    network_exp = TE_NETWORK_OPTIONS[network_str]['exp']()
    ll.set_all_param_values(network_exp, network_params['params'])
    X = T.tensor4()
    network_fn = tfn([X], ll.get_output(
        network_exp[-1], X, deterministic=True))
    retdict = {'mean': network_params['mean'], 'std': network_params[
        'std'], 'networkfn': network_fn}
    if any([i in network_str for i in ('upsample', 'jet')]):
        retdict['mod_acc'] = 8
    return retdict
Code Example #24
def setup_kp_network(network_str):
    fn = KP_NETWORK_OPTIONS[network_str]['url']
    file_url = join('https://lev.cs.rpi.edu/public/models/', fn)
    network_params_path = ut.grab_file_url(file_url, appname='ibeis')
    network_params = ut.load_cPkl(network_params_path)
    # network_params also includes normalization constants needed for the dataset, and is assumed to be a dictionary
    # with keys mean, std, and params
    network_exp = KP_NETWORK_OPTIONS[network_str]['exp']()
    ll.set_all_param_values(network_exp, network_params['params'])
    X = T.tensor4()
    network_fn = tfn([X], ll.get_output(network_exp, X, deterministic=True))
    return {
        'mean': network_params['mean'],
        'std': network_params['std'],
        'networkfn': network_fn,
        'input_size': KP_NETWORK_OPTIONS[network_str]['size']
    }
Code Example #25
File: test_pygist.py (Project: hjweide/pygist)
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile       = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths       = utool.list_images(test_image_dir, fullpath=True)   # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results,))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Code Example #26
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir,
                                 fullpath=True)  # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results, ))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
Code Example #27
File: sysres.py (Project: heroinlin/ibeis)
def grab_example_smart_xml_fpath():
    """ Gets smart example xml

    CommandLine:
        python -m ibeis.init.sysres --test-grab_example_smart_xml_fpath

    Example:
        >>> # DISABLE_DOCTEST
        >>> import ibeis
        >>> import os
        >>> smart_xml_fpath = ibeis.sysres.grab_example_smart_xml_fpath()
        >>> os.system('gvim ' + smart_xml_fpath)
        >>> #ut.editfile(smart_xml_fpath)

    """
    smart_xml_url = 'https://www.dropbox.com/s/g1mpjzp57wfnhk6/LWC_000261.xml'
    smart_sml_fpath = ut.grab_file_url(smart_xml_url, ensure=True, appname='ibeis')
    return smart_sml_fpath
Code Example #28
def setup_te_network(network_str):
    fn = TE_NETWORK_OPTIONS[network_str]['url']
    file_url = join('https://lev.cs.rpi.edu/public/models/', fn)
    network_params_path = ut.grab_file_url(file_url, appname='ibeis')
    network_params = ut.load_cPkl(network_params_path)
    # network_params also includes normalization constants needed for the dataset, and is assumed to be a dictionary
    # with keys mean, std, and params
    network_exp = TE_NETWORK_OPTIONS[network_str]['exp']()
    ll.set_all_param_values(network_exp, network_params['params'])
    X = T.tensor4()
    network_fn = tfn([X], ll.get_output(network_exp[-1], X,
                                        deterministic=True))
    retdict = {
        'mean': network_params['mean'],
        'std': network_params['std'],
        'networkfn': network_fn
    }
    if any([i in network_str for i in ('upsample', 'jet')]):
        retdict['mod_acc'] = 8
    return retdict
Code Example #29
def grab_example_smart_xml_fpath():
    """ Gets smart example xml

    CommandLine:
        python -m ibeis.init.sysres --test-grab_example_smart_xml_fpath

    Example:
        >>> # DISABLE_DOCTEST
        >>> import ibeis
        >>> import os
        >>> smart_xml_fpath = ibeis.sysres.grab_example_smart_xml_fpath()
        >>> os.system('gvim ' + smart_xml_fpath)
        >>> #ut.editfile(smart_xml_fpath)

    """
    smart_xml_url = 'https://lev.cs.rpi.edu/public/data/LWC_000261.xml'
    smart_sml_fpath = ut.grab_file_url(smart_xml_url,
                                       ensure=True,
                                       appname='ibeis')
    return smart_sml_fpath
Code Example #30
def load_assigner_classifier(ibs, aid_list, fallback_species='wild_dog'):
    species_with_part = ibs.get_annot_species(aid_list[0])
    species = species_with_part.split('+')[0]
    if species in INMEM_ASSIGNER_MODELS.keys():
        clf = INMEM_ASSIGNER_MODELS[species]
    else:
        if species not in SPECIES_CONFIG_MAP.keys():
            print(
                'WARNING: Assigner called for species %s which does not have an '
                'assigner modelfile specified. Falling back to the model for %s'
                % (species, fallback_species)
            )
            species = fallback_species

        model_url = SPECIES_CONFIG_MAP[species]['model_url']
        model_fpath = ut.grab_file_url(model_url)
        from joblib import load

        clf = load(model_fpath)

    return clf
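The downloaded model file is deserialized with joblib, so whatever estimator was dumped comes back as-is. A self-contained round-trip sketch with a stand-in scikit-learn model (the real assigner's estimator type is not shown in the snippet):

from joblib import dump, load
from sklearn.dummy import DummyClassifier  # stand-in for the real assigner model

clf = DummyClassifier(strategy='most_frequent').fit([[0], [1]], [0, 1])
dump(clf, '/tmp/assigner.joblib')         # how such a model file is produced
clf_again = load('/tmp/assigner.joblib')  # what happens after the download above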
Code Example #31
def pretrained_camvid_segnet_driving():
    import utool as ut
    from os.path import expanduser  # needed for the workdir path below
    from pysseg.harness import Harness
    from pysseg.tasks import CamVid

    workdir = expanduser('~/data/work/camvid/')
    arch = 'segnet_proper'  # not really

    harn = Harness(workdir=workdir, arch=arch)
    harn.task = CamVid()
    harn.test_imdir  = '~/sseg/SegNet/CamVid/test'
    harn.test_gtdir  = '~/sseg/SegNet/CamVid/testannot'
    harn.test_suffix = 'pretrained'
    harn.gpu_num = gpu_util.find_unused_gpu()
    pretrained_weights_fpath = ut.grab_file_url(
        'http://mi.eng.cam.ac.uk/~agk34/resources/SegNet/segnet_weights_driving_webdemo.caffemodel')
    harn.test_weights_fpath = pretrained_weights_fpath
    harn.prepare_test_model()
    # harn.prepare_test_input()
    # Driving demo seems to have a different model...
    # perhaps the labels were trained differently here
    # I THINK THIS ONE WAS TRAINED WITH AN EXTRA CATEGORY
    harn.test_model_fpath = expanduser('~/sseg/SegNet/')
    harn.evaluate()
Code Example #32
def detect(gpath_list, config_filepath, weight_filepath, class_filepath, sensitivity,
           verbose=VERBOSE_SS, use_gpu=True, use_gpu_id=0,
           **kwargs):
    """
    Args:
        gpath_list (list of str): the list of image paths that need proposal candidates

    Kwargs (optional): refer to the SSD documentation for configuration settings

    Returns:
        iter
    """
    def _get_label_name(class_labelmap, label_list):
        if not isinstance(label_list, list):
            label_list = [label_list]
        item_list = class_labelmap.item
        name_list = []
        for label in label_list:
            found = False
            for i in range(len(item_list)):
                if label == item_list[i].label:
                    found = True
                    name_list.append(item_list[i].display_name)
                    break
            assert found
        return name_list

    # Get correct config if specified with shorthand
    config_url = None
    if config_filepath in CONFIG_URL_DICT:
        config_url = CONFIG_URL_DICT[config_filepath]
        config_filepath = ut.grab_file_url(config_url, appname='ibeis',
                                           check_hash=True)

    # Get correct weights if specified with shorthand
    if weight_filepath in CONFIG_URL_DICT:
        if weight_filepath is None and config_url is not None:
            config_url_ = config_url
        else:
            config_url_ = CONFIG_URL_DICT[weight_filepath]
        weight_url = _parse_weight_from_cfg(config_url_)
        weight_filepath = ut.grab_file_url(weight_url, appname='ibeis',
                                            check_hash=True)

    if class_filepath is None:
        class_url = _parse_classes_from_cfg(config_url)
        class_filepath = ut.grab_file_url(class_url, appname='ibeis',
                                          check_hash=True, verbose=verbose)

    # load class labels
    with open(class_filepath, 'r') as class_file:
        class_labelmap = caffe_pb2.LabelMap()
        class_str = str(class_file.read())  # read the labelmap prototxt text
        text_format.Merge(class_str, class_labelmap)

    # Need to convert unicode strings to Python strings to support Boost Python
    # call signatures in caffe
    prototxt_filepath = str(config_filepath)  # alias to Caffe nomenclature
    caffemodel_filepath = str(weight_filepath)  # alias to Caffe nomenclature

    assert exists(prototxt_filepath), 'Specified prototxt file not found'
    assert exists(caffemodel_filepath), 'Specified caffemodel file not found'

    if use_gpu:
        caffe.set_mode_gpu()
        caffe.set_device(use_gpu_id)
    else:
        caffe.set_mode_cpu()

    net = caffe.Net(prototxt_filepath, caffemodel_filepath, caffe.TEST)

    # Determine input size from prototext
    with open(prototxt_filepath, 'r') as prototxt_file:
        # load all lines
        line_list = prototxt_file.readlines()
        # look for dim size lines
        line_list = [line for line in line_list if 'dim:' in line]
        line_list = line_list[:4]
        # Get last line
        line = line_list[-1]
        line_ = line.strip().split(' ')
        # Filter empty spaces
        line_ = [ _ for _ in line_ if len(_) > 0]
        # Get last value on line, which should be the image size
        image_resize = int(line_[-1])
        # Check to make sure
        assert image_resize in [300, 500, 512]
        print('FOUND image_resize = %r' % (image_resize, ))

    # Input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    # Mean pixel value
    transformer.set_mean('data', np.array([104, 117, 123]))
    # The reference model operates on images in [0,255] range instead of [0,1]
    transformer.set_raw_scale('data', 255)
    # The reference model has channels in BGR order instead of RGB
    transformer.set_channel_swap('data', (2, 1, 0))
    # Set batch size to 1 and set testing image size
    net.blobs['data'].reshape(1, 3, image_resize, image_resize)

    results_list_ = []
    for gpath in gpath_list:
        image = caffe.io.load_image(gpath)
        transformed_image = transformer.preprocess('data', image)
        net.blobs['data'].data[...] = transformed_image

        # Forward pass.
        detections = net.forward()['detection_out']

        # Parse the outputs.
        det_label = detections[0, 0, : , 1]
        det_conf  = detections[0, 0, : , 2]
        det_xmin  = detections[0, 0, : , 3]
        det_ymin  = detections[0, 0, : , 4]
        det_xmax  = detections[0, 0, : , 5]
        det_ymax  = detections[0, 0, : , 6]

        # Get detections with confidence higher than 0.6.
        top_indices = [
            i for
            i, conf in enumerate(det_conf)
            if conf >= sensitivity
        ]
        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_labels = _get_label_name(class_labelmap, top_label_indices)
        top_xmin = det_xmin[top_indices]
        top_ymin = det_ymin[top_indices]
        top_xmax = det_xmax[top_indices]
        top_ymax = det_ymax[top_indices]
        height, width = image.shape[:2]

        # Compile results
        result_list_ = []
        zipped = zip(top_xmin, top_ymin, top_xmax, top_ymax, top_labels, top_conf)
        for (xmin, ymin, xmax, ymax, label, conf) in zipped:
            xtl = int(np.around(xmin * width))
            ytl = int(np.around(ymin * height))
            xbr = int(np.around(xmax * width))
            ybr = int(np.around(ymax * height))
            confidence = float(conf)
            result_dict = {
                'xtl'        : xtl,
                'ytl'        : ytl,
                'width'      : xbr - xtl,
                'height'     : ybr - ytl,
                'class'      : label,
                'confidence' : confidence,
            }
            result_list_.append(result_dict)
        results_list_.append(result_list_)

    results_list = zip(gpath_list, results_list_)
    return results_list
Code Example #33
def detect(gpath_list,
           config_filepath,
           weight_filepath,
           class_filepath,
           sensitivity,
           verbose=VERBOSE_SS,
           use_gpu=True,
           use_gpu_id=0,
           **kwargs):
    """
    Args:
        gpath_list (list of str): the list of image paths that need proposal candidates

    Kwargs (optional): refer to the Faster R-CNN documentation for configuration settings

    Returns:
        iter
    """
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    # Get correct config if specified with shorthand
    config_url = None
    if config_filepath in CONFIG_URL_DICT:
        config_url = CONFIG_URL_DICT[config_filepath]
        config_filepath = ut.grab_file_url(config_url,
                                           appname='ibeis',
                                           check_hash=True)

    # Get correct weights if specified with shorthand
    if weight_filepath in CONFIG_URL_DICT:
        if weight_filepath is None and config_url is not None:
            config_url_ = config_url
        else:
            config_url_ = CONFIG_URL_DICT[weight_filepath]
        weight_url = _parse_weight_from_cfg(config_url_)
        weight_filepath = ut.grab_file_url(weight_url,
                                           appname='ibeis',
                                           check_hash=True)

    if class_filepath is None:
        class_url = _parse_classes_from_cfg(config_url)
        class_filepath = ut.grab_file_url(class_url,
                                          appname='ibeis',
                                          check_hash=True,
                                          verbose=verbose)
    class_list = _parse_class_list(class_filepath)

    # Need to convert unicode strings to Python strings to support Boost Python
    # call signatures in caffe
    prototxt_filepath = str(config_filepath)  # alias to Caffe nomenclature
    caffemodel_filepath = str(weight_filepath)  # alias to Caffe nomenclature

    assert exists(prototxt_filepath), 'Specified prototxt file not found'
    assert exists(caffemodel_filepath), 'Specified caffemodel file not found'

    if use_gpu:
        caffe.set_mode_gpu()
        caffe.set_device(use_gpu_id)
        cfg.GPU_ID = use_gpu_id
    else:
        caffe.set_mode_cpu()

    net = caffe.Net(prototxt_filepath, caffemodel_filepath, caffe.TEST)

    # Warm-up network on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in range(2):
        _, _ = im_detect(net, im)

    results_list_ = []
    for gpath in gpath_list:
        image = cv2.imread(gpath)
        score_list, bbox_list = im_detect(net, image)

        # Compile results
        result_list_ = []
        for class_index, class_name in enumerate(class_list[1:], start=1):
            # class_index starts at 1 because index 0 is the skipped background class
            class_boxes = bbox_list[:, 4 * class_index:4 * (class_index + 1)]
            class_scores = score_list[:, class_index]
            dets_list = np.hstack((class_boxes, class_scores[:, np.newaxis]))
            dets_list = dets_list.astype(np.float32)
            # # Perform NMS
            # keep_list = nms(dets_list, nms_sensitivity)
            # dets_list = dets_list[keep_list, :]
            # Perform sensitivity check
            keep_list = np.where(dets_list[:, -1] >= sensitivity)[0]
            dets_list = dets_list[keep_list, :]
            for (xtl, ytl, xbr, ybr, conf) in dets_list:
                xtl = int(np.around(xtl))
                ytl = int(np.around(ytl))
                xbr = int(np.around(xbr))
                ybr = int(np.around(ybr))
                confidence = float(conf)
                result_dict = {
                    'xtl': xtl,
                    'ytl': ytl,
                    'width': xbr - xtl,
                    'height': ybr - ytl,
                    'class': class_name,
                    'confidence': confidence,
                }
                result_list_.append(result_dict)
        results_list_.append(result_list_)

    results_list = zip(gpath_list, results_list_)
    return results_list
Code Example #34
def classify(vector_list, weight_filepath, verbose=VERBOSE_SVM, **kwargs):
    """
    Args:
        vector_list (list of ndarray): the list of feature vectors that need classifying

    Returns:
        iter
    """
    import multiprocessing
    import numpy as np

    # Get correct weight if specified with shorthand
    if weight_filepath in CONFIG_URL_DICT:
        weight_url = CONFIG_URL_DICT[weight_filepath]
        if weight_url.endswith('.zip'):
            weight_filepath = ut.grab_zipped_url(weight_url, appname='ibeis')
        else:
            weight_filepath = ut.grab_file_url(weight_url, appname='ibeis',
                                               check_hash=True)

    # Get ensemble
    is_ensemble = isdir(weight_filepath)
    if is_ensemble:
        weight_filepath_list = sorted([
            join(weight_filepath, filename) for filename in listdir(weight_filepath)
            if isfile(join(weight_filepath, filename))
        ])
    else:
        weight_filepath_list = [weight_filepath]
    num_weights = len(weight_filepath_list)
    assert num_weights > 0

    # Form dictionaries
    num_vectors = len(vector_list)
    index_list = list(range(num_vectors))

    # Generate parallelized wrapper
    OLD = False
    if is_ensemble and OLD:
        vectors_list = [ vector_list for _ in range(num_weights) ]
        args_list = zip(weight_filepath_list, vectors_list)
        nTasks = num_weights
        print('Processing ensembles in parallel using %d ensembles' % (num_weights, ))
    else:
        num_cpus = multiprocessing.cpu_count()
        vector_batch = int(np.ceil(float(num_vectors) / num_cpus))
        vector_rounds = int(np.ceil(float(num_vectors) / vector_batch))

        args_list = []
        for vector_round in range(vector_rounds):
            start_index = vector_round * vector_batch
            stop_index = (vector_round + 1) * vector_batch
            assert start_index < num_vectors
            stop_index = min(stop_index, num_vectors)
            # print('Slicing index range: [%r, %r)' % (start_index, stop_index, ))

            # Slice gids and get feature data
            index_list_ = list(range(start_index, stop_index))
            vector_list_ = vector_list[start_index: stop_index]
            assert len(index_list_) == len(vector_list_)
            for weight_filepath in weight_filepath_list:
                args = (weight_filepath, vector_list_, index_list_)
                args_list.append(args)

        nTasks = len(args_list)
        print('Processing vectors in parallel using vector_batch = %r' % (vector_batch, ))

    # Perform inference
    classify_iter = ut.generate2(classify_helper, args_list, nTasks=nTasks,
                                 ordered=True, force_serial=False)

    # Classify with SVM for each image vector
    score_dict = { index: [] for index in index_list }
    class_dict = { index: [] for index in index_list }
    for score_dict_, class_dict_ in classify_iter:
        for index in index_list:
            if index in score_dict_:
                score_dict[index] += score_dict_[index]
            if index in class_dict_:
                class_dict[index] += class_dict_[index]

    # Organize and compute mode and average for class and score
    for index in index_list:
        score_list_ = score_dict[index]
        class_list_ = class_dict[index]
        score_ = sum(score_list_) / len(score_list_)
        class_ = max(set(class_list_), key=class_list_.count)
        class_ = 'positive' if int(class_) == 1 else 'negative'
        yield score_, class_
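The batching arithmetic splits the vectors into at most num_cpus contiguous slices: for example, 103 vectors on 8 CPUs give vector_batch = ceil(103 / 8) = 13 and vector_rounds = ceil(103 / 13) = 8, with the final slice clipped to the 12 leftover vectors. The slicing in isolation:

import math

num_vectors, num_cpus = 103, 8
vector_batch = math.ceil(num_vectors / num_cpus)       # 13
vector_rounds = math.ceil(num_vectors / vector_batch)  # 8
slices = [(r * vector_batch, min((r + 1) * vector_batch, num_vectors))
          for r in range(vector_rounds)]
assert slices[-1] == (91, 103)  # the last round holds the 12 leftover vectors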
Code Example #35
def get_injured_sharks():
    """
    >>> from wbia.scripts.getshark import *  # NOQA
    """
    import requests

    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury',
        'net',
        'hook',
        'trunc',
        'damage',
        'scar',
        'nicks',
        'bite',
    ]

    injury_keys = [
        key for key in key_list if any([pat in key for pat in injury_patterns])
    ]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, 'vals')
    logger.info(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
    logger.info(ut.repr3(nice_key_hist))

    key_to_urls = {
        key: ut.take_column(vals, 'url')
        for key, vals in keyed_images.items()
    }
    overlaps = {}
    import itertools

    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            # logger.info('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    logger.info('num_all = %r' % (num_all, ))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            cat = 'other_injury'
            cat_to_keys[cat].append(key)

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    logger.info(ut.repr3(cat_to_keys))
    logger.info(ut.repr3(cat_hist))

    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items()
                       for val in vals])

    # ingestset = {
    #    '__class__': 'ImageSet',
    #    'images': ut.ddict(dict)
    # }
    # for key, key_imgs in keyed_images.items():
    #    for imgdict in key_imgs:
    #        url = imgdict['url']
    #        encid = imgdict['correspondingEncounterNumber']
    #        # Make structure
    #        encdict = encounters[encid]
    #        encdict['__class__'] = 'Encounter'
    #        imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #        imgdict['__class__'] = 'Image'
    #        cat = key_to_cat[key]
    #        annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #        annotdict['__class__'] = 'Annotation'

    #        # Ensure structures exist
    #        encdict['images'] = encdict.get('images', [])
    #        imgdict['annots'] = imgdict.get('annots', [])

    #        # Add an image to this encounter
    #        encdict['images'].append(imgdict)
    #        # Add an annotation to this image
    #        imgdict['annots'].append(annotdict)

    # # http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    # get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    # resp = requests.get(get_enc_url)
    # logger.info(ut.repr3(encdict))
    # logger.info(ut.repr3(encounters))

    # Download the files to the local disk
    # fpath_list =

    all_urls = ut.unique(
        ut.take_column(
            ut.flatten(
                ut.dict_subset(keyed_images,
                               ut.flatten(cat_to_keys.values())).values()),
            'url',
        ))

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA

    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(zip(all_urls, fname_list),
                                  lbl='downloading imgs',
                                  freq=1):
        fpath = ut.grab_file_url(url,
                                 download_dir=dldir,
                                 fname=fname,
                                 verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    # url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            # url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'
    # Combine duplicate tags

    hashid_list = [
        ut.get_file_uuid(fpath_, stride=8)
        for fpath_ in ut.ProgIter(fpath_list, bs=True)
    ]
    groupxs = ut.group_indices(hashid_list)[1]

    # Group properties by duplicate images
    # groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [
        ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
        for info_ in ut.apply_grouping(info_list, groupxs)
    ]

    encid_list_ = [
        ut.unique(info_['correspondingEncounterNumber'])[0]
        for info_ in info_list_
    ]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists({
        'gpath': fpath_list_,
        'url': url_list_,
        'encid': encid_list_,
        'key': keys_list_,
        'cat': cats_list_,
    })

    # for info_ in ut.apply_grouping(info_list, groupxs):
    #    info = ut.dict_accum(*info_)
    #    info = ut.map_dict_vals(ut.flatten, info)
    #    x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #    if len(x) > 1:
    #        info = info.copy()
    #        del info['keys']
    #        logger.info(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import wbia

    ibs = wbia.opendb('WS_Injury', allow_newdir=True)

    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list

    failed_flags = ut.flag_None_items(clist['gid'])
    logger.info('# failed %s' % (sum(failed_flags), ))
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    # ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    # ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(clist['gid'],
                                                 adjust_percent=0.01,
                                                 tags_list=clist['tags'])
        aid_list

    import wbia.plottool as pt
    from wbia import core_annots

    pt.qt4ensure()
    # annots = ibs.annots()
    # aids = [1, 2]
    # ibs.depc_annot.get('hog', aids , 'hog')
    # ibs.depc_annot.get('chip', aids, 'img')
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    # logger.info(len(groupxs))

    # if False:
    # groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    # logger.info(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #    # FIX
    #    for fpath, fname in zip(fpath_list, fname_list):
    #        if ut.checkpath(fpath):
    #            ut.move(fpath, join(dirname(fpath), fname))
    #            logger.info('fpath = %r' % (fpath,))

    # import wbia
    # from wbia.dbio import ingest_dataset
    # dbdir = wbia.sysres.lookup_dbdir('WS_ALL')
    # self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import wbia.plottool as pt
        import pandas as pd
        import numpy as np

        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import wbia.plottool as pt

            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [
                lbl.set_horizontalalignment('left')
                for lbl in ax.get_xticklabels()
            ]

            # xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            # pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [
                lbl.set_horizontalalignment('right')
                for lbl in ax.get_yticklabels()
            ]
            [
                lbl.set_verticalalignment('center')
                for lbl in ax.get_yticklabels()
            ]
            # [lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        # df = df.sort(axis=0)
        # df = df.sort(axis=1)

        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        vmax = mat[(1 - np.eye(len(mat))).astype(bool)].max()  # np.bool is removed in modern NumPy
        import matplotlib.colors

        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid(False)
        label_ticks(label_texts)
        fig.tight_layout()

    # overlap_df = pd.DataFrame.from_dict(overlap_img_list)

    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import wbia.plottool as pt

    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool as vt

    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """

        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
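        # Compute the HOG descriptor and a visualization image: 8 orientation
        # bins over 16x16-pixel cells, no block normalization (1x1 cells per
        # block).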
        fd, hog_image = hog(
            image,
            orientations=8,
            pixels_per_cell=(16, 16),
            cells_per_block=(1, 1),
            visualize=True,  # spelled 'visualise' in very old scikit-image
        )

        fig, (ax1, ax2) = pt.plt.subplots(1,
                                          2,
                                          figsize=(8, 4),
                                          sharex=True,
                                          sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box')  # 'box-forced' was removed in newer matplotlib

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                        in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box')
        pt.plt.show()
Code example #36
0
def clean_mitex():
    # if mitex does not install correctly
    install_dir = r'C:\Program Files (x86)\MiKTeX 2.9'
    ut.delete(install_dir)


def install_mathtools():
    """

    wget http://mirrors.ctan.org/install/macros/latex/contrib/mathtools.tds.zip
    mkdir tmp2
    #7z x -o"tmp2" mathtools.tds.zip
    7z x -o"C:/Program Files (x86)/MiKTeX 2.9" mathtools.tds.zip

    """
    pass


if __name__ == '__main__':
    """
    python %USERPROFILE%/local/windows/init_mitex.py
    """
    assert ut.WIN32
    url = 'http://mirrors.ctan.org/systems/win32/miktex/setup/basic-miktex-2.9.5105.exe'
    fpath = ut.grab_file_url(url)
    ut.cmd(fpath)



Code example #37
0
    def _train_setup(dark, voc_path, weight_path):

        class_list = []
        annotations_path = join(voc_path, "Annotations")
        imagesets_path = join(voc_path, "ImageSets")
        jpegimages_path = join(voc_path, "JPEGImages")
        label_path = join(voc_path, "labels")

        ut.delete(label_path)
        ut.ensuredir(label_path)

        def _convert_annotation(image_id):
            import xml.etree.ElementTree as ET

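            # Convert a VOC box (xmin, xmax, ymin, ymax), in pixels, into the
            # YOLO training format: box center (x, y) plus width and height,
            # each normalized by the image size so all values lie in [0, 1].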
            def _convert(size, box):
                dw = 1.0 / size[0]
                dh = 1.0 / size[1]
                x = (box[0] + box[1]) / 2.0
                y = (box[2] + box[3]) / 2.0
                w = box[1] - box[0]
                h = box[3] - box[2]
                x = x * dw
                w = w * dw
                y = y * dh
                h = h * dh
                return (x, y, w, h)

            with open(join(label_path, "%s.txt" % (image_id,)), "w") as out_file:
                with open(join(annotations_path, "%s.xml" % (image_id,)), "r") as in_file:
                    tree = ET.parse(in_file)
                    root = tree.getroot()
                    size = root.find("size")
                    w = int(size.find("width").text)
                    h = int(size.find("height").text)

                    for obj in root.iter("object"):
                        if int(obj.find("difficult").text) == 1:
                            continue
                        class_ = obj.find("name").text
                        if class_ not in class_list:
                            class_list.append(class_)
                        class_id = class_list.index(class_)
                        xmlbox = obj.find("bndbox")
                        b = tuple(
                            map(
                                float,
                                [
                                    xmlbox.find("xmin").text,
                                    xmlbox.find("xmax").text,
                                    xmlbox.find("ymin").text,
                                    xmlbox.find("ymax").text,
                                ],
                            )
                        )
                        bb = _convert((w, h), b)
                        bb_str = " ".join([str(_) for _ in bb])
                        out_file.write("%s %s\n" % (class_id, bb_str))

        num_images = 0
        print("[pydarknet py train] Processing manifest...")
        manifest_filename = join(voc_path, "manifest.txt")
        with open(manifest_filename, "w") as manifest:
            # for dataset_name in ['train', 'val', 'test']:
            for dataset_name in ["train", "val"]:
                dataset_filename = join(imagesets_path, "Main", "%s.txt" % dataset_name)
                with open(dataset_filename, "r") as dataset:
                    image_id_list = dataset.read().strip().split()

                for image_id in image_id_list:
                    print("[pydarknet py train]     processing: %r" % (image_id,))
                    image_filepath = abspath(join(jpegimages_path, "%s.jpg" % image_id))
                    if exists(image_filepath):
                        manifest.write("%s\n" % (image_filepath,))
                        _convert_annotation(image_id)
                        num_images += 1

        print("[pydarknet py train] Processing config and pretrained weights...")
        # Load default config and pretrained weights
        config_filepath = ut.grab_file_url(DEFAULT_CONFIG_TEMPLATE_URL, appname="pydarknet")
        with open(config_filepath, "r") as config:
            config_template_str = config.read()

        config_filename = basename(config_filepath).replace(".template.", ".%d." % (len(class_list),))
        config_filepath = join(weight_path, config_filename)
        with open(config_filepath, "w") as config:
            replace_list = [
                ("_^_OUTPUT_^_", SIDES * SIDES * (BOXES * 5 + len(class_list))),
                ("_^_CLASSES_^_", len(class_list)),
                ("_^_SIDES_^_", SIDES),
                ("_^_BOXES_^_", BOXES),
            ]
            for needle, replacement in replace_list:
                config_template_str = config_template_str.replace(needle, str(replacement))
            config.write(config_template_str)

        class_filepath = "%s.classes" % (config_filepath,)
        with open(class_filepath, "w") as class_file:
            for class_ in class_list:
                class_file.write("%s\n" % (class_,))

        weight_filepath = ut.grab_file_url(DEFAULT_PRETRAINED_URL, appname="pydarknet")
        dark._load(config_filepath, weight_filepath)

        print("class_list = %r" % (class_list,))
        print("num_images = %r" % (num_images,))

        return manifest_filename, num_images, config_filepath, class_filepath
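
As a quick sanity check of the VOC-to-YOLO conversion used in _train_setup
above, here is a standalone sketch; the function name and the sample numbers
are hypothetical, mirroring the logic of the inner _convert helper.

def voc_to_yolo(size, box):
    # size = (width, height); box = (xmin, xmax, ymin, ymax) in pixels
    dw, dh = 1.0 / size[0], 1.0 / size[1]
    x = (box[0] + box[1]) / 2.0 * dw  # normalized box-center x
    y = (box[2] + box[3]) / 2.0 * dh  # normalized box-center y
    w = (box[1] - box[0]) * dw        # normalized width
    h = (box[3] - box[2]) * dh        # normalized height
    return (x, y, w, h)

# A 100x50 box centered at (150, 125) in a 640x480 image:
print(voc_to_yolo((640, 480), (100, 200, 100, 150)))
# -> approximately (0.2344, 0.2604, 0.1562, 0.1042)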
Code example #38
0
def detect(gpath_list,
           config_filepath,
           weight_filepath,
           class_filepath,
           sensitivity,
           verbose=VERBOSE_LN,
           **kwargs):
    """Detect image filepaths with lightnet.

    Args:
        gpath_list (list of str): the list of image paths that need proposal candidates

    Kwargs (optional): refer to the Lightnet documentation for configuration settings

    Returns:
        iter
    """
    assert config_filepath is None, 'lightnet does not have a config file'

    # Get correct weight if specified with shorthand
    weight_url = None
    if weight_filepath in WEIGHT_URL_DICT:
        weight_url = WEIGHT_URL_DICT[weight_filepath]
        weight_filepath = ut.grab_file_url(weight_url,
                                           appname='ibeis',
                                           check_hash=True)

    if class_filepath in WEIGHT_URL_DICT:
        if class_filepath is None and weight_url is not None:
            weight_url_ = weight_url
        else:
            weight_url_ = WEIGHT_URL_DICT[weight_filepath]
        class_url = _parse_classes_from_weights(weight_url_)
        class_filepath = ut.grab_file_url(class_url,
                                          appname='ibeis',
                                          check_hash=True,
                                          verbose=verbose)

    assert exists(weight_filepath)
    weight_filepath = ut.truepath(weight_filepath)
    assert exists(class_filepath)
    class_filepath = ut.truepath(class_filepath)

    class_list = _parse_class_list(class_filepath)

    network_size = (416, 416)
    conf_thresh = sensitivity
    nms_thresh = 1.0  # Turn off NMS
    network = _create_network(weight_filepath, class_list, conf_thresh,
                              nms_thresh, network_size)

    # Execute detector for each image
    results_list_ = []
    for gpath in tqdm(gpath_list):
        image, output_list = _detect(network, gpath, network_size)
        output_list = output_list[0]

        result_list_ = []
        for output in list(output_list):
            xtl = int(np.around(float(output.x_top_left)))
            ytl = int(np.around(float(output.y_top_left)))
            xbr = int(np.around(float(output.x_top_left + output.width)))
            ybr = int(np.around(float(output.y_top_left + output.height)))
            class_ = output.class_label
            conf = float(output.confidence)
            result_dict = {
                'xtl': xtl,
                'ytl': ytl,
                'width': xbr - xtl,
                'height': ybr - ytl,
                'class': class_,
                'confidence': conf,
            }
            result_list_.append(result_dict)
        results_list_.append(result_list_)

    if len(results_list_) != len(gpath_list):
        raise ValueError('Lightnet did not return valid data')

    results_list = zip(gpath_list, results_list_)
    return results_list
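
A minimal usage sketch for the detect() above, assuming shorthand keys: the
'default' tag and the image paths are hypothetical, standing in for any key
actually present in WEIGHT_URL_DICT.

gpath_list = ['/path/to/image1.jpg', '/path/to/image2.jpg']  # hypothetical
results = detect(
    gpath_list,
    config_filepath=None,       # lightnet takes no config file here
    weight_filepath='default',  # hypothetical shorthand in WEIGHT_URL_DICT
    class_filepath='default',   # resolved from the same shorthand
    sensitivity=0.4,
)
for gpath, detections in results:
    print(gpath, [(d['class'], d['confidence']) for d in detections])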
Code example #39
0
File: densenet.py Project: Hrmirzadeh/wildbook-ia
def test(
    gpath_list,
    classifier_weight_filepath=None,
    return_dict=False,
    multiclass=False,
    **kwargs,
):
    from wbia.detecttools.directory import Directory

    # Get correct weight if specified with shorthand
    archive_url = None

    ensemble_index = None
    if classifier_weight_filepath is not None and ':' in classifier_weight_filepath:
        assert classifier_weight_filepath.count(':') == 1
        classifier_weight_filepath, ensemble_index = classifier_weight_filepath.split(
            ':')
        ensemble_index = int(ensemble_index)

    if classifier_weight_filepath in ARCHIVE_URL_DICT:
        archive_url = ARCHIVE_URL_DICT[classifier_weight_filepath]
        archive_path = ut.grab_file_url(archive_url,
                                        appname='wbia',
                                        check_hash=True)
    else:
        logger.info('classifier_weight_filepath %r not recognized' %
                    (classifier_weight_filepath, ))
        raise RuntimeError('classifier_weight_filepath %r not recognized' %
                           (classifier_weight_filepath, ))

    assert os.path.exists(archive_path)
    archive_path = ut.truepath(archive_path)

    # str.strip('.zip') strips characters, not the suffix; use splitext instead
    ensemble_path = os.path.splitext(archive_path)[0]
    if not os.path.exists(ensemble_path):
        ut.unarchive_file(archive_path, output_dir=ensemble_path)

    assert os.path.exists(ensemble_path)
    direct = Directory(ensemble_path,
                       include_file_extensions=['weights'],
                       recursive=True)
    weights_path_list = direct.files()
    weights_path_list = sorted(weights_path_list)
    assert len(weights_path_list) > 0

    kwargs.pop('classifier_algo', None)

    logger.info('Using weights in the ensemble, index %r: %s ' %
                (ensemble_index, ut.repr3(weights_path_list)))
    result_list = test_ensemble(
        gpath_list,
        weights_path_list,
        classifier_weight_filepath,
        ensemble_index,
        multiclass=multiclass,
        **kwargs,
    )
    for result in result_list:
        best_key = None
        best_score = -1.0
        for key, score in result.items():
            if score > best_score:
                best_key = key
                best_score = score
        assert best_score >= 0.0 and best_key is not None
        if return_dict:
            yield best_score, best_key, result
        else:
            yield best_score, best_key
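
Since test() above is a generator, a caller iterates it in lockstep with
gpath_list. A minimal sketch, assuming a hypothetical 'seaturtle' key in
ARCHIVE_URL_DICT; the optional ':0' suffix selects one ensemble member.

gpath_list = ['/path/to/query.jpg']  # hypothetical
for score, label in test(gpath_list, classifier_weight_filepath='seaturtle:0'):
    print(label, score)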
Code example #40
0
File: lightnet.py Project: yeniherdiyeni/wildbook-ia
def detect(
    gpath_list,
    config_filepath=None,
    weight_filepath=None,
    classes_filepath=None,
    sensitivity=0.0,
    verbose=VERBOSE_LN,
    flip=False,
    batch_size=192,
    **kwargs,
):
    """Detect image filepaths with lightnet.

    Args:
        gpath_list (list of str): the list of image paths that need proposal candidates

    Kwargs (optional): refer to the Lightnet documentation for configuration settings

    Returns:
        iter
    """
    # Get correct weight if specified with shorthand
    config_url = None
    if config_filepath in CONFIG_URL_DICT:
        config_url = CONFIG_URL_DICT[config_filepath]
        config_filepath = ut.grab_file_url(config_url,
                                           appname='lightnet',
                                           check_hash=True)

    # Get correct weights if specified with shorthand
    if weight_filepath in CONFIG_URL_DICT:
        if weight_filepath is None and config_url is not None:
            config_url_ = config_url
        else:
            config_url_ = CONFIG_URL_DICT[weight_filepath]
        weight_url = _parse_weights_from_cfg(config_url_)
        weight_filepath = ut.grab_file_url(weight_url,
                                           appname='lightnet',
                                           check_hash=True)

    assert exists(config_filepath)
    config_filepath = ut.truepath(config_filepath)
    assert exists(weight_filepath)
    weight_filepath = ut.truepath(weight_filepath)

    conf_thresh = sensitivity
    nms_thresh = 1.0  # Turn off NMS

    params = _create_network(config_filepath, weight_filepath, conf_thresh,
                             nms_thresh)

    # Execute detector for each image
    results_list_ = []
    for gpath_batch_list in tqdm(list(ut.ichunks(gpath_list, batch_size))):
        try:
            result_list, img_sizes = _detect(params,
                                             gpath_batch_list,
                                             flip=flip)
        except cv2.error:
            result_list, img_sizes = [], []

        for result, img_size in zip(result_list, img_sizes):
            img_w, img_h = img_size

            result_list_ = []
            for output in list(result):
                xtl = int(np.around(float(output.x_top_left)))
                ytl = int(np.around(float(output.y_top_left)))
                xbr = int(np.around(float(output.x_top_left + output.width)))
                ybr = int(np.around(float(output.y_top_left + output.height)))
                width = xbr - xtl
                height = ybr - ytl
                class_ = output.class_label
                conf = float(output.confidence)
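                # For batches run on horizontally flipped images, mirror the
                # x-extent back into the original frame (y is unaffected).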
                if flip:
                    xtl = img_w - xbr
                result_dict = {
                    'xtl': xtl,
                    'ytl': ytl,
                    'width': width,
                    'height': height,
                    'class': class_,
                    'confidence': conf,
                }
                result_list_.append(result_dict)
            results_list_.append(result_list_)

    if len(results_list_) != len(gpath_list):
        raise ValueError('Lightnet did not return valid data')

    results_list = zip(gpath_list, results_list_)
    return results_list
Code example #41
0
# Hypothetical reconstruction of the truncated function header: converts a
# VOC-style (xmin, ymin, xmax, ymax) box into an (xtl, ytl, w, h) tuple.
def _convert_bbox(xmin, ymin, xmax, ymax):  # hypothetical name/signature
    xtl = xmin
    ytl = ymin
    w = xmax - xmin
    h = ymax - ymin
    return (
        xtl,
        ytl,
        w,
        h,
    )


test_uuid_list = []
for model_tag in TEST_URLS:
    json_url = TEST_URLS[model_tag]
    json_filepath = ut.grab_file_url(json_url,
                                     appname='wbia_2d_orientation',
                                     check_hash=False)

    with open(json_filepath, 'r') as file:
        json_data = yaml.load(file, Loader=yaml.FullLoader)
        annotation_data = json_data['annotations']
        test_uuid_list_ = ut.take_column(annotation_data, 'uuid')
        print(len(set(test_uuid_list_)))
        test_uuid_list += test_uuid_list_
assert len(test_uuid_list) == len(set(test_uuid_list))
test_uuid_list = [uuid.UUID(test_uuid) for test_uuid in test_uuid_list]
test_uuid_set = set(test_uuid_list)

################################################################################

desired_species_list = [
Code example #42
0
File: ingest_database.py Project: heroinlin/ibeis
def ingest_serengeti_mamal_cameratrap(species):
    """
    Downloads data from Serengeti dryad server

    References:
        http://datadryad.org/resource/doi:10.5061/dryad.5pt92
        Swanson AB, Kosmala M, Lintott CJ, Simpson RJ, Smith A, Packer C (2015)
        Snapshot Serengeti, high-frequency annotated camera trap images of 40
        mammalian species in an African savanna. Scientific Data 2: 150026.
        http://dx.doi.org/10.1038/sdata.2015.26
        Swanson AB, Kosmala M, Lintott CJ, Simpson RJ, Smith A, Packer C (2015)
        Data from: Snapshot Serengeti, high-frequency annotated camera trap
        images of 40 mammalian species in an African savanna. Dryad Digital
        Repository. http://dx.doi.org/10.5061/dryad.5pt92

    Args:
        species (?):

    CommandLine:
        python -m ibeis.dbio.ingest_database --test-ingest_serengeti_mamal_cameratrap --species zebra_plains
        python -m ibeis.dbio.ingest_database --test-ingest_serengeti_mamal_cameratrap --species cheetah

    Example:
        >>> # SCRIPT
        >>> from ibeis.dbio.ingest_database import *  # NOQA
        >>> import ibeis
        >>> species = ut.get_argval('--species', type_=str, default=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> # species = ut.get_argval('--species', type_=str, default='cheetah')
        >>> result = ingest_serengeti_mamal_cameratrap(species)
        >>> print(result)
    """
    # https://snapshotserengeti.s3.msi.umn.edu/
    import ibeis

    if species is None:
        code = 'ALL'
    elif species == 'zebra_plains':
        code = 'PZ'
    elif species == 'cheetah':
        code = 'CHTH'
    else:
        raise NotImplementedError()

    if species == 'zebra_plains':
        serengeti_species = 'zebra'
    else:
        serengeti_species = species

    print('species = %r' % (species,))
    print('serengeti_species = %r' % (serengeti_species,))

    dbname = code + '_Serengeti'
    print('dbname = %r' % (dbname,))
    dbdir = ut.ensuredir(join(ibeis.sysres.get_workdir(), dbname))
    print('dbdir = %r' % (dbdir,))
    image_dir = ut.ensuredir(join(dbdir, 'images'))

    base_url = 'http://datadryad.org/bitstream/handle/10255'
    all_images_url         = base_url + '/dryad.86392/all_images.csv'
    consensus_metadata_url = base_url + '/dryad.86348/consensus_data.csv'
    search_effort_url      = base_url + '/dryad.86347/search_effort.csv'
    gold_standard_url      = base_url + '/dryad.76010/gold_standard_data.csv'

    all_images_fpath         = ut.grab_file_url(all_images_url, download_dir=dbdir)
    consensus_metadata_fpath = ut.grab_file_url(consensus_metadata_url, download_dir=dbdir)
    search_effort_fpath      = ut.grab_file_url(search_effort_url, download_dir=dbdir)
    gold_standard_fpath      = ut.grab_file_url(gold_standard_url, download_dir=dbdir)

    print('all_images_fpath         = %r' % (all_images_fpath,))
    print('consensus_metadata_fpath = %r' % (consensus_metadata_fpath,))
    print('search_effort_fpath      = %r' % (search_effort_fpath,))
    print('gold_standard_fpath      = %r' % (gold_standard_fpath,))

    def read_csv(csv_fpath):
        import utool as ut
        csv_text = ut.read_from(csv_fpath)
        csv_lines = csv_text.split('\n')
        print(ut.list_str(csv_lines[0:2]))
        csv_data = [[field.strip('"').strip('\r') for field in line.split(',')]
                    for line in csv_lines if len(line) > 0]
        csv_header = csv_data[0]
        csv_data = csv_data[1:]
        return csv_data, csv_header

    def download_image_urls(image_url_info_list):
        # Find ones that we already have
        print('Requested %d downloaded images' % (len(image_url_info_list)))
        full_gpath_list = [join(image_dir, basename(gpath)) for gpath in image_url_info_list]
        exists_list = [ut.checkpath(gpath) for gpath in full_gpath_list]
        image_url_info_list_ = ut.compress(image_url_info_list, ut.not_list(exists_list))
        print('Already have %d/%d downloaded images' % (
            len(image_url_info_list) - len(image_url_info_list_), len(image_url_info_list)))
        print('Need to download %d images' % (len(image_url_info_list_)))
        #import sys
        #sys.exit(0)
        # Download the rest
        imgurl_prefix = 'https://snapshotserengeti.s3.msi.umn.edu/'
        image_url_list = [imgurl_prefix + suffix for suffix in image_url_info_list_]
        for img_url in ut.ProgressIter(image_url_list, lbl='Downloading image'):
            ut.grab_file_url(img_url, download_dir=image_dir)
        return full_gpath_list

    # Data contains information about which events have which animals
    if False:
        species_class_csv_data, species_class_header = read_csv(gold_standard_fpath)
        species_class_eventid_list    = ut.get_list_column(species_class_csv_data, 0)
        #gold_num_species_annots_list = ut.get_list_column(gold_standard_csv_data, 2)
        species_class_species_list    = ut.get_list_column(species_class_csv_data, 2)
        #gold_count_list              = ut.get_list_column(gold_standard_csv_data, 3)
    else:
        species_class_csv_data, species_class_header = read_csv(consensus_metadata_fpath)
        species_class_eventid_list    = ut.get_list_column(species_class_csv_data, 0)
        species_class_species_list    = ut.get_list_column(species_class_csv_data, 7)

    # Find the zebra events
    serengeti_species_set = sorted(list(set(species_class_species_list)))
    print('serengeti_species_hist = %s' %
          ut.dict_str(ut.dict_hist(species_class_species_list), key_order_metric='val'))
    #print('serengeti_species_set = %s' % (ut.list_str(serengeti_species_set),))

    assert serengeti_species in serengeti_species_set, 'not a known serengeti species'
    species_class_chosen_idx_list = ut.list_where(
        [serengeti_species == species_ for species_ in species_class_species_list])
    chosen_eventid_list = ut.take(species_class_eventid_list, species_class_chosen_idx_list)

    print('Number of chosen species:')
    print(' * len(species_class_chosen_idx_list) = %r' % (len(species_class_chosen_idx_list),))
    print(' * len(chosen_eventid_list) = %r' % (len(chosen_eventid_list),))

    # Read info about which events have which images
    images_csv_data, image_csv_header = read_csv(all_images_fpath)
    capture_event_id_list = ut.get_list_column(images_csv_data, 0)
    image_url_info_list = ut.get_list_column(images_csv_data, 1)
    # Group photos by eventid
    eventid_to_photos = ut.group_items(image_url_info_list, capture_event_id_list)

    # Filter to only chosens
    unflat_chosen_url_infos = ut.dict_take(eventid_to_photos, chosen_eventid_list)
    chosen_url_infos = ut.flatten(unflat_chosen_url_infos)
    image_url_info_list = chosen_url_infos
    chosen_path_list = download_image_urls(chosen_url_infos)

    ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=True)
    gid_list_ = ibs.add_images(chosen_path_list, auto_localize=False)  # NOQA

    # Attempt to automatically detect the annotations
    #aids_list = ibs.detect_random_forest(gid_list_, species)
    #aids_list

    #if False:
    #    # remove non-zebra photos
    #    from os.path import basename
    #    base_gname_list = list(map(basename, zebra_url_infos))
    #    all_gname_list = ut.list_images(image_dir)
    #    nonzebra_gname_list = ut.setdiff_ordered(all_gname_list, base_gname_list)
    #    nonzebra_gpath_list = ut.fnames_to_fpaths(nonzebra_gname_list, image_dir)
    #    ut.remove_fpaths(nonzebra_gpath_list)
    return ibs
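
The read_csv helper above splits on every comma, which works for these Dryad
files but would break on quoted fields containing commas; a sketch of an
equivalent reader built on the standard csv module could look like this
(same (data, header) return convention as read_csv).

import csv

def read_csv_robust(csv_fpath):
    # Parse with the stdlib csv module; returns (rows, header).
    with open(csv_fpath, newline='') as file_:
        rows = [row for row in csv.reader(file_) if row]
    return rows[1:], rows[0]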
Code example #43
0
File: server.py Project: WildMeOrg/wbia-tpl-kaggle7
    def post(self):
        global NETWORK_MODEL_TAG
        global NETWORK
        global NETWORK_VALUES

        response = {'success': False}

        # ut.embed()

        try:
            with ut.Timer('Pre'):
                parser = reqparse.RequestParser()
                parser.add_argument('image', type=str)
                parser.add_argument('config', type=dict)
                args = parser.parse_args()

                image_base64_str = args['image']
                image = get_image_from_base64_str(image_base64_str)

                config = args['config']
                model_tag = config.get('model_tag', None)
                num_returns = config.get('topk', 100)

                model_url = model_url_dict.get(model_tag, None)

            assert model_url is not None, 'Model tag %r is not recognized' % (
                model_tag, )
            if model_tag != NETWORK_MODEL_TAG:
                with ut.Timer('Loading network'):
                    print('Loading network from weights %r' % (model_tag, ))
                    values_url = model_url.replace('.pth', '.values.pth')

                    # Download files
                    model_filepath = ut.grab_file_url(model_url,
                                                      appname='kaggle7',
                                                      check_hash=True)
                    values_filepath = ut.grab_file_url(values_url,
                                                       appname='kaggle7',
                                                       check_hash=True)

                    model_values = torch.load(values_filepath)
                    classes = model_values['classes']
                    num_classes = len(classes)

                    model_weights = torch.load(model_filepath,
                                               map_location=get_device())
                    network_model, multiple = make_new_network(
                        num_classes, RING_HEADS, GEM_CONST, pretrained=False)

                    if multiple:
                        pass

                    if torch.cuda.is_available():
                        network_model = network_model.cuda()

                    # model_weights = model_weights['model']
                    network_model.load_state_dict(model_weights)
                    network_model.eval()

                    NETWORK_MODEL_TAG = model_tag
                    NETWORK = network_model
                    NETWORK_VALUES = model_values

            print('Using network %r' % (NETWORK_MODEL_TAG, ))
            with ut.Timer('Loading input tensor'):
                input_image = image.convert(CMODE).convert('LA').convert(CMODE)
                input_image = TFRM_RESIZE(input_image)
                input_image = pil2tensor(input_image, np.float32)
                input_image = input_image.div_(255)
                input_image = TFRM_WHITEN(input_image)

                size = input_image.size()
                input_tensor = input_image.view(-1, size[0], size[1], size[2])
                input_tensor = input_tensor.to(get_device())

            # Run inference
            with ut.Timer('Inference'):
                print('Running inference on input tensor %r' %
                      (input_tensor.size(), ))
                output = NETWORK(input_tensor)
                print('...done')
                preds_list, feats_list = output

            with ut.Timer('Post1'):
                print('Performing post-processing')
                prediction_raw = preds_list[-1][0]
                features_raw = TFRM_L2NORM(torch.cat(feats_list, dim=1))[0]

            with ut.Timer('Post2'):
                print('...classifier')
                # Post Process classification
                classifier_temp = NETWORK_VALUES['thresholds'][
                    'classifier_softmax_temp']
                classifier_prediction = torch.softmax(prediction_raw /
                                                      classifier_temp,
                                                      dim=0)

            with ut.Timer('Post3'):
                # Post process features
                print('...features')
                train_feats = NETWORK_VALUES['train_feats']
                train_gt = NETWORK_VALUES['train_gt']
                size = features_raw.size()
                features = features_raw.view(-1, size[0])
                distance_matrix_imgs = batched_dmv(features, train_feats)
                distance_matrix_classes = dm2cm(distance_matrix_imgs, train_gt)
                features_sim = (2.0 - distance_matrix_classes) * 0.5
                features_sim = features_sim[0]

                features_temp = NETWORK_VALUES['thresholds'][
                    'feature_softmax_temp']
                features_prediction = torch.softmax(features_sim /
                                                    features_temp,
                                                    dim=0)

            with ut.Timer('Post4'):
                print('...mixing')
                p = NETWORK_VALUES['thresholds']['mixing_value']
                classifier_prediction = classifier_prediction.to('cpu')
                final_prediction = (p * classifier_prediction +
                                    (1.0 - p) * features_prediction)

            with ut.Timer('Collection'):
                print('Collecting prediction')
                top_k_score_list, top_k_index_list = final_prediction.topk(
                    num_returns, 0)
                top_k_score_list = top_k_score_list.detach().tolist()
                classes = NETWORK_VALUES['classes']
                top_k_class_list = ut.take(classes, top_k_index_list)

                response['scores'] = {}
                for top_k_class, top_k_score in zip(top_k_class_list,
                                                    top_k_score_list):
                    response['scores'][top_k_class] = top_k_score
                response['success'] = True

            print('...done')
        except Exception as ex:
            message = str(ex)
            response['message'] = message
            print('!!!ERROR!!!')
            print(response)

        # if torch.cuda.is_available():
        #     torch.cuda.empty_cache()

        return response
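
The post-processing above blends two probability distributions with a single
mixing weight p taken from the saved thresholds; a toy sketch of that convex
combination (dummy tensors, hypothetical p) follows.

import torch

p = 0.3  # hypothetical mixing value
classifier_prediction = torch.softmax(torch.randn(5), dim=0)
features_prediction = torch.softmax(torch.randn(5), dim=0)
final_prediction = p * classifier_prediction + (1.0 - p) * features_prediction
assert abs(float(final_prediction.sum()) - 1.0) < 1e-6  # still sums to 1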
Code example #44
0
File: labelShark.py Project: Hrmirzadeh/wildbook-ia
def classifyShark(ibs, gid_list):

    suffix = 'lenet'
    batch_size = 32
    model_name = 'injur-shark-' + suffix
    model = classify_shark.WhaleSharkInjuryModel(name=model_name,
                                                 output_dims=2,
                                                 data_shape=(224, 224, 3),
                                                 batch_size=batch_size)
    model.init_arch()
    filep = ut.grab_file_url(modelStateLocation)
    model.load_model_state(fpath=filep)
    model.rrr()

    config = {
        'algo': 'yolo',
        'sensitivity': 0.2,
        'config_filepath': 'default',
    }
    depc = ibs.depc_image

    images = ibs.images(gid_list)
    images = images.compress([ext_ not in ['.gif'] for ext_ in images.exts])

    gid_list = images.gids
    # uuid_gid_list = [str(item) for item in images.uuids]
    results_list = depc.get_property('localizations',
                                     gid_list,
                                     None,
                                     config=config)  # NOQA

    results_list2 = []
    multi_gids = []
    failed_gids = []

    for gid, res in zip(gid_list, results_list):
        score, bbox_list, theta_list, conf_list, class_list = res
        if len(bbox_list) == 0:
            failed_gids.append(gid)
        elif len(bbox_list) == 1:
            results_list2.append((gid, bbox_list, theta_list))
        elif len(bbox_list) > 1:
            # Take only a single annotation per bounding box.
            multi_gids.append(gid)
            idx = conf_list.argmax()
            res2 = (gid, bbox_list[idx:idx + 1], theta_list[idx:idx + 1])
            results_list2.append(res2)

    # Reorder empty_info to be aligned with results
    localized_imgs = ibs.images(ut.take_column(results_list2, 0))

    # Override old bboxes
    bboxes = np.array(ut.take_column(results_list2, 1))[:, 0, :]
    thetas = np.array(ut.take_column(results_list2, 2))[:, 0]

    species = ['whale_shark'] * len(localized_imgs)
    aid_list = ibs.add_annots(localized_imgs.gids,
                              bbox_list=bboxes,
                              theta_list=thetas,
                              species_list=species)

    config = {'dim_size': (224, 224), 'resize_dim': 'wh'}
    chip_gen = ibs.depc_annot.get('chips',
                                  aid_list,
                                  'img',
                                  eager=False,
                                  config=config)
    data_shape = config['dim_size'] + (3, )
    iter_ = iter(ut.ProgIter(chip_gen, nTotal=len(aid_list), lbl='load chip'))
    shape = (len(aid_list), ) + data_shape
    data = vt.fromiter_nd(iter_, shape=shape, dtype=np.uint8)  # NOQA
    results = model._predict(data)
    predictions = results['predictions']
    classes = np.array(['healthy', 'injured'])
    prediction_class = classes[np.array(predictions)]
    return {
        'predictions': prediction_class.tolist(),
        'confidences': results['confidences'].tolist(),
    }
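
A minimal usage sketch for classifyShark() above; the database name and the
image selection are hypothetical.

import wbia

ibs = wbia.opendb('WS_Injury')       # hypothetical database
gid_list = ibs.get_valid_gids()[:8]  # a handful of images
result = classifyShark(ibs, gid_list)
print(result['predictions'])  # e.g. ['healthy', 'injured', ...]
print(result['confidences'])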