Example no. 1
0
def export_tagged_chips(ibs, aid_list, dpath='.'):
    """
    DEPRECATE

    Export the chip images of the given annotations into a single zip archive.

    CommandLine:
        python -m wbia.tag_funcs --exec-export_tagged_chips --tags Hard interesting needswork --db PZ_Master1
        python -m wbia.tag_funcs --exec-export_tagged_chips --logic=or --any_startswith quality occlusion --has_any lighting needswork interesting hard --db GZ_Master1 --dpath=/media/raid
        python -m wbia.tag_funcs --exec-export_tagged_chips --db GZ_Master1 --min_num=1  --dpath /media/raid

    Example:
        >>> # SCRIPT
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=ut.ddict(list, logic=str))
        >>> ut.print_dict(kwargs, 'filter args')
        >>> aid_list = ibs.filter_annots_by_tags(**kwargs)
        >>> print('len(aid_list) = %r' % (len(aid_list),))
        >>> dpath = ut.get_argval('--dpath', default='')
        >>> all_tags = ut.flatten(ibs.get_annot_all_tags(aid_list))
        >>> filtered_tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(filtered_tag_hist, key_order_metric='val')
        >>> export_tagged_chips(ibs, aid_list, dpath)
    """
    # Visual-uuid hash makes the archive name unique for this annotation set
    hashid = ibs.get_annot_hashid_visual_uuid(aid_list)
    zip_name = 'exported_chips2_%s%s.zip' % (ibs.get_dbname(), hashid)
    zip_fpath = ut.unixjoin(dpath, zip_name)
    chip_fpaths = ibs.get_annot_chip_fpath(aid_list)
    ut.archive_files(zip_fpath, chip_fpaths, common_prefix=True)
Example no. 2
0
def export_tagged_chips(ibs, aid_list, dpath='.'):
    """
    Zip the chip images for ``aid_list`` into ``dpath``.

    CommandLine:
        python -m ibeis.tag_funcs --exec-export_tagged_chips --tags Hard interesting needswork --db PZ_Master1
        python -m ibeis.tag_funcs --exec-export_tagged_chips --logic=or --any_startswith quality occlusion --has_any lighting needswork interesting hard --db GZ_Master1 --dpath=/media/raid
        python -m ibeis.tag_funcs --exec-export_tagged_chips --db GZ_Master1 --min_num=1  --dpath /media/raid

    Example:
        >>> # SCRIPT
        >>> from ibeis.tag_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=ut.ddict(list, logic=str))
        >>> ut.print_dict(kwargs, 'filter args')
        >>> aid_list = ibs.filter_annots_by_tags(**kwargs)
        >>> print('len(aid_list) = %r' % (len(aid_list),))
        >>> dpath = ut.get_argval('--dpath', default='')
        >>> all_tags = ut.flatten(ibs.get_annot_all_tags(aid_list))
        >>> filtered_tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(filtered_tag_hist, key_order_metric='val')
        >>> export_tagged_chips(ibs, aid_list, dpath)
    """
    # NOTE(review): `_new=True` presumably selects a newer hashid scheme — confirm
    hashid = ibs.get_annot_hashid_visual_uuid(aid_list, _new=True)
    zip_name = 'exported_chips2_%s%s.zip' % (ibs.get_dbname(), hashid)
    zip_fpath = ut.unixjoin(dpath, zip_name)
    chip_fpaths = ibs.get_annot_chip_fpath(aid_list)
    ut.archive_files(zip_fpath, chip_fpaths, common_prefix=True)
Example no. 3
0
def labeler_train(ibs,
                  species_list=None,
                  species_mapping=None,
                  viewpoint_mapping=None,
                  ensembles=3,
                  **kwargs):
    """Train an ensemble of labeler networks and bundle their weights in a zip.

    Args:
        ibs: controller object providing cache directories and image access
        species_list (list): species categories to extract training data for
        species_mapping (dict): optional remapping of category names
        viewpoint_mapping (dict): optional remapping of viewpoints
        ensembles (int): number of independently trained ensemble members
        **kwargs: forwarded to the training-image extraction step

    Returns:
        str: path of the zip archive containing all ensemble weights
    """
    from ibeis_cnn.ingest_ibeis import get_cnn_labeler_training_images_pytorch
    from ibeis.algo.detect import densenet

    species = '-'.join(species_list)
    data_path = join(ibs.get_cachedir(), 'extracted-labeler-%s' % (species, ))
    extracted_path = get_cnn_labeler_training_images_pytorch(
        ibs,
        category_list=species_list,
        category_mapping=species_mapping,
        viewpoint_mapping=viewpoint_mapping,
        dest_path=data_path,
        **kwargs)

    # Train each ensemble member from a clean output directory
    weights_path_list = []
    for ensemble_num in range(ensembles):
        output_path = join(
            ibs.get_cachedir(), 'training',
            'labeler-%s-ensemble-%d' % (species, ensemble_num))
        if exists(output_path):
            ut.delete(output_path)  # discard stale checkpoints
        weights_path_list.append(
            densenet.train(extracted_path, output_path, blur=False,
                           flip=False))

    # Gather all member weights into one directory and archive it
    ensemble_path = join(ibs.get_cachedir(), 'training',
                         'labeler.%s' % (species, ))
    ut.ensuredir(ensemble_path)
    archive_path = '%s.zip' % (ensemble_path, )

    ensemble_weights_path_list = []
    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        dst_fpath = join(ensemble_path, 'labeler.%d.weights' % (index, ))
        ut.copy(weights_path, dst_fpath)
        ensemble_weights_path_list.append(dst_fpath)

    ut.archive_files(archive_path,
                     [ensemble_path] + ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
Example no. 4
0
def canonical_classifier_train(ibs,
                               species,
                               ensembles=3,
                               extracted_path=None,
                               **kwargs):
    """Train an ensemble of canonical classifiers and zip their weights.

    Args:
        ibs: controller object providing cache directories and image access
        species (str): species the canonical classifier is trained for
        ensembles (int): number of independently trained ensemble members
        extracted_path (str): pre-extracted training data; extracted on
            demand when None
        **kwargs: currently unused by the extraction step (accepted for
            signature compatibility)

    Returns:
        str: path of the zip archive containing all ensemble weights
    """
    from wbia.other.detectexport import (
        get_cnn_classifier_canonical_training_images_pytorch, )
    from wbia.algo.detect import densenet

    data_path = join(ibs.get_cachedir(),
                     'extracted-classifier-canonical-%s' % (species, ))
    if extracted_path is None:
        extracted_path = get_cnn_classifier_canonical_training_images_pytorch(
            ibs,
            species,
            dest_path=data_path,
        )

    # Train each ensemble member from a clean output directory
    weights_path_list = []
    for ensemble_num in range(ensembles):
        output_path = join(
            ibs.get_cachedir(), 'training',
            'classifier-canonical-%s-ensemble-%d' % (species, ensemble_num))
        if exists(output_path):
            ut.delete(output_path)  # discard stale checkpoints
        weights_path_list.append(
            densenet.train(extracted_path, output_path, blur=False,
                           flip=False))

    # Gather all member weights into one directory and archive it
    ensemble_path = join(ibs.get_cachedir(), 'training',
                         'classifier.canonical.%s' % (species, ))
    ut.ensuredir(ensemble_path)
    archive_path = '%s.zip' % (ensemble_path, )

    ensemble_weights_path_list = []
    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        dst_fpath = join(ensemble_path,
                         'classifier.canonical.%d.weights' % (index, ))
        ut.copy(weights_path, dst_fpath)
        ensemble_weights_path_list.append(dst_fpath)

    ut.archive_files(archive_path,
                     [ensemble_path] + ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
 def archive(dstcnvs_normer, cachedir=None, overwrite=False):
     """Zip this normalizer's cached data file and return the archive path.

     Args:
         cachedir (str): cache directory; defaults to the normalizer's own
         overwrite (bool): replace an existing archive if True

     Returns:
         str: path of the created zip archive
     """
     if cachedir is None:
         cachedir = dstcnvs_normer.cachedir
     data_fpath = dstcnvs_normer.get_fpath(cachedir)
     archive_fpath = dstcnvs_normer.get_fpath(cachedir, ext='.zip')
     # NOTE: the flann index file is intentionally not archived
     ut.archive_files(archive_fpath, [data_fpath], overwrite=overwrite)
     return archive_fpath
Example no. 6
0
 def archive(dstcnvs_normer, cachedir=None, overwrite=False):
     """Create a zip archive of the normalizer's cached data.

     Returns:
         str: path of the created zip archive
     """
     cachedir = cachedir if cachedir is not None else dstcnvs_normer.cachedir
     archive_fpath = dstcnvs_normer.get_fpath(cachedir, ext='.zip')
     # Only the data file is archived; the flann index is excluded on purpose
     fpath_list = [dstcnvs_normer.get_fpath(cachedir)]
     ut.archive_files(archive_fpath, fpath_list, overwrite=overwrite)
     return archive_fpath
Example no. 7
0
def canonical_localizer_train(ibs, species, ensembles=3, **kwargs):
    """Train an ensemble of canonical localizers and zip their weights.

    Args:
        ibs: controller object providing cache directories and image access
        species (str): species the canonical localizer is trained for
        ensembles (int): number of independently trained ensemble members
        **kwargs: currently unused (accepted for signature compatibility)

    Returns:
        str: path of the zip archive containing all ensemble weights
    """
    from ibeis_cnn.ingest_ibeis import get_cnn_localizer_canonical_training_images_pytorch
    from ibeis.algo.detect import canonical

    data_path = join(ibs.get_cachedir(),
                     'extracted-localizer-canonical-%s' % (species, ))
    extracted_path = get_cnn_localizer_canonical_training_images_pytorch(
        ibs,
        species,
        dest_path=data_path,
    )

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (
            species,
            ensemble_num,
        )
        output_path = join(ibs.get_cachedir(), 'training',
                           'localizer-canonical-%s-ensemble-%d' % args)
        # FIX: clear a stale training directory before retraining, matching
        # every sibling *_train function; otherwise leftover checkpoints from
        # a previous run linger in output_path.
        if exists(output_path):
            ut.delete(output_path)
        weights_path = canonical.train(extracted_path, output_path)
        weights_path_list.append(weights_path)

    args = (species, )
    output_name = 'localizer.canonical.%s' % args
    ensemble_path = join(ibs.get_cachedir(), 'training', output_name)
    ut.ensuredir(ensemble_path)

    archive_path = '%s.zip' % (ensemble_path)
    ensemble_weights_path_list = []

    # Copy each member's weights into the ensemble directory
    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        ensemble_weights_path = join(
            ensemble_path, 'localizer.canonical.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ensemble_weights_path_list = [ensemble_path] + ensemble_weights_path_list
    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
Example no. 8
0
def classifier_cameratrap_densenet_train(ibs,
                                         positive_imageset_id,
                                         negative_imageset_id,
                                         ensembles=3,
                                         **kwargs):
    """Train an ensemble of camera-trap binary classifiers and zip the weights.

    Args:
        ibs: controller object providing cache directories and image access
        positive_imageset_id (int): imageset of positive training examples
        negative_imageset_id (int): imageset of negative training examples
        ensembles (int): number of independently trained ensemble members
        **kwargs: forwarded to the training-image extraction step

    Returns:
        str: path of the zip archive containing all ensemble weights
    """
    from wbia.other.detectexport import (
        get_cnn_classifier_cameratrap_binary_training_images_pytorch, )
    from wbia.algo.detect import densenet

    data_path = join(ibs.get_cachedir(), 'extracted-classifier-cameratrap')
    extracted_path = get_cnn_classifier_cameratrap_binary_training_images_pytorch(
        ibs,
        positive_imageset_id,
        negative_imageset_id,
        dest_path=data_path,
        image_size=densenet.INPUT_SIZE,
        **kwargs,
    )

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (ensemble_num, )
        output_path = join(ibs.get_cachedir(), 'training',
                           'classifier-cameratrap-ensemble-%d' % args)
        weights_path = densenet.train(extracted_path,
                                      output_path,
                                      blur=True,
                                      flip=True)
        weights_path_list.append(weights_path)

    # FIX: copy the weights into a dedicated ensemble directory under the
    # training cache (as labeler_train does). Previously each weights file was
    # copied to a bare relative path, so files landed in the process CWD and
    # the archive contents depended on the working directory.
    ensemble_path = join(ibs.get_cachedir(), 'training',
                         'classifier.cameratrap')
    ut.ensuredir(ensemble_path)
    archive_path = '%s.zip' % (ensemble_path, )
    ensemble_weights_path_list = []

    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        ensemble_weights_path = join(
            ensemble_path, 'classifier.cameratrap.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
Example no. 9
0
    def tozip():
        """Bundle the chapter-4 LaTeX sources plus referenced figures into chap4.zip."""
        re_fpath = ut.named_field('fpath', 'figure.*?[jp][pn]g') + '}'
        patterns = [
            'chapter4-application.tex', 'figdef4*', 'main.tex', 'def.tex',
            'Crall*', 'thesis.cls', 'header*', 'colordef.tex', '*.bib'
        ]
        fpaths = sorted(
            ut.glob('.', patterns, recursive=True, exclude_dirs=['guts']))

        # Scan the sources for figure references
        found_fpath_list, found_lines_list, found_lxs_list = ut.grep(
            re_fpath, fpath_list=fpaths, verbose=True)
        fig_fpath_list = []
        for line in ut.flatten(found_lines_list):
            if line.startswith('%'):
                continue  # skip commented-out LaTeX lines
            for match in re.finditer(re_fpath, line):
                fig_fpath = match.groupdict()['fpath']
                # Figures excluded from the archive by keyword
                excluded = ('junc' in fig_fpath or 'markov' in fig_fpath
                            or 'bayes' in fig_fpath)
                if not excluded:
                    fig_fpath_list.append(fig_fpath)

        ut.archive_files('chap4.zip', fig_fpath_list + fpaths)
Example no. 10
0
def build_linux_zip_binaries():
    """Zip the built linux binaries under dist/ and return the archive path."""
    archive_fpath = 'dist/ibeis-linux-binary.zip'
    ut.archive_files(archive_fpath, ut.ls('dist/ibeis'))
    return archive_fpath
Example no. 11
0
def build_linux_zip_binaries():
    """Archive everything in dist/ibeis into a single distributable zip."""
    binary_fpath_list = ut.ls('dist/ibeis')
    zip_fpath = 'dist/ibeis-linux-binary.zip'
    ut.archive_files(zip_fpath, binary_fpath_list)
    return zip_fpath
Example no. 12
0
def classifier_multiclass_densenet_train(ibs,
                                         gid_list,
                                         label_list,
                                         ensembles=3,
                                         **kwargs):
    """Train an ensemble of multiclass image classifiers and zip the weights.

    Args:
        ibs: controller object providing cache directories and image access
        gid_list (list): image rowids to train on
        label_list (list): class label per image in ``gid_list``
        ensembles (int): number of independently trained ensemble members
        **kwargs: forwarded to the training-image extraction step

    Returns:
        str: path of the zip archive containing all ensemble weights

    >>> import uuid
    >>> manifest_filepath = join(ibs.dbdir, 'flukebook_groundtruth.csv')
    >>> with open(manifest_filepath, 'r') as manifest_file:
    >>>     line_list = manifest_file.readlines()
    >>>
    >>> label_dict = {
    >>>     'Left Dorsal Fin'  : 'left_dorsal_fin',
    >>>     'Right Dorsal Fin' : 'right_dorsal_fin',
    >>>     'Tail Fluke'       : 'tail_fluke',
    >>> }
    >>>
    >>> uuid_list = []
    >>> label_list = []
    >>> for line in line_list:
    >>>     line = line.strip().split(',')
    >>>     assert len(line) == 2
    >>>     uuid_, label_ = line
    >>>     uuid_ = uuid.UUID(uuid_)
    >>>     label_ = label_.strip()
    >>>     print(uuid_, label_)
    >>>     uuid_list.append(uuid_)
    >>>     label_ = label_dict.get(label_, None)
    >>>     assert label_ is not None
    >>>     label_list.append(label_)
    >>>
    >>> gid_list = ibs.get_image_gids_from_uuid(uuid_list)
    >>> assert None not in gid_list
    >>> # archive_path = ibs.classifier_multiclass_densenet_train(gid_list, label_list)
    >>> ibs.classifier2_precision_recall_algo_display(test_gid_list=gid_list, test_label_list=label_list)
    """
    from ibeis_cnn.ingest_ibeis import get_cnn_classifier_multiclass_training_images_pytorch
    from ibeis.algo.detect import densenet

    data_path = join(ibs.get_cachedir(), 'extracted-classifier-multiclass')
    extracted_path = get_cnn_classifier_multiclass_training_images_pytorch(
        ibs,
        gid_list,
        label_list,
        dest_path=data_path,
        image_size=densenet.INPUT_SIZE,
        **kwargs)

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (ensemble_num, )
        output_path = join(ibs.get_cachedir(), 'training',
                           'classifier-multiclass-ensemble-%d' % args)
        weights_path = densenet.train(extracted_path,
                                      output_path,
                                      blur=True,
                                      flip=False)
        weights_path_list.append(weights_path)

    # FIX: copy the weights into a dedicated ensemble directory under the
    # training cache (as labeler_train does). Previously each weights file was
    # copied to a bare relative path, so files landed in the process CWD and
    # the archive contents depended on the working directory.
    ensemble_path = join(ibs.get_cachedir(), 'training',
                         'classifier.multiclass')
    ut.ensuredir(ensemble_path)
    archive_path = '%s.zip' % (ensemble_path, )
    ensemble_weights_path_list = []

    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        ensemble_weights_path = join(
            ensemble_path, 'classifier.multiclass.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path