Example #1
def canonical_classifier_train(ibs,
                               species,
                               ensembles=3,
                               extracted_path=None,
                               **kwargs):
    # join/exists and utool are module-level imports in wbia's detecttrain.py;
    # they are repeated here so the snippet is self-contained.
    from os.path import exists, join
    import utool as ut
    from wbia.other.detectexport import (
        get_cnn_classifier_canonical_training_images_pytorch, )
    from wbia.algo.detect import densenet

    args = (species, )
    data_path = join(ibs.get_cachedir(),
                     'extracted-classifier-canonical-%s' % args)
    if extracted_path is None:
        extracted_path = get_cnn_classifier_canonical_training_images_pytorch(
            ibs,
            species,
            dest_path=data_path,
        )

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (
            species,
            ensemble_num,
        )
        output_path = join(ibs.get_cachedir(), 'training',
                           'classifier-canonical-%s-ensemble-%d' % args)
        if exists(output_path):
            ut.delete(output_path)
        weights_path = densenet.train(extracted_path,
                                      output_path,
                                      blur=False,
                                      flip=False)
        weights_path_list.append(weights_path)

    args = (species, )
    output_name = 'classifier.canonical.%s' % args
    ensemble_path = join(ibs.get_cachedir(), 'training', output_name)
    ut.ensuredir(ensemble_path)

    archive_path = '%s.zip' % (ensemble_path)
    ensemble_weights_path_list = []

    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        ensemble_weights_path = join(
            ensemble_path, 'classifier.canonical.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ensemble_weights_path_list = [ensemble_path] + ensemble_weights_path_list
    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
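
A minimal usage sketch for Example #1. The database and species names below are placeholders, and `wbia.opendb` is assumed as the usual way to obtain an `ibs` controller (in wbia these functions are also registered as controller methods):

import wbia

ibs = wbia.opendb('testdb1')  # placeholder database name
# Trains `ensembles` DenseNet models and bundles them into a single .zip
archive_path = canonical_classifier_train(ibs, 'zebra_grevys')  # placeholder species
print('deployable archive:', archive_path)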
Example #2
def classifier_cameratrap_densenet_train(ibs,
                                         positive_imageset_id,
                                         negative_imageset_id,
                                         ensembles=3,
                                         **kwargs):
    # join/exists and utool are module-level imports in the original source;
    # repeated here so the snippet is self-contained.
    from os.path import exists, join
    import utool as ut
    from wbia.other.detectexport import (
        get_cnn_classifier_cameratrap_binary_training_images_pytorch, )
    from wbia.algo.detect import densenet

    data_path = join(ibs.get_cachedir(), 'extracted-classifier-cameratrap')
    extracted_path = get_cnn_classifier_cameratrap_binary_training_images_pytorch(
        ibs,
        positive_imageset_id,
        negative_imageset_id,
        dest_path=data_path,
        image_size=densenet.INPUT_SIZE,
        **kwargs,
    )

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (ensemble_num, )
        output_path = join(ibs.get_cachedir(), 'training',
                           'classifier-cameratrap-ensemble-%d' % args)
        weights_path = densenet.train(extracted_path,
                                      output_path,
                                      blur=True,
                                      flip=True)
        weights_path_list.append(weights_path)

    archive_name = 'classifier.cameratrap.zip'
    archive_path = join(ibs.get_cachedir(), 'training', archive_name)
    ensemble_weights_path_list = []

    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        # Copy the weights into the training cache directory (rather than a
        # bare relative path in the process CWD) before archiving.
        ensemble_weights_path = join(
            ibs.get_cachedir(), 'training',
            'classifier.cameratrap.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
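
A usage sketch for Example #2, assuming the database already contains two curated imagesets; the rowids below are placeholders:

import wbia

ibs = wbia.opendb('testdb1')  # placeholder database name
# positive = frames containing animals, negative = empty camera-trap frames
archive_path = classifier_cameratrap_densenet_train(
    ibs,
    positive_imageset_id=1,  # placeholder imageset rowids
    negative_imageset_id=2,
    ensembles=3,
)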
Example #3
def classifier_multiclass_densenet_train(ibs,
                                         gid_list,
                                         label_list,
                                         ensembles=3,
                                         **kwargs):
    """
    >>> import uuid
    >>> manifest_filepath = join(ibs.dbdir, 'flukebook_groundtruth.csv')
    >>> with open(manifest_filepath, 'r') as manifest_file:
    >>>     line_list = manifest_file.readlines()
    >>>
    >>> label_dict = {
    >>>     'Left Dorsal Fin'  : 'left_dorsal_fin',
    >>>     'Right Dorsal Fin' : 'right_dorsal_fin',
    >>>     'Tail Fluke'       : 'tail_fluke',
    >>> }
    >>>
    >>> uuid_list = []
    >>> label_list = []
    >>> for line in line_list:
    >>>     line = line.strip().split(',')
    >>>     assert len(line) == 2
    >>>     uuid_, label_ = line
    >>>     uuid_ = uuid.UUID(uuid_)
    >>>     label_ = label_.strip()
    >>>     print(uuid_, label_)
    >>>     uuid_list.append(uuid_)
    >>>     label_ = label_dict.get(label_, None)
    >>>     assert label_ is not None
    >>>     label_list.append(label_)
    >>>
    >>> gid_list = ibs.get_image_gids_from_uuid(uuid_list)
    >>> assert None not in gid_list
    >>> # archive_path = ibs.classifier_multiclass_densenet_train(gid_list, label_list)
    >>> ibs.classifier2_precision_recall_algo_display(test_gid_list=gid_list, test_label_list=label_list)
    """
    # join/exists and utool are module-level imports in the original source;
    # repeated here so the snippet is self-contained.
    from os.path import exists, join
    import utool as ut
    from wbia.other.detectexport import (
        get_cnn_classifier_multiclass_training_images_pytorch, )
    from wbia.algo.detect import densenet

    data_path = join(ibs.get_cachedir(), 'extracted-classifier-multiclass')
    extracted_path = get_cnn_classifier_multiclass_training_images_pytorch(
        ibs,
        gid_list,
        label_list,
        dest_path=data_path,
        image_size=densenet.INPUT_SIZE,
        **kwargs,
    )

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (ensemble_num, )
        output_path = join(ibs.get_cachedir(), 'training',
                           'classifier-multiclass-ensemble-%d' % args)
        weights_path = densenet.train(extracted_path,
                                      output_path,
                                      blur=True,
                                      flip=False)
        weights_path_list.append(weights_path)

    archive_name = 'classifier.multiclass.zip'
    archive_path = join(ibs.get_cachedir(), 'training', archive_name)
    ensemble_weights_path_list = []

    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        # Copy the weights into the training cache directory (rather than a
        # bare relative path in the process CWD) before archiving.
        ensemble_weights_path = join(
            ibs.get_cachedir(), 'training',
            'classifier.multiclass.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
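
A usage sketch for Example #3 that builds the label list in code rather than from the CSV manifest shown in the docstring; the constant label is only illustrative:

import wbia

ibs = wbia.opendb('testdb1')  # placeholder database name
gid_list = ibs.get_valid_gids()
# One label string per image rowid; real labels come from ground truth.
label_list = ['tail_fluke'] * len(gid_list)
archive_path = classifier_multiclass_densenet_train(ibs, gid_list, label_list)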
Example #4
def labeler_train(
    ibs,
    species_list=None,
    species_mapping=None,
    viewpoint_mapping=None,
    ensembles=3,
    **kwargs,
):
    # join/exists and utool are module-level imports in the original source;
    # repeated here so the snippet is self-contained.
    from os.path import exists, join
    import utool as ut
    from wbia.other.detectexport import get_cnn_labeler_training_images_pytorch
    from wbia.algo.detect import densenet

    # Guard against the default: '-'.join(None) would raise a TypeError.
    assert species_list is not None, 'species_list must be specified'
    species = '-'.join(species_list)
    args = (species, )
    data_path = join(ibs.get_cachedir(), 'extracted-labeler-%s' % args)
    extracted_path = get_cnn_labeler_training_images_pytorch(
        ibs,
        category_list=species_list,
        category_mapping=species_mapping,
        viewpoint_mapping=viewpoint_mapping,
        dest_path=data_path,
        **kwargs,
    )

    weights_path_list = []
    for ensemble_num in range(ensembles):
        args = (
            species,
            ensemble_num,
        )
        output_path = join(ibs.get_cachedir(), 'training',
                           'labeler-%s-ensemble-%d' % args)
        if exists(output_path):
            ut.delete(output_path)
        weights_path = densenet.train(extracted_path,
                                      output_path,
                                      blur=False,
                                      flip=False)
        weights_path_list.append(weights_path)

    args = (species, )
    output_name = 'labeler.%s' % args
    ensemble_path = join(ibs.get_cachedir(), 'training', output_name)
    ut.ensuredir(ensemble_path)

    archive_path = '%s.zip' % (ensemble_path)
    ensemble_weights_path_list = []

    for index, weights_path in enumerate(sorted(weights_path_list)):
        assert exists(weights_path)
        ensemble_weights_path = join(ensemble_path,
                                     'labeler.%d.weights' % (index, ))
        ut.copy(weights_path, ensemble_weights_path)
        ensemble_weights_path_list.append(ensemble_weights_path)

    ensemble_weights_path_list = [ensemble_path] + ensemble_weights_path_list
    ut.archive_files(archive_path,
                     ensemble_weights_path_list,
                     overwrite=True,
                     common_prefix=True)

    return archive_path
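
A usage sketch for Example #4 with two species; the species names are placeholders and the optional mapping dicts are left at their defaults:

import wbia

ibs = wbia.opendb('testdb1')  # placeholder database name
archive_path = labeler_train(
    ibs,
    species_list=['zebra_plains', 'zebra_grevys'],  # placeholder species
    ensembles=3,
)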