Example #1
def cached_impaint(bgr_img,
                   cached_mask_fpath=None,
                   label_colors=None,
                   init_mask=None,
                   aug=False,
                   refine=False):
    import vtool as vt
    if cached_mask_fpath is None:
        cached_mask_fpath = 'image_' + ut.hashstr_arr(bgr_img) + '.png'
    if aug:
        cached_mask_fpath += '.' + ut.hashstr_arr(bgr_img)
        if label_colors is not None:
            cached_mask_fpath += ut.hashstr_arr(label_colors)
        cached_mask_fpath += '.png'
    #cached_mask_fpath = 'tmp_mask.png'
    if refine or not ut.checkpath(cached_mask_fpath):
        if refine and ut.checkpath(cached_mask_fpath):
            if init_mask is None:
                init_mask = vt.imread(cached_mask_fpath, grayscale=True)
        custom_mask = impaint_mask(bgr_img,
                                   label_colors=label_colors,
                                   init_mask=init_mask)
        vt.imwrite(cached_mask_fpath, custom_mask)
    else:
        custom_mask = vt.imread(cached_mask_fpath, grayscale=True)
    return custom_mask
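
Note: the function above is a simple content-addressed cache: hash the image to name the mask file, then reuse it on later calls unless refine forces a repaint. A minimal standalone sketch of the same idiom, assuming only OpenCV and hashlib (compute_mask is a hypothetical stand-in for the interactive impaint_mask step):

import hashlib
import os
import cv2

def cached_mask(bgr_img, compute_mask, cache_dir='.'):
    # Key the cache on the image content, mirroring ut.hashstr_arr above
    digest = hashlib.sha1(bgr_img.tobytes()).hexdigest()[:16]
    fpath = os.path.join(cache_dir, 'mask_%s.png' % (digest, ))
    if os.path.exists(fpath):
        # Cache hit: reuse the previously painted mask
        return cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)
    mask = compute_mask(bgr_img)  # the expensive / interactive step
    cv2.imwrite(fpath, mask)      # populate the cache for next time
    return mask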
Example #2
def _resize_worker(gfpath, new_gfpath, new_size):
    """ worker function for parallel generator """
    import vtool as vt
    img = vt.imread(gfpath)
    new_img = vt.resize(img, new_size)
    vt.imwrite(new_gfpath, new_img)
    return new_gfpath
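
Because _resize_worker only touches its arguments, it parallelizes cleanly. A hypothetical driver using the standard library's multiprocessing (starmap unpacks each argument tuple and preserves input order):

from multiprocessing import Pool

def resize_all(gfpath_list, new_gfpath_list, new_size, processes=4):
    # _resize_worker must be importable at module top level for this to work
    args = list(zip(gfpath_list, new_gfpath_list,
                    [new_size] * len(gfpath_list)))
    with Pool(processes=processes) as pool:
        return pool.starmap(_resize_worker, args)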
Example #3
def write_dirty_aids(ibs, dirty_probchip_fpath_list, dirty_aids, config2_,
                     species):
    if config2_ is None:
        fw_detector = ibs.cfg.featweight_cfg.fw_detector
    else:
        fw_detector = config2_.get('fw_detector')

    if fw_detector == 'rf':
        (
            extramargin_fpath_list,
            probchip_extramargin_fpath_list,
            halfoffset_cs_list,
        ) = compute_extramargin_detectchip(ibs,
                                           dirty_aids,
                                           config2_=config2_,
                                           species=species,
                                           FACTOR=4)
        #dirty_cfpath_list  = ibs.get_annot_chip_fpath(dirty_aids, ensure=True, config2_=config2_)

        config = {
            'scale_list': [1.0],
            'output_gpath_list': probchip_extramargin_fpath_list,
            'mode': 1,
        }
        probchip_generator = randomforest.detect_gpath_list_with_species(
            ibs, extramargin_fpath_list, species, **config)
        # Evaluate generator until completion
        ut.evaluate_generator(probchip_generator)
        extramargin_mask_gen = (vt.imread(fpath, grayscale=True)
                                for fpath in probchip_extramargin_fpath_list)
        # Crop the extra margin off of the new probchips
        _iter = zip(dirty_probchip_fpath_list, extramargin_mask_gen,
                    halfoffset_cs_list)
        for (probchip_fpath, extramargin_probchip, halfmargin) in _iter:
            half_w, half_h = halfmargin
            probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
            vt.imwrite(probchip_fpath, probchip)
    elif fw_detector == 'cnn':
        # don't use extra margin here (for now)
        chip_fpath_list = ibs.get_annot_chip_fpath(dirty_aids,
                                                   config2_=config2_)
        mask_gen = ibs.generate_species_background_mask(
            chip_fpath_list, species)
        _iter = zip(dirty_probchip_fpath_list, mask_gen)
        for chunk in ut.ichunks(_iter, 64):
            for probchip_fpath, probchip in ut.ProgressIter(
                    chunk,
                    lbl='write probchip chunk',
                    adjust=True,
                    time_thresh=30.0):
                probchip = postprocess_mask(probchip)
                vt.imwrite(probchip_fpath, probchip)
    else:
        raise NotImplementedError('bad fw_detector=%r' % (fw_detector, ))
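
The 'rf' branch crops the extra margin by slicing half_h rows and half_w columns off each side of the probability chip. The same indexing in isolation, with hypothetical values:

import numpy as np

extramargin_probchip = np.arange(100, dtype=np.uint8).reshape(10, 10)
half_w, half_h = 2, 3
# Rows index height and columns index width, as in the loop above
probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
assert probchip.shape == (10 - 2 * 3, 10 - 2 * 2)  # (4, 6)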
Example #4
def multidb_montage():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw multidb_montage --save montage.jpg --dpath ~/slides --diskshow --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> multidb_montage()
    """
    import ibeis
    import plottool as pt
    import vtool as vt
    import numpy as np
    pt.ensure_pylab_qt4()
    ibs1 = ibeis.opendb('PZ_MTEST')
    ibs2 = ibeis.opendb('GZ_ALL')
    ibs3 = ibeis.opendb('GIRM_Master1')

    chip_lists = []
    aids_list = []

    for ibs in [ibs1, ibs2, ibs3]:
        aids = ibs.sample_annots_general(minqual='good', sample_size=400)
        aids_list.append(aids)

    print(ut.depth_profile(aids_list))

    for ibs, aids in zip([ibs1, ibs2, ibs3], aids_list):
        chips = ibs.get_annot_chips(aids)
        chip_lists.append(chips)

    chip_list = ut.flatten(chip_lists)
    np.random.shuffle(chip_list)

    widescreen_ratio = 16 / 9
    ratio = ut.PHI
    ratio = widescreen_ratio

    fpath = pt.get_save_directions()

    #height = 6000
    width = 6000
    #width = int(height * ratio)
    height = int(width / ratio)
    dsize = (width, height)
    dst = vt.montage(chip_list, dsize)
    vt.imwrite(fpath, dst)
    if ut.get_argflag('--show'):
        pt.imshow(dst)
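
vt.montage packs the shuffled chips into one canvas of the requested dsize. For comparison, a much simpler fixed-grid montage can be sketched with OpenCV alone (equal cells, no aspect-aware packing; this is an illustration, not vtool's algorithm):

import numpy as np
import cv2

def grid_montage(imgs, rows, cols, cell=(200, 200)):
    cw, ch = cell
    canvas = np.zeros((rows * ch, cols * cw, 3), dtype=np.uint8)
    for idx, img in enumerate(imgs[:rows * cols]):
        r, c = divmod(idx, cols)
        # Force every chip into the fixed cell size (ignores aspect ratio)
        canvas[r * ch:(r + 1) * ch, c * cw:(c + 1) * cw] = cv2.resize(img, (cw, ch))
    return canvas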
Example #5
def multidb_montage():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw multidb_montage --save montage.jpg --dpath ~/slides --diskshow --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> multidb_montage()
    """
    import ibeis
    import plottool as pt
    import vtool as vt
    import numpy as np
    pt.ensure_pylab_qt4()
    ibs1 = ibeis.opendb('PZ_MTEST')
    ibs2 = ibeis.opendb('GZ_ALL')
    ibs3 = ibeis.opendb('GIRM_Master1')

    chip_lists = []
    aids_list = []

    for ibs in [ibs1, ibs2, ibs3]:
        aids = ibs.sample_annots_general(minqual='good', sample_size=400)
        aids_list.append(aids)

    print(ut.depth_profile(aids_list))

    for ibs, aids in zip([ibs1, ibs2, ibs3], aids_list):
        chips = ibs.get_annot_chips(aids)
        chip_lists.append(chips)

    chip_list = ut.flatten(chip_lists)
    np.random.shuffle(chip_list)

    widescreen_ratio = 16 / 9
    ratio = ut.PHI
    ratio = widescreen_ratio

    fpath = pt.get_save_directions()

    #height = 6000
    width = 6000
    #width = int(height * ratio)
    height = int(width / ratio)
    dsize = (width, height)
    dst = vt.montage(chip_list, dsize)
    vt.imwrite(fpath, dst)
    if ut.get_argflag('--show'):
        pt.imshow(dst)
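Example #6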
def dummy_manual_chipmask(depc, parent_rowids, config=None):
    import vtool as vt
    from plottool import interact_impaint
    mask_dpath = join(depc.cache_dpath, 'ManualChipMask')
    ut.ensuredir(mask_dpath)
    if config is None:
        config = {}
    print('Requesting user defined chip mask')
    for rowid in parent_rowids:
        # gpath_list is a free variable supplied by the enclosing scope
        img = vt.imread(gpath_list[rowid])
        mask = interact_impaint.impaint_mask2(img)
        mask_fpath = join(mask_dpath, 'mask%d.png' % (rowid, ))
        vt.imwrite(mask_fpath, mask)
        w, h = vt.get_size(mask)
        yield (w, h), mask_fpath
Example #7
def dummy_manual_chipmask(depc, parent_rowids, config=None):
    import vtool as vt
    from plottool import interact_impaint
    mask_dpath = ut.unixjoin(depc.cache_dpath, 'ManualChipMask')
    ut.ensuredir(mask_dpath)
    if config is None:
        config = {}
    print('Requesting user defined chip mask')
    for rowid in parent_rowids:
        # gpath_list is a free variable supplied by the enclosing scope
        img = vt.imread(gpath_list[rowid])
        mask = interact_impaint.impaint_mask2(img)
        mask_fpath = ut.unixjoin(mask_dpath, 'mask%d.png' % (rowid,))
        vt.imwrite(mask_fpath, mask)
        w, h = vt.get_size(mask)
        yield (w, h), mask_fpath
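Example #8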
        def nosql_draw2(check_func, match):
            from matplotlib.backends.backend_agg import FigureCanvas

            try:
                from matplotlib.backends.backend_agg import Figure
            except ImportError:
                from matplotlib.figure import Figure

            was_interactive = mpl.is_interactive()
            if was_interactive:
                mpl.interactive(False)
            # fnum = 32
            fig = Figure()
            canvas = FigureCanvas(fig)  # NOQA
            # fig.clf()
            ax = fig.add_subplot(1, 1, 1)
            if check_func is not None and check_func():
                return
            ax, xywh1, xywh2 = match.show(ax=ax)
            if check_func is not None and check_func():
                return
            savekw = {
                # 'dpi' : 60,
                'dpi': 80,
            }
            axes_extents = pt.extract_axes_extents(fig)
            # assert len(axes_extents) == 1, 'more than one axes'
            extent = axes_extents[0]
            with io.BytesIO() as stream:
                # This call takes 23% - 15% of the time depending on settings
                fig.savefig(stream, bbox_inches=extent, **savekw)
                stream.seek(0)
                data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
            if check_func is not None and check_func():
                return
            pt.plt.close(fig)
            image = cv2.imdecode(data, 1)
            thumbsize = 221
            max_dsize = (thumbsize, thumbsize)
            dsize, sx, sy = vt.resized_clamped_thumb_dims(
                vt.get_size(image), max_dsize)
            if check_func is not None and check_func():
                return
            image = vt.resize(image, dsize)
            vt.imwrite(fpath, image)
            if check_func is not None and check_func():
                return
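
The heart of nosql_draw2 is rendering a matplotlib Figure off-screen with the Agg canvas and decoding the PNG bytes back into an ndarray. That round trip in isolation, as a minimal sketch assuming matplotlib, numpy, and cv2:

import io
import numpy as np
import cv2
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

fig = Figure()
FigureCanvasAgg(fig)  # attach an off-screen canvas; no GUI event loop needed
ax = fig.add_subplot(1, 1, 1)
ax.plot([0, 1], [0, 1])

with io.BytesIO() as stream:
    fig.savefig(stream, format='png', dpi=80)
    data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
image = cv2.imdecode(data, cv2.IMREAD_COLOR)  # BGR ndarray, like vt.imread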
Example #9
def gen_chip(tup):
    r"""
    Parallel worker. Crops a chip out of an image, applies filters, etc.

    THERE MAY BE AN ERROR IN HERE DUE TO IMWRITE BEING INSIDE A PARALLEL FUNCTION
    BUT IT MAY BE SOMETHING ELSE?

    Either way we probably shouldn't call imwrite inside here
    """
    # print('generating chip')
    cfpath, gfpath, bbox, theta, new_size, filter_list = tup
    chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
    # if DEBUG:
    # print('write chip: %r' % cfpath)
    height, width = chipBGR.shape[0:2]
    vt.imwrite(cfpath, chipBGR)
    return cfpath, width, height
Example #10
def gen_chip(tup):
    r"""
    Parallel worker. Crops a chip out of an image, applies filters, etc.

    THERE MAY BE AN ERROR IN HERE DUE TO IMWRITE BEING INSIDE A PARALLEL FUNCTION
    BUT IT MAY BE SOMETHING ELSE?

    Either way we probably shouldn't call imwrite inside here
    """
    #print('generating chip')
    cfpath, gfpath, bbox, theta, new_size, filter_list = tup
    chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
    #if DEBUG:
    #print('write chip: %r' % cfpath)
    height, width = chipBGR.shape[0:2]
    vt.imwrite(cfpath, chipBGR)
    return cfpath, width, height
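
The fix suggested by the docstring is to keep the disk write in the parent process and have the worker return pixels instead. Example #16 below consumes a gen_chip2 worker with exactly that contract; since its body is not shown in this listing, here is a hedged sketch consistent with how it is used there:

def gen_chip2(tup):
    # Worker: compute only, no filesystem side effects
    cfpath, gfpath, bbox, theta, new_size, filter_list = tup
    chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
    height, width = chipBGR.shape[0:2]
    # The parent calls vt.imwrite(cfpath, chipBGR) on these results
    return chipBGR, cfpath, width, height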
Example #11
def write_dirty_aids(ibs, dirty_probchip_fpath_list, dirty_aids, config2_, species):
    if config2_ is None:
        featweight_detector = ibs.cfg.featweight_cfg.featweight_detector
    else:
        featweight_detector = config2_.get('featweight_detector')

    if featweight_detector == 'rf':
        (extramargin_fpath_list,
         probchip_extramargin_fpath_list,
         halfoffset_cs_list,
         ) = compute_extramargin_detectchip(
             ibs, dirty_aids, config2_=config2_, species=species, FACTOR=4)
        #dirty_cfpath_list  = ibs.get_annot_chip_fpath(dirty_aids, ensure=True, config2_=config2_)

        config = {
            'scale_list': [1.0],
            'output_gpath_list': probchip_extramargin_fpath_list,
            'mode': 1,
        }
        probchip_generator = randomforest.detect_gpath_list_with_species(
            ibs, extramargin_fpath_list, species, **config)
        # Evaluate generator until completion
        ut.evaluate_generator(probchip_generator)
        extramargin_mask_gen = (
            vt.imread(fpath, grayscale=True) for fpath in probchip_extramargin_fpath_list
        )
        # Crop the extra margin off of the new probchips
        _iter = zip(dirty_probchip_fpath_list,
                    extramargin_mask_gen,
                    halfoffset_cs_list)
        for (probchip_fpath, extramargin_probchip, halfmargin) in _iter:
            half_w, half_h = halfmargin
            probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
            vt.imwrite(probchip_fpath, probchip)
    elif featweight_detector == 'cnn':
        # don't use extra margin here (for now)
        chip_fpath_list = ibs.get_annot_chip_fpath(dirty_aids, config2_=config2_)
        mask_gen = ibs.generate_species_background_mask(chip_fpath_list, species)
        _iter = zip(dirty_probchip_fpath_list, mask_gen)
        for chunk in ut.ichunks(_iter, 64):
            for probchip_fpath, probchip in ut.ProgressIter(chunk, lbl='write probchip chunk', adjust=True, time_thresh=30.0):
                probchip = postprocess_mask(probchip)
                vt.imwrite(probchip_fpath, probchip)
    else:
        raise NotImplementedError('bad featweight_detector=%r' % (featweight_detector,))
Example #12
def cached_impaint(bgr_img, cached_mask_fpath=None, label_colors=None,
                   init_mask=None, aug=False, refine=False):
    import vtool as vt
    if cached_mask_fpath is None:
        cached_mask_fpath = 'image_' + ut.hashstr_arr(bgr_img) + '.png'
    if aug:
        cached_mask_fpath += '.' + ut.hashstr_arr(bgr_img)
        if label_colors is not None:
            cached_mask_fpath += ut.hashstr_arr(label_colors)
        cached_mask_fpath += '.png'
    #cached_mask_fpath = 'tmp_mask.png'
    if refine or not ut.checkpath(cached_mask_fpath):
        if refine and ut.checkpath(cached_mask_fpath):
            if init_mask is None:
                init_mask = vt.imread(cached_mask_fpath, grayscale=True)
        custom_mask = impaint_mask(bgr_img, label_colors=label_colors, init_mask=init_mask)
        vt.imwrite(cached_mask_fpath, custom_mask)
    else:
        custom_mask = vt.imread(cached_mask_fpath, grayscale=True)
    return custom_mask
Example #13
    def saliency(dream, Xb, yb):
        """
        num = 10
        Xb = model.prepare_data(X_test[0:num])
        yb = y_test[0:num]

        dpath = ''
        dataset = None
        """
        dpath = '.'

        import theano.tensor as T
        import lasagne
        import vtool as vt
        import theano
        model = dream.model

        # Use current weights to find the score of a particular class
        Xb_shared = theano.shared(Xb)
        yb_shared = theano.shared(yb.astype(np.int32))

        # Get the final layer and remove the softmax nonlinearity to access the
        # pre-activation. (Softmax encourages minimization of other classes)
        import copy
        #softmax = copy.copy(model.output_layer)
        #softmax.nonlinearity = lasagne.nonlinearities.identity
        softmax = copy.copy(model.output_layer)

        class_probs = lasagne.layers.get_output(softmax, Xb_shared,
                                                deterministic=True)

        # weird way to index into the position of the target
        flat_idx = (T.arange(yb_shared.shape[0]) * class_probs.shape[1]) + yb_shared
        class_probs_target = T.flatten(class_probs)[flat_idx]

        # Get derivative of scores for the target class wrt the input
        d_score_wrt_input = theano.grad(class_probs_target.mean(), Xb_shared)
        w = np.array(d_score_wrt_input.eval())
        saliency = w.max(axis=1, keepdims=True)

        outs = saliency.transpose((0, 2, 3, 1))
        X = Xb.transpose((0, 2, 3, 1))

        for count in range(len(Xb)):
            img = X[count]
            y = yb[count]
            out = vt.norm01(outs[count])
            overlay = vt.blend_images_multiply(out, img)

            vt.imwrite(join(dpath, 'out%d_A_image_t=%s.jpg' % (count, y)),
                       vt.rectify_to_uint8(img))
            vt.imwrite(join(dpath, 'out%d_B_heat_t=%s.jpg' % (count, y)),
                       vt.rectify_to_uint8(out))
            vt.imwrite(join(dpath, 'out%d_C_overlay_t=%s.jpg' % (count, y)),
                       vt.rectify_to_uint8(overlay))
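
The same gradient-of-class-score saliency is much shorter in a modern autodiff framework. A hedged PyTorch sketch (PyTorch is an assumption here; the original uses Theano/Lasagne), keeping the max-over-channels reduction from above:

import torch

def saliency_maps(model, Xb, yb):
    # Xb: float tensor (N, C, H, W); yb: long tensor (N,)
    Xb = Xb.detach().clone().requires_grad_(True)
    scores = model(Xb)  # class scores, analogous to class_probs above
    target = scores[torch.arange(len(yb)), yb].sum()
    target.backward()   # d(score of target class) / d(input)
    # Matches saliency = w.max(axis=1, keepdims=True) above
    return Xb.grad.max(dim=1, keepdim=True).values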
Example #14
def export_to_xml(
    ibs,
    species_list,
    species_mapping=None,
    offset='auto',
    enforce_viewpoint=False,
    target_size=900,
    purge=False,
    use_maximum_linear_dimension=True,
    use_existing_train_test=True,
    include_parts=False,
    gid_list=None,
    output_path=None,
    allow_empty_images=False,
    min_annot_size=5,
    **kwargs,
):
    """Create training XML for training models."""
    import random
    from datetime import date
    from wbia.detecttools.pypascalmarkup import PascalVOC_Markup_Annotation

    logger.info('Received species_mapping = %r' % (species_mapping, ))

    if species_list is None:
        species_list = sorted(set(species_mapping.values()))

    logger.info('Using species_list = %r' % (species_list, ))

    def _add_annotation(
        annotation,
        bbox,
        theta,
        species_name,
        viewpoint,
        interest,
        decrease,
        width,
        height,
        part_type=None,
    ):
        # Transformation matrix
        R = vt.rotation_around_bbox_mat3x3(theta, bbox)
        # Get vertices of the annotation polygon
        verts = vt.verts_from_bbox(bbox, close=True)
        # Rotate and transform vertices
        xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
        trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
        new_verts = np.round(trans_pts).astype(int).T.tolist()
        x_points = [pt[0] for pt in new_verts]
        y_points = [pt[1] for pt in new_verts]
        xmin = int(min(x_points) * decrease)
        xmax = int(max(x_points) * decrease)
        ymin = int(min(y_points) * decrease)
        ymax = int(max(y_points) * decrease)
        # Bounds check
        xmin = max(xmin, 0)
        ymin = max(ymin, 0)
        xmax = min(xmax, width - 1)
        ymax = min(ymax, height - 1)
        # Get info
        info = {}

        w_ = xmax - xmin
        h_ = ymax - ymin
        if w_ < min_annot_size:
            return
        if h_ < min_annot_size:
            return

        if viewpoint != -1 and viewpoint is not None:
            info['pose'] = viewpoint

        if interest is not None:
            info['interest'] = '1' if interest else '0'

        if part_type is not None:
            species_name = '%s+%s' % (
                species_name,
                part_type,
            )

        area = w_ * h_
        logger.info('\t\tAdding %r with area %0.04f pixels^2' % (
            species_name,
            area,
        ))

        annotation.add_object(species_name, (xmax, xmin, ymax, ymin), **info)

    current_year = int(date.today().year)
    information = {'database_name': ibs.get_dbname()}
    import datetime

    now = datetime.datetime.now()
    folder = 'VOC%d' % (now.year, )

    if output_path is None:
        output_path = ibs.get_cachedir()

    datadir = join(output_path, 'VOCdevkit', folder)
    imagedir = join(datadir, 'JPEGImages')
    annotdir = join(datadir, 'Annotations')
    setsdir = join(datadir, 'ImageSets')
    mainsetsdir = join(setsdir, 'Main')

    if purge:
        ut.delete(datadir)

    ut.ensuredir(datadir)
    ut.ensuredir(imagedir)
    ut.ensuredir(annotdir)
    ut.ensuredir(setsdir)
    ut.ensuredir(mainsetsdir)

    # Get all gids and process them
    if gid_list is None:
        gid_list = sorted(ibs.get_valid_gids())

    sets_dict = {
        'test': [],
        'train': [],
        'trainval': [],
        'val': [],
    }
    index = 1 if offset == 'auto' else offset

    # Make a preliminary train / test split as imagesets or use the existing ones
    if not use_existing_train_test:
        ibs.imageset_train_test_split(**kwargs)

    train_gid_set = set(general_get_imageset_gids(ibs, 'TRAIN_SET', **kwargs))
    test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET', **kwargs))

    logger.info('Exporting %d images' % (len(gid_list), ))
    for gid in gid_list:
        aid_list = ibs.get_image_aids(gid)
        image_uri = ibs.get_image_uris(gid)
        image_path = ibs.get_image_paths(gid)
        if len(aid_list) > 0 or allow_empty_images:
            fulldir = image_path.split('/')
            filename = fulldir.pop()
            extension = filename.split('.')[-1]  # NOQA
            out_name = '%d_%06d' % (
                current_year,
                index,
            )
            out_img = '%s.jpg' % (out_name, )

            _image = ibs.get_images(gid)
            height, width, channels = _image.shape

            condition = width > height if use_maximum_linear_dimension else width < height
            if condition:
                ratio = height / width
                decrease = target_size / width
                width = target_size
                height = int(target_size * ratio)
            else:
                ratio = width / height
                decrease = target_size / height
                height = target_size
                width = int(target_size * ratio)

            dst_img = join(imagedir, out_img)
            _image = vt.resize(_image, (width, height))
            vt.imwrite(dst_img, _image)

            annotation = PascalVOC_Markup_Annotation(dst_img,
                                                     folder,
                                                     out_img,
                                                     source=image_uri,
                                                     **information)
            bbox_list = ibs.get_annot_bboxes(aid_list)
            theta_list = ibs.get_annot_thetas(aid_list)
            species_name_list = ibs.get_annot_species_texts(aid_list)
            viewpoint_list = ibs.get_annot_viewpoints(aid_list)
            interest_list = ibs.get_annot_interest(aid_list)

            part_rowids_list = ibs.get_annot_part_rowids(aid_list)
            zipped = zip(
                bbox_list,
                theta_list,
                species_name_list,
                viewpoint_list,
                interest_list,
                part_rowids_list,
            )
            for (
                    bbox,
                    theta,
                    species_name,
                    viewpoint,
                    interest,
                    part_rowid_list,
            ) in zipped:
                if species_mapping is not None:
                    species_name = species_mapping.get(species_name,
                                                       species_name)

                if species_name is not None and species_name not in species_list:
                    continue

                _add_annotation(
                    annotation,
                    bbox,
                    theta,
                    species_name,
                    viewpoint,
                    interest,
                    decrease,
                    width,
                    height,
                )

                if include_parts and len(part_rowid_list) > 0:
                    part_bbox_list = ibs.get_part_bboxes(part_rowid_list)
                    part_theta_list = ibs.get_part_thetas(part_rowid_list)
                    part_type_list = ibs.get_part_types(part_rowid_list)
                    part_zipped = zip(part_bbox_list, part_theta_list,
                                      part_type_list)
                    for part_bbox, part_theta, part_type in part_zipped:
                        part_viewpoint = viewpoint
                        part_interest = None
                        _add_annotation(
                            annotation,
                            part_bbox,
                            part_theta,
                            species_name,
                            part_viewpoint,
                            part_interest,
                            decrease,
                            width,
                            height,
                            part_type=part_type,
                        )

            out_filename = '%s.xml' % (out_name, )
            dst_annot = join(annotdir, out_filename)

            if gid in test_gid_set:
                sets_dict['test'].append(out_name)
            elif gid in train_gid_set:
                state = random.uniform(0.0, 1.0)
                if state <= 0.75:
                    sets_dict['train'].append(out_name)
                    sets_dict['trainval'].append(out_name)
                else:
                    sets_dict['val'].append(out_name)
                    sets_dict['trainval'].append(out_name)
            else:
                raise AssertionError(
                    'All gids must be either in the TRAIN_SET or TEST_SET imagesets'
                )

            # Write XML
            logger.info('Copying:\n%r\n%r\n%r\n\n' % (
                image_path,
                dst_img,
                (width, height),
            ))
            xml_data = open(dst_annot, 'w')
            xml_data.write(annotation.xml())
            xml_data.close()
            while exists(dst_annot):
                index += 1
                if offset != 'auto':
                    break
                out_filename = '%d_%06d.xml' % (
                    current_year,
                    index,
                )
                dst_annot = join(annotdir, out_filename)
        else:
            logger.info('Skipping:\n%r\n\n' % (image_path, ))

    for key in sets_dict.keys():
        manifest_filename = '%s.txt' % (key, )
        manifest_filepath = join(mainsetsdir, manifest_filename)
        with open(manifest_filepath, 'w') as file_:
            sets_dict[key].append('')
            content = sets_dict[key]
            content = '\n'.join(content)
            file_.write(content)

    logger.info('...completed')
    return datadir
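
The geometric core of _add_annotation — rotate the bbox corners, then take axis-aligned extrema — can be checked standalone. A numpy sketch assuming, as the vt.rotation_around_bbox_mat3x3 name suggests, that theta rotates about the bbox center:

import numpy as np

def rotated_bbox_extrema(bbox, theta):
    x, y, w, h = bbox
    cx, cy = x + w / 2.0, y + h / 2.0  # rotation center
    corners = np.array([(x, y), (x + w, y), (x + w, y + h), (x, y + h)]).T
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s], [s, c]])
    # Rotate each corner about the center, then take the bounding extrema
    pts = R.dot(corners - [[cx], [cy]]) + [[cx], [cy]]
    xs, ys = pts
    return xs.min(), xs.max(), ys.min(), ys.max()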
Example #15
def export_to_coco(
    ibs,
    species_list,
    species_mapping={},
    viewpoint_mapping={},
    target_size=2400,
    use_maximum_linear_dimension=True,
    use_existing_train_test=True,
    include_parts=False,
    gid_list=None,
    include_reviews=True,
    require_image_reviewed=False,
    require_named=False,
    output_images=True,
    use_global_train_set=False,
    **kwargs,
):
    """Create training COCO dataset for training models."""
    from datetime import date
    import datetime
    import random
    import json

    logger.info('Received species_mapping = %r' % (species_mapping, ))
    logger.info('Received viewpoint_mapping = %r' % (viewpoint_mapping, ))

    if species_list is None:
        species_list = sorted(set(species_mapping.values()))

    logger.info('Using species_list = %r' % (species_list, ))

    current_year = int(date.today().year)
    datadir = abspath(join(ibs.get_cachedir(), 'coco'))
    annotdir = join(datadir, 'annotations')
    imagedir = join(datadir, 'images')
    image_dir_dict = {
        'train': join(imagedir, 'train%s' % (current_year, )),
        'val': join(imagedir, 'val%s' % (current_year, )),
        'test': join(imagedir, 'test%s' % (current_year, )),
    }

    ut.delete(datadir)
    ut.ensuredir(datadir)
    ut.ensuredir(annotdir)
    ut.ensuredir(imagedir)
    for dataset in image_dir_dict:
        ut.ensuredir(image_dir_dict[dataset])

    info = {
        'description': 'Wild Me %s Dataset' % (ibs.dbname, ),
        # 'url'          : 'http://www.greatgrevysrally.com',
        'url': 'http://www.wildme.org',
        'version': '1.0',
        'year': current_year,
        'contributor': 'Wild Me <*****@*****.**>',
        'date_created': datetime.datetime.utcnow().isoformat(' '),
        'name': ibs.get_db_name(),
        'uuid': str(ibs.get_db_init_uuid()),
    }

    licenses = [
        {
            'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/',
            'id': 3,
            'name': 'Attribution-NonCommercial-NoDerivs License',
        },
    ]

    assert len(species_list) == len(
        set(species_list)), 'Cannot have duplicate species in species_list'
    category_dict = {}
    categories = []
    for index, species in enumerate(sorted(species_list)):

        species = species_mapping.get(species, species)

        categories.append({
            'id': index,
            'name': species,
            'supercategory': 'animal'
        })
        category_dict[species] = index

    def _add_annotation_or_part(
        image_index,
        annot_index,
        annot_uuid,
        bbox,
        theta,
        species_name,
        viewpoint,
        interest,
        annot_name,
        decrease,
        width,
        height,
        individuals,
        part_index=None,
        part_uuid=None,
    ):
        is_part = part_index is not None

        R = vt.rotation_around_bbox_mat3x3(theta, bbox)
        verts = vt.verts_from_bbox(bbox, close=True)
        xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
        trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
        new_verts = np.round(trans_pts).astype(int).T.tolist()

        x_points = [int(np.around(pt[0] * decrease)) for pt in new_verts]
        y_points = [int(np.around(pt[1] * decrease)) for pt in new_verts]
        segmentation = ut.flatten(list(zip(x_points, y_points)))

        xmin = max(min(x_points), 0)
        ymin = max(min(y_points), 0)
        xmax = min(max(x_points), width - 1)
        ymax = min(max(y_points), height - 1)

        w = xmax - xmin
        h = ymax - ymin
        area = w * h

        xtl_, ytl_, w_, h_ = bbox
        xtl_ *= decrease
        ytl_ *= decrease
        w_ *= decrease
        h_ *= decrease

        annot_part = {
            'bbox': [xtl_, ytl_, w_, h_],
            'theta': theta,
            'viewpoint': viewpoint,
            'segmentation': [segmentation],
            'segmentation_bbox': [xmin, ymin, w, h],
            'area': area,
            'iscrowd': 0,
            'id': part_index if is_part else annot_index,
            'image_id': image_index,
            'category_id': category_dict[species_name],
            'uuid': str(part_uuid if is_part else annot_uuid),
            'individual_ids': individuals,
        }
        if is_part:
            annot_part['annot_id'] = annot_index
        else:
            annot_part['isinterest'] = int(interest)
            annot_part['name'] = annot_name

        return annot_part, area

    output_dict = {}
    for dataset in ['train', 'val', 'test']:
        output_dict[dataset] = {
            'info': info,
            'licenses': licenses,
            'categories': categories,
            'images': [],
            'annotations': [],
            'parts': [],
        }

    # Get all gids and process them
    if gid_list is None:
        if require_named:
            aid_list = ibs.get_valid_aids()
            species_list_ = ibs.get_annot_species(aid_list)
            flag_list = [
                species_mapping.get(species_, species_) in species_list
                for species_ in species_list_
            ]
            aid_list = ut.compress(aid_list, flag_list)
            nid_list = ibs.get_annot_nids(aid_list)
            flag_list = [nid >= 0 for nid in nid_list]
            aid_list = ut.compress(aid_list, flag_list)
            gid_list = list(set(ibs.get_annot_gids(aid_list)))
        else:
            gid_list = ibs.get_valid_gids()

        if require_image_reviewed:
            image_reviewed_list = ibs.get_image_reviewed(gid_list)
            gid_list = ut.compress(gid_list, image_reviewed_list)

        gid_list = sorted(list(set(gid_list)))

    # Make a preliminary train / test split as imagesets or use the existing ones
    if not use_existing_train_test:
        ibs.imageset_train_test_split(**kwargs)

    train_gid_set = set(general_get_imageset_gids(ibs, 'TRAIN_SET', **kwargs))
    test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET', **kwargs))

    image_index = 1
    annot_index = 1
    part_index = 1

    aid_dict = {}

    logger.info('Exporting %d images' % (len(gid_list), ))
    for gid in gid_list:

        if use_global_train_set:
            dataset = 'train'
        else:
            if gid in test_gid_set:
                dataset = 'test'
            elif gid in train_gid_set:
                state = random.uniform(0.0, 1.0)
                if state <= 0.75:
                    dataset = 'train'
                else:
                    dataset = 'val'
            else:
                # raise AssertionError('All gids must be either in the TRAIN_SET or TEST_SET imagesets')
                logger.info('GID = %r was not in the TRAIN_SET or TEST_SET' %
                            (gid, ))
                dataset = 'test'

        width, height = ibs.get_image_sizes(gid)
        if target_size is None:
            decrease = 1.0
        else:
            condition = width > height if use_maximum_linear_dimension else width < height
            if condition:
                ratio = height / width
                decrease = target_size / width
                width = target_size
                height = int(target_size * ratio)
            else:
                ratio = width / height
                decrease = target_size / height
                height = target_size
                width = int(target_size * ratio)

        image_path = ibs.get_image_paths(gid)
        image_filename = '%012d.jpg' % (image_index, )
        image_filepath = join(image_dir_dict[dataset], image_filename)

        if output_images:
            _image = ibs.get_images(gid)
            _image = vt.resize(_image, (width, height))
            vt.imwrite(image_filepath, _image)

        image_gps = ibs.get_image_gps(gid)
        if image_gps is None or len(image_gps) != 2 or None in image_gps:
            image_gps_lat, image_gps_lon = None, None
        else:
            image_gps_lat, image_gps_lon = image_gps
            image_gps_lat = '%03.06f' % (image_gps_lat, )
            image_gps_lon = '%03.06f' % (image_gps_lon, )

        output_dict[dataset]['images'].append({
            'license': 3,
            'file_name': image_filename,
            # 'file_name': basename(ibs.get_image_uris_original(gid)),
            'photographer': ibs.get_image_notes(gid),
            'coco_url': None,
            'height': height,
            'width': width,
            'date_captured': ibs.get_image_datetime_str(gid).replace('/', '-'),
            'gps_lat_captured': image_gps_lat,
            'gps_lon_captured': image_gps_lon,
            'flickr_url': None,
            'id': image_index,
            'uuid': str(ibs.get_image_uuids(gid)),
        })

        logger.info('Copying:\n%r\n%r\n%r\n\n' % (
            image_path,
            image_filepath,
            (width, height),
        ))

        aid_list = ibs.get_image_aids(gid)
        bbox_list = ibs.get_annot_bboxes(aid_list)
        theta_list = ibs.get_annot_thetas(aid_list)
        species_name_list = ibs.get_annot_species_texts(aid_list)
        viewpoint_list = ibs.get_annot_viewpoints(aid_list)
        interest_list = ibs.get_annot_interest(aid_list)
        annot_uuid_list = ibs.get_annot_uuids(aid_list)
        annot_name_list = ibs.get_annot_name_texts(aid_list)
        part_rowids_list = ibs.get_annot_part_rowids(aid_list)
        nid_list = ibs.get_annot_nids(aid_list)

        zipped = zip(
            aid_list,
            bbox_list,
            theta_list,
            species_name_list,
            viewpoint_list,
            interest_list,
            annot_uuid_list,
            annot_name_list,
            part_rowids_list,
            nid_list,
        )
        for (
                aid,
                bbox,
                theta,
                species_name,
                viewpoint,
                interest,
                annot_uuid,
                annot_name,
                part_rowid_list,
                nid,
        ) in zipped:
            species_name = species_mapping.get(species_name, species_name)

            if species_name is None:
                continue

            if species_name not in species_list:
                continue

            if require_named and nid < 0:
                continue

            viewpoint = viewpoint_mapping.get(species_name,
                                              {}).get(viewpoint, viewpoint)

            # if viewpoint is None:
            #     continue

            individuals = ibs.get_name_aids(ibs.get_annot_nids(aid))

            # Transformation matrix
            annot, area = _add_annotation_or_part(
                image_index,
                annot_index,
                annot_uuid,
                bbox,
                theta,
                species_name,
                viewpoint,
                interest,
                annot_name,
                decrease,
                width,
                height,
                individuals,
            )
            logger.info('\t\tAdding annot %r with area %0.04f pixels^2' % (
                species_name,
                area,
            ))

            if include_reviews:
                reviews = ibs.get_review_rowids_from_single([aid])[0]
                user_list = ibs.get_review_identity(reviews)
                aid_tuple_list = ibs.get_review_aid_tuple(reviews)
                decision_list = ibs.get_review_decision_str(reviews)

                ids = []
                decisions = []
                zipped = zip(user_list, aid_tuple_list, decision_list)
                for user, aid_tuple, decision in zipped:
                    if 'user:web' not in user:
                        continue
                    match = list(set(aid_tuple) - set([aid]))
                    assert len(match) == 1
                    ids.append(match[0])
                    decisions.append(decision.lower())
                annot['review_ids'] = list(zip(ids, decisions))

            output_dict[dataset]['annotations'].append(annot)

            if include_parts and len(part_rowid_list) > 0:

                part_uuid_list = ibs.get_part_uuids(part_rowid_list)
                part_bbox_list = ibs.get_part_bboxes(part_rowid_list)
                part_theta_list = ibs.get_part_thetas(part_rowid_list)
                part_type_list = ibs.get_part_types(part_rowid_list)

                part_zipped = zip(part_uuid_list, part_bbox_list,
                                  part_theta_list, part_type_list)
                for part_uuid, part_bbox, part_theta, part_type in part_zipped:
                    part_species_name = '%s+%s' % (
                        species_name,
                        part_type,
                    )

                    part_species_name = species_mapping.get(
                        part_species_name, part_species_name)

                    if part_species_name is None:
                        continue

                    if part_species_name not in species_list:
                        continue

                    part, area = _add_annotation_or_part(
                        image_index,
                        annot_index,
                        annot_uuid,
                        part_bbox,
                        part_theta,
                        part_species_name,
                        viewpoint,
                        interest,
                        annot_name,
                        decrease,
                        width,
                        height,
                        individuals,
                        part_index=part_index,
                        part_uuid=part_uuid,
                    )
                    logger.info(
                        '\t\tAdding part %r with area %0.04f pixels^2' % (
                            part_species_name,
                            area,
                        ))
                    output_dict[dataset]['parts'].append(part)

                part_index += 1

            aid_dict[aid] = annot_index
            annot_index += 1

        image_index += 1

    for dataset in output_dict:
        annots = output_dict[dataset]['annotations']
        for index in range(len(annots)):
            annot = annots[index]

            # Map internal aids to external annot index
            individual_ids = annot['individual_ids']
            individual_ids_ = []
            for individual_id in individual_ids:
                if individual_id not in aid_dict:
                    continue
                individual_id_ = aid_dict[individual_id]
                individual_ids_.append(individual_id_)
            annot['individual_ids'] = individual_ids_

            # Map reviews
            if include_reviews:
                review_ids = annot['review_ids']
                review_ids_ = []
                for review in review_ids:
                    review_id, review_decision = review
                    if review_id not in aid_dict:
                        continue
                    review_id_ = aid_dict[review_id]
                    review_ = (
                        review_id_,
                        review_decision,
                    )
                    review_ids_.append(review_)
                annot['review_ids'] = review_ids_

            # Store
            output_dict[dataset]['annotations'][index] = annot

    for dataset in output_dict:
        json_filename = 'instances_%s%s.json' % (
            dataset,
            current_year,
        )
        json_filepath = join(annotdir, json_filename)

        with open(json_filepath, 'w') as json_file:
            json.dump(output_dict[dataset], json_file)

    logger.info('...completed')
    return datadir
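
Stripped of the IBEIS plumbing, what export_to_coco writes is an ordinary COCO instances file. A minimal example of the same structure, with entirely hypothetical values:

import json

coco = {
    'info': {'description': 'toy dataset', 'version': '1.0', 'year': 2024},
    'licenses': [],
    'categories': [{'id': 0, 'name': 'zebra_plains', 'supercategory': 'animal'}],
    'images': [{'id': 1, 'file_name': '000000000001.jpg',
                'width': 2400, 'height': 1600}],
    'annotations': [{'id': 1, 'image_id': 1, 'category_id': 0,
                     'bbox': [10.0, 20.0, 300.0, 200.0],  # [x, y, w, h]
                     'area': 60000.0, 'iscrowd': 0}],
}
with open('instances_train2024.json', 'w') as json_file:
    json.dump(coco, json_file)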
Example #16
def compute_and_write_chips(ibs, aid_list, config2_=None):
    r"""
    Starts the compute chip process

    Args:
        ibs (IBEISController):  ibeis controller object
        aid_list (list):  list of annotation rowids
        config2_ (dict): (default = None)

    CommandLine:
        python -m ibeis.algo.preproc.preproc_chip --test-compute_and_write_chips

    FIXME: THERE IS A FREEZE THAT HAPPENS HERE
        ./reset_dbs.py
        python -m ibeis.expt.harness --exec-precompute_test_configuration_features -t custom --expt-preload

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        >>> from os.path import basename
        >>> config2_ = None
        >>> ibs, aid_list = testdata_ibeis()
        >>> # delete chips
        >>> ibs.delete_annot_chips(aid_list)
        >>> # ensure they were deleted
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False)
        >>> assert all([cid is None for cid in cid_list]), 'should be gone'
        >>> chip_result_list = list(compute_and_write_chips(ibs, aid_list))
        >>> cfpath_list = ut.get_list_column(chip_result_list, 0)
        >>> cfname_list = ut.lmap(basename, cfpath_list)
        >>> print(cfname_list)
        >>> # cids should still be None. IBEIS does not know we computed chips
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False)
        >>> assert all([cid is None for cid in cid_list]), 'should be gone'
        >>> # Now this function should have been executed again implicitly
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=True)
        >>> assert ibs.get_chip_fpath(cid_list) == cfpath_list, 'should be what we had before'

    Ignore:
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        from os.path import basename
        import ibeis
        ibs = ibeis.opendb('GZ_Master1')
        aid_list = [1915]
        extract_chip_from_img(imgBGR, bbox, theta, new_size)
        gfpath_list = ibs.get_annot_image_paths(aid_list)
        bbox_list   = ibs.get_annot_bboxes(aid_list)
        theta_list  = ibs.get_annot_thetas(aid_list)
        bbox_size_list = ut.get_list_column(bbox_list, [2, 3])
        newsize_list = vt.get_scaled_sizes_with_area(target_area, bbox_size_list)
        gfpath = gfpath_list[0]
        img = vt.imread(gfpath)
        dim_size = 450
        dim_size = 500
        target_area = dim_size ** 2
        bbox = bbox_list[0]
        theta = theta_list[0]
        new_size = newsize_list[0]
        #new_size = bbox[2:4]
        chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, {})
        import plottool as pt
        print('chipBGR.shape = %r' % (chipBGR.shape,))
        print('chipBGR.shape = %r' % (np.sqrt(np.prod(chipBGR.shape[0:2])),))
        pt.imshow(chipBGR, fnum=1)
        pt.imshow(img, fnum=2)
        pt.iup()
        pt.present()

    """
    ut.ensuredir(ibs.get_chipdir())
    # CONFIG INFO
    # Get chip configuration information
    if config2_ is not None:
        chip_cfg_dict = config2_.get("chip_cfg_dict")
        dim_size = config2_.get("dim_size")
        assert dim_size is not None
        assert chip_cfg_dict is not None
    else:
        # use ibs if config2_ is None
        dim_size = ibs.cfg.chip_cfg.dim_size
        chip_cfg_dict = ibs.cfg.chip_cfg.to_dict()
    # Get chip dest information (output path),
    # source information (image, annotation_bbox, theta)
    # Get how big to resize each chip, etc...
    nChips = len(aid_list)
    filter_list = get_filter_list(chip_cfg_dict)
    cfpath_list = make_annot_chip_fpath_list(ibs, aid_list, config2_=config2_)
    gfpath_list = ibs.get_annot_image_paths(aid_list)
    bbox_list = ibs.get_annot_bboxes(aid_list)
    theta_list = ibs.get_annot_thetas(aid_list)
    # target_area = dim_size ** 2
    bbox_size_list = ut.get_list_column(bbox_list, [2, 3])

    scale_func_dict = {"width": vt.get_scaled_size_with_width, "root_area": vt.get_scaled_size_with_area}
    resize_dim = chip_cfg_dict["resize_dim"]
    scale_func = scale_func_dict[resize_dim]
    if resize_dim == "root_area":
        target_dim_size = dim_size ** 2
    else:
        target_dim_size = dim_size
    newsize_list = [scale_func(target_dim_size, w, h) for (w, h) in bbox_size_list]

    # newsize_list = vt.get_scaled_sizes_with_area(target_area, bbox_size_list)
    invalid_aids = [aid for aid, (w, h) in zip(aid_list, bbox_size_list) if w == 0 or h == 0]
    filtlist_iter = (filter_list for _ in range(nChips))
    # Check for invalid chips
    if len(invalid_aids) > 0:
        msg = "REMOVE INVALID (BAD WIDTH AND/OR HEIGHT) AIDS TO COMPUTE AND WRITE CHIPS"
        msg += "INVALID AIDS: %r" % (invalid_aids,)
        print(msg)
        raise Exception(msg)
    # Define "Asynchronous" generator
    arg_iter = zip(cfpath_list, gfpath_list, bbox_list, theta_list, newsize_list, filtlist_iter)
    arg_list = list(arg_iter)
    # ut.embed()
    # We have to force serial here until we can figure out why parallel chip
    # generation causes a freeze
    # utool has an unstable test that reproduces this reliably (BECAUSE OF
    # CV2.WARP_AFFINE WITH BIG OUTPUT)
    if ut.VERBOSE:
        print("Computing %d chips" % (len(cfpath_list)))

    DO_IMWRITE_IN_WORKER = False
    if DO_IMWRITE_IN_WORKER:
        # Compute and write chips in an asynchronous process
        chip_result_iter = ut.util_parallel.generate(gen_chip, arg_list, ordered=True, force_serial=True, freq=10)
        # chip_result_list = list(chip_result_iter)
        for _ in chip_result_iter:
            yield _
    else:
        # Compute chips in an asynchronous process. Write here
        chip_result_iter = ut.util_parallel.generate(gen_chip2, arg_list, ordered=True, force_serial=True, freq=10)
        # chip_result_list = []
        for chipBGR, cfpath, width, height in chip_result_iter:
            vt.imwrite(cfpath, chipBGR)
            del chipBGR
            yield (cfpath, width, height)
    # print(ut.util_parallel.__POOL__)
    if not ut.VERBOSE:
        print("Done computing chips")
Example #17
def compute_and_write_chips(ibs, aid_list, config2_=None):
    r"""
    Starts the compute chip process

    Args:
        ibs (IBEISController):  ibeis controller object
        aid_list (list):  list of annotation rowids
        config2_ (dict): (default = None)

    CommandLine:
        python -m ibeis.algo.preproc.preproc_chip --test-compute_and_write_chips

    FIXME: THERE IS A FREEZE THAT HAPPENS HERE
        ./reset_dbs.py
        python -m ibeis.expt.harness --exec-precompute_test_configuration_features -t custom --expt-preload

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        >>> from os.path import basename
        >>> config2_ = None
        >>> ibs, aid_list = testdata_ibeis()
        >>> # delete chips
        >>> ibs.delete_annot_chips(aid_list)
        >>> # ensure they were deleted
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False)
        >>> assert all([cid is None for cid in cid_list]), 'should be gone'
        >>> chip_result_list = list(compute_and_write_chips(ibs, aid_list))
        >>> cfpath_list = ut.get_list_column(chip_result_list, 0)
        >>> cfname_list = ut.lmap(basename, cfpath_list)
        >>> print(cfname_list)
        >>> # cids should still be None. IBEIS does not know we computed chips
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False)
        >>> assert all([cid is None for cid in cid_list]), 'should be gone'
        >>> # Now this function should have been executed again implicitly
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=True)
        >>> assert ibs.get_chip_fpath(cid_list) == cfpath_list, 'should be what we had before'

    Ignore:
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        from os.path import basename
        import ibeis
        ibs = ibeis.opendb('GZ_Master1')
        aid_list = [1915]
        extract_chip_from_img(imgBGR, bbox, theta, new_size)
        gfpath_list = ibs.get_annot_image_paths(aid_list)
        bbox_list   = ibs.get_annot_bboxes(aid_list)
        theta_list  = ibs.get_annot_thetas(aid_list)
        bbox_size_list = ut.get_list_column(bbox_list, [2, 3])
        newsize_list = vt.get_scaled_sizes_with_area(target_area, bbox_size_list)
        gfpath = gfpath_list[0]
        img = vt.imread(gfpath)
        dim_size = 450
        dim_size = 500
        target_area = dim_size ** 2
        bbox = bbox_list[0]
        theta = theta_list[0]
        new_size = newsize_list[0]
        #new_size = bbox[2:4]
        chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, {})
        import plottool as pt
        print('chipBGR.shape = %r' % (chipBGR.shape,))
        print('chipBGR.shape = %r' % (np.sqrt(np.prod(chipBGR.shape[0:2])),))
        pt.imshow(chipBGR, fnum=1)
        pt.imshow(img, fnum=2)
        pt.iup()
        pt.present()

    """
    ut.ensuredir(ibs.get_chipdir())
    # CONFIG INFO
    # Get chip configuration information
    if config2_ is not None:
        chip_cfg_dict = config2_.get('chip_cfg_dict')
        dim_size = config2_.get('dim_size')
        assert dim_size is not None
        assert chip_cfg_dict is not None
    else:
        # use ibs if config2_ is None
        dim_size = ibs.cfg.chip_cfg.dim_size
        chip_cfg_dict = ibs.cfg.chip_cfg.to_dict()
    # Get chip dest information (output path),
    # source information (image, annotation_bbox, theta)
    # Get how big to resize each chip, etc...
    nChips = len(aid_list)
    filter_list = get_filter_list(chip_cfg_dict)
    cfpath_list = make_annot_chip_fpath_list(ibs, aid_list, config2_=config2_)
    gfpath_list = ibs.get_annot_image_paths(aid_list)
    bbox_list = ibs.get_annot_bboxes(aid_list)
    theta_list = ibs.get_annot_thetas(aid_list)
    #target_area = dim_size ** 2
    bbox_size_list = ut.get_list_column(bbox_list, [2, 3])

    scale_func_dict = {
        'width': vt.get_scaled_size_with_width,
        'root_area': vt.get_scaled_size_with_area,
    }
    resize_dim = chip_cfg_dict['resize_dim']
    scale_func = scale_func_dict[resize_dim]
    if resize_dim == 'root_area':
        target_dim_size = dim_size**2
    else:
        target_dim_size = dim_size
    newsize_list = [
        scale_func(target_dim_size, w, h) for (w, h) in bbox_size_list
    ]

    #newsize_list = vt.get_scaled_sizes_with_area(target_area, bbox_size_list)
    invalid_aids = [
        aid for aid, (w, h) in zip(aid_list, bbox_size_list)
        if w == 0 or h == 0
    ]
    filtlist_iter = (filter_list for _ in range(nChips))
    # Check for invalid chips
    if len(invalid_aids) > 0:
        msg = (
            "REMOVE INVALID (BAD WIDTH AND/OR HEIGHT) AIDS TO COMPUTE AND WRITE CHIPS. "
        )
        msg += ("INVALID AIDS: %r" % (invalid_aids, ))
        print(msg)
        raise Exception(msg)
    # Define "Asynchronous" generator
    arg_iter = zip(cfpath_list, gfpath_list, bbox_list, theta_list,
                   newsize_list, filtlist_iter)
    arg_list = list(arg_iter)
    #ut.embed()
    # We have to force serial here until we can figure out why parallel chip
    # generation causes a freeze
    # utool has an unstable test that reproduces this reliably (BECAUSE OF
    # CV2.WARP_AFFINE WITH BIG OUTPUT)
    if ut.VERBOSE:
        print('Computing %d chips' % (len(cfpath_list)))

    DO_IMWRITE_IN_WORKER = False
    if DO_IMWRITE_IN_WORKER:
        # Compute and write chips in an asynchronous process
        chip_result_iter = ut.util_parallel.generate(gen_chip,
                                                     arg_list,
                                                     ordered=True,
                                                     force_serial=True,
                                                     freq=10)
        #chip_result_list = list(chip_result_iter)
        for _ in chip_result_iter:
            yield _
    else:
        # Compute chips in an asynchronous process. Write here
        chip_result_iter = ut.util_parallel.generate(gen_chip2,
                                                     arg_list,
                                                     ordered=True,
                                                     force_serial=True,
                                                     freq=10)
        #chip_result_list = []
        for chipBGR, cfpath, width, height in chip_result_iter:
            vt.imwrite(cfpath, chipBGR)
            del chipBGR
            yield (cfpath, width, height)
    #print(ut.util_parallel.__POOL__)
    if not ut.VERBOSE:
        print('Done computing chips')
Example #18
    def nosql_draw(check_func, rchip1_fpath, rchip2_fpath, kpts1, kpts2):
        # This gets executed in the child thread and does drawing async style
        #from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
        #from matplotlib.backends.backend_pdf import Figure
        #from matplotlib.backends.backend_svg import FigureCanvas
        #from matplotlib.backends.backend_svg import Figure
        from matplotlib.backends.backend_agg import FigureCanvas
        try:
            from matplotlib.backends.backend_agg import Figure
        except ImportError:
            from matplotlib.figure import Figure

        kpts1_ = vt.offset_kpts(kpts1, (0, 0), (resize_factor, resize_factor))
        kpts2_ = vt.offset_kpts(kpts2, (0, 0), (resize_factor, resize_factor))

        #from matplotlib.figure import Figure
        if check_func is not None and check_func():
            return

        rchip1 = vt.imread(rchip1_fpath)
        rchip1 = vt.resize_image_by_scale(rchip1, resize_factor)
        if check_func is not None and check_func():
            return
        rchip2 = vt.imread(rchip2_fpath)
        rchip2 = vt.resize_image_by_scale(rchip2, resize_factor)
        if check_func is not None and check_func():
            return

        try:
            idx = cm.daid2_idx[daid]
            fm   = cm.fm_list[idx]
            fsv  = None if cm.fsv_list is None else cm.fsv_list[idx]
            fs   = None if fsv is None else fsv.prod(axis=1)
        except KeyError:
            fm = []
            fs = None
            fsv = None

        maxnum = 200
        if fs is not None and len(fs) > maxnum:
            # HACK TO ONLY SHOW TOP MATCHES
            sortx = fs.argsort()[::-1]
            fm = fm.take(sortx[:maxnum], axis=0)
            fs = fs.take(sortx[:maxnum], axis=0)

        was_interactive = mpl.is_interactive()
        if was_interactive:
            mpl.interactive(False)
        #fnum = 32
        fig = Figure()
        canvas = FigureCanvas(fig)  # NOQA
        #fig.clf()
        ax = fig.add_subplot(1, 1, 1)
        if check_func is not None and check_func():
            return
        #fig = pt.plt.figure(fnum)
        #H1 = np.eye(3)
        #H2 = np.eye(3)
        #H1[0, 0] = .5
        #H1[1, 1] = .5
        #H2[0, 0] = .5
        #H2[1, 1] = .5
        ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1, rchip2, kpts1_, kpts2_, fm,
                                              fs=fs, colorbar_=False, ax=ax)
        if check_func is not None and check_func():
            return
        savekw = {
            # 'dpi' : 60,
            'dpi' : 80,
        }
        axes_extents = pt.extract_axes_extents(fig)
        #assert len(axes_extents) == 1, 'more than one axes'
        extent = axes_extents[0]
        with io.BytesIO() as stream:
            # This call takes 23% - 15% of the time depending on settings
            fig.savefig(stream, bbox_inches=extent, **savekw)
            stream.seek(0)
            data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
        if check_func is not None and check_func():
            return
        pt.plt.close(fig)
        image = cv2.imdecode(data, 1)
        thumbsize = 221
        max_dsize = (thumbsize, thumbsize)
        dsize, sx, sy = vt.resized_clamped_thumb_dims(vt.get_size(image), max_dsize)
        if check_func is not None and check_func():
            return
        image = vt.resize(image, dsize)
        vt.imwrite(fpath, image)
        if check_func is not None and check_func():
            return
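
The last step clamps the rendered match image into a 221x221 thumbnail while preserving aspect ratio. vt.resized_clamped_thumb_dims is approximated below by a plain scale computation; a minimal sketch assuming cv2:

import cv2

def clamped_thumb(image, max_dsize=(221, 221)):
    h, w = image.shape[0:2]
    max_w, max_h = max_dsize
    scale = min(max_w / w, max_h / h, 1.0)  # never upscale
    dsize = (max(int(w * scale), 1), max(int(h * scale), 1))
    return cv2.resize(image, dsize, interpolation=cv2.INTER_AREA)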