def preproc_cropped_chips(depc, cid_list, tipid_list, config=None):
    """
    Crop each fluke chip to the horizontal span between the left and right
    tip keypoints (optionally resizing to a fixed width), and transform the
    tip keypoints into the cropped-chip coordinate frame.

    Args:
        depc: dependency cache controller
        cid_list (list): chip rowids
        tipid_list (list): Notch_Tips rowids, parallel to cid_list
        config (dict): expects 'crop_enabled' and 'crop_dim_size' keys
            # NOTE(review): config=None default would crash on the first
            # subscript; presumably the depc framework always supplies it.

    Yields:
        tuple or None: (new_img, width, height, M, notch_, left_, right_)
            for each input, or None when a bad keypoint prediction makes
            resizing impossible (exactly one item is yielded per input).

    CommandLine:
        python -m ibeis_flukematch.plugin --exec-preproc_cropped_chips --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_flukematch.plugin import *  # NOQA
        >>> ibs, aid_list = testdata_humpbacks()
        >>> config = CropChipConfig(crop_enabled=True)
        >>> cid_list = ibs.depc.get_rowids('chips', aid_list, config)
        >>> tipid_list = ibs.depc.get_rowids('Notch_Tips', aid_list, config)
        >>> depc = ibs.depc
        >>> list(preproc_cropped_chips(depc, cid_list, tipid_list, config))
        >>> chip_list = ibs.depc.get('Cropped_Chips', aid_list, 'img', config)
        >>> notch_tips = ibs.depc.get('Cropped_Chips', aid_list, ('notch', 'left', 'right'), config)
        >>> import plottool as pt
        >>> ut.ensure_pylab_qt4()
        >>> for notch, chip, aid in ut.InteractiveIter(zip(notch_tips, chip_list, aid_list)):
        >>>     pt.reset()
        >>>     pt.imshow(chip)
        >>>     print(ibs.depc.get('Cropped_Chips', [aid], 'img', config, read_extern=False)[0])
        >>>     kpts_ = np.array(notch)
        >>>     pt.draw_kpts2(kpts_, pts=True, ell=False, pts_size=20)
        >>>     pt.update()
        >>> ut.show_if_requested()
    """
    # Load the source chips and their (left, notch, right) tip keypoints
    img_list = depc.get_native_property(const.CHIP_TABLE, cid_list, 'img')
    tips_list = depc.get_native_property('Notch_Tips', tipid_list, ('left', 'notch', 'right'))

    cropped_chip_dpath = depc.controller.get_chipdir() + '_crop'
    ut.ensuredir(cropped_chip_dpath)

    for img, tips in zip(img_list, tips_list):
        left, notch, right = tips
        # Default: bbox/size cover the whole image (no crop)
        bbox = (0, 0, img.shape[1], img.shape[0])
        chip_size = (img.shape[1], img.shape[0])
        if left[0] > right[0]:
            # HACK: the keypoint network sometimes swaps left/right;
            # TODO: maybe move this normalization to infer_kp?
            right, left = (left, right)
        if config['crop_enabled']:
            # Crop to the horizontal span between the tips; keep full height.
            # (x, y, w, h) with (x, y) at the top-left.
            bbox = (left[0],  # leftmost x value
                    0,  # top of the image
                    (right[0] - left[0]),  # width
                    img.shape[0],  # height
                    )
        if config['crop_dim_size'] is not None:
            # Resize to a fixed width, preserving the aspect ratio of the
            # CROP (not the whole image), so compute size from bbox dims.
            new_x = config['crop_dim_size']
            try:
                chip_size = vt.get_scaled_size_with_width(new_x, bbox[2], bbox[3])
            except OverflowError:
                print("[cropped chip] WARNING: Probably got a bad keypoint prediction: bbox: %r" % (bbox,))
                yield None
                # BUGFIX: without this continue we would fall through and
                # yield a second (stale-sized) result for the same input,
                # misaligning outputs with cid_list.
                continue
        M = vt.get_image_to_chip_transform(bbox, chip_size, 0)
        with ut.embed_on_exception_context:
            new_img = cv2.warpAffine(img, M[:-1, :], chip_size)

        # Map tip keypoints into the cropped-chip frame and clamp to bounds
        notch_, left_, right_ = vt.transform_points_with_homography(M, np.array([notch, left, right]).T).T

        notch_ = bound_point(notch_, chip_size)
        left_  = bound_point(left_, chip_size)
        right_ = bound_point(right_, chip_size)
        yield (new_img, bbox[2], bbox[3], M, notch_, left_, right_)
# ---- Example #2 (score: 0) ----
def preproc_cropped_chips(depc, cid_list, tipid_list, config=None):
    """
    Crop each fluke chip to the span between the left and right tip
    keypoints (optionally resizing to a fixed width), warping the tip
    keypoints into the cropped-chip coordinate frame.

    Args:
        depc: dependency cache controller
        cid_list (list): chip rowids
        tipid_list (list): Notch_Tips rowids, parallel to cid_list
        config (dict): expects 'crop_enabled' and 'crop_dim_size' keys

    Yields:
        tuple or None: (new_img, width, height, M, notch_, left_, right_)
            per input; None when a bad keypoint prediction prevents
            resizing. Exactly one item is yielded per input.

    CommandLine:
        python -m ibeis_flukematch.plugin --exec-preproc_cropped_chips --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_flukematch.plugin import *  # NOQA
        >>> ibs, aid_list = testdata_humpbacks()
        >>> config = CropChipConfig(crop_enabled=True)
        >>> cid_list = ibs.depc.get_rowids('chips', aid_list, config)
        >>> tipid_list = ibs.depc.get_rowids('Notch_Tips', aid_list, config)
        >>> depc = ibs.depc
        >>> list(preproc_cropped_chips(depc, cid_list, tipid_list, config))
        >>> chip_list = ibs.depc.get('Cropped_Chips', aid_list, 'img', config)
        >>> notch_tips = ibs.depc.get('Cropped_Chips', aid_list, ('notch', 'left', 'right'), config)
        >>> import plottool as pt
        >>> ut.ensure_pylab_qt4()
        >>> for notch, chip, aid in ut.InteractiveIter(zip(notch_tips, chip_list, aid_list)):
        >>>     pt.reset()
        >>>     pt.imshow(chip)
        >>>     print(ibs.depc.get('Cropped_Chips', [aid], 'img', config, read_extern=False)[0])
        >>>     kpts_ = np.array(notch)
        >>>     pt.draw_kpts2(kpts_, pts=True, ell=False, pts_size=20)
        >>>     pt.update()
        >>> ut.show_if_requested()
    """
    # Load source chips and (left, notch, right) tip keypoints
    img_list = depc.get_native_property(const.CHIP_TABLE, cid_list, 'img')
    tips_list = depc.get_native_property('Notch_Tips', tipid_list,
                                         ('left', 'notch', 'right'))

    cropped_chip_dpath = depc.controller.get_chipdir() + '_crop'
    ut.ensuredir(cropped_chip_dpath)

    for img, tips in zip(img_list, tips_list):
        left, notch, right = tips
        # Default to the bbox/size covering the whole image (no crop)
        bbox = (0, 0, img.shape[1], img.shape[0])
        chip_size = (img.shape[1], img.shape[0])
        if left[0] > right[0]:
            # HACK: the keypoint network sometimes swaps left/right;
            # TODO: maybe move this normalization to infer_kp?
            right, left = (left, right)
        if config['crop_enabled']:
            # Crop to the horizontal span between the tips; keep full
            # height. (x, y, w, h) with (x, y) at the top-left.
            bbox = (
                left[0],  # leftmost x value
                0,  # top of the image
                (right[0] - left[0]),  # width
                img.shape[0],  # height
            )
        if config['crop_dim_size'] is not None:
            # Resize to a fixed width, preserving the aspect ratio of the
            # CROP (not the whole image), so derive the size from bbox dims.
            new_x = config['crop_dim_size']
            try:
                chip_size = vt.get_scaled_size_with_width(
                    new_x, bbox[2], bbox[3])
            except OverflowError:
                print(
                    "[cropped chip] WARNING: Probably got a bad keypoint prediction: bbox: %r"
                    % (bbox, ))
                yield None
                # BUGFIX: skip to the next input; previously execution fell
                # through and yielded a second result for the same image,
                # misaligning outputs with cid_list.
                continue
        M = vt.get_image_to_chip_transform(bbox, chip_size, 0)
        with ut.embed_on_exception_context:
            new_img = cv2.warpAffine(img, M[:-1, :], chip_size)

        # Map tip keypoints into the cropped-chip frame and clamp to bounds
        notch_, left_, right_ = vt.transform_points_with_homography(
            M,
            np.array([notch, left, right]).T).T

        notch_ = bound_point(notch_, chip_size)
        left_ = bound_point(left_, chip_size)
        right_ = bound_point(right_, chip_size)
        yield (new_img, bbox[2], bbox[3], M, notch_, left_, right_)
# ---- Example #3 (score: 0) ----
def compute_labels_localizations(depc, loc_id_list, config=None):
    r"""
    Extracts the detections for a given input image

    Args:
        depc (ibeis.depends_cache.DependencyCache):
        loc_id_list (list):  list of localization rowids
        config (dict): (default = None)

    Yields:
        tuple: per input image, the labeler results grouped and transposed
            into parallel arrays (five np.array columns plus one list column)

    CommandLine:
        ibeis compute_labels_localizations

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.core_images import *  # NOQA
        >>> import ibeis
        >>> defaultdb = 'PZ_MTEST'
        >>> ibs = ibeis.opendb(defaultdb=defaultdb)
        >>> depc = ibs.depc_image
        >>> gid_list = ibs.get_valid_gids()[0:100]
        >>> depc.delete_property('labeler', gid_list)
        >>> results = depc.get_property('labeler', gid_list, None)
        >>> results = depc.get_property('labeler', gid_list, 'species')
        >>> print(results)
    """
    from ibeis.algo.detect.labeler.labeler import label_chip_list
    print('[ibs] Process Localization Labels')
    print('config = %r' % (config,))
    # Get controller
    ibs = depc.controller
    depc = ibs.depc_image

    gid_list_ = depc.get_ancestor_rowids('localizations', loc_id_list, 'images')
    assert len(gid_list_) == len(loc_id_list)

    # Grab the localizations (one list of detections per image)
    bboxes_list = depc.get_native('localizations', loc_id_list, 'bboxes')
    thetas_list = depc.get_native('localizations', loc_id_list, 'thetas')
    gids_list   = [
        np.array([gid] * len(bbox_list))
        for gid, bbox_list in zip(gid_list_, bboxes_list)
    ]

    # Flatten all of these lists for efficiency
    bbox_list      = ut.flatten(bboxes_list)
    theta_list     = ut.flatten(thetas_list)
    gid_list       = ut.flatten(gids_list)
    bbox_size_list = ut.take_column(bbox_list, [2, 3])
    newsize_list   = [(128, 128)] * len(bbox_list)
    # Sanity check: a zero-area bbox cannot be warped into a chip
    invalid_flags = [w == 0 or h == 0 for (w, h) in bbox_size_list]
    invalid_bboxes = ut.compress(bbox_list, invalid_flags)
    assert len(invalid_bboxes) == 0, 'invalid bboxes=%r' % (invalid_bboxes,)

    # Build transformation from image to chip
    M_list = [
        vt.get_image_to_chip_transform(bbox, new_size, theta)
        for bbox, theta, new_size in zip(bbox_list, theta_list, newsize_list)
    ]

    # Extract "chips"
    flags = cv2.INTER_LANCZOS4
    borderMode = cv2.BORDER_CONSTANT
    warpkw = dict(flags=flags, borderMode=borderMode)

    # gid_list is grouped by image, so cache the last-loaded image to avoid
    # re-reading it for every detection in the same image
    last_gid = None
    chip_list = []
    for gid, new_size, M in zip(gid_list, newsize_list, M_list):
        if gid != last_gid:
            img = ibs.get_images(gid)
            last_gid = gid
        chip = cv2.warpAffine(img, M[0:2], tuple(new_size), **warpkw)
        assert chip.shape[0] == 128 and chip.shape[1] == 128
        chip_list.append(chip)

    # Get the results from the algorithm
    result_list = label_chip_list(chip_list)
    assert len(gid_list) == len(result_list)

    # Group the per-detection results back by image
    group_dict = {}
    for gid, result in zip(gid_list, result_list):
        group_dict.setdefault(gid, []).append(result)
    assert len(gid_list_) == len(group_dict.keys())

    # Return the results in the original image order
    for gid in gid_list_:
        # BUGFIX: zip() returns an iterator in Python 3, which does not
        # support indexing; materialize it before subscripting.
        zipped_list = list(zip(*group_dict[gid]))
        ret_tuple = (
            np.array(zipped_list[0]),
            np.array(zipped_list[1]),
            np.array(zipped_list[2]),
            np.array(zipped_list[3]),
            np.array(zipped_list[4]),
            list(zipped_list[5]),
        )
        yield ret_tuple