Example #1
import numpy as np
from netharn import util  # provides the per-image convert_colorspace used below


def convert_colorspace(inputs, colorspace, inplace=False):
    """ Convert every frame in inputs.im to the requested colorspace. """
    if colorspace.lower() == inputs.colorspace.lower():
        # Already in the requested colorspace; nothing to do
        if not inplace:
            return inputs.im
        return
    im_out = np.empty_like(inputs.im)
    # Reusable contiguous buffer for the per-frame conversion
    dst = np.ascontiguousarray(np.empty_like(inputs.im[0]))
    for ix, im in enumerate(inputs.im):
        util.convert_colorspace(im, src_space=inputs.colorspace,
                                dst_space=colorspace, dst=dst)
        im_out[ix] = dst
    if inplace:
        # Mutate the container and record the new colorspace
        inputs.im = im_out
        inputs.colorspace = colorspace
    else:
        return im_out
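
A minimal usage sketch for the function above. The Inputs container here is hypothetical; it only mimics the two attributes the function relies on (im as an N x H x W x C float array and colorspace as a string), and the LAB target is chosen to match the colorspaces used in the other examples.

import numpy as np


class Inputs(object):
    def __init__(self, im, colorspace):
        self.im = im
        self.colorspace = colorspace


inputs = Inputs(np.random.rand(4, 32, 32, 3).astype(np.float32), 'rgb')
lab = convert_colorspace(inputs, 'LAB')          # returns a new array
convert_colorspace(inputs, 'LAB', inplace=True)  # mutates inputs instead
assert inputs.colorspace == 'LAB'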
Example #2
def grab_test_image(key='astro', space='rgb'):
    from netharn.util import convert_colorspace
    import cv2
    import ubelt as ub  # needed for ub.grabdata below
    if key == 'astro':
        url = 'https://i.imgur.com/KXhKM72.png'
    elif key == 'carl':
        url = 'https://i.imgur.com/oHGsmvF.png'
    else:
        raise KeyError(key)
    fpath = ub.grabdata(url)
    bgr = cv2.imread(fpath)
    image = convert_colorspace(bgr, space, src_space='bgr')
    return image
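
A quick, hedged usage note for the helper above: cv2.imread returns an H x W x 3 uint8 BGR array, so the returned image keeps that dtype, and the default space='rgb' simply reorders the channels.

astro_rgb = grab_test_image('astro')     # downloads (and caches) the PNG, returns RGB
print(astro_rgb.shape, astro_rgb.dtype)  # something like (H, W, 3) uint8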
Example #3
def grab_test_image(key='astro', space='rgb'):
    """
    Args:
        key (str): which test image to grab. Valid choices are:
            astro - an astronaut
            carl - Carl Sagan
            paraview - ParaView logo
            stars - picture of stars in the sky

        space (str): which colorspace to return in (defaults to RGB)

    Example:
        >>> for key in grab_test_image.keys():
        ...     grab_test_image(key)
    """
    from netharn.util import convert_colorspace
    import cv2
    fpath = grab_test_image_fpath(key)
    bgr = cv2.imread(fpath)
    image = convert_colorspace(bgr, space, src_space='bgr')
    return image
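
The doctest in Example #3 iterates over grab_test_image.keys(), which implies a key registry is attached to the function elsewhere in the module. A sketch of how that might be wired up; the _TEST_IMAGES dict is an assumption and only lists the two URLs that appear in Example #2 (paraview and stars would have their own entries).

_TEST_IMAGES = {
    'astro': 'https://i.imgur.com/KXhKM72.png',
    'carl': 'https://i.imgur.com/oHGsmvF.png',
}
grab_test_image.keys = _TEST_IMAGES.keys  # lets callers enumerate valid keys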
Example #4
    def load_inputs(dset, index):
        """
        Ignore:
            >>> inputs, task = cifar_inputs(train=False)
            >>> workdir = ub.ensuredir(ub.truepath('~/data/work/cifar'))
            >>> dset = CIFAR_Wrapper(inputs, task, workdir, 'LAB')
            >>> dset._make_normalizer('independent')
            >>> index = 0
            >>> im, gt = dset.load_inputs(index)

        Example:
            >>> inputs, task = cifar_inputs(train=False)
            >>> workdir = ub.ensuredir(ub.truepath('~/data/work/cifar'))
            >>> dset = CIFAR_Wrapper(inputs, task, workdir, 'RGB')
            >>> index = 0
            >>> im, gt = dset.load_inputs(index)
            >>> from netharn.util import mplutil
            >>> mplutil.qtensure()
            >>> dset = CIFAR_Wrapper(inputs, task, workdir, 'RGB')
            >>> dset.augment = True
            >>> im, gt = dset.load_inputs(index)
            >>> mplutil.imshow(im, colorspace='rgb')

            >>> dset = CIFAR_Wrapper(inputs, task, workdir, 'LAB')
            >>> dset.augment = True
            >>> im, gt = dset.load_inputs(index)
            >>> mplutil.imshow(im, colorspace='LAB')
        """
        assert dset.inputs.colorspace.lower() == 'rgb', (
            'we must be in rgb for augmentation')
        im = dset.inputs.im[index]

        if dset.inputs.gt is not None:
            gt = dset.inputs.gt[index]
        else:
            gt = None

        if dset.augment:
            # Image augmentation must be done in RGB
            # Augment intensity independently
            # im = dset.im_augment(im)
            # Augment geometry consistently

            # params = dset.rand_aff.random_params()
            # im = dset.rand_aff.warp(im, params, interp='cubic', backend='cv2')

            im = util.convert_colorspace(im,
                                         src_space=dset.inputs.colorspace,
                                         dst_space='rgb')
            # Do augmentation in uint8 RGB
            im = (im * 255).astype(np.uint8)
            im = dset.augmenter.augment_image(im)
            im = (im / 255).astype(np.float32)
            im = util.convert_colorspace(im,
                                         src_space='rgb',
                                         dst_space=dset.output_colorspace)
        else:
            im = util.convert_colorspace(im,
                                         src_space=dset.inputs.colorspace,
                                         dst_space=dset.output_colorspace)
        # Do centering of inputs
        if dset.center_inputs:
            im = dset.center_inputs(im)
        return im, gt
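
For context, a hedged sketch of how a __getitem__ on the surrounding CIFAR_Wrapper class might consume load_inputs; the torch conversion below is an assumption about the wrapper, not code taken from netharn.

    def __getitem__(dset, index):
        import torch
        # Load (and possibly augment) one image/label pair, then move the
        # channel axis to the front for PyTorch: HWC float32 -> CHW tensor.
        im, gt = dset.load_inputs(index)
        im_chw = np.ascontiguousarray(im.transpose(2, 0, 1))
        im_tensor = torch.from_numpy(im_chw).float()
        if gt is None:
            return im_tensor
        return im_tensor, torch.tensor(gt, dtype=torch.long)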