Example #1
def render_image(img = None, spd = None, rfl = None, out = 'img_hyp',
                 refspd = None, D = None, cieobs = _CIEOBS,
                 cspace = 'xyz', cspace_tf = {}, CSF = None,
                 interp_type = 'nd', k_neighbours = 4, show = True,
                 verbosity = 0, show_ref_img = True,
                 stack_test_ref = 12,
                 write_to_file = None):
    """
    Render image under specified light source spd.
    
    Args:
        :img: 
            | None or str or ndarray with float (max = 1) rgb image.
            | If None: load a default image.
        :spd: 
            | ndarray, optional
            | Light source spectrum for rendering
            | If None: use CIE illuminant F4
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'img_hyp' or str, optional
            |  (other option: 'img_ren': rendered image under :spd:)
        :refspd:
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65 (srgb has a D65 white point)
        :D: 
            | None, optional
            | Degree of (von Kries) adaptation from spd to refspd. 
        :cieobs:
            | _CIEOBS, optional
            | CMF set for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If None: treat :img: as an sRGB image.
            | If not None: treat :img: as raw camera rgb values; the :cspace:
            | argument is overridden and estimation is performed directly in
            | raw rgb space!!!
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :show: 
            | True, optional
            |  Show images.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            |   rendered image pixels.
        :show_ref_img:
            | True, optional
            | True: shows rendered image under reference spd. False: shows
            |  original image.
        :write_to_file:
            | None, optional
            | None: do nothing, else: write to filename(+path) in :write_to_file:
        :stack_test_ref: 
            | 12, optional
            | - 12: left (test), right (ref) format for show and imwrite
            | - 21: top (test), bottom (ref)
            | - 1: only show/write test
            | - 2: only show/write ref
            | - 0: show both, write test

    Returns:
        :returns: 
            | img_hyp, img_ren:
            | ndarrays with the float hyperspectral image and the rendered image
            | (what is returned depends on :out:)
    """

    # Get image:
    if img is not None:
        if isinstance(img, str):
            img = plt.imread(img)  # use matplotlib.pyplot's imread
    else:
        img = plt.imread(_HYPSPCIM_DEFAULT_IMAGE)

    # Normalize integer images to float in [0, 1]:
    if img.dtype == np.uint8:
        img = img / 255
    elif img.dtype == np.uint16:
        img = img / (2**16 - 1)

    # Convert to 2D format:
    rgb = img.reshape(img.shape[0] * img.shape[1], 3) * 1.0  # *1.0: force a float copy
    rgb[rgb == 0] = _EPS  # avoid division by zero for pure blacks.

    # Get unique rgb values and positions:
    rgb_u, rgb_indices = np.unique(rgb, return_inverse=True, axis=0)

    # Get rfl set:
    if rfl is None:  # use IES TM30 4880 sample set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']
    wlr = rfl[0]  # reflectance set determines the wavelength range for estimation (xyz_to_rfl())

    # Get reference spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(refspd, wlr, kind='linear')  # force spd onto the same wavelength range as rfl

    # Convert rgb_u to xyz and lab-type values under assumed refspd:
    if CSF is None:
        xyz_wr = spd_to_xyz(refspd, cieobs=cieobs, relative=True)
        xyz_ur = colortf(rgb_u * 255, tf='srgb>xyz')
    else:
        xyz_ur = rgb_u  # input for xyz_to_rfl (when CSF is not None: this function assumes the input is indeed rgb !!!)

    # Estimate rfl's for xyz_ur:
    rfl_est, xyzri = xyz_to_rfl(xyz_ur, rfl = rfl, out = 'rfl_est,xyz_est',
                                refspd = refspd, D = D, cieobs = cieobs,
                                cspace = cspace, cspace_tf = cspace_tf, CSF = CSF,
                                interp_type = interp_type, k_neighbours = k_neighbours,
                                verbosity = verbosity)

    # Get default test spd if none supplied:
    if spd is None:
        spd = _CIE_ILLUMINANTS['F4']

    if CSF is None:
        # calculate xyz values under test spd:
        xyzti, xyztw = spd_to_xyz(spd, rfl=rfl_est, cieobs=cieobs, out=2)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            xyzti = cat.apply(xyzti, xyzw1=xyztw, xyzw2=xyz_wr, D=D)

        # Convert xyzti under test spd to srgb:
        rgbti = colortf(xyzti, tf='srgb') / 255
    else:
        # Calculate rgb coordinates from camera sensitivity functions under spd:
        rgbti = rfl_to_rgb(rfl_est, spd=spd, CSF=CSF, wl=None)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            white = np.ones_like(spd)
            white[0] = spd[0]
            rgbwr = rfl_to_rgb(white, spd=refspd, CSF=CSF, wl=None)
            rgbwt = rfl_to_rgb(white, spd=spd, CSF=CSF, wl=None)
            rgbti = cat.apply_vonkries2(rgbti,
                                        rgbwt,
                                        rgbwr,
                                        xyzw0=np.array([[1.0, 1.0, 1.0]]),
                                        in_='rgb',
                                        out_='rgb',
                                        D=1)

    # Reconstruct original locations for rendered image rgbs:
    img_ren = rgbti[rgb_indices]
    img_ren.shape = img.shape  # reshape back to 3D size of original

    # For output:
    if show_ref_img:
        # if CSF is not None: xyzri already contains rgb values !!!
        rgb_ref = colortf(xyzri, tf='srgb') / 255 if (CSF is None) else xyzri
        img_ref = rgb_ref[rgb_indices]
        img_ref.shape = img.shape  # reshape back to 3D size of original
        img_str = 'Rendered (under ref. spd)'
        img = img_ref
    else:
        img_str = 'Original'

    if (stack_test_ref > 0) or show:
        if stack_test_ref == 21:
            img_original_rendered = np.vstack(
                (img_ren, np.ones((4, img.shape[1], 3)), img))
            img_original_rendered_str = 'Rendered (under test spd)\n ' + img_str
        elif stack_test_ref == 12:
            img_original_rendered = np.hstack(
                (img_ren, np.ones((img.shape[0], 4, 3)), img))
            img_original_rendered_str = 'Rendered (under test spd) | ' + img_str
        elif stack_test_ref == 1:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'
        elif stack_test_ref == 2:
            img_original_rendered = img
            img_original_rendered_str = img_str
        elif stack_test_ref == 0:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'

    if write_to_file is not None:
        # Write the stacked rendering result to an image file:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(write_to_file, img_original_rendered)

    if show:
        # show images using matplotlib.pyplot:
        plt.figure()

        plt.imshow(img_original_rendered)
        plt.title(img_original_rendered_str)
        plt.gca().get_xaxis().set_ticklabels([])
        plt.gca().get_yaxis().set_ticklabels([])

        if stack_test_ref == 0:
            plt.figure()
            plt.imshow(img)
            plt.title(img_str)
            plt.axis('off')

    if 'img_hyp' in out.split(','):
        # Create hyperspectral image:
        # (+1: skip the wavelength row at index 0 of rfl_est)
        rfl_image_2D = rfl_est[rgb_indices + 1, :]  # array with the rfl required for each pixel
        img_hyp = rfl_image_2D.reshape(img.shape[0], img.shape[1], rfl_image_2D.shape[1])

    # Setup output:
    if out == 'img_hyp':
        return img_hyp
    elif out == 'img_ren':
        return img_ren
    else:
        return eval(out)
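
A minimal usage sketch of the function above (it assumes render_image is the one exposed by luxpy's hypspcim toolbox; the file name 'testimage.jpg' is only an illustrative placeholder):

import luxpy as lx
from luxpy.toolboxes import hypspcim

spd = lx._CIE_ILLUMINANTS['F4']  # test light source (also the internal default)
img_hyp, img_ren = hypspcim.render_image(img='testimage.jpg',  # hypothetical image path
                                         spd=spd,
                                         out='img_hyp,img_ren',
                                         stack_test_ref=12,
                                         show=True)
# img_hyp: (H, W, n_wavelengths) hyperspectral estimate of the scene
# img_ren: (H, W, 3) srgb rendering of the scene under spd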
Example #2
    Ill2M = Ill2M[:(n + 1), :, :]

    # Module output plot:
    import matplotlib.pyplot as plt
    _cam_o = lambda xyz, xyzw, forward: lx.xyz_to_jabz(xyz)  # reference: Jz,az,bz coordinates (xyzw and forward are ignored)
    xyz, xyzw = lx.spd_to_xyz(Ill1,
                              cieobs=cieobs,
                              relative=True,
                              rfl=rflM,
                              out=2)
    jabch = _cam(xyz, xyzw=xyzw, forward=True, outin='J,aM,bM')
    out_ = _cam_o(xyz, xyzw=xyzw, forward=True)
    plt.figure()
    plt.plot(jabch[..., 1], jabch[..., 2], 'c.')
    plt.plot(out_[..., 1], out_[..., 2], 'r.')
    plt.axis('equal')

    out = 'J,aM,bM,M,C,s,h'.split(',')
    # Single data for sample and illuminant:
    # test input to _simple_cam():
    print('\n\n1: xyz in:')
    out_1 = _cam(xyz1, xyzw=xyzw1, forward=True, outin=out)
    xyz_1 = _cam(out_1[..., :3], xyzw=xyzw1, forward=False, outin=out[:3])
    print((xyz1 - xyz_1).sum())  # forward-inverse round-trip error (should be ~ 0)

    # Multiple data for sample and illuminants:
    print('\n\n2: xyz in:')
    out_2 = _cam(xyz2, xyzw=xyzw2, forward=True, outin=out)
    xyz_2 = _cam(out_2[..., :3], xyzw=xyzw2, forward=False, outin=out[:3])
    print((xyz2 - xyz_2).sum())  # forward-inverse round-trip error (should be ~ 0)
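
The fragment above checks that applying the color appearance model forward and then inverse recovers the input xyz. A self-contained sketch of the same round-trip idea, using luxpy's Jzazbz converters instead of the locally defined _cam (the availability of lx.jabz_to_xyz as the inverse of lx.xyz_to_jabz is assumed here):

import numpy as np
import luxpy as lx

# Sample xyz values: TM30 reflectance set under D65.
spd = lx._CIE_ILLUMINANTS['D65']
rfl = lx._CRI_RFL['ies-tm30']['4880']['5nm']
xyz, xyzw = lx.spd_to_xyz(spd, relative=True, rfl=rfl, out=2)

jabz = lx.xyz_to_jabz(xyz)           # forward transform to Jz,az,bz
xyz_back = lx.jabz_to_xyz(jabz)      # inverse transform back to xyz
print(np.abs(xyz - xyz_back).max())  # round-trip error, expected ~ 0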