Example #1
def _plot_DEs_vs_digital_values(DEslab, DEsl, DEsab, rgbcal, avg = lambda x: ((x**2).mean()**0.5), nbit = 8, verbosity = 1):
    """ Make a plot of the lab, l and ab color differences for the different calibration stimulus types. """
    if verbosity > 0:
        p_pure = [(rgbcal[:,1]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,1]==0)] 
        p_grays = (rgbcal[:,0] == rgbcal[:,1]) & (rgbcal[:,0] == rgbcal[:,2])
        p_whites = (rgbcal[:,0] == (2**nbit-1)) & (rgbcal[:,1] == (2**nbit-1)) & (rgbcal[:,2] == (2**nbit-1))
        p_cyans = (rgbcal[:,0]==0) & (rgbcal[:,1]!=0) & (rgbcal[:,2]!=0)
        p_yellows = (rgbcal[:,0]!=0) & (rgbcal[:,1]!=0) & (rgbcal[:,2]==0)
        p_magentas = (rgbcal[:,0]!=0) & (rgbcal[:,1]==0) & (rgbcal[:,2]!=0)
        fig,(ax0,ax1,ax2) = plt.subplots(nrows=1,ncols=3, figsize = (15,4))
        rgb_colors='rgb'
        rgb_labels=['red','green','blue']
        marker ='o'
        markersize = 10
        if p_whites.any():
            ax0.plot(rgbcal[p_whites,0], DEslab[p_whites],'ks',markersize = markersize, label='white')
            ax1.plot(rgbcal[p_whites,0], DEsl[p_whites],'ks',markersize = markersize,label='white')
            ax2.plot(rgbcal[p_whites,0], DEsab[p_whites],'ks',markersize = markersize,label='white')
        if p_grays.any():
            ax0.plot(rgbcal[p_grays,0], DEslab[p_grays], color = 'gray', marker = marker,linestyle='none',label='gray')
            ax1.plot(rgbcal[p_grays,0], DEsl[p_grays], color = 'gray', marker = marker,linestyle='none',label='gray')
            ax2.plot(rgbcal[p_grays,0], DEsab[p_grays], color = 'gray', marker = marker,linestyle='none',label='gray')
        for i in range(3):
            if p_pure[i].any():
                ax0.plot(rgbcal[p_pure[i],i], DEslab[p_pure[i]],rgb_colors[i]+marker,label=rgb_labels[i])
                ax1.plot(rgbcal[p_pure[i],i], DEsl[p_pure[i]],rgb_colors[i]+marker,label=rgb_labels[i])
                ax2.plot(rgbcal[p_pure[i],i], DEsab[p_pure[i]],rgb_colors[i]+marker,label=rgb_labels[i])
        if p_cyans.any():
            ax0.plot(rgbcal[p_cyans,1], DEslab[p_cyans],'c'+marker,label='cyan')
            ax1.plot(rgbcal[p_cyans,1], DEsl[p_cyans],'c'+marker,label='cyan')
            ax2.plot(rgbcal[p_cyans,1], DEsab[p_cyans],'c'+marker,label='cyan')
        if p_yellows.any():
            ax0.plot(rgbcal[p_yellows,0], DEslab[p_yellows],'y'+marker,label='yellow')
            ax1.plot(rgbcal[p_yellows,0], DEsl[p_yellows],'y'+marker,label='yellow')
            ax2.plot(rgbcal[p_yellows,0], DEsab[p_yellows],'y'+marker,label='yellow')
        if p_magentas.any():
            ax0.plot(rgbcal[p_magentas,0], DEslab[p_magentas],'m'+marker,label='magenta')
            ax1.plot(rgbcal[p_magentas,0], DEsl[p_magentas],'m'+marker,label='magenta')
            ax2.plot(rgbcal[p_magentas,0], DEsab[p_magentas],'m'+marker,label='magenta')
        ax0.plot(np.array([0,(2**nbit-1)*1.05]),np.hstack((avg(DEslab),avg(DEslab))),color = 'r',linewidth=2,linestyle='--')
        ax0.set_xlabel('digital values')
        ax0.set_ylabel('Color difference DElab')
        ax0.axis([0,(2**nbit-1)*1.05,0,max(DEslab)*1.1])
        ax0.set_title('DElab')
        ax1.plot(np.array([0,(2**nbit-1)*1.05]),np.hstack((avg(DEsl),avg(DEsl))),color = 'r',linewidth=2,linestyle='--')
        ax1.set_xlabel('digital values')
        ax1.set_ylabel('Color difference DEl')
        ax1.axis([0,(2**nbit-1)*1.05,0,max(DEslab)*1.1])
        ax1.set_title('DEl')
        ax2.plot(np.array([0,(2**nbit-1)*1.05]),np.hstack((avg(DEsab),avg(DEsab))),color = 'r',linewidth=2,linestyle='--')
        ax2.set_xlabel('digital values')
        ax2.set_ylabel('Color difference DEab')
        ax2.set_title('DEab')
        ax2.axis([0,(2**nbit-1)*1.05,0,max(DEslab)*1.1])
        ax2.legend(loc='upper left')
Example #2
def _rgb_delinearizer(rgblin, tr, tr_type = 'lut'):
    """ De-linearize linear rgblin using tr tone response function or lut """
    if tr_type == 'gog':
        return np.array([TRi(rgblin[:,i],*tr[i]) for i in range(3)]).T
    elif tr_type == 'lut':
        maxv = (tr.shape[0] - 1)
        bins = np.vstack((tr-np.diff(tr,axis=0,prepend=0)/2,tr[-1,:]+0.01)) # create bins
        idxs = np.array([(np.digitize(rgblin[:,i],bins[:,i]) - 1)  for i in range(3)]).T # find bin indices
        idxs[idxs>maxv] = maxv 
        rgb = np.arange(tr.shape[0])[idxs]
        return rgb
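A minimal, self-contained sketch of the LUT branch above for a single channel: linear values are assigned to the nearest LUT entry with np.digitize, and the resulting bin index is the recovered digital value (the toy LUT values are made up for illustration):

import numpy as np

tr1 = np.array([0.0, 0.1, 0.3, 0.6, 1.0])             # hypothetical 5-entry tone-response LUT
rgblin1 = np.array([0.05, 0.29, 0.7, 0.99])           # linear values to de-linearize
edges = np.hstack((tr1 - np.diff(tr1, prepend=0) / 2, tr1[-1] + 0.01))  # bin edges between entries
idx = np.digitize(rgblin1, edges) - 1                 # nearest LUT index = digital value
idx[idx > tr1.shape[0] - 1] = tr1.shape[0] - 1        # clamp to the LUT range
print(idx)                                            # -> [1 2 3 4]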
Example #3
def _cri_ref_i(cct,
               wl3=_WL,
               ref_type='iestm30',
               mix_range=[4000, 5000],
               cieobs='1931_2',
               force_daylight_below4000K=False,
               n=None,
               daylight_locus=None):
    """
    Calculates a reference illuminant spectrum based on cct 
    for color rendering index calculations.
    """
    if mix_range is None:
        mix_range = _CRI_REF_TYPES[ref_type]
    if (cct < mix_range[0]) | (ref_type == 'BB'):
        return blackbody(cct, wl3, n=n)
    elif (cct > mix_range[1]) | (ref_type == 'DL'):
        return daylightphase(
            cct,
            wl3,
            force_daylight_below4000K=force_daylight_below4000K,
            cieobs=cieobs,
            daylight_locus=daylight_locus)
    else:
        SrBB = blackbody(cct, wl3, n=n)
        SrDL = daylightphase(
            cct,
            wl3,
            verbosity=None,
            force_daylight_below4000K=force_daylight_below4000K,
            cieobs=cieobs,
            daylight_locus=daylight_locus)
        cmf = _CMF[cieobs]['bar'] if isinstance(cieobs, str) else cieobs
        wl = SrBB[0]
        ld = getwld(wl)

        SrBB = 100.0 * SrBB[1] / np.array(np.sum(SrBB[1] * cmf[2] * ld))
        SrDL = 100.0 * SrDL[1] / np.array(np.sum(SrDL[1] * cmf[2] * ld))
        Tb, Te = float(mix_range[0]), float(mix_range[1])
        cBB, cDL = (Te - cct) / (Te - Tb), (cct - Tb) / (Te - Tb)
        if cBB < 0.0:
            cBB = 0.0
        elif cBB > 1:
            cBB = 1.0
        if cDL < 0.0:
            cDL = 0.0
        elif cDL > 1:
            cDL = 1.0

        Sr = SrBB * cBB + SrDL * cDL
        Sr[np.isnan(Sr)] = 0.0  # (comparison with float('NaN') never matches)
        Sr = np.vstack((wl, (Sr / Sr[_POS_WL560])))

        return Sr
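A worked sketch of the blending weights used in the else-branch: for a CCT inside the mixing range, the luminance-normalized blackbody and daylight-phase spectra are combined linearly (the numbers below are chosen for illustration only):

Tb, Te = 4000.0, 5000.0        # mixing range, e.g. [4000, 5000] K for 'iestm30'
cct = 4500.0
cBB = (Te - cct) / (Te - Tb)   # -> 0.5, weight of the blackbody spectrum
cDL = (cct - Tb) / (Te - Tb)   # -> 0.5, weight of the daylight-phase spectrum
# Sr = SrBB*cBB + SrDL*cDL is then renormalized to 1 at 560 nm (the _POS_WL560 index).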
Example #4
def plot_rgb_color_patches(rgb,
                           patch_shape=(100, 100),
                           patch_layout=None,
                           ax=None,
                           show=True):
    """
    Create (and plot) an image with patches with specified rgb values.
    
    Args:
        :rgb:
            | ndarray with rgb values for each of the patches
        :patch_shape:
            | (100,100), optional
            | shape of each of the patches in the image
        :patch_layout:
            | None, optional
            | If None: layout is calculated automatically to give a 'good' aspect ratio
        :ax:
            | None, optional
            | Axes to plot the image in. If None: a new axes is created.
        :show:
            | True, optional
            | If True: plot image in axes and return axes handle; else: return ndarray with image.
            
    Returns:
        :ax: or :image: 
            | Axes is returned if show == True, else: ndarray with rgb image is returned.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)

    if patch_layout is None:
        patch_layout = get_subplot_layout(rgb.shape[0])

    image = np.zeros(
        np.hstack((np.array(patch_shape) * np.array(patch_layout), 3)))
    for i in range(rgb.shape[0]):
        r, c = np.unravel_index(i, patch_layout)
        R = int(r * patch_shape[0])
        C = int(c * patch_shape[1])
        image[R:R + patch_shape[0],
              C:C + patch_shape[1], :] = np.ones(np.hstack(
                  (patch_shape, 3))) * rgb[i, None, :]

    if show == False:
        return image
    else:
        ax.imshow(image.astype('uint8'))
        ax.axis('off')
        return ax
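A short usage sketch (assuming the function above and its numpy/matplotlib imports are available; passing an explicit patch_layout avoids the call to get_subplot_layout):

import numpy as np
rgb = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255],
                [255, 255, 0], [0, 255, 255], [255, 0, 255]])
img = plot_rgb_color_patches(rgb, patch_shape=(50, 50), patch_layout=(2, 3), show=False)
print(img.shape)   # -> (100, 150, 3): a 2 x 3 grid of 50 x 50 patches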
Example #5
def spd_to_thornton_cpi(spd):
    """
    Calculate Thornton's Color Preference Index (CPI).
    
    Args:
        :spd:
            | nd array with spectral power distribution(s) of the test light source(s).
            
    Returns:
        :cpi:
            | ndarray with CPI values.
        
    Reference:
        1. `Thornton, W. A. (1974). A Validation of the Color-Preference Index.
        Journal of the Illuminating Engineering Society, 4(1), 48–52. 
        <https://doi.org/10.1080/00994480.1974.10732288>`_
    """

    # sample 1976 u'v' coordinates for test and reference
    # using CIE Ra calculation engine
    # (only cspace, sampleset (with added white), and catf have changed;
    # catf = None so no CAT is applied as Thornton CPI,
    # like Judd's Flattery index, uses a Judd-type translational CAT)
    Yuv_t, Yuv_r = spd_to_jab_t_r(spd,
                                  cri_type='ciera',
                                  cspace={
                                      'type': 'Yuv',
                                      'xyzw': None
                                  },
                                  catf=None,
                                  sampleset=_RFL_CPIw)

    # Convert to 1960 UCS:
    Yuv_t[..., 2] *= (2 / 3)
    Yuv_r[..., 2] *= (2 / 3)

    # Perform Judd-type translational CAT with white point stored in last row:
    Yuv_t[..., 1:] -= Yuv_t[..., 1:][-1]
    Yuv_r[..., 1:] -= Yuv_r[..., 1:][-1]

    # Remove last row (white point):
    Yuv_t = Yuv_t[:-1, ...]
    Yuv_r = Yuv_r[:-1, ...]

    # Define preferred chromaticity shifts for 8 CIE CRI samples:
    # (*5, because Thornton uses full preferred shifts unlike Judd's Flattery Index)
    uv_shifts = np.array([[0.0020, 0.0008], [0.0000, 0.0000], [
        -0.0020, 0.0008
    ], [-0.0020, 0.0010], [-0.0020, -0.0004], [-0.0012, -0.0020],
                          [0.0008, -0.0020], [0.0020, -0.0010]]) * 5

    # Calculate chromaticity difference between test and shifted ref coordinates:
    DE = 800 * ((
        (Yuv_t[..., 1:] -
         (Yuv_r[..., 1:] + uv_shifts[:, None, :]))**2).sum(axis=-1))**0.5

    # Calculate CPI:
    CPI = 156 - 7.317 * DE.mean(
        axis=0)  # in Thornton 1974 we find 7.18, but then CPI(D65)!=100
    return CPI
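A hedged usage sketch (the illuminant lookup below is an assumption about where luxpy stores its CIE illuminant spectra):

import luxpy as lx
spd = lx._CIE_ILLUMINANTS['D65']     # assumed location of the D65 spectral data
print(spd_to_thornton_cpi(spd))      # should come out close to 100 for D65 (cf. the 7.317 scaling note above)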
Example #6
def _get_gij_fmc_2(xyz, cspace = 'Yxy'):
    """
    Get gij matrices describing the discrimination ellipses for xyz using FMC-2.
    
    Reference:
        Chickering, K.D. (1971), FMC Color-Difference Formulas: Clarification Concerning Usage, 61(1), p.118-122
    """
    # Convert xyz to pqs coordinates:
    pqs = _xyz_to_pqs(xyz)
    
    # get FMC-2 Cij matrix (for X,Y,Z):
    D = (pqs[...,0]**2 + pqs[...,1]**2)**0.5
    a = (17.3e-6*D**2/(1 + 2.73*((pqs[...,0]*pqs[...,1])**2)/(pqs[...,0]**4 + pqs[...,1]**4)))**0.5
    b = (3.098e-4*(pqs[...,2]**2 + 0.2015*xyz[...,1]**2))**0.5
    K1 = 0.55669 + xyz[...,1]*(0.049434 + xyz[...,1]*(-0.82575e-3 + xyz[...,1]*(0.79172e-5 - 0.30087e-7*xyz[...,1])))
    K2 = 0.17548 + xyz[...,1]*(0.027556 + xyz[...,1]*(-0.57262e-3 + xyz[...,1]*(0.63893e-5 - 0.26731e-7*xyz[...,1]))) 
    e1 = K1*pqs[...,2]/(b*D**2)
    e2 = K1/b
    e3 = 0.279*K2/(a*D)
    e4 = K1/(a*D)
    C11 = (e1**2 + e3**2)*pqs[...,0]**2 + e4**2*pqs[...,1]**2
    C12 = (e1**2 + e3**2 - e4**2)*pqs[...,0]*pqs[...,1]
    C22 = (e1**2 + e3**2)*pqs[...,1]**2 + e4**2*pqs[...,0]**2
    C13 = -e1*e2*pqs[...,0]
    C23 = -e1*e2*pqs[...,1]
    C33 = e2**2
    C = np.array([[C11, C12, C13],[C12, C22, C23], [C13, C23, C33]])
    if cspace == 'Yxy':
        return _cij_to_gij(xyz,C)
    else:
        return C
Example #7
def _get_gij_fmc_1(xyz, cspace = 'Yxy'):
    """
    Get gij matrices describing the discrimination ellipses for xyz using FMC-1.
    
    Reference:
        Chickering, K.D. (1967), Optimization of the MacAdam-Modified 1965 Friele Color-Difference Formula, 57(4), p.537-541
    """
    # Convert xyz to pqs coordinates:
    pqs = _xyz_to_pqs(xyz)
    
    # get FMC-1 Cij matrix (for X,Y,Z):
    D2 = (pqs[...,0]**2 + pqs[...,1]**2)
    b2 = 3.098e-4*(pqs[...,2]**2 + 0.2015*xyz[...,1]**2)
    A2 = 57780*(1 + 2.73*((pqs[...,0]*pqs[...,1])**2)/(pqs[...,0]**4 + pqs[...,1]**4))
    C11 = (A2*(0.0778*pqs[...,0]**2 + pqs[...,1]**2) + ((pqs[...,0]*pqs[...,2])**2)/b2)/D2**2
    C12 = (-0.9222*A2*pqs[...,0]*pqs[...,1] + (pqs[...,0]*pqs[...,1]*pqs[...,2]**2)/b2)/D2**2
    C22 = (A2*(pqs[...,0]**2 + 0.0778*pqs[...,1]**2) + ((pqs[...,1]*pqs[...,2])**2)/b2)/D2**2
    C13 = -pqs[...,0]*pqs[...,2]/(b2*D2) # or -PQ/b2*D2 ??
    C33 = 1/b2
    C23 = pqs[...,1]*C13/pqs[...,0]
    C = np.array([[C11, C12, C13],[C12, C22, C23], [C13, C23, C33]])
    if cspace == 'Yxy':
        return _cij_to_gij(xyz,C)
    else:
        return C
Example #8
def line_intersect(a1, a2, b1, b2):
    """
    Line intersections of series of two line segments a and b. 
        
    Args:
        :a1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line a
        :a2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line a
        :b1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line b
        :b2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line b
    
    Note: 
        N is the number of line segments a and b.
    
    Returns:
        :returns: 
            | ndarray with line-intersections (.shape = (N,2))
    
    References:
        1. https://stackoverflow.com/questions/3252194/numpy-and-line-intersections
    """
    T = np.array([[0.0, -1.0], [1.0, 0.0]])
    da = np.atleast_2d(a2 - a1)
    db = np.atleast_2d(b2 - b1)
    dp = np.atleast_2d(a1 - b1)
    dap = np.dot(da, T)
    denom = np.sum(dap * db, axis=1)
    num = np.sum(dap * dp, axis=1)
    return np.atleast_2d(num / denom).T * db + b1
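A self-contained check with two crossing segments (pure numpy, using the function above):

import numpy as np
a1, a2 = np.array([[0., 0.]]), np.array([[2., 2.]])   # segment a: (0,0) -> (2,2)
b1, b2 = np.array([[0., 2.]]), np.array([[2., 0.]])   # segment b: (0,2) -> (2,0)
print(line_intersect(a1, a2, b1, b2))                  # -> [[1. 1.]]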
Example #9
def smet2017_D(xyzw, Dmax=None):
    """
    Calculate the degree of adaptation based on chromaticity following 
    Smet et al. (2017) 
    
    Args:
        :xyzw: 
            | ndarray with white point data (CIE 1964 10° XYZs!!)
        :Dmax:
            | None or float, optional
            | Defaults to 0.6539 (max D obtained under experimental conditions, 
            | but probably too low due to dark surround leading to incomplete 
            | chromatic adaptation even for neutral illuminants 
            | resulting in background luminance (fov~50°) of 760 cd/m²)
            
    Returns:
        :D: 
            | ndarray with degrees of adaptation
    
    References: 
        1. `Smet, K.A.G.*, Zhai, Q., Luo, M.R., Hanselaer, P., (2017), 
        Study of chromatic adaptation using memory color matches, 
        Part II: colored illuminants, 
        Opt. Express, 25(7), pp. 8350-8365.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-25-7-8350&origin=search)>`_

    """

    # Convert xyzw to log-compressed Macleod_Boyton coordinates:
    Vl, rl, bl = asplit(
        np.log(xyz_to_Vrb_mb(xyzw, M=_MCATS['hpe']))
    )  # force use of HPE matrix (which was the one used when deriving the model parameters!!)

    # apply Dmodel (technically only for cieobs = '1964_10')
    pD = (1.0e7) * np.array([
        0.021081326530436, 4.751255762876845, -0.000000071025181,
        -0.000000063627042, -0.146952821492957, 3.117390441655821
    ])  #D model parameters for gaussian model in log(MB)-space (july 2016)
    if Dmax is None:
        Dmax = 0.6539  # max D obtained under experimental conditions (probably too low due to dark surround leading to incomplete chromatic adaptation even for neutral illuminants resulting in background luminance (fov~50°) of 760 cd/m²)
    return Dmax * math.bvgpdf(x=rl,
                              y=bl,
                              mu=pD[2:4],
                              sigmainv=np.linalg.inv(
                                  np.array([[pD[0], pD[4]], [pD[4], pD[1]]
                                            ])))**pD[5]
Example #10
File: helpers.py  Project: simongr2/luxpy
def normalize_to_Lw(Ill, Lw, cieobs, rflM):
    xyzw = lx.spd_to_xyz(Ill, cieobs=cieobs, relative=False)
    for i in range(Ill.shape[0] - 1):
        Ill[i + 1] = Lw * Ill[i + 1] / xyzw[i, 1]
    IllM = []
    for i in range(Ill.shape[0] - 1):
        IllM.append(np.vstack((Ill[0], Ill[i + 1] * rflM[1:, :])))
    IllM = np.transpose(np.array(IllM), (1, 0, 2))
    return Ill, IllM
Example #11
def plotcircle(center=np.array([[0., 0.]]),
               radii=np.arange(0, 60, 10),
               angles=np.arange(0, 350, 10),
               color='k',
               linestyle='--',
               out=None,
               axh=None,
               **kwargs):
    """
    Plot one or more concentric circles.
    
    Args:
        :center: 
            | np.array([[0.,0.]]) or ndarray with center coordinates, optional
        :radii:
            | np.arange(0,60,10) or ndarray with radii of circle(s), optional
        :angles:
            | np.arange(0,350,10) or ndarray with angles (°), optional
        :color:
            | 'k', optional
            | Color for plotting.
        :linestyle:
            | '--', optional
            | Linestyle of circles.
        :out: 
            | None, optional
            | If None: plot circles, return (x,y) otherwise.
    """
    xs = np.array([0])
    ys = xs.copy()
    if ((out != 'x,y') & (axh is None)):
        fig, axh = plt.subplots(nrows=1, ncols=1)
    for ri in radii:
        x = center[:, 0] + ri * np.cos(angles * np.pi / 180)
        y = center[:, 1] + ri * np.sin(angles * np.pi / 180)
        xs = np.hstack((xs, x))
        ys = np.hstack((ys, y))
        if (out != 'x,y'):
            axh.plot(x, y, color=color, linestyle=linestyle, **kwargs)
    if out == 'x,y':
        return xs, ys
    elif out == 'axh':
        return axh
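A usage sketch that only returns coordinates (with out='x,y' no axes are created and nothing is plotted; note that the returned arrays start with a leading 0 from the initialization above):

import numpy as np
xs, ys = plotcircle(center=np.array([[0., 0.]]), radii=np.array([10]),
                    angles=np.arange(0, 360, 90), out='x,y')
print(xs)   # -> approximately [0, 10, 0, -10, 0] (leading 0 is the placeholder start value)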
Example #12
def get_discrimination_ellipse(Yxy = np.array([[100,1/3,1/3]]), etype = 'fmc2', nsteps = 10, k_neighbours = 3, average_cik = True, Y = None):
    """
    Get discrimination ellipse(s) in v-format (R,r, xc, yc, theta) for Yxy using an interpolation of the MacAdam ellipses or using FMC-1 or FMC-2.
    
    Args:
        :Yxy:
            | 2D ndarray with [Y,]x,y coordinate centers. 
            | If Yxy.shape[-1]==2: Y is added using the value from the Y-input argument.
        :etype:
            | 'fmc2', optional
            | Type color discrimination ellipse estimation to use.
            | options: 'macadam', 'fmc1', 'fmc2' 
            |  - 'macadam': interpolate covariance matrices of closest MacAdam ellipses (see: get_macadam_ellipse?).
            |  - 'fmc1': use FMC-1 from ref 2 (see get_fmc_discrimination_ellipse?).
            |  - 'fmc2': use FMC-2 from ref 3 (see get_fmc_discrimination_ellipse?).
        :nsteps:
            | 10, optional
            | Set multiplication factor for ellipses 
            | (nsteps=1 corresponds to approximately 1 MacAdam step, 
            | for FMC-2, Y also has to be 10.69, see note below).
        :k_neighbours:
            | 3, optional
            | Only for option 'macadam'.
            | Number of nearest ellipses to use to calculate ellipse at xy 
        :average_cik:
            | True, optional
            | Only for option 'macadam'.
            | If True: take distance weighted average of inverse 
            |   'covariance ellipse' elements cik. 
            | If False: average major & minor axis lengths and 
            |   ellipse orientation angles directly.
        :Y:
            | None, optional
            | Only for option 'fmc2' (see note below).
            | If not None: its value overrides the Y-values in Yxy (e.g. Y = 10.69).
    
    Note:
        1. FMC-2 is almost identical to FMC-1 if Y = 10.69; see [3]
    
    References:
       1. MacAdam DL. Visual Sensitivities to Color Differences in Daylight*. J Opt Soc Am. 1942;32(5):247-274.
       2. Chickering, K.D. (1967), Optimization of the MacAdam-Modified 1965 Friele Color-Difference Formula, 57(4):537-541
       3. Chickering, K.D. (1971), FMC Color-Difference Formulas: Clarification Concerning Usage, 61(1):118-122
    """
    if Yxy.shape[-1] == 2:
        Yxy = np.hstack((100*np.ones((Yxy.shape[0],1)),Yxy))
    if Y is not None:
        Yxy[...,0] = Y
    if etype == 'macadam':
        return get_macadam_ellipse(xy = Yxy[...,1:], k_neighbours = k_neighbours, nsteps = nsteps, average_cik = average_cik)
    else:
        return get_fmc_discrimination_ellipse(Yxy = Yxy, etype = etype, nsteps = nsteps, Y = Y)
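A hedged usage sketch (running this requires the rest of the luxpy toolbox providing get_macadam_ellipse, get_fmc_discrimination_ellipse and math.cik_to_v):

import numpy as np
Yxy = np.array([[100, 1/3, 1/3], [100, 0.44, 0.40]])
v = get_discrimination_ellipse(Yxy, etype='fmc2', nsteps=10)
# one row per center in v-format (R, r, xc, yc, theta), scaled to ~10 MacAdam steps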
Example #13
def _cij_to_gij(xyz,C):
    """ Convert from matrix elements describing the discrimination ellipses from Cij (XYZ) to gij (Yxy)"""
    SIG = xyz[...,0] + xyz[...,1] + xyz[...,2]
    M1 = np.array([SIG, -SIG*xyz[...,0]/xyz[...,1], xyz[...,0]/xyz[...,1]])
    M2 = np.array([np.zeros_like(SIG), np.zeros_like(SIG), np.ones_like(SIG)])
    M3 = np.array([-SIG, -SIG*(xyz[...,1] + xyz[...,2])/xyz[...,1], xyz[...,2]/xyz[...,1]])
    M = np.array((M1,M2,M3))
    
    M = _transpose_02(M) # move stimulus dimension to axis = 0
    
    C = _transpose_02(C) # move stimulus dimension to axis = 0
    
    # convert Cij (XYZ) to gij' (xyY):
    AM = np.einsum('ij,kjl->kil', _M_XYZ_TO_PQS, M)
    CAM = np.einsum('kij,kjl->kil', C, AM) 
#    ATCAM = np.einsum('ij,kjl->kil', _M_XYZ_TO_PQS.T, CAM)
#    gij = np.einsum('kij,kjl->kil', np.transpose(M,(0,2,1)), ATCAM) # gij = M.T*A.T*C**A*M = (AM).T*C*A*M
    gij = np.einsum('kij,kjl->kil', np.transpose(AM,(0,2,1)), CAM) # gij = M.T*A.T*C*A*M = (AM).T*C*(A*M)

    # convert gij' (xyY) to gij (Yxy):
    gij = np.roll(np.roll(gij,1,axis=2),1,axis=1)
    
    return gij
Example #14
def xyz_to_srgb(xyz, gamma=2.4, **kwargs):
    """
    Calculates IEC:61966 sRGB values from xyz.

    Args:
        :xyz: 
            | ndarray with relative tristimulus values.
        :gamma: 
            | 2.4, optional
            | compression in sRGB

    Returns:
        :rgb: 
            | ndarray with R,G,B values (uint8).
    """

    xyz = np2d(xyz)

    # define 3x3 matrix
    M = np.array([[3.2404542, -1.5371385, -0.4985314],
                  [-0.9692660, 1.8760108, 0.0415560],
                  [0.0556434, -0.2040259, 1.0572252]])

    if len(xyz.shape) == 3:
        srgb = np.einsum('ij,klj->kli', M, xyz / 100)
    else:
        srgb = np.einsum('ij,lj->li', M, xyz / 100)

    # perform clipping:
    srgb[np.where(srgb > 1)] = 1
    srgb[np.where(srgb < 0)] = 0

    # test for the dark colours in the non-linear part of the function:
    dark = np.where(srgb <= 0.0031308)

    # apply gamma function:
    g = 1 / gamma

    # and scale to range 0-255:
    rgb = srgb.copy()
    rgb = (1.055 * rgb**g - 0.055) * 255

    # non-linear bit for dark colours
    rgb[dark] = (srgb[dark].copy() * 12.92) * 255

    # clip to range:
    rgb[rgb > 255] = 255
    rgb[rgb < 0] = 0

    return rgb
Example #15
File: helpers.py  Project: simongr2/luxpy
def _update_parameter_dict(args,
                           parameters={},
                           cieobs=_CAM_DEFAULT_CIEOBS,
                           match_conversionmatrix_to_cieobs=False,
                           Mxyz2lms_whitepoint=None):
    """
    Get parameter dict and update with values in args dict. 
     | Also replace the xyz-to-lms conversion matrix with the one corresponding 
     | to cieobs and normalize it to illuminant E.
     
    Args:
        :args: 
            | dictionary with updated values. 
            | (get by placing 'args = locals().copy()' immediately after the start
            | of the function from which the update is called, 
            | see _simple_cam() code for an example.)
        :parameters:
            | dictionary with all (adjustable) parameter values used by the model   
        :cieobs: 
            | String with the CIE observer CMFs (one of _CMF['types']) of the input data.
            | Is used to get the Mxyz2lms matrix when match_conversionmatrix_to_cieobs == True.
        :match_conversionmatrix_to_cieobs:
            | False, optional
            | If True: replace the Mxyz2lms key in the parameters dict with the matrix
            | in _CMF[cieobs]['M'], i.e. the one matching the cieobs of the input data.
            | If False: keep the Mxyz2lms in the parameters dict.
        :Mxyz2lms_whitepoint:
            | None, optional
            | White point used to normalize the Mxyz2lms matrix.
            | None defaults to illuminant E, i.e. np.array([[1.0, 1.0, 1.0]]).
    
    Returns:
        :parameters:
            | updated dictionary with model parameters for further use in the CAM.
            
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input
    if match_conversionmatrix_to_cieobs == True:
        parameters['Mxyz2lms'] = _CMF[cieobs]['M'].copy()
    if Mxyz2lms_whitepoint is None:
        Mxyz2lms_whitepoint = np.array([[1.0, 1.0, 1.0]])
    parameters['Mxyz2lms'] = math.normalize_3x3_matrix(
        parameters['Mxyz2lms'], Mxyz2lms_whitepoint
    )  # normalize matrix for xyz-> lms conversion to ill. E
    return parameters
Example #16
def _hue_bin_data_to_rg(hue_bin_data):
    jabt_closed = np.vstack(
        (hue_bin_data['jabt_hj'], hue_bin_data['jabt_hj'][:1, ...]))
    jabr_closed = np.vstack(
        (hue_bin_data['jabr_hj'], hue_bin_data['jabr_hj'][:1, ...]))
    notnan_t = np.logical_not(np.isnan(
        jabt_closed[..., 1]))  # avoid NaN's (i.e. empty hue-bins)
    notnan_r = np.logical_not(np.isnan(jabr_closed[..., 1]))
    Rg = np.array([[
        100 * _polyarea(jabt_closed[notnan_t[:, i], i, 1],
                        jabt_closed[notnan_t[:, i], i, 2]) /
        _polyarea(jabr_closed[notnan_r[:, i], i, 1],
                  jabr_closed[notnan_r[:, i], i, 2])
        for i in range(notnan_r.shape[-1])
    ]])
    return Rg
Example #17
def get_fmc_discrimination_ellipse(Yxy = np.array([[100,1/3,1/3]]), etype = 'fmc2', Y = None, nsteps = 10):
    """
    Get discrimination ellipse(s) in v-format (R,r, xc, yc, theta) for Yxy using FMC-1 or FMC-2.
    
    Args:
        :Yxy:
            | 2D ndarray with [Y,]x,y coordinate centers. 
            | If Yxy.shape[-1]==2: Y is added using the value from the Y-input argument.
        :etype:
            | 'fmc2', optional
            | Type of FMC color discrimination equations to use (see references below).
            | options: 'fmc1', 'fmc2'
        :Y:
            | None, optional
            | Only affects FMC-2 (see note below).
            | If not None: its value overrides the Y-values in Yxy (e.g. Y = 10.69).
        :nsteps:
            | 10, optional
            | Set multiplication factor for ellipses 
            | (nsteps=1 corresponds to approximately 1 MacAdam step, 
            | for FMC-2, Y also has to be 10.69, see note below).
    
    Note:
        1. FMC-2 is almost identical to FMC-1 if Y = 10.69; see [2]
    
    References:
        1. Chickering, K.D. (1967), Optimization of the MacAdam-Modified 1965 Friele Color-Difference Formula, 57(4), p.537-541
        2. Chickering, K.D. (1971), FMC Color-Difference Formulas: Clarification Concerning Usage, 61(1), p.118-122
    """
    # Get matrix elements for discrimination ellipses at Yxy:
    gij = get_gij_fmc(Yxy, etype = etype, ellipsoid = False, Y = Y, cspace = 'Yxy')
    
    # Convert cik (gij) to v-format (R,r,xc,yc, theta):
    if Yxy.shape[-1]==2:
        xyc = Yxy
    else:
        xyc = Yxy[...,1:]
    v = math.cik_to_v(gij, xyc = xyc)
    
    # convert to desired number of MacAdam-steps:
    v[:,0:2] = v[:,0:2]*nsteps
    
    return v
Example #18
def srgb_to_xyz(rgb, gamma=2.4, **kwargs):
    """
    Calculates xyz from IEC:61966 sRGB values.

    Args:
        :rgb: 
            | ndarray with srgb values (uint8).
        :gamma: 
            | 2.4, optional
            | compression in sRGB
            
    Returns:
        :xyz: 
            | ndarray with relative tristimulus values.

    """
    rgb = np2d(rgb)

    # define 3x3 matrix
    M = np.array([[0.4124564, 0.3575761, 0.1804375],
                  [0.2126729, 0.7151522, 0.0721750],
                  [0.0193339, 0.1191920, 0.9503041]])

    # scale device coordinates:
    sRGB = rgb / 255

    # test for the dark colours in the non-linear part of the conversion:
    nonlin = np.where((rgb / 255) < 0.04045)  # threshold on encoded values (= 12.92 * 0.0031308); 0.03928 in the older spec

    # apply gamma function to convert to sRGB
    srgb = sRGB.copy()
    srgb = ((srgb + 0.055) / 1.055)**gamma

    srgb[nonlin] = sRGB[nonlin] / 12.92

    if len(srgb.shape) == 3:
        xyz = np.einsum('ij,klj->kli', M, srgb) * 100
    else:
        xyz = np.einsum('ij,lj->li', M, srgb) * 100
    return xyz
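A roundtrip sanity check using the two conversion functions above (np2d is assumed to be importable from luxpy.utils):

import numpy as np
xyz = np.array([[95.05, 100.0, 108.9], [30.0, 30.0, 30.0]])  # D65-like white + an in-gamut gray
rgb = xyz_to_srgb(xyz)
xyz_back = srgb_to_xyz(rgb)
print(np.round(rgb))                   # white row should be close to [255, 255, 255]
print(np.abs(xyz - xyz_back).max())    # small for in-gamut colors (clipping causes tiny deviations)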
Example #19
def _update_parameter_dict(args,
                           parameters=None,
                           cieobs='2006_10',
                           match_to_conversionmatrix_to_cieobs=True):
    """
    Get parameter dict and update with values in args dict. 
    Also replace the xyz-to-lms conversion matrix with the one corresponding 
    to cieobs and normalize it to illuminant E.
    """
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input
    if match_to_conversionmatrix_to_cieobs == True:
        parameters['Mxyz2lms'] = _CMF[cieobs]['M'].copy()
    parameters['Mxyz2lms'] = math.normalize_3x3_matrix(
        parameters['Mxyz2lms'],
        np.array([[1.0, 1.0, 1.0]
                  ]))  # normalize matrix for xyz-> lms conversion to ill. E
    return parameters
Example #20
def normalize_3x3_matrix(M, xyz0 = np.array([[1.0,1.0,1.0]])):
    """
    Normalize 3x3 matrix M to xyz0 -- > [1,1,1]
    
    | If M.shape == (1,9): M is reshaped to (3,3)
    
    Args:
        :M: 
            | ndarray((3,3) or ndarray((1,9))
        :xyz0: 
            | 2darray, optional 
        
    Returns:
        :returns: 
            | normalized matrix such that M*xyz0 = [1,1,1]
    """
    M = np2d(M)
    if M.shape[-1]==9:
        M = M.reshape(3,3)
    if xyz0.shape[0] == 1:
        return np.dot(np.diagflat(1/(np.dot(M,xyz0.T))),M)
    else:
        return np.concatenate([np.dot(np.diagflat(1/(np.dot(M,xyz0[i].T))),M) for i in range(xyz0.shape[0])],axis=0).reshape(xyz0.shape[0],3,3)
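A small sketch of the normalization property: after normalization the matrix maps xyz0 exactly onto [1, 1, 1] (np2d is assumed to be importable from luxpy.utils):

import numpy as np
M = np.array([[0.4124564, 0.3575761, 0.1804375],
              [0.2126729, 0.7151522, 0.0721750],
              [0.0193339, 0.1191920, 0.9503041]])   # sRGB linear-rgb -> xyz matrix
xyz0 = np.array([[1.0, 1.0, 1.0]])                  # normalize to illuminant E
Mn = normalize_3x3_matrix(M, xyz0)
print(np.dot(Mn, xyz0.T).T)                         # -> [[1. 1. 1.]]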
Example #21
def xyz_to_cct_ohno2011(xyz):
    """
    Calculate cct and Duv from CIE 1931 2° xyz following Ohno (2011).
    
    Args:
        :xyz:
            | ndarray with CIE 1931 2° X,Y,Z tristimulus values
            
    Returns:
        :cct, duv:
            | ndarrays with correlated color temperatures and distance to blackbody locus in CIE 1960 uv
            
    References:
        1. Ohno, Y. (2011). Calculation of CCT and Duv and Practical Conversion Formulae. 
        CORM 2011 Conference, Gaithersburg, MD, May 3-5, 2011
    """
    uvp = xyz_to_Yuv(xyz)[..., 1:]
    uv = uvp * np.array([[1, 2 / 3]])
    Lfp = ((uv[..., 0] - 0.292)**2 + (uv[..., 1] - 0.24)**2)**0.5
    a = np.arctan((uv[..., 1] - 0.24) / (uv[..., 0] - 0.292))
    a[a < 0] = a[a < 0] + np.pi
    Lbb = np.polyval(_KIJ[0, :], a)
    Duv = Lfp - Lbb

    T1 = 1 / np.polyval(_KIJ[1, :], a)
    T1[a >= 2.54] = 1 / np.polyval(_KIJ[2, :], a[a >= 2.54])
    dTc1 = np.polyval(_KIJ[3, :], a) * (Lbb + 0.01) / Lfp * Duv / 0.01
    dTc1[a >= 2.54] = 1 / np.polyval(_KIJ[4, :], a[a >= 2.54]) * (
        Lbb[a >= 2.54] + 0.01) / Lfp[a >= 2.54] * Duv[a >= 2.54] / 0.01
    T2 = T1 - dTc1
    c = np.log10(T2)
    c[T2 == 0] = -np.inf
    dTc2 = np.polyval(_KIJ[5, :], c)
    dTc2[Duv < 0] = np.polyval(_KIJ[6, :], c[Duv < 0]) * np.abs(
        Duv[Duv < 0] / 0.03)**2
    Tfinal = T2 - dTc2
    return Tfinal, Duv
Example #22
def plotcircle(radii = np.arange(0,60,10), \
               angles = np.arange(0,350,10),\
               color = 'k',linestyle = '--', out = None):
    """
    Plot one or more concentric circles around (0,0).
    
    Args:
        :radii:
            | np.arange(0,60,10) or ndarray with radii of circle(s), optional
        :angles:
            | np.arange(0,350,10) or ndarray with angles (°), optional
        :color: 
            | 'k', optional
            | Color for plotting.
        :linestyle:
            | '--', optional
            | Linestyle of circles.
        :out: 
            | None, optional
            | If None: plot circles, return (x,y) otherwise.
               
     Returns:
          :x,y:
               | ndarrays with circle coordinates (only returned if out is 'x,y')
    """
    x = np.array([0])
    y = x.copy()
    for ri in radii:
        xi = ri * np.cos(angles * np.pi / 180)
        yi = ri * np.sin(angles * np.pi / 180)
        x = np.hstack((x, xi))
        y = np.hstack((y, yi))
        if out != 'x,y':
            plt.plot(xi, yi, color=color, linestyle=linestyle)
    if out == 'x,y':
        return x, y
Example #23
def get_pixel_coordinates(jab,
                          jab_ranges=None,
                          jab_deltas=None,
                          limit_grid_radius=0):
    """
    Get pixel coordinates corresponding to array of jab color coordinates.
    
    Args:
        :jab: 
            | ndarray of color coordinates
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            |    (ndarray.shape = (3,3), with  first axis: J,a,b, and second 
                 axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. A ndarray of 3 deltas, uses
            | a city block sampling around each pixel center.
        :limit_grid_radius: 
            | 0, optional
            | A value of zero keeps the grid as specified by :jab_ranges:.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius: 
    
    Returns:
        :returns:
            | gridp, idxp, jabp, samplenrs, samplesIDs
            |   - :gridp: ndarray with coordinates of all pixel centers.
            |   - :idxp: list[int] with pixel index for each non-empty pixel
            |   - :jabp: ndarray with center color coordinates of non-empty pixels
            |   - :samplenrs: list[list[int]] with sample numbers belonging to each 
            |                 non-empty pixel
            |   - :sampleIDs: summarizing list, 
            |                 with column order: 'idxp, jabp, samplenrs'
    """
    if jab_deltas is None:
        jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR])
    if jab_ranges is None:
        jab_ranges = np.vstack(
            ([0, 100, jab_deltas[0]
              ], [-_VF_MAXR, _VF_MAXR + jab_deltas[1], jab_deltas[1]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[2], jab_deltas[2]]))

    # Get pixel grid:
    gridp = generate_grid(jab_ranges=jab_ranges,
                          limit_grid_radius=limit_grid_radius)

    # determine pixel coordinates of each sample in jab:
    samplesIDs = []
    for idx in range(gridp.shape[0]):

        # get pixel coordinates:
        jp = gridp[idx, 0]
        ap = gridp[idx, 1]
        bp = gridp[idx, 2]
        #Cp = np.sqrt(ap**2+bp**2)

        if type(jab_deltas) == np.ndarray:
            sampleID = np.where(
                ((np.abs(jab[..., 0] - jp) <= jab_deltas[0] / 2) &
                 (np.abs(jab[..., 1] - ap) <= jab_deltas[1] / 2) &
                 (np.abs(jab[..., 2] - bp) <= jab_deltas[2] / 2)))
        else:
            sampleID = np.where(
                (np.sqrt((jab[..., 0] - jp)**2 + (jab[..., 1] - ap)**2 +
                         (jab[..., 2] - bp)**2) <= jab_deltas / 2))

        if (sampleID[0].shape[0] > 0):
            samplesIDs.append(
                np.hstack((idx, np.array([jp, ap, bp]), sampleID[0])))

    idxp = [int(samplesIDs[i][0]) for i in range(len(samplesIDs))]
    jabp = np.vstack([samplesIDs[i][1:4] for i in range(len(samplesIDs))])
    samplenrs = [
        np.array(samplesIDs[i][4:], dtype=int).tolist()
        for i in range(len(samplesIDs))
    ]

    return gridp, idxp, jabp, samplenrs, samplesIDs
Example #24
File: helpers.py  Project: simongr2/luxpy
def _simple_cam(
        data,
        dataw=None,
        Lw=100.0,
        relative=True,
        inputtype='xyz',
        direction='forward',
        cie_illuminant='D65',
        parameters={
            'cA': 1,
            'ca': np.array([1, -1, 0]),
            'cb': (1 / 3) * np.array([0.5, 0.5, -1]),
            'n': 1 / 3,
            'Mxyz2lms': _CMF['1931_2']['M'].copy()
        },
        cieobs='2006_10',
        match_to_conversionmatrix_to_cieobs=True):
    """
    An example CAM illustration the usage of the functions in luxpy.cam.helpers 
    
    | Note that this example uses NO chromatic adaptation 
    | and SIMPLE compression, opponent and correlate processing.
    | THIS IS ONLY FOR ILLUSTRATION PURPOSES !!!

    Args:
        :data: 
            | ndarray with input:
            |  - tristimulus values 
            | or
            |  - spectral data 
            | or 
            |  - input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of :cie_illuminant:
        :cie_illuminant:
            | 'D65', optional
            | String corresponding to one of the illuminants (keys) 
            | in luxpy._CIE_ILLUMINANT
            | If ndarray, then use this one.
            | This is ONLY USED WHEN dataw is NONE !!!
        :Lw:
            | 100.0, optional
            | Luminance (cd/m²) of white point.
        :relative:
            | True or False, optional
            | True: data and dataw input is relative (i.e. Yw = 100)
        :parameters:
            | {'cA': 1, 'ca':np.array([1,-1,0]), 'cb':(1/3)*np.array([0.5,0.5,-1]),
            |  'n': 1/3, 'Mxyz2lms': _CMF['1931_2']['M'].copy()}
            | Dict with model parameters 
            | (For illustration purposes of match_conversionmatrix_to_cieobs, 
            |  the conversion matrix luxpy._CMF['1931_2']['M'] does NOT match
            |  the default observer specification of the input data in :cieobs: !!!)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz_to_lms
            | matrix to the one corresponding to that observer. 
            | Set to False to keep the one in the parameter dict!
    
    Returns:
        :returns: 
            | ndarray with:
            | - color appearance correlates (:direction: == 'forward')
            |  or 
            | - XYZ tristimulus values (:direction: == 'inverse')
    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy(
    )  # gets all local variables (i.e. the function arguments)

    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        cieobs=cieobs,
        match_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs,
        Mxyz2lms_whitepoint=np.array([[1, 1, 1]]))

    #unpack model parameters:
    (Mxyz2lms, cA, ca, cb,
     n) = [parameters[x] for x in sorted(parameters.keys())]

    #--------------------------------------------------------------------------
    # Setup default white point / adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            cie_illuminant=cie_illuminant,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    n_out = 5  # this example outputs 5 'correlates': J, a, b, C, h
    (data, dataw, camout,
     originalshape) = _massage_input_and_init_output(data,
                                                     dataw,
                                                     inputtype=inputtype,
                                                     direction=direction,
                                                     n_out=n_out)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    # Create matrix with scale factors for L, M, S
    # for quick matrix multiplications to obtain neural signals:
    MAab = np.array([[cA, cA, cA], ca, cb])

    if direction == 'inverse':
        invMxyz2lms = np.linalg.inv(
            Mxyz2lms)  # Calculate the inverse lms-to-xyz conversion matrix
        invMAab = np.linalg.inv(
            MAab)  # Pre-calculate its inverse to avoid repeat in loop.

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        # Note that xyzt will contain a None in case of inverse mode !!!
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #---------------------------------------------------------------------
        # stage 1 (white point): calculate lms values of white:
        #----------------------------------------------------------------------
        lmsw = np.dot(Mxyz2lms, xyzw.T).T

        #------------------------------------------------------------------
        # stage 2 (white): apply simple chromatic adaptation:
        #------------------------------------------------------------------
        lmsw_a = lmsw / lmsw

        #----------------------------------------------------------------------
        # stage 3 (white point): apply simple compression to lms values
        #----------------------------------------------------------------------
        lmsw_ac = lmsw_a**n

        #----------------------------------------------------------------------
        # stage 4 (white point): calculate achromatic A, and opponent signals a,b):
        #----------------------------------------------------------------------
        Aabw = np.dot(MAab, lmsw_ac.T).T

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # SPLIT CALCULATION STEPS IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        if direction == 'forward':
            #------------------------------------------------------------------
            # stage 1 (stimulus): calculate lms values
            #------------------------------------------------------------------
            lms = np.dot(Mxyz2lms, xyzt.T).T

            #------------------------------------------------------------------
            # stage 2 (stimulus): apply simple chromatic adaptation:
            #------------------------------------------------------------------
            lms_a = lms / lmsw

            #------------------------------------------------------------------
            # stage 3 (stimulus): apply simple compression to lms values
            #------------------------------------------------------------------
            lms_ac = lms_a**n

            #------------------------------------------------------------------
            # stage 3 (stimulus): calculate achromatic A, and opponent signals a,b:
            #------------------------------------------------------------------
            Aab = np.dot(MAab, lms_ac.T).T

            #------------------------------------------------------------------
            # stage 4 (stimulus): calculate J, C, h
            #------------------------------------------------------------------
            J = Aab[..., 0] / Aabw[..., 0]
            C = (Aab[..., 1]**2 + Aab[..., 2]**2)**0.5
            h = math.positive_arctan(Aab[..., 1], Aab[..., 2])

            # # stack together:
            camout[i] = np.vstack((J, Aab[..., 1], Aab[..., 2], C, h)).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':
            pass

    return _massage_output_data_to_original_shape(camout, originalshape)
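A hedged usage sketch of the forward mode (this only runs when the other luxpy.cam.helpers functions it calls, such as _setup_default_adaptation_field and _get_absolute_xyz_xyzw, are available):

import numpy as np
xyz = np.array([[19.01, 20.0, 21.78], [57.06, 43.06, 31.96]])   # two test stimuli
xyzw = np.array([[95.05, 100.0, 108.88]])                       # D65-like white point
out = _simple_cam(xyz, dataw=xyzw, Lw=100.0, relative=True,
                  inputtype='xyz', direction='forward')
# out contains the five correlates stacked per sample: J, a, b, C, h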
Example #25
def calibrate(rgbcal, xyzcal, L_type = 'lms', tr_type = 'lut', cieobs = '1931_2', 
              nbit = 8, cspace = 'lab', avg = lambda x: ((x**2).mean()**0.5), ensure_increasing_lut_at_low_rgb = 0.2,
              verbosity = 1, sep=',',header=None): 
    """
    Calculate TR parameters/lut and conversion matrices.
    
    Args:
        :rgbcal:
            | ndarray [Nx3] or string with filename of RGB values 
            | rgbcal must contain at least the following type of settings:
            | - pure R,G,B: e.g. for pure R: (R != 0) & (G==0) & (B == 0)
            | - white(s): R = G = B = 2**nbit-1
            | - gray(s): R = G = B
            | - black(s): R = G = B = 0
            | - binary colors: cyan (G = B, R = 0), yellow (G = R, B = 0), magenta (R = B, G = 0)
        :xyzcal:
            | ndarray [Nx3] or string with filename of measured XYZ values for 
            | the RGB settings in rgbcal.
        :L_type:
            | 'lms', optional
            | Type of response to use in the derivation of the Tone-Response curves.
            | options:
            |  - 'lms': use cone fundamental responses: L vs R, M vs G and S vs B 
            |           (reduces noise and generally leads to more accurate characterization) 
            |  - 'Y': use the luminance signal: Y vs R, Y vs G, Y vs B
        :tr_type:
            | 'lut', optional
            | options:
            |  - 'lut': Derive/specify Tone-Response as a look-up-table
            |  - 'gog': Derive/specify Tone-Response as a gain-offset-gamma function
        :cieobs:
            | '1931_2', optional
            | CIE CMF set used to determine the XYZ tristimulus values
            | (needed when L_type == 'lms': determines the conversion matrix to
            | convert xyz to lms values)
        :nbit:
            | 8, optional
            | RGB values in nbit format (e.g. 8, 16, ...)
        :cspace:
            | 'lab', optional
            | color space or chromaticity diagram to calculate color differences in
            | when optimizing the xyz_to_rgb and rgb_to_xyz conversion matrices.
        :avg:
            | lambda x: ((x**2).mean()**0.5), optional
            | Function used to average the color differences of the individual RGB settings
            | in the optimization of the xyz_to_rgb and rgb_to_xyz conversion matrices.
        :ensure_increasing_lut_at_low_rgb:
            | 0.2 or float (max = 1.0) or None, optional
            | Ensure an increasing lut by setting lut values to zero for all RGB below the
            | largest zero-crossing of np.diff(lut) found where RGB/RGB.max() is smaller
            | than :ensure_increasing_lut_at_low_rgb: (0.2 is a good rule-of-thumb value).
            | Non-strictly increasing lut values at low RGB can be caused by noise and a
            | low measurement signal.
            | If None: don't force the lut to be increasing, but keep it as is.
        :verbosity:
            | 1, optional
            | > 0: print and plot optimization results
        :sep:
            | ',', optional
            | separator in files with rgbcal and xyzcal data
        :header:
            | None, optional
            | header specifier for files with rgbcal and xyzcal data 
            | (see pandas.read_csv)
            
    Returns:
        :M:
            | linear rgb to xyz conversion matrix
        :N:
            | xyz to linear rgb conversion matrix
        :tr:
            | Tone Response function parameters or lut
        :xyz_black:
            | ndarray with XYZ tristimulus values of black
        :xyz_white:
            | ndarray with tristimulus values of white
    """
    
    # process rgb, xyzcal inputs:
    rgbcal, xyzcal = _parse_rgbxyz_input(rgbcal, xyz = xyzcal, sep = sep, header=header)
    
    # get black-positions and average black xyz (flare):
    p_blacks = (rgbcal[:,0]==0) & (rgbcal[:,1]==0) & (rgbcal[:,2]==0)
    xyz_black = xyzcal[p_blacks,:].mean(axis=0,keepdims=True)
    
    # Calculate flare corrected xyz:
    xyz_fc = xyzcal - xyz_black
    
    # get positions of pure r, g, b values:
    p_pure = [(rgbcal[:,1]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,1]==0)] 
    
    # set type of L-response to use: Y for R,G,B or L,M,S for R,G,B:
    if L_type == 'Y':
        L = np.array([xyz_fc[:,1] for i in range(3)]).T
    elif L_type == 'lms':
        lms = (math.normalize_3x3_matrix(_CMF[cieobs]['M'].copy()) @ xyz_fc.T).T
        L = np.array([lms[:,i] for i in range(3)]).T
        
    # Get rgb linearizer parameters or lut and apply to all rgb's:
    if tr_type == 'gog':
        par = np.array([sp.optimize.curve_fit(TR, rgbcal[p_pure[i],i], L[p_pure[i],i]/L[p_pure[i],i].max(), p0=[1,0,1])[0] for i in range(3)]) # calculate parameters of each TR
        tr = par
    elif tr_type == 'lut':
        dac = np.arange(2**nbit)
        # lut = np.array([cie_interp(np.vstack((rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max())), dac, kind ='cubic')[1,:] for i in range(3)]).T
        lut = np.array([sp.interpolate.PchipInterpolator(rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max())(dac) for i in range(3)]).T # use this one to avoid potential overshoot with cubic spline interpolation (but slightly worse performance)
        lut[lut<0] = 0
          
        # ensure monotonically increasing lut values for low signal:
        if ensure_increasing_lut_at_low_rgb is not None:
            #ensure_increasing_lut_at_low_rgb = 0.2 # anything below that has a zero-crossing for diff(lut) will be set to zero
            for i in range(3):
                p0 = np.where((np.diff(lut[dac/dac.max() < ensure_increasing_lut_at_low_rgb,i])<=0))[0]
                if p0.any():
                    p0 = range(0,p0[-1])
                    lut[p0,i] = 0
        tr = lut

    
    # plot:
    if verbosity > 0:
        colors = 'rgb'
        linestyles = ['-','--',':']
        rgball = np.repeat(np.arange(2**nbit)[:,None],3,axis=1)
        Lall = _rgb_linearizer(rgball, tr, tr_type = tr_type)
        plt.figure()
        for i in range(3):
            plt.plot(rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max(),colors[i]+'o')
            plt.plot(rgball[:,i],Lall[:,i],colors[i]+linestyles[i],label=colors[i])
        plt.xlabel('Display RGB')
        plt.ylabel('Linear RGB')
        plt.legend()
        plt.title('Tone response curves')
    
    # linearize all rgb values and clamp to 0
    rgblin = _rgb_linearizer(rgbcal, tr, tr_type = tr_type) 
 
    # get rgblin to xyz_fc matrix:
    M = np.linalg.lstsq(rgblin, xyz_fc, rcond=None)[0].T 
    
    # get xyz_fc to rgblin matrix:
    N = np.linalg.inv(M)
    
    # get better approximation for conversion matrices:
    p_grays = (rgbcal[:,0] == rgbcal[:,1]) & (rgbcal[:,0] == rgbcal[:,2])
    p_whites = (rgbcal[:,0] == (2**nbit-1)) & (rgbcal[:,1] == (2**nbit-1)) & (rgbcal[:,2] == (2**nbit-1))
    xyz_white = xyzcal[p_whites,:].mean(axis=0,keepdims=True) # get xyzw for input into xyz_to_lab() or colortf()
    def optfcn(x, rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,out,verbosity):
        M = x.reshape((3,3))
        xyzest = rgb_to_xyz(rgbcal, M, tr, xyz_black, tr_type)
        xyzw = xyzcal[p_whites,:].mean(axis=0) # get xyzw for input into xyz_to_lab() or colortf()
        labcal, labest = colortf(xyzcal,tf=cspace,xyzw=xyzw), colortf(xyzest,tf=cspace,xyzw=xyzw) # calculate lab coord. of cal. and est.
        DEs = ((labcal-labest)**2).sum(axis=1)**0.5
        DEg = DEs[p_grays]
        DEw = DEs[p_whites]
        F = (avg(DEs)**2 + avg(DEg)**2 + avg(DEw)**2)**0.5
        if verbosity > 1:
            print('\nPerformance of TR + rgb-to-xyz conversion matrix M:')
            print('all: DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEs),np.std(DEs)))
            print('grays: DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEg),np.std(DEg)))
            print('whites(s) DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEw),np.std(DEw)))
        if out == 'F':
            return F
        else:
            return eval(out)
    x0 = M.ravel()
    res = math.minimizebnd(optfcn, x0, args =(rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,'F',0), use_bnd=False)
    xf = res['x_final']
    M = optfcn(xf, rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,'M',verbosity)
    N = np.linalg.inv(M)
    return M, N, tr, xyz_black, xyz_white
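A hedged usage sketch (the file names are hypothetical placeholders; rgbcal and xyzcal can also be passed directly as Nx3 ndarrays):

# 'rgbcal.csv' and 'xyzcal.csv' are hypothetical N x 3 measurement files:
M, N, tr, xyz_black, xyz_white = calibrate('rgbcal.csv', 'xyzcal.csv',
                                           L_type='lms', tr_type='lut',
                                           cieobs='1931_2', nbit=8, verbosity=0)
# M: linear RGB -> XYZ matrix, N: XYZ -> linear RGB matrix, tr: per-channel lut,
# xyz_black / xyz_white: measured black and white tristimulus values.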
Example #26
File: helpers.py  Project: simongr2/luxpy
def _massage_input_and_init_output(data,
                                   dataw,
                                   inputtype='xyz',
                                   direction='forward',
                                   n_out=3):
    """
    Redimension input data to ensure it has the most appropriate shape for easy and efficient looping.
    |
    | 1. Convert data and dataw to atleast_2d ndarrays
    | 2. Make axis 1 of dataw have 'same' dimensions as data
    | 3. Make dataw have same lights source axis size as data
    | 4. Flip light source axis to axis=0 for efficient looping
    | 5. Initialize output array camout to 'same' shape as data but with camout.shape[-1] == n_out
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :n_out:
            | 3, optional
            | output size of last dimension of camout 
            | (e.g. n_out=3 for j,a,b output or n_out = 5 for J,M,h,a,b output)
            
    Returns:
        :data:
            | ndarray with reshaped data
        :dataw:
            | ndarray with reshaped dataw
        :camout:
            | NaN-filled ndarray for output of the CAM (camout.shape[-1] == n_out) 
        :originalshape:
            | original shape of data
            
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    # Convert data and dataw to atleast_2d ndarrays:
    data = np2d(data).copy()   # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy() # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)
    originalshape = data.shape  # to restore output to same shape

    # Make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    dataw = np.expand_dims(
        dataw, axis=1)  # add extra axis to move light source to axis 0

    # Make dataw have the same light source dimension size as data:
    if inputtype == 'xyz':
        if dataw.shape[0] == 1:
            dataw = np.repeat(dataw, data.shape[0], axis=0)
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)
    else:
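        # For spectral input: pair the shared wavelength row (dataw[0]) with each
        # white point spd, so that every light source gets its own (2 x wl) array: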
        dataw = np.array([
            np.vstack((dataw[:1, 0, :], dataw[i + 1:i + 2, 0, :]))
            for i in range(dataw.shape[0] - 1)
        ])
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)

    # Initialize output array:
    if n_out is not None:
        dshape = list((data).shape)
        dshape[-1] = n_out  # requested number of correlates: e.g. j,a,b
        if (inputtype != 'xyz') & (direction == 'forward'):
            dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
        camout = np.zeros(dshape)
        camout.fill(np.nan)
    else:
        camout = None
    return data, dataw, camout, originalshape
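A small shape sketch of the 'xyz' branch documented above (array sizes are illustrative assumptions): the light source axis is moved to axis 0 and camout is NaN-initialized with the same leading shape as data.

import numpy as np

N, M, n_out = 5, 2, 3                  # samples, light sources, output correlates (assumed)
data = np.random.rand(N, M, 3)         # tristimulus input, shape (N, M, 3)
dataw = np.random.rand(M, 3)           # one white point per light source

data = np.transpose(data, (1, 0, 2))   # light source axis -> axis 0: (M, N, 3)
dataw = dataw[:, None, :]              # add axis 1: (M, 1, 3), one white per source

camout = np.full(data.shape[:-1] + (n_out,), np.nan)  # (M, N, n_out), NaN-filled
print(data.shape, dataw.shape, camout.shape)          # (2, 5, 3) (2, 1, 3) (2, 5, 3)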
Example #27
def calculate_VF_PX_models(S, cri_type = _VF_CRI_DEFAULT, sampleset = None, pool = False, \
                           pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),\
                                          'Cref' : _VF_MAXR, 'sig' : _VF_SIG, 'labels' : '#'},\
                           vfcolor = 'k', verbosity = 0):
    """
    Calculate Vector Field and Pixel color shift models.
    
    Args:
        :S:
            | ndarray with spectral data of the light source(s)
            | (can contain multiple spectra).
        :cri_type: 
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool:
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            |  for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            |  for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                'Cref' : _VF_MAXR, 
            |                'sig' : _VF_SIG, 
            |                'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
    
    Returns:
        :returns:
            | :dataVF:, :dataPX: 
            | Dicts, for more info, see output description of resp.: 
            | luxpy.cri.VF_colorshift_model() and luxpy.cri.PX_colorshift_model()
    """
    # calculate VectorField cri_color_shift model:
    dataVF = VF_colorshift_model(S,
                                 cri_type=cri_type,
                                 sampleset=sampleset,
                                 vfcolor=vfcolor,
                                 pcolorshift=pcolorshift,
                                 pool=pool,
                                 verbosity=verbosity)

    # Set jab_ranges and _deltas for PX-model pixel calculations:
    PX_jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR])  # set same as for vectorfield generation
    PX_jab_ranges = np.vstack(
        ([0, 100, _VF_DELTAR], [-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR],
         [-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR]))  #IES4880 gamut

    # Calculate shift vectors using vectorfield and pixel methods:
    delta_SvsVF_vshift_ab_mean = np.zeros((len(dataVF), 1))
    delta_SvsVF_vshift_ab_mean.fill(np.nan)
    delta_SvsVF_vshift_ab_mean_normalized = delta_SvsVF_vshift_ab_mean.copy()
    delta_PXvsVF_vshift_ab_mean = np.zeros((len(dataVF), 1))
    delta_PXvsVF_vshift_ab_mean.fill(np.nan)
    delta_PXvsVF_vshift_ab_mean_normalized = delta_PXvsVF_vshift_ab_mean.copy()
    dataPX = [[] for k in range(len(dataVF))]
    for Snr in range(len(dataVF)):

        # Calculate shifts using pixel method, PX:
        dataPX[Snr] = PX_colorshift_model(dataVF[Snr]['Jab']['Jabt'][:, 0, :],
                                          dataVF[Snr]['Jab']['Jabr'][:, 0, :],
                                          jab_ranges=PX_jab_ranges,
                                          jab_deltas=PX_jab_deltas,
                                          limit_grid_radius=_VF_MAXR)

        # Calculate shift difference between Samples (S) and VectorField model predictions (VF):
        delta_SvsVF_vshift_ab = dataVF[Snr]['vshifts']['vshift_ab_s'] - dataVF[
            Snr]['vshifts']['vshift_ab_s_vf']
        delta_SvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt(
            (delta_SvsVF_vshift_ab[..., 1:3]**2).sum(
                axis=delta_SvsVF_vshift_ab[..., 1:3].ndim - 1)),
                                                     axis=0)
        delta_SvsVF_vshift_ab_mean_normalized[
            Snr] = delta_SvsVF_vshift_ab_mean[Snr] / dataVF[Snr]['Jab'][
                'DEi'].mean(axis=0)

        # Calculate shift difference between PiXel method (PX) and VectorField (VF):
        delta_PXvsVF_vshift_ab = dataPX[Snr]['vshifts'][
            'vectorshift_ab_J0'] - dataVF[Snr]['vshifts']['vshift_ab_vf']
        delta_PXvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt(
            (delta_PXvsVF_vshift_ab[..., 1:3]**2).sum(
                axis=delta_PXvsVF_vshift_ab[..., 1:3].ndim - 1)),
                                                      axis=0)
        delta_PXvsVF_vshift_ab_mean_normalized[
            Snr] = delta_PXvsVF_vshift_ab_mean[Snr] / dataVF[Snr]['Jab'][
                'DEi'].mean(axis=0)

        dataVF[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean'] = delta_PXvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts'][
            'delta_SvsVF_vshift_ab_mean'] = delta_SvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts'][
            'delta_SvsVF_vshift_ab_mean_normalized'] = delta_SvsVF_vshift_ab_mean_normalized[
                Snr]
        dataVF[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean_normalized'] = delta_PXvsVF_vshift_ab_mean_normalized[
                Snr]
        dataPX[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean'] = dataVF[Snr][
            'vshifts']['delta_PXvsVF_vshift_ab_mean']
        dataPX[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean_normalized'] = dataVF[Snr]['vshifts'][
                'delta_PXvsVF_vshift_ab_mean_normalized']

    return dataVF, dataPX
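The per-spectrum statistics above reduce each shift-difference field to a single number: the nan-mean Euclidean length of the (a,b) part of the difference between two shift fields. A standalone sketch with illustrative arrays:

import numpy as np

vshift_s = np.random.rand(100, 3)    # sample-based shifts (J, a, b), illustrative
vshift_vf = np.random.rand(100, 3)   # vector-field predicted shifts, illustrative

delta = vshift_s - vshift_vf                                                     # per-sample difference
mean_ab_shift = np.nanmean(np.sqrt((delta[..., 1:3]**2).sum(axis=-1)), axis=0)   # mean (a,b) shift length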
Example #28
File: mcri.py  Project: simongr2/luxpy
from ..utils.helpers import _get_hue_bin_data, _hue_bin_data_to_rg


_MCRI_DEFAULTS = {'sampleset': "_CRI_RFL['mcri']", 
                  'ref_type' : None, 
                  'cieobs' : {'xyz' : '1964_10', 'cct': '1931_2'}, 
                  'avg': math.geomean, 
                  'scale' : {'fcn': psy_scale, 'cfactor': [21.7016,   4.2106,   2.4154]}, 
                  'cspace': {'type': 'ipt', 'Mxyz2lms': [[ 0.400070,    0.707270,   -0.080674],[-0.228111, 1.150561,    0.061230],[0.0, 0.0,    0.931757]]}, 
                  'catf': {'xyzw': [94.81,  100.00,  107.32], 'mcat': 'cat02', 'cattype': 'vonkries', 'F':1, 'Yb': 20.0,'Dtype':'cat02', 'catmode' : '1>2'}, 
                  'rg_pars' : {'nhbins': None, 'start_hue':0.0, 'normalize_gamut': False, 'normalized_chroma_ref' : 100}, 
                  'cri_specific_pars' : {'similarity_ai' : np.array([[-0.09651,  0.41354,  40.64,  16.55,   -0.17],
                                                                     [ 0.16548,  0.38877,  58.27,  20.37,   -0.59],
                                                                     [ 0.32825,  0.49673,  35.97,  18.05,   -6.04],
                                                                     [ 0.02115, -0.13658, 261.62, 110.99,  -44.86],
                                                                     [-0.12686, -0.22593,  99.06,  55.90,  -39.86],
                                                                     [ 0.18488,  0.01172,  58.23,  62.55,  -22.86],
                                                                     [-0.03440,  0.23480,  94.71,  32.12,    2.90],
                                                                     [ 0.04258,  0.05040, 205.54,  53.08,  -35.20],
                                                                     [ 0.15829,  0.13624,  90.21,  70.83,  -19.01],
                                                                     [-0.01933, -0.02168, 742.97, 297.66, -227.30]])}
                }
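_MCRI_DEFAULTS is a plain Python dict, so a variant configuration can be built by deep-copying it and editing individual entries; whether and where such a copy is consumed is up to the calling code in mcri.py, so the snippet below is only an illustrative pattern:

import copy

my_mcri_settings = copy.deepcopy(_MCRI_DEFAULTS)   # independent copy of the defaults
my_mcri_settings['rg_pars']['nhbins'] = 10         # e.g. request 10 hue bins for the gamut calculation
my_mcri_settings['cieobs']['xyz'] = '1931_2'       # e.g. switch the CMF set used for xyz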


###############################################################################
def spd_to_mcri(SPD, D = 0.9, E = None, Yb = 20.0, out = 'Rm', wl = None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs, 
Example #29
def render_image(img = None, spd = None, rfl = None, out = 'img_hyp', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {}, CSF = None,\
                 interp_type = 'nd', k_neighbours = 4, show = True,
                 verbosity = 0, show_ref_img = True,\
                 stack_test_ref = 12,\
                 write_to_file = None):
    """
    Render image under specified light source spd.
    
    Args:
        :img: 
            | None or str or ndarray with float (max = 1) rgb image.
            | None loads a default image.
        :spd: 
            | ndarray, optional
            | Light source spectrum for rendering
            | If None: use CIE illuminant F4
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'img_hyp' or str, optional
            |  (other option: 'img_ren': rendered image under :spd:)
        :refspd:
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65 (srgb has a D65 white point)
        :D: 
            | None, optional
            | Degree of (von Kries) adaptation from spd to refspd. 
        :cieobs:
            | _CIEOBS, optional
            | CMF set for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If not None: the image rgb values are treated as raw camera rgb;
            | the :cspace: argument is overridden and the estimation is done
            | directly in raw rgb space.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :show: 
            | True, optional
            |  Show images.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.
        :show_ref_img:
            | True, optional
            | True: shows rendered image under reference spd. False: shows
            |  original image.
        :write_to_file:
            | None, optional
            | None: do nothing, else: write to filename(+path) in :write_to_file:
        :stack_test_ref: 
            | 12, optional
            |   - 12: left (test), right (ref) format for show and imwrite
            |   - 21: top (test), bottom (ref)
            |   - 1: only show/write test
            |   - 2: only show/write ref
            |   - 0: show both, write test

    Returns:
        :returns: 
            | img_hyp, img_ren, 
            | ndarrays with float hyperspectral image and rendered images 
    """

    # Get image:
    #imread = lambda x: plt.imread(x) #matplotlib.pyplot

    if img is not None:
        if isinstance(img, str):
            img = plt.imread(img)  # use matplotlib.pyplot's imread
    else:
        img = plt.imread(_HYPSPCIM_DEFAULT_IMAGE)
    if img.dtype == np.uint8:
        img = img / 255
    elif img.dtype == np.uint16:
        img = img / (2**16 - 1)

    # Convert to 2D format:
    rgb = img.reshape(img.shape[0] * img.shape[1], 3) * 1.0  # *1.0: make float copy
    rgb[rgb == 0] = _EPS  # avoid division by zero for pure blacks.

    # Get unique rgb values and positions:
    rgb_u, rgb_indices = np.unique(rgb, return_inverse=True, axis=0)

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']
    wlr = rfl[0]  # spectral reflectance set determines wavelength range for estimation (xyz_to_rfl())

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(
        refspd, wlr,
        kind='linear')  # force spd to same wavelength range as rfl

    # Convert rgb_u to xyz and lab-type values under assumed refspd:
    if CSF is None:
        xyz_wr = spd_to_xyz(refspd, cieobs=cieobs, relative=True)
        xyz_ur = colortf(rgb_u * 255, tf='srgb>xyz')
    else:
        xyz_ur = rgb_u  # for input in xyz_to_rfl (when CSF is not None, this function assumes the input is indeed raw rgb)

    # Estimate rfl's for xyz_ur:
    rfl_est, xyzri = xyz_to_rfl(xyz_ur, rfl = rfl, out = 'rfl_est,xyz_est', \
                 refspd = refspd, D = D, cieobs = cieobs, \
                 cspace = cspace, cspace_tf = cspace_tf, CSF = CSF,\
                 interp_type = interp_type, k_neighbours = k_neighbours,
                 verbosity = verbosity)

    # Get default test spd if none supplied:
    if spd is None:
        spd = _CIE_ILLUMINANTS['F4']

    if CSF is None:
        # calculate xyz values under test spd:
        xyzti, xyztw = spd_to_xyz(spd, rfl=rfl_est, cieobs=cieobs, out=2)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            xyzti = cat.apply(xyzti, xyzw1=xyztw, xyzw2=xyz_wr, D=D)

        # Convert xyzti under test spd to srgb:
        rgbti = colortf(xyzti, tf='srgb') / 255
    else:
        # Calculate rgb coordinates from camera sensitivity functions under spd:
        rgbti = rfl_to_rgb(rfl_est, spd=spd, CSF=CSF, wl=None)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            white = np.ones_like(spd)
            white[0] = spd[0]
            rgbwr = rfl_to_rgb(white, spd=refspd, CSF=CSF, wl=None)
            rgbwt = rfl_to_rgb(white, spd=spd, CSF=CSF, wl=None)
            rgbti = cat.apply_vonkries2(rgbti,
                                        rgbwt,
                                        rgbwr,
                                        xyzw0=np.array([[1.0, 1.0, 1.0]]),
                                        in_='rgb',
                                        out_='rgb',
                                        D=1)

    # Reconstruct original locations for rendered image rgbs:
    img_ren = rgbti[rgb_indices]
    img_ren.shape = img.shape  # reshape back to 3D size of original

    # For output:
    if show_ref_img == True:
        rgb_ref = colortf(xyzri, tf='srgb') / 255 if (
            CSF is None
        ) else xyzri  # if CSF not None: xyzri contains rgbri !!!
        img_ref = rgb_ref[rgb_indices]
        img_ref.shape = img.shape  # reshape back to 3D size of original
        img_str = 'Rendered (under ref. spd)'
        img = img_ref
    else:
        img_str = 'Original'

    if (stack_test_ref > 0) or (show == True):
        if stack_test_ref == 21:
            img_original_rendered = np.vstack(
                (img_ren, np.ones((4, img.shape[1], 3)), img))
            img_original_rendered_str = 'Rendered (under test spd)\n ' + img_str
        elif stack_test_ref == 12:
            img_original_rendered = np.hstack(
                (img_ren, np.ones((img.shape[0], 4, 3)), img))
            img_original_rendered_str = 'Rendered (under test spd) | ' + img_str
        elif stack_test_ref == 1:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'
        elif stack_test_ref == 2:
            img_original_rendered = img
            img_original_rendered_str = img_str
        elif stack_test_ref == 0:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'

    if write_to_file is not None:
        # Write rendering results to image file:
        #print('Writing rendering results to image file: {}'.format(write_to_file))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(write_to_file, img_original_rendered)

    if show == True:
        # show images using pyplot.show():
        plt.figure()

        plt.imshow(img_original_rendered)
        plt.title(img_original_rendered_str)
        plt.gca().get_xaxis().set_ticklabels([])
        plt.gca().get_yaxis().set_ticklabels([])

        if stack_test_ref == 0:
            plt.figure()
            plt.imshow(img)
            plt.title(img_str)
            plt.axis('off')

    if 'img_hyp' in out.split(','):
        # Create hyper_spectral image:
        rfl_image_2D = rfl_est[rgb_indices + 1, :]  # create array with all rfls required for each pixel
        img_hyp = rfl_image_2D.reshape(img.shape[0], img.shape[1],
                                       rfl_image_2D.shape[1])

    # Setup output:
    if out == 'img_hyp':
        return img_hyp
    elif out == 'img_ren':
        return img_ren
    else:
        return eval(out)
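A minimal usage sketch, assuming render_image is exposed through luxpy's hypspcim toolbox; with all spectral arguments left at their defaults it renders the packaged default image under CIE F4 against a D65 reference:

from luxpy.toolboxes import hypspcim   # assumed import path for this module

img_hyp = hypspcim.render_image(out='img_hyp', show=False)   # hyperspectral image
img_ren = hypspcim.render_image(out='img_ren', show=False)   # rendered rgb image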
Example #30
_HYPSPCIM_DEFAULT_IMAGE = _PKG_PATH + _SEP + 'toolboxes' + _SEP + 'hypspcim' + _SEP + 'data' + _SEP + 'testimage1.jpg'

_ROUNDING = 6  # to speed up xyz_to_rfl search algorithm

# Nikon D700 camera sensitivity functions:
_CSF_NIKON_D700 = np.vstack(
    (np.arange(400, 710, 10),
     np.array([[
         0.005, 0.007, 0.012, 0.015, 0.023, 0.025, 0.030, 0.026, 0.024, 0.019,
         0.010, 0.004, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
         0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
         0.000
     ],
               [
                   0.000, 0.000, 0.000, 0.000, 0.000, 0.001, 0.002, 0.003,
                   0.005, 0.007, 0.012, 0.013, 0.015, 0.016, 0.017, 0.020,
                   0.013, 0.011, 0.009, 0.005, 0.001, 0.001, 0.001, 0.001,
                   0.001, 0.001, 0.001, 0.001, 0.002, 0.002, 0.003
               ],
               [
                   0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
                   0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
                   0.001, 0.003, 0.010, 0.012, 0.013, 0.022, 0.020, 0.020,
                   0.018, 0.017, 0.016, 0.016, 0.014, 0.014, 0.013
               ]])[::-1]))
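A quick layout check of the array above (the R, G, B row ordering after the [::-1] reversal is an assumption based on the peak positions of the three curves):

wl = _CSF_NIKON_D700[0]          # wavelength vector, 400..700 nm in 10 nm steps
csf_rgb = _CSF_NIKON_D700[1:]    # three camera sensitivity curves (assumed R, G, B order)
assert _CSF_NIKON_D700.shape == (4, 31)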


def xyz_to_rfl(xyz, CSF = None, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {},\
                 interp_type = 'nd', k_neighbours = 4, verbosity = 0):
    """