Example 1
def v_to_cik(v, inverse = False):
    """
    Calculate 2x2 '(covariance matrix)^-1' elements cik 
    
    Args:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :inverse:
            | If True: return inverse of cik.
    
    Returns:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
    
    Notes:
        | cik is only the inverse of a covariance matrix
        | in the case of a Gaussian or normal distribution!

    """
    v = np.atleast_2d(v)
    g11 = (1/v[:,0]*np.cos(v[:,4]))**2 + (1/v[:,1]*np.sin(v[:,4]))**2
    g22 = (1/v[:,0]*np.sin(v[:,4]))**2 + (1/v[:,1]*np.cos(v[:,4]))**2
    g12 = (1/v[:,0]**2 - 1/v[:,1]**2)*np.sin(v[:,4])*np.cos(v[:,4])
    cik = np.zeros((g11.shape[0],2,2))

    for i in range(g11.shape[0]):
        cik[i,:,:] = np.vstack((np.hstack((g11[i],g12[i])), np.hstack((g12[i],g22[i]))))
        if inverse:
            cik[i,:,:] = np.linalg.inv(cik[i,:,:])
    return cik
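
A minimal usage sketch (an illustration added here, not part of the original source; it assumes numpy is imported as np):

import numpy as np
# Hypothetical demo ellipse with Rmax=2, Rmin=1, center (0,0), rotated 30°:
v = np.array([[2., 1., 0., 0., np.pi/6]])
cik = v_to_cik(v)                      # (1x2x2) '(covariance matrix)^-1'
cik_inv = v_to_cik(v, inverse=True)    # its inverse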
Example 2
def spher2cart(theta, phi, r = 1., deg = True):
    """
    Convert spherical to cartesian coordinates.
    
    Args:
        :theta:
            | Float, int or ndarray
            | Angle with positive z-axis.
        :phi:
            | Float, int or ndarray
            | Angle around positive z-axis starting from x-axis.
        :r:
            | 1., optional
            | Float, int or ndarray
            | Radius.
        :deg:
            | True, optional
            | If True: :theta: and :phi: are assumed to be in degrees,
            | else in radians.
            
    Returns:
        :x, y, z:
            | tuple of floats, ints or ndarrays
            | Cartesian coordinates
    """
    if deg:
        theta = np.deg2rad(theta)
        phi = np.deg2rad(phi)
    x = r*np.sin(theta)*np.cos(phi)
    y = r*np.sin(theta)*np.sin(phi)
    z = r*np.cos(theta)
    return x,y,z
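
For example (a sketch, assuming numpy is imported as np):

import numpy as np
# theta = 90° from the z-axis, phi = 0° from the x-axis, radius 2
# lands on the positive x-axis:
x, y, z = spher2cart(90, 0, r=2.)      # -> (2.0, 0.0, ~0.0)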
Example 3
def get_xyz(self, *args):
    """ Get Cartesian coordinates from spherical (theta, phi, r) arguments. """
    theta, phi, r = args
    x = r * np.sin(theta) * np.cos(phi)
    y = r * np.sin(theta) * np.sin(phi)
    z = r * np.cos(theta)
    z[np.abs(z) < self._TINY] = 0.0  # snap near-zero z values to exactly 0
    return x, y, z
Example 4
def _hue_bin_data_to_Rxhj(hue_bin_data, scale_factor):

    nhbins = hue_bin_data['nhbins']
    start_hue = hue_bin_data['start_hue']

    # A. Local color fidelity, Rfhj:
    # TM30 specifies the average of the DEi per hue bin,
    # not the DE of the average jabt, jabr:
    #DEhj = ((hue_bin_data['jabt_hj']-hue_bin_data['jabr_hj'])**2).sum(axis=-1)**0.5
    DEhj = hue_bin_data['DE_hj']
    Rfhj = log_scale(DEhj, scale_factor=scale_factor)

    # B.Local chroma shift and hue shift, [Rcshi, Rhshi]:
    # B.1 relative paths:
    dab = (hue_bin_data['jabt_hj'] - hue_bin_data['jabr_hj'])[..., 1:] / (
        hue_bin_data['Cr_hj'][..., None])

    # B.2 Reference unit circle:
    hbincenters = np.arange(start_hue + np.pi / nhbins, 2 * np.pi,
                            2 * np.pi / nhbins)[..., None]
    arc = np.cos(hbincenters)
    brc = np.sin(hbincenters)

    # B.3 calculate local chroma shift, Rcshi:
    Rcshi = dab[..., 0] * arc + dab[..., 1] * brc

    # B.4 calculate local hue shift, Rhshi:
    Rhshi = dab[..., 1] * arc - dab[..., 0] * brc

    return Rcshi, Rhshi, Rfhj, DEhj
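
Steps B.3 and B.4 project the relative ab shift dab onto the radial direction (cos h, sin h) and the tangential direction (-sin h, cos h) of each hue-bin center. A standalone sketch of that projection (illustrative only, assuming numpy as np):

import numpy as np
h = np.pi / 4                                  # hue-bin center angle
dab = np.array([0.1, 0.2])                     # relative (da, db) shift
Rcsh = dab[0]*np.cos(h) + dab[1]*np.sin(h)     # radial component -> local chroma shift
Rhsh = dab[1]*np.cos(h) - dab[0]*np.sin(h)     # tangential component -> local hue shift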
Example 5
def pol2cart(theta, r = None, htype = 'deg'):
    """
    Convert polar to Cartesian coordinates.
    
    Args:
        :theta: 
            | float or ndarray with theta-coordinates
        :r: 
            | None or float or ndarray with r-coordinates, optional
            | If None, r-coordinates are assumed to be in :theta:.
        :htype:
            | 'deg' or 'rad', optional
            | Input type of :theta:.
    
    Returns:
        :returns:
            | (float or ndarray of x, float or ndarray of y) coordinates 
    """
    if htype == 'deg':
        d2r = np.pi/180.0
    else:
        d2r = 1.0
    if r is None:
        r = theta[...,1].copy()
        theta = theta[...,0].copy()
    theta = theta*d2r
    return r*np.cos(theta), r*np.sin(theta)
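
For example (a sketch, assuming numpy is imported as np):

import numpy as np
x, y = pol2cart(np.array([0., 90.]), np.array([1., 2.]))  # -> x ≈ [1, 0], y ≈ [0, 2]
# r may also be packed into the last axis of :theta::
x2, y2 = pol2cart(np.array([[0., 1.], [90., 2.]]))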
Example 6
def cik_to_v(cik, xyc = None, inverse = False):
    """
    Calculate v-format ellipse descriptor from 2x2 'covariance matrix'^-1 cik 
    
    Args:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
        :xyc:
            | None or (Nx2) np.ndarray, optional
            | Center coordinates (xc, yc) to store in v[:,2:4].
        :inverse:
            | If True: input is inverse of cik.
            
    Returns:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]

    Notes:
        | cik is only the inverse of a covariance matrix
        | in the case of a Gaussian or normal distribution!

    """
    if cik.ndim < 3:
        cik = cik[None,...]
    
    if inverse:
        cik = np.linalg.inv(cik)  # batched inverse over the stacked 2x2 matrices (avoids mutating the input)
            
    g11 = cik[:,0,0]
    g22 = cik[:,1,1] 
    g12 = cik[:,0,1]

    theta = 0.5*np.arctan2(2*g12,(g11-g22)) + (np.pi/2)*(g12<0)
    cottheta = np.cos(theta)/np.sin(theta) # cot(theta); numpy has no np.cot
    cottheta[np.isinf(cottheta)] = 0

    a = 1/np.sqrt((g22 + g12*cottheta))
    b = 1/np.sqrt((g11 - g12*cottheta))

    # ensure the largest ellipse axis comes first (and correct the angle):
    c = b > a
    a[c], b[c], theta[c] = b[c], a[c], theta[c] + np.pi/2

    v = np.vstack((a, b, np.zeros(a.shape), np.zeros(a.shape), theta)).T
    
    # add center coordinates:
    if xyc is not None:
        v[:,2:4] = xyc
    
    return v
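
This function inverts v_to_cik (Example 1) up to the center coordinates, which cik does not carry. A round-trip sketch (assuming numpy as np):

import numpy as np
v = np.array([[2., 1., 0.5, -0.5, np.pi/6]])
cik = v_to_cik(v)                      # drops the center (xc, yc)
v2 = cik_to_v(cik, xyc=v[:, 2:4])      # pass the center back in explicitly
# v2 ≈ v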
Example 7
def dtlz2_(x, M):
    """
    DTLZ2 multi-objective function
    
    | This function represents a hyper-sphere.
    | Using k = 10, the number of dimensions must be n = (M - 1) + k.
    | The Pareto optimal solutions are obtained when the last k variables of x
    | are equal to 0.5.
    
    Args:
        :x: 
            | a n x mu ndarray with mu points and n dimensions
        :M: 
            | a scalar with the number of objectives
    
    Returns:
        :f: 
            | an M x mu ndarray with mu points and their M objectives computed at
            | the input
    """
    k = 10
    
    # Error check: the number of dimensions must be M-1+k
    n = (M-1) + k  # this is the default
    if x.shape[0] != n:
        raise ValueError('Using k = 10, it is required that the number of dimensions be n = (M - 1) + k = {:1.0f} in this case.'.format(n))
    
    xm = x[(n-k):,:].copy() #xm contains the last k variables
    g = ((xm - 0.5)**2).sum(axis = 0)
    
    # Computes the functions:
    f = np.empty((M,x.shape[1]))
    f[0,:] = (1 + g)*np.prod(np.cos(np.pi/2*x[:(M-1),:]), axis = 0)
    for ii in range(1,M-1):
        f[ii,:] = (1 + g) * np.prod(np.cos(np.pi/2*x[:(M-ii-1),:]), axis = 0) * np.sin(np.pi/2*x[M-ii-1,:])
    f[M-1,:] = (1 + g) * np.sin(np.pi/2*x[0,:])
    return f
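
For example, evaluating points that satisfy the Pareto-optimal condition (last k = 10 variables equal to 0.5) for M = 3 objectives yields points on the unit sphere (a sketch, assuming numpy as np):

import numpy as np
M, k, mu = 3, 10, 5
x = np.vstack((np.random.rand(M - 1, mu),   # free variables in [0,1]
               0.5 * np.ones((k, mu))))     # Pareto-optimal tail -> g = 0
f = dtlz2_(x, M)                            # shape (3, 5)
# np.sum(f**2, axis=0) ≈ 1 for every column (points on the unit sphere)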
Example 8
def plotcircle(center=np.array([[0., 0.]]),
               radii=np.arange(0, 60, 10),
               angles=np.arange(0, 350, 10),
               color='k',
               linestyle='--',
               out=None,
               axh=None,
               **kwargs):
    """
    Plot one or more concentric circles.
    
    Args:
        :center: 
            | np.array([[0.,0.]]) or ndarray with center coordinates, optional
        :radii:
            | np.arange(0,60,10) or ndarray with radii of circle(s), optional
        :angles:
            | np.arange(0,350,10) or ndarray with angles (°), optional
        :color:
            | 'k', optional
            | Color for plotting.
        :linestyle:
            | '--', optional
            | Linestyle of circles.
        :out: 
            | None, optional
            | If None: plot the circles.
            | If 'x,y': return the circle coordinates instead of plotting.
            | If 'axh': return the axes handle.
        :axh:
            | None, optional
            | Axes handle to plot in. If None: create a new figure with axes.
    """
    xs = np.array([0])
    ys = xs.copy()
    if ((out != 'x,y') and (axh is None)):
        fig, axh = plt.subplots(nrows=1, ncols=1)
    for ri in radii:
        x = center[:, 0] + ri * np.cos(angles * np.pi / 180)
        y = center[:, 1] + ri * np.sin(angles * np.pi / 180)
        xs = np.hstack((xs, x))
        ys = np.hstack((ys, y))
        if (out != 'x,y'):
            axh.plot(x, y, color=color, linestyle=linestyle, **kwargs)
    if out == 'x,y':
        return xs, ys
    elif out == 'axh':
        return axh
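
For example (a sketch, assuming matplotlib and numpy are imported):

import numpy as np
import matplotlib.pyplot as plt
# Draw circles of radius 10..50 around the origin and retrieve the axes handle:
axh = plotcircle(radii=np.arange(10, 60, 10), color='b', out='axh')
axh.set_aspect('equal')
plt.show()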
Example 9
def plotcircle(radii = np.arange(0,60,10), \
               angles = np.arange(0,350,10),\
               color = 'k',linestyle = '--', out = None):
    """
    Plot one or more concentric circles around (0,0).
    
    Args:
        :radii:
            | np.arange(0,60,10) or ndarray with radii of circle(s), optional
        :angles:
            | np.arange(0,350,10) or ndarray with angles (°), optional
        :color: 
            | 'k', optional
            | Color for plotting.
        :linestyle:
            | '--', optional
            | Linestyle of circles.
        :out: 
            | None, optional
            | If None: plot the circles.
            | If 'x,y': return the circle coordinates instead of plotting.
    
    Returns:
        :x,y:
            | ndarrays with circle coordinates (only returned if out == 'x,y')
    """
    x = np.array([0])
    y = x.copy()
    for ri in radii:
        xi = ri * np.cos(angles * np.pi / 180)
        yi = ri * np.sin(angles * np.pi / 180)
        x = np.hstack((x, xi))
        y = np.hstack((y, yi))
        if out != 'x,y':
            plt.plot(xi, yi, color=color, linestyle=linestyle)
    if out == 'x,y':
        return x, y
Example 10
def plotellipse(v, cspace_in = 'Yxy', cspace_out = None, nsamples = 100, \
                show = True, axh = None, \
                line_color = 'darkgray', line_style = ':', line_width = 1, line_marker = '', line_markersize = 4,\
                plot_center = False, center_marker = 'o', center_color = 'darkgray', center_markersize = 4,\
                show_grid = False, llabel = '', label_fontname = 'Times New Roman', label_fontsize = 12,\
                out = None):
    """
    Plot ellipse(s) given in v-format [Rmax,Rmin,xc,yc,theta].
    
    Args:
        :v: 
            | (Nx5) ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :cspace_in:
            | 'Yxy', optional
            | Color space of v.
            | If None: no color space assumed. Axis labels assumed ('x','y').
        :cspace_out:
            | None, optional
            | Color space to plot ellipse(s) in.
            | If None: plot in cspace_in.
        :nsamples:
            | 100 or int, optional
            | Number of points (samples) in ellipse boundary
        :show:
            | True or boolean, optional
            | Plot ellipse(s) (True) or not (False)
        :axh: 
            | None, optional
            | Ax-handle to plot ellipse(s) in.
            | If None: create new figure with axes.
        :line_color:
            | 'darkgray', optional
            | Color to plot ellipse(s) in.
        :line_style:
            | ':', optional
            | Linestyle of ellipse(s).
        :line_width:
            | 1, optional
            | Width of ellipse boundary line.
        :line_marker:
            | '', optional
            | Marker for ellipse boundary.
        :line_markersize:
            | 4, optional
            | Size of markers in ellipse boundary.
        :plot_center:
            | False, optional
            | Plot center of ellipse: yes (True) or no (False)
        :center_color:
            | 'darkgray', optional
            | Color to plot ellipse center in.
        :center_marker:
            | 'o', optional
            | Marker for ellipse center.
        :center_markersize:
            | 4, optional
            | Size of marker of ellipse center.
        :show_grid:
            | False, optional
            | Show grid (True) or not (False)
        :llabel:
            | '', optional
            | Legend label for ellipse boundary.
        :label_fontname: 
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :out:
            | None, optional
            | Output of function.
            | If None: returns None.
            | If 'axh': returns the handle of the (newly created) figure axes.
            | If 'Yxys': returns an ndarray with the coordinates of the
            |     ellipse boundaries in cspace_out (shape = (nsamples, N, 3)).
            
        
    Returns:
        :returns: None, or whatever set by :out:.
    """
    Yxys = np.zeros((nsamples, 3, v.shape[0]))
    ellipse_vs = np.zeros((v.shape[0], 5))
    for i, vi in enumerate(v):

        # Set sample density of ellipse boundary:
        t = np.linspace(0, 2 * np.pi, int(nsamples))

        a = vi[0]  # major axis
        b = vi[1]  # minor axis
        xyc = vi[2:4, None]  # center
        theta = vi[-1]  # rotation angle

        # define rotation matrix:
        R = np.hstack((np.vstack((np.cos(theta), np.sin(theta))),
                       np.vstack((-np.sin(theta), np.cos(theta)))))

        # Calculate ellipses:
        Yxyc = np.vstack((1, xyc)).T
        Yxy = np.vstack(
            (np.ones((1, nsamples)),
             xyc + np.dot(R, np.vstack((a * np.cos(t), b * np.sin(t)))))).T
        Yxys[:, :, i] = Yxy

        # Convert to requested color space:
        if (cspace_out is not None) and (cspace_in is not None):
            Yxy = colortf(Yxy, cspace_in + '>' + cspace_out)
            Yxyc = colortf(Yxyc, cspace_in + '>' + cspace_out)
            Yxys[:, :, i] = Yxy

            # get ellipse parameters in requested color space:
            ellipse_vs[i, :] = math.fit_ellipse(Yxy[:, 1:])
            #de = np.sqrt((Yxy[:,1]-Yxyc[:,1])**2 + (Yxy[:,2]-Yxyc[:,2])**2)
            #ellipse_vs[i,:] = np.hstack((de.max(),de.min(),Yxyc[:,1],Yxyc[:,2],np.nan)) # nan because orientation is xy, but request is some other color space. Change later to actual angle when fitellipse() has been implemented

        # plot ellipses:
        if show:
            if (axh is None) and (i == 0):
                fig = plt.figure()
                axh = fig.add_subplot(111)

            if (cspace_in is None):
                xlabel = 'x'
                ylabel = 'y'
            else:
                xlabel = _CSPACE_AXES[cspace_in][1]
                ylabel = _CSPACE_AXES[cspace_in][2]

            if (cspace_out is not None):
                xlabel = _CSPACE_AXES[cspace_out][1]
                ylabel = _CSPACE_AXES[cspace_out][2]

            if plot_center:
                axh.plot(Yxyc[:, 1],
                         Yxyc[:, 2],
                         color=center_color,
                         linestyle='none',
                         marker=center_marker,
                         markersize=center_markersize)
            if llabel is None:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize)
            else:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize,
                         label=llabel)

            axh.set_xlabel(xlabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            axh.set_ylabel(ylabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            if show_grid:
                plt.grid(True)
            #plt.show()
    Yxys = np.transpose(Yxys, axes=(0, 2, 1))
    if out is not None:
        return eval(out)
    else:
        return None
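
A usage sketch (assuming the surrounding luxpy module context, i.e. colortf and _CSPACE_AXES, is available):

import numpy as np
# Two ellipses in Yxy coordinates, v = [Rmax, Rmin, xc, yc, theta]:
v = np.array([[0.02, 0.01, 0.33, 0.33, 0.],
              [0.03, 0.015, 0.40, 0.35, np.pi/4]])
axh = plotellipse(v, cspace_in='Yxy', plot_center=True, llabel='demo', out='axh')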
Example 11
def apply_poly_model_at_hue_x(poly_model, pmodel, dCHoverC_res, \
                              hx = None, Cxr = 40, sig = _VF_SIG):
    """
    Applies base color shift model at (hue,chroma) coordinates
    
    Args:
        :poly_model: 
            | function handle to model
        :pmodel:
            | ndarray with model parameters.
        :dCHoverC_res:
            | ndarray with residuals between 'dCoverC,dH' of samples 
            | and 'dCoverC,dH' predicted by the model.
            | Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |      (predicted from model, see notes luxpy.cri.get_poly_model())
        :hx:
            | None or ndarray, optional
            | None defaults to np.arange(np.pi/10.0,2*np.pi,2*np.pi/10.0)
        :Cxr:
            | 40, optional
            | Chroma of the reference coordinates at which the model is applied.
        :sig: 
            | _VF_SIG or float, optional
            | Determines smooth transition between hue-bin-boundaries (no hard 
            | cutoff at hue bin boundary).
        
    Returns:
        :returns: 
            | ndarrays with dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
            | Note '_sig' denotes the uncertainty: 
            |     e.g.  dH_x_sig is the uncertainty of dH at input (hue/chroma).
    """

    if hx is None:
        dh = 2 * np.pi / 10.0
        # hue angles at which to apply model, i.e. calculate 'average' measures:
        hx = np.arange(dh / 2, 2 * np.pi, dh)

    # A calculate reference coordinates:
    axr = Cxr * np.cos(hx)
    bxr = Cxr * np.sin(hx)

    # B apply model at reference coordinates to obtain test coordinates:
    axt, bxt, Cxt, hxt, axr, bxr, Cxr, hxr = apply_poly_model_at_x(
        poly_model, pmodel, axr, bxr)

    # C Calculate dC/C, dH for test and ref at fixed hues:
    dCoverC_x = (Cxt - Cxr) / (np.hstack((Cxr + Cxt)).max())
    dH_x = (180 / np.pi) * (hxt - hxr)
    #    dCoverC_x = np.round(dCoverC_x,decimals = 2)
    #    dH_x = np.round(dH_x,decimals = 0)

    # D calculate 'average' noise measures using sig-value:
    href = dCHoverC_res[:, 0:1]
    dCoverC_res = dCHoverC_res[:, 1:2]
    dHoverC_res = dCHoverC_res[:, 2:3]
    # Gaussian weights in the circular hue distance between href and hx
    # (the third term wraps around in the other direction):
    dHsigi = np.exp((np.dstack(
        (np.abs(hx - href), np.abs(hx - href - 2 * np.pi),
         np.abs(hx - href + 2 * np.pi))).min(axis=2)**2) / (-2) / sig)
    dH_x_sig = (180 / np.pi) * (np.sqrt(
        (dHsigi * (dHoverC_res**2)).sum(axis=0, keepdims=True) /
        dHsigi.sum(axis=0, keepdims=True)))
    #dH_x_sig_avg = np.sqrt(np.sum(dH_x_sig**2,axis=1)/hx.shape[0])
    dCoverC_x_sig = (np.sqrt(
        (dHsigi * (dCoverC_res**2)).sum(axis=0, keepdims=True) /
        dHsigi.sum(axis=0, keepdims=True)))
    #dCoverC_x_sig_avg = np.sqrt(np.sum(dCoverC_x_sig**2,axis=1)/hx.shape[0])

    return dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
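
Step D weights every sample residual with a Gaussian of the circular distance between the sample hue href and each evaluation hue hx (sig sets the width). A standalone sketch of that weighting (illustrative only, assuming numpy as np):

import numpy as np
hx = np.array([np.pi / 2])                 # evaluation hue(s)
href = np.array([[0.1], [3.0], [6.0]])     # sample hues (column vector)
sig = 0.3
d = np.abs(hx - href)                      # circular hue distance:
d = np.minimum(np.minimum(d, np.abs(hx - href - 2*np.pi)), np.abs(hx - href + 2*np.pi))
w = np.exp(-d**2 / (2*sig))                # Gaussian weights, as in dHsigi above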
Example 12
def get_poly_model(jabt, jabr, modeltype=_VF_MODEL_TYPE):
    """
    Setup base color shift model (delta_a, delta_b), 
    determine model parameters and accuracy.
    
    | Calculates a base color shift (delta) from the ref. chromaticity ar, br.
    
    Args:
        :jabt: 
            | ndarray with jab color coordinates under the test SPD.
        :jabr: 
            | ndarray with jab color coordinates under the reference SPD.
        :modeltype:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies the 5-parameter ('M5') or 6-parameter ('M6') quadratic
            | model in ab-coordinates (see notes below).
            
    Returns:
        :returns: 
            | (poly_model, 
            |       pmodel, 
            |       dab_model, 
            |        dab_res, 
            |        dCHoverC_res, 
            |        dab_std, 
            |        dCHoverC_std)
            |
            | :poly_model: function handle to model
            | :pmodel: ndarray with model parameters
            | :dab_model: ndarray with ab model predictions from ar, br.
            | :dab_res: ndarray with residuals between 'da,db' of samples and 
            |            'da,db' predicted by the model.
            | :dCHoverC_res: ndarray with residuals between 'dCoverC,dH' 
            |                 of samples and 'dCoverC,dH' predicted by the model.
            |     Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |         (predicted from model, see notes below)
            | :dab_std: ndarray with std of :dab_res:
            | :dCHoverC_std: ndarray with std of :dCHoverC_res: 

    Notes: 
        1. Model types:
            | poly5_model = lambda a,b,p:         p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
            | poly6_model = lambda a,b,p:  p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
        
        2. Calculation of dCoverC and dH:
            | dCoverC = (np.cos(hr)*da + np.sin(hr)*db)/Cr
            | dHoverC = (np.cos(hr)*db - np.sin(hr)*da)/Cr    
    """
    at = jabt[..., 1]
    bt = jabt[..., 2]
    ar = jabr[..., 1]
    br = jabr[..., 2]

    # A. Calculate da, db:
    da = at - ar
    db = bt - br

    # B.1 Calculate model matrix:
    # 5-parameter model:
    M5 = np.array([[
        np.sum(ar * ar),
        np.sum(ar * br),
        np.sum(ar * ar**2),
        np.sum(ar * ar * br),
        np.sum(ar * br**2)
    ],
                   [
                       np.sum(br * ar),
                       np.sum(br * br),
                       np.sum(br * ar**2),
                       np.sum(br * ar * br),
                       np.sum(br * br**2)
                   ],
                   [
                       np.sum((ar**2) * ar),
                       np.sum((ar**2) * br),
                       np.sum((ar**2) * ar**2),
                       np.sum((ar**2) * ar * br),
                       np.sum((ar**2) * br**2)
                   ],
                   [
                       np.sum(ar * br * ar),
                       np.sum(ar * br * br),
                       np.sum(ar * br * ar**2),
                       np.sum(ar * br * ar * br),
                       np.sum(ar * br * br**2)
                   ],
                   [
                       np.sum((br**2) * ar),
                       np.sum((br**2) * br),
                       np.sum((br**2) * ar**2),
                       np.sum((br**2) * ar * br),
                       np.sum((br**2) * br**2)
                   ]])
    #6-parameters model
    M6 = np.array([[
        ar.size,
        np.sum(1.0 * ar),
        np.sum(1.0 * br),
        np.sum(1.0 * ar**2),
        np.sum(1.0 * ar * br),
        np.sum(1.0 * br**2)
    ],
                   [
                       np.sum(ar * 1.0),
                       np.sum(ar * ar),
                       np.sum(ar * br),
                       np.sum(ar * ar**2),
                       np.sum(ar * ar * br),
                       np.sum(ar * br**2)
                   ],
                   [
                       np.sum(br * 1.0),
                       np.sum(br * ar),
                       np.sum(br * br),
                       np.sum(br * ar**2),
                       np.sum(br * ar * br),
                       np.sum(br * br**2)
                   ],
                   [
                       np.sum((ar**2) * 1.0),
                       np.sum((ar**2) * ar),
                       np.sum((ar**2) * br),
                       np.sum((ar**2) * ar**2),
                       np.sum((ar**2) * ar * br),
                       np.sum((ar**2) * br**2)
                   ],
                   [
                       np.sum(ar * br * 1.0),
                       np.sum(ar * br * ar),
                       np.sum(ar * br * br),
                       np.sum(ar * br * ar**2),
                       np.sum(ar * br * ar * br),
                       np.sum(ar * br * br**2)
                   ],
                   [
                       np.sum((br**2) * 1.0),
                       np.sum((br**2) * ar),
                       np.sum((br**2) * br),
                       np.sum((br**2) * ar**2),
                       np.sum((br**2) * ar * br),
                       np.sum((br**2) * br**2)
                   ]])

    # B.2 Define model function:
    poly5_model = lambda a, b, p: p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
    poly6_model = lambda a, b, p: p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)

    if modeltype == 'M5':
        M = M5
        poly_model = poly5_model
    else:
        M = M6
        poly_model = poly6_model

    M = np.linalg.inv(M)

    # C.1 Data a,b analysis output:
    if modeltype == 'M5':
        da_model_parameters = np.dot(
            M,
            np.array([
                np.sum(da * ar),
                np.sum(da * br),
                np.sum(da * ar**2),
                np.sum(da * ar * br),
                np.sum(da * br**2)
            ]))
        db_model_parameters = np.dot(
            M,
            np.array([
                np.sum(db * ar),
                np.sum(db * br),
                np.sum(db * ar**2),
                np.sum(db * ar * br),
                np.sum(db * br**2)
            ]))
    else:
        da_model_parameters = np.dot(
            M,
            np.array([
                np.sum(da * 1.0),
                np.sum(da * ar),
                np.sum(da * br),
                np.sum(da * ar**2),
                np.sum(da * ar * br),
                np.sum(da * br**2)
            ]))
        db_model_parameters = np.dot(
            M,
            np.array([
                np.sum(db * 1.0),
                np.sum(db * ar),
                np.sum(db * br),
                np.sum(db * ar**2),
                np.sum(db * ar * br),
                np.sum(db * br**2)
            ]))
    pmodel = np.vstack((da_model_parameters, db_model_parameters))

    # D.1 Calculate model da, db:
    da_model = poly_model(ar, br, pmodel[0])
    db_model = poly_model(ar, br, pmodel[1])
    dab_model = np.hstack((da_model, db_model))

    # D.2 Calculate residuals for da & db:
    da_res = da - da_model
    db_res = db - db_model
    dab_res = np.hstack((da_res, db_res))
    dab_std = np.vstack((np.std(da_res, axis=0), np.std(db_res, axis=0)))

    # E Calculate href, Cref:
    href = np.arctan2(br, ar)
    Cref = (ar**2 + br**2)**0.5

    # F Calculate dC/C, dH/C for data and model and calculate residuals:
    dCoverC = (np.cos(href) * da + np.sin(href) * db) / Cref
    dHoverC = (np.cos(href) * db - np.sin(href) * da) / Cref
    dCoverC_model = (np.cos(href) * da_model + np.sin(href) * db_model) / Cref
    dHoverC_model = (np.cos(href) * db_model - np.sin(href) * da_model) / Cref
    dCoverC_res = dCoverC - dCoverC_model
    dHoverC_res = dHoverC - dHoverC_model
    dCHoverC_std = np.vstack((np.std(dCoverC_res,
                                     axis=0), np.std(dHoverC_res, axis=0)))

    dCHoverC_res = np.hstack((href, dCoverC_res, dHoverC_res))

    return poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std
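
The normal-equation matrices M5/M6 above can equivalently be built from a design matrix and solved with a least-squares routine. A compact alternative sketch for the 6-parameter model (not the library's implementation; assumes 1-D ar, br, da, db arrays and numpy as np):

import numpy as np
def fit_poly6(ar, br, da, db):
    # Design matrix of the 6-parameter quadratic model: [1, a, b, a^2, a*b, b^2]
    X = np.stack((np.ones_like(ar), ar, br, ar**2, ar*br, br**2), axis=1)
    pa, *_ = np.linalg.lstsq(X, da, rcond=None)   # parameters for da
    pb, *_ = np.linalg.lstsq(X, db, rcond=None)   # parameters for db
    return np.vstack((pa, pb))                    # same layout as pmodel above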
Example 13
def cam_sww16(data,
              dataw=None,
              Yb=20.0,
              Lw=400.0,
              Ccwb=None,
              relative=True,
              inputtype='xyz',
              direction='forward',
              parameters=None,
              cieobs='2006_10',
              match_to_conversionmatrix_to_cieobs=True):
    """
    A simple principled color appearance model based on a mapping of 
    the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW16_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz-to-lms
            | matrix to the one corresponding to that observer. If False: use 
            | the one set in parameters or _CAM_SWW16_PARAMETERS.
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
        | published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()
    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        match_to_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs
    )

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    #--------------------------------------------------------------------------
    # Setup default adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    data, dataw, camout, originalshape = _massage_input_and_init_output(
        data, dataw, inputtype=inputtype, direction=direction)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    Mxyz2lms = np.dot(
        np.diag(cLMS), Mxyz2lms
    )  # weight the xyz-to-lms conversion matrix with cLMS (cfr. stage 1 calculations)
    invMxyz2lms = np.linalg.inv(
        Mxyz2lms)  # Calculate the inverse lms-to-xyz conversion matrix
    MAab = np.array(
        [clambda, calpha, cbeta]
    )  # Create matrix with scale factors for L, M, S for quick matrix multiplications
    invMAab = np.linalg.inv(
        MAab)  # Pre-calculate its inverse to avoid repeat in loop.

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get absolute tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #-----------------------------------------------------------------------------
        # stage 1: calculate photon rates of stimulus, white point, and
        # adapting field: i.e. lmst, lmsw and lmsf
        #-----------------------------------------------------------------------------
        # Convert to white point l,m,s:
        lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']

        # Calculate adaptation field and convert to l,m,s:
        lmsf = (Yb / 100.0) * lmsw

        # Calculate lms of stimulus
        # or put adaptation lmsf in test field lmst for later use in inverse-mode (no xyz in 'inverse' mode!!!):
        lmst = (683.0 * np.dot(Mxyz2lms, xyzt.T).T /
                _CMF[cieobs]['K']) if (direction == 'forward') else lmsf

        #-----------------------------------------------------------------------------
        # stage 2: calculate cone outputs of stimulus lmstp
        #-----------------------------------------------------------------------------
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))  # stimulus test field
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))  # adaptation field

        # add adaptation field lms temporarily to lmstp for quick calculation
        lmstp = np.vstack((lmsfp, lmstp))

        #-----------------------------------------------------------------------------
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        #-----------------------------------------------------------------------------
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        #-----------------------------------------------------------------------------
        #  stage 4: calculate recoded nerve signals, alphapp, betapp:
        #-----------------------------------------------------------------------------
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        #-----------------------------------------------------------------------------
        #  stage 5: calculate conscious color perception:
        #-----------------------------------------------------------------------------
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  stage 5 continued but SPLIT IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #--------------------------------------
        # FORWARD MODE TO PERCEPTUAL SIGNALS:
        #--------------------------------------
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]

            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation:
                alph_out = alph_int - Ccwb[0] * alph_int[0]
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            # stack together and remove adaptation field from vertical stack
            # camout is an ndarray with perceptual signals:
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':

            # stack cognitive pre-adapted adaptation field signals (first on stack) together:
            #labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data
            #(contains model perceptual signals in inverse mode!!!):
            lstar_out, alph_out, bet_out = asplit(data[i])

            #------------------------------------------------------------------------
            #  Inverse stage 5: undo cortical white-balance:
            #------------------------------------------------------------------------
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                alph_int = alph_out + Ccwb[0] * alph_int[0]
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            #---------------------------------------------------------------------------
            #  Inverse stage 4: pre-adapted perceptual signals to recoded nerve signals:
            #---------------------------------------------------------------------------
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            #---------------------------------------------------------------------------
            #  Inverse stage 3: recoded nerve signals to optic nerve signals:
            #---------------------------------------------------------------------------
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            #---------------------------------------------------------------------------
            #  Inverse stage 2: optic nerve signals to cone outputs:
            #---------------------------------------------------------------------------
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            #---------------------------------------------------------------------------
            #  Inverse stage 1: cone outputs to photon rates:
            #---------------------------------------------------------------------------
            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            #---------------------------------------------------------------------------
            #  Photon rates to absolute or relative tristimulus values:
            #---------------------------------------------------------------------------
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)
            if relative:
                xyzt = (100 / Lw) * xyzt

            # store in same named variable as forward mode:
            camout[i] = xyzt

            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            #  END inverse mode
            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    return _massage_output_data_to_original_shape(camout, originalshape)
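
A forward-mode usage sketch (assuming luxpy is installed and that this function is exposed as luxpy.cam.cam_sww16):

import numpy as np
import luxpy as lx
xyz = np.array([[40., 50., 30.]])     # relative tristimulus values (Yw = 100)
lab = lx.cam.cam_sww16(xyz, dataw=None, Yb=20.0, Lw=400.0,
                       inputtype='xyz', direction='forward')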
Example 14
def cam18sl(data,
            datab=None,
            Lb=[100],
            fov=10.0,
            inputtype='xyz',
            direction='forward',
            outin='Q,aS,bS',
            parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used in case datab == None and the background is assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aS,bS' or str, optional
            | 'Q,aS,bS' (brightness and opponent signals for saturation)
            |  other options: 'Q,aM,bM' (colorfulness) 
            |                 (Note that 'Q,aW,bW' would lead to a Cartesian 
            |                  a,b-coordinate system centered at (1,0))
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)
        | * Default output was 'Q,aW,bW' prior to March 2020, but since this
        |   is an a,b Cartesian system centered on (1,0), the default output
        |   has been changed to 'Q,aS,bS'.

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """

    if parameters is None:
        parameters = _CAM18SL_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cieobs, k, naka, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF[cieobs]['M'])
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs=cieobs, relative=False)[..., 1:2]
        else:
            Lb = datab[..., 1:2]
    else:
        if isinstance(Lb, list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0]  # use wlr of stimulus data
        else:
            wlr = datab[0]  # use wlr of background data
    datar = np.vstack((wlr, np.ones(
        (Lb.shape[0], wlr.shape[0]))))  # create eew
    xyzr = spd_to_xyz(datar, cieobs=cieobs,
                      relative=False)  # get abs. tristimulus values
    datar[1:] = datar[1:] / xyzr[..., 1:2] * Lb

    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs=cieobs, relative=False)

    # prepare data and datab for loop over backgrounds:
    # make axis 1 of datab have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        datar = spd_to_xyz(datar, cieobs=cieobs,
                           relative=False)  # convert to xyz!!
        # make datab and datar have the same light-source dimension
        # (used to store different backgrounds) as data:
        if datab.shape[0] == 1:
            datab = np.repeat(datab, data.shape[1], axis=0)
            datar = np.repeat(datar, data.shape[1], axis=0)
    else:
        if datab.shape[0] == 2:
            datab = np.vstack(
                (datab[0], np.repeat(datab[1:], data.shape[1], axis=0)))
        if datar.shape[0] == 2:
            datar = np.vstack(
                (datar[0], np.repeat(datar[1:], data.shape[1], axis=0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    #-------------------------------------------------

    #initialize camout:
    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') and (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count; only forward mode accepts spectral input
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
        else:
            xyzb = datab[i:i + 1, :]
            xyzr = datar[i:i + 1, :]

        lmsb = np.dot(_CMF[cieobs]['M'], xyzb.T).T  # convert to l,m,s
        rgbb = (lmsb / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF[cieobs]['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF[cieobs]['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        # explicitly set EEW cone excitations equal at the same luminance values as the background:
        rgbr = np.ones(xyzr.shape) * Lb[i]

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs=cieobs, relative=False)
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF[cieobs]['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr / rgbb)[0])
            rgba = np.dot(Mcat, rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            a = ca * a
            b = cb * b

            # calculate colorfullness like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            # (last term is the contribution of the Helmholtz-Kohlrausch effect on brightness)
            Q = cA * (A + cHK[0] * M**cHK[1])

            # calculate saturation, s:
            s = M / Q
            S = s  # make extra variable, just in case 'S' is called

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aS', 'bS']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aS, bS))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                if 'W' in outin:
                    s = (((1.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q / cA - cHK[0] * M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)

            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'],
                                direction='inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb / rgbr)[0]), rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb / k * _CMF[cieobs]['K']
            xyz = np.dot(Mlms2xyz, lms.T).T

            camout[i] = xyz

    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[1] == 1:
        camout = np.squeeze(camout, axis=1)

    return camout
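
A round-trip usage sketch (assuming luxpy is installed and that this function is exposed as luxpy.cam.cam18sl):

import numpy as np
import luxpy as lx
xyz = np.array([[300., 250., 100.]])   # absolute CIE 2006 10° tristimulus values
qab = lx.cam.cam18sl(xyz, datab=None, Lb=[100], fov=10.0,
                     direction='forward', outin='Q,aS,bS')
xyz2 = lx.cam.cam18sl(qab, datab=None, Lb=[100], fov=10.0,
                      direction='inverse', outin='Q,aS,bS')  # ≈ xyz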
Example 15
def run(data,
        xyzw=None,
        outin='J,aM,bM',
        cieobs=_CIEOBS,
        conditions=None,
        forward=True,
        mcat='cat16',
        **kwargs):
    """ 
    Run the Jz,az,bz based color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values
            | None defaults to D65
        :cieobs:
            | _CIEOBS, optional
            | CMF set to use when calculating :xyzw: if this is None.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [forward mode]
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS)
        :mcat:
            | 'cat16', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat16'
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
     
    References:
        1. `Safdar, M., Cui, G., Kim,Y. J., and  Luo, M. R.(2017).
        Perceptually uniform color space for image signals including high dynamic range and wide gamut.
        Opt. Express, vol. 25, no. 13, pp. 15131–15151, Jun. 2017. 
        <https://www.opticsexpress.org/abstract.cfm?URI=oe-25-13-15131>`_
    
        2. `Safdar, M., Hardeberg, J., Cui, G., Kim, Y. J., and Luo, M. R.(2018).
        A Colour Appearance Model based on Jzazbz Colour Space, 
        26th Color and Imaging Conference (2018), Vancouver, Canada, November 12-16, 2018, pp96-101.
        <https://doi.org/10.2352/ISSN.2169-2629.2018.26.96>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    # Define cone/chromatic adaptation sensor space:
    if (mcat is None) | (mcat == 'cat16'):
        mcat = cat._MCATS['cat16']
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]
    invmcat = np.linalg.inv(mcat)

    #--------------------------------------------
    # Get white point of D65 for the chromatic adaptation transform (CAT):
    xyzw_d65 = np.array([[
        9.5047e+01, 1.0000e+02, 1.08883e+02
    ]]) if cieobs == '1931_2' else spd_to_xyz(_CIE_D65, cieobs=cieobs)

    #--------------------------------------------
    # Get default white point:
    if xyzw is None:
        xyzw = xyzw_d65.copy()

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    z = 1.48 + FLL * n**0.5

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Apply CAT to white point:
    xyzwc = cat.apply_vonkries1(xyzw,
                                xyzw,
                                xyzw_d65,
                                D=D,
                                mcat=mcat,
                                invmcat=invmcat)

    #--------------------------------------------
    # Get Iz,az,bz coordinates:
    iabzw = xyz_to_jabz(xyzwc, ztype='iabz')

    #===================================================================
    # STIMULUS transformations:

    #--------------------------------------------
    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    if forward:
        # Apply CAT to D65:
        xyzc = cat.apply_vonkries1(data,
                                   xyzw,
                                   xyzw_d65,
                                   D=D,
                                   mcat=mcat,
                                   invmcat=invmcat)

        # Get Iz,az,bz coordinates:
        iabz = xyz_to_jabz(xyzc, ztype='iabz')

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(iabz[..., 1], iabz[..., 2], htype='deg')
        et = 1.01 + np.cos(h * np.pi / 180 + 1.55)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        if ('J' in outin) | ('Q' in outin) | ('C' in outin) | (
                'M' in outin) | ('s' in outin) | ('aS' in outin) | (
                    'aC' in outin) | ('aM' in outin):
            J = 100.0 * (iabz[..., 0] / iabzw[..., 0])**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        if ('Q' in outin) | ('s' in outin) | ('aS' in outin):
            Q = 192.5 * (J / c) * (FL**0.64)

        #--------------------------------------------
        # calculate chroma, C:
        if ('C' in outin) | ('M' in outin) | ('s' in outin) | (
                'aS' in outin) | ('aC' in outin) | ('aM' in outin):
            C = ((1 / n)**0.074) * (
                (iabz[..., 1]**2.0 + iabz[..., 2]**2.0)**0.37) * (et**0.067)

        #--------------------------------------------
        # calculate colorfulness, M:
        if ('M' in outin) | ('s' in outin) | ('aM' in outin) | ('aS' in outin):
            M = 1.42 * C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        if ('s' in outin) | ('aS' in outin):
            s = 100.0 * (M / Q)**0.5

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:
        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = c * (Q / (192.5 * FL**0.64))  # inverse of Q = 192.5 * (J / c) * FL**0.64
        else:
            raise Exception(
                'No lightness or brightness values in data[...,0]. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('aS' in outin):
            Q = 192.5 * (J / c) * (FL**0.64)
            M = Q * (MCs / 100.0)**2.0
            C = M / (1.42 * FL**0.25)

        if ('aM' in outin):  # convert M to C:
            C = MCs / (1.42 * FL**0.25)

        if ('aC' in outin):
            C = MCs

        #--------------------------------------------
        # calculate achromatic signal, Iz:
        Iz = iabzw[..., 0] * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = 1.01 + np.cos(h * np.pi / 180 + 1.55)

        #--------------------------------------------
        # calculate t (=a**2+b**2) from C:
        t = (n**0.074 * C * (1 / et)**0.067)**(1 / 0.37)

        #--------------------------------------------
        # Calculate az, bz (sign-safe; a tan-based inversion would lose the
        # sign of az for hues in (90°, 270°)):
        az = (t**0.5) * np.cos(h * np.pi / 180)
        bz = (t**0.5) * np.sin(h * np.pi / 180)

        #--------------------------------------------
        # join values and convert to xyz:
        xyzc = jabz_to_xyz(ajoin((Iz, az, bz)), ztype='iabz')

        #-------------------------------------------
        # Apply CAT from D65:
        xyz = cat.apply_vonkries1(xyzc,
                                  xyzw_d65,
                                  xyzw,
                                  D=D,
                                  mcat=mcat,
                                  invmcat=invmcat)

        return xyz
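
# Minimal usage sketch (hypothetical values; assumes numpy as np is available):
# round-trip a sample through the Jzazbz-based run() above, using the
# default D65 white point and default viewing conditions.
import numpy as np
xyz = np.array([[19.01, 20.0, 21.78]])                         # hypothetical sample
jab = run(xyz, xyzw=None, outin='J,aM,bM', forward=True)       # xyz -> J,aM,bM
xyz_rec = run(jab, xyzw=None, outin='J,aM,bM', forward=False)  # J,aM,bM -> xyz
print(xyz, xyz_rec)  # should agree up to numerical error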
Ejemplo n.º 16
0
def spd_to_ies_tm30_metrics(SPD, cri_type = None, \
                            hbins = 16, start_hue = 0.0,\
                            scalef = 100, \
                            vf_model_type = _VF_MODEL_TYPE, \
                            vf_pcolorshift = _VF_PCOLORSHIFT,\
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculates IES TM30 metrics from spectral data.      
      
      Args:
        :SPD:
            | numpy.ndarray with spectral data 
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Non-None values of :hbins:, :start_hue: and :scalef: overwrite 
            | the corresponding input in cri_type['rg_pars'].
        :hbins:
            | 16, optional
            | Int with number of hue bins or numpy.ndarray with sorted hue bin centers (°).
        :start_hue: 
            | 0.0, optional
            | Hue angle (°) at which the first hue bin starts.
        :scalef:
            | 100, optional
            | Scale factor for reference circle.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves, OR by calculating
            | dCoverC and dH at, respectively, 5 and 6 hues. :VF_pcolorshift: 
            | specifies these hues and the chroma level.
        :scale_vf_chroma_to_sample_chroma: 
            | False, optional
            | Scale chroma of reference and test vf fields such that average of 
            | binned reference chroma equals that of the binned sample chroma
            | before calculating hue bin metrics.
            
    Returns:
        :data: 
            | dict with color rendering data:
            | - 'SPD'  : ndarray test SPDs
            | - 'bjabt': ndarray with binned jab data under test SPDs
            | - 'bjabr': ndarray with binned jab data under reference SPDs
            | - 'jabti': ndarray with individual jab data under test SPDs (scaled such that bjabr are on a circle)
            | - 'jabri': ndarray with individual jab data under reference SPDs (scaled such that bjabr are on a circle)
            | - 'hbinnr': ndarray with the hue bin number the samples belong to.
            | - 'cct'  : ndarray with CCT of test SPD
            | - 'duv'  : ndarray with distance to blackbody locus of test SPD
            | - 'Rf'   : ndarray with general color fidelity indices
            | - 'Rg'   : ndarray with gamut area indices
            | - 'Rfi'  : ndarray with specific color fidelity indices
            | - 'Rfhi' : ndarray with local (hue binned) fidelity indices
            | - 'Rcshi': ndarray with local chroma shifts indices
            | - 'Rhshi': ndarray with local hue shifts indices
            | - 'Rt'  : ndarray with general metameric uncertainty index Rt
            | - 'Rti' : ndarray with specific metameric uncertainty indices Rti
            | - 'Rfhi_vf' : ndarray with local (hue binned) fidelity indices 
            |               obtained from VF model predictions at color space
            |               pixel coordinates
            | - 'Rcshi_vf': ndarray with local chroma shifts indices 
            |               (same as above)
            | - 'Rhshi_vf': ndarray with local hue shifts indices 
            |               (same as above)
    """
    if cri_type is None:
        cri_type = 'iesrf'

    #Calculate color rendering measures for SPDs in data:
    out = 'Rf,Rg,cct,duv,Rfi,jabt,jabr,Rfhi,Rcshi,Rhshi,cri_type'
    if isinstance(cri_type, str):  # get dict
        cri_type = copy.deepcopy(_CRI_DEFAULTS[cri_type])
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    Rf, Rg, cct, duv, Rfi, jabt, jabr, Rfhi, Rcshi, Rhshi, cri_type = spd_to_cri(
        SPD, cri_type=cri_type, out=out)
    rg_pars = cri_type['rg_pars']

    #Calculate Metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(SPD,
                                 cri_type=cri_type,
                                 model_type=vf_model_type,
                                 cspace=cri_type['cspace'],
                                 sampleset=eval(cri_type['sampleset']),
                                 pool=False,
                                 pcolorshift=vf_pcolorshift,
                                 vfcolor=0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti']
                    for i in range(len(dataVF))][0])

    # Get normalized and sliced sample data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
        rg_pars[x] for x in sorted(rg_pars.keys())
    ]
    normalized_chroma_ref = scalef
    # np.sqrt((jabr[...,1]**2 + jabr[...,2]**2)).mean(axis = 0).mean()

    if scale_vf_chroma_to_sample_chroma == True:
        normalize_gamut = False
        bjabt, bjabr = gamut_slicer(
            jabt,
            jabr,
            out='jabt,jabr',
            nhbins=nhbins,
            start_hue=start_hue,
            normalize_gamut=normalize_gamut,
            normalized_chroma_ref=normalized_chroma_ref,
            close_gamut=True)
        Cr_s = (np.sqrt(bjabr[:-1, ..., 1]**2 + bjabr[:-1, ..., 2]**2)).mean(
            axis=0)  # for rescaling vector field average reference chroma

    normalize_gamut = True  #(for plotting)
    bjabt, bjabr, binnrs, jabti, jabri = gamut_slicer(
        jabt,
        jabr,
        out='jabt,jabr,binnr,jabti,jabri',
        nhbins=nhbins,
        start_hue=start_hue,
        normalize_gamut=normalize_gamut,
        normalized_chroma_ref=normalized_chroma_ref,
        close_gamut=True)

    Rfhi_vf = np.empty(Rfhi.shape)
    Rcshi_vf = np.empty(Rcshi.shape)
    Rhshi_vf = np.empty(Rhshi.shape)
    for i in range(cct.shape[0]):

        # Get normalized and sliced VF data for hue specific metrics:
        vfjabt = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axt'].shape),
             dataVF[i]['fielddata']['vectorfield']['axt'],
             dataVF[i]['fielddata']['vectorfield']['bxt']))
        vfjabr = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axr'].shape),
             dataVF[i]['fielddata']['vectorfield']['axr'],
             dataVF[i]['fielddata']['vectorfield']['bxr']))
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())
        ]
        vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
            vfjabt,
            vfjabr,
            out='jabt,jabr,DEi',
            nhbins=nhbins,
            start_hue=start_hue,
            normalize_gamut=normalize_gamut,
            normalized_chroma_ref=normalized_chroma_ref,
            close_gamut=False)

        if scale_vf_chroma_to_sample_chroma == True:
            #rescale vfbjabt and vfbjabr to same chroma level as bjabr.
            Cr_vfb = np.sqrt(vfbjabr[..., 1]**2 + vfbjabr[..., 2]**2)
            Cr_vf = np.sqrt(vfjabr[..., 1]**2 + vfjabr[..., 2]**2)
            hr_vf = np.arctan2(vfjabr[..., 2], vfjabr[..., 1])
            Ct_vf = np.sqrt(vfjabt[..., 1]**2 + vfjabt[..., 2]**2)
            ht_vf = np.arctan2(vfjabt[..., 2], vfjabt[..., 1])
            fC = Cr_s.mean() / Cr_vfb.mean()
            vfjabr[..., 1] = fC * Cr_vf * np.cos(hr_vf)
            vfjabr[..., 2] = fC * Cr_vf * np.sin(hr_vf)
            vfjabt[..., 1] = fC * Ct_vf * np.cos(ht_vf)
            vfjabt[..., 2] = fC * Ct_vf * np.sin(ht_vf)
            vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
                vfjabt,
                vfjabr,
                out='jabt,jabr,DEi',
                nhbins=nhbins,
                start_hue=start_hue,
                normalize_gamut=normalize_gamut,
                normalized_chroma_ref=normalized_chroma_ref,
                close_gamut=False)

        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        vfRfhi, vfRcshi, vfRhshi = jab_to_rhi(
            jabt=vfbjabt,
            jabr=vfbjabr,
            DEi=vfbDEi,
            cri_type=cri_type,
            scale_factor=scale_factor,
            scale_fcn=scale_fcn,
            use_bin_avg_DEi=True)

        Rfhi_vf[:, i:i + 1] = vfRfhi
        Rhshi_vf[:, i:i + 1] = vfRhshi
        Rcshi_vf[:, i:i + 1] = vfRcshi

    # Create dict with CRI info:
    data = {'SPD' : SPD, 'cct' : cct, 'duv' : duv, 'bjabt' : bjabt, 'bjabr' : bjabr,\
            'jabti' : jabti, 'jabri' : jabri, 'hbinnr' : binnrs,\
           'Rf' : Rf, 'Rg' : Rg, 'Rfi' : Rfi, 'Rfhi' : Rfhi, 'Rcshi' : Rcshi, 'Rhshi' : Rhshi, \
           'Rt' : Rt, 'Rti' : Rti, 'Rfhi_vf' : Rfhi_vf, 'Rcshi_vf' : Rcshi_vf, 'Rhshi_vf' : Rhshi_vf, \
           'dataVF' : dataVF, 'cri_type' : cri_type}
    return data
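
# Minimal usage sketch (hypothetical SPD; assumes numpy as np is available):
# compute TM30 metrics for a smooth Gaussian-shaped test SPD
# (first row = wavelengths, second row = spectral values).
import numpy as np
wl = np.arange(360., 831., 1.)
spd = np.vstack((wl, np.exp(-0.5 * ((wl - 560.) / 100.)**2)))
tm30 = spd_to_ies_tm30_metrics(spd, hbins=16, start_hue=0.0, scalef=100)
print(tm30['Rf'], tm30['Rg'], tm30['cct'], tm30['duv'])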
Ejemplo n.º 17
0
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
            |  or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral

    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms /
                   _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = A + cHK[0] * M**cHK[
                1]  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
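
# Minimal usage sketch (hypothetical stimulus; assumes numpy as np is available):
# forward and inverse cam15u for a self-luminous stimulus
# (CIE 2006 10° xyz input, default 10° field of view).
import numpy as np
xyz = np.array([[100.0, 100.0, 100.0]])
qab = cam15u(xyz, fov=10.0, direction='forward', outin='Q,aW,bW')
xyz_rec = cam15u(qab, fov=10.0, direction='inverse', outin='Q,aW,bW')
print(qab, xyz_rec)  # xyz_rec should reproduce xyz up to numerical error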
Ejemplo n.º 18
0
def run(data,
        xyzw=None,
        outin='J,aM,bM',
        cieobs=_CIEOBS,
        conditions=None,
        forward=True,
        mcat='cat02',
        **kwargs):
    """ 
    Run the ZCAM (Jz,az,bz based) color appearance model in forward or inverse mode.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values
            | None defaults to D65
        :cieobs:
            | _CIEOBS, optional
            | CMF set to use when calculating :xyzw: if this is None.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run the CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            |               'Wz': whiteness, 'Kz': blackness, 'Sz': saturation, 'Vz': vividness
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02'
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
     
    References:
        1. `Safdar, M., Cui, G., Kim,Y. J., and  Luo, M. R.(2017).
        Perceptually uniform color space for image signals including high dynamic range and wide gamut.
        Opt. Express, vol. 25, no. 13, pp. 15131–15151, Jun. 2017. 
        <https://www.opticsexpress.org/abstract.cfm?URI=oe-25-13-15131>`_
    
        2. `Safdar, M., Hardeberg, J., Cui, G., Kim, Y. J., and Luo, M. R.(2018).
        A Colour Appearance Model based on Jzazbz Colour Space, 
        26th Color and Imaging Conference (2018), Vancouver, Canada, November 12-16, 2018, pp96-101.
        <https://doi.org/10.2352/ISSN.2169-2629.2018.26.96>`_
        
        3. Safdar, M., Hardeberg, J.Y., Luo, M.R. (2021) 
        "ZCAM, a psychophysical model for colour appearance prediction", 
        Optics Express.
    """
    print(
        "WARNING: ZCAM is as yet unpublished and under development, so parameter values might change! (07 Oct, 2020)"
    )
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    # Define cone/chromatic adaptation sensor space:
    if (mcat is None):
        mcat = cat._MCATS['cat02']
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]
    invmcat = np.linalg.inv(mcat)

    #--------------------------------------------
    # Get white point of D65 for the chromatic adaptation transform (CAT):
    xyzw_d65 = np.array([[
        9.5047e+01, 1.0000e+02, 1.08883e+02
    ]]) if cieobs == '1931_2' else spd_to_xyz(_CIE_D65, cieobs=cieobs)

    #--------------------------------------------
    # Get default white point:
    if xyzw is None:
        xyzw = xyzw_d65.copy()

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1].T
    FL = 0.171 * La**(1 / 3) * (1 - np.exp(-48 / 9 * La)
                                )  # luminance adaptation factor
    n = Yb / Yw
    Fb = 1.045 + 1.46 * n**0.5  # background factor
    Fs = c  # surround factor

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Apply CAT to white point:
    xyzwc = cat.apply_vonkries2(xyzw,
                                xyzw1=xyzw,
                                xyzw2=xyzw_d65,
                                D=D,
                                mcat=mcat,
                                invmcat=invmcat,
                                use_Yw=True)

    #--------------------------------------------
    # Get Iz,az,bz coordinates:
    iabzw = xyz_to_jabz(xyzwc, ztype='iabz', use_zcam_parameters=True)

    # Get brightness of white point:
    Qw = 925 * iabzw[..., 0]**1.17 * (FL / Fs)**0.5

    #===================================================================
    # STIMULUS transformations:

    #--------------------------------------------
    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    if forward:
        # Apply CAT to D65:
        xyzc = cat.apply_vonkries2(data,
                                   xyzw1=xyzw,
                                   xyzw2=xyzw_d65,
                                   D=D,
                                   mcat=mcat,
                                   invmcat=invmcat,
                                   use_Yw=True)

        # Get Iz,az,bz coordinates:
        iabz = xyz_to_jabz(xyzc, ztype='iabz', use_zcam_parameters=True)

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(iabz[..., 1], iabz[..., 2], htype='deg')
        ez = 1.014 + np.cos((h + 89.038) * np.pi / 180)
        # ez = 1.014 + np.cos((h*np.pi/180) + 89.038)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate brightness, Q:
        Q = 925 * iabz[..., 0]**1.17 * (FL / Fs)**0.5

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (Q / Qw)**(Fs * Fb)

        #--------------------------------------------
        # calculate colorfulness, M:
        M = 50 * ((iabz[..., 1]**2.0 + iabz[..., 2]**2.0)**
                  0.368) * (ez**0.068) * (FL**0.2) / ((iabzw[..., 0]**2.52) *
                                                      (Fb**0.3))

        #--------------------------------------------
        # calculate chroma, C:
        C = 100 * M / Qw

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)
        S = s  # make extra variable, just in case 'S' is called

        #--------------------------------------------
        # calculate whiteness, W:
        if ('Wz' in outin) | ('aWz' in outin):
            Wz = 100 - 0.68 * ((100 - J)**2 + C**2)**0.5

        #--------------------------------------------
        # calculate blackness, K:
        if ('Kz' in outin) | ('aKz' in outin):
            Kz = 100 - 0.82 * (J**2 + C**2)**0.5

        #--------------------------------------------
        # calculate saturation, S:
        if ('Sz' in outin) | ('aSz' in outin):
            Sz = 8 + 0.5 * ((J - 55)**2 + C**2)**0.5

        #--------------------------------------------
        # calculate vividness, V:
        if ('Vz' in outin) | ('aVz' in outin):
            Vz = 8 + 0.4 * ((J - 70)**2 + C**2)**0.5

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:
        #--------------------------------------------
        # Get Lightness J and brightness Q from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
            Q = Qw * (J / 100)**(1 / (Fs * Fb))
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / Qw)**(Fs * Fb)
        else:
            raise Exception(
                'No lightness or brightness values in data[...,0]. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        # calculate achromatic signal, Iz:
        Iz = (Qw / 925 * ((J / 100)**(1 / (Fs * Fb))) * (Fs / FL)**0.5)**(1 /
                                                                          1.17)

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('aS' in outin) | ('S' in outin):
            Q = Qw * (J / 100)**(1 / (Fs * Fb))
            M = Q * (MCs / 100.0)
            C = 100 * M / Qw

        if ('aM' in outin) | ('M' in outin):  # convert M to C:
            C = 100 * MCs / Qw

        if ('aC' in outin) | ('C' in outin):  # C is given directly:
            C = MCs

        if ('Wz' in outin) | ('aWz' in outin):  #whiteness
            C = ((100 / 68 * (100 - MCs))**2 - (J - 100)**2)**0.5

        if ('Kz' in outin) | ('aKz' in outin):  # blackness
            C = ((100 / 82 * (100 - MCs))**2 - (J)**2)**0.5

        if ('Sz' in outin) | ('aSz' in outin):  # saturation
            C = ((10 / 5 * (MCs - 8))**2 - (J - 55)**2)**0.5

        if ('Vz' in outin) | ('aVz' in outin):  # vividness
            C = ((10 / 4 * (MCs - 8))**2 - (J - 70)**2)**0.5

        #--------------------------------------------
        # Calculate colorfulness, M:
        M = Qw * C / 100

        #--------------------------------------------
        # calculate eccentricity factor, et:
        # ez = 1.014 + np.cos(h*np.pi/180 + 89.038)
        ez = 1.014 + np.cos((h + 89.038) * np.pi / 180)

        #--------------------------------------------
        # calculate t (=sqrt(a**2+b**2)) from M:
        t = (((M / 50) * (iabzw[..., 0]**2.52) * (Fb**0.3)) /
             ((ez**0.068) * (FL**0.2)))**(1 / 0.368 / 2)

        #--------------------------------------------
        # Calculate az, bz:
        az = t * np.cos(h * np.pi / 180)
        bz = t * np.sin(h * np.pi / 180)

        #--------------------------------------------
        # join values and convert to xyz:
        xyzc = jabz_to_xyz(ajoin((Iz, az, bz)),
                           ztype='iabz',
                           use_zcam_parameters=True)

        #-------------------------------------------
        # Apply CAT from D65:
        xyz = cat.apply_vonkries2(xyzc,
                                  xyzw_d65,
                                  xyzw,
                                  D=D,
                                  mcat=mcat,
                                  invmcat=invmcat,
                                  use_Yw=True)

        return xyz
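
# Minimal usage sketch (hypothetical values; assumes numpy as np is available):
# round-trip a sample through the ZCAM run() above, using the default
# D65 white point and default viewing conditions.
import numpy as np
xyz = np.array([[19.01, 20.0, 21.78]])                         # hypothetical sample
jab = run(xyz, xyzw=None, outin='J,aM,bM', forward=True)       # xyz -> J,aM,bM
xyz_rec = run(jab, xyzw=None, outin='J,aM,bM', forward=False)  # J,aM,bM -> xyz
print(xyz, xyz_rec)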
Ejemplo n.º 19
0
def _tm30_process_spd(spd, cri_type = 'ies-tm30',**kwargs):
    """
    Calculate all required parameters for plotting from spd using cri.spd_to_cri()
    
    Args:
        :spd:
            | ndarray or dict
            | If ndarray: single spectral power distribution.
            | If dict: dictionary with pre-computed parameters.
            |  required keys:
            |   'Rf','Rg','cct','duv','Sr','cri_type','xyzri','xyzrw',
            |   'hbinnrs','Rfi','Rfhi','Rcshi','Rhshi',
            |   'jabt_binned','jabr_binned',
            |   'nhbins','start_hue','normalize_gamut','normalized_chroma_ref'
            | see cri.spd_to_cri() for more info on parameters.
        :cri_type:
            | _CRI_TYPE_DEFAULT or str or dict, optional
            |   -'str: specifies dict with default cri model parameters 
            |     (for supported types, see luxpy.cri._CRI_DEFAULTS['cri_types'])
            |   - dict: user defined model parameters 
            |     (see e.g. luxpy.cri._CRI_DEFAULTS['cierf'] 
            |     for required structure)
            | Note that any non-None input arguments (in kwargs) 
            | to the function will override default values in cri_type dict.
        :kwargs:
            | Additional optional keyword arguments, 
            | the same as in cri.spd_to_cri()
            
    Returns:
        :data:
            | dictionary with required parameters for plotting functions.      
    """
    out = 'Rf,Rg,cct,duv,Sr,cri_type,xyzri,xyzrw,binnrs,Rfi,Rfhi,Rcshi,Rhshi,jabt_binned,jabr_binned,nhbins,start_hue,normalize_gamut,normalized_chroma_ref'
    if not isinstance(spd,dict):
        tpl = spd_to_cri(spd, cri_type = cri_type, out = out, **kwargs)
        data = {'spd':spd}
        for i,key in enumerate(out.split(',')):
            if key == 'normalized_chroma_ref': key = 'scalef' # rename
            if key == 'binnrs': key = 'hbinnrs' # rename
            data[key] = tpl[i]
            
        # Normalize chroma to scalef and fit ellipse to gamut:
        scalef = data['scalef']
        jabt = data['jabt_binned'].copy()
        jabr = data['jabr_binned'].copy()
        Cr = (jabr[...,1]**2 + jabr[...,2]**2)**0.5
        Ct = ((jabt[...,1]**2 + jabt[...,2]**2)**0.5)/Cr*scalef
        ht = math.positive_arctan(jabt[...,1],jabt[...,2], htype = 'rad')
        hr = math.positive_arctan(jabr[...,1],jabr[...,2], htype = 'rad')
        jabt[...,1] = Ct*np.cos(ht)
        jabt[...,2] = Ct*np.sin(ht)
        jabr[...,1] = scalef*np.cos(hr)
        jabr[...,2] = scalef*np.sin(hr) 
        ecc = np.ones((1,jabt.shape[1]))*np.nan
        theta = np.ones((1,jabt.shape[1]))*np.nan
        v = np.ones((jabt.shape[1],5))*np.nan
        data['hue_bin_data'] = {'jabt_hj_closed':data['jabt_binned'],'jabr_hj_closed':data['jabr_binned'],
                                'jabtn_hj_closed':jabt,'jabrn_hj_closed':jabr}
        for i in range(jabt.shape[1]):
            try:
                v[i,:] = math.fit_ellipse(jabt[:,i,1:])
                a,b = v[i,0], v[i,1] # major and minor ellipse axes
                ecc[0,i] = a/b
                theta[0,i] = np.rad2deg(v[i,4]) # orientation angle
                if theta[0,i]>180: theta[0,i] -= 180
            except Exception:
                v[i,:] = np.nan*np.ones((1,5))
                ecc[0,i] = np.nan
                theta[0,i] = np.nan # orientation angle
        data['gamut_ellipse_fit'] = {'v':v, 'a/b':ecc,'thetad': theta}
    else:
        data = spd
    return data
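
# Minimal usage sketch (hypothetical SPD; assumes numpy as np is available):
# process an SPD once, then reuse the returned dict; a dict input
# short-circuits the spd_to_cri() recomputation.
import numpy as np
wl = np.arange(360., 831., 1.)
spd = np.vstack((wl, np.exp(-0.5 * ((wl - 560.) / 100.)**2)))
data = _tm30_process_spd(spd, cri_type='ies-tm30')
data = _tm30_process_spd(data)  # dict input is returned as-is
print(data['Rf'], data['Rg'], data['gamut_ellipse_fit']['a/b'])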
Ejemplo n.º 20
0
def plot_tm30_cvg(spd, cri_type = 'ies-tm30',  
                  gamut_line_color = 'r',
                  gamut_line_style = '-',
                  gamut_line_marker = 'o',
                  gamut_line_label = None,
                  plot_vectors = True,
                  plot_index_values = True,
                  axh = None, axtype = 'cart',
                  **kwargs):
    """
    Plot TM30 Color Vector Graphic (CVG).
    
    Args:
        :spd:
            | ndarray or dict
            | If ndarray: single spectral power distribution.
            | If dict: dictionary with pre-computed parameters (using _tm30_process_spd()).
            |  required keys:
            |   'Rf','Rg','cct','duv','Sr','cri_type','xyzri','xyzrw',
            |   'hbinnrs','Rfi','Rfhi','Rcshi','Rhshi',
            |   'jabt_binned','jabr_binned',
            |   'nhbins','start_hue','normalize_gamut','normalized_chroma_ref'
            | see cri.spd_to_cri() for more info on parameters.
        :cri_type:
            | _CRI_TYPE_DEFAULT or str or dict, optional
            |   -'str: specifies dict with default cri model parameters 
            |     (for supported types, see luxpy.cri._CRI_DEFAULTS['cri_types'])
            |   - dict: user defined model parameters 
            |     (see e.g. luxpy.cri._CRI_DEFAULTS['cierf'] 
            |     for required structure)
            | Note that any non-None input arguments (in kwargs) 
            | to the function will override default values in cri_type dict.
        :gamut_line_color:
            | 'r', optional
            | Plotting color for the line connecting the 
            | average test chromaticity in the hue bins.
        :gamut_line_style:
            | '-', optional
            | Plotting line style for the line connecting the 
            | average test chromaticity in the hue bins.
        :gamut_line_marker:
            | 'o', optional
            | Markers to plot the test color gamut points for each hue bin 
            | (only used when plot_vectors = False).
        :gamut_line_label:
            | None, optional
            | Label for gamut line. (only used when plot_vectors = False).
        :plot_vectors:
            | True, optional
            | Plot color shift vectors in CVG (True) or not (False).
        :plot_index_values:
            | True, optional
            | Print Rf, Rg, CCT and Duv in corners of CVG (True) or not (False).
        :axh: 
            | None, optional
            | If None: create new figure with single axes, else plot on specified axes. 
        :axtype: 
            | 'cart' (or 'polar'), optional
            | Make Cartesian (default) or polar plot.    
        :kwargs:
            | Additional optional keyword arguments, 
            | the same as in cri.spd_to_cri()
            
    Returns:
        :axh: 
            | handle to figure axes.
        :data:
            | dictionary with required parameters for plotting functions. 
    """

    data = _tm30_process_spd(spd, cri_type = cri_type, **kwargs)
    
    # Normalize chroma to scalef:
    scalef = data['scalef']
    jabt = data['jabt_binned'][:,0,:]
    jabr = data['jabr_binned'][:,0,:]
    Cr = (jabr[...,1]**2 + jabr[...,2]**2)**0.5
    Ct = ((jabt[...,1]**2 + jabt[...,2]**2)**0.5)/Cr*scalef
    ht = math.positive_arctan(jabt[...,1],jabt[...,2], htype = 'rad')
    hr = math.positive_arctan(jabr[...,1],jabr[...,2], htype = 'rad')
    jabt[...,1] = Ct*np.cos(ht)
    jabt[...,2] = Ct*np.sin(ht)
    jabr[...,1] = scalef*np.cos(hr)
    jabr[...,2] = scalef*np.sin(hr)
    
    # Plot color vector graphic
    _, axh, _ = plot_ColorVectorGraphic(jabt = jabt, jabr = jabr, 
                                        hbins = data['nhbins'], start_hue = data['start_hue'], 
                                        bin_labels = '',
                                        gamut_line_color = gamut_line_color,
                                        gamut_line_style = gamut_line_style,
                                        gamut_line_marker = gamut_line_marker,
                                        gamut_line_label = gamut_line_label,
                                        plot_vectors = plot_vectors,
                                        ax = axh, axtype = axtype,
                                        force_CVG_layout = True,
                                        plot_axis_labels = False)
    
    # Print Rf, Rg, CCT and Duv in plot:
    if plot_index_values == True:
        Rf, Rg, cct, duv = data['Rf'], data['Rg'], data['cct'], data['duv']
        axh.text(-1.30*scalef,1.30*scalef,'{:1.0f}'.format(Rf[0,0]),fontsize = 15, fontweight='bold', horizontalalignment='center',verticalalignment='center',color = 'k')
        axh.text(-1.33*scalef,1.12*scalef,'$R_f$',fontsize = 13, style='italic', horizontalalignment='center',verticalalignment='center',color = 'k')
        axh.text(1.30*scalef,1.30*scalef,'{:1.0f}'.format(Rg[0,0]),fontsize = 15, fontweight='bold', horizontalalignment='center',verticalalignment='center',color = 'k')
        axh.text(1.33*scalef,1.12*scalef,'$R_g$',fontsize = 13, style='italic', horizontalalignment='center',verticalalignment='center',color = 'k')
        axh.text(-1.43*scalef,-1.45*scalef,'{:1.0f}'.format(cct[0,0]),fontsize = 15, fontweight='bold', horizontalalignment='left',verticalalignment='bottom',color = 'k')
        axh.text(-1.43*scalef,-1.25*scalef,'$CCT$',fontsize = 13, style='italic', horizontalalignment='left',verticalalignment='bottom',color = 'k')
        axh.text(1.43*scalef,-1.45*scalef,'{:1.4f}'.format(duv[0,0]),fontsize = 15, fontweight='bold', horizontalalignment='right',verticalalignment='bottom',color = 'k')
        axh.text(1.43*scalef,-1.25*scalef,'$D_{uv}$',fontsize = 13, style='italic', horizontalalignment='right',verticalalignment='bottom',color = 'k')
    axh.set_xticks([])
    axh.set_yticks([])
    return axh, data 
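
# Minimal usage sketch (hypothetical SPD; assumes numpy as np is available):
# draw the Color Vector Graphic on a new Cartesian axes, with shift
# vectors plotted and Rf, Rg, CCT and Duv printed in the corners.
import numpy as np
wl = np.arange(360., 831., 1.)
spd = np.vstack((wl, np.exp(-0.5 * ((wl - 560.) / 100.)**2)))
axh, data = plot_tm30_cvg(spd, plot_vectors=True, plot_index_values=True)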
Ejemplo n.º 21
0
def rotate(v,
           vecA=None,
           vecB=None,
           rot_axis=None,
           rot_angle=None,
           deg=True,
           norm=False):
    """
    Rotate vector around rotation axis over angle.
    
    Args:
        :v: 
            | vec3 vector.
        :rot_axis:
            | None, optional
            | vec3 vector specifying rotation axis.
        :rot_angle:
            | None, optional
            | float or int rotation angle.
        :deg:
            | True, optional
            | If False, rot_angle is in radians.
        :vecA:, :vecB:
            | None, optional
            | vec3 vectors defining a normal direction (cross(vecA, vecB)) around 
            | which to rotate the vector in :v:. If rot_angle is None: rotation
            | angle is defined by the in-plane angle between vecA and vecB.
        :norm:
            | False, optional
            | Normalize rotated vector.
        
    """

    if (vecA is not None) & (vecB is not None):
        rot_axis = cross(vecA, vecB)  # rotation axis
        if rot_angle is None:
            costheta = dot(vecA, vecB, norm=True)  # rotation angle
            costheta[costheta > 1] = 1
            costheta[costheta < -1] = -1
            rot_angle = np.arccos(costheta)
    elif (rot_angle is not None):
        if deg == True:
            rot_angle = np.deg2rad(rot_angle)
    else:
        raise Exception('vec3.rotate: insufficient not-None input args.')

    # normalize rot_axis
    rot_axis = rot_axis / rot_axis.norm()

    # Create short-hand variables:
    u = rot_axis
    cost = np.cos(rot_angle)
    sint = np.sin(rot_angle)

    # Setup rotation matrix:
    R = np.asarray([[np.zeros(u.x.shape) for j in range(3)] for i in range(3)])
    R[0, 0] = cost + u.x * u.x * (1 - cost)
    R[0, 1] = u.x * u.y * (1 - cost) - u.z * sint
    R[0, 2] = u.x * u.z * (1 - cost) + u.y * sint
    R[1, 0] = u.x * u.y * (1 - cost) + u.z * sint
    R[1, 1] = cost + u.y * u.y * (1 - cost)
    R[1, 2] = u.y * u.z * (1 - cost) - u.x * sint
    R[2, 0] = u.z * u.x * (1 - cost) - u.y * sint
    R[2, 1] = u.z * u.y * (1 - cost) + u.x * sint
    R[2, 2] = cost + u.z * u.z * (1 - cost)

    # apply rotation matrix R to vector v (dot product of R with v):
    v3 = vec3(R[0,0]*v.x + R[0,1]*v.y + R[0,2]*v.z, \
                R[1,0]*v.x + R[1,1]*v.y + R[1,2]*v.z, \
                R[2,0]*v.x + R[2,1]*v.y + R[2,2]*v.z)
    if norm == True:
        v3 = v3 / v3.norm()
    return v3
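
# Minimal usage sketch: rotate the x unit vector 90° around the z axis;
# the result should be approximately (0, 1, 0). Assumes the module's vec3
# type stores its x, y, z components as ndarrays and provides .norm().
import numpy as np
v = vec3(np.atleast_1d(1.0), np.atleast_1d(0.0), np.atleast_1d(0.0))
zaxis = vec3(np.atleast_1d(0.0), np.atleast_1d(0.0), np.atleast_1d(1.0))
v90 = rotate(v, rot_axis=zaxis, rot_angle=90.0, deg=True)
print(v90.x, v90.y, v90.z)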
Ejemplo n.º 22
0
def _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=None):
    """ 
    Calculate CAM02-UCS J'a'b' coordinates from xyz tristimulus values of sample and white point.
    
    Args:
        :xyz:
            | ndarray with sample tristimulus values
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
    
    Returns:
        :jab:
            | ndarray with J'a'b' coordinates.
    """
    #--------------------------------------------
    # Get/ set conditions parameters:
    if conditions is not None:
        surround_parameters = {
            'surrounds': ['avg', 'dim', 'dark'],
            'avg': {
                'c': 0.69,
                'Nc': 1.0,
                'F': 1.0,
                'FLL': 1.0
            },
            'dim': {
                'c': 0.59,
                'Nc': 0.9,
                'F': 0.9,
                'FLL': 1.0
            },
            'dark': {
                'c': 0.525,
                'Nc': 0.8,
                'F': 0.8,
                'FLL': 1.0
            }
        }
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69

    #--------------------------------------------
    # Define sensor space and cat matrices:
    mhpe = np.array([[0.38971, 0.68898, -0.07868], [-0.22981, 1.1834, 0.04641],
                     [0.0, 0.0, 1.0]
                     ])  # Hunt-Pointer-Estevez sensors (cone fundamentals)

    mcat = np.array([[0.7328, 0.4296, -0.1624], [-0.7036, 1.6975, 0.0061],
                     [0.0030, 0.0136, 0.9834]])  # CAT02 sensor space

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1:2].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5

    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #--------------------------------------------
    # transform from xyz, xyzw to cat sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T

    #--------------------------------------------
    # apply von Kries cat:
    rgbc = (
        (D * Yw / rgbw)[..., None] + (1 - D)
    ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 in 3ed.)
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 in 3ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat, rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400 * x**0.42 / (x**0.42 + 27.13) + 0.1

    rgbpa = naka_rushton(FL * rgbp / 100.0)
    p = np.where(rgbp < 0)
    rgbpa[p] = 0.1 - (naka_rushton(FL * np.abs(rgbp[p]) / 100.0) - 0.1)

    rgbwpa = naka_rushton(FL * rgbwp / 100.0)
    pw = np.where(rgbwp < 0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL * np.abs(rgbwp[pw]) / 100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
         (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
    b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b, a)
    et = (1.0 / 4.0) * (np.cos(h + 2.0) + 3.8)

    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0 * (A / Aw)**(c * z)

    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0 / 13.0) * Nc * Ncb * et *
         ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                      (21.0 / 20.0 * rgbpa[..., 2]))
    C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

    #--------------------------------------------
    # Calculate colorfulness, M:
    M = C * FL**0.25

    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    if ucs == True:
        KL, c1, c2 = 1.0, 0.007, 0.0228
        Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)
        Mp = (1.0 / c2) * np.log(1.0 + c2 * M)
    else:
        Jp = J
        Mp = M
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)

    return np.dstack((Jp, aMp, bMp))
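
# Hedged illustration (not part of the library): the CAM02-UCS compression above
# is analytically invertible. A minimal numpy check, assuming only the 'ucs'
# constants c1 = 0.007 and c2 = 0.0228 used in this function:
import numpy as np

_c1, _c2 = 0.007, 0.0228
_J, _M = 41.0, 29.0                                 # illustrative CIECAM02 J, M
_Jp = (1.0 + 100.0 * _c1) * _J / (1.0 + _c1 * _J)   # compressed lightness J'
_Mp = (1.0 / _c2) * np.log(1.0 + _c2 * _M)          # compressed colorfulness M'
assert np.isclose(_Jp / (1.0 + (100.0 - _Jp) * _c1), _J)  # inverts the J' mapping
assert np.isclose((np.exp(_c2 * _Mp) - 1.0) / _c2, _M)    # inverts the M' mapping
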
Example No. 23
0
def run(data, xyzw, conditions=None, ucs_type='ucs', forward=True):
    """ 
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or inverse mode.
    
    Args:
        :data:
            | ndarray with sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see ?luxpy.cam.ciecam02
        :ucs_type:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run the CAM in forward mode, else: inverse mode.

    Returns:
        :camout:
            | ndarray with J'a'b' coordinates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
    
    Note:
        * This is a simplified, less flexible, but faster version than the main cam02ucs().
    """
    # get ucs parameters:
    if isinstance(ucs_type, str):
        ucs_pars = {
            'ucs': {
                'KL': 1.0,
                'c1': 0.007,
                'c2': 0.0228
            },
            'lcd': {
                'KL': 0.77,
                'c1': 0.007,
                'c2': 0.0053
            },
            'scd': {
                'KL': 1.24,
                'c1': 0.007,
                'c2': 0.0363
            }
        }
        ucs = ucs_pars[ucs_type]
    else:
        ucs = ucs_type
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    if forward == True:

        # run ciecam02 to get JMh:
        data = ciecam02(data,
                        xyzw,
                        out='J,M,h',
                        conditions=conditions,
                        forward=True)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[..., 0] = (1.0 + 100.0 * c1) * data[..., 0] / (1.0 + c1 * data[..., 0])
        Mp = (1.0 / c2) * np.log(1.0 + c2 * data[..., 1])
        camout[..., 1] = Mp * np.cos(data[..., 2] * np.pi / 180)
        camout[..., 2] = Mp * np.sin(data[..., 2] * np.pi / 180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc CIECAM02 hue angle from a'M, b'M:
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc CAM02-UCS (Mp) and CIECAM02 (M) colourfulness:
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = (np.exp(c2 * Mp) - 1.0) / c2

        # calculate ciecam02 aM, bM:
        aM = M * np.cos(h)
        bM = M * np.sin(h)

        # calc CIECAM02 lightness J from CAM02-UCS J':
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0]) * c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)),
                        xyzw,
                        out='J,aM,bM',
                        conditions=conditions,
                        forward=False)
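
# Hedged usage sketch for the simplified run() above (sample values are
# illustrative; assumes numpy as np and this module's namespace):
import numpy as np

_xyz = np.array([[19.01, 20.00, 21.78]])       # sample tristimulus values
_xyzw = np.array([[95.05, 100.00, 108.88]])    # D65-like white point
_jabp = run(_xyz, _xyzw, ucs_type='ucs', forward=True)        # xyz -> J'a'b'
_xyz_back = run(_jabp, _xyzw, ucs_type='ucs', forward=False)  # J'a'b' -> xyz
# a forward/inverse round trip should approximately recover the input:
print(np.allclose(_xyz, _xyz_back, atol=1e-3))
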
Example No. 24
0
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        outin='J,aM,bM',
        conditions=None,
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or color appearance correlates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values 
        :Yw: 
            | None, optional
            | Luminance factor of white point.
            | If None: xyz (in data) and xyzw are entered as relative tristimulus values 
            |          (normalized to Yw = 100). 
            | If not None: input tristimulus are absolute and Yw is used to
            |              rescale the absolute values to relative ones 
            |              (relative to a reference perfect white diffuser 
            |               with Ywr = 100). 
            | Yw can be < 100 for e.g. paper as white point. If Yw is None, it 
            | is assumed that the relative Y-tristimulus value in xyzw 
            | represents the luminance factor Yw.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run the CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
        
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman, (2002), 
        "The CIECAM02 color appearance model,” 
        IS&T/SID Tenth Color Imaging Conference. p. 23, 2002.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS
    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals)
    mhpe = cat._MCATS['hpe']

    # chromatic adaptation sensors:
    if (mcat is None) or (isinstance(mcat, str) and mcat == 'cat02'):
        mcat = cat._MCATS['cat02']
        if yellowbluepurplecorrect == 'brill-suss':
            mcat = cat._MCATS[
                'cat02-bs']  # for yellow-blue problem, Brill [Color Res Appl 2006;31:142-145] and Brill and Süsstrunk [Color Res Appl 2008;33:424-426]
        elif yellowbluepurplecorrect == 'jiang-luo':
            mcat = cat._MCATS[
                'cat02-jiang-luo']  # for yellow-blue problem + purple line problem
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat, np.linalg.inv(mhpe))

    #--------------------------------------------
    # Set Yw:
    if Yw is not None:
        Yw = (Yw * np.ones_like(xyzw[..., 1:2]).T)
    else:
        Yw = xyzw[..., 1:2].T

    #--------------------------------------------
    # calculate condition dependent parameters:
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5
    yw = xyzw[..., 1:2].T  # original Y in xyzw (pre-transposed)

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Normalize white point (keep transpose for next step):
    xyzw = Yw * xyzw.T / yw

    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = math.dot23(mcat, xyzw)

    #--------------------------------------------
    # apply von Kries cat:
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = math.dot23(mhpe_x_invmcat, rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x,
                                         scaling=400,
                                         n=0.42,
                                         sig=27.13**(1 / 0.42),
                                         noise=0.1,
                                         forward=forward)
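    # (assuming luxpy's naka_rushton computes scaling*x**n / (x**n + sig**n) + noise,
    #  these arguments reproduce f(x) = 400*x**0.42 / (x**0.42 + 27.13) + 0.1, the
    #  same compression as the explicit lambda in the simplified version above;
    #  forward=False would then apply its analytical inverse.)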

    pw = np.where(rgbwp < 0)

    # if requested, apply yellow-blue correction:
    if yellowbluepurplecorrect == 'brill-suss':  # Brill & Süsstrunk approach for the yellow-blue problem
        rgbwp[pw] = 0.0
    rgbwpa = NK(FL * rgbwp / 100.0, True)
    rgbwpa[pw] = 0.1 - (NK(FL * np.abs(rgbwp[pw]) / 100.0, True) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    #===================================================================
    # STIMULUS transformations
    if forward:

        #--------------------------------------------
        # Normalize xyz (keep transpose for matrix multiplication in next step):
        xyz = (Yw / yw)[..., None] * data.T

        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, xyz)

        #--------------------------------------------
        # apply von Kries cat:
        rgbc = (
            (D * Yw / rgbw)[..., None] + (1 - D)
        ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 in 3rd ed.)

        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat, rgbc).T

        #--------------------------------------------
        # apply Naka-Rushton response compression:
        p = np.where(rgbp < 0)
        if yellowbluepurplecorrect == 'brill-suss':  # Brill & Süsstrunk approach for the yellow-blue problem
            rgbp[p] = 0.0
        rgbpa = NK(FL * rgbp / 100.0, forward)
        rgbpa[p] = 0.1 - (NK(FL * np.abs(rgbp[p]) / 100.0, forward) - 0.1)

        #--------------------------------------------
        # Calculate achromatic signal:
        A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
             (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb

        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
        b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a, b, htype='deg')
        et = (1.0 / 4.0) * (np.cos(h * np.pi / 180 + 2.0) + 3.8)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (A / Aw)**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)

        #--------------------------------------------
        # calculate chroma, C:
        t = ((50000.0 / 13.0) * Nc * Ncb * et *
             ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                          (21.0 / 20.0 * rgbpa[..., 2]))
        C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

        #--------------------------------------------
        # calculate colorfulness, M:
        M = C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)**0.5
        S = s  # make extra variable, just in case 'S' is called

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / ((Aw + 4.0) * (FL**0.25) * (4.0 / c)))**2.0
        else:
            raise Exception(
                'No lightness or brightness values in data. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('S' in outin[1]):
            Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)
            M = Q * (MCs / 100.0)**2.0
            C = M / (FL**0.25)

        if ('M' in outin[1]):  # convert M to C:
            C = MCs / (FL**0.25)

        if ('C' in outin[1]):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J / 100.0)**(1.0 / 2.0) * (1.64 - 0.29**n)**0.73))**(1.0 /
                                                                        0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h * np.pi / 180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h * np.pi / 180.0)
        bt = np.sin(h * np.pi / 180.0)
        p1 = (50000.0 / 13.0) * Nc * Ncb * et / t
        p2 = A / Nbb + 0.305
        p3 = 21.0 / 20.0
        p4 = p1 / bt
        p5 = p1 / at

        #--------------------------------------------
        #q = np.where(np.abs(bt) < np.abs(at))[0]
        q = (np.abs(bt) < np.abs(at))

        b = p2 * (2.0 + p3) * (460.0 / 1403.0) / (p4 + (2.0 + p3) *
                                                  (220.0 / 1403.0) *
                                                  (at / bt) -
                                                  (27.0 / 1403.0) + p3 *
                                                  (6300.0 / 1403.0))
        a = b * (at / bt)

        a[q] = p2[q] * (2.0 + p3) * (460.0 / 1403.0) / (p5[q] + (2.0 + p3) *
                                                        (220.0 / 1403.0) -
                                                        ((27.0 / 1403.0) - p3 *
                                                         (6300.0 / 1403.0)) *
                                                        (bt[q] / at[q]))
        b[q] = a[q] * (bt[q] / at[q])
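        # (the two branches above solve the same 2x2 linear system for a and b,
        #  dividing by whichever of at, bt has the larger magnitude so that the
        #  at/bt or bt/at ratio stays well-conditioned.)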

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0
        gpa = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0
        bpa = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa, gpa, bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0 / FL) * NK(rgbpa, forward)

        # apply yellow-blue correction:
        if yellowbluepurplecorrect == 'brill-suss':  # Brill & Süsstrunk approach for the yellow-blue problem
            p = np.where(rgbp < 0.0)
            rgbp[p] = 0.0

        #--------------------------------------------
        # convert from cone sensors (hpe) to cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D * Yw / rgbw)[..., None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb)

        #--------------------------------------------
        # unnormalize xyz:
        xyz = ((yw / Yw)[..., None] * xyz).T

        return xyz
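
# Hedged usage sketch for the CIECAM02 run() above (values illustrative; note
# that the conditions dict is unpacked via sorted(keys), so all five keys,
# including 'Dtype', must be present, as assumed for _DEFAULT_CONDITIONS):
import numpy as np

_conditions = {'La': 100.0, 'Yb': 20.0, 'D': 1.0, 'Dtype': None, 'surround': 'avg'}
_xyz = np.array([[19.01, 20.00, 21.78]])
_xyzw = np.array([[95.05, 100.00, 108.88]])
_jab = run(_xyz, _xyzw, conditions=_conditions, outin='J,aM,bM', forward=True)
_xyz_back = run(_jab, _xyzw, conditions=_conditions, outin='J,aM,bM', forward=False)
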
Example No. 25
0
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw: 
            | None or narray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydelep to increase speed of loop in function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on itself in the high wavelength range (~700 nm)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)  # use builtin float (np.float is deprecated)

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(Yxysl[:, 0, 1])  # spectrum locus in that range should have increasing x
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom:
        #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:]))  # abs because dom<0 --> complementary wl
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) ==
            sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(
            axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
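
# Hedged usage sketch for Ydlep_to_xyz() above (values illustrative; assumes a
# D65-like white point and that the default CMF set is available):
import numpy as np

_Ydlep = np.array([[50.0, 550.0, 0.5]])  # Y, dominant wavelength (nm), excitation purity
_xyz = Ydlep_to_xyz(_Ydlep, xyzw=np.array([[95.05, 100.00, 108.88]]))
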
Example No. 26
0
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |    2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  #input is expected to be absolute
            dataw[1:] = Lw * dataw[
                1:] / xyzw[:, 1:2]  #dataw = Lw*dataw # make absolute
        else:
            dataw = dataw  # keep dataw as-is (relative input, Y = 100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    # initialize data and camout:
    data = np2d(data).copy()  # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy()  # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[0] == 1:  # make dataw have the same light source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i +
                      1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[
                        0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmstp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[
                    0]  # white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[
                    0]  # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
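
# Hedged usage sketch for cam_sww16() above (xyz input mode with relative
# values; sample data are illustrative only):
import numpy as np

_xyz = np.array([[19.01, 20.00, 21.78]])
_xyzw = np.array([[95.05, 100.00, 108.88]])
_lab_int = cam_sww16(_xyz, dataw=_xyzw, Yb=20.0, Lw=400.0, relative=True,
                     inputtype='xyz', direction='forward')
_xyz_back = cam_sww16(_lab_int, dataw=_xyzw, Yb=20.0, Lw=400.0, relative=True,
                      inputtype='xyz', direction='inverse')
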
Example No. 27
0
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        conditions=None,
        ucstype='ucs',
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or inverse mode.
    
    Args:
        :data:
            | ndarray with sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see ?luxpy.cam.ciecam02
        :ucstype:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run the CAM in forward mode, else: inverse mode.
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout:
            | ndarray with J'a'b' coordinates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
    
    References:
        1. `M.R. Luo, G. Cui, and C. Li, 
        'Uniform colour spaces based on CIECAM02 colour appearance model,' 
        Color Res. Appl., vol. 31, no. 4, pp. 320–330, 2006.
        <http://onlinelibrary.wiley.com/doi/10.1002/col.20227/abstract>`_
    """
    # get ucs parameters:
    if isinstance(ucstype, str):
        ucs_pars = _CAM_UCS_PARAMETERS
        ucs = ucs_pars[ucstype]
    else:
        ucs = ucstype
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    # set conditions to use in CIECAM02 (overrides None-default in ciecam02() !!!)
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    if forward == True:

        # run ciecam02 to get JMh:
        data = ciecam02(data,
                        xyzw,
                        outin='J,M,h',
                        conditions=conditions,
                        forward=True,
                        mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[..., 0] = (1.0 + 100.0 * c1) * data[..., 0] / (1.0 + c1 * data[..., 0])
        Mp = ((1.0 / c2) *
              np.log(1.0 + c2 * data[..., 1])) if (c2 != 0) else data[..., 1]
        camout[..., 1] = Mp * np.cos(data[..., 2] * np.pi / 180)
        camout[..., 2] = Mp * np.sin(data[..., 2] * np.pi / 180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc ciecam02 hue angle
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc cam02ucs and CIECAM02 colourfulness
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = ((np.exp(c2 * Mp) - 1.0) / c2) if (c2 != 0) else Mp

        # calculate ciecam02 aM, bM:
        aM = M * np.cos(h)
        bM = M * np.sin(h)

        # calc ciecam02 lightness
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0]) * c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)),
                        xyzw,
                        outin='J,aM,bM',
                        conditions=conditions,
                        forward=False,
                        mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)
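
# Hedged sketch: the 'ucs', 'lcd' and 'scd' variants differ only in their
# (KL, c1, c2) parameters, so a custom parameter dict can also be passed as
# :ucstype: (values illustrative; _CAM_UCS_PARAMETERS is assumed to follow
# Luo et al. 2006):
import numpy as np

_xyz = np.array([[19.01, 20.00, 21.78]])
_xyzw = np.array([[95.05, 100.00, 108.88]])
for _u in ('ucs', 'lcd', 'scd'):
    print(_u, run(_xyz, _xyzw, ucstype=_u, forward=True))
print(run(_xyz, _xyzw, ucstype={'KL': 1.0, 'c1': 0.007, 'c2': 0.0228}, forward=True))
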
Example No. 28
0
def plot_hue_bins(hbins = 16, start_hue = 0.0, scalef = 100, \
        plot_axis_labels = False, bin_labels = '#', plot_edge_lines = True, \
        plot_center_lines = False, plot_bin_colors = True, \
        plot_10_20_circles = False,\
        axtype = 'polar', ax = None, force_CVG_layout = False):
    """
    Makes basis plot for Color Vector Graphic (CVG).
    
    Args:
        :hbins:
            | 16 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :scalef:
            | 100, optional
            | Scale factor for graphic.
        :plot_axis_labels:
            | False, optional
            | Turns axis ticks on/off (True/False).
        :bin_labels:
            | None or list[str] or '#', optional
            | Plots labels at the bin center hues.
            |   - None: don't plot.
            |   - list[str]: list with str for each bin. 
            |                (len(:bin_labels:) = :nhbins:)
            |   - '#': plots number.
        :plot_edge_lines:
            | True or False, optional
            | Plot grey bin edge lines with '--'.
        :plot_center_lines:
            | False or True, optional
            | Plot colored lines at 'center' of hue bin.
        :plot_bin_colors:
            | True, optional
            | Colorize hue bins.
        :plot_10_20_circles:
            | False, optional
            | If True and :axtype: == 'cart': Plot white circles at 
            | 80%, 90%, 100%, 110% and 120% of :scalef: 
        :axtype: 
            | 'polar' or 'cart', optional
            | Make polar or Cartesian plot.
        :ax: 
            | None or 'new' or 'same', optional
            |   - None or 'new' creates new plot
            |   - 'same': continue plot on same axes.
            |   - axes handle: plot on specified axes.
        :force_CVG_layout:
            | False or True, optional
            | True: Force plot of basis of CVG on first encounter.
            
    Returns:
        :returns: 
            | gcf(), gca(), list with rgb colors for hue bins (for use in 
              other plotting fcns)
        
    """

    # Setup hbincenters and hsv_hues:
    if isinstance(hbins, (float, int)):
        nhbins = hbins
        dhbins = 360 / (nhbins)  # hue bin width
        hbincenters = np.arange(start_hue + dhbins / 2, 360, dhbins)
        hbincenters = np.sort(hbincenters)

    else:
        hbincenters = hbins
        idx = np.argsort(hbincenters)
        if isinstance(bin_labels, (list, np.ndarray)):
            bin_labels = [bin_labels[i] for i in idx]  # index via comprehension so plain lists also work
        hbincenters = hbincenters[idx]
        nhbins = hbincenters.shape[0]
    hbincenters = hbincenters * np.pi / 180

    # Setup hbin labels:
    if bin_labels == '#':
        bin_labels = ['#{:1.0f}'.format(i + 1) for i in range(nhbins)]
    elif isinstance(bin_labels, str):
        bin_labels = [
            bin_labels + '{:1.0f}'.format(i + 1) for i in range(nhbins)
        ]

    # initializing the figure
    cmap = None
    if (ax is None) or (ax == 'new'):
        fig = plt.figure()
        newfig = True
    else:
        fig = plt.gcf()
        newfig = False
    rect = [0.1, 0.1, 0.8,
            0.8]  # setting the axis limits in [left, bottom, width, height]

    if axtype == 'polar':
        # the polar axis:
        if newfig == True:
            ax = fig.add_axes(rect, polar=True, frameon=False)
    else:
        #cartesian axis:
        if newfig == True:
            ax = fig.add_axes(rect)

    if (newfig == True) | (force_CVG_layout == True):

        # Calculate hue-bin boundaries:
        r = np.vstack((np.zeros(hbincenters.shape),
                       1. * scalef * np.ones(hbincenters.shape)))
        theta = np.vstack((np.zeros(hbincenters.shape), hbincenters))
        #t = hbincenters.copy()
        dU = np.roll(hbincenters.copy(), -1)
        dL = np.roll(hbincenters.copy(), 1)
        dtU = dU - hbincenters
        dtL = hbincenters - dL
        dtU[dtU < 0] = dtU[dtU < 0] + 2 * np.pi
        dtL[dtL < 0] = dtL[dtL < 0] + 2 * np.pi
        dL = hbincenters - dtL / 2
        dU = hbincenters + dtU / 2
        dt = (dU - dL)
        dM = dL + dt / 2

        # Setup color for plotting hue bins:
        hsv_hues = hbincenters - 30 * np.pi / 180
        hsv_hues = hsv_hues / hsv_hues.max()

        edges = np.vstack(
            (np.zeros(hbincenters.shape), dL))  # setup hue bin edges array

        if axtype == 'cart':
            if plot_center_lines == True:
                hx = r * np.cos(theta) * 1.2
                hy = r * np.sin(theta) * 1.2
            if bin_labels is not None:
                hxv = np.vstack((np.zeros(hbincenters.shape),
                                 1.4 * scalef * np.cos(hbincenters)))
                hyv = np.vstack((np.zeros(hbincenters.shape),
                                 1.4 * scalef * np.sin(hbincenters)))
            if plot_edge_lines == True:
                #hxe = np.vstack((np.zeros(hbincenters.shape),1.2*scalef*np.cos(dL)))
                #hye = np.vstack((np.zeros(hbincenters.shape),1.2*scalef*np.sin(dL)))
                hxe = np.vstack(
                    (0.1 * scalef * np.cos(dL), 1.5 * scalef * np.cos(dL)))
                hye = np.vstack(
                    (0.1 * scalef * np.sin(dL), 1.5 * scalef * np.sin(dL)))

        # Plot hue-bins:
        for i in range(nhbins):

            # Create color from hue angle:
            #c = np.abs(np.array(colorsys.hsv_to_rgb(hsv_hues[i], 0.75, 0.85)))
            c = np.abs(np.array(colorsys.hls_to_rgb(hsv_hues[i], 0.45, 0.5)))
            if i == 0:
                cmap = [c]
            else:
                cmap.append(c)

            if axtype == 'polar':
                if plot_edge_lines == True:
                    ax.plot(edges[:, i],
                            r[:, i] * 1.,
                            color='grey',
                            marker='None',
                            linestyle='--',
                            linewidth=1,
                            markersize=2)
                if plot_center_lines == True:
                    if np.mod(i, 2) == 1:
                        ax.plot(theta[:, i],
                                r[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1)
                    else:
                        ax.plot(theta[:, i],
                                r[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1,
                                markersize=10)
                if plot_bin_colors == True:
                    bar = ax.bar(dM[i],
                                 r[1, i],
                                 width=dt[i],
                                 color=c,
                                 alpha=0.25)
                if bin_labels is not None:
                    ax.text(hbincenters[i],
                            1.3 * scalef,
                            bin_labels[i],
                            fontsize=10,
                            horizontalalignment='center',
                            verticalalignment='center',
                            color=np.array([1, 1, 1]) * 0.45)
                if plot_axis_labels == False:
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
            else:
                axis_ = 1. * np.array(
                    [-scalef * 1.5, scalef * 1.5, -scalef * 1.5, scalef * 1.5])
                if plot_edge_lines == True:
                    ax.plot(hxe[:, i],
                            hye[:, i],
                            color='grey',
                            marker='None',
                            linestyle='--',
                            linewidth=1,
                            markersize=2)

                if plot_center_lines == True:
                    if np.mod(i, 2) == 1:
                        ax.plot(hx[:, i],
                                hy[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1)
                    else:
                        ax.plot(hx[:, i],
                                hy[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1,
                                markersize=10)
                if bin_labels is not None:
                    ax.text(hxv[1, i],
                            hyv[1, i],
                            bin_labels[i],
                            fontsize=10,
                            horizontalalignment='center',
                            verticalalignment='center',
                            color=np.array([1, 1, 1]) * 0.45)
                ax.axis(axis_)

        if plot_axis_labels == False:
            ax.set_xticklabels([])
            ax.set_yticklabels([])
        else:
            ax.set_xlabel("a'")
            ax.set_ylabel("b'")

        ax.plot(0, 0, color='grey', marker='+', linestyle='none', markersize=6)

        if (axtype != 'polar') & (plot_10_20_circles == True):
            r = np.array([
                0.8, 0.9, 1.1, 1.2
            ]) * scalef  # white circles at 80, 90, 110 and 120% of scalef (100% circle plotted in black below)
            plotcircle(radii=r,
                       angles=np.arange(0, 365, 5),
                       color='w',
                       linestyle='-',
                       axh=ax,
                       linewidth=0.5)
            plotcircle(radii=[scalef],
                       angles=np.arange(0, 365, 5),
                       color='k',
                       linestyle='-',
                       axh=ax,
                       linewidth=1)
            ax.text(0,
                    -0.75 * scalef,
                    '-20%',
                    fontsize=8,
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='w')
            ax.text(0,
                    -1.25 * scalef,
                    '+20%',
                    fontsize=8,
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='w')

        if (axtype != 'polar') & (plot_bin_colors == True) & (_CVG_BG
                                                              is not None):
            ax.imshow(_CVG_BG, origin='upper', extent=axis_)

    return fig, ax, cmap
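
# Hedged usage sketch for plot_hue_bins() above (assumes matplotlib.pyplot is
# imported as plt in this module, as the function body already requires):
_fig, _axh, _cmap = plot_hue_bins(hbins=16, axtype='polar',
                                  plot_bin_colors=True, bin_labels='#')
plt.show()
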
Example No. 29
0
def DE2000(xyzt,
           xyzr,
           dtype='xyz',
           DEtype='jab',
           avg=None,
           avg_axis=0,
           out='DEi',
           xyzwt=None,
           xyzwr=None,
           KLCH=None):
    """
    Calculate DE2000 color difference.
    
    Args:
        :xyzt: 
            | ndarray with tristimulus values of test data.
        :xyzr:
            | ndarray with tristimulus values of reference data.
        :dtype:
            | 'xyz' or 'lab', optional
            | Specifies data type in :xyzt: and :xyzr:.
        :xyzwt:
            | None or ndarray, optional
            |   White point tristimulus values of test data
            |   None defaults to the one set in lx.xyz_to_lab()
        :xyzwr:
            | None or ndarray, optional
            |    Whitepoint tristimulus values of reference data
            |    None defaults to the one set in lx.xyz_to_lab()
        :DEtype:
            | 'jab' or str, optional
            | Options: 
            |    - 'jab' : calculates full color difference over all 3 dimensions.
            |    - 'ab'  : calculates chromaticity difference.
            |    - 'j'   : calculates lightness or brightness difference 
            |             (depending on :outin:).
            |    - 'j,ab': calculates both 'j' and 'ab' options 
            |              and returns them as a tuple.
        :KLCH: 
            | None, optional
            | Weights for L, C, H 
            | None: default to [1,1,1] 
        :avg:
            | None, optional
            | None: don't calculate average DE, 
            |       otherwise use function handle in :avg:.
        :avg_axis:
            | axis to calculate average over, optional
        :out: 
            | 'DEi' or str, optional
            | Requested output.
        
    Note:
        For the other input arguments, see specific color space used.
        
    Returns:
        :returns: 
            | ndarray with DEi [, DEa] or other as specified by :out:
            
    References:
        1. `Sharma, G., Wu, W., & Dalal, E. N. (2005). 
        The CIEDE2000 color‐difference formula: Implementation notes, 
        supplementary test data, and mathematical observations. 
        Color Research & Application, 30(1), 21–30. 
        <https://doi.org/10.1002/col.20070>`_
    """

    if KLCH is None:
        KLCH = [1, 1, 1]

    if dtype == 'xyz':
        labt = xyz_to_lab(xyzt, xyzw=xyzwt)
        labr = xyz_to_lab(xyzr, xyzw=xyzwr)
    else:
        labt = xyzt
        labr = xyzr

    Lt = labt[..., 0:1]
    at = labt[..., 1:2]
    bt = labt[..., 2:3]
    Ct = np.sqrt(at**2 + bt**2)
    #ht = cam.hue_angle(at,bt,htype = 'rad')

    Lr = labr[..., 0:1]
    ar = labr[..., 1:2]
    br = labr[..., 2:3]
    Cr = np.sqrt(ar**2 + br**2)
    #hr = cam.hue_angle(ar,br,htype = 'rad')

    # Step 1:
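    # (a* is rescaled to a' = (1 + G)*a*; G goes from 0.5 near the neutral
    #  axis to 0 at high chroma, which improves performance for grayish colors)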
    Cavg = (Ct + Cr) / 2
    G = 0.5 * (1 - np.sqrt((Cavg**7.0) / ((Cavg**7.0) + (25.0**7))))
    apt = (1 + G) * at
    apr = (1 + G) * ar

    Cpt = np.sqrt(apt**2 + bt**2)
    Cpr = np.sqrt(apr**2 + br**2)
    Cpprod = Cpt * Cpr

    hpt = cam.hue_angle(apt, bt, htype='deg')
    hpr = cam.hue_angle(apr, br, htype='deg')
    hpt[(apt == 0) * (bt == 0)] = 0
    hpr[(apr == 0) * (br == 0)] = 0

    # Step 2:
    dL = Lr - Lt
    dCp = Cpr - Cpt  # keep the sign: dCp enters the RT cross term linearly
    dhp_ = hpr - hpt

    dhp = dhp_.copy()
    dhp[np.where(dhp_ > 180)] = dhp[np.where(dhp_ > 180)] - 360
    dhp[np.where(dhp_ < -180)] = dhp[np.where(dhp_ < -180)] + 360
    dhp[np.where(Cpprod == 0)] = 0

    #dH = 2*np.sqrt(Cpprod)*np.sin(dhp/2*np.pi/180)
    dH = deltaH(dhp, Cpprod, htype='deg')

    # Step 3:
    Lp = (Lr + Lt) / 2
    Cp = (Cpr + Cpt) / 2

    hps = hpt + hpr
    hp = (hpt + hpr) / 2
    wrap = (np.abs(dhp_) > 180) & (hps < 360)
    hp[wrap] = hp[wrap] + 180  # mean hue wraps around the 0/360 boundary
    wrap = (np.abs(dhp_) > 180) & (hps >= 360)
    hp[wrap] = hp[wrap] - 180
    hp[np.where(Cpprod == 0)] = 0

    T = 1 - 0.17*np.cos((hp - 30)*np.pi/180) + 0.24*np.cos(2*hp*np.pi/180) +\
        0.32*np.cos((3*hp + 6)*np.pi/180) - 0.20*np.cos((4*hp - 63)*np.pi/180)
    dtheta = 30 * np.exp(-((hp - 275) / 25)**2)
    RC = 2 * np.sqrt((Cp**7) / ((Cp**7) + (25**7)))
    SL = 1 + ((0.015 * (Lp - 50)**2) / np.sqrt(20 + (Lp - 50)**2))
    SC = 1 + 0.045 * Cp
    SH = 1 + 0.015 * Cp * T
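    # RT couples the chroma and hue differences; dtheta peaks at hp = 275°,
    # so the rotation term mainly affects colors in the blue region: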
    RT = -np.sin(2 * dtheta * np.pi / 180) * RC

    kL, kC, kH = KLCH

    # DEi is a tuple (squared lightness term, squared chroma + hue terms
    # including the RT rotation); _process_DEi combines or splits these
    # according to the :DEtype: option:
    DEi = ((dL / (kL * SL))**2,
           (dCp / (kC * SC))**2 + (dH / (kH * SH))**2 +
           RT * (dCp / (kC * SC)) * (dH / (kH * SH)))

    return _process_DEi(DEi,
                        DEtype=DEtype,
                        avg=avg,
                        avg_axis=avg_axis,
                        out=out)
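
# --- Usage sketch (not part of the library): sanity-check DE2000 against pair
# #1 of the CIEDE2000 test data published by Sharma et al. (2005); this assumes
# the default DEtype='jab' makes _process_DEi return the combined difference.
labt = np.array([[50.0, 2.6772, -79.7751]])  # test Lab values
labr = np.array([[50.0, 0.0, -82.7485]])     # reference Lab values
print(DE2000(labt, labr, dtype='lab'))       # expected: ~2.0425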
Example #30
def spd_to_ies_tm30_metrics(St, cri_type = None, \
                            hbins = 16, start_hue = 0.0,\
                            scalef = 100, \
                            vf_model_type = _VF_MODEL_TYPE, \
                            vf_pcolorshift = _VF_PCOLORSHIFT,\
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculates IES TM30 metrics from spectral data.      
      
    Args:
        :St:
            | numpy.ndarray with spectral data 
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Non-None values of :hbins:, :start_hue: and :scalef: overwrite 
            | the corresponding input in cri_type['rg_pars'].
        :hbins:
            | None, int (number of hue bins) or numpy.ndarray with sorted 
            | hue bin centers (°), optional
        :start_hue: 
            | None, optional
            | Hue angle (°) at which the first hue bin starts.
        :scalef:
            | None, optional
            | Scale factor for reference circle.
        :vf_model_type:
            | _VF_MODEL_TYPE, optional
            | Type of polynomial model used for the vector field 
            | (degree 5 or 6).
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified 
            | or summarized by the model parameters themselves OR by 
            | calculating the dCoverC and dH at 5 and 6 hues, respectively. 
            | :vf_pcolorshift: specifies these hues and the chroma level.
        :scale_vf_chroma_to_sample_chroma: 
            | False, optional
            | Scale chroma of reference and test vf fields such that average of 
            | binned reference chroma equals that of the binned sample chroma
            | before calculating hue bin metrics.
            
    Returns:
        :data: 
            | Dictionary with color rendering data:
            | 
            | - 'St, Sr'  : ndarray of test SPDs and corresponding ref. illuminants.
            | - 'xyz_cct': xyz of white point calculated with the cieobs defined for cct calculations in cri_type['cieobs']
            | - 'cct, duv': CCT and Duv obtained with cieobs in cri_type['cieobs']['cct']
            | - 'xyzti, xyzri': ndarray tristimulus values of test and ref. samples (obtained with cieobs in cri_type['cieobs']['xyz'])
            | - 'xyztw, xyzrw': ndarray tristimulus values of test and ref. white points (obtained with cieobs in cri_type['cieobs']['xyz'])
            | - 'DEi, DEa': ndarray with individual sample color differences DEi and average DEa between test and ref.       
            | - 'Rf'  : ndarray with general color fidelity index values
            | - 'Rg'  : ndarray with color gamut area index values
            | - 'Rfi'  : ndarray with specific (sample) color fidelity indices
            | - 'Rfhj' : ndarray with local (hue binned) fidelity indices
            | - 'DEhj' : ndarray with local (hue binned) color differences
            | - 'Rcshj': ndarray with local chroma shift indices
            | - 'Rhshj': ndarray with local hue shift indices
            | - 'hue_bin_data': dict with output from _get_hue_bin_data() [see its help for more info]
            | - 'cri_type': same as input (for reference purposes)
            | - 'vf' : dictionary with vector field measures and data.
            |         Keys:
            |           - 'Rt'  : ndarray with general metameric uncertainty index Rt
            |           - 'Rti' : ndarray with specific metameric uncertainty indices Rti
            |           - 'Rfhj' : ndarray with local (hue binned) fidelity indices 
            |                            obtained from VF model predictions at color space
            |                            pixel coordinates
            |           - 'DEhj' : ndarray with local (hue binned) color differences
            |                           (same as above)
            |           - 'Rcshj': ndarray with local chroma shift indices for vector field coordinates
            |                           (same as above)
            |           - 'Rhshj': ndarray with local hue shift indices for vector field coordinates
            |                           (same as above)
            |           - 'Rfi': ndarray with sample fidelity indices for vector field coordinates
            |                           (same as above)
            |           - 'DEi': ndarray with sample color differences for vectorfield coordinates
            |                           (same as above)
            |           - 'hue_bin_data': dict with output from _get_hue_bin_data() for vectorfield coordinates
            |           - 'dataVF': dictionary with output of cri.VFPX.VF_colorshift_model()
    """
    if cri_type is None:
        cri_type = 'iesrf'

    if isinstance(cri_type,str): # get dict 
        cri_type = copy.deepcopy(_CRI_DEFAULTS[cri_type])
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins 
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    
    #Calculate color rendering measures for SPDs in St:      
    data,_ = spd_to_cri(St, cri_type = cri_type, out = 'data,hue_bin_data', 
                        fit_gamut_ellipse = True)
    hdata = data['hue_bin_data']
    Rfhj, Rcshj, Rhshj = data['Rfhj'], data['Rcshj'], data['Rhshj']
    cct = data['cct']
    
    #Calculate Metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(St, cri_type = cri_type, 
                                 model_type = vf_model_type, 
                                 cspace = cri_type['cspace'], 
                                 sampleset = eval(cri_type['sampleset']), 
                                 pool = False, 
                                 pcolorshift = vf_pcolorshift, 
                                 vfcolor = 0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti'] for i in range(len(dataVF))][0])
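    # note: Rti is extracted from the first source only (dataVF[0])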
    _data_vf = {'Rt' : Rt, 'Rti' : Rti, 'Rf_' : Rf_} # add to dict for output

    # Get normalized and sliced hue-bin _hj data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [rg_pars[x] for x in sorted(rg_pars.keys())]
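    # note: the unpacking above relies on the alphabetical key order of rg_pars:
    # 'nhbins', 'normalize_gamut', 'normalized_chroma_ref', 'start_hue'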
    
    # Get chroma of samples:    
    if scale_vf_chroma_to_sample_chroma == True:
        jabt_hj_closed, jabr_hj_closed = hdata['jabt_hj_closed'], hdata['jabr_hj_closed']
        Cr_hj_s = (np.sqrt(jabr_hj_closed[:-1,...,1]**2 + jabr_hj_closed[:-1,...,2]**2)).mean(axis=0) # for rescaling vector field average reference chroma

    #jabtn_hj_closed, jabrn_hj_closed = hdata['jabtn_hj_closed'], hdata['jabrn_hj_closed']
    
    # get vector field data for each source (must be on 2nd dim)
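    # (the first column of ones is a lightness (J) placeholder; the vector 
    #  field data only provides a,b coordinates)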
    jabt_vf = np.transpose(np.array([np.hstack((np.ones(dataVF[i]['fielddata']['vectorfield']['axt'].shape),
                                                dataVF[i]['fielddata']['vectorfield']['axt'],
                                                dataVF[i]['fielddata']['vectorfield']['bxt']))
                                     for i in range(cct.shape[0])]), (1, 0, 2))
    jabr_vf = np.transpose(np.array([np.hstack((np.ones(dataVF[i]['fielddata']['vectorfield']['axr'].shape),
                                                dataVF[i]['fielddata']['vectorfield']['axr'],
                                                dataVF[i]['fielddata']['vectorfield']['bxr']))
                                     for i in range(cct.shape[0])]), (1, 0, 2))
    
    # Get hue bin data for vector field data:
    hue_bin_data_vf = _get_hue_bin_data(jabt_vf, jabr_vf, 
                                        start_hue = start_hue, nhbins = nhbins,
                                        normalized_chroma_ref = normalized_chroma_ref )
    
    # Rescale chroma of vector field such that it is on average equal to that of the binned samples:
    if scale_vf_chroma_to_sample_chroma == True:
        Cr_vf_hj, Cr_vf, Ct_vf = hue_bin_data_vf['Cr_hj'], hue_bin_data_vf['Cr'], hue_bin_data_vf['Ct']
        hr_vf, ht_vf = hue_bin_data_vf['hr'], hue_bin_data_vf['ht']
        fC = np.nanmean(Cr_hj_s)/np.nanmean(Cr_vf_hj)
        jabr_vf[...,1], jabr_vf[...,2] = fC * Cr_vf*np.cos(hr_vf), fC * Cr_vf*np.sin(hr_vf)
        jabt_vf[...,1], jabt_vf[...,2] = fC * Ct_vf*np.cos(ht_vf), fC * Ct_vf*np.sin(ht_vf)
        
        # Get new hue bin data for rescaled vector field data:
        hue_bin_data_vf = _get_hue_bin_data(jabt_vf, jabr_vf, 
                                            start_hue = start_hue, nhbins = nhbins,
                                            normalized_chroma_ref = normalized_chroma_ref )
    
    # Get scale factor and scaling function for Rfx:
    scale_factor = cri_type['scale']['cfactor']
    scale_fcn = cri_type['scale']['fcn']

    # Calculate Local color fidelity, chroma and hue shifts for vector field data:
    (Rcshj_vf, Rhshj_vf,
     Rfhj_vf, DEhj_vf) = _hue_bin_data_to_rxhj(hue_bin_data_vf, 
                                               cri_type = cri_type,
                                               scale_factor = scale_factor,
                                               scale_fcn = scale_fcn) 
                                               
    # Get sample color fidelity for vector field data:
    (Rfi_vf, DEi_vf) = _hue_bin_data_to_rfi(hue_bin_data_vf, 
                                            cri_type = cri_type,
                                            scale_factor = scale_factor,
                                            scale_fcn = scale_fcn)
    # Store in dict:
    _data_vf.update({'Rfi' : Rfi_vf, 'DEi' : DEi_vf,
                     'Rcshj' : Rcshj_vf, 'Rhshj' : Rhshj_vf,
                     'Rfhj' : Rfhj_vf, 'DEhj': DEhj_vf,
                     'dataVF' : dataVF, 'hue_bin_data' : hue_bin_data_vf})
    
    # Add to main dictionary:
    data['vf'] = _data_vf
    return data
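
# --- Usage sketch (hypothetical; assumes a luxpy-style environment where 
# spectra are ndarrays with wavelengths in the first row, e.g. from the 
# _CIE_ILLUMINANTS dict):
#
#   import luxpy as lx
#   St = lx._CIE_ILLUMINANTS['F4']   # example test SPD (assumed available)
#   data = spd_to_ies_tm30_metrics(St, hbins=16, start_hue=0.0, scalef=100)
#   print(data['Rf'], data['Rg'], data['vf']['Rt'])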