Example #1
def cct_to_neutral_loci_smet2018(cct, nlocitype='uw', out='duv,D'):
    """
    Calculate the most neutral appearing Duv10 and the degree of neutrality for a specified CCT using the models in Smet et al. (2018).
    
    Args:
        :cct:
            | ndarray with CCT values
        :nlocitype:
            | 'uw', optional
            | 'uw': use unique white models published in Smet et al. (2014).
            | 'ca': use degree of chromatic adaptation model from Smet et al. (2017).
        :out:
            | 'duv,D', optional
            | Specifies requested output (other options: 'duv', 'D').
            
    Returns:
        :duv: 
            | ndarray with most neutral Duv10 value corresponding to the cct input.
        :D: 
            | ndarray with the degree of neutrality at (cct, duv).
        
    References:
        1. `Smet, K.A.G., (2018), 
        Two Neutral White Illumination Loci Based on Unique White Rating and Degree of Chromatic Adaptation. 
        LEUKOS, 14(2), 55–67. <https://doi.org/10.1080/15502724.2017.1385400>`_

    Notes:
        1. Duv is specified in the CIE 1960 u10v10 chromaticity diagram as the models were developed using CIE 1964 10° tristimulus, chromaticity and CCT values.
        2. The parameter +0.0172 in Eq. 4b should be -0.0172.
    """
    if nlocitype == 'uw':
        duv = 0.0202 * np.log(cct / 3325) * np.exp(
            -1.445 * np.log(cct / 3325)**2) - 0.0137
        D = np.exp(-(6368 * ((1 / cct) -
                             (1 / 6410)))**2)  # degree of neutrality
    elif nlocitype == 'ca':
        duv = 0.0382 * np.log(cct / 2194) * np.exp(
            -0.679 * np.log(cct / 2194)**2) - 0.0172
        D = np.exp(-(3912 * ((1 / cct) -
                             (1 / 6795)))**2)  # degree of adaptation
    else:
        raise Exception('Unrecognized nlocitype')

    if out == 'duv,D':
        return duv, D
    elif out == 'duv':
        return duv
    elif out == 'D':
        return D
    else:
        raise Exception('smet_white_loci(): Requested output unrecognized.')
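A minimal usage sketch (hypothetical CCT values; assumes numpy is imported as np, as the function body requires):

import numpy as np

ccts = np.array([3000.0, 4000.0, 6500.0])                               # hypothetical CCTs [K]
duv, D = cct_to_neutral_loci_smet2018(ccts, nlocitype='uw')             # unique-white based locus
duv_ca = cct_to_neutral_loci_smet2018(ccts, nlocitype='ca', out='duv')  # adaptation-based locus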
Example #2
def log_scale(data,
              scale_factor=[6.73],
              scale_max=100.0):  # defaults from cie-224-2017 cri
    """
    Log-based color rendering index scale from Davis & Ohno (2009): 
    
    | Rfi,a = 10 * ln(exp((100 - c1*DEi,a)/10) + 1).
                    
    Args:
        :data: 
            | float or list[floats] or ndarray 
        :scale_factor:
            | [6.73] or list[float] or ndarray, optional
            | Rescales color differences before subtracting them from :scale_max:
            | Note that the default value is the one from CIE 224:2017.
        :scale_max:
            | 100.0, optional
            | Maximum value of linear scale
    
    Returns:
        :returns:
            | float or list[floats] or ndarray
        
    References:
        1. `W. Davis and Y. Ohno, 
        “Color quality scale,” (2010), 
        Opt. Eng., vol. 49, no. 3, pp. 33602–33616.
        <http://spie.org/Publications/Journal/10.1117/1.3360335>`_
        2. `CIE224:2017. CIE 2017 Colour Fidelity Index for accurate scientific use. 
        Vienna, Austria: CIE. (2017).
        <http://www.cie.co.at/index.php?i_ca_id=1027>`_

    """
    return 10.0 * np.log(
        np.exp((scale_max - scale_factor[0] * data) / 10.0) + 1.0)
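A short usage sketch (hypothetical color-difference values; assumes numpy is imported as np):

import numpy as np

DEi = np.array([0.0, 2.5, 10.0])                 # hypothetical color differences
Rfi = log_scale(DEi)                             # default CIE 224:2017 scale factor (6.73)
Rfi_alt = log_scale(DEi, scale_factor=[3.0])     # hypothetical alternative scale factor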
Example #3
def smet2017_D(xyzw, Dmax=None):
    """
    Calculate the degree of adaptation based on chromaticity following 
    Smet et al. (2017) 
    
    Args:
        :xyzw: 
            | ndarray with white point data (CIE 1964 10° XYZs!!)
        :Dmax:
            | None or float, optional
            | Defaults to 0.6539 (max D obtained under experimental conditions, 
            | but probably too low due to dark surround leading to incomplete 
            | chromatic adaptation even for neutral illuminants 
            | resulting in a background luminance (fov~50°) of 760 cd/m²)
            
    Returns:
        :D: 
            | ndarray with degrees of adaptation
    
    References: 
        1. `Smet, K.A.G.*, Zhai, Q., Luo, M.R., Hanselaer, P., (2017), 
        Study of chromatic adaptation using memory color matches, 
        Part II: colored illuminants, 
        Opt. Express, 25(7), pp. 8350-8365.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-25-7-8350&origin=search)>`_

    """

    # Convert xyzw to log-compressed MacLeod-Boynton coordinates:
    Vl, rl, bl = asplit(
        np.log(xyz_to_Vrb_mb(xyzw, M=_MCATS['hpe']))
    )  # force use of HPE matrix (which was the one used when deriving the model parameters!!)

    # apply Dmodel (technically only for cieobs = '1964_10')
    pD = (1.0e7) * np.array([
        0.021081326530436, 4.751255762876845, -0.000000071025181,
        -0.000000063627042, -0.146952821492957, 3.117390441655821
    ])  #D model parameters for gaussian model in log(MB)-space (july 2016)
    if Dmax is None:
        Dmax = 0.6539  # max D obtained under experimental conditions (probably too low due to dark surround leading to incomplete chromatic adaptation even for neutral illuminants resulting in background luminance (fov~50°) of 760 cd/m²)
    return Dmax * math.bvgpdf(x=rl,
                              y=bl,
                              mu=pD[2:4],
                              sigmainv=np.linalg.inv(
                                  np.array([[pD[0], pD[4]], [pD[4], pD[1]]
                                            ])))**pD[5]
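A minimal usage sketch (hypothetical CIE 1964 10° white point; assumes numpy as np and that the luxpy helpers used above, e.g. xyz_to_Vrb_mb and math.bvgpdf, are importable):

import numpy as np

xyzw = np.array([[94.8, 100.0, 107.3]])    # hypothetical 10° white point XYZ (roughly D65-like)
D = smet2017_D(xyzw)                       # degree of adaptation, scaled to Dmax = 0.6539
D_rescaled = smet2017_D(xyzw, Dmax=1.0)    # with Dmax = 1 the model peak reaches full adaptation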
Example #4
def spd_to_mcri(SPD, D = 0.9, E = None, Yb = 20.0, out = 'Rm', wl = None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs, 
            |  first axis are the wavelengths)
        :D: 
            | 0.9, optional
            | Degree of adaptation.
        :E: 
            | None, optional
            | Illuminance in lux 
            |  (used to calculate La = (Yb/100)*(E/pi) to then calculate D 
            |  following the 'cat02' model). 
            | If None: the degree is determined by :D:
            |  If (:E: is not None) & (:Yb: is None):  :E: is assumed to contain 
            |  the adapting field luminance La (cd/m²).
        :Yb: 
            | 20.0, optional
            | Luminance factor of background. (used when calculating La from E)
            | If None, E contains La (cd/m²).
        :out: 
            | 'Rm' or str, optional
            | Specifies requested output (e.g. 'Rm,Rmi,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns: 
            | float or ndarray with MCRI Rm for :out: 'Rm'
            | Other output is also possible by changing the :out: str value.        
          
    References:
        1. `K.A.G. Smet, W.R. Ryckaert, M.R. Pointer, G. Deconinck, P. Hanselaer,(2012)
        “A memory colour quality metric for white light sources,” 
        Energy Build., vol. 49, no. C, pp. 216–225.
        <http://www.sciencedirect.com/science/article/pii/S0378778812000837>`_
    """
    SPD = np2d(SPD)
    
    if wl is not None: 
        SPD = spd(data = SPD, interpolation = _S_INTERP_TYPE, kind = 'np', wl = wl)
    
    
    # unpack metric default values:
    avg, catf, cieobs, cri_specific_pars, cspace, ref_type, rg_pars, sampleset, scale = [_MCRI_DEFAULTS[x] for x in sorted(_MCRI_DEFAULTS.keys())] 
    similarity_ai = cri_specific_pars['similarity_ai']
    Mxyz2lms = cspace['Mxyz2lms'] 
    scale_fcn = scale['fcn']
    scale_factor = scale['cfactor']
    sampleset = eval(sampleset)
    
    # A. calculate xyz:
    xyzti, xyztw = spd_to_xyz(SPD, cieobs = cieobs['xyz'],  rfl = sampleset, out = 2)
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyztw, cieobs = cieobs['cct'], out = 'cct,duv',mode = 'lut')
        
    # B. perform chromatic adaptation to adopted whitepoint of ipt color space, i.e. D65:
    if catf is not None:
        Dtype_cat, F, Yb_cat, catmode_cat, cattype_cat, mcat_cat, xyzw_cat = [catf[x] for x in sorted(catf.keys())]
        
        # calculate degree of adaptation D:
        if E is not None:
            if Yb is not None:
                La = (Yb/100.0)*(E/np.pi)
            else:
                La = E
            D = cat.get_degree_of_adaptation(Dtype = Dtype_cat, F = F, La = La)
        else:
            Dtype_cat = None # direct input of D

        if (E is None) and (D is None):
            D = 1.0 # set degree of adaptation to 1 !
        if D > 1.0: D = 1.0
        if D < 0.6: D = 0.6 # put a limit on the lowest D

        # apply cat:
        xyzti = cat.apply(xyzti, cattype = cattype_cat, catmode = catmode_cat, xyzw1 = xyztw,xyzw0 = None, xyzw2 = xyzw_cat, D = D, mcat = [mcat_cat], Dtype = Dtype_cat)
        xyztw = cat.apply(xyztw, cattype = cattype_cat, catmode = catmode_cat, xyzw1 = xyztw,xyzw0 = None, xyzw2 = xyzw_cat, D = D, mcat = [mcat_cat], Dtype = Dtype_cat)
     
    # C. convert xyz to ipt and split:
    ipt = xyz_to_ipt(xyzti, cieobs = cieobs['xyz'], M = Mxyz2lms) #input matrix as published in Smet et al. 2012, Energy and Buildings
    I,P,T = asplit(ipt)  

    # D. calculate specific (hue dependent) similarity indicators, Si:
    if len(xyzti.shape) == 3:
        ai = np.expand_dims(similarity_ai, axis = 1)
    else: 
        ai = similarity_ai
    a1,a2,a3,a4,a5 = asplit(ai)
    mahalanobis_d2 = (a3*np.power((P - a1),2.0) + a4*np.power((T - a2),2.0) + 2.0*a5*(P-a1)*(T-a2))
    if (len(mahalanobis_d2.shape)==3) & (mahalanobis_d2.shape[-1]==1):
        mahalanobis_d2 = mahalanobis_d2[:,:,0].T
    Si = np.exp(-0.5*mahalanobis_d2)

    # E. calculate general similarity indicator, Sa:
    Sa = avg(Si, axis = 0,keepdims = True)

    # F. rescale similarity indicators (Si, Sa) with a 0-1 scale to memory color rendition indices (Rmi, Rm) with a 0 - 100 scale:
    Rmi = scale_fcn(np.log(Si),scale_factor = scale_factor)
    Rm = np2d(scale_fcn(np.log(Sa),scale_factor = scale_factor))

    # G. calculate Rg (polyarea of test / polyarea of memory colours):
    if 'Rg' in out.split(','):
        I = I[...,None] #broadcast_shape(I, target_shape = None,expand_2d_to_3d = 0)
        a1 = a1[:,None]*np.ones(I.shape)#broadcast_shape(a1, target_shape = None,expand_2d_to_3d = 0)
        a2 = a2[:,None]*np.ones(I.shape) #broadcast_shape(a2, target_shape = None,expand_2d_to_3d = 0)
        a12 = np.concatenate((a1,a2),axis=2) #broadcast_shape(np.hstack((a1,a2)), target_shape = ipt.shape,expand_2d_to_3d = 0)
        ipt_mc = np.concatenate((I,a12),axis=2)
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue  = [rg_pars[x] for x in sorted(rg_pars.keys())]
    
        hue_bin_data = _get_hue_bin_data(ipt, ipt_mc, 
                                         start_hue = start_hue, nhbins = nhbins,
                                         normalized_chroma_ref = normalized_chroma_ref)
        Rg = _hue_bin_data_to_rg(hue_bin_data)

    if (out != 'Rm'):
        return  eval(out)
    else:
        return Rm
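A minimal usage sketch (assumes the luxpy objects used above are importable, e.g. _CIE_ILLUMINANTS as referenced in Example #8):

SPD = _CIE_ILLUMINANTS['C']                                  # hypothetical test spectrum
Rm = spd_to_mcri(SPD)                                        # general memory colour rendition index
Rm, Rmi, cct, duv = spd_to_mcri(SPD, out='Rm,Rmi,cct,duv')   # per-sample indices plus CCT and Duv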
Example #5
def box_m(*X, ni = None, verbosity = 0, robust = False, robust_alpha = 0.01):
    """
    Perform Box's M test (p>=2) to check equality of covariance matrices or Bartlett's test (p==1) for equality of variances.
    
    Args:
        :X: 
            | A number (k groups) of 2d-ndarrays (rows: samples, cols: variables) with data,
            | or a number of 2d-ndarrays with covariance matrices (supply ni!).
        :ni:
            | None, optional
            | If None: X contains data, else, X contains covariance matrices.
        :verbosity: 
            | 0, optional
            | If 1: print results.
        :robust:
            | False, optional
            | If True: remove outliers beyond the confidence ellipsoid before calculating
            |          the covariances.
        :robust_alpha:
            | 0.01, optional
            | Significance level of confidence ellipsoid marking the boundary for outliers.
    
    Returns:
        :statistic:
            | F or chi2 value (see len(dfs))
        :pval:
            | p-value
        :df:
            | degrees of freedom.
            | if len(dfs) == 2: F-test was used.
            | if len(dfs) == 1: chi2 approx. was used.
    
    Notes:
        1. If p==1: Reduces to Bartlett's test for equal variances.
        2. If (ni>20).all() & (p<6) & (k<6): then a more appropriate chi2 test is used in some cases.
    """

    k = len(X) # groups
    p = np.atleast_2d(X[0]).shape[1] # variables
    if p == 1: # for p == 1: only variance!
        det = lambda x: np.array(x)
    else:
        det = lambda x: np.linalg.det(x)
    if ni is None: # samples in each group
        
        # remove outliers before calculation of box M:
        if robust == True:
            X = [remove_outliers(Xi, alpha = robust_alpha) for Xi in X]
            
        ni = np.array([Xi.shape[0] for Xi in X])
        Si = np.array([np.cov(Xi.T) for Xi in X])
        if p == 1:
            Si = np.atleast_2d(Si).T
    else:
        Si = np.array([Xi for Xi in X]) # input are already cov matrices!
        ni = np.array(ni)
        if ni.shape[0] == 1:
            ni = ni*np.ones((k,))
        
    N = ni.sum()
    S = np.array([(ni[i]-1)*Si[i] for i in range(len(ni))]).sum(axis=0)/(N - k)

    M = (N-k)*np.log(det(S)) - ((ni-1)*np.log(det(Si))).sum()
    if p == 1:
        M = M[0]
    A1 = (2*p**2 + 3*p -1)/(6*(p+1)*(k-1))*((1/(ni-1)) - 1/(N - k)).sum()
    v1 = p*(p+1)*(k-1)/2
    A2 = (p-1)*(p+2)/(6*(k-1))*((1/(ni-1)**2) - 1/(N - k)**2).sum()

    if (A2 - A1**2) > 0:
        v2 = (v1 + 2)/(A2 - A1**2)
        b = v1/(1 - A1 -(v1/v2))
        Fv1v2 = M/b
        statistic = Fv1v2
        pval = 1.0 - sp.stats.f.cdf(Fv1v2,v1,v2)
        dfs = [v1,v2]
        
        if verbosity == 1:
            print('M = {:1.4f}, F = {:1.4f}, df1 = {:1.1f}, df2 = {:1.1f}, p = {:1.4f}'.format(M,Fv1v2,v1,v2,pval))
    else:
        v2 = (v1 + 2)/(A1**2 - A2)
        b = v2/(1 - A1 + (2/v2))
        Fv1v2 = v2*M/(v1*(b - M))
        statistic = Fv1v2
        pval = 1.0 - sp.stats.f.cdf(Fv1v2,v1,v2)
        dfs = [v1,v2]

        if (ni>20).all() & (p<6) & (k<6): #use Chi2v1
            chi2v1 = M*(1-A1)
            statistic = chi2v1
            pval = 1.0 - sp.stats.chi2.cdf(chi2v1,v1)
            dfs = [v1]
            if verbosity == 1:
                print('M = {:1.4f}, chi2 = {:1.4f}, df1 = {:1.1f}, p = {:1.4f}'.format(M,chi2v1,v1,pval))

        else:
            if verbosity == 1:
                print('M = {:1.4f}, F = {:1.4f}, df1 = {:1.1f}, df2 = {:1.1f}, p = {:1.4f}'.format(M,Fv1v2,v1,v2,pval))

    return statistic, pval, dfs
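A minimal usage sketch with synthetic data (assumes numpy as np and scipy as sp, as in the function body):

import numpy as np

rng = np.random.default_rng(1)
X1 = rng.normal(size=(50, 3))                        # group 1: 50 samples, 3 variables
X2 = 1.3 * rng.normal(size=(60, 3))                  # group 2: different spread
statistic, pval, dfs = box_m(X1, X2, verbosity=1)    # len(dfs) tells whether F or chi2 was used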
Example #6
def _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=None):
    """ 
    Calculate CAM02-UCS J'a'b' coordinates from xyz tristimulus values of sample and white point.
    
    Args:
        :xyz:
            | ndarray with sample tristimulus values
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
    
    Returns:
        :jab:
            | ndarray with J'a'b' coordinates.
    """
    #--------------------------------------------
    # Get/ set conditions parameters:
    if conditions is not None:
        surround_parameters = {
            'surrounds': ['avg', 'dim', 'dark'],
            'avg': {
                'c': 0.69,
                'Nc': 1.0,
                'F': 1.0,
                'FLL': 1.0
            },
            'dim': {
                'c': 0.59,
                'Nc': 0.9,
                'F': 0.9,
                'FLL': 1.0
            },
            'dark': {
                'c': 0.525,
                'Nc': 0.8,
                'F': 0.8,
                'FLL': 1.0
            }
        }
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69

    #--------------------------------------------
    # Define sensor space and cat matrices:
    mhpe = np.array([[0.38971, 0.68898, -0.07868], [-0.22981, 1.1834, 0.04641],
                     [0.0, 0.0, 1.0]
                     ])  # Hunt-Pointer-Estevez sensors (cone fundamentals)

    mcat = np.array([[0.7328, 0.4296, -0.1624], [-0.7036, 1.6975, 0.0061],
                     [0.0030, 0.0136, 0.9834]])  # CAT02 sensor space

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1:2].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5

    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #--------------------------------------------
    # transform from xyz, xyzw to cat sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T

    #--------------------------------------------
    # apply von Kries cat:
    rgbc = (
        (D * Yw / rgbw)[..., None] + (1 - D)
    ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat, rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400 * x**0.42 / (x**0.42 + 27.13) + 0.1

    rgbpa = naka_rushton(FL * rgbp / 100.0)
    p = np.where(rgbp < 0)
    rgbpa[p] = 0.1 - (naka_rushton(FL * np.abs(rgbp[p]) / 100.0) - 0.1)

    rgbwpa = naka_rushton(FL * rgbwp / 100.0)
    pw = np.where(rgbwp < 0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL * np.abs(rgbwp[pw]) / 100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
         (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
    b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b, a)
    et = (1.0 / 4.0) * (np.cos(h + 2.0) + 3.8)

    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0 * (A / Aw)**(c * z)

    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0 / 13.0) * Nc * Ncb * et *
         ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                      (21.0 / 20.0 * rgbpa[..., 2]))
    C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

    #--------------------------------------------
    # Calculate colorfulness, M:
    M = C * FL**0.25

    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    if ucs == True:
        KL, c1, c2 = 1.0, 0.007, 0.0228
        Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)
        Mp = (1.0 / c2) * np.log(1.0 + c2 * M)
    else:
        Jp = J
        Mp = M
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)

    return np.dstack((Jp, aMp, bMp))
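The final CAM02-UCS compression step used above can be isolated as a small sketch (hypothetical CIECAM02 correlates; assumes numpy as np):

import numpy as np

c1, c2 = 0.007, 0.0228                        # CAM02-UCS constants from the code above
J, M, h = 41.7, 10.9, np.deg2rad(219.0)       # hypothetical CIECAM02 lightness, colorfulness, hue
Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)  # compressed lightness J'
Mp = (1.0 / c2) * np.log(1.0 + c2 * M)        # compressed colorfulness M'
aMp, bMp = Mp * np.cos(h), Mp * np.sin(h)     # J'a'b' = (Jp, aMp, bMp)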
Example #7
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        conditions=None,
        ucstype='ucs',
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or inverse mode.
    
    Args:
        :data:
            | ndarray with sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
        :ucstype:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run the CAM in forward mode, else: inverse mode.
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout:
            | ndarray with J'a'b' coordinates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
    
    References:
        1. `M.R. Luo, G. Cui, and C. Li, 
        'Uniform colour spaces based on CIECAM02 colour appearance model,' 
        Color Res. Appl., vol. 31, no. 4, pp. 320–330, 2006.
        <http://onlinelibrary.wiley.com/doi/10.1002/col.20227/abstract)>`_
    """
    # get ucs parameters:
    if isinstance(ucstype, str):
        ucs_pars = _CAM_UCS_PARAMETERS
        ucs = ucs_pars[ucstype]
    else:
        ucs = ucstype
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    # set conditions to use in CIECAM02 (overrides None-default in ciecam02() !!!)
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    if forward == True:

        # run ciecam02 to get JMh:
        data = ciecam02(data,
                        xyzw,
                        outin='J,M,h',
                        conditions=conditions,
                        forward=True,
                        mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[...,
               0] = (1.0 + 100.0 * c1) * data[...,
                                              0] / (1.0 + c1 * data[..., 0])
        Mp = ((1.0 / c2) *
              np.log(1.0 + c2 * data[..., 1])) if (c2 != 0) else data[..., 1]
        camout[..., 1] = Mp * np.cos(data[..., 2] * np.pi / 180)
        camout[..., 2] = Mp * np.sin(data[..., 2] * np.pi / 180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc ciecam02 hue angle
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc cam02ucs and CIECAM02 colourfulness
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = ((np.exp(c2 * Mp) - 1.0) / c2) if (c2 != 0) else Mp

        # calculate ciecam02 aM, bM:
        aM = M * np.cos(h)
        bM = M * np.sin(h)

        # calc ciecam02 lightness
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0]) * c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)),
                        xyzw,
                        outin='J,aM,bM',
                        conditions=conditions,
                        forward=False,
                        mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)
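A round-trip usage sketch (hypothetical XYZ values; assumes numpy as np and that the luxpy ciecam02() and ajoin() called above are importable):

import numpy as np

xyz  = np.array([[19.01, 20.00, 21.78]])     # hypothetical sample XYZ
xyzw = np.array([[95.05, 100.00, 108.88]])   # hypothetical white point (roughly D65)
jabp = run(xyz, xyzw=xyzw, ucstype='ucs', forward=True)        # xyz -> J'a'b'
xyz_back = run(jabp, xyzw=xyzw, ucstype='ucs', forward=False)  # J'a'b' -> xyz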
Example #8
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  #input is expected to be absolute
            dataw[1:] = Lw * dataw[
                1:] / xyzw[:, 1:2]  #dataw = Lw*dataw # make absolute
        else:
            dataw = dataw  # make relative (Y=100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy(
    )  # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy(
    )  # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[
                0] == 1:  #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i +
                      1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[
                        0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[
                    0]  # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[
                    0]  #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

    #    if flipaxis0and1 == True: # loop over shortest dim.
    #        camout = np.transpose(camout, axes = (1,0,2))

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
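A minimal usage sketch (hypothetical relative XYZ input; assumes numpy as np and the luxpy helpers used above; dataw = None falls back to CIE illuminant C):

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])                  # hypothetical sample XYZ (Yw = 100)
lab = cam_sww16(xyz, Yb=20.0, Lw=400.0,
                inputtype='xyz', direction='forward')    # -> lightness/opponent correlates
xyz_back = cam_sww16(lab, inputtype='xyz',
                     direction='inverse')                # back to (relative) XYZ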
Example #9
def cam_sww16(data,
              dataw=None,
              Yb=20.0,
              Lw=400.0,
              Ccwb=None,
              relative=True,
              inputtype='xyz',
              direction='forward',
              parameters=None,
              cieobs='2006_10',
              match_to_conversionmatrix_to_cieobs=True):
    """
    A simple principled color appearance model based on a mapping of 
    the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz-to-lms
            | matrix to the one corresponding to that observer. 
            | If False: use the one set in parameters or _CAM_SWW16_PARAMETERS.
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
        | published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()
    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        match_to_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs
    )

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    #--------------------------------------------------------------------------
    # Setup default adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    data, dataw, camout, originalshape = _massage_input_and_init_output(
        data, dataw, inputtype=inputtype, direction=direction)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    Mxyz2lms = np.dot(
        np.diag(cLMS), Mxyz2lms
    )  # weight the xyz-to-lms conversion matrix with cLMS (cfr. stage 1 calculations)
    invMxyz2lms = np.linalg.inv(
        Mxyz2lms)  # Calculate the inverse lms-to-xyz conversion matrix
    MAab = np.array(
        [clambda, calpha, cbeta]
    )  # Create matrix with scale factors for L, M, S for quick matrix multiplications
    invMAab = np.linalg.inv(
        MAab)  # Pre-calculate its inverse to avoid repeat in loop.

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get absolute tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #-----------------------------------------------------------------------------
        # stage 1: calculate photon rates of stimulus, white point, and
        # adapting field: i.e. lmst, lmsw and lmsf
        #-----------------------------------------------------------------------------
        # Convert to white point l,m,s:
        lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']

        # Calculate adaptation field and convert to l,m,s:
        lmsf = (Yb / 100.0) * lmsw

        # Calculate lms of stimulus
        # or put adaptation lmsf in test field lmst for later use in inverse-mode (no xyz in 'inverse' mode!!!):
        lmst = (683.0 * np.dot(Mxyz2lms, xyzt.T).T /
                _CMF[cieobs]['K']) if (direction == 'forward') else lmsf

        #-----------------------------------------------------------------------------
        # stage 2: calculate cone outputs of stimulus lmstp
        #-----------------------------------------------------------------------------
        lmstp = math.erf(Cc *
                         (np.log(lmst / lms0) +
                          Cf * np.log(lmsf / lms0)))  # stimulus test field
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) +
                               Cf * np.log(lmsf / lms0)))  # adaptation field

        # add adaptation field lms temporarily to lmstp for quick calculation
        lmstp = np.vstack((lmsfp, lmstp))

        #-----------------------------------------------------------------------------
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        #-----------------------------------------------------------------------------
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        #-----------------------------------------------------------------------------
        #  stage 4: calculate recoded nerve signals, alphapp, betapp:
        #-----------------------------------------------------------------------------
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        #-----------------------------------------------------------------------------
        #  stage 5: calculate conscious color perception:
        #-----------------------------------------------------------------------------
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  stage 5 continued but SPLIT IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #--------------------------------------
        # FORWARD MODE TO PERCEPTUAL SIGNALS:
        #--------------------------------------
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]

            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation:
                alph_out = alph_int - Ccwb[0] * alph_int[0]
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            # stack together and remove adaptation field from vertical stack
            # camout is an ndarray with perceptual signals:
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':

            # stack cognitive pre-adapted adaptation field signals (first on stack) together:
            #labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data
            #(contains model perceptual signals in inverse mode!!!):
            lstar_out, alph_out, bet_out = asplit(data[i])

            #------------------------------------------------------------------------
            #  Inverse stage 5: undo cortical white-balance:
            #------------------------------------------------------------------------
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                alph_int = alph_out + Ccwb[0] * alph_int[0]
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            #---------------------------------------------------------------------------
            #  Inverse stage 4: pre-adapted perceptual signals to recoded nerve signals:
            #---------------------------------------------------------------------------
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            #---------------------------------------------------------------------------
            #  Inverse stage 3: recoded nerve signals to optic nerve signals:
            #---------------------------------------------------------------------------
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            #---------------------------------------------------------------------------
            #  Inverse stage 2: optic nerve signals to cone outputs:
            #---------------------------------------------------------------------------
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            #---------------------------------------------------------------------------
            #  Inverse stage 1: cone outputs to photon rates:
            #---------------------------------------------------------------------------
            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            #---------------------------------------------------------------------------
            #  Photon rates to absolute or relative tristimulus values:
            #---------------------------------------------------------------------------
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)
            if relative == True:
                xyzt = (100 / Lw) * xyzt

            # store in same named variable as forward mode:
            camout[i] = xyzt

            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            #  END inverse mode
            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    return _massage_output_data_to_original_shape(camout, originalshape)
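A small sketch of the observer-matching option added in this refactored version (hypothetical XYZ; assumes numpy as np and the same luxpy helpers as above):

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])
lab_default = cam_sww16(xyz)                             # default cieobs = '2006_10'
lab_1964 = cam_sww16(xyz, cieobs='1964_10',
                     match_to_conversionmatrix_to_cieobs=True)  # also swap the xyz-to-lms matrix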
Example #10
def run(data, xyzw, conditions=None, ucs_type='ucs', forward=True):
    """ 
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or inverse mode.
    
    Args:
        :data:
            | ndarray with sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
        :ucs_type:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run the CAM in forward mode, else: inverse mode.

    Returns:
        :camout:
            | ndarray with J'a'b' coordinates (forward mode)
            | or XYZ tristimulus values (inverse mode).
    
    Note:
        * This is a simplified, less flexible, but faster version of the main cam02ucs().
    """
    # get ucs parameters:
    if isinstance(ucs_type, str):
        ucs_pars = {
            'ucs': {
                'KL': 1.0,
                'c1': 0.007,
                'c2': 0.0228
            },
            'lcd': {
                'KL': 0.77,
                'c1': 0.007,
                'c2': 0.0053
            },
            'scd': {
                'KL': 1.24,
                'c1': 0.007,
                'c2': 0.0363
            }
        }
        ucs = ucs_pars[ucs_type]
    else:
        ucs = ucs_type
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    if forward == True:

        # run ciecam02 to get JMh:
        data = ciecam02(data,
                        xyzw,
                        out='J,M,h',
                        conditions=conditions,
                        forward=True)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[...,
               0] = (1.0 + 100.0 * c1) * data[...,
                                              0] / (1.0 + c1 * data[..., 0])
        Mp = (1.0 / c2) * np.log(1.0 + c2 * data[..., 1])
        camout[..., 1] = Mp * np.cos(data[..., 2] * np.pi / 180)
        camout[..., 2] = Mp * np.sin(data[..., 2] * np.pi / 180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc CAM02 hue angle
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc CAM02 and CIECAM02 colourfulness
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = (np.exp(c2 * Mp) - 1.0) / c2

        # calculate ciecam02 aM, bM:
        aM = M * np.cos(h)
        bM = M * np.sin(h)

        # calc CAM02 lightness
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0]) * c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)),
                        xyzw,
                        out='J,aM,bM',
                        conditions=conditions,
                        forward=False)
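A minimal usage sketch of this simplified wrapper (hypothetical XYZ values; assumes numpy as np and that the ciecam02() and ajoin() it calls are importable):

import numpy as np

xyz  = np.array([[19.01, 20.00, 21.78]])
xyzw = np.array([[95.05, 100.00, 108.88]])
jabp_lcd = run(xyz, xyzw, ucs_type='lcd', forward=True)      # CAM02-LCD J'a'b'
xyz_back = run(jabp_lcd, xyzw, ucs_type='lcd', forward=False)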