Example #1
def cct_to_neutral_loci_smet2018(cct, nlocitype='uw', out='duv,D'):
    """
    Calculate the most neutral appearing Duv10 and the degree of neutrality for a specified CCT using the models in Smet et al. (2018).
    
    Args:
        :cct:
            | ndarray with CCT values
        :nlocitype:
            | 'uw', optional
            | 'uw': use unique white models published in Smet et al. (2014).
            | 'ca': use degree of chromatic adaptation model from Smet et al. (2017).
        :out:
            | 'duv,D', optional
            | Specifies requested output (other options: 'duv', 'D').
            
    Returns:
        :duv: 
            | ndarray with most neutral Duv10 value corresponding to the cct input.
        :D: 
            | ndarray with the degree of neutrality at (cct, duv).
        
    References:
        1. `Smet, K.A.G., (2018), 
        Two Neutral White Illumination Loci Based on Unique White Rating and Degree of Chromatic Adaptation. 
        LEUKOS, 14(2), 55–67. <https://doi.org/10.1080/15502724.2017.1385400>`_

    Notes:
        1. Duv is specified in the CIE 1960 u10v10 chromaticity diagram as the models were developed using CIE 1964 10° tristimulus, chromaticity and CCT values.
        2. The parameter +0.0172 in Eq. 4b should be -0.0172.
    """
    if nlocitype == 'uw':
        duv = 0.0202 * np.log(cct / 3325) * np.exp(
            -1.445 * np.log(cct / 3325)**2) - 0.0137
        D = np.exp(-(6368 * ((1 / cct) -
                             (1 / 6410)))**2)  # degree of neutrality
    elif nlocitype == 'ca':
        duv = 0.0382 * np.log(cct / 2194) * np.exp(
            -0.679 * np.log(cct / 2194)**2) - 0.0172
        D = np.exp(-(3912 * ((1 / cct) -
                             (1 / 6795)))**2)  # degree of adaptation
    else:
        raise Exception('Unrecognized nlocitype')

    if out == 'duv,D':
        return duv, D
    elif out == 'duv':
        return duv
    elif out == 'D':
        return D
    else:
        raise Exception('smet_white_loci(): Requested output unrecognized.')
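A minimal usage sketch (assuming numpy is imported as np and the function above is in scope; the CCT values are purely illustrative):

import numpy as np

ccts = np.array([2700.0, 4000.0, 6500.0])
duv, D = cct_to_neutral_loci_smet2018(ccts, nlocitype='uw', out='duv,D')
print(duv)  # most neutral Duv10 at each CCT
print(D)    # degree of neutrality; for 'uw' it peaks at CCT = 6410 K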
Example #2
def log_scale(data,
              scale_factor=[6.73],
              scale_max=100.0):  # defaults from cie-224-2017 cri
    """
    Log-based color rendering index scale from Davis & Ohno (2010):
    
    | Rfi,a = 10 * ln(exp((100 - c1*DEi,a)/10) + 1).
                    
    Args:
        :data: 
            | float or list[floats] or ndarray 
        :scale_factor:
            | [6.73] or list[float] or ndarray, optional
            | Rescales color differences before subtracting them from :scale_max:
            | Note that the default value is the one from cie-224-2017.
        :scale_max:
            | 100.0, optional
            | Maximum value of linear scale
    
    Returns:
        :returns:
            | float or list[floats] or ndarray
        
    References:
        1. `W. Davis and Y. Ohno, 
        “Color quality scale,” (2010), 
        Opt. Eng., vol. 49, no. 3, pp. 33602–33616.
        <http://spie.org/Publications/Journal/10.1117/1.3360335>`_
        2. `CIE224:2017. CIE 2017 Colour Fidelity Index for accurate scientific use. 
        Vienna, Austria: CIE. (2017).
        <http://www.cie.co.at/index.php?i_ca_id=1027>`_

    """
    return 10.0 * np.log(
        np.exp((scale_max - scale_factor[0] * data) / 10.0) + 1.0)
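A quick usage sketch (assuming the function above is in scope): unlike a linear scale, the log form approaches 0 asymptotically for large color differences instead of going negative.

import numpy as np

DE = np.array([0.0, 5.0, 10.0, 20.0])   # illustrative color differences
Rf = log_scale(DE)                      # default scale_factor=[6.73] from CIE 224:2017
print(Rf)                               # ~100 at DE=0, decreasing toward 0 for large DE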
Example #3
def bvgpdf(x, y = None, mu = None, sigmainv = None):
    """
    Evaluate bivariate Gaussian probability density function (BVGPDF)
    
    Args:
        :x: 
            | scalar or list or ndarray (.ndim = 1 or 2) with 
            | x(y)-coordinates at which to evaluate bivariate Gaussian PD.
        :y: 
            | None or scalar or list or ndarray (.ndim = 1) with 
            | y-coordinates at which to evaluate bivariate Gaussian PD, optional.
            | If :y: is None, :x: should be a 2d array.
        :mu: 
            | None or ndarray (.ndim = 2) with center coordinates of 
            | bivariate Gaussian PD, optional. 
            | None defaults to ndarray([0,0]).
        :sigmainv:
            | None or ndarray with 'inverse covariance matrix', optional 
            | Determines the shape and orientation of the PD.
            | None defaults to numpy.eye(2).
     
    Returns:
         :returns:
             | ndarray with magnitude of BVGPDF(x,y)   
    
    """
    return np.exp(-0.5*mahalanobis2(x, y = y, mu = mu, sigmainv = sigmainv))
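Since the function defers to a mahalanobis2() helper, here is a self-contained sketch of the assumed core computation (illustrative inputs; einsum stands in for the helper):

import numpy as np

x = np.array([0.5, 1.0])
y = np.array([0.2, -0.3])
mu = np.zeros(2)          # default center
sigmainv = np.eye(2)      # default inverse covariance -> circular PD

xy = np.stack((x, y), axis=-1) - mu                    # centered coordinates
d2 = np.einsum('...i,ij,...j->...', xy, sigmainv, xy)  # squared Mahalanobis distance
print(np.exp(-0.5 * d2))                               # unnormalized BVGPDF magnitude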
Example #4
def psy_scale(data,
              scale_factor=[1.0 / 55.0, 3.0 / 2.0, 2.0],
              scale_max=100.0):  # defaults for cri2012
    """
    Psychometric based color rendering index scale from CRI2012: 
    
    | Rfi,a = 100 * (2 / (exp(c1*abs(DEi,a)**c2) + 1)) ** c3.
        
    Args:
        :data: 
            | float or list[floats] or ndarray 
        :scale_factor:
            | [1/55, 3/2, 2.0] or list[float] or ndarray, optional
            | Rescales color differences before subtracting them from :scale_max:
            | Note that the default values are those from Smet et al. (2013, LRT).
        :scale_max: 
            | 100.0, optional
            | Maximum value of linear scale
    
    Returns:
        :returns: 
            | float or list[floats] or ndarray
        
    References:
        1. `Smet, K., Schanda, J., Whitehead, L., & Luo, R. (2013). 
        CRI2012: A proposal for updating the CIE colour rendering index. 
        Lighting Research and Technology, 45, 689–709. 
        <http://lrt.sagepub.com/content/45/6/689>`_  
        
    """
    return scale_max * np.power(
        2.0 /
        (np.exp(scale_factor[0] * np.power(np.abs(data), scale_factor[1])) +
         1.0), scale_factor[2])
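A short usage sketch (function above in scope; DE values illustrative):

import numpy as np

DE = np.array([0.0, 5.0, 10.0, 20.0])
R = psy_scale(DE)   # defaults c1=1/55, c2=3/2, c3=2.0
print(R)            # exactly 100 at DE=0, decreasing smoothly toward 0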
Example #5
def spdBB(CCT=5500, wl=[400, 700, 5], Lw=25000, cieobs='1964_10'):
    """Generate a blackbody SPD for a temperature CCT [K], scaled so that its
    luminance equals Lw [cd/m^2] for the y-bar CMF of :cieobs:."""
    wl = getwlr(wl)
    dl = wl[1] - wl[0]
    # Planck's law (spectral exitance): 2*pi*h*c^2 / lambda^5 / (exp(h*c/(lambda*kB*T)) - 1):
    spd = 2 * np.pi * 6.626068E-34 * (299792458**2) / (
        (wl * 0.000000001)**
        5) / (np.exp(6.626068E-34 * 299792458 /
                     (wl * 0.000000001) / 1.3806503E-23 / CCT) - 1)
    # Rescale so that 683 * sum(spd * ybar * dl) equals the requested luminance Lw:
    spd = Lw * spd / (dl * 683 * (spd * cie_interp(
        _CMF[cieobs]['bar'].copy(), wl, kind='cmf')[2, :]).sum())
    return np.vstack((wl, spd))
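For reference, the inner expression is Planck's law; a standalone sketch with the constants written out (the 2*pi factor gives spectral exitance, matching the code above; grid and temperature are illustrative):

import numpy as np

h = 6.626068e-34     # Planck constant [J*s]
c = 299792458.0      # speed of light [m/s]
kB = 1.3806503e-23   # Boltzmann constant [J/K]

def planck_exitance(wl_nm, T):
    wl = wl_nm * 1e-9  # nm -> m
    return 2 * np.pi * h * c**2 / wl**5 / np.expm1(h * c / (wl * kB * T))

wl = np.arange(400.0, 701.0, 5.0)    # wavelength grid [nm]
spd = planck_exitance(wl, 5500.0)    # relative SPD of a 5500 K blackbody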
Example #6
def xyz_to_neutrality_smet2018(xyz10, nlocitype='uw', uw_model='Linvar'):
    """
    Calculate degree of neutrality using the unique white model in Smet et al. (2014) or the normalized (max = 1) degree of chromatic adaptation model from Smet et al. (2017).
    
    Args:
        :xyz10:
            | ndarray with CIE 1964 10° xyz tristimulus values.
        :nlocitype:
            | 'uw', optional
            | 'uw': use unique white models published in Smet et al. (2014).
            | 'ca': use degree of chromatic adaptation model from Smet et al. (2017).
        :uw_model:
            | 'Linvar', optional
            | Use Luminance invariant unique white model from Smet et al. (2014).
            | Other options: 'L200' (200 cd/m²), 'L1000' (1000 cd/m²) and 'L2000' (2000 cd/m²).
    
    Returns:
        :N: 
            | ndarray with calculated neutrality
            
    References:
        1. `Smet, K., Deconinck, G., & Hanselaer, P., (2014), 
        Chromaticity of unique white in object mode. 
        Optics Express, 22(21), 25830–25841. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-22-21-25830>`_
        
        2. `Smet, K.A.G., Zhai, Q., Luo, M.R., Hanselaer, P., (2017), 
        Study of chromatic adaptation using memory color matches, 
        Part II: colored illuminants, 
        Opt. Express, 25(7), pp. 8350-8365. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-25-7-8350&origin=search>`_
    """

    if nlocitype == 'uw':
        uv = xyz_to_Yuv(xyz10)[..., 1:]
        G0 = lambda up, vp, a: np.exp(-0.5 * (a[0] * (up - a[2])**2 + a[1] *
                                              (vp - a[3])**2 + 2 * a[4] *
                                              (up - a[2]) * (vp - a[3])))
        return G0(uv[..., 0:1], uv[..., 1:2],
                  _UW_NEUTRALITY_PARAMETERS_SMET2014[uw_model])
    elif nlocitype == 'ca':
        return cat.smet2017_D(xyz10, Dmax=1).T
    else:
        raise Exception('Unrecognized nlocitype')
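The 'uw' branch is just an (unnormalized) bivariate Gaussian in u'v'; a self-contained sketch of the G0 kernel with a purely illustrative parameter vector a = [a11, a22, u0, v0, a12] (not the published Smet 2014 values):

import numpy as np

G0 = lambda up, vp, a: np.exp(-0.5 * (a[0] * (up - a[2])**2
                                      + a[1] * (vp - a[3])**2
                                      + 2 * a[4] * (up - a[2]) * (vp - a[3])))
a_demo = [4e4, 4e4, 0.20, 0.46, 0.0]   # hypothetical center near the D65 u'v'
print(G0(0.21, 0.47, a_demo))          # neutrality in (0, 1]; 1 at the center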
Example #7
def fCLa(wl, Elv, integral, Norm = None, k = None, a_b_y = None, a_rod = None, RodSat = None,\
        Vphotl = None, Vscotl = None, Vl_mpl = None, Scl_mpl = None, Mcl = None, WL = None):
    """
    Local helper function that calculates CLa from Elv based on Eq. 1 
    in Rea et al. (2012).
    
    Args:
        The various model parameters as described in the paper and contained 
        in the dict _LRC_CONST.
        
    Returns:
        ndarray with CLa values.
        
    References:
        1. `Rea MS, Figueiro MG, Bierman A, and Hamner R (2012). 
        Modelling the spectral sensitivity of the human circadian system. 
        Light. Res. Technol. 44, 386–396.  
        <https://doi.org/10.1177/1477153511430474>`_
            
        2. `Rea MS, Figueiro MG, Bierman A, and Hamner R (2012). 
        Erratum: Modeling the spectral sensitivity of the human circadian system 
        (Lighting Research and Technology (2012) 44:4 (386-396)). 
        Light. Res. Technol. 44, 516.
        <https://doi.org/10.1177/1477153512467607>`_
        
        
    """
    dl = getwld(wl) 
    
    # Calculate piecewise function in Eq. 1 in Rea et al. 2012:
    
    #calculate value of condition function (~second term of 1st fcn):
    cond_number = integral(Elv*Scl_mpl*dl) - k*integral(Elv*Vl_mpl*dl)

    # Calculate second fcn:
    fcn2 = integral(Elv*Mcl*dl)

    # Calculate last term of 1st fcn:
    fcn1_3 = a_rod * (1 - np.exp(-integral(Vscotl*Elv*dl)/RodSat))

    # Satisfying cond. is effectively adding fcn1_2 and fcn1_3 to fcn1_1:
    CLa = Norm*(fcn2 + 1*(cond_number>=0)*(a_b_y*cond_number - fcn1_3))
    
    return CLa
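The piecewise condition in Eq. 1 is implemented by using the boolean mask (cond_number >= 0) as a 0/1 multiplier; a tiny standalone illustration of that idiom (toy numbers, not model values):

import numpy as np

cond = np.array([-0.2, 0.0, 0.3])   # toy condition values
base = np.array([1.0, 1.0, 1.0])    # stands in for the fcn2 term
extra = np.array([0.5, 0.5, 0.5])   # stands in for a_b_y*cond_number - fcn1_3

# the extra term is added only where the condition holds:
print(base + (cond >= 0) * extra)   # [1.  1.5 1.5]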
Example #8
def run(data,
        xyzw=None,
        outin='J,aM,bM',
        cieobs=_CIEOBS,
        conditions=None,
        forward=True,
        mcat='cat16',
        **kwargs):
    """ 
    Run the Jz,az,bz based color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values
            | None defaults to D65
        :cieobs:
            | _CIEOBS, optional
            | CMF set to use when calculating :xyzw: if this is None.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | String with inputs in data [inverse mode].
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS)
        :mcat:
            | 'cat16', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat16'
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
     
    References:
        1. `Safdar, M., Cui, G., Kim,Y. J., and  Luo, M. R.(2017).
        Perceptually uniform color space for image signals including high dynamic range and wide gamut.
        Opt. Express, vol. 25, no. 13, pp. 15131–15151, Jun. 2017. 
        <https://www.opticsexpress.org/abstract.cfm?URI=oe-25-13-15131>`_
    
        2. `Safdar, M., Hardeberg, J., Cui, G., Kim, Y. J., and Luo, M. R.(2018).
        A Colour Appearance Model based on Jzazbz Colour Space, 
        26th Color and Imaging Conference (2018), Vancouver, Canada, November 12-16, 2018, pp96-101.
        <https://doi.org/10.2352/ISSN.2169-2629.2018.26.96>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    # Define cone/chromatic adaptation sensor space:
    if (mcat is None) | (mcat == 'cat16'):
        mcat = cat._MCATS['cat16']
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]
    invmcat = np.linalg.inv(mcat)

    #--------------------------------------------
    # Get white point of D65 for the chromatic adaptation transform (CAT):
    xyzw_d65 = np.array([[
        9.5047e+01, 1.0000e+02, 1.0888e+02
    ]]) if cieobs == '1931_2' else spd_to_xyz(_CIE_D65, cieobs=cieobs)

    #--------------------------------------------
    # Get default white point:
    if xyzw is None:
        xyzw = xyzw_d65.copy()

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    z = 1.48 + FLL * n**0.5

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Apply CAT to white point:
    xyzwc = cat.apply_vonkries1(xyzw,
                                xyzw,
                                xyzw_d65,
                                D=D,
                                mcat=mcat,
                                invmcat=invmcat)

    #--------------------------------------------
    # Get Iz,az,bz coordinates:
    iabzw = xyz_to_jabz(xyzwc, ztype='iabz')

    #===================================================================
    # STIMULUS transformations:

    #--------------------------------------------
    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    if forward:
        # Apply CAT (adapt stimulus from xyzw to D65):
        xyzc = cat.apply_vonkries1(data,
                                   xyzw,
                                   xyzw_d65,
                                   D=D,
                                   mcat=mcat,
                                   invmcat=invmcat)

        # Get Iz,az,bz coordinates:
        iabz = xyz_to_jabz(xyzc, ztype='iabz')

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(iabz[..., 1], iabz[..., 2], htype='deg')
        et = 1.01 + np.cos(h * np.pi / 180 + 1.55)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        if ('J' in outin) | ('Q' in outin) | ('C' in outin) | (
                'M' in outin) | ('s' in outin) | ('aS' in outin) | (
                    'aC' in outin) | ('aM' in outin):
            J = 100.0 * (iabz[..., 0] / iabzw[..., 0])**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        if ('Q' in outin) | ('s' in outin) | ('aS' in outin):
            Q = 192.5 * (J / c) * (FL**0.64)

        #--------------------------------------------
        # calculate chroma, C:
        if ('C' in outin) | ('M' in outin) | ('s' in outin) | (
                'aS' in outin) | ('aC' in outin) | ('aM' in outin):
            C = ((1 / n)**0.074) * (
                (iabz[..., 1]**2.0 + iabz[..., 2]**2.0)**0.37) * (et**0.067)

        #--------------------------------------------
        # calculate colorfulness, M:
        if ('M' in outin) | ('s' in outin) | ('aM' in outin) | ('aS' in outin):
            M = 1.42 * C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        if ('s' in outin) | ('aS' in outin):
            s = 100.0 * (M / Q)**0.5

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:
        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = c * (Q / (192.5 * FL**0.64))  # inverse of Q = 192.5*(J/c)*FL**0.64
        else:
            raise Exception(
                'No lightness or brightness values in data[...,0]. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('aS' in outin):
            Q = 192.5 * (J / c) * (FL**0.64)
            M = Q * (MCs / 100.0)**2.0
            C = M / (1.42 * FL**0.25)

        if ('aM' in outin):  # convert M to C:
            C = MCs / (1.42 * FL**0.25)

        if ('aC' in outin):
            C = MCs

        #--------------------------------------------
        # calculate achromatic signal, Iz:
        Iz = iabzw[..., 0] * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = 1.01 + np.cos(h * np.pi / 180 + 1.55)

        #--------------------------------------------
        # calculate t (=a**2+b**2) from C:
        t = (n**0.074 * C * (1 / et)**0.067)**(1 / 0.37)

        #--------------------------------------------
        # Calculate az, bz:
        az = (t / (1 + np.tan(h * np.pi / 180)**2))**0.5
        bz = az * np.tan(h * np.pi / 180)

        #--------------------------------------------
        # join values and convert to xyz:
        xyzc = jabz_to_xyz(ajoin((Iz, az, bz)), ztype='iabz')

        #-------------------------------------------
        # Apply CAT from D65:
        xyz = cat.apply_vonkries1(xyzc,
                                  xyzw_d65,
                                  xyzw,
                                  D=D,
                                  mcat=mcat,
                                  invmcat=invmcat)

        return xyz
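When D is None, the code falls back to the CIECAM02 degree-of-adaptation formula D = F*(1 - (1/3.6)*exp((-La - 42)/92)); a quick standalone check of its behavior (illustrative La values):

import numpy as np

F = 1.0                                 # average surround
La = np.array([10.0, 100.0, 1000.0])    # adapting luminance [cd/m^2]
D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))
print(D)   # rises toward 1 (full adaptation) with increasing La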
Example #9
def _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=None):
    """ 
    Calculate CAM02-UCS J'a'b' coordinates from xyz tristimulus values of sample and white point.
    
    Args:
        :xyz:
            | ndarray with sample tristimulus values
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
    
    Returns:
        :jab:
            | ndarray with J'a'b' coordinates.
    """
    #--------------------------------------------
    # Get/ set conditions parameters:
    if conditions is not None:
        surround_parameters = {
            'surrounds': ['avg', 'dim', 'dark'],
            'avg': {
                'c': 0.69,
                'Nc': 1.0,
                'F': 1.0,
                'FLL': 1.0
            },
            'dim': {
                'c': 0.59,
                'Nc': 0.9,
                'F': 0.9,
                'FLL': 1.0
            },
            'dark': {
                'c': 0.525,
                'Nc': 0.8,
                'F': 0.8,
                'FLL': 1.0
            }
        }
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69

    #--------------------------------------------
    # Define sensor space and cat matrices:
    mhpe = np.array([[0.38971, 0.68898, -0.07868], [-0.22981, 1.1834, 0.04641],
                     [0.0, 0.0, 1.0]
                     ])  # Hunt-Pointer-Estevez sensors (cone fundamentals)

    mcat = np.array([[0.7328, 0.4296, -0.1624], [-0.7036, 1.6975, 0.0061],
                     [0.0030, 0.0136, 0.9834]])  # CAT02 sensor space

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1:2].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5

    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #--------------------------------------------
    # transform from xyz, xyzw to cat sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T

    #--------------------------------------------
    # apply von Kries cat:
    rgbc = (
        (D * Yw / rgbw)[..., None] + (1 - D)
    ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat, rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400 * x**0.42 / (x**0.42 + 27.13) + 0.1

    rgbpa = naka_rushton(FL * rgbp / 100.0)
    p = np.where(rgbp < 0)
    rgbpa[p] = 0.1 - (naka_rushton(FL * np.abs(rgbp[p]) / 100.0) - 0.1)

    rgbwpa = naka_rushton(FL * rgbwp / 100.0)
    pw = np.where(rgbwp < 0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL * np.abs(rgbwp[pw]) / 100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
         (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
    b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b, a)
    et = (1.0 / 4.0) * (np.cos(h + 2.0) + 3.8)

    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0 * (A / Aw)**(c * z)

    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0 / 13.0) * Nc * Ncb * et *
         ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                      (21.0 / 20.0 * rgbpa[..., 2]))
    C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

    #--------------------------------------------
    # Calculate colorfulness, M:
    M = C * FL**0.25

    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    if ucs == True:
        KL, c1, c2 = 1.0, 0.007, 0.0228
        Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)
        Mp = (1.0 / c2) * np.log(1.0 + c2 * M)
    else:
        Jp = J
        Mp = M
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)

    return np.dstack((Jp, aMp, bMp))
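The response compression used above, including the mirrored handling of negative sensor values, can be isolated as a small self-contained sketch (same 400 / 27.13 / 0.42 / 0.1 constants):

import numpy as np

def naka_rushton_signed(x, FL=1.0):
    f = lambda u: 400.0 * u**0.42 / (u**0.42 + 27.13) + 0.1
    xa = f(FL * np.abs(x) / 100.0)
    return np.where(x >= 0, xa, 0.1 - (xa - 0.1))  # odd-symmetric about the 0.1 noise floor

print(naka_rushton_signed(np.array([-50.0, 0.0, 50.0, 100.0])))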
Example #10
def run(data, xyzw, conditions=None, ucs_type='ucs', forward=True):
    """ 
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
        :ucs_type:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.

    Returns:
        :camout:
            | ndarray with J'a'b' coordinates or whatever correlates requested in out.
    
    Note:
        * This is a simplified, less flexible, but faster version than the main cam02ucs().
    """
    # get ucs parameters:
    if isinstance(ucs_type, str):
        ucs_pars = {
            'ucs': {
                'KL': 1.0,
                'c1': 0.007,
                'c2': 0.0228
            },
            'lcd': {
                'KL': 0.77,
                'c1': 0.007,
                'c2': 0.0053
            },
            'scd': {
                'KL': 1.24,
                'c1': 0.007,
                'c2': 0.0363
            }
        }
        ucs = ucs_pars[ucs_type]
    else:
        ucs = ucs_type
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    if forward == True:

        # run ciecam02 to get JMh:
        data = ciecam02(data,
                        xyzw,
                        out='J,M,h',
                        conditions=conditions,
                        forward=True)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[...,
               0] = (1.0 + 100.0 * c1) * data[...,
                                              0] / (1.0 + c1 * data[..., 0])
        Mp = (1.0 / c2) * np.log(1.0 + c2 * data[..., 1])
        camout[..., 1] = Mp * np.cos(data[..., 2] * np.pi / 180)
        camout[..., 2] = Mp * np.sin(data[..., 2] * np.pi / 180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc CAM02 hue angle
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc CAM02 and CIECAM02 colourfulness
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = (np.exp(c2 * Mp) - 1.0) / c2

        # calculate ciecam02 aM, bM:
        aM = M * np.cos(h)
        bM = M * np.sin(h)

        # calc CAM02 lightness
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0]) * c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)),
                        xyzw,
                        out='J,aM,bM',
                        conditions=conditions,
                        forward=False)
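The UCS mapping and its inverse used in the two branches above are exact inverses of each other; a minimal round-trip check with the 'ucs' coefficients (illustrative J, M):

import numpy as np

c1, c2 = 0.007, 0.0228   # 'ucs' coefficients (KL = 1.0)
J, M = 41.7, 29.5

Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)   # J  -> J'
Mp = (1.0 / c2) * np.log(1.0 + c2 * M)         # M  -> M'
J_back = Jp / (1.0 + (100.0 - Jp) * c1)        # J' -> J
M_back = (np.exp(c2 * Mp) - 1.0) / c2          # M' -> M
print(np.allclose([J_back, M_back], [J, M]))   # True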
Example #11
def apply_poly_model_at_hue_x(poly_model, pmodel, dCHoverC_res, \
                              hx = None, Cxr = 40, sig = _VF_SIG):
    """
    Applies base color shift model at (hue,chroma) coordinates
    
    Args:
        :poly_model: 
            | function handle to model
        :pmodel:
            | ndarray with model parameters.
        :dCHoverC_res:
            | ndarray with residuals between 'dCoverC,dH' of samples 
            | and 'dCoverC,dH' predicted by the model.
            | Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |      (predicted from model, see notes luxpy.cri.get_poly_model())
        :hx:
            | None or ndarray, optional
            | None defaults to np.arange(np.pi/10.0,2*np.pi,2*np.pi/10.0)
        :Cxr:
            | 40, optional
        :sig: 
            | _VF_SIG or float, optional
            | Determines smooth transition between hue-bin-boundaries (no hard 
            | cutoff at hue bin boundary).
        
    Returns:
        :returns: 
            | ndarrays with dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
            | Note '_sig' denotes the uncertainty: 
            |     e.g.  dH_x_sig is the uncertainty of dH at input (hue/chroma).
    """

    if hx is None:
        dh = 2 * np.pi / 10.0
        hx = np.arange(
            dh / 2, 2 * np.pi, dh
        )  #hue angles at which to apply model, i.e. calculate 'average' measures

    # A calculate reference coordinates:
    axr = Cxr * np.cos(hx)
    bxr = Cxr * np.sin(hx)

    # B apply model at reference coordinates to obtain test coordinates:
    axt, bxt, Cxt, hxt, axr, bxr, Cxr, hxr = apply_poly_model_at_x(
        poly_model, pmodel, axr, bxr)

    # C Calculate dC/C, dH for test and ref at fixed hues:
    dCoverC_x = (Cxt - Cxr) / (np.hstack((Cxr + Cxt)).max())
    dH_x = (180 / np.pi) * (hxt - hxr)
    #    dCoverC_x = np.round(dCoverC_x,decimals = 2)
    #    dH_x = np.round(dH_x,decimals = 0)

    # D calculate 'average' noise measures using sig-value:
    href = dCHoverC_res[:, 0:1]
    dCoverC_res = dCHoverC_res[:, 1:2]
    dHoverC_res = dCHoverC_res[:, 2:3]
    dHsigi = np.exp((np.dstack(
        (np.abs(hx - href), np.abs(hx - href - 2 * np.pi),
         np.abs(hx - href + 2 * np.pi))).min(axis=2)**2) / (-2) / sig)  # circular hue difference
    dH_x_sig = (180 / np.pi) * (np.sqrt(
        (dHsigi * (dHoverC_res**2)).sum(axis=0, keepdims=True) /
        dHsigi.sum(axis=0, keepdims=True)))
    #dH_x_sig_avg = np.sqrt(np.sum(dH_x_sig**2,axis=1)/hx.shape[0])
    dCoverC_x_sig = (np.sqrt(
        (dHsigi * (dCoverC_res**2)).sum(axis=0, keepdims=True) /
        dHsigi.sum(axis=0, keepdims=True)))
    #dCoverC_x_sig_avg = np.sqrt(np.sum(dCoverC_x_sig**2,axis=1)/hx.shape[0])

    return dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
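The dHsigi weighting relies on the circular difference between the hue grid hx and the sample hues; a small sketch of that min-over-wraps distance with both ±2π candidates made explicit:

import numpy as np

def circular_diff(h1, h2):
    # smallest absolute angular difference [rad]
    d = h1 - h2
    return np.min(np.abs(np.stack((d, d - 2 * np.pi, d + 2 * np.pi))), axis=0)

print(circular_diff(0.1, 2 * np.pi - 0.1))   # ~0.2, not ~6.08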
Example #12
def cie2006cmfsEx(age = 32,fieldsize = 10, wl = None,\
                  var_od_lens = 0, var_od_macula = 0, \
                  var_od_L = 0, var_od_M = 0, var_od_S = 0,\
                  var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,\
                  out = 'LMS', allow_negative_values = False):
    """
    Generate Individual Observer CMFs (cone fundamentals) 
    based on CIE2006 cone fundamentals and published literature 
    on observer variability in color matching and in physiological parameters.
    
    Args:
        :age: 
            | 32 or float or int, optional
            | Observer age
        :fieldsize:
            | 10, optional
            | Field size of stimulus in degrees (between 2° and 10°).
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :var_od_lens:
            | 0, optional
            | Std Dev. in peak optical density [%] of lens.
        :var_od_macula:
            | 0, optional
            | Std Dev. in peak optical density [%] of macula.
        :var_od_L:
            | 0, optional
            | Std Dev. in peak optical density [%] of L-cone.
        :var_od_M:
            | 0, optional
            | Std Dev. in peak optical density [%] of M-cone.
        :var_od_S:
            | 0, optional
            | Std Dev. in peak optical density [%] of S-cone.
        :var_shft_L:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of L-cone. 
        :var_shft_M:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of M-cone.  
        :var_shft_S:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of S-cone. 
        :out: 
            | 'LMS', optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions 
            | should not have negative values.
            | If False: negative values are clamped to zero (X[X<0] = 0).
            
    Returns:
        :returns: 
            | - 'LMS' : ndarray with individual observer area-normalized 
            |           cone fundamentals. Wavelengths have been added.
                
            | [- 'trans_lens': ndarray with lens transmission 
            |      (no wavelengths added, no interpolation)
            |  - 'trans_macula': ndarray with macula transmission 
            |      (no wavelengths added, no interpolation)
            |  - 'sens_photopig' : ndarray with photopigment sens. 
            |      (no wavelengths added, no interpolation)]
            
    References:
         1. `Asano Y, Fairchild MD, and Blondé L (2016). 
         Individual Colorimetric Observer Model. 
         PLoS One 11, 1–19. 
         <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
        
         2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). 
         Color matching experiment for highlighting interobserver variability. 
         Color Res. Appl. 41, 530–539. 
         <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
         
         3. `CIE (2006). 
         Fundamental Chromaticity Diagram with Physiological Axes - Part I 
         (Vienna: CIE). 
         <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 
         
         4. `Asano's Individual Colorimetric Observer Model 
         <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """
    fs = fieldsize
    rmd = _INDVCMF_DATA['rmd'].copy()
    LMSa = _INDVCMF_DATA['LMSa'].copy()
    docul = _INDVCMF_DATA['docul'].copy()

    # field size corrected macular density:
    pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * (
        1 + var_od_macula / 100)  # varied peak optical density of macula
    corrected_rmd = rmd * pkOd_Macula

    # age corrected lens/ocular media density:
    if (age <= 60):
        correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2]
    else:
        correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2]
    correct_lomd = correct_lomd * (1 + var_od_lens / 100
                                   )  # varied overall optical density of lens

    # Peak Wavelength Shift:
    wl_shifted = np.empty(LMSa.shape)
    wl_shifted[0] = _WL + var_shft_L
    wl_shifted[1] = _WL + var_shft_M
    wl_shifted[2] = _WL + var_shft_S

    LMSa_shft = np.empty(LMSa.shape)
    kind = 'cubic'
    LMSa_shft[0] = sp.interpolate.interp1d(wl_shifted[0],
                                           LMSa[0],
                                           kind=kind,
                                           bounds_error=False,
                                           fill_value="extrapolate")(_WL)
    LMSa_shft[1] = sp.interpolate.interp1d(wl_shifted[1],
                                           LMSa[1],
                                           kind=kind,
                                           bounds_error=False,
                                           fill_value="extrapolate")(_WL)
    LMSa_shft[2] = sp.interpolate.interp1d(wl_shifted[2],
                                           LMSa[2],
                                           kind=kind,
                                           bounds_error=False,
                                           fill_value="extrapolate")(_WL)
    #    LMSa[2,np.where(_WL >= _WL_CRIT)] = 0 #np.nan # Not defined above 620nm
    #    LMSa_shft[2,np.where(_WL >= _WL_CRIT)] = 0

    ssw = np.hstack(
        (0, np.sign(np.diff(LMSa_shft[2, :]))
         ))  #detect poor interpolation (sign switch due to instability)
    LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan

    # corrected LMS (no age correction):
    pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (
        1 + var_od_L / 100)  # varied peak optical density of L-cone
    pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (
        1 + var_od_M / 100)  # varied peak optical density of M-cone
    pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * (
        1 + var_od_S / 100)  # varied peak optical density of S-cone

    alpha_lms = 0. * LMSa_shft
    alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0]))
    alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1]))
    alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2]))

    # this fix is required because the above math fails for alpha_lms[2,:]==0
    alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0

    # Corrected to Corneal Incidence:
    lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones(
        alpha_lms.shape)

    # Corrected to Energy Terms:
    lms_bar = lms_barq * _WL

    # Set NaN values to zero:
    lms_bar[np.isnan(lms_bar)] = 0

    # normalized:
    LMS = 100 * lms_bar / np.nansum(lms_bar, axis=1, keepdims=True)

    # Output extra:
    trans_lens = 10**(-correct_lomd)
    trans_macula = 10**(-corrected_rmd)
    sens_photopig = alpha_lms * _WL

    # Add wavelengths:
    LMS = np.vstack((_WL, LMS))

    if ('xyz' in out.lower().split(',')):
        LMS = lmsb_to_xyzb(LMS,
                           fieldsize,
                           out='xyz',
                           allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    # Interpolate/extrapolate:
    if wl is None:
        interpolation = None
    else:
        interpolation = 'cubic'
    LMS = spd(LMS, wl=wl, interpolation=interpolation, norm_type='area')

    if (out == 'LMS'):
        return LMS
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'):
        return LMS, trans_lens, trans_macula, sens_photopig
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'):
        return LMS, trans_lens, trans_macula, sens_photopig, LMSa
    else:
        return eval(out)
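The peak-wavelength shift above works by attaching each fundamental to a shifted wavelength grid and resampling it back onto the original grid; a standalone sketch of that scheme with a toy curve (cubic interpolation, extrapolating at the edges):

import numpy as np
import scipy.interpolate as sint

wl = np.arange(390.0, 781.0, 5.0)              # original grid [nm]
curve = np.exp(-0.5 * ((wl - 560) / 40)**2)    # toy 'fundamental' peaking at 560 nm

shift = 2.0                                    # peak shift [nm]
shifted = sint.interp1d(wl + shift, curve, kind='cubic',
                        bounds_error=False,
                        fill_value='extrapolate')(wl)
print(wl[np.argmax(shifted)])                  # peak moved to ~562 nm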
Example #13
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        conditions=None,
        ucstype='ucs',
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
        :ucstype:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout:
            | ndarray with J'a'b' coordinates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
    
    References:
        1. `M.R. Luo, G. Cui, and C. Li, 
        'Uniform colour spaces based on CIECAM02 colour appearance model,' 
        Color Res. Appl., vol. 31, no. 4, pp. 320–330, 2006.
        <http://onlinelibrary.wiley.com/doi/10.1002/col.20227/abstract>`_
    """
    # get ucs parameters:
    if isinstance(ucstype, str):
        ucs_pars = _CAM_UCS_PARAMETERS
        ucs = ucs_pars[ucstype]
    else:
        ucs = ucstype
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    # set conditions to use in CIECAM02 (overrides None-default in ciecam02() !!!)
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    if forward == True:

        # run ciecam02 to get JMh:
        data = ciecam02(data,
                        xyzw,
                        outin='J,M,h',
                        conditions=conditions,
                        forward=True,
                        mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[...,
               0] = (1.0 + 100.0 * c1) * data[...,
                                              0] / (1.0 + c1 * data[..., 0])
        Mp = ((1.0 / c2) *
              np.log(1.0 + c2 * data[..., 1])) if (c2 != 0) else data[..., 1]
        camout[..., 1] = Mp * np.cos(data[..., 2] * np.pi / 180)
        camout[..., 2] = Mp * np.sin(data[..., 2] * np.pi / 180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc ciecam02 hue angle
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc cam02ucs and CIECAM02 colourfulness
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = ((np.exp(c2 * Mp) - 1.0) / c2) if (c2 != 0) else Mp

        # calculate ciecam02 aM, bM:
        aM = M * np.cos(h)
        bM = M * np.sin(h)

        # calc ciecam02 lightness
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0]) * c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)),
                        xyzw,
                        outin='J,aM,bM',
                        conditions=conditions,
                        forward=False,
                        mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)
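The UCS, LCD and SCD variants differ only in the (KL, c1, c2) triple; a quick comparison of how c2 compresses colorfulness (c2 values copied from the ucs_pars table in Example #10):

import numpy as np

c2_per_variant = {'ucs': 0.0228, 'lcd': 0.0053, 'scd': 0.0363}
M = 40.0
for name, c2 in c2_per_variant.items():
    Mp = ((1.0 / c2) * np.log(1.0 + c2 * M)) if c2 != 0 else M
    print(name, round(Mp, 2))   # larger c2 -> stronger compression of M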
Example #14
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        outin='J,aM,bM',
        conditions=None,
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values 
        :Yw: 
            | None, optional
            | Luminance factor of white point.
            | If None: xyz (in data) and xyzw are entered as relative tristimulus values 
            |          (normalized to Yw = 100). 
            | If not None: input tristimulus are absolute and Yw is used to
            |              rescale the absolute values to relative ones 
            |              (relative to a reference perfect white diffuser 
            |               with Ywr = 100). 
            | Yw can be < 100 for e.g. paper as white point. If Yw is None, it 
            | is assumed that the relative Y-tristimulus value in xyzw 
            | represents the luminance factor Yw.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
        
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman, 
        "The CIECAM02 color appearance model," 
        IS&T/SID Tenth Color Imaging Conference, p. 23, 2002.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS
    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals)
    mhpe = cat._MCATS['hpe']

    # chromatic adaptation sensors:
    if (mcat is None) | (mcat == 'cat02'):
        mcat = cat._MCATS['cat02']
        if yellowbluepurplecorrect == 'brill-suss':
            mcat = cat._MCATS[
                'cat02-bs']  # for yellow-blue problem, Brill [Color Res Appl 2006;31:142-145] and Brill and Süsstrunk [Color Res Appl 2008;33:424-426]
        elif yellowbluepurplecorrect == 'jiang-luo':
            mcat = cat._MCATS[
                'cat02-jiang-luo']  # for yellow-blue problem + purple line problem
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat, np.linalg.inv(mhpe))

    #--------------------------------------------
    # Set Yw:
    if Yw is not None:
        Yw = (Yw * np.ones_like(xyzw[..., 1:2]).T)
    else:
        Yw = xyzw[..., 1:2].T

    #--------------------------------------------
    # calculate condition dependent parameters:
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5
    yw = xyzw[..., 1:2].T  # original Y in xyzw (pre-transposed)

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Normalize white point (keep transpose for next step):
    xyzw = Yw * xyzw.T / yw

    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = math.dot23(mcat, xyzw)

    #--------------------------------------------
    # apply von Kries cat:
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = math.dot23(mhpe_x_invmcat, rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x,
                                         scaling=400,
                                         n=0.42,
                                         sig=27.13**(1 / 0.42),
                                         noise=0.1,
                                         forward=forward)

    pw = np.where(rgbwp < 0)

    # if requested apply yellow-blue correction:
    if (yellowbluepurplecorrect == 'brill-suss'
        ):  # Brill & Süsstrunk approach, for purple line problem
        rgbwp[pw] = 0.0
    rgbwpa = NK(FL * rgbwp / 100.0, True)
    rgbwpa[pw] = 0.1 - (NK(FL * np.abs(rgbwp[pw]) / 100.0, True) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    #===================================================================
    # STIMULUS transformations
    if forward:

        #--------------------------------------------
        # Normalize xyz (keep transpose for matrix multiplication in next step):
        xyz = (Yw / yw)[..., None] * data.T

        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, xyz)

        #--------------------------------------------
        # apply von Kries cat:
        rgbc = (
            (D * Yw / rgbw)[..., None] + (1 - D)
        ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat, rgbc).T

        #--------------------------------------------
        # apply Naka-Rushton response compression:
        p = np.where(rgbp < 0)
        if (yellowbluepurplecorrect == 'brill-suss'
            ):  # Brill & Süsstrunk approach, for purple line problem
            rgbp[p] = 0.0
        rgbpa = NK(FL * rgbp / 100.0, forward)
        rgbpa[p] = 0.1 - (NK(FL * np.abs(rgbp[p]) / 100.0, forward) - 0.1)

        #--------------------------------------------
        # Calculate achromatic signal:
        A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
             (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb

        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
        b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a, b, htype='deg')
        et = (1.0 / 4.0) * (np.cos(h * np.pi / 180 + 2.0) + 3.8)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (A / Aw)**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)

        #--------------------------------------------
        # calculate chroma, C:
        t = ((50000.0 / 13.0) * Nc * Ncb * et *
             ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                          (21.0 / 20.0 * rgbpa[..., 2]))
        C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

        #--------------------------------------------
        # calculate colorfulness, M:
        M = C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)**0.5
        S = s  # make an extra variable, just in case 'S' is requested

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / ((Aw + 4.0) * (FL**0.25) * (4.0 / c)))**2.0
        else:
            raise Exception(
                'No lightness or brightness values in data. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('S' in outin[1]):
            Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)
            M = Q * (MCs / 100.0)**2.0
            C = M / (FL**0.25)

        if ('M' in outin[1]):  # convert M to C:
            C = MCs / (FL**0.25)

        if ('C' in outin[1]):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J / 100.0)**(1.0 / 2.0) * (1.64 - 0.29**n)**0.73))**(1.0 /
                                                                        0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h * np.pi / 180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h * np.pi / 180.0)
        bt = np.sin(h * np.pi / 180.0)
        p1 = (50000.0 / 13.0) * Nc * Ncb * et / t
        p2 = A / Nbb + 0.305
        p3 = 21.0 / 20.0
        p4 = p1 / bt
        p5 = p1 / at

        #--------------------------------------------
        q = (np.abs(bt) < np.abs(at))  # where |sin(h)| < |cos(h)|: divide by at instead of bt for numerical stability

        b = p2 * (2.0 + p3) * (460.0 / 1403.0) / (p4 + (2.0 + p3) *
                                                  (220.0 / 1403.0) *
                                                  (at / bt) -
                                                  (27.0 / 1403.0) + p3 *
                                                  (6300.0 / 1403.0))
        a = b * (at / bt)

        a[q] = p2[q] * (2.0 + p3) * (460.0 / 1403.0) / (p5[q] + (2.0 + p3) *
                                                        (220.0 / 1403.0) -
                                                        ((27.0 / 1403.0) - p3 *
                                                         (6300.0 / 1403.0)) *
                                                        (bt[q] / at[q]))
        b[q] = a[q] * (bt[q] / at[q])

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0
        gpa = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0
        bpa = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa, gpa, bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0 / FL) * NK(rgbpa, forward)

        # apply yellow-blue correction:
        if (yellowbluepurplecorrect == 'brill-suss'
            ):  # Brill & Süsstrunk approach for the purple-line problem
            p = np.where(rgbp < 0.0)
            rgbp[p] = 0.0

        #--------------------------------------------
        # convert from cone sensors (hpe) back to cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D * Yw / rgbw)[..., None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb)

        #--------------------------------------------
        # unnormalize xyz:
        xyz = ((yw / Yw)[..., None] * xyz).T

        return xyz
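
The inverse branch above undoes each forward step in turn, which requires the response-compression function NK to be invertible. Below is a minimal, self-contained sketch of the standard CIECAM02 post-adaptation nonlinearity that NK presumably implements (the actual luxpy helper may differ in details such as sign handling):

import numpy as np

def nk(x, forward=True):
    # Standard CIECAM02 post-adaptation compression and its inverse;
    # the constants 400, 0.42, 27.13 and the 0.1 noise floor are from CIE 159:2004.
    if forward:
        xp = np.abs(x)**0.42
        return np.sign(x) * 400.0 * xp / (xp + 27.13) + 0.1
    y = x - 0.1
    return np.sign(y) * (27.13 * np.abs(y) / (400.0 - np.abs(y)))**(1.0 / 0.42)

x = np.array([0.5, 5.0, 50.0])
assert np.allclose(nk(nk(x, True), False), x)  # round trip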
Example #15
0
def get_degree_of_adaptation(Dtype=None, **kwargs):
    """
    Calculates the degree of adaptation D according to one of several 
    functions published in the literature. 
    
    Args:
        :Dtype:
            | None, optional
            |   If None: kwargs should contain 'D' with value.
            |   If 'manual': kwargs should contain 'D' with value.
            | If 'cat02' or 'cat16': kwargs should contain keys 'F' and 'La'.
            |     Calculate D according to CAT02 or CAT16 model:
            |        D = F*(1-(1/3.6)*numpy.exp((-La-42)/92))
            | If 'cmc': kwargs should contain 'La', 'La0' (or 'La2') and 'order':
            |     for 'order' = '1>0': 'La' is set to La1 and 'La0' to La0.
            |     for 'order' = '0>2': 'La' is set to La0 and 'La0' to La1.
            |     for 'order' = '1>2': 'La' is set to La1 and 'La2' to La0.
            |     D is calculated as follows:
            |        D = 0.08*numpy.log10(La1+La0)+0.76-0.45*(La1-La0)/(La1+La0)
            | If 'smet2017': kwargs should contain 'xyzw' and 'Dmax'
            |  (see Smet2017_D for more details).
            | If "? user defined", then D is calculated by:
            |        D = numpy.array(eval(:Dtype:))  
    
    Returns:
         :D: 
            | ndarray with degree of adaptation values.
    Notes:
        1. D passes either right through or D is calculated following some 
           D-function (Dtype) published in literature.
        2. D is limited to values between zero and one
        3. If kwargs do not contain the required parameters, 
           an exception is raised.
    """
    try:
        if Dtype is None:
            PAR = ["D"]
            D = np.array([kwargs['D']])
        elif Dtype == 'manual':
            PAR = ["D"]
            D = np.array([kwargs['D']])
        elif (Dtype == 'cat02') | (Dtype == 'cat16'):
            PAR = ["F, La"]
            F = kwargs['F']

            if isinstance(F, str):  #CIECAM02 / CAT02 surround based F values
                if (F == 'avg') | (F == 'average'):
                    F = 1
                elif (F == 'dim'):
                    F = 0.9
                elif (F == 'dark'):
                    F = 0.8
                elif (F == 'disp') | (F == 'display'):
                    F = 0.0
                else:
                    F = eval(F)

            F = np.array([F])
            La = np.array([kwargs['La']])
            D = F * (1 - (1 / 3.6) * np.exp((-La - 42) / 92))
        elif Dtype == 'cmc':
            PAR = ["La, La0, order"]
            order = np.array([kwargs['order']])
            if order == '1>0':
                La1 = np.array([kwargs['La']])
                La0 = np.array([kwargs['La0']])
            elif order == '0>2':
                La0 = np.array([kwargs['La']])
                La1 = np.array([kwargs['La0']])
            elif order == '1>2':
                La1 = np.array([kwargs['La']])
                La0 = np.array([kwargs['La2']])
            D = 0.08 * np.log10(La1 + La0) + 0.76 - 0.45 * (La1 - La0) / (La1 +
                                                                          La0)

        elif Dtype == 'smet2017':
            PAR = ['xyzw', 'Dmax']
            xyzw = np.array([kwargs['xyzw']])
            Dmax = np.array([kwargs['Dmax']])
            D = smet2017_D(xyzw, Dmax=Dmax)
        else:
            PAR = ["? user defined"]
            D = np.array(eval(Dtype))

        D[np.where(D < 0)] = 0
        D[np.where(D > 1)] = 1

    except Exception:
        raise Exception(
            'get_degree_of_adaptation(): **kwargs does not contain the necessary parameters ({}) for Dtype = {}'
            .format(PAR, Dtype))

    return D
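
A quick usage sketch (illustrative values): with the CAT02 D-function, an average surround (F = 1) and La = 100 cd/m², D = 1*(1 - (1/3.6)*exp((-100-42)/92)) ≈ 0.94:

D = get_degree_of_adaptation(Dtype='cat02', F='avg', La=100.0)
print(D)  # ~0.94; clipped to [0, 1] by the function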
Example #16
0
def fSr(x):
    # Planck's law based spectral radiance (x in nm); n, cct, _BB and _EPS
    # are taken from the enclosing scope.
    return (1 / np.pi) * _BB['c1'] * ((x * 1.0e-9)**(-5)) * (n**(-2.0)) * \
           (np.exp(_BB['c2'] * ((n * x * 1.0e-9 * (cct + _EPS))**(-1.0))) - 1.0)**(-1.0)
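
fSr picks up n, cct, _BB and _EPS from its enclosing scope: it is Planck's law for the spectral radiance of a blackbody at temperature cct, with x in nm and n the refractive index of the medium. A self-contained sketch with assumed values for the radiation constants:

import numpy as np

_BB = {'c1': 3.74177e-16, 'c2': 1.4388e-2}  # W*m^2 and m*K (assumed CODATA-style values)
_EPS = 1e-16
n = 1.0        # refractive index (air, approximately)
cct = 3000.0   # blackbody temperature in K

def fSr(x):
    lam = x * 1.0e-9  # wavelength in m
    # Planck's law; the 1/pi converts radiant exitance to radiance:
    return (1 / np.pi) * _BB['c1'] * lam**(-5) * n**(-2.0) \
           / (np.exp(_BB['c2'] / (n * lam * (cct + _EPS))) - 1.0)

wl = np.arange(380.0, 781.0, 5.0)
S = fSr(wl) / fSr(wl).max()  # relative SPD of a 3000 K Planckian radiator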
Example #17
0
def spd_to_mcri(SPD, D = 0.9, E = None, Yb = 20.0, out = 'Rm', wl = None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs, 
            |  first axis are the wavelengths)
        :D: 
            | 0.9, optional
            | Degree of adaptation.
        :E: 
            | None, optional
            | Illuminance in lux 
            |  (used to calculate La = (Yb/100)*(E/pi) to then calculate D 
            |  following the 'cat02' model). 
            | If None: the degree is determined by :D:
            |  If (:E: is not None) & (:Yb: is None):  :E: is assumed to contain 
            |  the adapting field luminance La (cd/m²).
        :Yb: 
            | 20.0, optional
            | Luminance factor of background. (used when calculating La from E)
            | If None, E contains La (cd/m²).
        :out: 
            | 'Rm' or str, optional
            | Specifies requested output (e.g. 'Rm,Rmi,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns: 
            | float or ndarray with MCRI Rm for :out: 'Rm'
            | Other output is also possible by changing the :out: str value.        
          
    References:
        1. `K.A.G. Smet, W.R. Ryckaert, M.R. Pointer, G. Deconinck, P. Hanselaer,(2012)
        “A memory colour quality metric for white light sources,” 
        Energy Build., vol. 49, no. C, pp. 216–225.
        <http://www.sciencedirect.com/science/article/pii/S0378778812000837>`_
    """
    SPD = np2d(SPD)
    
    if wl is not None: 
        SPD = spd(data = SPD, interpolation = _S_INTERP_TYPE, kind = 'np', wl = wl)
    
    
    # unpack metric default values:
    avg, catf, cieobs, cri_specific_pars, cspace, ref_type, rg_pars, sampleset, scale = [_MCRI_DEFAULTS[x] for x in sorted(_MCRI_DEFAULTS.keys())] 
    similarity_ai = cri_specific_pars['similarity_ai']
    Mxyz2lms = cspace['Mxyz2lms'] 
    scale_fcn = scale['fcn']
    scale_factor = scale['cfactor']
    sampleset = eval(sampleset)
    
    # A. calculate xyz:
    xyzti, xyztw = spd_to_xyz(SPD, cieobs = cieobs['xyz'],  rfl = sampleset, out = 2)
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyztw, cieobs = cieobs['cct'], out = 'cct,duv',mode = 'lut')
        
    # B. perform chromatic adaptation to adopted whitepoint of ipt color space, i.e. D65:
    if catf is not None:
        Dtype_cat, F, Yb_cat, catmode_cat, cattype_cat, mcat_cat, xyzw_cat = [catf[x] for x in sorted(catf.keys())]
        
        # calculate degree of adaptation D:
        if E is not None:
            if Yb is not None:
                La = (Yb/100.0)*(E/np.pi)
            else:
                La = E
            D = cat.get_degree_of_adaptation(Dtype = Dtype_cat, F = F, La = La)
        else:
            Dtype_cat = None # direct input of D

        if (E is None) and (D is None):
            D = 1.0 # set degree of adaptation to 1 !
        if D > 1.0: D = 1.0
        if D < 0.6: D = 0.6 # put a limit on the lowest D

        # apply cat:
        xyzti = cat.apply(xyzti, cattype = cattype_cat, catmode = catmode_cat, xyzw1 = xyztw,xyzw0 = None, xyzw2 = xyzw_cat, D = D, mcat = [mcat_cat], Dtype = Dtype_cat)
        xyztw = cat.apply(xyztw, cattype = cattype_cat, catmode = catmode_cat, xyzw1 = xyztw,xyzw0 = None, xyzw2 = xyzw_cat, D = D, mcat = [mcat_cat], Dtype = Dtype_cat)
     
    # C. convert xyz to ipt and split:
    ipt = xyz_to_ipt(xyzti, cieobs = cieobs['xyz'], M = Mxyz2lms) #input matrix as published in Smet et al. 2012, Energy and Buildings
    I,P,T = asplit(ipt)  

    # D. calculate specific (hue dependent) similarity indicators, Si:
    if len(xyzti.shape) == 3:
        ai = np.expand_dims(similarity_ai, axis = 1)
    else: 
        ai = similarity_ai
    a1,a2,a3,a4,a5 = asplit(ai)
    mahalanobis_d2 = (a3*np.power((P - a1),2.0) + a4*np.power((T - a2),2.0) + 2.0*a5*(P-a1)*(T-a2))
    if (len(mahalanobis_d2.shape)==3) & (mahalanobis_d2.shape[-1]==1):
        mahalanobis_d2 = mahalanobis_d2[:,:,0].T
    Si = np.exp(-0.5*mahalanobis_d2)

    # E. calculate general similarity indicator, Sa:
    Sa = avg(Si, axis = 0,keepdims = True)

    # F. rescale similarity indicators (Si, Sa) with a 0-1 scale to memory color rendition indices (Rmi, Rm) with a 0 - 100 scale:
    Rmi = scale_fcn(np.log(Si),scale_factor = scale_factor)
    Rm = np2d(scale_fcn(np.log(Sa),scale_factor = scale_factor))

    # G. calculate Rg (polyarea of test / polyarea of memory colours):
    if 'Rg' in out.split(','):
        I = I[...,None]
        a1 = a1[:,None]*np.ones(I.shape)
        a2 = a2[:,None]*np.ones(I.shape)
        a12 = np.concatenate((a1,a2),axis=2)
        ipt_mc = np.concatenate((I,a12),axis=2)
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue  = [rg_pars[x] for x in sorted(rg_pars.keys())]
    
        hue_bin_data = _get_hue_bin_data(ipt, ipt_mc, 
                                         start_hue = start_hue, nhbins = nhbins,
                                         normalized_chroma_ref = normalized_chroma_ref)
        Rg = _hue_bin_data_to_rg(hue_bin_data)

    if (out != 'Rm'):
        return  eval(out)
    else:
        return Rm
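
A usage sketch, assuming the function is available through an installed luxpy (the module paths and the bundled F4 illuminant are assumptions):

import luxpy as lx

spd = lx._CIE_ILLUMINANTS['F4']  # assumed to ship with luxpy
Rm = lx.cri.spd_to_mcri(spd)     # general memory colour rendition index
Rm, Rmi, cct, duv = lx.cri.spd_to_mcri(spd, out='Rm,Rmi,cct,duv')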
Example #18
0
def cam_sww16(data,
              dataw=None,
              Yb=20.0,
              Lw=400.0,
              Ccwb=None,
              relative=True,
              inputtype='xyz',
              direction='forward',
              parameters=None,
              cieobs='2006_10',
              match_to_conversionmatrix_to_cieobs=True):
    """
    A simple principled color appearance model based on a mapping of 
    the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | When changing to a different CIE observer, change the xyz-to-lms
            | matrix to the one corresponding to that observer. If False: use 
            | the one set in parameters or _CAM_SWW16_PARAMETERS.
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
        | published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()
    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        match_to_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs
    )

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    #--------------------------------------------------------------------------
    # Setup default adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    data, dataw, camout, originalshape = _massage_input_and_init_output(
        data, dataw, inputtype=inputtype, direction=direction)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    Mxyz2lms = np.dot(
        np.diag(cLMS), Mxyz2lms
    )  # weight the xyz-to-lms conversion matrix with cLMS (cfr. stage 1 calculations)
    invMxyz2lms = np.linalg.inv(
        Mxyz2lms)  # Calculate the inverse lms-to-xyz conversion matrix
    MAab = np.array(
        [clambda, calpha, cbeta]
    )  # Create matrix with scale factors for L, M, S for quick matrix multiplications
    invMAab = np.linalg.inv(
        MAab)  # Pre-calculate its inverse to avoid repeat in loop.

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get absolute tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #-----------------------------------------------------------------------------
        # stage 1: calculate photon rates of stimulus, white point and
        # adapting field, i.e. lmst, lmsw and lmsf
        #-----------------------------------------------------------------------------
        # Convert to white point l,m,s:
        lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']

        # Calculate adaptation field and convert to l,m,s:
        lmsf = (Yb / 100.0) * lmsw

        # Calculate lms of stimulus
        # or put adaptation lmsf in test field lmst for later use in inverse-mode (no xyz in 'inverse' mode!!!):
        lmst = (683.0 * np.dot(Mxyz2lms, xyzt.T).T /
                _CMF[cieobs]['K']) if (direction == 'forward') else lmsf

        #-----------------------------------------------------------------------------
        # stage 2: calculate cone outputs of stimulus lmstp
        #-----------------------------------------------------------------------------
        lmstp = math.erf(Cc *
                         (np.log(lmst / lms0) +
                          Cf * np.log(lmsf / lms0)))  # stimulus test field
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) +
                               Cf * np.log(lmsf / lms0)))  # adaptation field

        # add adaptation field lms temporarily to lmstp for quick calculation
        lmstp = np.vstack((lmsfp, lmstp))

        #-----------------------------------------------------------------------------
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        #-----------------------------------------------------------------------------
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        #-----------------------------------------------------------------------------
        #  stage 4: calculate recoded nerve signals, alphapp, betapp:
        #-----------------------------------------------------------------------------
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        #-----------------------------------------------------------------------------
        #  stage 5: calculate conscious color perception:
        #-----------------------------------------------------------------------------
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  stage 5 continued but SPLIT IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #--------------------------------------
        # FORWARD MODE TO PERCEPTUAL SIGNALS:
        #--------------------------------------
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]

            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation:
                alph_out = alph_int - Ccwb[0] * alph_int[0]
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            # stack together and remove adaptation field from vertical stack
            # camout is an ndarray with perceptual signals:
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':

            # stack cognitive pre-adapted adaptation field signals (first on stack) together:
            #labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data
            #(contains model perceptual signals in inverse mode!!!):
            lstar_out, alph_out, bet_out = asplit(data[i])

            #------------------------------------------------------------------------
            #  Inverse stage 5: undo cortical white-balance:
            #------------------------------------------------------------------------
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                alph_int = alph_out + Ccwb[0] * alph_int[0]
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            #---------------------------------------------------------------------------
            #  Inverse stage 4: pre-adapted perceptual signals to recoded nerve signals:
            #---------------------------------------------------------------------------
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            #---------------------------------------------------------------------------
            #  Inverse stage 3: recoded nerve signals to optic nerve signals:
            #---------------------------------------------------------------------------
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            #---------------------------------------------------------------------------
            #  Inverse stage 2: optic nerve signals to cone outputs:
            #---------------------------------------------------------------------------
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            #---------------------------------------------------------------------------
            #  Inverse stage 1: cone outputs to photon rates:
            #---------------------------------------------------------------------------
            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            #---------------------------------------------------------------------------
            #  Photon rates to absolute or relative tristimulus values:
            #---------------------------------------------------------------------------
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)
            if relative == True:
                xyzt = (100 / Lw) * xyzt

            # store in same named variable as forward mode:
            camout[i] = xyzt

            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            #  END inverse mode
            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    return _massage_output_data_to_original_shape(camout, originalshape)
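
The heart of stages 1-2 above is an erf-compressed log photon rate, and the inverse mode relies on this being exactly invertible via erfinv. A minimal self-contained sketch of that round trip, with made-up parameter values (not the JOSA fits):

import numpy as np
from scipy.special import erf, erfinv

Cc, Cf = 0.25, -0.05                   # illustrative values only
lms0 = np.array([4.0, 4.0, 4.0])       # assumed adaptation anchor
lmsf = np.array([20.0, 18.0, 15.0])    # adaptation field photon rates
lmst = np.array([60.0, 50.0, 30.0])    # stimulus photon rates

# stage 2 forward: compressed cone signals
lmstp = erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))

# stage 2 inverse: recover the stimulus photon rates
lmst_back = np.exp(erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)) * lms0
assert np.allclose(lmst_back, lmst)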
Example #19
0
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
        | published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  # input is expected to be absolute
            dataw[1:] = Lw * dataw[1:] / xyzw[:, 1:2]  # rescale to Lw
        else:
            pass  # already relative (Y = 100), nothing to do
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy(
    )  # stimulus data (can be upto NxMx3 for xyz, or [N x (M+1) x wl] for spd))
    dataw = np2d(dataw).copy(
    )  # white point (can be upto Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[
                0] == 1:  #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i +
                      1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[
                        0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmstp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[
                    0]  # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[
                    0]  #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
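
This older variant normalizes Mxyz2lms so that illuminant E maps to equal cone signals. A sketch of what math.normalize_3x3_matrix presumably does (row-wise rescaling so that M @ xyz0 gives [1, 1, 1]):

import numpy as np

def normalize_3x3_matrix(M, xyz0=np.array([[1.0, 1.0, 1.0]])):
    # Scale each row of M so that M @ xyz0.T becomes a vector of ones:
    M = np.asarray(M, dtype=float)
    return M / (M @ xyz0.T)  # (3,3) / (3,1) broadcasts row-wise

M = np.array([[ 0.4002, 0.7076, -0.0808],
              [-0.2263, 1.1653,  0.0457],
              [ 0.0,    0.0,     0.9182]])  # HPE-like matrix, for illustration
print(normalize_3x3_matrix(M) @ np.ones(3))  # -> [1. 1. 1.]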
Example #20
0
def DE2000(xyzt,
           xyzr,
           dtype='xyz',
           DEtype='jab',
           avg=None,
           avg_axis=0,
           out='DEi',
           xyzwt=None,
           xyzwr=None,
           KLCH=None):
    """
    Calculate DE2000 color difference.
    
    Args:
        :xyzt: 
            | ndarray with tristimulus values of test data.
        :xyzr:
            | ndarray with tristimulus values of reference data.
        :dtype:
            | 'xyz' or 'lab', optional
            | Specifies data type in :xyzt: and :xyzr:.
        :xyzwt:
            | None or ndarray, optional
            |   White point tristimulus values of test data
            |   None defaults to the one set in lx.xyz_to_lab()
        :xyzwr:
            | None or ndarray, optional
            |    Whitepoint tristimulus values of reference data
            |    None defaults to the one set in lx.xyz_to_lab()
        :DEtype:
            | 'jab' or str, optional
            | Options: 
            |    - 'jab' : calculates full color difference over all 3 dimensions.
            |    - 'ab'  : calculates chromaticity difference.
            |    - 'j'   : calculates lightness or brightness difference 
            |             (depending on :outin:).
            |    - 'j,ab': calculates both 'j' and 'ab' options 
            |              and returns them as a tuple.
        :KLCH: 
            | None, optional
            | Weigths for L, C, H 
            | None: default to [1,1,1] 
        :avg:
            | None, optional
            | None: don't calculate average DE, 
            |       otherwise use function handle in :avg:.
        :avg_axis:
            | axis to calculate average over, optional
        :out: 
            | 'DEi' or str, optional
            | Requested output.
        
    Note:
        For the other input arguments, see specific color space used.
        
    Returns:
        :returns: 
            | ndarray with DEi [, DEa] or other as specified by :out:
            
    References:
        1. `Sharma, G., Wu, W., & Dalal, E. N. (2005). 
        The CIEDE2000 color‐difference formula: Implementation notes, 
        supplementary test data, and mathematical observations. 
        Color Research & Application, 30(1), 21–30. 
        <https://doi.org/10.1002/col.20070>`_
    """

    if KLCH is None:
        KLCH = [1, 1, 1]

    if dtype == 'xyz':
        labt = xyz_to_lab(xyzt, xyzw=xyzwt)
        labr = xyz_to_lab(xyzr, xyzw=xyzwr)
    else:
        labt = xyzt
        labr = xyzr

    Lt = labt[..., 0:1]
    at = labt[..., 1:2]
    bt = labt[..., 2:3]
    Ct = np.sqrt(at**2 + bt**2)
    #ht = cam.hue_angle(at,bt,htype = 'rad')

    Lr = labr[..., 0:1]
    ar = labr[..., 1:2]
    br = labr[..., 2:3]
    Cr = np.sqrt(ar**2 + br**2)
    #hr = cam.hue_angle(at,bt,htype = 'rad')

    # Step 1:
    Cavg = (Ct + Cr) / 2
    G = 0.5 * (1 - np.sqrt((Cavg**7.0) / ((Cavg**7.0) + (25.0**7))))
    apt = (1 + G) * at
    apr = (1 + G) * ar

    Cpt = np.sqrt(apt**2 + bt**2)
    Cpr = np.sqrt(apr**2 + br**2)
    Cpprod = Cpt * Cpr

    hpt = cam.hue_angle(apt, bt, htype='deg')
    hpr = cam.hue_angle(apr, br, htype='deg')
    hpt[(apt == 0) * (bt == 0)] = 0
    hpr[(apr == 0) * (br == 0)] = 0

    # Step 2:
    dL = np.abs(Lr - Lt)
    dCp = np.abs(Cpr - Cpt)
    dhp_ = hpr - hpt

    dhp = dhp_.copy()
    dhp[np.where(dhp_ > 180)] = dhp[np.where(dhp_ > 180)] - 360
    dhp[np.where(dhp_ < -180)] = dhp[np.where(dhp_ < -180)] + 360
    dhp[np.where(Cpprod == 0)] = 0

    #dH = 2*np.sqrt(Cpprod)*np.sin(dhp/2*np.pi/180)
    dH = deltaH(dhp, Cpprod, htype='deg')

    # Step 3:
    Lp = (Lr + Lt) / 2
    Cp = (Cpr + Cpt) / 2

    hps = hpt + hpr
    hp = (hpt + hpr) / 2
    hp[np.where((np.abs(dhp_) > 180)
                & (hps < 360))] = hp[np.where((np.abs(dhp_) > 180)
                                              & (hps < 360))] + 180
    hp[np.where((np.abs(dhp_) > 180)
                & (hps >= 360))] = hp[np.where((np.abs(dhp_) > 180)
                                               & (hps >= 360))] - 180
    hp[np.where(Cpprod == 0)] = 0

    T = 1 - 0.17*np.cos((hp - 30)*np.pi/180) + 0.24*np.cos(2*hp*np.pi/180) +\
        0.32*np.cos((3*hp + 6)*np.pi/180) - 0.20*np.cos((4*hp - 63)*np.pi/180)
    dtheta = 30 * np.exp(-((hp - 275) / 25)**2)
    RC = 2 * np.sqrt((Cp**7) / ((Cp**7) + (25**7)))
    SL = 1 + ((0.015 * (Lp - 50)**2) / np.sqrt(20 + (Lp - 50)**2))
    SC = 1 + 0.045 * Cp
    SH = 1 + 0.015 * Cp * T
    RT = -np.sin(2 * dtheta * np.pi / 180) * RC

    kL, kC, kH = KLCH

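    # Note: DEi is a (J-part, ab-part) tuple of squared terms; _process_DEi
    # presumably combines them according to DEtype (e.g. 'jab' -> sqrt of the sum).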
    DEi = ((dL / (kL * SL))**2, (dCp / (kC * SC))**2 + (dH / (kH * SH))**2 +
           RT * (dCp / (kC * SC)) * (dH / (kH * SH)))

    return _process_DEi(DEi,
                        DEtype=DEtype,
                        avg=avg,
                        avg_axis=avg_axis,
                        out=out)
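
The Step 2 hue-difference wrapping above can be checked in isolation; a tiny standalone sketch:

import numpy as np

def wrap_dhp(dhp):
    # Wrap h'_r - h'_t into (-180, 180], as CIEDE2000 Step 2 requires:
    dhp = np.asarray(dhp, dtype=float).copy()
    dhp[dhp > 180] -= 360
    dhp[dhp < -180] += 360
    return dhp

print(wrap_dhp([10.0, 200.0, -200.0]))  # -> [  10. -160.  160.]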
Example #21
0
def run(data,
        xyzw=None,
        outin='J,aM,bM',
        cieobs=_CIEOBS,
        conditions=None,
        forward=True,
        mcat='cat02',
        **kwargs):
    """ 
    Run the Jz,az,bz based color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values
            | None defaults to D65
        :cieobs:
            | _CIEOBS, optional
            | CMF set to use when calculating :xyzw: if this is None.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            |               'Wz': whiteness, 'Kz':blackness, 'Sz': saturation, 'V': vividness
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02'
            |    - str: see see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
     
    References:
        1. `Safdar, M., Cui, G., Kim,Y. J., and  Luo, M. R.(2017).
        Perceptually uniform color space for image signals including high dynamic range and wide gamut.
        Opt. Express, vol. 25, no. 13, pp. 15131–15151, Jun. 2017. 
        <https://www.opticsexpress.org/abstract.cfm?URI=oe-25-13-15131>`_
    
        2. `Safdar, M., Hardeberg, J., Cui, G., Kim, Y. J., and Luo, M. R.(2018).
        A Colour Appearance Model based on Jzazbz Colour Space, 
        26th Color and Imaging Conference (2018), Vancouver, Canada, November 12-16, 2018, pp96-101.
        <https://doi.org/10.2352/ISSN.2169-2629.2018.26.96>`_
        
        3. Safdar, M., Hardeberg, J.Y., Luo, M.R. (2021) 
        "ZCAM, a psychophysical model for colour appearance prediction", 
        Optics Express.
    """
    print(
        "WARNING: ZCAM is as yet unpublished and under development, so parameter values might change! (07 Oct 2020)"
    )
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    # Define cone/chromatic adaptation sensor space:
    if (mcat is None):
        mcat = cat._MCATS['cat02']
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]
    invmcat = np.linalg.inv(mcat)

    #--------------------------------------------
    # Get white point of D65 for the chromatic adaptation transform (CAT):
    xyzw_d65 = np.array([[
        9.5047e+01, 1.0000e+02, 1.08883e+02
    ]]) if cieobs == '1931_2' else spd_to_xyz(_CIE_D65, cieobs=cieobs)

    #--------------------------------------------
    # Get default white point:
    if xyzw is None:
        xyzw = xyzw_d65.copy()

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1].T
    FL = 0.171 * La**(1 / 3) * (1 - np.exp(-48 / 9 * La)
                                )  # luminance adaptation factor
    n = Yb / Yw
    Fb = 1.045 + 1.46 * n**0.5  # background factor
    Fs = c  # surround factor

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Apply CAT to white point:
    xyzwc = cat.apply_vonkries2(xyzw,
                                xyzw1=xyzw,
                                xyzw2=xyzw_d65,
                                D=D,
                                mcat=mcat,
                                invmcat=invmcat,
                                use_Yw=True)

    #--------------------------------------------
    # Get Iz,az,bz coordinates:
    iabzw = xyz_to_jabz(xyzwc, ztype='iabz', use_zcam_parameters=True)

    # Get brightness of white point:
    Qw = 925 * iabzw[..., 0]**1.17 * (FL / Fs)**0.5

    #===================================================================
    # STIMULUS transformations:

    #--------------------------------------------
    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    if forward:
        # Apply CAT: adapt sample xyz from the input white point to D65:
        xyzc = cat.apply_vonkries2(data,
                                   xyzw1=xyzw,
                                   xyzw2=xyzw_d65,
                                   D=D,
                                   mcat=mcat,
                                   invmcat=invmcat,
                                   use_Yw=True)

        # Get Iz,az,bz coordinates:
        iabz = xyz_to_jabz(xyzc, ztype='iabz', use_zcam_parameters=True)

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(iabz[..., 1], iabz[..., 2], htype='deg')
        ez = 1.014 + np.cos((h + 89.038) * np.pi / 180)
        # ez = 1.014 + np.cos((h*np.pi/180) + 89.038)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate brightness, Q:
        Q = 925 * iabz[..., 0]**1.17 * (FL / Fs)**0.5

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (Q / Qw)**(Fs * Fb)

        #--------------------------------------------
        # calculate colorfulness, M:
        M = 50 * ((iabz[..., 1]**2.0 + iabz[..., 2]**2.0)**
                  0.368) * (ez**0.068) * (FL**0.2) / ((iabzw[..., 0]**2.52) *
                                                      (Fb**0.3))

        #--------------------------------------------
        # calculate chroma, C:
        C = 100 * M / Qw

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)
        S = s  # make extra variable, just in case 'S' is requested in outin

        #--------------------------------------------
        # calculate whiteness, W:
        if ('Wz' in outin) | ('aWz' in outin):
            Wz = 100 - 0.68 * ((100 - J)**2 + C**2)**0.5

        #--------------------------------------------
        # calculate blackness, K:
        if ('Kz' in outin) | ('aKz' in outin):
            Kz = 100 - 0.82 * (J**2 + C**2)**0.5

        #--------------------------------------------
        # calculate saturation, S:
        if ('Sz' in outin) | ('aSz' in outin):
            Sz = 8 + 0.5 * ((J - 55)**2 + C**2)**0.5

        #--------------------------------------------
        # calculate vividness, V:
        if ('Vz' in outin) | ('aVz' in outin):
            Vz = 8 + 0.4 * ((J - 70)**2 + C**2)**0.5

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            # dynamically assemble the requested correlates, in the order listed in outin:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    else:  # inverse transform
        #--------------------------------------------
        # Get Lightness J and brightness Q from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
            Q = Qw * (J / 100)**(1 / (Fs * Fb))
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / Qw)**(Fs * Fb)
        else:
            raise Exception(
                'No lightness or brightness values in data[...,0]. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        # calculate achromatic signal, Iz:
        Iz = (Qw / 925 * ((J / 100)**(1 / (Fs * Fb))) * (Fs / FL)**0.5)**(1 / 1.17)
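        # Derivation: the forward mode gives Q = 925 * Iz**1.17 * (FL / Fs)**0.5
        # and Q = Qw * (J / 100)**(1 / (Fs * Fb)), so solving for Iz yields
        # Iz = (Q / 925 * (Fs / FL)**0.5)**(1 / 1.17), which is the line above.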

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('aS' in outin) | ('S' in outin):  # saturation was given:
            M = Q * (MCs / 100.0)  # s = 100*M/Q  =>  M = Q*s/100 (Q already set above)
            C = 100 * M / Qw

        if ('aM' in outin) | ('M' in outin):
            C = 100 * MCs / Qw

        if ('aC' in outin) | ('C' in outin):  # chroma was given directly:
            C = MCs

        if ('Wz' in outin) | ('aWz' in outin):  # whiteness
            C = ((100 / 68 * (100 - MCs))**2 - (J - 100)**2)**0.5

        if ('Kz' in outin) | ('aKz' in outin):  # blackness
            C = ((100 / 82 * (100 - MCs))**2 - (J)**2)**0.5

        if ('Sz' in outin) | ('aSz' in outin):  # saturation
            C = ((10 / 5 * (MCs - 8))**2 - (J - 55)**2)**0.5

        if ('Vz' in outin) | ('aVz' in outin):  # vividness
            C = ((10 / 4 * (MCs - 8))**2 - (J - 70)**2)**0.5
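
        # Each of the four branches above solves the corresponding forward
        # distance formula for C, e.g. for whiteness:
        # Wz = 100 - 0.68*((100-J)**2 + C**2)**0.5
        #   =>  C = ((100/68 * (100 - Wz))**2 - (100 - J)**2)**0.5.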

        #--------------------------------------------
        # Calculate colorfulness, M:
        M = Qw * C / 100

        #--------------------------------------------
        # calculate eccentricity factor, ez (same expression as in the forward mode):
        ez = 1.014 + np.cos((h + 89.038) * np.pi / 180)

        #--------------------------------------------
        # calculate t (=sqrt(a**2+b**2)) from M:
        t = (((M / 50) * (iabzw[..., 0]**2.52) * (Fb**0.3)) /
             ((ez**0.068) * (FL**0.2)))**(1 / 0.368 / 2)
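        # Derivation: the forward mode gives
        # M = 50 * (az**2 + bz**2)**0.368 * ez**0.068 * FL**0.2 / (Izw**2.52 * Fb**0.3),
        # and with t = (az**2 + bz**2)**0.5 this is M ~ t**0.736, hence the
        # exponent (1 / 0.368 / 2) = 1 / 0.736 above.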

        #--------------------------------------------
        # Calculate az, bz:
        az = t * np.cos(h * np.pi / 180)
        bz = t * np.sin(h * np.pi / 180)

        #--------------------------------------------
        # join values and convert to xyz:
        xyzc = jabz_to_xyz(ajoin((Iz, az, bz)),
                           ztype='iabz',
                           use_zcam_parameters=True)

        #-------------------------------------------
        # Apply CAT from D65:
        xyz = cat.apply_vonkries2(xyzc,
                                  xyzw_d65,
                                  xyzw,
                                  D=D,
                                  mcat=mcat,
                                  invmcat=invmcat,
                                  use_Yw=True)

        return xyz
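
A minimal, self-contained sketch (the sample J and C values are arbitrary, chosen only for illustration) that checks the whiteness round-trip implied by the forward and inverse branches above:

import numpy as np

# forward: whiteness from lightness J and chroma C (as in the forward branch):
J, C = 78.0, 22.0
Wz = 100 - 0.68 * ((100 - J)**2 + C**2)**0.5

# inverse: recover C from (J, Wz) (as in the inverse branch):
C_back = ((100 / 68 * (100 - Wz))**2 - (100 - J)**2)**0.5

assert np.isclose(C, C_back)  # round-trip is consistent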