Code example #1
def lmsb_to_xyzb(lms, fieldsize=10, out='XYZ', allow_negative_values=False):
    """
    Convert from LMS cone fundamentals to XYZ color matching functions.
    
    Args:
        :lms: 
            | ndarray with LMS cone fundamentals
            | (row 0: wavelengths, rows 1-3: L, M, S).
        :fieldsize: 
            | fieldsize in degrees, optional
            | Defaults to 10°.
        :out: 
            | 'XYZ' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | XYZ color matching functions should not have negative values.
            |     If False: xyz[xyz<0] = 0.
    Returns:
        :returns:
            | xyz 
            |   - ndarray with population XYZ color matching functions.    
    
    Note: 
        For intermediate field sizes (2° < fieldsize < 10°) a conversion matrix
        is calculated by linear interpolation between 
        the _INDVCMF_M_2d and _INDVCMF_M_10d matrices.
    """
    wl = lms[None, 0]  #store wavelengths
    M = get_lms_to_xyz_matrix(fieldsize=fieldsize)
    if lms.ndim > 2:
        xyz = np.vstack((wl, math.dot23(M, lms[1:, ...], keepdims=False)))
    else:
        xyz = np.vstack((wl, np.dot(M, lms[1:, ...])))
    if not allow_negative_values:
        xyz[xyz < 0] = 0
    return xyz
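
A minimal usage sketch for the converter above (assumes numpy and the luxpy-style helpers get_lms_to_xyz_matrix and math.dot23 referenced in the function are in scope; the LMS values below are hypothetical placeholders):

import numpy as np

# Row 0 holds wavelengths (nm); rows 1-3 hold the L, M, S cone fundamentals.
lms = np.array([[450.0, 500.0, 550.0, 600.0],
                [0.05, 0.25, 0.95, 0.60],
                [0.08, 0.45, 0.99, 0.30],
                [0.90, 0.35, 0.02, 0.00]])

xyz = lmsb_to_xyzb(lms, fieldsize=5)  # 5°: matrix interpolated between the 2° and 10° matrices
print(xyz.shape)  # (4, 4): wavelengths + x, y, z color matching functions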
Code example #2
def _xyz_to_jab_cam02ucs(xyz, xyzw, conditions = None):
    """Convert sample xyz to CAM02-UCS J'a'b' coordinates (forward-only helper)."""
    
    #--------------------------------------------
    # Get/set condition parameters:
    if conditions is not None:
        surround_parameters =  {'surrounds': ['avg', 'dim', 'dark'], 
                                'avg' : {'c':0.69, 'Nc':1.0, 'F':1.0,'FLL': 1.0}, 
                                'dim' : {'c':0.59, 'Nc':0.9, 'F':0.9,'FLL':1.0} ,
                                'dark' : {'c':0.525, 'Nc':0.8, 'F':0.8,'FLL':1.0}}
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
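        # (Note: the unpacking above relies on ASCII sort order of the keys:
        #  'F' < 'FLL' < 'Nc' < 'c', since uppercase letters sort before lowercase.)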
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69
        
    #--------------------------------------------
    # Define sensor space and cat matrices:        
    mhpe = np.array([[0.38971,0.68898,-0.07868],
                     [-0.22981,1.1834,0.04641],
                     [0.0,0.0,1.0]]) # Hunt-Pointer-Estevez sensors (cone fundamentals)
    
    mcat = np.array([[0.7328, 0.4296, -0.1624],
                       [ -0.7036, 1.6975,  0.0061],
                       [ 0.0030, 0.0136,  0.9834]]) # CAT02 sensor space
    
    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe,invmcat)
    
    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[...,1:2].T
    k = 1.0 / (5.0*La + 1.0)
    FL = 0.2*(k**4.0)*(5.0*La) + 0.1*((1.0 - k**4.0)**2.0)*((5.0*La)**(1.0/3.0)) # luminance adaptation factor
    n = Yb/Yw 
    Nbb = 0.725*(1/n)**0.2   
    Ncb = Nbb
    z = 1.48 + FLL*n**0.5
    
    if D is None:
        D = F*(1.0-(1.0/3.6)*np.exp((-La-42.0)/92.0))
        
    #--------------------------------------------
    # transform from xyz, xyzw to cat sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T
    
    #--------------------------------------------  
    # apply von Kries cat:
    rgbc = ((D*Yw/rgbw)[...,None] + (1 - D))*rgb # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)
    rgbwc = ((D*Yw/rgbw) + (1 - D))*rgbw # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)
 
    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat,rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400*x**0.42/(x**0.42 + 27.13) + 0.1
    
    rgbpa = naka_rushton(FL*rgbp/100.0)
    p = np.where(rgbp<0)
    rgbpa[p] = 0.1 - (naka_rushton(FL*np.abs(rgbp[p])/100.0) - 0.1)
    
    rgbwpa = naka_rushton(FL*rgbwp/100.0)
    pw = np.where(rgbwp<0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL*np.abs(rgbwp[pw])/100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A  =  (2.0*rgbpa[...,0] + rgbpa[...,1] + (1.0/20.0)*rgbpa[...,2] - 0.305)*Nbb
    Aw =  (2.0*rgbwpa[...,0] + rgbwpa[...,1] + (1.0/20.0)*rgbwpa[...,2] - 0.305)*Nbb
    
    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[...,0] - 12.0*rgbpa[...,1]/11.0 + rgbpa[...,2]/11.0
    b = (1.0/9.0)*(rgbpa[...,0] + rgbpa[...,1] - 2.0*rgbpa[...,2])
        
    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b,a)
    et = (1.0/4.0)*(np.cos(h + 2.0) + 3.8)
    
    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0* (A / Aw)**(c*z)
    
    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0/13.0)*Nc*Ncb*et*((a**2.0 + b**2.0)**0.5)) / (rgbpa[...,0] + rgbpa[...,1] + (21.0/20.0*rgbpa[...,2]))
    C = (t**0.9)*((J/100.0)**0.5) * (1.64 - 0.29**n)**0.73
    
    #--------------------------------------------  
    # Calculate colorfulness, M:
    M = C*FL**0.25
        
    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    KL, c1, c2 =  1.0, 0.007, 0.0228
    Jp = (1.0 + 100.0*c1)*J / (1.0 + c1*J)
    Mp = (1.0/c2) * np.log(1.0 + c2*M)
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)
    
    return np.dstack((Jp,aMp,bMp))
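
A hedged usage sketch for the helper above (assumes numpy and luxpy's math.dot23; the sample values are hypothetical). Note the shapes it expects: xyz is 3-d (samples x illuminants x 3), xyzw is 2-d (illuminants x 3):

import numpy as np

xyz = np.array([[[19.01, 20.00, 21.78]],
                [[57.06, 43.06, 31.96]]])  # 2 samples x 1 illuminant x 3
xyzw = np.array([[95.05, 100.0, 108.88]])  # 1 illuminant x 3 (D65-like white)
conditions = {'La': 318.31, 'Yb': 20.0, 'D': 1.0, 'surround': 'avg'}

jab = _xyz_to_jab_cam02ucs(xyz, xyzw, conditions=conditions)
print(jab.shape)  # (2, 1, 3): J', a_M', b_M' per sample and illuminant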
Code example #3
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        outin='J,aM,bM',
        conditions=None,
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values 
        :Yw: 
            | None, optional
            | Luminance factor of white point.
            | If None: xyz (in data) and xyzw are entered as relative tristimulus values 
            |          (normalized to Yw = 100). 
            | If not None: input tristimulus are absolute and Yw is used to
            |              rescale the absolute values to relative ones 
            |              (relative to a reference perfect white diffuser 
            |               with Ywr = 100). 
            | Yw can be < 100 for e.g. paper as white point. If Yw is None, it 
            | is assumed that the relative Y-tristimulus value in xyzw 
            | represents the luminance factor Yw.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
        
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman (2002), 
        "The CIECAM02 color appearance model," 
        IS&T/SID Tenth Color Imaging Conference, p. 23.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS
    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
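    # (Note: the two unpackings above rely on ASCII sort order of the dict keys:
    #  'D' < 'Dtype' < 'La' < 'Yb' < 'surround' and 'F' < 'FLL' < 'Nc' < 'c';
    #  uppercase letters sort before lowercase ones.)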

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals)
    mhpe = cat._MCATS['hpe']

    # chromatic adaptation sensors:
    if (mcat is None) | (mcat == 'cat02'):
        mcat = cat._MCATS['cat02']
        if yellowbluepurplecorrect == 'brill-suss':
            mcat = cat._MCATS[
                'cat02-bs']  # for yellow-blue problem, Brill [Color Res Appl 2006;31:142-145] and Brill and Süsstrunk [Color Res Appl 2008;33:424-426]
        elif yellowbluepurplecorrect == 'jiang-luo':
            mcat = cat._MCATS[
                'cat02-jiang-luo']  # for yellow-blue problem + purple line problem
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat, np.linalg.inv(mhpe))

    #--------------------------------------------
    # Set Yw:
    if Yw is not None:
        Yw = (Yw * np.ones_like(xyzw[..., 1:2]).T)
    else:
        Yw = xyzw[..., 1:2].T

    #--------------------------------------------
    # calculate condition dependent parameters:
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5
    yw = xyzw[..., 1:2].T  # original Y in xyzw (pre-transposed)

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Normalize white point (keep transpose for next step):
    xyzw = Yw * xyzw.T / yw

    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = math.dot23(mcat, xyzw)

    #--------------------------------------------
    # apply von Kries cat:
    rgbwc = ((D * Yw / rgbw) + (1 - D)) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = math.dot23(mhpe_x_invmcat, rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x,
                                         scaling=400,
                                         n=0.42,
                                         sig=27.13**(1 / 0.42),
                                         noise=0.1,
                                         forward=forward)
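    # (Hedged note: naka_rushton is assumed to implement the CIECAM02 compression
    #  y = scaling*x**n / (x**n + sig**n) + noise for forward = True, and its inverse
    #  x = sig*((y - noise)/(scaling - (y - noise)))**(1/n) for forward = False;
    #  with sig = 27.13**(1/0.42) the forward case reproduces the explicit lambda
    #  written out in code example #2.)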

    pw = np.where(rgbwp < 0)

    # if requested apply yellow-blue correction:
    if yellowbluepurplecorrect == 'brill-suss':  # Brill & Süsstrunk approach, for purple line problem
        rgbwp[pw] = 0.0
    rgbwpa = NK(FL * rgbwp / 100.0, True)
    rgbwpa[pw] = 0.1 - (NK(FL * np.abs(rgbwp[pw]) / 100.0, True) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    #===================================================================
    # STIMULUS transformations
    if forward:

        #--------------------------------------------
        # Normalize xyz (keep transpose for matrix multiplication in next step):
        xyz = (Yw / yw)[..., None] * data.T

        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, xyz)

        #--------------------------------------------
        # apply von Kries cat:
        rgbc = ((D * Yw / rgbw)[..., None] + (1 - D)) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat, rgbc).T

        #--------------------------------------------
        # apply Naka-Rushton response compression:
        p = np.where(rgbp < 0)
        if yellowbluepurplecorrect == 'brill-suss':  # Brill & Süsstrunk approach, for purple line problem
            rgbp[p] = 0.0
        rgbpa = NK(FL * rgbp / 100.0, forward)
        rgbpa[p] = 0.1 - (NK(FL * np.abs(rgbp[p]) / 100.0, forward) - 0.1)

        #--------------------------------------------
        # Calculate achromatic signal:
        A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
             (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb

        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
        b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a, b, htype='deg')
        et = (1.0 / 4.0) * (np.cos(h * np.pi / 180 + 2.0) + 3.8)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (A / Aw)**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)

        #--------------------------------------------
        # calculate chroma, C:
        t = ((50000.0 / 13.0) * Nc * Ncb * et *
             ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                          (21.0 / 20.0 * rgbpa[..., 2]))
        C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

        #--------------------------------------------
        # calculate colorfulness, M:
        M = C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)**0.5
        S = s  # make extra variable, just in case 'S' is called

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / ((Aw + 4.0) * (FL**0.25) * (4.0 / c)))**2.0
        else:
            raise Exception(
                'No lightness or brightness values in data. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('S' in outin[1]):
            Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)
            M = Q * (MCs / 100.0)**2.0
            C = M / (FL**0.25)

        if ('M' in outin[1]):  # convert M to C:
            C = MCs / (FL**0.25)

        if ('C' in outin[1]):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J / 100.0)**(1.0 / 2.0) * (1.64 - 0.29**n)**0.73))**(1.0 / 0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h * np.pi / 180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h * np.pi / 180.0)
        bt = np.sin(h * np.pi / 180.0)
        p1 = (50000.0 / 13.0) * Nc * Ncb * et / t
        p2 = A / Nbb + 0.305
        p3 = 21.0 / 20.0
        p4 = p1 / bt
        p5 = p1 / at

        #--------------------------------------------
        #q = np.where(np.abs(bt) < np.abs(at))[0]
        q = (np.abs(bt) < np.abs(at))

        b = p2 * (2.0 + p3) * (460.0 / 1403.0) / (p4 + (2.0 + p3) * (220.0 / 1403.0) * (at / bt)
                                                  - (27.0 / 1403.0) + p3 * (6300.0 / 1403.0))
        a = b * (at / bt)

        a[q] = p2[q] * (2.0 + p3) * (460.0 / 1403.0) / (p5[q] + (2.0 + p3) * (220.0 / 1403.0)
                                                        - ((27.0 / 1403.0) - p3 * (6300.0 / 1403.0)) * (bt[q] / at[q]))
        b[q] = a[q] * (bt[q] / at[q])

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0
        gpa = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0
        bpa = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa, gpa, bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0 / FL) * NK(rgbpa, forward)

        # apply yellow-blue correction:
        if yellowbluepurplecorrect == 'brill-suss':  # Brill & Süsstrunk approach, for purple line problem
            p = np.where(rgbp < 0.0)
            rgbp[p] = 0.0

        #--------------------------------------------
        # convert from cone sensors (hpe) to cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D * Yw / rgbw)[..., None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb)

        #--------------------------------------------
        # unnormalize xyz:
        xyz = ((yw / Yw)[..., None] * xyz).T

        return xyz
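
A hedged forward/inverse round-trip sketch for run() above. It assumes the luxpy-style helpers the function references (cat._MCATS, math.dot23, hue_angle, hue_quadrature, naka_rushton, ajoin) and the module constants (_DEFAULT_WHITE_POINT, _DEFAULT_CONDITIONS, _SURROUND_PARAMETERS, _UNIQUE_HUE_DATA) are in scope; the sample values are hypothetical:

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])    # relative sample xyz
xyzw = np.array([[95.05, 100.0, 108.88]])  # relative white point
conditions = {'La': 100.0, 'Yb': 20.0, 'D': 1.0, 'Dtype': None, 'surround': 'avg'}

jab = run(xyz, xyzw=xyzw, conditions=conditions, forward=True)        # -> J, aM, bM
xyz_back = run(jab, xyzw=xyzw, conditions=conditions, forward=False)  # inverse mode
print(np.allclose(xyz, xyz_back.squeeze()))  # round trip should close to numerical precision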
Code example #4
def run(data, xyzw, out = 'J,aM,bM', conditions = None, forward = True):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see ?luxpy.cam.ciecam02.
        :forward:
            | True, optional
            | If True: run CAM in forward mode, else: inverse mode.
        :out:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | String with inputs in data. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS)
    Returns:
        :camout:
            | ndarray with Jab coordinates or whatever correlates requested in out.
    
    Note:
        * This is a simplified, less flexible, but faster version than the main ciecam02().
    
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman (2002), 
        "The CIECAM02 color appearance model," 
        IS&T/SID Tenth Color Imaging Conference, p. 23.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = out.split(',') if isinstance(out,str) else out
    
    #--------------------------------------------
    # Get/set condition parameters:
    if conditions is not None:
        surround_parameters =  {'surrounds': ['avg', 'dim', 'dark'], 
                                'avg' : {'c':0.69, 'Nc':1.0, 'F':1.0,'FLL': 1.0}, 
                                'dim' : {'c':0.59, 'Nc':0.9, 'F':0.9,'FLL':1.0} ,
                                'dark' : {'c':0.525, 'Nc':0.8, 'F':0.8,'FLL':1.0}}
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69
        
    #--------------------------------------------
    # Define sensor space and cat matrices:        
    mhpe = np.array([[0.38971,0.68898,-0.07868],
                     [-0.22981,1.1834,0.04641],
                     [0.0,0.0,1.0]]) # Hunt-Pointer-Estevez sensors (cone fundamentals)
    
    mcat = np.array([[0.7328, 0.4296, -0.1624],
                       [ -0.7036, 1.6975,  0.0061],
                       [ 0.0030, 0.0136,  0.9834]]) # CAT02 sensor space
    
    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe,invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat,np.linalg.inv(mhpe))
    
    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[...,1:2].T
    k = 1.0 / (5.0*La + 1.0)
    FL = 0.2*(k**4.0)*(5.0*La) + 0.1*((1.0 - k**4.0)**2.0)*((5.0*La)**(1.0/3.0)) # luminance adaptation factor
    n = Yb/Yw 
    Nbb = 0.725*(1/n)**0.2   
    Ncb = Nbb
    z = 1.48 + FLL*n**0.5
    
    if D is None:
        D = F*(1.0-(1.0/3.6)*np.exp((-La-42.0)/92.0))
        
    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):
    
    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = mcat @ xyzw.T
    
    #--------------------------------------------  
    # apply von Kries cat:
    rgbwc = ((D*Yw/rgbw) + (1 - D))*rgbw # factor 100 from ciecam02 is replaced with Yw[i] in cam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = (mhpe_x_invmcat @ rgbwc).T
    
    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x, scaling = 400, n = 0.42, sig = 27.13**(1/0.42), noise = 0.1, forward = forward)
    
    rgbwpa = NK(FL*rgbwp/100.0, True)
    pw = np.where(rgbwp<0)
    rgbwpa[pw] = 0.1 - (NK(FL*np.abs(rgbwp[pw])/100.0, True) - 0.1)
    
    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw =  (2.0*rgbwpa[...,0] + rgbwpa[...,1] + (1.0/20.0)*rgbwpa[...,2] - 0.305)*Nbb
    
    # massage shape of data for broadcasting:
    if data.ndim == 2: data = data[:,None]

    #===================================================================
    # STIMULUS transformations 
    if forward:
        
        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, data.T)
        
        #--------------------------------------------  
        # apply von Kries cat:
        rgbc = ((D*Yw/rgbw)[...,None] + (1 - D))*rgb # factor 100 from ciecam02 is replaced with Yw[i] in cam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)
        
        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat,rgbc).T
        
        #--------------------------------------------
        # apply Naka-Rushton response compression:        
        rgbpa = NK(FL*rgbp/100.0, forward)
        p = np.where(rgbp<0)
        rgbpa[p] = 0.1 - (NK(FL*np.abs(rgbp[p])/100.0, forward) - 0.1)
        
        #--------------------------------------------
        # Calculate achromatic signal:
        A  =  (2.0*rgbpa[...,0] + rgbpa[...,1] + (1.0/20.0)*rgbpa[...,2] - 0.305)*Nbb
                
        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[...,0] - 12.0*rgbpa[...,1]/11.0 + rgbpa[...,2]/11.0
        b = (1.0/9.0)*(rgbpa[...,0] + rgbpa[...,1] - 2.0*rgbpa[...,2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a,b, htype = 'deg')
        et = (1.0/4.0)*(np.cos(h*np.pi/180 + 2.0) + 3.8)
        
        #-------------------------------------------- 
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:    
            H = hue_quadrature(h, unique_hue_data = 'ciecam02')
        else:
            H = None
        
        #--------------------------------------------   
        # calculate lightness, J:
        if ('J' in outin) | ('Q' in outin) | ('C' in outin) | ('M' in outin) | ('s' in outin) | ('aS' in outin) | ('aC' in outin) | ('aM' in outin):
            J = 100.0* (A / Aw)**(c*z)
         
        #-------------------------------------------- 
        # calculate brightness, Q:
        if ('Q' in outin) | ('s' in outin) | ('aS' in outin):
            Q = (4.0/c)* ((J/100.0)**0.5) * (Aw + 4.0)*(FL**0.25)
          
        #-------------------------------------------- 
        # calculate chroma, C:
        if ('C' in outin) | ('M' in outin) | ('s' in outin) | ('aS' in outin) | ('aC' in outin) | ('aM' in outin):
            t = ((50000.0/13.0)*Nc*Ncb*et*((a**2.0 + b**2.0)**0.5)) / (rgbpa[...,0] + rgbpa[...,1] + (21.0/20.0*rgbpa[...,2]))
            C = (t**0.9)*((J/100.0)**0.5) * (1.64 - 0.29**n)**0.73
               
        #-------------------------------------------- 
        # calculate colorfulness, M:
        if ('M' in outin) | ('s' in outin) | ('aM' in outin) | ('aS' in outin):
            M = C*FL**0.25
        
        #--------------------------------------------         
        # calculate saturation, s:
        if ('s' in outin) | ('aS' in outin):
            s = 100.0* (M/Q)**0.5
        
        #--------------------------------------------            
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s*np.cos(h*np.pi/180.0)
            bS = s*np.sin(h*np.pi/180.0)
        
        if ('aC' in outin):
            aC = C*np.cos(h*np.pi/180.0)
            bC = C*np.sin(h*np.pi/180.0)
             
        if ('aM' in outin):
            aM = M*np.cos(h*np.pi/180.0)
            bM = M*np.sin(h*np.pi/180.0)
         
        #-------------------------------------------- 
        if outin != ['J','aM','bM']:
            camout = eval('ajoin(('+','.join(outin)+'))')
        else:
            camout = ajoin((J,aM,bM))
        
        if camout.shape[1] == 1:
            camout = camout[:,0,:]

        
        return camout
        
    elif forward == False:

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin):
            J = data[...,0].copy()
        elif ('Q' in outin):
            Q = data[...,0].copy()
            J = 100.0*(Q / ((Aw + 4.0)*(FL**0.25)*(4.0/c)))**2.0
        else:
            raise Exception('No lightness or brightness values in data. Inverse CAM-transform not possible!')

        #--------------------------------------------
        # calculate hue h:
        h = hue_angle(data[...,1], data[...,2], htype = 'deg')

        #--------------------------------------------
        # calculate Colorfulness M or Chroma C or Saturation s from a,b:
        MCs = (data[...,1]**2.0 + data[...,2]**2.0)**0.5

        if ('aS' in outin):
            Q = (4.0/c)* ((J/100.0)**0.5) * (Aw + 4.0)*(FL**0.25)
            M = Q*(MCs/100.0)**2.0
            C = M/(FL**0.25)

        if ('aM' in outin): # convert M to C:
            C = MCs/(FL**0.25)

        if ('aC' in outin):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J/100.0)**(1.0/2.0) * (1.64 - 0.29**n)**0.73))**(1.0/0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h*np.pi/180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw*(J/100.0)**(1.0/(c*z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h*np.pi/180.0)
        bt = np.sin(h*np.pi/180.0)
        p1 = (50000.0/13.0)*Nc*Ncb*et/t
        p2 = A/Nbb + 0.305
        p3 = 21.0/20.0
        p4 = p1/bt
        p5 = p1/at

        #--------------------------------------------
        #q = np.where(np.abs(bt) < np.abs(at))[0]
        q = (np.abs(bt) < np.abs(at))

        b = p2*(2.0 + p3) * (460.0/1403.0) / (p4 + (2.0 + p3) * (220.0/1403.0) * (at/bt) - (27.0/1403.0) + p3*(6300.0/1403.0))
        a = b * (at/bt)

        a[q] = p2[q]*(2.0 + p3) * (460.0/1403.0) / (p5[q] + (2.0 + p3) * (220.0/1403.0) - ((27.0/1403.0) - p3*(6300.0/1403.0)) * (bt[q]/at[q]))
        b[q] = a[q] * (bt[q]/at[q])

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0*p2 + 451.0*a + 288.0*b) / 1403.0
        gpa = (460.0*p2 - 891.0*a - 261.0*b) / 1403.0
        bpa = (460.0*p2 - 220.0*a - 6300.0*b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa,gpa,bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0/FL)*NK(rgbpa, forward)

        #--------------------------------------------
        # convert from cone sensors (hpe) to cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D*Yw/rgbw)[...,None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb).T

        return xyz
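
An analogous hedged sketch for the simplified run() above (it reuses the name of the fuller version in code example #3, so only one of the two should be in scope at a time; helpers assumed as before, values hypothetical):

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])
xyzw = np.array([[95.05, 100.0, 108.88]])
conditions = {'La': 100.0, 'Yb': 20.0, 'D': 1.0, 'surround': 'avg'}

jab = run(xyz, xyzw, out='J,aM,bM', conditions=conditions, forward=True)
xyz_back = run(jab, xyzw, out='J,aM,bM', conditions=conditions, forward=False)
print(np.allclose(xyz, xyz_back.squeeze()))  # round trip should close to numerical precision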
Code example #5
def apply_vonkries2(xyz,
                    xyzw1,
                    xyzw2,
                    xyzw0=None,
                    D=1,
                    mcat=None,
                    invmcat=None,
                    in_='xyz',
                    out_='xyz',
                    use_Yw=False):
    """ 
    Apply a 2-step von Kries chromatic adaptation transform.
    
    Args:
        :xyz:
            | ndarray with sample tristimulus or cat-sensor values
        :xyzw1:
            | ndarray with white point tristimulus or cat-sensor values of illuminant 1
        :xyzw2:
            | ndarray with white point tristimulus or cat-sensor values of illuminant 2
        :xyzw0:
            | None, optional
            | ndarray with white point tristimulus or cat-sensor values of baseline illuminant 0
            | None: defaults to EEW.
        :D:
            | 1, optional
            | Degree(s) of chromatic adaptation (Ill.1-->Ill.0, Ill.2-->Ill.0);
            | a scalar is broadcast to both steps.
        :mcat:
            | None, optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to luxpy.cat._MCAT_DEFAULT
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
        :invmcat:
            | None,optional
            | Pre-calculated inverse mcat.
            | If None: calculate inverse of mcat.
        :in_:
            | 'xyz', optional
            | Input type ('xyz', 'rgb') of data in xyz, xyzw1, xyzw2
        :out_:
            | 'xyz', optional
            | Output type ('xyz', 'rgb') of corresponding colors
        :use_Yw:
            | False, optional
            | Use CAT version with Yw factors included (but this can result in 
            | wrong predictions; see Smet & Ma (2020)).

    Returns:
        :xyzc:
            | ndarray with corresponding colors.
            
    Reference:
        1. `Smet, K. A. G., & Ma, S. (2020). 
        Some concerns regarding the CAT16 chromatic adaptation transform. 
        Color Research & Application, 45(1), 172–177. 
        <https://doi.org/10.1002/col.22457>`_
    """
    # Define cone/chromatic adaptation sensor space:
    if (in_ == 'xyz') | (out_ == 'xyz'):
        if not isinstance(mcat, np.ndarray):
            if (mcat is None):
                mcat = _MCATS[_MCAT_DEFAULT]
            elif isinstance(mcat, str):
                mcat = _MCATS[mcat]
        if invmcat is None:
            invmcat = np.linalg.inv(mcat)

    D = D * np.ones((2, ))  # ensure there are two D's available!

    #--------------------------------------------
    # Define default baseline illuminant:
    if xyzw0 is None:
        xyzw0 = np.array([[100., 100., 100.]])

    #--------------------------------------------
    # transform from xyz to cat sensor space:
    if in_ == 'xyz':
        rgb = math.dot23(mcat, xyz.T)
        rgbw1 = math.dot23(mcat, xyzw1.T)
        rgbw2 = math.dot23(mcat, xyzw2.T)
        rgbw0 = math.dot23(mcat, xyzw0.T)
    elif (in_ == 'rgb') & (use_Yw == False):  # input already in cat sensor space
        rgb = xyz
        rgbw1 = xyzw1
        rgbw2 = xyzw2
        rgbw0 = xyzw0
    else:
        raise Exception('Use of Yw requires xyz input.')

    #--------------------------------------------
    # apply 1-step von Kries cat from 1->0:
    vk_w_ratio10 = rgbw0 / rgbw1
    yw_ratio10 = 1.0 if use_Yw == False else xyzw1[..., 1] / xyzw0[..., 1]
    if rgb.ndim == 3:
        vk_w_ratio10 = vk_w_ratio10[..., None]
        if use_Yw: yw_ratio10 = yw_ratio10[..., None]
    rgbc = (D[0] * yw_ratio10 * vk_w_ratio10 + (1 - D[0])) * rgb

    #--------------------------------------------
    # apply inverse 1-step von Kries cat from 2->0:
    vk_w_ratio20 = rgbw0 / rgbw2
    yw_ratio20 = 1.0 if use_Yw == False else xyzw2[..., 1] / xyzw0[..., 1]
    if rgbc.ndim == 3:
        vk_w_ratio20 = vk_w_ratio20[..., None]
        if use_Yw: yw_ratio20 = yw_ratio20[..., None]
    rgbc = ((D[1] * yw_ratio20 * vk_w_ratio20 + (1 - D[1]))**(-1)) * rgbc

    #--------------------------------------------
    # convert from cat sensor space back to xyz:
    if out_ == 'xyz':
        return math.dot23(invmcat, rgbc).T
    else:
        return rgbc.T
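
A hedged usage sketch for the 2-step von Kries transform above (assumes numpy, luxpy's math.dot23 and the module-level _MCATS / _MCAT_DEFAULT; the white points and sample are hypothetical):

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])     # sample under illuminant 1
xyzw1 = np.array([[109.85, 100.0, 35.58]])  # illuminant-A-like white
xyzw2 = np.array([[95.05, 100.0, 108.88]])  # D65-like white

# corresponding colors under illuminant 2, full adaptation in both steps:
xyzc = apply_vonkries2(xyz, xyzw1, xyzw2, D=[1, 1], mcat='cat02')
print(xyzc)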
Code example #6
def apply_ciecat94(xyz,
                   xyzw,
                   xyzwr=None,
                   E=1000,
                   Er=1000,
                   Yb=20,
                   D=1,
                   cat94_old=True):
    """ 
    Calculate corresponding color tristimulus values using the CIECAT94 chromatic adaptation transform.
    
    Args:
        :xyz:
            | ndarray with sample 1931 2° XYZ tristimulus values under the test illuminant
        :xyzw:
            | ndarray with white point tristimulus values of the test illuminant
        :xyzwr:
            | None, optional
            | ndarray with white point tristimulus values of the reference illuminant
            | None defaults to D65.
        :E:
            | 1000, optional
            | Illuminance (lx) of test illumination
        :Er:
            | 1000, optional
            | Illuminance (lx) of the reference illumination
        :Yb:
            | 20, optional
            | Relative luminance of the adaptation field (background) 
        :D:
            | 1, optional
            | Degree of chromatic adaptation.
            | For object colours D = 1,  
            | and for luminous colours (typically displays) D = 0.
        :cat94_old:
            | True, optional
            | If True: use the original CIECAT94 (alpha = 1, noise n = 1);
            | if False: use the updated version (L*- and La-dependent alpha, noise n = 0.1).
            
    Returns:
        :xyzc:
            | ndarray with corresponding tristimlus values.

    Reference:
        1. CIE (2004). A review of chromatic adaptation transforms. CIE 160:2004. Vienna: CIE.
    """
    #--------------------------------------------
    # Define cone/chromatic adaptation sensor space:
    mcat = _MCATS['kries']
    invmcat = np.linalg.inv(mcat)

    #--------------------------------------------
    # Define default ref. white point:
    if xyzwr is None:
        xyzwr = np.array(
            [[9.5047e+01, 1.0000e+02, 1.0888e+02]]
        )  #spd_to_xyz(_CIE_D65, cieobs = '1931_2', relative = True, rfl = None)

    #--------------------------------------------
    # Calculate Y,x,y of white:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywr = xyz_to_Yxy(xyzwr)

    #--------------------------------------------
    # Calculate La, Lar:
    La = Yb * E / np.pi / 100
    Lar = Yb * Er / np.pi / 100

    #--------------------------------------------
    # Calculate CIELAB L* of samples:
    Lstar = xyz_to_lab(xyz, xyzw)[..., 0]

    #--------------------------------------------
    # Define xi_, eta_ and zeta_ functions:
    xi_ = lambda Yxy: (0.48105 * Yxy[..., 1] + 0.78841 * Yxy[..., 2] - 0.080811) / Yxy[..., 2]
    eta_ = lambda Yxy: (-0.27200 * Yxy[..., 1] + 1.11962 * Yxy[..., 2] + 0.04570) / Yxy[..., 2]
    zeta_ = lambda Yxy: 0.91822 * (1 - Yxy[..., 1] - Yxy[..., 2]) / Yxy[..., 2]

    #--------------------------------------------
    # Calculate intermediate values for test and ref. illuminants:
    xit, etat, zetat = xi_(Yxyw), eta_(Yxyw), zeta_(Yxyw)
    xir, etar, zetar = xi_(Yxywr), eta_(Yxywr), zeta_(Yxywr)

    #--------------------------------------------
    # Calculate alpha:
    if not cat94_old:
        alpha = 0.1151 * np.log10(La) + 0.0025 * (Lstar - 50) + (0.22 * D + 0.510)
        alpha[alpha > 1] = 1
    else:
        alpha = 1

    #--------------------------------------------
    # Calculate adapted intermediate xip, etap zetap:
    xip = alpha * xit - (1 - alpha) * xir
    etap = alpha * etat - (1 - alpha) * etar
    zetap = alpha * zetat - (1 - alpha) * zetar

    #--------------------------------------------
    # Calculate effective adapting response Rw, Gw, Bw and Rwr, Gwr, Bwr:
    #Rw, Gw, Bw = La*xit, La*etat, La*zetat # according to Westland's book: Computational Colour Science using MATLAB
    Rw, Gw, Bw = La * xip, La * etap, La * zetap  # according to CIE160-2004
    Rwr, Gwr, Bwr = Lar * xir, Lar * etar, Lar * zetar

    #--------------------------------------------
    # Calculate beta1_ and beta2_ exponents for (R,G) and B:
    beta1_ = lambda x: (6.469 + 6.362 * x**0.4495) / (6.469 + x**0.4495)
    beta2_ = lambda x: 0.7844 * (8.414 + 8.091 * x**0.5128) / (8.414 + x**0.5128)
    b1Rw, b1Rwr, b1Gw, b1Gwr = beta1_(Rw), beta1_(Rwr), beta1_(Gw), beta1_(Gwr)
    b2Bw, b2Bwr = beta2_(Bw), beta2_(Bwr)

    #--------------------------------------------
    # Noise term:
    n = 1 if cat94_old else 0.1

    #--------------------------------------------
    # K factor = p/q (for correcting the difference between
    # the illuminance of the test and reference conditions);
    # calculate p and q:
    p = ((Yb * xip + n) / (20 * xip + n))**((2 / 3) * b1Rw) * ((Yb * etap + n) / (20 * etap + n))**((1 / 3) * b1Gw)
    q = ((Yb * xir + n) / (20 * xir + n))**((2 / 3) * b1Rwr) * ((Yb * etar + n) / (20 * etar + n))**((1 / 3) * b1Gwr)
    K = p / q

    #--------------------------------------------
    # transform sample xyz to cat sensor space:
    rgb = math.dot23(mcat, xyz.T).T

    #--------------------------------------------
    # Calculate corresponding colors:
    Rc = (Yb * xir + n) * K**(1 / b1Rwr) * ((rgb[..., 0] + n) / (Yb * xip + n))**(b1Rw / b1Rwr) - n
    Gc = (Yb * etar + n) * K**(1 / b1Gwr) * ((rgb[..., 1] + n) / (Yb * etap + n))**(b1Gw / b1Gwr) - n
    Bc = (Yb * zetar + n) * K**(1 / b2Bwr) * ((rgb[..., 2] + n) / (Yb * zetap + n))**(b2Bw / b2Bwr) - n

    #--------------------------------------------
    # transform to xyz and return:
    xyzc = math.dot23(invmcat, ajoin((Rc, Gc, Bc)).T).T

    return xyzc
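
A hedged usage sketch for the CIECAT94 transform above (assumes numpy plus the luxpy-style helpers math.dot23, ajoin, xyz_to_Yxy, xyz_to_lab and the _MCATS dict; values hypothetical):

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])    # sample under the test illuminant
xyzw = np.array([[109.85, 100.0, 35.58]])  # illuminant-A-like test white

# corresponding colors under the default D65 reference white:
xyzc = apply_ciecat94(xyz, xyzw, E=1000, Er=1000, Yb=20)
print(xyzc)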