Example no. 1
def spd_to_power(data, ptype='ru', cieobs=_CIEOBS):
    """
    Calculate power of spectral data in radiometric, photometric 
    or quantal energy units.
    
    Args:
        :data: 
            | ndarray with spectral data
        :ptype: 
            | 'ru' or str, optional
            | str: - 'ru': in radiometric units 
            |      - 'pu': in photometric units 
            |      - 'pusa': in photometric units with Km corrected 
            |                to standard air (cfr. CIE TN003-2015)
            |      - 'qu': in quantal energy units
        :cieobs: 
            | _CIEOBS or str, optional
            | Type of cmf set to use for photometric units.
    
    Returns:
        :returns: 
            | ndarray with power values (in SI units corresponding to :ptype:)
    """
    # get wavelength spacing:
    dl = getwld(data[0])

    if ptype == 'ru':  #normalize to radiometric units
        p = np2d(np.dot(data[1:], dl * np.ones(data.shape[1]))).T

    elif ptype == 'pusa':  # normalize in photometric units with correction of Km to standard air

        # Calculate correction factor for Km in standard air:
        na = _BB['na']  # n for standard air
        c = _BB['c']  # m/s light speed
        lambdad = c / (na * 54 * 1e13) / (1e-9)  # 555 nm wavelength in standard air
        Km_correction_factor = 1 / (1 - (1 - 0.9998567) * (lambdad - 555))  # correction factor for Km in standard air

        # Get Vlambda and Km (for E):
        Vl, Km = vlbar(cieobs=cieobs, wl_new=data[0], out=2)
        Km *= Km_correction_factor
        p = Km * np2d(np.dot(data[1:], dl * Vl[1])).T

    elif ptype == 'pu':  # normalize in photometric units

        # Get Vlambda and Km (for E):
        Vl, Km = vlbar(cieobs=cieobs, wl_new=data[0], out=2)
        p = Km * np2d(np.dot(data[1:], dl * Vl[1])).T

    elif ptype == 'qu':  # normalize to quantal units

        # Get Quantal conversion factor:
        fQ = ((1e-9) / (_BB['h'] * _BB['c']))
        p = np2d(fQ * np.dot(data[1:], dl * data[0])).T

    return p
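A minimal usage sketch (assuming the luxpy package is installed and exports spd_to_power; 'D65' is a standard key of luxpy._CIE_ILLUMINANTS):

import luxpy as lx

spd = lx._CIE_ILLUMINANTS['D65'].copy()    # row 0: wavelengths, row 1: spectral values
P_ru = lx.spd_to_power(spd, ptype='ru')    # radiometric power (W)
P_pu = lx.spd_to_power(spd, ptype='pu')    # photometric power (lm)
print(P_ru.shape, P_pu.shape)              # (1, 1) (1, 1)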
Example no. 2
 def dot(self, S):
     """
     Take dot product of self.value with an SPD instance or a plain ndarray.
     """
     if isinstance(S, SPD):
         self.value = np.dot(self.value, S.value)
     else:
         self.value = np.dot(self.value, S)  # S is assumed to be a plain ndarray
     self.shape = self.value.shape
     self.N = self.shape[0]
     return self
Example no. 3
def line_intersect(a1, a2, b1, b2):
    """
    Line intersections of a series of pairs of line segments a and b.
        
    Args:
        :a1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line a
        :a2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line a
        :b1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line b
        :b2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line b
    
    Note: 
        N is the number of line segments a and b.
    
    Returns:
        :returns: 
            | ndarray with line-intersections (.shape = (N,2))
    
    References:
        1. https://stackoverflow.com/questions/3252194/numpy-and-line-intersections
    """
    T = np.array([[0.0, -1.0], [1.0, 0.0]])
    da = np.atleast_2d(a2 - a1)
    db = np.atleast_2d(b2 - b1)
    dp = np.atleast_2d(a1 - b1)
    dap = np.dot(da, T)
    denom = np.sum(dap * db, axis=1)
    num = np.sum(dap * dp, axis=1)
    return np.atleast_2d(num / denom).T * db + b1
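A quick numeric check with plain numpy (note that parallel segments give denom == 0 and thus inf/nan coordinates):

import numpy as np

a1, a2 = np.array([[0., 0.]]), np.array([[1., 1.]])   # segment a: (0,0)-(1,1)
b1, b2 = np.array([[0., 1.]]), np.array([[1., 0.]])   # segment b: (0,1)-(1,0)
print(line_intersect(a1, a2, b1, b2))                 # [[0.5 0.5]]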
Example no. 4
def check_posdef(A, atol = 1.0e-9, rtol = 1.0e-9):
    """
    Checks positive definiteness of a matrix via Cholesky.
    
    Args:
        :A: 
            | ndarray
        :atol:
            | float, optional
            | The absolute tolerance parameter (see Notes of numpy.allclose())
        :rtol:
            | float, optional
            | The relative tolerance parameter (see Notes of numpy.allclose())
    
    Returns:
        :returns:
            | Bool
            | True: the array is positive-definite within the given tolerance

    """
    try:
        R = np.linalg.cholesky(A)
        return np.allclose(A, np.dot(R, R.T), atol=atol, rtol=rtol)
    except np.linalg.LinAlgError:
        return False
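A quick sanity check (plain numpy; the second matrix is symmetric but indefinite, with eigenvalues 3 and -1):

import numpy as np

print(check_posdef(np.eye(3)))                        # True
print(check_posdef(np.array([[1., 2.], [2., 1.]])))   # False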
Example no. 5
def rgb_to_xyz(rgb, M, tr, xyz_black, tr_type = 'lut'): 
    """
    Convert input rgb to xyz.
    
    Args:
        :rgb:
            | ndarray [Nx3] with RGB values 
        :M:
            | linear rgb to xyz conversion matrix
        :tr:
            | Tone Response function parameters or lut
        :xyz_black:
            | ndarray with XYZ tristimulus values of black
        :tr_type:
            | 'lut', optional
            | Type of Tone Response in tr input argument
            | options:
            |  - 'lut': Tone-Response as a look-up-table
            |  - 'gog': Tone-Response as a gain-offset-gamma function
            
    Returns:
        :xyz:
            | ndarray [Nx3] of XYZ tristimulus values
    """
    return np.dot(M, _rgb_linearizer(rgb, tr, tr_type = tr_type).T).T + xyz_black
Example no. 6
def xyz_to_rgb(xyz,N,tr, xyz_black, tr_type = 'lut'): 
    """
    Convert xyz to input rgb. 
    
    Args:
        :xyz:
            | ndarray [Nx3] with XYZ tristimulus values 
        :N:
            | xyz to linear rgb conversion matrix
        :tr:
            | Tone Response function parameters or lut
        :xyz_black:
            | ndarray with XYZ tristimulus values of black
        :tr_type:
            | 'lut', optional
            | Type of Tone Response in tr input argument
            | options:
            |  - 'lut': Tone-Response as a look-up-table
            |  - 'gog': Tone-Response as a gain-offset-gamma function
            
    Returns:
        :rgb:
            | ndarray [Nx3] of display RGB values
    """
    rgblin = _clamp0(np.dot(N,(xyz - xyz_black).T).T) # calculate rgblin and clamp to zero (on separate line for speed)
    return np.round(_rgb_delinearizer(rgblin,tr, tr_type = tr_type)) # delinearize rgblin
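Both conversions depend on display-specific calibration data (M, N, tr, xyz_black and the _rgb_linearizer/_rgb_delinearizer helpers). A minimal sketch of just the linear core, using the standard sRGB matrix and ignoring tone response and black level (the values below are illustrative assumptions, not luxpy data):

import numpy as np

M = np.array([[0.4124, 0.3576, 0.1805],
              [0.2126, 0.7152, 0.0722],
              [0.0193, 0.1192, 0.9505]])   # linear sRGB -> XYZ
N = np.linalg.inv(M)                       # XYZ -> linear sRGB

rgblin = np.array([[1.0, 1.0, 1.0]])       # linear RGB white
xyz = np.dot(M, rgblin.T).T                # approx. D65 white point XYZ
print(np.dot(N, xyz.T).T)                  # round trip: [[1. 1. 1.]]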
Example no. 7
 def dot(self, M):
     """
     Take dot product of matrix M with the instance's value.
     """
     self.value = np.dot(M, self.value.T).T
     self.shape = self.value.shape
     return self
Example no. 8
def polyarea(x,y):
    """
    Calculates area of polygon. 
    
    | First coordinate should also be last.
    
    Args:
        :x: 
            | ndarray of x-coordinates of polygon vertices.
        :y: 
            | ndarray of y-coordinates of polygon vertices.
    
    Returns:
        :returns:
            | float (area of polygon)
    
    """
    return 0.5*np.abs(np.dot(x,np.roll(y,1).T)-np.dot(y,np.roll(x,1).T))
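A quick check with a unit square (first vertex repeated as last, as the docstring requires):

import numpy as np

x = np.array([0., 1., 1., 0., 0.])
y = np.array([0., 0., 1., 1., 0.])
print(polyarea(x, y))   # 1.0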
Example no. 9
def normalize_3x3_matrix(M, xyz0 = np.array([[1.0,1.0,1.0]])):
    """
    Normalize 3x3 matrix M such that M maps xyz0 to [1,1,1]
    
    | If M.shape == (1,9): M is reshaped to (3,3)
    
    Args:
        :M: 
            | ndarray (3,3) or ndarray (1,9)
        :xyz0: 
            | 2darray, optional 
        
    Returns:
        :returns: 
            | normalized matrix such that M*xyz0 = [1,1,1]
    """
    M = np2d(M)
    if M.shape[-1]==9:
        M = M.reshape(3,3)
    if xyz0.shape[0] == 1:
        return np.dot(np.diagflat(1 / (np.dot(M, xyz0.T))), M)
    else:
        return np.concatenate([np.dot(np.diagflat(1 / (np.dot(M, xyz0[i].T))), M)
                               for i in range(xyz0.shape[0])], axis=0).reshape(xyz0.shape[0], 3, 3)
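A quick check, reusing the function above: after normalization each row of M is rescaled so that M maps xyz0 = [1,1,1] exactly onto [1,1,1] (the sRGB matrix values are an illustrative assumption):

import numpy as np

M = np.array([[0.4124, 0.3576, 0.1805],
              [0.2126, 0.7152, 0.0722],
              [0.0193, 0.1192, 0.9505]])
Mn = normalize_3x3_matrix(M)
print(np.dot(Mn, np.ones((3, 1))))   # [[1.] [1.] [1.]]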
Example no. 10
def spd_to_ler(data, cieobs=_CIEOBS, K=None):
    """
    Calculates Luminous efficacy of radiation (LER) from spectral data.
       
    Args: 
        :data: 
            | ndarray or pandas.dataframe with spectral data
            | (.shape = (number of spectra + 1, number of wavelengths))
            | Note that :data: is never interpolated, only CMFs and RFLs. 
            | This way interpolation errors due to peaky spectra are avoided. 
            | Conform CIE15-2018.
        :cieobs: 
            | luxpy._CIEOBS, optional
            | Determines the color matching function set used in the 
            | calculation of LER. For cieobs = '1931_2' the ybar CMF curve equals
            | the CIE 1924 Vlambda curve.
        :K: 
            | None, optional
            |   e.g.  K  = 683 lm/W for '1931_2'
      
    Returns:
        :ler: 
            | ndarray of LER values. 
             
    References:
        1. `CIE15:2018, “Colorimetry,” CIE, Vienna, Austria, 2018. <https://doi.org/10.25039/TR.015.2018>`_
    """

    if isinstance(cieobs, str):
        if K is None: K = _CMF[cieobs]['K']
        Vl = vlbar(cieobs=cieobs, scr='dict', wl_new=data[0], kind='np')[1:2]  # also interpolate to wl of data
    else:
        Vl = spd(wl=data[0], data=cieobs, interpolation='cmf', kind='np')[1:2]
        if K is None:
            raise Exception(
                "spd_to_ler: User defined Vlambda, but no K scaling factor has been supplied."
            )
    dl = getwld(data[0])
    return ((K * np.dot((Vl * dl), data[1:].T))
            / np.sum(data[1:] * dl, axis=data.ndim - 1)).T
Example no. 11
def lmsb_to_xyzb(lms, fieldsize=10, out='XYZ', allow_negative_values=False):
    """
    Convert from LMS cone fundamentals to XYZ color matching functions.
    
    Args:
        :lms: 
            | ndarray with lms cone fundamentals
        :fieldsize: 
            | fieldsize in degrees, optional
            | Defaults to 10°.
        :out: 
            | 'XYZ' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | XYZ color matching functions should not have negative values.
            |     If False: xyz[xyz<0] = 0.
    Returns:
        :returns:
            | xyz
            |   - xyz: ndarray with population XYZ color matching functions.
    
    Note: 
        For intermediate field sizes (2° < fieldsize < 10°) a conversion matrix
        is calculated by linear interpolation between 
        the _INDVCMF_M_2d and _INDVCMF_M_10d matrices.
    """
    wl = lms[None, 0]  #store wavelengths
    M = get_lms_to_xyz_matrix(fieldsize=fieldsize)
    if lms.ndim > 2:
        xyz = np.vstack((wl, math.dot23(M, lms[1:, ...], keepdims=False)))
    else:
        xyz = np.vstack((wl, np.dot(M, lms[1:, ...])))
    if allow_negative_values == False:
        xyz[np.where(xyz < 0)] = 0
    return xyz
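A minimal sketch of the interpolation described in the Note above (the 2° and 10° matrix names are taken from the docstring; the exact weighting used by get_lms_to_xyz_matrix is an assumption here):

def _interp_lms_to_xyz_matrix(M_2d, M_10d, fieldsize):
    # linear interpolation between the 2 deg and 10 deg conversion matrices
    w = (fieldsize - 2.0) / (10.0 - 2.0)
    return (1.0 - w) * M_2d + w * M_10d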
Example no. 12
def fit_ellipse(xy, center_on_mean_xy = False):
    """
    Fit an ellipse to supplied data points.

    Args:
        :xy: 
            | coordinates of points to fit (Nx2 array)
        :center_on_mean_xy:
            | False, optional
            | Center ellipse on mean of xy 
            | (otherwise it might be offset due to solving 
            | the constrained minimization problem: aT*S*a, see ref below.)
            
    Returns:
        :v:
            | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta (rad.)]
            
    Reference:
        1. Fitzgibbon, A.W., Pilu, M., and Fisher R.B., 
        Direct least squares fitting of ellipses, 
        Proc. of the 13th International Conference on Pattern Recognition, 
        pp 253–257, Vienna, 1996.
    """
    # remove centroid:
#    center = xy.mean(axis=0)
#    xy = xy - center
    
    # Fit ellipse:
    x, y = xy[:,0:1], xy[:,1:2]
    D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))
    S, C = np.dot(D.T, D), np.zeros([6, 6])
    C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1
    U, s, V = np.linalg.svd(np.dot(np.linalg.inv(S), C))
    e = U[:, 0]
#    E, V =  np.linalg.eig(np.dot(np.linalg.inv(S), C))
#    n = np.argmax(np.abs(E))
#    e = V[:,n]
        
    # get ellipse axis lengths, center and orientation:
    b, c, d, f, g, a = e[1] / 2, e[2], e[3] / 2, e[4] / 2, e[5], e[0]
    
    # get ellipse center:
    num = b * b - a * c
    if num == 0:
        xc = 0
        yc = 0
    else:
        xc = ((c * d - b * f) / num) 
        yc = ((a * f - b * d) / num) 
    
    # get ellipse orientation:
    theta = np.arctan2(np.array(2 * b), np.array((a - c))) / 2
#    if b == 0:
#        if a > c:
#            theta = 0
#        else:
#            theta = np.pi/2
#    else:
#        if a > c:
#            theta = np.arctan2(2*b,(a-c))/2
#        else:
#            theta =  np.arctan2(2*b,(a-c))/2 + np.pi/2
        
    # axis lengths:
    up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
    down1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    down2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    a, b  = np.sqrt((up / down1)), np.sqrt((up / down2))


    # assert that a is the major axis (otherwise swap and correct angle)
    if b > a:
        b, a = a, b
        # ensure the angle is between 0 and 2*pi
        theta = np.fmod(theta, 2.0 * np.pi)
        
    if center_on_mean_xy == True:
        xc,yc = xy.mean(axis=0)

    return np.hstack((a, b, xc, yc, theta))
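A quick check on a synthetic, slightly noisy, axis-aligned ellipse with Rmax = 3, Rmin = 1, centered at the origin (exact noise-free data would make the scatter matrix S singular):

import numpy as np

t = np.linspace(0, 2 * np.pi, 200)
rng = np.random.default_rng(0)
xy = np.stack((3 * np.cos(t), np.sin(t)), axis=1) + 0.01 * rng.standard_normal((200, 2))
print(fit_ellipse(xy))   # approx. [3, 1, 0, 0, theta], theta near 0 (mod pi)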
Example no. 13
def test_model():

    import pandas as pd
    import luxpy as lx

    # Read selected set of Munsell samples and LMS10(lambda):
    M = pd.read_csv('Munsell_LMS_nonlin_Nov18_2015_version.dat',
                    header=None,
                    sep='\t').values
    YLMS10_ = pd.read_csv('YLMS10_LMS_nonlin_Nov18_2015_version.dat',
                          header=None,
                          sep='\t').values
    Y10_ = YLMS10_[[0, 1], :].copy()
    LMS10_ = YLMS10_[[0, 2, 3, 4], :].copy()

    # Calculate lms:
    Y10 = cie_interp(_CMF['1964_10']['bar'].copy(),
                     getwlr([400, 700, 5]),
                     kind='cmf')[[0, 2], :]
    XYZ10_lx = _CMF['2006_10']['bar'].copy()
    XYZ10_lx = cie_interp(XYZ10_lx, getwlr([400, 700, 5]), kind='cmf')
    LMS10_lx = np.vstack((XYZ10_lx[:1, :],
                          np.dot(math.normalize_3x3_matrix(_CMF['2006_10']['M'], np.array([[1, 1, 1]])),
                                 XYZ10_lx[1:, :])))
    LMS10 = cie_interp(LMS10_lx, getwlr([400, 700, 5]), kind='cmf')

    #LMS10 = np.vstack((XYZ10[:1,:],np.dot(lx.math.normalize_3x3_matrix(_CMF['2006_10']['M'],np.array([[1,1,1]])),XYZ10_lx[1:,:])))

    #LMS10[1:,:] = LMS10[1:,:]/LMS10[1:,:].sum(axis=1,keepdims=True)*Y10[1:,:].sum()

    # test python model vs excel calculator:
    def spdBB(CCT=5500, wl=[400, 700, 5], Lw=25000, cieobs='1964_10'):
        wl = getwlr(wl)
        dl = wl[1] - wl[0]
        # Planck's law, with h = 6.626068e-34 J.s, c = 299792458 m/s, kB = 1.3806503e-23 J/K:
        spd = (2 * np.pi * 6.626068E-34 * (299792458**2) / ((wl * 1e-9)**5)
               / (np.exp(6.626068E-34 * 299792458 / (wl * 1e-9) / 1.3806503E-23 / CCT) - 1))
        # rescale so that the spectrum has luminance Lw (cd/m^2):
        spd = Lw * spd / (dl * 683 * (spd * cie_interp(_CMF[cieobs]['bar'].copy(), wl, kind='cmf')[2, :]).sum())
        return np.vstack((wl, spd))
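    # For readability, the same Planck formula with named constants (a sketch
    # assuming only numpy; numerically identical to the hard-coded values in spdBB):
    h, c, kB = 6.626068e-34, 299792458.0, 1.3806503e-23   # J.s, m/s, J/K

    def planck_exitance(wl_nm, T):
        # blackbody spectral radiant exitance (W/m^2 per m) at temperature T
        lam = wl_nm * 1e-9
        return 2 * np.pi * h * c**2 / lam**5 / (np.exp(h * c / (lam * kB * T)) - 1)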

    # Create long term and applied spds:
    spd5500 = spdBB(5500, Lw=25000, wl=[400, 700, 5], cieobs='1964_10')
    spd6500 = spdBB(6500, Lw=400, wl=[400, 700, 5], cieobs='1964_10')

    # Calculate lms0 as a check:
    clms = np.array([0.98446776, 0.98401909, 0.98571412])  # correction factor for slight differences between _CMF and the cmfs from the excel calculator
    lms0 = 5 * 683 * (spd5500[1:] * LMS10[1:, :] * 0.2).sum(axis=1).T

    # Full excel parameters for testing:
    parameters = {
        'cLMS': np.array([1, 1, 1]),
        'lms0': np.array([4985.02802565, 5032.49518502, 4761.27272226]) * 1,
        'Cc': 0.251617118325755,
        'Cf': -0.4,
        'clambda': [0.5, 0.5, 0.0],
        'calpha': [1.0, -1.0, 0.0],
        'cbeta': [0.5, 0.5, -1.0],
        'cga1': [26.1047711317923, 33.9721745703298],
        'cgb1': [6.76038379211498, 10.9220216677629],
        'cga2': [0.587271269247578],
        'cgb2': [-0.952412544980473],
        'cl_int': [14.0035243121804, 1.0],
        'cab_int': [4.99218965716342, 65.7869547646456],
        'cab_out': [-0.1, -1.0],
        'Ccwb': None,
        'Mxyz2lms': [[0.21701045, 0.83573367, -0.0435106],
                     [-0.42997951, 1.2038895, 0.08621089],
                     [0., 0., 0.46579234]]
    }

    # Note cLMS is a relative scaling factor between CIE2006 10° and 1964 10°:
    #    clms = np.array([1.00164919, 1.00119269, 1.0029173 ]) = (Y10[1:,:].sum(axis=1)/LMS10[1:,:].sum(axis=1))*(406.98099078/400)

    #parameters =_CAM_SWW16_PARAMETERS['JOSA']
    # Calculate Munsell spectra multiplied with spd6500:
    spd6500xM = np.vstack((spd6500[:1, :], spd6500[1:, :] * M[1:, :]))

    # Test spectral input:
    print('SPD INPUT -----')
    jab = cam_sww16(spd6500xM,
                    dataw=spd6500,
                    Yb=20.0,
                    Lw=400.0,
                    Ccwb=1,
                    relative=True,
                    inputtype='spd',
                    direction='forward',
                    parameters=parameters,
                    cieobs='2006_10',
                    match_to_conversionmatrix_to_cieobs=True)

    # Test xyz input:
    print('\nXYZ INPUT -----')
    xyz = lx.spd_to_xyz(spd6500xM, cieobs='2006_10', relative=False)
    xyzw = lx.spd_to_xyz(spd6500, cieobs='2006_10', relative=False)
    xyz2, xyzw2 = lx.spd_to_xyz(spd6500,
                                cieobs='2006_10',
                                relative=False,
                                rfl=M,
                                out=2)

    print(xyzw)
    jab = cam_sww16(xyz,
                    dataw=xyzw,
                    Yb=20.0,
                    Lw=400,
                    Ccwb=1,
                    relative=True,
                    inputtype='xyz',
                    direction='forward',
                    parameters=parameters,
                    cieobs='2006_10',
                    match_to_conversionmatrix_to_cieobs=True)
Example no. 14
def symmM_to_posdefM(A = None, atol = 1.0e-9, rtol = 1.0e-9, method = 'make', forcesymm = True):
    """
    Convert a symmetric matrix to a positive definite one. 
    
    Args:
        :A: 
            | ndarray
        :atol:
            | float, optional
            | The absolute tolerance parameter (see Notes of numpy.allclose())
        :rtol:
            | float, optional
            | The relative tolerance parameter (see Notes of numpy.allclose())
        :method: 
            | 'make' or 'nearest', optional (see notes for more info)
        :forcesymm: 
            | True or False, optional
            | If A is not symmetric, force symmetry using: 
            |    A = numpy.triu(A) + numpy.triu(A).T - numpy.diag(numpy.diag(A))
    
    Returns:
        :returns:
            | ndarray with positive-definite matrix.
        
    Notes on supported methods:
        1. `'make': A Python/Numpy port of Muhammad Asim Mubeen's matlab function 
        Spd_Mat.m 
        <https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix>`_
        2. `'nearest': A Python/Numpy port of John D'Errico's `nearestSPD` 
        MATLAB code. 
        <https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite>`_
    """
    if A is not None:
        A = np2d(A)
        
        
        # Make sure matrix A is symmetric up to a certain tolerance:
        sn = check_symmetric(A, atol = atol, rtol = rtol) 
        if ((A.shape[0] != A.shape[1]) | (sn != True)):
            if (forcesymm == True)  &  (A.shape[0] == A.shape[1]):
                A = np.triu(A) + np.triu(A).T - np.diag(np.diag(A))
            else:
                raise Exception('symmM_to_posdefM(): matrix A not symmetric.')
        
        
        if check_posdef(A, atol = atol, rtol = rtol) == True:
            return A
        else:

            if method == 'make':

                # A Python/Numpy port of Muhammad Asim Mubeen's matlab function Spd_Mat.m
                #
                # See: https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix
                Val, Vec = np.linalg.eig(A) 
                Val = np.real(Val)
                Vec = np.real(Vec)
                Val[np.where(Val==0)] = _EPS #making zero eigenvalues non-zero
                p = np.where(Val<0)
                Val[p] = -Val[p] #making negative eigenvalues positive
                return   np.dot(Vec,np.dot(np.diag(Val) , Vec.T))
 
            
            elif method == 'nearest':
                
                 # A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
                 # credits [2].
                 #
                 # [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
                 #
                 # [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
                 # matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
                 #
                 # See: https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite
                
                B = (A + A.T) / 2.0
                _, s, V = np.linalg.svd(B)

                H = np.dot(V.T, np.dot(np.diag(s), V))

                A2 = (B + H) / 2.0

                A3 = (A2 + A2.T) / 2.0

                if check_posdef(A3, atol = atol, rtol = rtol) == True:
                    return A3

                spacing = np.spacing(np.linalg.norm(A))
                I = np.eye(A.shape[0])
                k = 1
                while not check_posdef(A3, atol = atol, rtol = rtol):
                    mineig = np.min(np.real(np.linalg.eigvals(A3)))
                    A3 += I * (-mineig * k**2.0 + spacing)
                    k += 1

                return A3
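A quick check, reusing check_posdef from above (the example matrix is an illustrative assumption):

import numpy as np

A = np.array([[1., 2.], [2., 1.]])         # symmetric but indefinite (eigenvalues 3, -1)
Apd = symmM_to_posdefM(A, method='make')
print(check_posdef(Apd))                   # True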
Example no. 15
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters, args)  # overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs, relative=False)  # get abs. tristimulus values
        if relative == False:  # input is expected to be absolute
            dataw[1:] = Lw * dataw[1:] / xyzw[:, 1:2]  # make absolute
        else:
            dataw = dataw  # keep relative (Y = 100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy()    # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy()  # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[0] == 1:  # make dataw have same light source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only forward mode accepts spectral input
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])), cieobs=cieobs, relative=False)
                dataw[i + 1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])), cieobs=cieobs, relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs, relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(Mxyz2lms, data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[0]  # white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[0]  # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0])  # <-- alphpp = (Cga2.*(alphp+betp))
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0])   # <-- betpp = (Cgb2.*(alphp-betp))

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

#    if flipaxis0and1 == True: # loop over shortest dim.
#        camout = np.transpose(camout, axes = (1,0,2))

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
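A minimal usage sketch in xyz mode (assuming this function is exposed as luxpy.cam.cam_sww16; the sample values are illustrative only):

import numpy as np
import luxpy as lx

xyz = np.array([[20.0, 30.0, 40.0]])       # sample tristimulus values (relative, Yw = 100)
xyzw = np.array([[95.0, 100.0, 105.0]])    # white point
jab = lx.cam.cam_sww16(xyz, dataw=xyzw, Yb=20.0, Lw=400.0, relative=True,
                       inputtype='xyz', direction='forward')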
Example no. 16
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        outin='J,aM,bM',
        conditions=None,
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values 
        :Yw: 
            | None, optional
            | Luminance factor of white point.
            | If None: xyz (in data) and xyzw are entered as relative tristimulus values 
            |          (normalized to Yw = 100). 
            | If not None: input tristimulus are absolute and Yw is used to
            |              rescale the absolute values to relative ones 
            |              (relative to a reference perfect white diffuser 
            |               with Ywr = 100). 
            | Yw can be < 100 for e.g. paper as white point. If Yw is None, it 
            | is assumed that the relative Y-tristimulus value in xyzw 
            | represents the luminance factor Yw.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
        
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman, (2002), 
        “The CIECAM02 color appearance model,” 
        IS&T/SID Tenth Color Imaging Conference, p. 23.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS
    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals)
    mhpe = cat._MCATS['hpe']

    # chromatic adaptation sensors:
    if (mcat is None) | (mcat == 'cat02'):
        mcat = cat._MCATS['cat02']
        if yellowbluepurplecorrect == 'brill-suss':
            mcat = cat._MCATS['cat02-bs']  # for yellow-blue problem, Brill [Color Res Appl 2006;31:142-145] and Brill and Süsstrunk [Color Res Appl 2008;33:424-426]
        elif yellowbluepurplecorrect == 'jiang-luo':
            mcat = cat._MCATS['cat02-jiang-luo']  # for yellow-blue problem + purple line problem
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat, np.linalg.inv(mhpe))

    #--------------------------------------------
    # Set Yw:
    if Yw is not None:
        Yw = (Yw * np.ones_like(xyzw[..., 1:2]).T)
    else:
        Yw = xyzw[..., 1:2].T

    #--------------------------------------------
    # calculate condition dependent parameters:
    k = 1.0 / (5.0 * La + 1.0)
    FL = (0.2 * (k**4.0) * (5.0 * La)
          + 0.1 * ((1.0 - k**4.0)**2.0) * ((5.0 * La)**(1.0 / 3.0)))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5
    yw = xyzw[..., 1:2].T  # original Y in xyzw (pre-transposed)

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Normalize white point (keep transpose for next step):
    xyzw = Yw * xyzw.T / yw

    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = math.dot23(mcat, xyzw)

    #--------------------------------------------
    # apply von Kries cat:
    rgbwc = ((D * Yw / rgbw) + (1 - D)) * rgbw  # factor 100 from ciecam02 is replaced with Yw in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = math.dot23(mhpe_x_invmcat, rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x,
                                         scaling=400,
                                         n=0.42,
                                         sig=27.13**(1 / 0.42),
                                         noise=0.1,
                                         forward=forward)

    pw = np.where(rgbwp < 0)

    # if requested apply yellow-blue correction:
    if (yellowbluepurplecorrect == 'brill-suss'):  # Brill & Süsstrunk approach, for purple line problem
        rgbwp[pw] = 0.0
    rgbwpa = NK(FL * rgbwp / 100.0, True)
    rgbwpa[pw] = 0.1 - (NK(FL * np.abs(rgbwp[pw]) / 100.0, True) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    #===================================================================
    # STIMULUS transformations
    if forward:

        #--------------------------------------------
        # Normalize xyz (keep transpose for matrix multiplication in next step):
        xyz = (Yw / yw)[..., None] * data.T

        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, xyz)

        #--------------------------------------------
        # apply von Kries cat:
        rgbc = ((D * Yw / rgbw)[..., None] + (1 - D)) * rgb  # factor 100 from ciecam02 is replaced with Yw in ciecam16 (see white-point note above)

        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat, rgbc).T

        #--------------------------------------------
        # apply Naka-Rushton response compression:
        p = np.where(rgbp < 0)
        if (yellowbluepurplecorrect == 'brill-suss'):  # Brill & Süsstrunk approach, for purple line problem
            rgbp[p] = 0.0
        rgbpa = NK(FL * rgbp / 100.0, forward)
        rgbpa[p] = 0.1 - (NK(FL * np.abs(rgbp[p]) / 100.0, forward) - 0.1)

        #--------------------------------------------
        # Calculate achromatic signal:
        A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
             (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb

        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
        b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a, b, htype='deg')
        et = (1.0 / 4.0) * (np.cos(h * np.pi / 180 + 2.0) + 3.8)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (A / Aw)**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)

        #--------------------------------------------
        # calculate chroma, C:
        t = ((50000.0 / 13.0) * Nc * Ncb * et *
             ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                          (21.0 / 20.0 * rgbpa[..., 2]))
        C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

        #--------------------------------------------
        # calculate colorfulness, M:
        M = C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)**0.5
        S = s  # make extra variable, just in case 'S' is requested in outin

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / ((Aw + 4.0) * (FL**0.25) * (4.0 / c)))**2.0
        else:
            raise Exception(
                'No lightness or brightness values in data. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('S' in outin[1]):
            Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)
            M = Q * (MCs / 100.0)**2.0
            C = M / (FL**0.25)

        if ('M' in outin[1]):  # convert M to C:
            C = MCs / (FL**0.25)

        if ('C' in outin[1]):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J / 100.0)**0.5 * (1.64 - 0.29**n)**0.73))**(1.0 / 0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h * np.pi / 180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h * np.pi / 180.0)
        bt = np.sin(h * np.pi / 180.0)
        p1 = (50000.0 / 13.0) * Nc * Ncb * et / t
        p2 = A / Nbb + 0.305
        p3 = 21.0 / 20.0
        p4 = p1 / bt
        p5 = p1 / at

        #--------------------------------------------
        #q = np.where(np.abs(bt) < np.abs(at))[0]
        q = (np.abs(bt) < np.abs(at))

        b = p2 * (2.0 + p3) * (460.0 / 1403.0) / (p4 + (2.0 + p3) * (220.0 / 1403.0) * (at / bt)
                                                  - (27.0 / 1403.0) + p3 * (6300.0 / 1403.0))
        a = b * (at / bt)

        a[q] = p2[q] * (2.0 + p3) * (460.0 / 1403.0) / (p5[q] + (2.0 + p3) * (220.0 / 1403.0)
                                                        - ((27.0 / 1403.0) - p3 * (6300.0 / 1403.0)) * (bt[q] / at[q]))
        b[q] = a[q] * (bt[q] / at[q])

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0
        gpa = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0
        bpa = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa, gpa, bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0 / FL) * NK(rgbpa, forward)

        # apply yellow-blue correction:
        if (yellowbluepurplecorrect == 'brill-suss'):  # Brill & Süsstrunk approach, for purple line problem
            p = np.where(rgbp < 0.0)
            rgbp[p] = 0.0

        #--------------------------------------------
        # convert back from cone sensor (hpe) space to cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D * Yw / rgbw)[..., None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb)

        #--------------------------------------------
        # unnormalize xyz:
        xyz = ((yw / Yw)[..., None] * xyz).T

        return xyz
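A minimal forward/inverse round-trip sketch (assuming run() is exposed as in luxpy.cam's ciecam02 module; the sample values are illustrative only):

import numpy as np

xyzw = np.array([[95.05, 100.0, 108.88]])   # D65-like white point (relative)
xyz = np.array([[19.01, 20.0, 21.78]])      # sample stimulus
jab = run(xyz, xyzw=xyzw, forward=True)     # -> J, aM, bM
xyz2 = run(jab, xyzw=xyzw, forward=False)   # back to XYZ (approx. equal to xyz)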
Example no. 17
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
            |  or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only forward mode accepts spectral input

    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms / _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = A + cHK[0] * M**cHK[1]  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # undo the stimulus-size (fov) adjustment of Q (back to the 10° reference)
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # undo the stimulus-size (fov) adjustment of Q (back to the 10° reference)
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # undo the stimulus-size (fov) adjustment of Q (back to the 10° reference)
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # undo the stimulus-size (fov) adjustment of Q (back to the 10° reference)
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle (given directly in degrees for 'h'-type input,
            # where a and b are undefined; otherwise derived from a, b):
            if 'h' in outin:
                h = h * np.pi / 180.0  # convert to rad
            else:
                h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
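
# A minimal forward/inverse round-trip sketch of the model above. The exposed
# name cam15u is an assumption (the def line is not shown in this excerpt); the
# arguments follow those actually used in the body (fov, inputtype, direction, outin):
import numpy as np

xyz = np.array([[19.01, 20.0, 21.78]])  # an arbitrary neutral-ish stimulus
qabW = cam15u(xyz, fov=10.0, inputtype='xyz', direction='forward', outin='Q,aW,bW')
xyz_hat = cam15u(qabW, fov=10.0, inputtype='xyz', direction='inverse', outin='Q,aW,bW')
# xyz_hat should recover xyz up to numerical precision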
Example 18
def _polyarea(x, y):
    # polygon area via the shoelace formula (vertices ordered along the boundary):
    return 0.5 * np.abs(
        np.dot(x, np.roll(y, 1, axis=0)) - np.dot(y, np.roll(x, 1, axis=0)))
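
# A quick sanity check of the shoelace formula on a unit square traversed
# counter-clockwise (expected area: 1.0):
import numpy as np

x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
print(_polyarea(x, y))  # 1.0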
Example 19
def _simple_cam(
        data,
        dataw=None,
        Lw=100.0,
        relative=True,
        inputtype='xyz',
        direction='forward',
        cie_illuminant='D65',
        parameters={
            'cA': 1,
            'ca': np.array([1, -1, 0]),
            'cb': (1 / 3) * np.array([0.5, 0.5, -1]),
            'n': 1 / 3,
            'Mxyz2lms': _CMF['1931_2']['M'].copy()
        },
        cieobs='2006_10',
        match_to_conversionmatrix_to_cieobs=True):
    """
    An example CAM illustrating the usage of the functions in luxpy.cam.helpers 
    
    | Note that this example uses NO chromatic adaptation 
    | and SIMPLE compression, opponent and correlate processing.
    | THIS IS ONLY FOR ILLUSTRATION PURPOSES !!!

    Args:
        :data: 
            | ndarray with input:
            |  - tristimulus values 
            | or
            |  - spectral data 
            | or 
            |  - input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of :cie_illuminant:
        :cie_illuminant:
            | 'D65', optional
            | String corresponding to one of the illuminants (keys) 
            | in luxpy._CIE_ILLUMINANT
            | If ndarray, then use this one.
            | This is ONLY USED WHEN dataw is NONE !!!
        :Lw:
            | 100.0, optional
            | Luminance (cd/m²) of white point.
        :relative:
            | True or False, optional
            | True: data and dataw input is relative (i.e. Yw = 100)
        :parameters:
            | {'cA': 1, 'ca':np.array([1,-1,0]), 'cb':(1/3)*np.array([0.5,0.5,-1]),
            |  'n': 1/3, 'Mxyz2lms': _CMF['1931_2']['M'].copy()}
            | Dict with model parameters 
            | (For illustration purposes of match_conversionmatrix_to_cieobs, 
            |  the conversion matrix luxpy._CMF['1931_2']['M'] does NOT match
            |  the default observer specification of the input data in :cieobs: !!!)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz_to_lms
            | matrix to the one corresponding to that observer. 
            | Set to False to keep the one in the parameter dict!
    
    Returns:
        :returns: 
            | ndarray with:
            | - color appearance correlates (:direction: == 'forward')
            |  or 
            | - XYZ tristimulus values (:direction: == 'inverse')
    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()  # gets all local variables (i.e. the function arguments)

    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        cieobs=cieobs,
        match_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs,
        Mxyz2lms_whitepoint=np.array([[1, 1, 1]]))

    #unpack model parameters:
    (Mxyz2lms, cA, ca, cb,
     n) = [parameters[x] for x in sorted(parameters.keys())]

    #--------------------------------------------------------------------------
    # Setup default white point / adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            cie_illuminant=cie_illuminant,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    n_out = 5  # this example outputs 5 'correlates': J, a, b, C, h
    (data, dataw, camout,
     originalshape) = _massage_input_and_init_output(data,
                                                     dataw,
                                                     inputtype=inputtype,
                                                     direction=direction,
                                                     n_out=n_out)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    # Create matrix with scale factors for L, M, S
    # for quick matrix multiplications to obtain neural signals:
    MAab = np.array([[cA, cA, cA], ca, cb])

    if direction == 'inverse':
        invMxyz2lms = np.linalg.inv(Mxyz2lms)  # inverse lms-to-xyz conversion matrix
        invMAab = np.linalg.inv(MAab)  # pre-calculate its inverse to avoid repeats in the loop

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        # Note that xyzt will contain a None in case of inverse mode !!!
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #---------------------------------------------------------------------
        # stage 1 (white point): calculate lms values of white:
        #----------------------------------------------------------------------
        lmsw = np.dot(Mxyz2lms, xyzw.T).T

        #------------------------------------------------------------------
        # stage 2 (white): apply simple chromatic adaptation:
        #------------------------------------------------------------------
        lmsw_a = lmsw / lmsw  # the white maps to unity: this example applies no real chromatic adaptation

        #----------------------------------------------------------------------
        # stage 3 (white point): apply simple compression to lms values
        #----------------------------------------------------------------------
        lmsw_ac = lmsw_a**n

        #----------------------------------------------------------------------
        # stage 4 (white point): calculate achromatic A, and opponent signals a,b):
        #----------------------------------------------------------------------
        Aabw = np.dot(MAab, lmsw_ac.T).T

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # SPLIT CALCULATION STEPS IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        if direction == 'forward':
            #------------------------------------------------------------------
            # stage 1 (stimulus): calculate lms values
            #------------------------------------------------------------------
            lms = np.dot(Mxyz2lms, xyzt.T).T

            #------------------------------------------------------------------
            # stage 2 (stimulus): apply simple chromatic adaptation:
            #------------------------------------------------------------------
            lms_a = lms / lmsw

            #------------------------------------------------------------------
            # stage 3 (stimulus): apply simple compression to lms values
            #------------------------------------------------------------------
            lms_ac = lms_a**n

            #------------------------------------------------------------------
            # stage 3 (stimulus): calculate achromatic A, and opponent signals a,b:
            #------------------------------------------------------------------
            Aab = np.dot(MAab, lms_ac.T).T

            #------------------------------------------------------------------
            # stage 4 (stimulus): calculate J, C, h
            #------------------------------------------------------------------
            J = Aab[..., 0] / Aabw[..., 0]
            C = (Aab[..., 1]**2 + Aab[..., 2]**2)**0.5
            h = math.positive_arctan(Aab[..., 1], Aab[..., 2])

            # stack the correlates J, a, b, C, h together:
            camout[i] = np.vstack((J, Aab[..., 1], Aab[..., 2], C, h)).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':
            pass  # the inverse mode is deliberately left unimplemented in this illustrative example

    return _massage_output_data_to_original_shape(camout, originalshape)
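
# A minimal usage sketch of the illustrative CAM above, with relative XYZ input
# and the default white point (the tristimulus values below are arbitrary examples):
import numpy as np

xyz = np.array([[19.01, 20.0, 21.78], [57.06, 43.06, 31.96]])
out = _simple_cam(xyz, dataw=None, Lw=100.0, relative=True,
                  inputtype='xyz', direction='forward')
# out holds the 5 correlates stacked above, in order: J, a, b, C, h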
Example 20
def plotellipse(v, cspace_in = 'Yxy', cspace_out = None, nsamples = 100, \
                show = True, axh = None, \
                line_color = 'darkgray', line_style = ':', line_width = 1, line_marker = '', line_markersize = 4,\
                plot_center = False, center_marker = 'o', center_color = 'darkgray', center_markersize = 4,\
                show_grid = False, llabel = '', label_fontname = 'Times New Roman', label_fontsize = 12,\
                out = None):
    """
    Plot ellipse(s) given in v-format [Rmax,Rmin,xc,yc,theta].
    
    Args:
        :v: 
            | (Nx5) ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :cspace_in:
            | 'Yxy', optional
            | Color space of v.
            | If None: no color space assumed. Axis labels assumed ('x','y').
        :cspace_out:
            | None, optional
            | Color space to plot ellipse(s) in.
            | If None: plot in cspace_in.
        :nsamples:
            | 100 or int, optional
            | Number of points (samples) in ellipse boundary
        :show:
            | True or boolean, optional
            | Plot ellipse(s) (True) or not (False)
        :axh: 
            | None, optional
            | Ax-handle to plot ellipse(s) in.
            | If None: create new figure with axes.
        :line_color:
            | 'darkgray', optional
            | Color to plot ellipse(s) in.
        :line_style:
            | ':', optional
            | Linestyle of ellipse(s).
        :line_width:
            | 1, optional
            | Width of ellipse boundary line.
        :line_marker:
            | '', optional
            | Marker for ellipse boundary.
        :line_markersize:
            | 4, optional
            | Size of markers in ellipse boundary.
        :plot_center:
            | False, optional
            | Plot center of ellipse: yes (True) or no (False)
        :center_color:
            | 'darkgray', optional
            | Color to plot ellipse center in.
        :center_marker:
            | 'o', optional
            | Marker for ellipse center.
        :center_markersize:
            | 4, optional
            | Size of marker of ellipse center.
        :show_grid:
            | False, optional
            | Show grid (True) or not (False)
        :llabel:
            | '', optional
            | Legend label for ellipse boundary.
        :label_fontname: 
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :out:
            | None, optional
            | Output of function.
            | If None: returns None. Can be used to return :axh:, the handle of a 
            |      newly created figure's axes, or :Yxys:, an ndarray with the 
            |      coordinates of the ellipse boundaries in cspace_out 
            |      (shape = (nsamples, 3, N))
            
        
    Returns:
        :returns: None, or whatever set by :out:.
    """
    Yxys = np.zeros((nsamples, 3, v.shape[0]))
    ellipse_vs = np.zeros((v.shape[0], 5))
    for i, vi in enumerate(v):

        # Set sample density of ellipse boundary:
        t = np.linspace(0, 2 * np.pi, int(nsamples))

        a = vi[0]  # major axis
        b = vi[1]  # minor axis
        xyc = vi[2:4, None]  # center
        theta = vi[-1]  # rotation angle

        # define rotation matrix:
        R = np.hstack((np.vstack((np.cos(theta), np.sin(theta))),
                       np.vstack((-np.sin(theta), np.cos(theta)))))

        # Calculate ellipses:
        Yxyc = np.vstack((1, xyc)).T
        Yxy = np.vstack(
            (np.ones((1, nsamples)),
             xyc + np.dot(R, np.vstack((a * np.cos(t), b * np.sin(t)))))).T
        Yxys[:, :, i] = Yxy

        # Convert to requested color space:
        if (cspace_out is not None) & (cspace_in is not None):
            Yxy = colortf(Yxy, cspace_in + '>' + cspace_out)
            Yxyc = colortf(Yxyc, cspace_in + '>' + cspace_out)
            Yxys[:, :, i] = Yxy

            # get ellipse parameters in requested color space:
            ellipse_vs[i, :] = math.fit_ellipse(Yxy[:, 1:])
            #de = np.sqrt((Yxy[:,1]-Yxyc[:,1])**2 + (Yxy[:,2]-Yxyc[:,2])**2)
            #ellipse_vs[i,:] = np.hstack((de.max(),de.min(),Yxyc[:,1],Yxyc[:,2],np.nan)) # nan because orientation is xy, but request is some other color space. Change later to actual angle when fitellipse() has been implemented

        # plot ellipses:
        if show == True:
            if (axh is None) & (i == 0):
                fig = plt.figure()
                axh = fig.add_subplot(111)

            if (cspace_in is None):
                xlabel = 'x'
                ylabel = 'y'
            else:
                xlabel = _CSPACE_AXES[cspace_in][1]
                ylabel = _CSPACE_AXES[cspace_in][2]

            if (cspace_out is not None):
                xlabel = _CSPACE_AXES[cspace_out][1]
                ylabel = _CSPACE_AXES[cspace_out][2]

            if plot_center == True:
                axh.plot(Yxyc[:, 1],
                         Yxyc[:, 2],
                         color=center_color,
                         linestyle='none',
                         marker=center_marker,
                         markersize=center_markersize)
            if llabel is None:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize)
            else:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize,
                         label=llabel)

            axh.set_xlabel(xlabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            axh.set_ylabel(ylabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            if show_grid == True:
                plt.grid(True)
            #plt.show()
    Yxys = np.transpose(Yxys, axes=(0, 2, 1))
    if out is not None:
        return eval(out)
    else:
        return None
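
# A minimal usage sketch: plot two ellipses specified in v-format (the parameters
# below are arbitrary examples; out='axh' returns the axes handle via eval(out)):
import numpy as np

v = np.array([[0.020, 0.010, 0.33, 0.33, 0.0],        # [Rmax, Rmin, xc, yc, theta]
              [0.030, 0.015, 0.45, 0.40, np.pi / 4]])
axh = plotellipse(v, cspace_in='Yxy', plot_center=True, show_grid=True, out='axh')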
Example 21
def _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=None):
    """ 
    Calculate CAM02-UCS J'a'b' coordinates from xyz tristimulus values of sample and white point.
    
    Args:
        :xyz:
            | ndarray with sample tristimulus values
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
    
    Returns:
        :jab:
            | ndarray with J'a'b' coordinates.
    """
    #--------------------------------------------
    # Get/ set conditions parameters:
    if conditions is not None:
        surround_parameters = {
            'surrounds': ['avg', 'dim', 'dark'],
            'avg': {
                'c': 0.69,
                'Nc': 1.0,
                'F': 1.0,
                'FLL': 1.0
            },
            'dim': {
                'c': 0.59,
                'Nc': 0.9,
                'F': 0.9,
                'FLL': 1.0
            },
            'dark': {
                'c': 0.525,
                'Nc': 0.8,
                'F': 0.8,
                'FLL': 1.0
            }
        }
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69

    #--------------------------------------------
    # Define sensor space and cat matrices:
    mhpe = np.array([[0.38971, 0.68898, -0.07868], [-0.22981, 1.1834, 0.04641],
                     [0.0, 0.0, 1.0]
                     ])  # Hunt-Pointer-Estevez sensors (cone fundamentals)

    mcat = np.array([[0.7328, 0.4296, -0.1624], [-0.7036, 1.6975, 0.0061],
                     [0.0030, 0.0136, 0.9834]])  # CAT02 sensor space

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1:2].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * ((5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5

    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #--------------------------------------------
    # transform from xyz, xyzw to cat sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T

    #--------------------------------------------
    # apply von Kries cat (the factor 100 from ciecam02 is replaced by Yw in 
    # ciecam16; see the 'note' in Fairchild's "Color Appearance Models", p. 291, 3rd ed.):
    rgbc = ((D * Yw / rgbw)[..., None] + (1 - D)) * rgb
    rgbwc = ((D * Yw / rgbw) + (1 - D)) * rgbw

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat, rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400 * x**0.42 / (x**0.42 + 27.13) + 0.1

    rgbpa = naka_rushton(FL * rgbp / 100.0)
    p = np.where(rgbp < 0)
    rgbpa[p] = 0.1 - (naka_rushton(FL * np.abs(rgbp[p]) / 100.0) - 0.1)

    rgbwpa = naka_rushton(FL * rgbwp / 100.0)
    pw = np.where(rgbwp < 0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL * np.abs(rgbwp[pw]) / 100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
         (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
    b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b, a)
    et = (1.0 / 4.0) * (np.cos(h + 2.0) + 3.8)

    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0 * (A / Aw)**(c * z)

    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0 / 13.0) * Nc * Ncb * et *
         ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                      (21.0 / 20.0 * rgbpa[..., 2]))
    C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

    #--------------------------------------------
    # Calculate colorfulness, M:
    M = C * FL**0.25

    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    if ucs == True:
        KL, c1, c2 = 1.0, 0.007, 0.0228
        Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)
        Mp = (1.0 / c2) * np.log(1.0 + c2 * M)
    else:
        Jp = J
        Mp = M
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)

    return np.dstack((Jp, aMp, bMp))
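
# A minimal sketch: CAM02-UCS color difference between two samples under D65.
# Note the shapes implied by the broadcasting above: xyz is (N, M, 3) with M
# light sources, xyzw is (M, 3). The sample values below are ~sRGB primaries:
import numpy as np

xyzw = np.array([[95.047, 100.0, 108.883]])   # D65 white, 1931 2° observer
xyz1 = np.array([[[41.24, 21.26, 1.93]]])     # ~sRGB red primary
xyz2 = np.array([[[35.76, 71.52, 11.92]]])    # ~sRGB green primary
jab1 = _xyz_to_jab_cam02ucs(xyz1, xyzw)
jab2 = _xyz_to_jab_cam02ucs(xyz2, xyzw)
dE = np.sqrt(((jab1 - jab2)**2).sum(axis=-1))  # Euclidean distance in J'a'b'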
Example 22
def spd_to_xyz(data,
               relative=True,
               rfl=None,
               cieobs=_CIEOBS,
               K=None,
               out=None,
               cie_std_dev_obs=None):
    """
    Calculates xyz tristimulus values from spectral data.
       
    Args: 
        :data: 
            | ndarray or pandas.dataframe with spectral data
            | (.shape = (number of spectra + 1, number of wavelengths))
            | Note that :data: is never interpolated, only CMFs and RFLs. 
            | This way interpolation errors due to peaky spectra are avoided. 
            | Conform CIE15-2018.
        :relative: 
            | True or False, optional
            | Calculate relative XYZ (Yw = 100) or absolute XYZ (Y = Luminance)
        :rfl: 
            | ndarray with spectral reflectance functions.
            | Will be interpolated if wavelengths do not match those of :data:
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines the color matching functions to be used in the 
            | calculation of XYZ.
        :K: 
            | None, optional
            |   e.g.  K  = 683 lm/W for '1931_2' (relative == False) 
            |   or K = 100/sum(spd*dl)        (relative == True)
        :out:
            | None or 1 or 2, optional
            | Determines number and shape of output. (see :returns:)
        :cie_std_dev_obs: 
            | None or str, optional
            | - None: don't use CIE Standard Deviate Observer function.
            | - 'f1': use F1 function.
    
    Returns:
        :returns:
            | If rfl is None:
            |    If out is None: ndarray of xyz values 
            |        (.shape = (data.shape[0],3))
            |    If out == 1: ndarray of xyz values 
            |        (.shape = (data.shape[0],3))
            |    If out == 2: (ndarray of xyz, ndarray of xyzw) values
            |        Note that xyz == xyzw, with (.shape = (data.shape[0],3))
            | If rfl is not None:
            |   If out is None: ndarray of xyz values 
            |         (.shape = (rfl.shape[0],data.shape[0],3))
            |   If out == 1: ndarray of xyz values 
            |       (.shape = (rfl.shape[0]+1,data.shape[0],3))
            |        The xyzw values of the light source spd are the first set 
            |        of values of the first dimension. The following values 
            |       along this dimension are the sample (rfl) xyz values.
            |    If out == 2: (ndarray of xyz, ndarray of xyzw) values
            |        with xyz.shape = (rfl.shape[0],data.shape[0],3)
            |        and with xyzw.shape = (data.shape[0],3)
             
    References:
        1. `CIE15:2018, “Colorimetry,” CIE, Vienna, Austria, 2018. <https://doi.org/10.25039/TR.015.2018>`_
    """

    # convert to np format and ensure 2D-array:
    data = getdata(data, kind='np') if isinstance(data, pd.DataFrame) else np2d(data)

    # get wl spacing:
    dl = getwld(data[0])

    # get cmf,k for cieobs:
    if isinstance(cieobs, str):
        if K is None: K = _CMF[cieobs]['K']
        scr = 'dict'
    else:
        scr = 'cieobs'
        if (K is None) & (relative == False): K = 1

    # Interpolate to wl of data:
    cmf = xyzbar(cieobs=cieobs, scr=scr, wl_new=data[0], kind='np')

    # Add CIE standard deviate observer function to cmf if requested:
    if cie_std_dev_obs is not None:
        cmf_cie_std_dev_obs = xyzbar(cieobs='cie_std_dev_obs_' + cie_std_dev_obs.lower(),
                                     scr=scr,
                                     wl_new=data[0],
                                     kind='np')
        cmf[1:] = cmf[1:] + cmf_cie_std_dev_obs[1:]

    # Rescale xyz using k or 100/Yw:
    if relative == True: K = 100.0 / np.dot(data[1:], cmf[2, :] * dl)

    # Interpolate rfls to lambda range of spd and calculate xyz:
    if rfl is not None:
        rfl = cie_interp(data=np2d(rfl), wl_new=data[0], kind='rfl')
        rfl = np.concatenate((np.ones((1, data.shape[1])),
                              rfl[1:]))  #add rfl = 1 for light source spectrum
        xyz = K * np.array(
            [np.dot(rfl, (data[1:] * cmf[i + 1, :] * dl).T)
             for i in range(3)])  #calculate tristimulus values
        rflwasnotnone = 1
    else:
        rfl = np.ones((1, data.shape[1]))
        xyz = (K * (np.dot((cmf[1:] * dl), data[1:].T))[:, None, :])
        rflwasnotnone = 0
    xyz = np.transpose(xyz, [1, 2, 0])  #order [rfl,spd,xyz]

    # Setup output:
    if out == 2:
        xyzw = xyz[0, ...]
        xyz = xyz[rflwasnotnone:, ...]
        if rflwasnotnone == 0: xyz = np.squeeze(xyz, axis=0)
        return xyz, xyzw
    elif out == 1:
        if rflwasnotnone == 0: xyz = np.squeeze(xyz, axis=0)
        return xyz
    else:
        xyz = xyz[rflwasnotnone:, ...]
        if rflwasnotnone == 0: xyz = np.squeeze(xyz, axis=0)
        return xyz
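
# A minimal usage sketch (assuming the luxpy package, which provides the
# _CIE_ILLUMINANTS dict and exposes spd_to_xyz at the top level):
import numpy as np
import luxpy as lx

spd = lx._CIE_ILLUMINANTS['D65']       # row 0: wavelengths, row 1: spectral values
xyzw = lx.spd_to_xyz(spd, relative=True, cieobs='1931_2')  # white point, shape (1, 3)
rfl = np.vstack((spd[0], 0.3 * np.ones_like(spd[0])))      # a flat 30% reflector
xyz, xyzw2 = lx.spd_to_xyz(spd, rfl=rfl, out=2)            # xyz.shape = (1, 1, 3)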
Example 23
def get_poly_model(jabt, jabr, modeltype=_VF_MODEL_TYPE):
    """
    Setup base color shift model (delta_a, delta_b), 
    determine model parameters and accuracy.
    
    | Calculates a base color shift (delta) from the ref. chromaticity ar, br.
    
    Args:
        :jabt: 
            | ndarray with jab color coordinates under the test SPD.
        :jabr: 
            | ndarray with jab color coordinates under the reference SPD.
        :modeltype:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
            | (see notes below)
            
    Returns:
        :returns: 
            | (poly_model, 
            |       pmodel, 
            |       dab_model, 
            |        dab_res, 
            |        dCHoverC_res, 
            |        dab_std, 
            |        dCHoverC_std)
            |
            | :poly_model: function handle to model
            | :pmodel: ndarray with model parameters
            | :dab_model: ndarray with ab model predictions from ar, br.
            | :dab_res: ndarray with residuals between 'da,db' of samples and 
            |            'da,db' predicted by the model.
            | :dCHoverC_res: ndarray with residuals between 'dCoverC,dH' 
            |                 of samples and 'dCoverC,dH' predicted by the model.
            |     Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |         (predicted from model, see notes below)
            | :dab_std: ndarray with std of :dab_res:
            | :dCHoverC_std: ndarray with std of :dCHoverC_res: 

    Notes: 
        1. Model types:
            | poly5_model = lambda a,b,p:         p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
            | poly6_model = lambda a,b,p:  p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
        
        2. Calculation of dCoverC and dH:
            | dCoverC = (np.cos(hr)*da + np.sin(hr)*db)/Cr
            | dHoverC = (np.cos(hr)*db - np.sin(hr)*da)/Cr    
    """
    at = jabt[..., 1]
    bt = jabt[..., 2]
    ar = jabr[..., 1]
    br = jabr[..., 2]

    # A. Calculate da, db:
    da = at - ar
    db = bt - br

    # B.1 Calculate model matrix:
    # 5-parameter model:
    M5 = np.array([[
        np.sum(ar * ar),
        np.sum(ar * br),
        np.sum(ar * ar**2),
        np.sum(ar * ar * br),
        np.sum(ar * br**2)
    ],
                   [
                       np.sum(br * ar),
                       np.sum(br * br),
                       np.sum(br * ar**2),
                       np.sum(br * ar * br),
                       np.sum(br * br**2)
                   ],
                   [
                       np.sum((ar**2) * ar),
                       np.sum((ar**2) * br),
                       np.sum((ar**2) * ar**2),
                       np.sum((ar**2) * ar * br),
                       np.sum((ar**2) * br**2)
                   ],
                   [
                       np.sum(ar * br * ar),
                       np.sum(ar * br * br),
                       np.sum(ar * br * ar**2),
                       np.sum(ar * br * ar * br),
                       np.sum(ar * br * br**2)
                   ],
                   [
                       np.sum((br**2) * ar),
                       np.sum((br**2) * br),
                       np.sum((br**2) * ar**2),
                       np.sum((br**2) * ar * br),
                       np.sum((br**2) * br**2)
                   ]])
    #6-parameters model
    M6 = np.array([[
        ar.size,
        np.sum(1.0 * ar),
        np.sum(1.0 * br),
        np.sum(1.0 * ar**2),
        np.sum(1.0 * ar * br),
        np.sum(1.0 * br**2)
    ],
                   [
                       np.sum(ar * 1.0),
                       np.sum(ar * ar),
                       np.sum(ar * br),
                       np.sum(ar * ar**2),
                       np.sum(ar * ar * br),
                       np.sum(ar * br**2)
                   ],
                   [
                       np.sum(br * 1.0),
                       np.sum(br * ar),
                       np.sum(br * br),
                       np.sum(br * ar**2),
                       np.sum(br * ar * br),
                       np.sum(br * br**2)
                   ],
                   [
                       np.sum((ar**2) * 1.0),
                       np.sum((ar**2) * ar),
                       np.sum((ar**2) * br),
                       np.sum((ar**2) * ar**2),
                       np.sum((ar**2) * ar * br),
                       np.sum((ar**2) * br**2)
                   ],
                   [
                       np.sum(ar * br * 1.0),
                       np.sum(ar * br * ar),
                       np.sum(ar * br * br),
                       np.sum(ar * br * ar**2),
                       np.sum(ar * br * ar * br),
                       np.sum(ar * br * br**2)
                   ],
                   [
                       np.sum((br**2) * 1.0),
                       np.sum((br**2) * ar),
                       np.sum((br**2) * br),
                       np.sum((br**2) * ar**2),
                       np.sum((br**2) * ar * br),
                       np.sum((br**2) * br**2)
                   ]])

    # B.2 Define model function:
    poly5_model = lambda a, b, p: p[0] * a + p[1] * b + p[2] * (a**2) + p[3] * a * b + p[4] * (b**2)
    poly6_model = lambda a, b, p: p[0] + p[1] * a + p[2] * b + p[3] * (a**2) + p[4] * a * b + p[5] * (b**2)

    if modeltype == 'M5':
        M = M5
        poly_model = poly5_model
    else:
        M = M6
        poly_model = poly6_model

    M = np.linalg.inv(M)

    # C.1 Data a,b analysis output:
    if modeltype == 'M5':
        da_model_parameters = np.dot(
            M,
            np.array([
                np.sum(da * ar),
                np.sum(da * br),
                np.sum(da * ar**2),
                np.sum(da * ar * br),
                np.sum(da * br**2)
            ]))
        db_model_parameters = np.dot(
            M,
            np.array([
                np.sum(db * ar),
                np.sum(db * br),
                np.sum(db * ar**2),
                np.sum(db * ar * br),
                np.sum(db * br**2)
            ]))
    else:
        da_model_parameters = np.dot(
            M,
            np.array([
                np.sum(da * 1.0),
                np.sum(da * ar),
                np.sum(da * br),
                np.sum(da * ar**2),
                np.sum(da * ar * br),
                np.sum(da * br**2)
            ]))
        db_model_parameters = np.dot(
            M,
            np.array([
                np.sum(db * 1.0),
                np.sum(db * ar),
                np.sum(db * br),
                np.sum(db * ar**2),
                np.sum(db * ar * br),
                np.sum(db * br**2)
            ]))
    pmodel = np.vstack((da_model_parameters, db_model_parameters))

    # D.1 Calculate model da, db:
    da_model = poly_model(ar, br, pmodel[0])
    db_model = poly_model(ar, br, pmodel[1])
    dab_model = np.hstack((da_model, db_model))

    # D.2 Calculate residuals for da & db:
    da_res = da - da_model
    db_res = db - db_model
    dab_res = np.hstack((da_res, db_res))
    dab_std = np.vstack((np.std(da_res, axis=0), np.std(db_res, axis=0)))

    # E Calculate href, Cref:
    href = np.arctan2(br, ar)
    Cref = (ar**2 + br**2)**0.5

    # F Calculate dC/C, dH/C for data and model and calculate residuals:
    dCoverC = (np.cos(href) * da + np.sin(href) * db) / Cref
    dHoverC = (np.cos(href) * db - np.sin(href) * da) / Cref
    dCoverC_model = (np.cos(href) * da_model + np.sin(href) * db_model) / Cref
    dHoverC_model = (np.cos(href) * db_model - np.sin(href) * da_model) / Cref
    dCoverC_res = dCoverC - dCoverC_model
    dHoverC_res = dHoverC - dHoverC_model
    dCHoverC_std = np.vstack((np.std(dCoverC_res,
                                     axis=0), np.std(dHoverC_res, axis=0)))

    dCHoverC_res = np.hstack((href, dCoverC_res, dHoverC_res))

    return poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std
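
# The M5/M6 matrices above are the normal equations (X.T @ X) of an ordinary
# least-squares fit of da and db onto the polynomial basis in (ar, br). A compact,
# equivalent formulation with np.linalg.lstsq (fit_poly6 is a hypothetical helper,
# not part of the source):
import numpy as np

def fit_poly6(ar, br, d):
    # design matrix for d ~ p0 + p1*a + p2*b + p3*a**2 + p4*a*b + p5*b**2:
    X = np.stack((np.ones_like(ar), ar, br, ar**2, ar * br, br**2), axis=-1)
    p, *_ = np.linalg.lstsq(X, d, rcond=None)
    return p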
Example 24
def xyz_to_Ydlep_(xyz,
                  cieobs=_CIEOBS,
                  xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                  flip_axes=False,
                  **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to speed up the loop in the function.
            |          (a single xyzw is not flipped!)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # np.float was removed in NumPy >= 1.24

    # flip axis so that shortest dim is on axis0 (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) > 0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]
    # truncate the spectrum locus at 700 nm (long-wavelength limit):
    maxlambda = 700
    pmaxlambda = np.where(wlsl == maxlambda)[0][0]
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    for i in range(xyz3.shape[1]):
        # find indices of hues requiring a complementary wavelength (purple-line stimuli):
        pc = np.where((h[:, i] > hsl_max) & (h[:, i] < hsl_min))
        # add/subtract 180° to get the hue of the (positive) complementary wavelength:
        h[:, i][pc] = h[:, i][pc] - np.sign(h[:, i][pc] - 180.0) * 180.0

        # find 2 closest hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue
        # interpolate linearly in hue to get the wavelength corresponding to h:
        # wl = wl1 + (h - h1) * (wl2 - wl1) / (h2 - h1)
        dominantwavelength[:, i] = wlsl[q1] + (h[:, i] - hsl[q1, 0]) * (
            (wlsl[q2] - wlsl[q1]) / (hsl[q2, 0] - hsl[q1, 0]))
        # clamp to the wavelength interval spanned by the two closest locus hues:
        dominantwavelength[(dominantwavelength[:, i] > max(wlsl[q1], wlsl[q2])), i] = max(wlsl[q1], wlsl[q2])
        dominantwavelength[(dominantwavelength[:, i] < min(wlsl[q1], wlsl[q2])), i] = min(wlsl[q1], wlsl[q2])

        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][pc]  # complementary wavelengths get a '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # x of dominant wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # y of dominant wl
        d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5  # distance from white point to spectrum locus
        d = (x[:, i]**2.0 + y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct test points that have a complementary wavelength:
        # calculate the intersection of the line through white point and test point with the purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped:
        Ydlep = Ydlep.transpose((1, 0, 2))
    return Ydlep.reshape(xyz.shape)
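
# A minimal usage sketch of the (work-in-progress) function above; the xyzw
# values are D65 for the 1931 2° observer, the sample value is arbitrary:
import numpy as np

xyz = np.array([[60.0, 50.0, 20.0]])         # an orange-ish stimulus
xyzw = np.array([[95.047, 100.0, 108.883]])  # D65 white point
Ydlep = xyz_to_Ydlep_(xyz, cieobs='1931_2', xyzw=xyzw)
# Ydlep[..., 1]: dominant wavelength (a negative sign marks complementary wavelengths)
# Ydlep[..., 2]: excitation purity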
Example 25
def cam_sww16(data,
              dataw=None,
              Yb=20.0,
              Lw=400.0,
              Ccwb=None,
              relative=True,
              inputtype='xyz',
              direction='forward',
              parameters=None,
              cieobs='2006_10',
              match_to_conversionmatrix_to_cieobs=True):
    """
    A simple principled color appearance model based on a mapping of 
    the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | When changing to a different CIE observer, change the xyz-to-lms
            | matrix to the one corresponding to that observer. If False: use 
            | the one set in :parameters: or _CAM_SWW16_PARAMETERS.
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
        | published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()
    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        match_to_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs
    )

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    #--------------------------------------------------------------------------
    # Setup default adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    data, dataw, camout, originalshape = _massage_input_and_init_output(
        data, dataw, inputtype=inputtype, direction=direction)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    # weight the xyz-to-lms conversion matrix with cLMS (cfr. stage 1 calculations):
    Mxyz2lms = np.dot(np.diag(cLMS), Mxyz2lms)
    invMxyz2lms = np.linalg.inv(Mxyz2lms)  # inverse lms-to-xyz conversion matrix
    # matrix with scale factors for L, M, S for quick matrix multiplications:
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)  # pre-calculate its inverse to avoid repeats in the loop

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get absolute tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #-----------------------------------------------------------------------------
        # stage 1: calculate photon rates of stimulus, white point, and
        # adapting field: i.e. lmst, lmsw and lmsf
        #-----------------------------------------------------------------------------
        # Convert to white point l,m,s:
        lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']

        # Calculate adaptation field and convert to l,m,s:
        lmsf = (Yb / 100.0) * lmsw

        # Calculate lms of stimulus
        # or put adaptation lmsf in test field lmst for later use in inverse-mode (no xyz in 'inverse' mode!!!):
        lmst = (683.0 * np.dot(Mxyz2lms, xyzt.T).T /
                _CMF[cieobs]['K']) if (direction == 'forward') else lmsf

        #-----------------------------------------------------------------------------
        # stage 2: calculate cone outputs of stimulus lmstp
        #-----------------------------------------------------------------------------
        lmstp = math.erf(Cc *
                         (np.log(lmst / lms0) +
                          Cf * np.log(lmsf / lms0)))  # stimulus test field
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) +
                               Cf * np.log(lmsf / lms0)))  # adaptation field

        # add adaptation field lms temporarily to lmstp for quick calculation
        lmstp = np.vstack((lmsfp, lmstp))

        #-----------------------------------------------------------------------------
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        #-----------------------------------------------------------------------------
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        #-----------------------------------------------------------------------------
        #  stage 4: calculate recoded nerve signals, alphapp, betapp:
        #-----------------------------------------------------------------------------
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        #-----------------------------------------------------------------------------
        #  stage 5: calculate conscious color perception:
        #-----------------------------------------------------------------------------
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  stage 5 continued but SPLIT IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #--------------------------------------
        # FORWARD MODE TO PERCEPTUAL SIGNALS:
        #--------------------------------------
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]

            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation:
                alph_out = alph_int - Ccwb[0] * alph_int[0]
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            # stack together and remove adaptation field from vertical stack
            # camout is an ndarray with perceptual signals:
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':

            # stack cognitive pre-adapted adaptation field signals (first on stack) together:
            #labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data
            #(contains model perceptual signals in inverse mode!!!):
            lstar_out, alph_out, bet_out = asplit(data[i])

            #------------------------------------------------------------------------
            #  Inverse stage 5: undo cortical white-balance:
            #------------------------------------------------------------------------
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb the degree of adaptation:
                alph_int = alph_out + Ccwb[0] * alph_int[0]
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            #---------------------------------------------------------------------------
            #  Inverse stage 4: pre-adapted perceptual signals to recoded nerve signals:
            #---------------------------------------------------------------------------
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0])  # inverts alphpp = cga2[0]*(alphp + betp)
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0])  # inverts betpp = cgb2[0]*(alphp - betp)

            #---------------------------------------------------------------------------
            #  Inverse stage 3: recoded nerve signals to optic nerve signals:
            #---------------------------------------------------------------------------
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            #---------------------------------------------------------------------------
            #  Inverse stage 2: optic nerve signals to cone outputs:
            #---------------------------------------------------------------------------
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            #---------------------------------------------------------------------------
            #  Inverse stage 1: cone outputs to photon rates:
            #---------------------------------------------------------------------------
            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            #---------------------------------------------------------------------------
            #  Photon rates to absolute or relative tristimulus values:
            #---------------------------------------------------------------------------
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)
            if relative:
                xyzt = (100 / Lw) * xyzt

            # store in same named variable as forward mode:
            camout[i] = xyzt

            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            #  END inverse mode
            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    return _massage_output_data_to_original_shape(camout, originalshape)
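The stage-1 compression and its inverse used above can be checked in isolation. Below is a minimal sketch, assuming SciPy's erf/erfinv in place of luxpy's vectorized math.erf/math.erfinv, and with made-up values for Cc, Cf, lms0 and the adaptation field (the model's fitted parameters are not reproduced here):

import numpy as np
from scipy.special import erf, erfinv  # element-wise error function and inverse

# hypothetical constants and photon rates (not the model's fitted values):
Cc, Cf = 0.25, -0.5
lms0 = 1e3 * np.ones(3)                # reference photon rates
lmsf = 2e3 * np.ones(3)                # adaptation field photon rates
lmst = np.array([1.5e3, 3e3, 0.8e3])   # test field photon rates

# forward stage 1: photon rates -> compressed cone outputs in (-1, 1):
lmstp = erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))

# inverse stage 1 (mirrors the inverse branch above):
lmst_rec = np.exp(erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)) * lms0
assert np.allclose(lmst, lmst_rec)     # round trip recovers lmst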
Example 26
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw: 
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axes 0 and 1 in :Ydlep: to speed up the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on itself in the high wavelength range (~700 nm)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)  # (np.float was removed in NumPy >= 1.24)

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value (initial guess, refined below)
        dwl = np.diff(Yxysl[:, 0, 1])  # spectrum locus should have increasing x in that range
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over the shorter dim (axis 1; the longest dim was put on axis 0 above):
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom:
        #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:])) #abs because dom<0--> complemtary wl
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) ==
            sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(
            axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped:
        Yxy = Yxy.transpose((1, 0, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
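A quick round-trip sketch for Ydlep_to_xyz together with its forward counterpart xyz_to_Ydlep (Example 30 below); the white point values are assumed, and the import path assumes both functions are exposed by luxpy:

import numpy as np
from luxpy import xyz_to_Ydlep, Ydlep_to_xyz  # import path is an assumption

xyz = np.array([[40.0, 50.0, 20.0],
                [30.0, 40.0, 60.0]])
xyzw = np.array([[95.04, 100.0, 108.88]])  # approximate D65 white point (assumed)

Ydlep = xyz_to_Ydlep(xyz, xyzw=xyzw)       # Y, dominant wavelength, excitation purity
xyz_rec = Ydlep_to_xyz(Ydlep, xyzw=xyzw)   # inverse transform
print(np.abs(xyz - xyz_rec).max())         # round-trip error should be near zero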
Example 27
def spd_to_aopicE(sid,
                  Ee=None,
                  E=None,
                  Q=None,
                  cieobs=_CIEOBS,
                  sid_units='W/m2',
                  out='Eeas,Eas'):
    """
    Calculate alpha-opic irradiance (Ee,α) and equivalent luminance (Eα) values
    for the l-cone, m-cone, s-cone, rod and iprgc (α) photoreceptor cells 
    following CIE technical note TN 003:2015.
    
    Args:
        :sid: 
            | numpy.ndarray with retinal spectral irradiance in :sid_units: 
            | (if 'uW/cm2', sid will be converted to SI units 'W/m2')
        :Ee: 
            | None, optional
            | If not None: normalize :sid: to an irradiance of :Ee:
        :E: 
            | None, optional
            | If not None: normalize :sid: to an illuminance of :E:
            | Note that E is calculated using a Km factor corrected to standard air.
        :Q: 
            | None, optional
            | If not None: normalize :sid: to a quantal energy of :Q:
        :cieobs:
            | _CIEOBS or str, optional
            | Type of cmf set to use for photometric units.
        :sid_units:
            | 'W/m2', optional
            | Other option: 'uW/cm2'; input units of :sid:
        :out: 
            | 'Eeas, Eas' or str, optional
            | Determines values to return.
            
    Returns:
        :returns: 
            | (Eeas, Eas) with Eeas and Eas resp. numpy.ndarrays with the 
            | α-opic irradiance and equivalent illuminance values 
            | of all spectra in :sid: in SI-units. 
            |
            | (other choice can be set using :out:)
    """
    outlist = out.split(',')

    # Convert to Watt/m²:
    if sid_units == 'uW/cm2':
        sid[1:] = sid[1:] / 100  # 1 uW/cm² = 0.01 W/m²

    elif sid_units == 'W/m2':
        pass
    else:
        raise Exception(
            "spd_to_aopicE(): {} unsupported units for SID.".format(sid_units))

    # Normalize sid to Ee:
    if Ee is not None:
        sid = spd_normalize(sid, norm_type='ru', norm_f=Ee)
    elif E is not None:
        sid = spd_normalize(sid, norm_type='pusa', norm_f=E)
    elif Q is not None:
        sid = spd_normalize(sid, norm_type='qu', norm_f=Q)

    # Get sid irradiance (W/m²):
    if 'Ee' in outlist:
        Ee = spd_to_power(sid, cieobs=cieobs, ptype='ru')

    # Get sid illuminance (lx):
    if 'E' in outlist:
        E = spd_to_power(
            sid, cieobs=cieobs,
            ptype='pusa')  #photometric units (Km corrected to standard air)

    # Get sid quantal energy (photons/m²/s):
    if 'Q' in outlist:
        Q = spd_to_power(sid, cieobs=cieobs, ptype='qu')

    # get SI actinic action spectra, sa:
    sa = spd(_ACTIONSPECTRA, wl=sid[0], interpolation='cmf', norm_type='max')

    # get wavelength spacing:
    dl = getwld(sid[0])

    # Calculate all alpha-opics Ee's:
    Eeas = (np.dot((sa[1:] * dl), sid[1:].T)).T

    # Calculate equivalent alpha-opic E's:
    Vl, Km = vlbar(cieobs=cieobs, wl_new=sid[0], out=2)
    # Km correction factor for standard air (cfr. CIE TN003-2015):
    lambdad = _BB['c'] / (_BB['na'] * 54 * 1e13) / (1e-9)  # 555 nm lambda in standard air
    Km_correction_factor = 1 / (1 - (1 - 0.9998567) * (lambdad - 555))
    Eas = Km * Km_correction_factor * Eeas * (Vl[1].sum() / sa[1:].sum(axis=1))

    #Prepare output:
    if out == 'Eeas,Eas':
        return Eeas, Eas
    elif out == 'Eeas':
        return Eeas
    elif out == 'Eas':
        return Eas
    else:
        return eval(out)
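A short usage sketch for spd_to_aopicE, using a made-up flat spectrum; the import path is an assumption:

import numpy as np
from luxpy.photbiochem import spd_to_aopicE  # import path is an assumption

wl = np.arange(380, 781, 1.0)
sid = np.vstack((wl, 0.01 * np.ones_like(wl)))  # flat spectral irradiance (W/m2/nm)

# alpha-opic irradiances (W/m2) and equivalent illuminances (lx),
# after normalizing the spectrum to an illuminance of 100 lx:
Eeas, Eas = spd_to_aopicE(sid, E=100, sid_units='W/m2', out='Eeas,Eas')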
Example 28
def apply(data, n_step = 2, catmode = None, cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\
          D = None, mcat = [_MCAT_DEFAULT], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None):
    """
    Calculate corresponding colors by applying a von Kries chromatic adaptation
    transform (CAT), i.e. independent rescaling of 'sensor sensitivity' to data
    to adapt from current adaptation conditions (1) to the new conditions (2).
    
    Args:
        :data: 
            | ndarray of tristimulus values (can be NxMx3)
        :n_step:
            | 2, optional
            | Number of steps in CAT (1: 1-step, 2: 2-step)
        :catmode: 
            | None, optional
            |    - None: use :n_step: to set mode: 1: '1>2', 2: '1>0>2'
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>2': One-step CAT
            |      from illuminant 1 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :cattype: 
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, depending on :catmode: optional (can be Mx3)
        :xyzw2:
            | None, depending on :catmode: optional (can be Mx3)
        :xyzw0:
            | None, depending on :catmode: optional (can be Mx3)
        :D: 
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0]. 
        :La: 
            | None, optional
            | Adapting luminances. 
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative. 
        :F: 
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations 
            |  (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0]. 
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | [_MCAT_DEFAULT], optional
            | List[str] or List[ndarray] of sensor space matrices for each 
            |  condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0: 
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values 
            |   - 'lms': return corresponding sensor space excitation values 
            |            (e.g. for further calculations) 
      
    Returns:
          :returns: 
              | ndarray with corresponding colors
        
    Reference:
        1. `Smet, K. A. G., & Ma, S. (2020). 
        Some concerns regarding the CAT16 chromatic adaptation transform. 
        Color Research & Application, 45(1), 172–177. 
        <https://doi.org/10.1002/col.22457>`_
    """

    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:
        # Set catmode:
        if catmode is None:
            if n_step == 2:
                catmode = '1>0>2'
            elif n_step == 1:
                catmode = '1>2'
            else:
                raise Exception(
                    'cat.apply(n_step = {:1.0f}, catmode = None): Unknown requested n-step CAT mode !'
                    .format(n_step))

        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)
        else:
            target_shape = data.shape

        target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to iLL.E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1
                                is not None):  # apply one-step CAT: 1-->0
            catmode = '1>0'  #override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, use '1>0' !"
            )

        # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:  (note D20 is degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.zeros(data.shape)
        xyzc.fill(np.nan)
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match data.shape[1]!'
            )

        for i in range(xyzc.shape[1]):
            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T
            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar1)  #get degree of adaptation depending on Dtype
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')

                D20[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar2)  #get degree of adaptation depending on Dtype

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar12)  #get degree of adaptation depending on Dtype

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]

        return xyzc
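A minimal sketch of a one-step von Kries CAT with apply(), assuming the function lives in luxpy's cat module (the import path is an assumption) and using approximate illuminant white points:

import numpy as np
from luxpy import cat  # import path is an assumption

xyzw1 = np.array([[109.85, 100.0, 35.58]])  # approx. illuminant A white (assumed)
xyzw2 = np.array([[95.04, 100.0, 108.88]])  # approx. illuminant D65 white (assumed)
data = np.array([[60.0, 50.0, 30.0]])       # stimulus tristimulus values under illuminant 1

# one-step CAT '1>2' with default (full) degrees of adaptation:
xyzc = cat.apply(data, n_step=1, xyzw1=xyzw1, xyzw2=xyzw2)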
Example 29
def cam18sl(data,
            datab=None,
            Lb=[100],
            fov=10.0,
            inputtype='xyz',
            direction='forward',
            outin='Q,aS,bS',
            parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used when datab is None; the background is then assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aS,bS' or str, optional
            | 'Q,aS,bS' (brightness and opponent signals for saturation)
            |  other options: 'Q,aM,bM' (colorfulness) 
            |                 (Note that 'Q,aW,bW' would lead to a Cartesian 
            |                  a,b-coordinate system centered at (1,0))
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)
        | * Default output was 'Q,aW,bW' prior to March 2020, but since this
        |   is an a,b Cartesian system centered on (1,0), the default output
        |   has been changed to 'Q,aS,bS'.

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """

    if parameters is None:
        parameters = _CAM18SL_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cieobs, k, naka, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF[cieobs]['M'])
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs=cieobs, relative=False)[..., 1:2]
        else:
            Lb = datab[..., 1:2]
    else:
        if isinstance(Lb, list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0]  # use wlr of stimulus data
        else:
            wlr = datab[0]  # use wlr of background data
    datar = np.vstack((wlr, np.ones(
        (Lb.shape[0], wlr.shape[0]))))  # create eew
    xyzr = spd_to_xyz(datar, cieobs=cieobs,
                      relative=False)  # get abs. tristimulus values
    datar[1:] = datar[1:] / xyzr[..., 1:2] * Lb

    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs=cieobs, relative=False)

    # prepare data and datab for loop over backgrounds:
    # make axis 1 of datab have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        datar = spd_to_xyz(datar, cieobs=cieobs,
                           relative=False)  # convert to xyz!!
        # make datab and datar have the same light-source dimension (axis 0,
        # used to store different backgrounds) as data:
        if datab.shape[0] == 1:
            datab = np.repeat(datab, data.shape[1], axis=0)
            datar = np.repeat(datar, data.shape[1], axis=0)
    else:
        if datab.shape[0] == 2:
            datab = np.vstack(
                (datab[0], np.repeat(datab[1:], data.shape[1], axis=0)))
        if datar.shape[0] == 2:
            datar = np.vstack(
                (datar[0], np.repeat(datar[1:], data.shape[1], axis=0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    #-------------------------------------------------

    #initialize camout:
    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count (input can only be spectral in forward mode)
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
        else:
            xyzb = datab[i:i + 1, :]
            xyzr = datar[i:i + 1, :]

        lmsb = np.dot(_CMF[cieobs]['M'], xyzb.T).T  # convert to l,m,s
        rgbb = (lmsb / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF[cieobs]['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF[cieobs]['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape) * Lb[i]  # explicitly set EEW cone excitations at the same luminance as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs=cieobs, relative=False)
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF[cieobs]['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr / rgbb)[0])
            rgba = np.dot(Mcat, rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            a = ca * a
            b = cb * b

            # calculate colorfullness like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA * (
                A + cHK[0] * M**cHK[1]
            )  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q
            S = s  # make extra variable, just in case 'S' is called

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aS', 'bS']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aS, bS))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((1.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q / cA - cHK[0] * M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)

            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'],
                                direction='inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb / rgbr)[0]), rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb / k * _CMF[cieobs]['K']
            xyz = np.dot(Mlms2xyz, lms.T).T

            camout[i] = xyz

    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[1] == 1:
        camout = np.squeeze(camout, axis=1)

    return camout
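A forward/inverse round-trip sketch for cam18sl, with hypothetical absolute XYZ input and an EEW background of 100 cd/m²; the import path is an assumption:

import numpy as np
from luxpy.cam import cam18sl  # import path is an assumption

xyz = np.array([[120.0, 100.0, 80.0]])  # absolute CIE 2006 10° XYZ (hypothetical)

# forward: brightness Q and saturation-based opponent signals aS, bS:
qab = cam18sl(xyz, datab=None, Lb=[100], fov=10.0, inputtype='xyz',
              direction='forward', outin='Q,aS,bS')

# inverse: back from the correlates to XYZ (should recover the input):
xyz_rec = cam18sl(qab, datab=None, Lb=[100], fov=10.0, inputtype='xyz',
                  direction='inverse', outin='Q,aS,bS')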
Example 30
def xyz_to_Ydlep(xyz,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axes 0 and 1 in :xyz: to speed up the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on itself in the high wavelength range (~700 nm)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # (np.float was removed in NumPy >= 1.24)

    # flip axes so that the longest dim is on axis 0 (the loop below runs over the shorter axis 1):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value (initial guess, refined below)
        dwl = np.diff(Yxysl[:, 0, 1])  # spectrum locus should have increasing x in that range
        dwl[wlsl[:-1] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)

    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hue's requiring complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest enclosing hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = (hslb - hib)
        q1 = np.abs(dh).argmin(axis=0)  # index of closest hue
        sign_q1 = np.sign(dh[q1])[0]
        dh[np.sign(dh) ==
           sign_q1] = 1000000  # set all dh on the same side as q1 to a very large value
        q2 = np.abs(dh).argmin(axis=0)  # index of second closest (enclosing) hue


        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped:
        Ydlep = Ydlep.transpose((1, 0, 2))
    return Ydlep.reshape(xyz.shape)
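The purple-line correction in both xyz_to_Ydlep and Ydlep_to_xyz intersects the line from the white point through the test point with the segment joining the two spectrum-locus endpoints. The construction rotates the first line's direction by 90° with the matrix T, so that dot products against dap isolate the intersection parameter. A standalone sketch with hypothetical endpoints:

import numpy as np

# hypothetical chromaticities, white point shifted to the origin:
xyw = np.array([[0.0, 0.0]])         # white point
xy = np.array([[0.10, -0.05]])       # test point (sets the direction of line a)
xypl1 = np.array([[-0.25, -0.15]])   # purple-line endpoint (short-wavelength end)
xypl2 = np.array([[0.35, -0.08]])    # purple-line endpoint (long-wavelength end)

da = xy - xyw                        # direction of line a (white point -> test point)
db = xypl2 - xypl1                   # direction of line b (the purple line)
dp = xyw - xypl1
T = np.array([[0.0, -1.0], [1.0, 0.0]])
dap = np.dot(da, T)                  # rotate da by 90°: dap is perpendicular to da
denom = np.sum(dap * db, axis=1, keepdims=True)
num = np.sum(dap * dp, axis=1, keepdims=True)
xy_linecross = (num / denom) * db + xypl1  # intersection of lines a and b
print(xy_linecross)                  # ~ [[0.196, -0.098]], which lies on y = -0.5 x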