Example #1
def _update_parameter_dict(args,
                           parameters={},
                           cieobs=_CAM_DEFAULT_CIEOBS,
                           match_conversionmatrix_to_cieobs=False,
                           Mxyz2lms_whitepoint=None):
    """
    Get parameter dict and update with values in args dict. 
     | Also replace the xyz-to-lms conversion matrix with the one corresponding 
     | to cieobs and normalize it to illuminant E.
     
    Args:
        :args: 
            | dictionary with updated values. 
            | (get by placing 'args = locals().copy()' immediately after the start
            | of the function from which the update is called, 
            | see _simple_cam() code for an example.)
        :parameters:
            | dictionary with all (adjustable) parameter values used by the model   
        :cieobs: 
            | String with the CIE observer CMFs (one of _CMF['types']) of the input data.
            | Is used to get the Mxyz2lms matrix when match_conversionmatrix_to_cieobs == True.
        :match_conversionmatrix_to_cieobs:
            | False, optional
            | If True: replace the Mxyz2lms key in the parameters dict with the matrix
            |   in _CMF[cieobs]['M'], i.e. match it to the cieobs of the input data.
            | If False: keep the Mxyz2lms already in the parameters dict.
        :Mxyz2lms_whitepoint:
            | None, optional
            | White point to which the Mxyz2lms conversion matrix is normalized.
            | None defaults to illuminant E, i.e. np.array([[1.0, 1.0, 1.0]]).
    
    Returns:
        :parameters:
            | updated dictionary with model parameters for further use in the CAM.
            
    Notes:
        For an example of its use, see the code of _simple_cam() (type: _simple_cam??)
    """
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input
    if match_conversionmatrix_to_cieobs == True:
        parameters['Mxyz2lms'] = _CMF[cieobs]['M'].copy()
    if Mxyz2lms_whitepoint is None:
        Mxyz2lms_whitepoint = np.array([[1.0, 1.0, 1.0]])
    parameters['Mxyz2lms'] = math.normalize_3x3_matrix(
        parameters['Mxyz2lms'], Mxyz2lms_whitepoint
    )  # normalize matrix for xyz-> lms conversion to ill. E
    return parameters
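A minimal sketch of the calling pattern described in the docstring above. The caller _my_cam and its arguments are hypothetical; the sketch assumes _update_parameter_dict and the module globals it relies on (e.g. _CMF, _CAM_DEFAULT_CIEOBS) are in scope.

def _my_cam(xyz, cieobs=_CAM_DEFAULT_CIEOBS, Lw=100.0, Mxyz2lms=None):
    # hypothetical CAM; capture the caller's (possibly user-overridden) arguments:
    args = locals().copy()
    parameters = _update_parameter_dict(args,
                                        parameters={'Lw': 100.0, 'Mxyz2lms': None},
                                        cieobs=cieobs,
                                        match_conversionmatrix_to_cieobs=True)
    # parameters['Lw'] now holds the caller's Lw; parameters['Mxyz2lms'] is the
    # matrix matching cieobs, normalized to illuminant E.
    return parameters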
Example #2
def _update_parameter_dict(args,
                           parameters=None,
                           cieobs='2006_10',
                           match_to_conversionmatrix_to_cieobs=True):
    """
    Get parameter dict and update with values in args dict. 
    Also replace the xyz-to-lms conversion matrix with the one corresponding 
    to cieobs and normalize it to illuminant E.
    """
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input
    if match_to_conversionmatrix_to_cieobs == True:
        parameters['Mxyz2lms'] = _CMF[cieobs]['M'].copy()
    parameters['Mxyz2lms'] = math.normalize_3x3_matrix(
        parameters['Mxyz2lms'],
        np.array([[1.0, 1.0, 1.0]
                  ]))  # normalize matrix for xyz-> lms conversion to ill. E
    return parameters
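A short hedged sketch of this variant: load the published 'JOSA' parameter set by name and override one entry through the args dict. It assumes the function above and _CAM_SWW16_PARAMETERS / _CMF are in scope; the Ccwb value is illustrative.

pars = _update_parameter_dict({'Ccwb': 0.66}, parameters='JOSA', cieobs='2006_10',
                              match_to_conversionmatrix_to_cieobs=True)
# pars holds the 'JOSA' defaults with Ccwb overridden to 0.66; pars['Mxyz2lms'] is
# replaced by _CMF['2006_10']['M'] and normalized to illuminant E.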
Example #3
def ipt_to_xyz(ipt, cieobs=_CIEOBS, xyzw=None, M=None, **kwargs):
    """
    Convert IPT color coordinates to XYZ tristimulus values.

    | I: Lightness axis, P: red-green axis, T: yellow-blue axis.

    Args:
        :ipt: 
            | ndarray with IPT color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw for rescaling M
            |  (only used when :M: is None).
        :M: | None, optional
            | None defaults to the xyz-to-lms conversion matrix determined by :cieobs:

    Returns:
        :xyz: 
            | ndarray with tristimulus values

    Note:
        :xyz: is assumed to be under D65 viewing conditions! If necessary,
              perform chromatic adaptation!

    Reference:
        1. `Ebner F, and Fairchild MD (1998).
           Development and testing of a color space (IPT) with improved hue uniformity.
           In IS&T 6th Color Imaging Conference, (Scottsdale, Arizona, USA), pp. 8–13.
           <http://www.ingentaconnect.com/content/ist/cic/1998/00001998/00000001/art00003?crawler=true>`_
    """
    ipt = np2d(ipt)

    # get M to convert xyz to lms and apply normalization to matrix or input your own:
    if M is None:
        M = _IPT_M['xyz2lms'][cieobs].copy(
        )  # matrix conversions from xyz to lms
        if xyzw is None:
            xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs,
                              out=1) / 100.0
        else:
            xyzw = xyzw / 100.0
        M = math.normalize_3x3_matrix(M, xyzw)

    # convert from ipt to lms':
    if len(ipt.shape) == 3:
        lmsp = np.einsum('ij,klj->kli', np.linalg.inv(_IPT_M['lms2ipt']), ipt)
    else:
        lmsp = np.einsum('ij,lj->li', np.linalg.inv(_IPT_M['lms2ipt']), ipt)

    # reverse response compression: lms' to lms
    lms = lmsp**(1.0 / 0.43)
    p = np.where(lmsp < 0.0)
    lms[p] = -np.abs(lmsp[p])**(1.0 / 0.43)

    # convert from lms to xyz:
    if np.ndim(M) == 2:
        if len(ipt.shape) == 3:
            xyz = np.einsum('ij,klj->kli', np.linalg.inv(M), lms)
        else:
            xyz = np.einsum('ij,lj->li', np.linalg.inv(M), lms)
    else:
        if len(
                ipt.shape
        ) == 3:  # second dim of lms must match dim of 1st of M and 1st dim of xyzw
            xyz = np.concatenate([
                np.einsum('ij,klj->kli', np.linalg.inv(M[i]),
                          lms[:, i:i + 1, :]) for i in range(M.shape[0])
            ],
                                 axis=1)
        else:  # first dim of lms must match dim of 1st of M and 1st dim of xyzw
            xyz = np.concatenate([
                np.einsum('ij,lj->li', np.linalg.inv(M[i]), lms[i:i + 1, :])
                for i in range(M.shape[0])
            ],
                                 axis=0)

    #xyz = np.dot(np.linalg.inv(M),lms.T).T
    xyz = xyz * 100.0
    xyz[np.where(xyz < 0.0)] = 0.0

    return xyz
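A hedged round-trip check of ipt_to_xyz() against xyz_to_ipt() (shown further below). It assumes both functions are exposed at the luxpy package level, as in luxpy itself, and uses illustrative tristimulus values and the default observer.

import numpy as np
import luxpy as lx

xyz = np.array([[41.0, 35.0, 20.0],
                [19.0, 23.0, 45.0]])
ipt = lx.xyz_to_ipt(xyz)       # forward: XYZ -> IPT
xyz_back = lx.ipt_to_xyz(ipt)  # inverse: IPT -> XYZ
print(np.allclose(xyz, xyz_back))  # expected: True (round-trip)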
Example #4
_CSPACE_AXES['luv'] = ['L*', "u*", "v*"]
_CSPACE_AXES['ipt'] = ['I', "P", "T"]
_CSPACE_AXES['wuv'] = ['W*', "U*", "V*"]
_CSPACE_AXES['Vrb_mb'] = [
    'V (MacLeod-Boynton)', "r (MacLeod-Boynton)", "b (MacLeod-Boynton)"
]
_CSPACE_AXES['cct'] = ['', 'cct', 'duv']
_CSPACE_AXES['srgb'] = ['sR', 'sG', 'sB']

# pre-calculate matrices for conversion of xyz to lms and back for use in xyz_to_ipt() and ipt_to_xyz():
_IPT_M = {
    'lms2ipt':
    np.array([[0.4000, 0.4000, 0.2000], [4.4550, -4.8510, 0.3960],
              [0.8056, 0.3572, -1.1628]]),
    'xyz2lms': {
        x: math.normalize_3x3_matrix(
            _CMF[x]['M'], spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=x))
        for x in sorted(_CMF['types'])
    }
}
_COLORTF_DEFAULT_WHITE_POINT = np.array([[100.0, 100.0,
                                          100.0]])  # ill. E white point
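A hedged check of the precomputed _IPT_M matrices. It assumes the module-level names above are in scope, that '1931_2' is one of the _CMF['types'] keys, and that math.normalize_3x3_matrix rescales a matrix so that the supplied white point maps to unit LMS, which is what the D65-based normalization above intends.

xyzw_D65 = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs='1931_2')
print(_IPT_M['xyz2lms']['1931_2'] @ xyzw_D65.T)  # expected: ~[[1.], [1.], [1.]]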


#------------------------------------------------------------------------------
#---chromaticity coordinates---------------------------------------------------
#------------------------------------------------------------------------------
def xyz_to_Yxy(xyz, **kwargs):
    """
    Convert XYZ tristimulus values to CIE Yxy chromaticity values.

    Args:
Example #5
def calibrate(rgbcal, xyzcal, L_type = 'lms', tr_type = 'lut', cieobs = '1931_2', 
              nbit = 8, cspace = 'lab', avg = lambda x: ((x**2).mean()**0.5), ensure_increasing_lut_at_low_rgb = 0.2,
              verbosity = 1, sep=',',header=None): 
    """
    Calculate TR parameters/lut and conversion matrices.
    
    Args:
        :rgbcal:
            | ndarray [Nx3] or string with filename of RGB values 
            | rgbcal must contain at least the following types of settings:
            | - pure R,G,B: e.g. for pure R: (R != 0) & (G==0) & (B == 0)
            | - white(s): R = G = B = 2**nbit-1
            | - gray(s): R = G = B
            | - black(s): R = G = B = 0
            | - binary colors: cyan (G = B, R = 0), yellow (G = R, B = 0), magenta (R = B, G = 0)
        :xyzcal:
            | ndarray [Nx3] or string with filename of measured XYZ values for 
            | the RGB settings in rgbcal.
        :L_type:
            | 'lms', optional
            | Type of response to use in the derivation of the Tone-Response curves.
            | options:
            |  - 'lms': use cone fundamental responses: L vs R, M vs G and S vs B 
            |           (reduces noise and generally leads to more accurate characterization) 
            |  - 'Y': use the luminance signal: Y vs R, Y vs G, Y vs B
        :tr_type:
            | 'lut', optional
            | options:
            |  - 'lut': Derive/specify Tone-Response as a look-up-table
            |  - 'gog': Derive/specify Tone-Response as a gain-offset-gamma function
        :cieobs:
            | '1931_2', optional
            | CIE CMF set used to determine the XYZ tristimulus values
            | (needed when L_type == 'lms': determines the conversion matrix to
            | convert xyz to lms values)
        :nbit:
            | 8, optional
            | RGB values in nbit format (e.g. 8, 16, ...)
        :cspace:
            | color space or chromaticity diagram to calculate color differences in
            | when optimizing the xyz_to_rgb and rgb_to_xyz conversion matrices.
        :avg:
            | lambda x: ((x**2).mean()**0.5), optional
            | Function used to average the color differences of the individual RGB settings
            | in the optimization of the xyz_to_rgb and rgb_to_xyz conversion matrices.
        :ensure_increasing_lut_at_low_rgb:
            | 0.2 or float (max = 1.0) or None, optional
            | Ensure an increasing lut at low RGB values: for each channel, all lut
            | entries below the last non-increasing point of np.diff(lut) found in the
            | region where RGB/RGB.max() < :ensure_increasing_lut_at_low_rgb: are set
            | to zero (0.2 is a good rule-of-thumb value).
            | Non-strictly increasing lut values at low RGB are typically caused by
            | noise and a low measurement signal.
            | If None: don't force the lut to be increasing, but keep it as is.
        :verbosity:
            | 1, optional
            | > 0: print and plot optimization results
        :sep:
            | ',', optional
            | separator in files with rgbcal and xyzcal data
        :header:
            | None, optional
            | header specifier for files with rgbcal and xyzcal data 
            | (see pandas.read_csv)
            
    Returns:
        :M:
            | linear rgb to xyz conversion matrix
        :N:
            | xyz to linear rgb conversion matrix
        :tr:
            | Tone Response function parameters or lut
        :xyz_black:
            | ndarray with XYZ tristimulus values of black
        :xyz_white:
            | ndarray with tristimulus values of white
    """
    
    # process rgb, xyzcal inputs:
    rgbcal, xyzcal = _parse_rgbxyz_input(rgbcal, xyz = xyzcal, sep = sep, header=header)
    
    # get black-positions and average black xyz (flare):
    p_blacks = (rgbcal[:,0]==0) & (rgbcal[:,1]==0) & (rgbcal[:,2]==0)
    xyz_black = xyzcal[p_blacks,:].mean(axis=0,keepdims=True)
    
    # Calculate flare corrected xyz:
    xyz_fc = xyzcal - xyz_black
    
    # get positions of pure r, g, b values:
    p_pure = [(rgbcal[:,1]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,1]==0)] 
    
    # set type of L-response to use: Y for R,G,B or L,M,S for R,G,B:
    if L_type == 'Y':
        L = np.array([xyz_fc[:,1] for i in range(3)]).T
    elif L_type == 'lms':
        lms = (math.normalize_3x3_matrix(_CMF[cieobs]['M'].copy()) @ xyz_fc.T).T
        L = np.array([lms[:,i] for i in range(3)]).T
        
    # Get rgb linearizer parameters or lut and apply to all rgb's:
    if tr_type == 'gog':
        par = np.array([sp.optimize.curve_fit(TR, rgbcal[p_pure[i],i], L[p_pure[i],i]/L[p_pure[i],i].max(), p0=[1,0,1])[0] for i in range(3)]) # calculate parameters of each TR
        tr = par
    elif tr_type == 'lut':
        dac = np.arange(2**nbit)
        # lut = np.array([cie_interp(np.vstack((rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max())), dac, kind ='cubic')[1,:] for i in range(3)]).T
        lut = np.array([sp.interpolate.PchipInterpolator(rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max())(dac) for i in range(3)]).T # use this one to avoid potential overshoot with cubic spline interpolation (but slightly worse performance)
        lut[lut<0] = 0
          
        # ensure monotonically increasing lut values for low signal:
        if ensure_increasing_lut_at_low_rgb is not None:
            #ensure_increasing_lut_at_low_rgb = 0.2 # anything below that has a zero-crossing for diff(lut) will be set to zero
            for i in range(3):
                p0 = np.where((np.diff(lut[dac/dac.max() < ensure_increasing_lut_at_low_rgb,i])<=0))[0]
                if p0.any():
                    p0 = range(0,p0[-1])
                    lut[p0,i] = 0
        tr = lut

    
    # plot:
    if verbosity > 0:
        colors = 'rgb'
        linestyles = ['-','--',':']
        rgball = np.repeat(np.arange(2**nbit)[:,None],3,axis=1)
        Lall = _rgb_linearizer(rgball, tr, tr_type = tr_type)
        plt.figure()
        for i in range(3):
            plt.plot(rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max(),colors[i]+'o')
            plt.plot(rgball[:,i],Lall[:,i],colors[i]+linestyles[i],label=colors[i])
        plt.xlabel('Display RGB')
        plt.ylabel('Linear RGB')
        plt.legend()
        plt.title('Tone response curves')
    
    # linearize all rgb values and clamp to 0
    rgblin = _rgb_linearizer(rgbcal, tr, tr_type = tr_type) 
 
    # get rgblin to xyz_fc matrix:
    M = np.linalg.lstsq(rgblin, xyz_fc, rcond=None)[0].T 
    
    # get xyz_fc to rgblin matrix:
    N = np.linalg.inv(M)
    
    # get better approximation for conversion matrices:
    p_grays = (rgbcal[:,0] == rgbcal[:,1]) & (rgbcal[:,0] == rgbcal[:,2])
    p_whites = (rgbcal[:,0] == (2**nbit-1)) & (rgbcal[:,1] == (2**nbit-1)) & (rgbcal[:,2] == (2**nbit-1))
    xyz_white = xyzcal[p_whites,:].mean(axis=0,keepdims=True) # get xyzw for input into xyz_to_lab() or colortf()
    def optfcn(x, rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,out,verbosity):
        M = x.reshape((3,3))
        xyzest = rgb_to_xyz(rgbcal, M, tr, xyz_black, tr_type)
        xyzw = xyzcal[p_whites,:].mean(axis=0) # get xyzw for input into xyz_to_lab() or colortf()
        labcal, labest = colortf(xyzcal,tf=cspace,xyzw=xyzw), colortf(xyzest,tf=cspace,xyzw=xyzw) # calculate lab coord. of cal. and est.
        DEs = ((labcal-labest)**2).sum(axis=1)**0.5
        DEg = DEs[p_grays]
        DEw = DEs[p_whites]
        F = (avg(DEs)**2 + avg(DEg)**2 + avg(DEw)**2)**0.5
        if verbosity > 1:
            print('\nPerformance of TR + rgb-to-xyz conversion matrix M:')
            print('all: DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEs),np.std(DEs)))
            print('grays: DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEg),np.std(DEg)))
            print('whites(s) DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEw),np.std(DEw)))
        if out == 'F':
            return F
        else:
            return eval(out)
    x0 = M.ravel()
    res = math.minimizebnd(optfcn, x0, args =(rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,'F',0), use_bnd=False)
    xf = res['x_final']
    M = optfcn(xf, rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,'M',verbosity)
    N = np.linalg.inv(M)
    return M, N, tr, xyz_black, xyz_white
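A hedged usage sketch of calibrate(). The file names are hypothetical; the sketch assumes the helpers used above (e.g. rgb_to_xyz() with the signature seen in optfcn()) are in scope and that the files are comma-separated with the measurement layout described in the docstring.

import numpy as np

M, N, tr, xyz_black, xyz_white = calibrate('rgbcal.csv', 'xyzcal.csv',
                                           L_type='lms', tr_type='lut',
                                           cieobs='1931_2', nbit=8,
                                           cspace='lab', verbosity=0)
# M: linear RGB -> XYZ matrix, N = inv(M): XYZ -> linear RGB,
# tr: per-channel tone-response lut of size (2**nbit x 3).
xyz_mid_gray = rgb_to_xyz(np.array([[128, 128, 128]]), M, tr, xyz_black, 'lut')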
Example #6
def apply(data, n_step = 2, catmode = None, cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\
          D = None, mcat = [_MCAT_DEFAULT], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None):
    """
    Calculate corresponding colors by applying a von Kries chromatic adaptation
    transform (CAT), i.e. independent rescaling of 'sensor sensitivity' to data
    to adapt from current adaptation conditions (1) to the new conditions (2).
    
    Args:
        :data: 
            | ndarray of tristimulus values (can be NxMx3)
        :n_step:
            | 2, optional
            | Number of steps in the CAT (1: 1-step, 2: 2-step)
        :catmode: 
            | None, optional
            |    - None: use :n_step: to set the mode: 1: '1>2', 2: '1>0>2'
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>2': One-step CAT
            |      from illuminant 1 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :cattype: 
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, optional (whether it is needed depends on :catmode:; can be Mx3)
        :xyzw2:
            | None, optional (whether it is needed depends on :catmode:; can be Mx3)
        :xyzw0:
            | None, optional (whether it is needed depends on :catmode:; can be Mx3)
        :D: 
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0]. 
        :La: 
            | None, optional
            | Adapting luminances. 
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative. 
        :F: 
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations 
            |  (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0]. 
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | [_MCAT_DEFAULT], optional
            | List[str] or List[ndarray] of sensor space matrices for each 
            |  condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0: 
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values 
            |   - 'lms': return corresponding sensor space excitation values 
            |            (e.g. for further calculations) 
      
    Returns:
          :returns: 
              | ndarray with corresponding colors
        
    Reference:
        1. `Smet, K. A. G., & Ma, S. (2020). 
        Some concerns regarding the CAT16 chromatic adaptation transform. 
        Color Research & Application, 45(1), 172–177. 
        <https://doi.org/10.1002/col.22457>`_
    """

    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:
        # Set catmode:
        if catmode is None:
            if n_step == 2:
                catmode = '1>0>2'
            elif n_step == 1:
                catmode = '1>2'
            else:
                raise Exception(
                    'cat.apply(n_step = {:1.0f}, catmode = None): Unknown requested n-step CAT mode !'
                    .format(n_step))

        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)
        else:
            target_shape = data.shape

        target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to ill. E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1
                                is not None):  # apply one-step CAT: 1-->0
            catmode = '1>0'  #override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, use '1>0' !"
            )

        # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:  (note D20 is degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.zeros(data.shape)
        xyzc.fill(np.nan)
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match data.shape[1]!'
            )

        for i in range(xyzc.shape[1]):
            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T
            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar1)  #get degree of adaptation depending on Dtype
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')

                D20[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar2)  #get degree of adaptation depending on Dtype

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar12)  #get degree of adaptation depending on Dtype

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]

        return xyzc
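A hedged sketch of a one-step von Kries CAT from a D65-like white to an A-like white. It assumes this function is exposed as luxpy.cat.apply and that 'cat02' is a valid sensor-matrix key in _MCATS, as in luxpy; the tristimulus values are illustrative.

import numpy as np
import luxpy as lx

xyz   = np.array([[40.0, 30.0, 20.0]])    # sample under illuminant 1
xyzw1 = np.array([[95.0, 100.0, 108.9]])  # adopted white of illuminant 1 (D65-like)
xyzw2 = np.array([[109.9, 100.0, 35.6]])  # adopted white of illuminant 2 (A-like)
xyz_c = lx.cat.apply(xyz, catmode='1>2', cattype='vonkries',
                     xyzw1=xyzw1, xyzw2=xyzw2, mcat=['cat02'])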
Example #7
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  #input is expected to be absolute
            dataw[1:] = Lw * dataw[
                1:] / xyzw[:, 1:2]  #dataw = Lw*dataw # make absolute
        else:
            dataw = dataw  # make relative (Y=100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy()    # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy()  # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[
                0] == 1:  #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i +
                      1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[
                        0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[
                    0]  # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[
                    0]  #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

#    if flipaxis0and1 == True: # loop over shortest dim.
#        camout = np.transpose(camout, axes = (1,0,2))

# Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
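A hedged minimal sketch: a forward and an inverse cam_sww16() call with xyz input and the default 'JOSA' parameters. It assumes the function above is in scope; the tristimulus values are illustrative.

import numpy as np

xyz  = np.array([[40.0, 30.0, 20.0]])    # test sample (relative xyz, Yw = 100)
xyzw = np.array([[95.0, 100.0, 108.9]])  # white point (relative xyz)
jab = cam_sww16(xyz, dataw=xyzw, Yb=20.0, Lw=400.0, relative=True,
                inputtype='xyz', direction='forward', cieobs='2006_10')
xyz_back = cam_sww16(jab, dataw=xyzw, Yb=20.0, Lw=400.0, relative=True,
                     inputtype='xyz', direction='inverse', cieobs='2006_10')
# xyz_back holds the reconstructed tristimulus values.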
Example #8
def test_model():

    import pandas as pd
    import luxpy as lx

    # Read selected set of Munsell samples and LMS10(lambda):
    M = pd.read_csv('Munsell_LMS_nonlin_Nov18_2015_version.dat',
                    header=None,
                    sep='\t').values
    YLMS10_ = pd.read_csv('YLMS10_LMS_nonlin_Nov18_2015_version.dat',
                          header=None,
                          sep='\t').values
    Y10_ = YLMS10_[[0, 1], :].copy()
    LMS10_ = YLMS10_[[0, 2, 3, 4], :].copy()

    # Calculate lms:
    Y10 = cie_interp(_CMF['1964_10']['bar'].copy(),
                     getwlr([400, 700, 5]),
                     kind='cmf')[[0, 2], :]
    XYZ10_lx = _CMF['2006_10']['bar'].copy()
    XYZ10_lx = cie_interp(XYZ10_lx, getwlr([400, 700, 5]), kind='cmf')
    LMS10_lx = np.vstack(
        (XYZ10_lx[:1, :],
         np.dot(
             math.normalize_3x3_matrix(_CMF['2006_10']['M'],
                                       np.array([[1, 1, 1]])),
             XYZ10_lx[1:, :])))
    LMS10 = cie_interp(LMS10_lx, getwlr([400, 700, 5]), kind='cmf')

    #LMS10 = np.vstack((XYZ10[:1,:],np.dot(lx.math.normalize_3x3_matrix(_CMF['2006_10']['M'],np.array([[1,1,1]])),XYZ10_lx[1:,:])))

    #LMS10[1:,:] = LMS10[1:,:]/LMS10[1:,:].sum(axis=1,keepdims=True)*Y10[1:,:].sum()

    # test python model vs excel calculator:
    def spdBB(CCT=5500, wl=[400, 700, 5], Lw=25000, cieobs='1964_10'):
        wl = getwlr(wl)
        dl = wl[1] - wl[0]
        spd = 2 * np.pi * 6.626068E-34 * (299792458**2) / (
            (wl * 0.000000001)**
            5) / (np.exp(6.626068E-34 * 299792458 /
                         (wl * 0.000000001) / 1.3806503E-23 / CCT) - 1)
        spd = Lw * spd / (dl * 683 * (spd * cie_interp(
            _CMF[cieobs]['bar'].copy(), wl, kind='cmf')[2, :]).sum())
        return np.vstack((wl, spd))

    # Create long term and applied spds:
    spd5500 = spdBB(5500, Lw=25000, wl=[400, 700, 5], cieobs='1964_10')
    spd6500 = spdBB(6500, Lw=400, wl=[400, 700, 5], cieobs='1964_10')

    # Calculate lms0 as a check:
    clms = np.array(
        [0.98446776, 0.98401909, 0.98571412]
    )  # correction factor for slight differences in _CMF and the cmfs from the excel calculator
    lms0 = 5 * 683 * (spd5500[1:] * LMS10[1:, :] * 0.2).sum(axis=1).T

    # Full excel parameters for testing:
    parameters = {
        'cLMS':
        np.array([1, 1, 1]),
        'lms0':
        np.array([4985.02802565, 5032.49518502, 4761.27272226]) * 1,
        'Cc':
        0.251617118325755,
        'Cf':
        -0.4,
        'clambda': [0.5, 0.5, 0.0],
        'calpha': [1.0, -1.0, 0.0],
        'cbeta': [0.5, 0.5, -1.0],
        'cga1': [26.1047711317923, 33.9721745703298],
        'cgb1': [6.76038379211498, 10.9220216677629],
        'cga2': [0.587271269247578],
        'cgb2': [-0.952412544980473],
        'cl_int': [14.0035243121804, 1.0],
        'cab_int': [4.99218965716342, 65.7869547646456],
        'cab_out': [-0.1, -1.0],
        'Ccwb':
        None,
        'Mxyz2lms': [[0.21701045, 0.83573367, -0.0435106],
                     [-0.42997951, 1.2038895, 0.08621089],
                     [0., 0., 0.46579234]]
    }

    # Note cLMS is a relative scaling factor between CIE2006 10° and 1964 10°:
    #    clms = np.array([1.00164919, 1.00119269, 1.0029173 ]) = (Y10[1:,:].sum(axis=1)/LMS10[1:,:].sum(axis=1))*(406.98099078/400)

    #parameters =_CAM_SWW16_PARAMETERS['JOSA']
    # Calculate Munsell spectra multiplied with spd6500:
    spd6500xM = np.vstack((spd6500[:1, :], spd6500[1:, :] * M[1:, :]))

    # Test spectral input:
    print('SPD INPUT -----')
    jab = cam_sww16(spd6500xM,
                    dataw=spd6500,
                    Yb=20.0,
                    Lw=400.0,
                    Ccwb=1,
                    relative=True,
                    inputtype='spd',
                    direction='forward',
                    parameters=parameters,
                    cieobs='2006_10',
                    match_to_conversionmatrix_to_cieobs=True)

    #    # Test xyz input:
    print('\nXYZ INPUT -----')
    xyz = lx.spd_to_xyz(spd6500xM, cieobs='2006_10', relative=False)
    xyzw = lx.spd_to_xyz(spd6500, cieobs='2006_10', relative=False)
    xyz2, xyzw2 = lx.spd_to_xyz(spd6500,
                                cieobs='2006_10',
                                relative=False,
                                rfl=M,
                                out=2)

    print(xyzw)
    jab = cam_sww16(xyz,
                    dataw=xyzw,
                    Yb=20.0,
                    Lw=400,
                    Ccwb=1,
                    relative=True,
                    inputtype='xyz',
                    direction='forward',
                    parameters=parameters,
                    cieobs='2006_10',
                    match_to_conversionmatrix_to_cieobs=True)
Example #9
def xyz_to_ipt(xyz, cieobs = _CIEOBS, xyzw = None, M = None, **kwargs):
    """
    Convert XYZ tristimulus values to IPT color coordinates.

    | I: Lightness axis, P: red-green axis, T: yellow-blue axis.

    Args:
        :xyz: 
            | ndarray with tristimulus values
        :xyzw: 
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw for rescaling M
              (only when not None).
        :M: | None, optional
            | None defaults to xyz to lms conversion matrix determined by :cieobs:

    Returns:
        :ipt: 
            | ndarray with IPT color coordinates

    Note:
        :xyz: is assumed to be under D65 viewing conditions! If necessary,
              perform chromatic adaptation!

    Reference:
        1. `Ebner F, and Fairchild MD (1998).
           Development and testing of a color space (IPT) with improved hue uniformity.
           In IS&T 6th Color Imaging Conference, (Scottsdale, Arizona, USA), pp. 8–13.
           <http://www.ingentaconnect.com/content/ist/cic/1998/00001998/00000001/art00003?crawler=true>`_
    """
    xyz = np2d(xyz)

    # get M to convert xyz to lms and apply normalization to matrix or input your own:
    if M is None:
        M = _IPT_M['xyz2lms'][cieobs].copy() # matrix conversions from xyz to lms
        if xyzw is None:
            xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'],cieobs = cieobs, out = 1)[0]/100.0
        else:
            xyzw = xyzw/100.0
        M = math.normalize_3x3_matrix(M,xyzw)

    # get xyz and normalize to 1:
    xyz = xyz/100.0

    # convert xyz to lms:
    if len(xyz.shape) == 3:
        lms = np.einsum('ij,klj->kli', M, xyz)
    else:
        lms = np.einsum('ij,lj->li', M, xyz)
    #lms = np.dot(M,xyz.T).T

    #response compression: lms to lms'
    lmsp = lms**0.43
    p = np.where(lms<0.0)
    lmsp[p] = -np.abs(lms[p])**0.43

    # convert lms' to ipt coordinates:
    if len(xyz.shape) == 3:
        ipt = np.einsum('ij,klj->kli', _IPT_M['lms2ipt'], lmsp)
    else:
        ipt = np.einsum('ij,lj->li', _IPT_M['lms2ipt'], lmsp)

    return ipt
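A hedged sketch of xyz_to_ipt() with a user-supplied white point instead of the default D65. It assumes luxpy exposes xyz_to_ipt at the package level; the white-point values are illustrative.

import numpy as np
import luxpy as lx

xyz  = np.array([[41.0, 35.0, 20.0]])
xyzw = np.array([[109.9, 100.0, 35.6]])  # a warm, illuminant-A-like white (Y = 100)
ipt = lx.xyz_to_ipt(xyz, cieobs='1964_10', xyzw=xyzw)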
Example #10
# Database with cspace-axis strings (for plotting):
_CSPACE_AXES = {'Yxy': ['Y / L (cd/m²)', 'x', 'y']}
_CSPACE_AXES['Yuv'] = ['Y / L (cd/m²)', "u'", "v'"]
_CSPACE_AXES['xyz'] = ['X', 'Y', 'Z']
_CSPACE_AXES['lms'] = ['L', 'M', 'S']
_CSPACE_AXES['lab'] = ['L*', "a*", "b*"]
_CSPACE_AXES['luv'] = ['L*', "u*", "v*"]
_CSPACE_AXES['ipt'] = ['I', "P", "T"]
_CSPACE_AXES['wuv'] = ['W*', "U*", "V*"]
_CSPACE_AXES['Vrb_mb'] = ['V (MacLeod-Boynton)', "r (MacLeod-Boynton)", "b (MacLeod-Boynton)"]
_CSPACE_AXES['cct'] = ['', 'cct','duv']


# pre-calculate matrices for conversion of xyz to lms and back for use in xyz_to_ipt() and ipt_to_xyz():
_IPT_M = {'lms2ipt': np.array([[0.4000,0.4000,0.2000],[4.4550,-4.8510,0.3960],[0.8056,0.3572,-1.1628]]),
                              'xyz2lms' : {x : math.normalize_3x3_matrix(_CMF[x]['M'],spd_to_xyz(_CIE_ILLUMINANTS['D65'],cieobs = x)) for x in sorted(_CMF['types'])}}
_COLORTF_DEFAULT_WHITE_POINT = np.array([100.0, 100.0, 100.0]) # ill. E white point

#------------------------------------------------------------------------------
#---chromaticity coordinates---------------------------------------------------
#------------------------------------------------------------------------------
def xyz_to_Yxy(xyz, **kwargs):
    """
    Convert XYZ tristimulus values to CIE Yxy chromaticity values.

    Args:
        :xyz: 
            | ndarray with tristimulus values

    Returns:
        :Yxy: 
Example #11
_CMF_M_1931_2=np.array([     # definition of 3x3 matrices to convert from xyz to lms
[0.38971,0.68898,-0.07868],
[-0.22981,1.1834,0.04641],
[0.0,0.0,1.0]
 ])
         
#_CMF_M_2006_2=np.array([ # Note that these are directly (but inverse) from CIE15:2018, but not normalized to illuminant E!!
#[0.21057582,0.85509764,-0.039698265],
#[-0.41707637,1.1772611,0.078628251],
#[0.0,0.0,0.51683501]
#])
_CMF_M_2006_2 = np.linalg.inv(np.array([[1.94735469, -1.41445123, 0.36476327],
                                        [0.68990272, 0.34832189, 0],
                                        [0, 0, 1.93485343]]))
_CMF_M_2006_2 = math.normalize_3x3_matrix(_CMF_M_2006_2) 
        
#_CMF_M_2006_10=np.array([
#[0.21701045,0.83573367,-0.043510597],
#[-0.42997951,1.2038895,0.086210895],
#[0.0,0.0,0.46579234]
#])
_CMF_M_2006_10 = np.linalg.inv(np.array([[1.93986443, -1.34664359, 0.43044935],
                                        [0.69283932, 0.34967567, 0],
                                        [0, 0, 2.14687945]]))
_CMF_M_2006_10 = math.normalize_3x3_matrix(_CMF_M_2006_10)  
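A hedged check of the normalization step. It assumes math.normalize_3x3_matrix rescales the matrix so that the normalization white point (illuminant E, [1, 1, 1], by default) maps to unit LMS, as the comments above indicate, and that _CMF_M_2006_10 from above is in scope.

import numpy as np
print(_CMF_M_2006_10 @ np.ones((3, 1)))  # expected: ~[[1.], [1.], [1.]]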
    
# Note that for the following, no conversion has been defined, so the 1931 HPE matrix is used:    
_CMF_M_1964_10=np.array([
[0.38971,0.68898,-0.07868],
[-0.22981,1.1834,0.04641],