Example 1
def xyz_to_cct_ohno(xyzw,
                    cieobs=_CIEOBS,
                    out='cct',
                    wl=None,
                    accuracy=0.1,
                    force_out_of_lut=True,
                    upper_cct_max=10.0**20,
                    approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and 
    Duv (distance above (>0) or below (<0) the Planckian locus) 
    using Ohno's method. 
    
    Args:
        :xyzw: 
            | ndarray of tristimulus values
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculate xyzw.
        :out: 
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop the brute-force search once the cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct 
              to speed up search.
        :force_out_of_lut: 
            | True, optional
            | If True and cct is out of range of the LUT, then switch to 
              brute-force search method, else return numpy.nan values.
        
    Returns:
        :returns: 
            | ndarray with:
            |    cct: out == 'cct' (or 1)
            |    duv: out == 'duv' (or -1)
            |    cct, duv: out == 'cct,duv' (or 2)
            |    [cct,duv]: out == "[cct,duv]" (or -2) 
            
    Note:
        LUTs are stored in ./data/cctluts/
        
    Reference:
        1. `Ohno Y. Practical use and calculation of CCT and Duv. 
        Leukos. 2014 Jan 2;10(1):47-55.
        <http://www.tandfonline.com/doi/abs/10.1080/15502724.2014.839020>`_
    """

    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_ohno(): Input xyzw.ndim must be <= 2 !')

    # get 1960 u,v of test source:
    Yuv = xyz_to_Yuv(
        xyzw)  # remove possible 1-dim + convert xyzw to CIE 1976 u',v'
    axis_of_v3 = len(Yuv.shape) - 1  # axis containing color components
    u = Yuv[:, 1, None]  # get CIE 1960 u
    v = (2.0 / 3.0) * Yuv[:, 2, None]  # get CIE 1960 v

    uv = np2d(np.concatenate((u, v), axis=axis_of_v3))

    # load cct & uv from LUT:
    if cieobs not in _CCT_LUT:
        _CCT_LUT[cieobs] = calculate_lut(ccts=None,
                                         cieobs=cieobs,
                                         add_to_lut=False)
    cct_LUT = _CCT_LUT[cieobs][:, 0, None]
    uv_LUT = _CCT_LUT[cieobs][:, 1:3]

    # calculate CCT of each uv:
    CCT = np.ones(uv.shape[0]) * np.nan  # initialize with NaN's
    Duv = CCT.copy()  # initialize with NaN's
    idx_m = 0
    idx_M = uv_LUT.shape[0] - 1
    for i in range(uv.shape[0]):
        out_of_lut = False
        delta_uv = (((uv_LUT - uv[i])**2.0).sum(
            axis=1))**0.5  # calculate distance of uv with uv_LUT
        idx_min = delta_uv.argmin()  # find index of minimum distance

        # find Tm, delta_uv and u,v for 2 points surrounding uv corresponding to idx_min:
        if idx_min == idx_m:
            idx_min_m1 = idx_min
            out_of_lut = True
        else:
            idx_min_m1 = idx_min - 1
        if idx_min == idx_M:
            idx_min_p1 = idx_min
            out_of_lut = True
        else:
            idx_min_p1 = idx_min + 1

        if out_of_lut and force_out_of_lut:  # out of LUT range: use brute-force search
            cct_i, Duv_i = xyz_to_cct_search(xyzw[i],
                                             cieobs=cieobs,
                                             wl=wl,
                                             accuracy=accuracy,
                                             out='cct,duv',
                                             upper_cct_max=upper_cct_max,
                                             approx_cct_temp=approx_cct_temp)
            CCT[i] = cct_i
            Duv[i] = Duv_i
            continue
        elif out_of_lut and not force_out_of_lut:
            CCT[i] = np.nan
            Duv[i] = np.nan
            continue

        cct_m1 = cct_LUT[idx_min_m1]  # - 2*_EPS
        delta_uv_m1 = delta_uv[idx_min_m1]
        uv_m1 = uv_LUT[idx_min_m1]
        cct_p1 = cct_LUT[idx_min_p1]
        delta_uv_p1 = delta_uv[idx_min_p1]
        uv_p1 = uv_LUT[idx_min_p1]

        cct_0 = cct_LUT[idx_min]
        delta_uv_0 = delta_uv[idx_min]

        # calculate uv distance between Tm_m1 & Tm_p1:
        delta_uv_p1m1 = ((uv_p1[0] - uv_m1[0])**2.0 +
                         (uv_p1[1] - uv_m1[1])**2.0)**0.5

        # Triangular solution:
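        # (Ohno's triangular solution: treat the two neighbouring LUT points and the
        #  test point as a triangle; x is the distance from the 'm1' point along the
        #  chord between the LUT points, Tx the linearly interpolated CCT, and Duv the
        #  signed perpendicular distance of the test point to that chord. The 0.99991
        #  factor below is Ohno's correction for the systematic error of this estimate.)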
        x = ((delta_uv_m1**2) - (delta_uv_p1**2) +
             (delta_uv_p1m1**2)) / (2 * delta_uv_p1m1)
        Tx = cct_m1 + ((cct_p1 - cct_m1) * (x / delta_uv_p1m1))
        #uBB = uv_m1[0] + (uv_p1[0] - uv_m1[0]) * (x / delta_uv_p1m1)
        vBB = uv_m1[1] + (uv_p1[1] - uv_m1[1]) * (x / delta_uv_p1m1)

        Tx_corrected_triangular = Tx * 0.99991
        signDuv = np.sign(uv[i][1] - vBB)
        Duv_triangular = signDuv * np.atleast_1d(
            ((delta_uv_m1**2.0) - (x**2.0))**0.5)

        # Parabolic solution:
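        # (Ohno's parabolic solution: fit a parabola through the three (CCT, distance)
        #  pairs around the minimum and take its vertex -B/(2A) as Tx; Duv is the signed
        #  value of that parabola evaluated at the corrected Tx.)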
        a = delta_uv_m1 / (cct_m1 - cct_0 + _EPS) / (cct_m1 - cct_p1 + _EPS)
        b = delta_uv_0 / (cct_0 - cct_m1 + _EPS) / (cct_0 - cct_p1 + _EPS)
        c = delta_uv_p1 / (cct_p1 - cct_0 + _EPS) / (cct_p1 - cct_m1 + _EPS)
        A = a + b + c
        B = -(a * (cct_p1 + cct_0) + b * (cct_p1 + cct_m1) + c *
              (cct_0 + cct_m1))
        C = (a * cct_p1 * cct_0) + (b * cct_p1 * cct_m1) + (c * cct_0 * cct_m1)
        Tx = -B / (2 * A + _EPS)
        Tx_corrected_parabolic = Tx * 0.99991
        Duv_parabolic = signDuv * (A * np.power(Tx_corrected_parabolic, 2) +
                                   B * Tx_corrected_parabolic + C)

        # Use the triangular solution when |Duv| is small, else the parabolic one (Ohno, 2014):
        Threshold = 0.002
        if np.abs(Duv_triangular) < Threshold:
            CCT[i] = Tx_corrected_triangular
            Duv[i] = Duv_triangular
        else:
            CCT[i] = Tx_corrected_parabolic
            Duv[i] = Duv_parabolic

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2dT(CCT)
    elif (out == 'duv') | (out == -1):
        return np2dT(Duv)
    elif (out == 'cct,duv') | (out == 2):
        return np2dT(CCT), np2dT(Duv)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((CCT, Duv)).T
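
A minimal usage sketch for the function above (illustrative values; it assumes an
installed luxpy package exposes xyz_to_cct_ohno together with its module-level
helpers such as np2d and _CCT_LUT):

import numpy as np
import luxpy as lx  # assumed available

# Equal-energy white, illustrative absolute tristimulus values:
xyzw = np.array([[100.0, 100.0, 100.0]])

# CCT and Duv returned as two separate (N,1) ndarrays:
cct, duv = lx.xyz_to_cct_ohno(xyzw, out='cct,duv')

# Or stacked into one (N,2) ndarray, one (cct, duv) row per input row:
cct_duv = lx.xyz_to_cct_ohno(xyzw, out="[cct,duv]")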
Example 2
def cct_to_xyz(ccts,
               duv=None,
               cieobs=_CIEOBS,
               wl=None,
               mode='lut',
               out=None,
               accuracy=0.1,
               force_out_of_lut=True,
               upper_cct_max=10.0**20,
               approx_cct_temp=True):
    """
    Convert correlated color temperature (CCT) and Duv (distance above (>0) or 
    below (<0) the Planckian locus) to XYZ tristimulus values.
    
    | Finds xyzw_estimated by minimization of:
    |    
    |    F = numpy.sqrt(((100.0*(cct_min - cct)/(cct))**2.0) 
    |         + (((duv_min - duv)/(duv))**2.0))
    |    
    | with cct,duv the input values and cct_min, duv_min calculated using 
    | luxpy.xyz_to_cct(xyzw_estimated,...).
    
    Args:
        :ccts: 
            | ndarray of cct values
        :duv: 
            | None or ndarray of duv values, optional
            | Note that duv can be supplied together with cct values in :ccts: 
              as ndarray with shape (N,2)
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculate xyzw.
        :mode: 
            | 'lut' or 'search', optional
            | Determines what method to use.
        :out: 
            | None (or 1), optional
            | If not None or 1: output an ndarray that contains the estimated 
              xyz values with the minimization results appended: 
            | (cct_min, duv_min, F_min (objective fcn value))
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop the brute-force search once the cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct to 
              speed up search.
        :force_out_of_lut: 
            | True, optional
            | If True and cct is out of range of the LUT, then switch to 
              brute-force search method, else return numpy.nan values.
        
    Returns:
        :returns: 
            | ndarray with estimated XYZ tristimulus values
    
    Note:
        If duv is not supplied (:ccts:.shape is (N,1) and :duv: is None), 
        source is assumed to be on the Planckian locus.
	 """
    # make ccts a min. 2d np.array:
    if isinstance(ccts, list):
        ccts = np2dT(np.array(ccts))
    else:
        ccts = np2d(ccts)

    if len(ccts.shape) > 2:
        raise Exception('cct_to_xyz(): Input ccts.shape must be <= 2 !')

    # get cct and duv arrays from :ccts:
    cct = np2d(ccts[:, 0, None])

    if (duv is None) & (ccts.shape[1] == 2):
        duv = np2d(ccts[:, 1, None])
    elif duv is not None:
        duv = np2d(duv)

    #get estimates of approximate xyz values in case duv = None:
    BB = cri_ref(ccts=cct, wl3=wl, ref_type=['BB'])
    xyz_est = spd_to_xyz(data=BB, cieobs=cieobs, out=1)
    results = np.ones([ccts.shape[0], 3]) * np.nan

    if duv is not None:

        # optimization/minimization setup:
        def objfcn(uv_offset,
                   uv0,
                   cct,
                   duv,
                   out=1):  #, cieobs = cieobs, wl = wl, mode = mode):
            uv0 = np2d(uv0 + uv_offset)
            Yuv0 = np.concatenate((np2d([100.0]), uv0), axis=1)
            cct_min, duv_min = xyz_to_cct(Yuv_to_xyz(Yuv0),
                                          cieobs=cieobs,
                                          out='cct,duv',
                                          wl=wl,
                                          mode=mode,
                                          accuracy=accuracy,
                                          force_out_of_lut=force_out_of_lut,
                                          upper_cct_max=upper_cct_max,
                                          approx_cct_temp=approx_cct_temp)
            F = np.sqrt(((100.0 * (cct_min[0] - cct[0]) / (cct[0]))**2.0) +
                        (((duv_min[0] - duv[0]) / (duv[0]))**2.0))
            if out == 'F':
                return F
            else:
                return np.concatenate((cct_min, duv_min, np2d(F)), axis=1)

        # loop through each xyz_est:
        for i in range(xyz_est.shape[0]):
            xyz0 = xyz_est[i]
            cct_i = cct[i]
            duv_i = duv[i]
            cct_min, duv_min = xyz_to_cct(xyz0,
                                          cieobs=cieobs,
                                          out='cct,duv',
                                          wl=wl,
                                          mode=mode,
                                          accuracy=accuracy,
                                          force_out_of_lut=force_out_of_lut,
                                          upper_cct_max=upper_cct_max,
                                          approx_cct_temp=approx_cct_temp)

            if np.abs(duv[i]) > _EPS:
                # find xyz:
                Yuv0 = xyz_to_Yuv(xyz0)
                uv0 = Yuv0[0][1:3]

                OptimizeResult = minimize(fun=objfcn,
                                          x0=np.zeros((1, 2)),
                                          args=(uv0, cct_i, duv_i, 'F'),
                                          method='Nelder-Mead',
                                          options={
                                              "maxiter": np.inf,
                                              "maxfev": np.inf,
                                              'xatol': 0.000001,
                                              'fatol': 0.000001
                                          })
                betas = OptimizeResult['x']
                #betas = np.zeros(uv0.shape)
                if out is not None:
                    results[i] = objfcn(betas, uv0, cct_i, duv_i, out=3)

                uv0 = np2d(uv0 + betas)
                Yuv0 = np.concatenate((np2d([100.0]), uv0), axis=1)
                xyz_est[i] = Yuv_to_xyz(Yuv0)

            else:
                xyz_est[i] = xyz0

    if (out is None) | (out == 1):
        return xyz_est
    else:
        # Also output results of minimization:
        return np.concatenate((xyz_est, results), axis=1)
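
A minimal round-trip sketch for cct_to_xyz (illustrative values; it assumes an
installed luxpy package exposes cct_to_xyz and xyz_to_cct):

import numpy as np
import luxpy as lx  # assumed available

# (cct, duv) pairs supplied together as one (N,2) array:
ccts = np.array([[4000.0,  0.005],
                 [6500.0, -0.002]])

# Estimated tristimulus values for those (cct, duv) pairs:
xyz_est = lx.cct_to_xyz(ccts, mode='lut')

# Round trip: the recovered cct/duv should be close to the inputs:
cct_back, duv_back = lx.xyz_to_cct(xyz_est, out='cct,duv', mode='lut')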
Example 3
def cam18sl(data, datab = None, Lb = [100], fov = 10.0, inputtype = 'xyz',
            direction = 'forward', outin = 'Q,aW,bW', parameters = None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
              or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
              of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used when datab is None; the background is then assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """
    
    if parameters is None:
        parameters = _CAM18SL_PARAMETERS
        
    outin = outin.split(',')    
    
    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, k, naka, unique_hue_data = [parameters[x] for x in sorted(parameters.keys())]
    
    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF['2006_10']['M'])
    MAab = np.array([cAlms,calms,cblms])
    invMAab = np.linalg.inv(MAab)    
    
    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs = '2006_10', relative = False)[...,1:2]
        else:
            Lb = datab[...,1:2]
    else:
        if isinstance(Lb,list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0] # use wlr of stimulus data
        else:
            wlr = datab[0] # use wlr of background data
    datar = np.vstack((wlr,np.ones((Lb.shape[0], wlr.shape[0])))) # create eew
    xyzr = spd_to_xyz(datar, cieobs = '2006_10', relative = False) # get abs. tristimulus values
    datar[1:] = datar[1:]/xyzr[...,1:2]*Lb
    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs = '2006_10', relative = False)
            datar = datab.copy()

 
    # prepare data and datab for loop over backgrounds: 
    # make axis 1 of datab have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if datab.shape[0] == 1: # make datab and datar have the same light-source dimension (axis 0, used to store different backgrounds) as data
            datab = np.repeat(datab,data.shape[1],axis=0)  
            datar = np.repeat(datar,data.shape[1],axis=0)               
    else:
        if datab.shape[0] == 2:
            datab = np.vstack((datab[0],np.repeat(datab[1:], data.shape[1], axis = 0)))
        if datar.shape[0] == 2:
            datar = np.vstack((datar[0],np.repeat(datar[1:], data.shape[1], axis = 0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))

    #-------------------------------------------------
    
    #initialize camout:     
    dshape = list(data.shape)
    dshape[-1] = len(outin) # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)
    
  
    for i in range(data.shape[0]):
       
        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i+1:i+2,:])), cieobs = '2006_10', relative = False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i+1:i+2,:])), cieobs = '2006_10', relative = False)
        else:
            xyzb = datab[i:i+1,:] 
            xyzr = datar[i:i+1,:] 

        lmsb = np.dot(_CMF['2006_10']['M'],xyzb.T).T # convert to l,m,s
        rgbb = (lmsb / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF['2006_10']['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape)*Lb[i] # explicitly set the EEW cone excitations to the same luminance values as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs = '2006_10', relative = False)   
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF['2006_10']['M'],xyz.T).T # convert to l,m,s
            rgb = (lms / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta

            # apply von Kries CAT with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr/rgbb)[0])
            rgba = np.dot(Mcat,rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin
            
            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A,a,b = asplit(Aab)
            a = ca*a
            b = cb*b

            # calculate colorfulness-like signal M:
            M = cM*((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA*(A + cHK[0]*M**cHK[1]) # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0]*(s**cW[1]))

            # adjust Q for the size (fov) of the stimulus (it is debatable whether to do this before or after calculating s or W; since no data on s, M or W for different stimulus sizes was available, it is done after):
            Q = Q*(fov/10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a,b, htype = 'deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data = unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M*np.cos(h*np.pi/180.0)
                bM = M*np.sin(h*np.pi/180.0)
            
            if 'aS' in outin:
                aS = s*np.cos(h*np.pi/180.0)
                bS = s*np.sin(h*np.pi/180.0)
            
            if 'aW' in outin:
                aW = W*np.cos(h*np.pi/180.0)
                bW = W*np.sin(h*np.pi/180.0)

            if (outin != ['Q','aW','bW']):
                camout[i] =  eval('ajoin(('+','.join(outin)+'))')
            else:
                camout[i] = ajoin((Q,aW,bW))
    
        
        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:        
            if 'aW' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0)/cW[0])**(1.0/cW[1])
                M = s*Q
                
            
            if 'aM' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5
            
            if 'aS' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s*Q
                      
            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                     s = (((1.0 / WsM) - 1.0)/cW[0])**(1.0/cW[1])
                     M = s*Q
                elif 's' in outin:
                     M = WsM*Q
                elif 'M' in outin:
                     M = WsM
            
            # calculate achromatic signal, A from Q and M:
            A = Q/cA - cHK[0]*M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a,b, htype = 'rad')
            
            # calculate a,b from M and h:
            a = (M/cM)*np.cos(h)
            b = (M/cM)*np.sin(h)

            a = a/ca
            b = b/cb

            # create Aab:
            Aab = ajoin((A,a,b))    

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T    

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'], direction = 'inverse')

            # apply inverse von Kries CAT with D = 1:
            rgb = np.dot(np.diag((rgbb/rgbr)[0]),rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb/k*_CMF['2006_10']['K']  
            xyz = np.dot(Mlms2xyz,lms.T).T 
            
            camout[i] = xyz
    
    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
    
    return camout
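
A minimal forward/inverse sketch for cam18sl (illustrative values; it assumes an
installed luxpy package exposes the model as luxpy.cam.cam18sl):

import numpy as np
import luxpy as lx  # assumed available

# Absolute CIE 2006 10° XYZ of a stimulus (illustrative) seen against a
# 100 cd/m² equal-energy-white background:
xyz_stim = np.array([[300.0, 250.0, 150.0]])

# Forward: XYZ -> brightness Q and white-based opponent signals aW, bW:
QaWbW = lx.cam.cam18sl(xyz_stim, datab = None, Lb = [100], fov = 10.0,
                       inputtype = 'xyz', direction = 'forward', outin = 'Q,aW,bW')

# Inverse: correlates -> XYZ (should approximately recover xyz_stim):
xyz_back = lx.cam.cam18sl(QaWbW, datab = None, Lb = [100], fov = 10.0,
                          inputtype = 'xyz', direction = 'inverse', outin = 'Q,aW,bW')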