Example #1
import numpy as np
def cct_to_neutral_loci_smet2018(cct, nlocitype='uw', out='duv,D'):
    """
    Calculate the most neutral appearing Duv10 and the degree of neutrality for a specified CCT, using the models in Smet et al. (2018).
    
    Args:
        :cct:
            | ndarray with CCT values
        :nlocitype:
            | 'uw', optional
            | 'uw': use unique white models published in Smet et al. (2014).
            | 'ca': use degree of chromatic adaptation model from Smet et al. (2017).
        :out:
            | 'duv,D', optional
            | Specifies requested output (other options: 'duv', 'D').
            
    Returns:
        :duv: ndarray with most neutral Duv10 value corresponding to the cct input.
        :D: ndarray with the degree of neutrality at (cct, duv).
        
    References:
         1. `Smet, K. A. G. (2018). 
        Two Neutral White Illumination Loci Based on Unique White Rating and Degree of Chromatic Adaptation. 
        LEUKOS, 14(2), 55–67.  
        <https://doi.org/10.1080/15502724.2017.1385400>`_  
        
    Notes:
        1. Duv is specified in the CIE 1960 u10v10 chromaticity diagram, as the 
        models were developed using CIE 1964 10° tristimulus, chromaticity and CCT values.
        2. The parameter +0.0172 in Eq. 4b should be -0.0172
    """
    if nlocitype == 'uw':
        duv = 0.0202 * np.log(cct / 3325) * np.exp(
            -1.445 * np.log(cct / 3325)**2) - 0.0137
        D = np.exp(-(6368 * ((1 / cct) -
                             (1 / 6410)))**2)  # degree of neutrality
    elif nlocitype == 'ca':
        duv = 0.0382 * np.log(cct / 2194) * np.exp(
            -0.679 * np.log(cct / 2194)**2) - 0.0172
        D = np.exp(-(3912 * ((1 / cct) -
                             (1 / 6795)))**2)  # degree of adaptation
    else:
        raise Exception('Unrecognized nlocitype')

    if out == 'duv,D':
        return duv, D
    elif out == 'duv':
        return duv
    elif out == 'D':
        return D
    else:
        raise Exception('cct_to_neutral_loci_smet2018(): Requested output unrecognized.')
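
A minimal usage sketch (illustrative CCT values, not from the paper; assumes the numpy import above):

ccts = np.array([3000.0, 4000.0, 5000.0, 6500.0])
duv_uw, D_uw = cct_to_neutral_loci_smet2018(ccts, nlocitype='uw', out='duv,D')
D_ca = cct_to_neutral_loci_smet2018(ccts, nlocitype='ca', out='D')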
Example #2
import numpy as np
def log_scale(data,
              scale_factor=[6.73],
              scale_max=100.0):  # defaults from CIE 224:2017 (Rf)
    """
    Log-based color rendering index scale from Davis & Ohno (2010): 
    
    | Rfi,a = 10 * ln(exp((100 - c1*DEi,a)/10) + 1).
                    
    Args:
        :data: 
            | float or list[floats] or ndarray 
        :scale_factor:
            | [6.73] or list[float] or ndarray, optional
            | Rescales color differences before subtracting them from :scale_max:
            | Note that the default value is the one from CIE 224:2017.
        :scale_max:
            | 100.0, optional
            | Maximum value of linear scale
    
    Returns:
        :returns:
            | float or list[floats] or ndarray
        
    References:
        1. `W. Davis and Y. Ohno, 
        “Color quality scale,” (2010), 
        Opt. Eng., vol. 49, no. 3, pp. 33602–33616.
        <http://spie.org/Publications/Journal/10.1117/1.3360335>`_
        2. `CIE224:2017. CIE 2017 Colour Fidelity Index for accurate scientific use. 
        Vienna, Austria: CIE. (2017).
        <http://www.cie.co.at/index.php?i_ca_id=1027>`_

    """
    return 10.0 * np.log(
        np.exp((scale_max - scale_factor[0] * data) / 10.0) + 1.0)
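
A quick sketch of the scale's behavior (illustrative color differences; with the defaults, a zero difference maps to roughly 100 and larger differences map to lower values):

DEi = np.array([0.0, 2.0, 5.0, 15.0])  # illustrative color differences
Rfi = log_scale(DEi)  # DEi = 0 gives Rfi ~ 100; larger DEi gives lower Rfi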
Example #3
import numpy as np

# Note: math (luxpy.math), asplit and xyz_to_Vrb_mb are luxpy-internal helpers
# assumed to be in scope.
def smet2017_D(xyzw, Dmax=None, cieobs='1964_10'):
    """
    Calculate the degree of adaptation based on chromaticity, following 
    Smet et al. (2017).
    
    Args:
        :xyzw: 
            | ndarray with white point data
        :Dmax:
            | None or float, optional
            | Defaults to 0.6539 (max D obtained under the experimental 
              conditions; probably too low due to the dark surround leading 
              to incomplete chromatic adaptation, even for neutral illuminants 
              resulting in a background luminance (fov ~50°) of 760 cd/m²).
        :cieobs:
            | '1964_10', optional
            | CMF set used in deriving model in cited paper.
            
    Returns:
        :D: 
            | ndarray with degrees of adaptation
    
    References: 
        1. `Smet, K.A.G., Zhai, Q., Luo, M.R., Hanselaer, P., (2017), 
        Study of chromatic adaptation using memory color matches, 
        Part II: colored illuminants, 
        Opt. Express, 25(7), pp. 8350-8365.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-25-7-8350&origin=search>`_

    """

    # Convert xyzw to log-compressed MacLeod-Boynton coordinates:
    Vl, rl, bl = asplit(np.log(xyz_to_Vrb_mb(xyzw, cieobs=cieobs)))

    # apply D-model (technically only valid for cieobs = '1964_10')
    pD = (1.0e7) * np.array([
        0.021081326530436, 4.751255762876845, -0.000000071025181,
        -0.000000063627042, -0.146952821492957, 3.117390441655821
    ])  #D model parameters for gaussian model in log(MB)-space (july 2016)
    if Dmax is None:
        Dmax = 0.6539  # max D obtained under experimental conditions (probably too low due to dark surround leading to incomplete chromatic adaptation even for neutral illuminants resulting in background luminance (fov~50°) of 760 cd/m²)
    return Dmax * math.bvgpdf(x=rl,
                              y=bl,
                              mu=pD[2:4],
                              sigmainv=np.linalg.inv(
                                  np.array([[pD[0], pD[4]], [pD[4], pD[1]]
                                            ])))**pD[5]
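
A usage sketch with an illustrative, D65-like white point (assumes the luxpy helpers noted above are in scope):

xyzw = np.array([[95.0, 100.0, 108.9]])  # illustrative D65-like white point
D = smet2017_D(xyzw)  # degree of adaptation, capped at the default Dmax = 0.6539
D_rescaled = smet2017_D(xyzw, Dmax=1.0)  # rescaled to a maximum D of 1.0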
Example #4
import numpy as np

# Note: np2d, spd, spd_to_xyz, xyz_to_cct, cat, xyz_to_ipt, asplit, jab_to_rg,
# _MCRI_DEFAULTS and _S_INTERP_TYPE are luxpy-internal helpers/constants
# assumed to be in scope.
def spd_to_mcri(SPD, D=0.9, E=None, Yb=20.0, out='Rm', wl=None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs, 
              first row contains the wavelengths)
        :D: 
            | 0.9, optional
            | Degree of adaptation.
        :E: 
            | None, optional
            | Illuminance in lux 
            |  (used to calculate La = (Yb/100)*(E/pi) to then calculate D 
            |  following the 'cat02' model). 
            | If None: the degree is determined by :D:
            |  If (:E: is not None) & (:Yb: is None):  :E: is assumed to contain 
               the adapting field luminance La (cd/m²).
        :Yb: 
            | 20.0, optional
            | Luminance factor of background. (used when calculating La from E)
            | If None, E contains La (cd/m²).
        :out: 
            | 'Rm' or str, optional
            | Specifies requested output (e.g. 'Rm,Rmi,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns: 
            | float or ndarray with MCRI Rm for :out: 'Rm'
            | Other output is also possible by changing the :out: str value.        
          
    References:
        1. `K.A.G. Smet, W.R. Ryckaert, M.R. Pointer, G. Deconinck, P. Hanselaer,(2012)
        “A memory colour quality metric for white light sources,” 
        Energy Build., vol. 49, no. C, pp. 216–225.
        <http://www.sciencedirect.com/science/article/pii/S0378778812000837>`_
    """
    SPD = np2d(SPD)

    if wl is not None:
        SPD = spd(data=SPD, interpolation=_S_INTERP_TYPE, kind='np', wl=wl)

    # unpack metric default values:
    avg, catf, cieobs, cri_specific_pars, cspace, ref_type, rg_pars, sampleset, scale = [
        _MCRI_DEFAULTS[x] for x in sorted(_MCRI_DEFAULTS.keys())
    ]
    similarity_ai = cri_specific_pars['similarity_ai']
    Mxyz2lms = cspace['Mxyz2lms']
    scale_fcn = scale['fcn']
    scale_factor = scale['cfactor']
    sampleset = eval(sampleset)

    # A. calculate xyz:
    xyzti, xyztw = spd_to_xyz(SPD, cieobs=cieobs['xyz'], rfl=sampleset, out=2)
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyztw,
                              cieobs=cieobs['cct'],
                              out='cct,duv',
                              mode='lut')

    # B. perform chromatic adaptation to adopted whitepoint of ipt color space, i.e. D65:
    if catf is not None:
        Dtype_cat, F, Yb_cat, catmode_cat, cattype_cat, mcat_cat, xyzw_cat = [
            catf[x] for x in sorted(catf.keys())
        ]

        # calculate degree of adaptation D:
        if E is not None:
            if Yb is not None:
                La = (Yb / 100.0) * (E / np.pi)
            else:
                La = E
            D = cat.get_degree_of_adaptation(Dtype=Dtype_cat, F=F, La=La)
        else:
            Dtype_cat = None  # direct input of D

        if (E is None) and (D is None):
            D = 1.0  # set degree of adaptation to 1 !
        if D > 1.0: D = 1.0
        if D < 0.6: D = 0.6  # put a limit on the lowest D

        # apply cat:
        xyzti = cat.apply(xyzti,
                          cattype=cattype_cat,
                          catmode=catmode_cat,
                          xyzw1=xyztw,
                          xyzw0=None,
                          xyzw2=xyzw_cat,
                          D=D,
                          mcat=[mcat_cat],
                          Dtype=Dtype_cat)
        xyztw = cat.apply(xyztw,
                          cattype=cattype_cat,
                          catmode=catmode_cat,
                          xyzw1=xyztw,
                          xyzw0=None,
                          xyzw2=xyzw_cat,
                          D=D,
                          mcat=[mcat_cat],
                          Dtype=Dtype_cat)

    # C. convert xyz to ipt and split:
    ipt = xyz_to_ipt(
        xyzti, cieobs=cieobs['xyz'], M=Mxyz2lms
    )  #input matrix as published in Smet et al. 2012, Energy and Buildings
    I, P, T = asplit(ipt)

    # D. calculate specific (hue dependent) similarity indicators, Si:
    if len(xyzti.shape) == 3:
        ai = np.expand_dims(similarity_ai, axis=1)
    else:
        ai = similarity_ai
    a1, a2, a3, a4, a5 = asplit(ai)
    mahalanobis_d2 = (a3 * np.power((P - a1), 2.0) + a4 * np.power(
        (T - a2), 2.0) + 2.0 * a5 * (P - a1) * (T - a2))
    if (len(mahalanobis_d2.shape) == 3) & (mahalanobis_d2.shape[-1] == 1):
        mahalanobis_d2 = mahalanobis_d2[:, :, 0].T
    Si = np.exp(-0.5 * mahalanobis_d2)

    # E. calculate general similarity indicator, Sa:
    Sa = avg(Si, axis=0, keepdims=True)

    # F. rescale similarity indicators (Si, Sa) with a 0-1 scale to memory color rendition indices (Rmi, Rm) with a 0 - 100 scale:
    Rmi = scale_fcn(np.log(Si), scale_factor=scale_factor)
    Rm = np2d(scale_fcn(np.log(Sa), scale_factor=scale_factor))

    # G. calculate Rg (polyarea of test / polyarea of memory colours):
    if 'Rg' in out.split(','):
        I = I[..., None]
        a1 = a1[:, None] * np.ones(I.shape)
        a2 = a2[:, None] * np.ones(I.shape)
        a12 = np.concatenate((a1, a2), axis=2)
        ipt_mc = np.concatenate((I, a12), axis=2)
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())
        ]

        Rg = jab_to_rg(ipt,
                       ipt_mc,
                       ordered_and_sliced=False,
                       nhbins=nhbins,
                       start_hue=start_hue,
                       normalize_gamut=normalize_gamut)

    if (out != 'Rm'):
        return eval(out)
    else:
        return Rm
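
A usage sketch, assuming a luxpy-style SPD (first row wavelengths, following rows spectral power); _CIE_ILLUMINANTS is the same luxpy constant used in example #6:

SPD = _CIE_ILLUMINANTS['D65'].copy()  # illustrative input SPD
Rm = spd_to_mcri(SPD)  # general memory color rendition index only
Rm, Rmi, cct, duv = spd_to_mcri(SPD, out='Rm,Rmi,cct,duv')  # extended output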
Example #5
import numpy as np
import scipy as sp
import scipy.stats  # box_m uses sp.stats.f and sp.stats.chi2
def box_m(*X, ni=None, verbosity=0):
    """
    Perform Box's M test (p>=2) to check equality of covariance matrices or Bartlett's test (p==1) for equality of variances.
    
    Args:
        :X: 
            | A number (k groups) of 2d-ndarrays with data (rows: samples, cols: variables),
            | or a number of 2d-ndarrays with covariance matrices (supply ni!).
        :ni:
            | None, optional
            | If None: X contains data, else, X contains covariance matrices.
        :verbosity: 
            | 0, optional
            | If 1: print results.
    
    Returns:
        :statistic:
            | F or chi2 value (see len(dfs))
        :pval:
            | p-value
        :df:
            | degrees of freedom.
            | if len(dfs) == 2: F-test was used.
            | if len(dfs) == 1: chi2 approx. was used.
    
    Notes:
        1. If p==1: Reduces to Bartlett's test for equal variances.
        2. If (ni>20).all() & (p<6) & (k<6): then a more appropriate chi2 test is used in some cases.
    """

    k = len(X)  # groups
    p = np.atleast_2d(X[0]).shape[1]  # variables
    if p == 1:  # for p == 1: only variance!
        det = lambda x: np.array(x)
    else:
        det = lambda x: np.linalg.det(x)
    if ni is None:  # samples in each group
        ni = np.array([Xi.shape[0] for Xi in X])
        Si = np.array([np.cov(Xi.T) for Xi in X])
        if p == 1:
            Si = np.atleast_2d(Si).T
    else:
        Si = np.array([Xi for Xi in X])  # input are already cov matrices!
        ni = np.array(ni)
        if ni.shape[0] == 1:
            ni = ni * np.ones((k, ))

    N = ni.sum()
    S = np.array([(ni[i] - 1) * Si[i]
                  for i in range(len(ni))]).sum(axis=0) / (N - k)

    M = (N - k) * np.log(det(S)) - ((ni - 1) * np.log(det(Si))).sum()
    if p == 1:
        M = M[0]
    A1 = ((2 * p**2 + 3 * p - 1) / (6 * (p + 1) * (k - 1))) * \
         ((1 / (ni - 1)) - 1 / (N - k)).sum()
    v1 = p * (p + 1) * (k - 1) / 2
    A2 = ((p - 1) * (p + 2) / (6 * (k - 1))) * \
         ((1 / (ni - 1)**2) - 1 / (N - k)**2).sum()

    if (A2 - A1**2) > 0:
        v2 = (v1 + 2) / (A2 - A1**2)
        b = v1 / (1 - A1 - (v1 / v2))
        Fv1v2 = M / b
        statistic = Fv1v2
        pval = 1.0 - sp.stats.f.cdf(Fv1v2, v1, v2)
        dfs = [v1, v2]

        if verbosity == 1:
            print(
                'M = {:1.4f}, F = {:1.4f}, df1 = {:1.1f}, df2 = {:1.1f}, p = {:1.4f}'
                .format(M, Fv1v2, v1, v2, pval))
    else:
        v2 = (v1 + 2) / (A1**2 - A2)
        b = v2 / (1 - A1 + (2 / v2))
        Fv1v2 = v2 * M / (v1 * (b - M))
        statistic = Fv1v2
        pval = 1.0 - sp.stats.f.cdf(Fv1v2, v1, v2)
        dfs = [v1, v2]

        if (ni > 20).all() & (p < 6) & (k < 6):  # use chi2 approximation
            chi2v1 = M * (1 - A1)
            statistic = chi2v1
            pval = 1.0 - sp.stats.chi2.cdf(chi2v1, v1)
            dfs = [v1]
            if verbosity == 1:
                print(
                    'M = {:1.4f}, chi2 = {:1.4f}, df1 = {:1.1f}, p = {:1.4f}'.
                    format(M, chi2v1, v1, pval))

        else:
            if verbosity == 1:
                print(
                    'M = {:1.4f}, F = {:1.4f}, df1 = {:1.1f}, df2 = {:1.1f}, p = {:1.4f}'
                    .format(M, Fv1v2, v1, v2, pval))

    return statistic, pval, dfs
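
A usage sketch on synthetic data (two groups of trivariate samples; assumes the imports above). A large p-value means no evidence against equal covariance matrices:

rng = np.random.default_rng(0)
X1 = rng.normal(size=(50, 3))  # group 1: 50 samples, 3 variables
X2 = rng.normal(size=(60, 3))  # group 2: 60 samples, 3 variables
statistic, pval, dfs = box_m(X1, X2, verbosity=1)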
Example #6
import numpy as np

# Note: math (luxpy.math), np2d, asplit, ajoin, spd_to_xyz, put_args_in_db,
# _CAM_SWW16_PARAMETERS, _CIE_ILLUMINANTS and _CMF are luxpy-internal
# helpers/constants assumed to be in scope.
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None, optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model, with:
        |    1. A correction for the parameter in Eq. 4 of Fig. 11: 
        |         0.952 --> -0.952 
        |    2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |    (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy() 
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters,str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters,args)  #overwrite parameters with other (not-None) args input 
      
    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta,cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0  = [parameters[x] for x in sorted(parameters.keys())]
    
    # setup default adaptation field:   
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy() # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs = cieobs,relative=False) # get abs. tristimulus values
        if relative == False: # stimulus data is absolute, so make the default white absolute too
            dataw[1:] = Lw*dataw[1:]/xyzw[:,1:2] # scale to Lw
        else:
            pass # dataw is already relative (Y = 100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs = cieobs, relative = relative)

    # precomputations:
    Mxyz2lms = np.dot(np.diag(cLMS),math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))) # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS   
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda,calpha,cbeta])
    invMAab = np.linalg.inv(MAab)
    
    #initialize data and camout:
    data = np2d(data).copy() # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy() # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if dataw.shape[0] == 1: #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw,data.shape[1],axis=0)                
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0],np.repeat(dataw[1:], data.shape[1], axis = 0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))
  
    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3 # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):            
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
                dataw[i+1] = Lw*dataw[i+1]/xyzw_abs[0,1] # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
            lmsw = 683.0*np.dot(Mxyz2lms,xyzw.T).T/_CMF[cieobs]['K']
            lmsf = (Yb/100.0)*lmsw # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i,1:,:] = Lw*data[i,1:,:]/xyzw_abs[0,1] # make absolute
                xyzt = spd_to_xyz(data[i], cieobs = cieobs, relative = False)/_CMF[cieobs]['K'] 
                lmst = 683.0*np.dot(Mxyz2lms,xyzt.T).T # convert to l,m,s
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True: 
                dataw[i] = Lw*dataw[i]/100.0 # make absolute
            lmsw = 683.0* np.dot(Mxyz2lms, dataw[i].T).T /_CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb/100.0)*lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw*data[i]/100.0 # make absolute
                lmst = 683.0* np.dot(Mxyz2lms, data[i].T).T /_CMF[cieobs]['K'] # convert to lms
            else:
                 lmst = lmsf # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc*(np.log(lmst/lms0) + Cf*np.log(lmsf/lms0)))
        lmsfp = math.erf(Cc*(np.log(lmsf/lms0) + Cf*np.log(lmsf/lms0)))
        lmstp = np.vstack((lmsfp,lmstp)) # add adaptation field lms temporarily to lmsp for quick calculation
        
        
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar,alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0]*alph
        alphp[alph<0] = cga1[1]*alph[alph<0]
        betp = cgb1[0]*bet
        betp[bet<0] = cgb1[1]*bet[bet<0]
        
        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0]*(alphp + betp)
        betpp = cgb2[0]*(alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0]*(lstar + cl_int[1])
        alph_int = cab_int[0]*(np.cos(cab_int[1]*np.pi/180.0)*alphpp - np.sin(cab_int[1]*np.pi/180.0)*betpp)
        bet_int = cab_int[0]*(np.sin(cab_int[1]*np.pi/180.0)*alphpp + np.cos(cab_int[1]*np.pi/180.0)*betpp)
        lstar_out = lstar_int
        
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int -  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_out = alph_int - Ccwb[0]*alph_int[0] # white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of cognitive adaptation
                bet_out = bet_int -  Ccwb[1]*bet_int[0]
  
            camout[i] = np.vstack((lstar_out[1:],alph_out[1:],bet_out[1:])).T # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))
            
            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])
        
            # stage 5 inverse: 
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out +  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_int = alph_out + Ccwb[0]*alph_int[0] # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of cognitive adaptation
                bet_int = bet_out +  Ccwb[1]*bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (np.cos(-cab_int[1]*np.pi/180.0)*alph_int - np.sin(-cab_int[1]*np.pi/180.0)*bet_int)
            betpp = (1.0 / cab_int[0]) * (np.sin(-cab_int[1]*np.pi/180.0)*alph_int + np.cos(-cab_int[1]*np.pi/180.0)*bet_int)
            lstar = (lstar_int /cl_int[0]) - cl_int[1] 
            
            # stage 4 inverse:
            alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0])  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0]) # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp/cga1[0]
            bet = betp/cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa*alphp)<0.0] = alphp[(sa*alphp)<0] / cga1[1] 
            bet[(sb*betp)<0.0] = betp[(sb*betp)<0] / cgb1[1] 
            lab = ajoin((lstar, alph, bet))
            
            # stage 2 inverse:
            lmstp = np.dot(invMAab,lab.T).T 
            lmstp[lmstp<-1.0] = -1.0
            lmstp[lmstp>1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf*np.log(lmsf/lms0)
            lmst = np.exp(lmstp) * lms0
            
            # stage 1 inverse:
            xyzt =  np.dot(invMxyz2lms,lmst.T).T   
            
            if relative == True:
                xyzt = (100.0/Lw) * xyzt
            
            camout[i] = xyzt
    
    
    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes = (1,0,2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
        
    return camout
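
A forward-mode usage sketch with relative XYZ input and the default white point (CIE illuminant C); the stimuli are illustrative and the luxpy helpers noted above are assumed to be in scope:

xyz = np.array([[40.0, 45.0, 40.0],
                [20.0, 25.0, 15.0]])  # illustrative relative stimuli (Yw = 100)
lab_int = cam_sww16(xyz, dataw=None, Yb=20.0, Lw=400.0, relative=True,
                    inputtype='xyz', direction='forward')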