Example #1
import numpy as np  # assumed import; np2d, check_symmetric, check_posdef and _EPS are luxpy-internal helpers

def symmM_to_posdefM(A = None, atol = 1.0e-9, rtol = 1.0e-9, method = 'make', forcesymm = True):
    """
    Convert a symmetric matrix to a positive definite one. 
    
    Args:
        :A: 
            | ndarray
        :atol:
            | float, optional
            | The absolute tolerance parameter (see Notes of numpy.allclose())
        :rtol:
            | float, optional
            | The relative tolerance parameter (see Notes of numpy.allclose())
        :method: 
            | 'make' or 'nearest', optional (see notes for more info)
        :forcesymm: 
            | True or False, optional
            | If A is not symmetric, force symmetry using: 
            |    A = numpy.triu(A) + numpy.triu(A).T - numpy.diag(numpy.diag(A))
    
    Returns:
        :returns:
            | ndarray with positive-definite matrix.
        
    Notes on supported methods:
        1. `'make': A Python/Numpy port of Muhammad Asim Mubeen's matlab function 
        Spd_Mat.m 
        <https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix>`_
        2. `'nearest': A Python/Numpy port of John D'Errico's nearestSPD 
        MATLAB code. 
        <https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite>`_
    """
    if A is not None:
        A = np2d(A)
        
        
        # Make sure matrix A is symmetric up to a certain tolerance:
        sn = check_symmetric(A, atol = atol, rtol = rtol) 
        if (A.shape[0] != A.shape[1]) or (not sn):
            if forcesymm and (A.shape[0] == A.shape[1]):
                A = np.triu(A) + np.triu(A).T - np.diag(np.diag(A))
            else:
                raise Exception('symmM_to_posdefM(): matrix A not symmetric.')
        
        
        if check_posdef(A, atol = atol, rtol = rtol):
            return A
        else:

            if method == 'make':

                # A Python/Numpy port of Muhammad Asim Mubeen's matlab function Spd_Mat.m
                #
                # See: https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix
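                #
                # The reconstruction below computes A_pd = V·|Λ|·Vᵀ from the eigendecomposition
                # A = V·Λ·Vᵀ: negative eigenvalues are flipped to positive and zero eigenvalues
                # are nudged to _EPS, which yields a (numerically) positive-definite matrix.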
                Val, Vec = np.linalg.eig(A) 
                Val = np.real(Val)
                Vec = np.real(Vec)
                Val[np.where(Val == 0)] = _EPS # make zero eigenvalues non-zero
                p = np.where(Val < 0)
                Val[p] = -Val[p] # make negative eigenvalues positive
                return np.dot(Vec, np.dot(np.diag(Val), Vec.T))
 
            
            elif method == 'nearest':
                
                 # A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
                 # credits [2].
                 #
                 # [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
                 #
                 # [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
                 # matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
                 #
                 # See: https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite
                
                B = (A + A.T) / 2.0
                _, s, V = np.linalg.svd(B)

                H = np.dot(V.T, np.dot(np.diag(s), V))

                A2 = (B + H) / 2.0

                A3 = (A2 + A2.T) / 2.0

                if check_posdef(A3, atol = atol, rtol = rtol):
                    return A3

                spacing = np.spacing(np.linalg.norm(A))
                I = np.eye(A.shape[0])
                k = 1
                while not check_posdef(A3, atol = atol, rtol = rtol):
                    mineig = np.min(np.real(np.linalg.eigvals(A3)))
                    A3 += I * (-mineig * k**2.0 + spacing)
                    k += 1

                return A3
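
A quick usage sketch (a minimal check, assuming symmM_to_posdefM and its luxpy helpers are importable; positive definiteness is verified independently via a Cholesky factorization, which only succeeds for positive-definite matrices):

import numpy as np

def is_posdef(M):
    # np.linalg.cholesky raises LinAlgError unless M is (numerically) positive definite.
    try:
        np.linalg.cholesky(M)
        return True
    except np.linalg.LinAlgError:
        return False

A = np.array([[ 2.0, -1.0,  0.0],
              [-1.0, -2.0,  1.0],
              [ 0.0,  1.0,  3.0]])  # symmetric, but indefinite (it has a negative eigenvalue)
print(is_posdef(A))                              # False
Apd = symmM_to_posdefM(A, method = 'nearest')
print(is_posdef(Apd), np.allclose(Apd, Apd.T))   # True True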
Example #2
import numpy as np  # assumed import; spd_to_xyz, np2dT, getwlr, asplit, ajoin, naka_rushton, hue_angle, hue_quadrature and the _CAM18SL_* / _CMF constants are luxpy-internal

def cam18sl(data, datab = None, Lb = [100], fov = 10.0, inputtype = 'xyz', direction = 'forward', outin = 'Q,aW,bW', parameters = None):
    """
    Convert between CIE 2006 10° XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10° absolute XYZ tristimulus values or spectral data
              or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10° absolute XYZ tristimulus values or spectral data
              of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used when datab is None, in which case the background is assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """
    
    if parameters is None:
        parameters = _CAM18SL_PARAMETERS
        
    outin = outin.split(',')    
    
    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, k, naka, unique_hue_data = [parameters[x] for x in sorted(parameters.keys())]
    
    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF['2006_10']['M'])
    MAab = np.array([cAlms,calms,cblms])
    invMAab = np.linalg.inv(MAab)    
    
    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs = '2006_10', relative = False)[...,1:2]
        else:
            Lb = datab[...,1:2]
    else:
        if isinstance(Lb,list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0] # use wlr of stimulus data
        else:
            wlr = datab[0] # use wlr of background data
    datar = np.vstack((wlr,np.ones((Lb.shape[0], wlr.shape[0])))) # create eew
    xyzr = spd_to_xyz(datar, cieobs = '2006_10', relative = False) # get abs. tristimulus values
    datar[1:] = datar[1:]/xyzr[...,1:2]*Lb
    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs = '2006_10', relative = False)
            datar = datab.copy()

 
    # prepare data and datab for loop over backgrounds: 
    # make axis 1 of datab have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if datab.shape[0] == 1: # make datab and datar have the same light-source dimension (used to store different backgrounds) as data
            datab = np.repeat(datab,data.shape[1],axis=0)  
            datar = np.repeat(datar,data.shape[1],axis=0)               
    else:
        if datab.shape[0] == 2:
            datab = np.vstack((datab[0],np.repeat(datab[1:], data.shape[1], axis = 0)))
        if datar.shape[0] == 2:
            datar = np.vstack((datar[0],np.repeat(datar[1:], data.shape[1], axis = 0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))

    #-------------------------------------------------
    
    #initialize camout:     
    dshape = list(data.shape)
    dshape[-1] = len(outin) # requested number of correlates
    if (inputtype != 'xyz') and (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count; input can only be spectral in forward mode
    camout = np.nan*np.ones(dshape)
    
  
    for i in range(data.shape[0]):
       
        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i+1:i+2,:])), cieobs = '2006_10', relative = False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i+1:i+2,:])), cieobs = '2006_10', relative = False)
        else:
            xyzb = datab[i:i+1,:] 
            xyzr = datar[i:i+1,:] 

        lmsb = np.dot(_CMF['2006_10']['M'],xyzb.T).T # convert to l,m,s
        rgbb = (lmsb / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF['2006_10']['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape)*Lb[i] # explicitly set EEW cone excitations equal, at the same luminance as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs = '2006_10', relative = False)   
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF['2006_10']['M'],xyz.T).T # convert to l,m,s
            rgb = (lms / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
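            # (full von Kries adaptation: each cone signal is rescaled by the ratio of
            #  reference-white to background cone excitations; D = 1 means complete adaptation)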
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr/rgbb)[0])
            rgba = np.dot(Mcat,rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin
            
            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A,a,b = asplit(Aab)
            a = ca*a
            b = cb*b

            # calculate colorfulness-like signal M:
            M = cM*((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA*(A + cHK[0]*M**cHK[1]) # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0]*(s**cW[1]))
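            # (the 'inverse' branch below recovers s from W as:
            #  s = (((1.0 / W) - 1.0)/cW[0])**(1.0/cW[1]))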

            # adjust Q for stimulus size (fov); whether to do this before or after calculating 
            # s or W is a matter of debate (there was no data on s, M or W for different 
            # stimulus sizes, so it is done after):
            Q = Q*(fov/10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a,b, htype = 'deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data = unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M*np.cos(h*np.pi/180.0)
                bM = M*np.sin(h*np.pi/180.0)
            
            if 'aS' in outin:
                aS = s*np.cos(h*np.pi/180.0)
                bS = s*np.sin(h*np.pi/180.0)
            
            if 'aW' in outin:
                aW = W*np.cos(h*np.pi/180.0)
                bW = W*np.sin(h*np.pi/180.0)

            if (outin != ['Q','aW','bW']):
                camout[i] = eval('ajoin(('+','.join(outin)+'))') # assemble the requested correlates by name
            else:
                camout[i] = ajoin((Q,aW,bW))
    
        
        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:        
            if 'aW' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0)/cW[0])**(1.0/cW[1])
                M = s*Q
                
            
            if 'aM' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                M = (a**2.0 + b**2.0)**0.5
            
            if 'aS' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                s = (a**2.0 + b**2.0)**0.5
                M = s*Q
                      
            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                if 'W' in outin:
                     s = (((1.0 / WsM) - 1.0)/cW[0])**(1.0/cW[1])
                     M = s*Q
                elif 's' in outin:
                     M = WsM*Q
                elif 'M' in outin:
                     M = WsM
            
            # calculate achromatic signal, A from Q and M:
            A = Q/cA - cHK[0]*M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a,b, htype = 'rad')
            
            # calculate a,b from M and h:
            a = (M/cM)*np.cos(h)
            b = (M/cM)*np.sin(h)

            a = a/ca
            b = b/cb

            # create Aab:
            Aab = ajoin((A,a,b))    

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T    

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'], direction = 'inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb/rgbr)[0]),rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb/k*_CMF['2006_10']['K']  
            xyz = np.dot(Mlms2xyz,lms.T).T 
            
            camout[i] = xyz
    
    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
    
    return camout
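
A minimal forward/inverse round-trip sketch (assuming cam18sl's luxpy dependencies resolve; the stimulus is a hypothetical set of absolute CIE 2006 10° tristimulus values):

import numpy as np

xyz = np.array([[60.0, 100.0, 80.0]])  # hypothetical stimulus, 100 cd/m² EEW background
qab = cam18sl(xyz, datab = None, Lb = [100], fov = 10.0,
              inputtype = 'xyz', direction = 'forward', outin = 'Q,aW,bW')
xyz_back = cam18sl(qab, datab = None, Lb = [100], fov = 10.0,
                   inputtype = 'xyz', direction = 'inverse', outin = 'Q,aW,bW')
print(np.allclose(xyz, xyz_back))      # round-trip consistency check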
Example #3
import numpy as np  # assumed import; the remaining helpers and constants (np2d, spd_to_xyz, asplit, ajoin, put_args_in_db, math, _CMF, ...) are luxpy-internal

def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [x M] x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input the shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW16_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww16
            |   -'inverse': cam_sww16 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model, with:
        |    1. A correction for the parameter 
        |       in Eq. 4 of Fig. 11: 0.952 --> -0.952 
        |    2. The delta_ac and delta_bc white-balance shifts in Eqs. 5e & 5f, 
        |       which should be: -0.028 & 0.821 
        |    (cfr. Ccwb = 0.66 in: 
        |       ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy() 
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters,str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters,args)  #overwrite parameters with other (not-None) args input 
      
    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta,cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0  = [parameters[x] for x in sorted(parameters.keys())]
    
    # setup default adaptation field:   
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy() # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs = cieobs,relative=False) # get abs. tristimulus values
        if relative == False: # input is expected to be absolute: rescale illuminant C to Lw
            dataw[1:] = Lw*dataw[1:]/xyzw[:,1:2] # make absolute
        # else: dataw is already relative (Y = 100), so nothing to do
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs = cieobs, relative = relative)

    # precomputations:
    Mxyz2lms = np.dot(np.diag(cLMS),math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))) # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS   
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda,calpha,cbeta])
    invMAab = np.linalg.inv(MAab)
    
    #initialize data and camout:
    data = np2d(data).copy() # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy() # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if dataw.shape[0] == 1: # make dataw have the same light-source dimension size as data
            dataw = np.repeat(dataw,data.shape[1],axis=0)                
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0],np.repeat(dataw[1:], data.shape[1], axis = 0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))
  
    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3 # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') and (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count; input can only be spectral in forward mode
    camout = np.nan*np.ones(dshape)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):            
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
                dataw[i+1] = Lw*dataw[i+1]/xyzw_abs[0,1] # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
            lmsw = 683.0*np.dot(Mxyz2lms,xyzw.T).T/_CMF[cieobs]['K']
            lmsf = (Yb/100.0)*lmsw # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i,1:,:] = Lw*data[i,1:,:]/xyzw_abs[0,1] # make absolute
                xyzt = spd_to_xyz(data[i], cieobs = cieobs, relative = False)/_CMF[cieobs]['K'] 
                lmst = 683.0*np.dot(Mxyz2lms,xyzt.T).T # convert to l,m,s
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True: 
                dataw[i] = Lw*dataw[i]/100.0 # make absolute
            lmsw = 683.0* np.dot(Mxyz2lms, dataw[i].T).T /_CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb/100.0)*lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw*data[i]/100.0 # make absolute
                lmst = 683.0* np.dot(Mxyz2lms, data[i].T).T /_CMF[cieobs]['K'] # convert to lms
            else:
                 lmst = lmsf # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc*(np.log(lmst/lms0) + Cf*np.log(lmsf/lms0)))
        lmsfp = math.erf(Cc*(np.log(lmsf/lms0) + Cf*np.log(lmsf/lms0)))
        lmstp = np.vstack((lmsfp,lmstp)) # add adaptation field lms temporarily to lmsp for quick calculation
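        # (prepending the adaptation-field response lets stages 3-5 compute stimulus and
        #  gray-reference signals in one vectorized pass; the field row is dropped again via
        #  the [1:] indexing in the forward output, or used as labf_int in the inverse mode)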
        
        
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar,alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0]*alph
        alphp[alph<0] = cga1[1]*alph[alph<0]
        betp = cgb1[0]*bet
        betp[bet<0] = cgb1[1]*bet[bet<0]
        
        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0]*(alphp + betp)
        betpp = cgb2[0]*(alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0]*(lstar + cl_int[1])
        alph_int = cab_int[0]*(np.cos(cab_int[1]*np.pi/180.0)*alphpp - np.sin(cab_int[1]*np.pi/180.0)*betpp)
        bet_int = cab_int[0]*(np.sin(cab_int[1]*np.pi/180.0)*alphpp + np.cos(cab_int[1]*np.pi/180.0)*betpp)
        lstar_out = lstar_int
        
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int -  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_out = alph_int - Ccwb[0]*alph_int[0] # white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_out = bet_int -  Ccwb[1]*bet_int[0]
  
            camout[i] = np.vstack((lstar_out[1:],alph_out[1:],bet_out[1:])).T # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))
            
            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])
        
            # stage 5 inverse: 
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out +  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_int = alph_out + Ccwb[0]*alph_int[0] # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_int = bet_out +  Ccwb[1]*bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (np.cos(-cab_int[1]*np.pi/180.0)*alph_int - np.sin(-cab_int[1]*np.pi/180.0)*bet_int)
            betpp = (1.0 / cab_int[0]) * (np.sin(-cab_int[1]*np.pi/180.0)*alph_int + np.cos(-cab_int[1]*np.pi/180.0)*bet_int)
            lstar = (lstar_int / cl_int[0]) - cl_int[1] 
            
            # stage 4 inverse:
            alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0])  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0]) # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp/cga1[0]
            bet = betp/cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa*alphp)<0.0] = alphp[(sa*alphp)<0] / cga1[1] 
            bet[(sb*betp)<0.0] = betp[(sb*betp)<0] / cgb1[1] 
            lab = ajoin((lstar, alph, bet))
            
            # stage 2 inverse:
            lmstp = np.dot(invMAab,lab.T).T 
            lmstp[lmstp<-1.0] = -1.0
            lmstp[lmstp>1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf*np.log(lmsf/lms0)
            lmst = np.exp(lmstp) * lms0
            
            # stage 1 inverse:
            xyzt =  np.dot(invMxyz2lms,lmst.T).T   
            
            if relative == True:
                xyzt = (100.0/Lw) * xyzt
            
            camout[i] = xyzt
    
    
    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes = (1,0,2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
        
    return camout
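
A minimal usage sketch for cam_sww16 (assuming its luxpy dependencies resolve; the input is a hypothetical relative tristimulus vector):

import numpy as np

xyz = np.array([[40.0, 25.0, 15.0]])   # relative XYZ (Yw = 100), hypothetical sample
lab_int = cam_sww16(xyz, dataw = None, Yb = 20.0, Lw = 400.0,
                    relative = True, inputtype = 'xyz', direction = 'forward')
xyz_back = cam_sww16(lab_int, dataw = None, Yb = 20.0, Lw = 400.0,
                     relative = True, inputtype = 'xyz', direction = 'inverse')
print(np.allclose(xyz, xyz_back))      # round-trip consistency check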