Code example #1
File: helpers.py Project: uhqinli/luxpy
def meshblock(x, y):
    """
    Create a meshed block from x and y.
    
    | (Similar to meshgrid, but axis = 0 is retained).
    | To enable fast blockwise calculation.
    
    Args: 
        :x: 
            | ndarray with ndim == 2
        :y: 
            | ndarray with ndim == 2
        
    Returns:
        :X,Y: 
            | 2 ndarrays with ndim == 3 
            |   X.shape = (x.shape[0],y.shape[0],x.shape[1])
            |   Y.shape = (x.shape[0],y.shape[0],y.shape[1])
    """
    Y = np.transpose(np.repeat(y, x.shape[0], axis=0).reshape(
        (y.shape[0], x.shape[0], y.shape[1])),
                     axes=[1, 0, 2])
    X = np.repeat(x, y.shape[0], axis=0).reshape(
        (x.shape[0], y.shape[0], x.shape[1]))
    return X, Y
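
A minimal usage sketch (assuming numpy is available as np and meshblock as defined above; the shapes follow the docstring):

import numpy as np

x = np.arange(6.0).reshape(2, 3)   # ndim == 2, shape (2, 3)
y = np.arange(8.0).reshape(4, 2)   # ndim == 2, shape (4, 2)
X, Y = meshblock(x, y)
print(X.shape)  # (2, 4, 3): x repeated along a new axis of length y.shape[0]
print(Y.shape)  # (2, 4, 2): y repeated x.shape[0] times along axis 0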
Code example #2
import numpy as np
import luxpy as lx

def normalize_to_Lw(Ill, Lw, cieobs, rflM):
    """Normalize illuminant spd(s) in Ill to a white luminance Lw and build the illuminant x reflectance spd block."""
    xyzw = lx.spd_to_xyz(Ill, cieobs = cieobs, relative = False)
    for i in range(Ill.shape[0]-1):
        Ill[i+1] = Lw*Ill[i+1]/xyzw[i,1] # rescale each spd so its luminance equals Lw
    IllM = []
    for i in range(Ill.shape[0]-1):
        IllM.append(np.vstack((Ill[0],Ill[i+1]*rflM[1:,:]))) # prepend wavelength row to the reflected spds
    IllM = np.transpose(np.array(IllM),(1,0,2))
    return Ill, IllM
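
A hedged usage sketch (assuming luxpy is importable as lx and that lx._CIE_ILLUMINANTS contains a 'D65' entry; the two flat reflectances are made up for illustration):

import numpy as np
import luxpy as lx

Ill = lx._CIE_ILLUMINANTS['D65'].copy()   # (2 x wl): wavelength row + one spd
wl = Ill[0]
rflM = np.vstack((wl, 0.5*np.ones_like(wl), 0.2*np.ones_like(wl)))  # wavelength row + 2 flat reflectances
Ill, IllM = normalize_to_Lw(Ill, Lw = 100.0, cieobs = '2006_10', rflM = rflM)
print(IllM.shape)  # (3, 1, wl): wavelength row + 2 reflected spds, for 1 illuminant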
Code example #3
File: CDATA.py Project: hohlraum/luxpy
def join(self, data):
    """
    Join data along last axis and return instance.
    """
    if data[0].ndim == 2:  # faster implementation
        self.value = np.transpose(
            np.concatenate(data, axis=0).reshape((np.hstack(
                (len(data), data[0].shape)))), (1, 2, 0))
    elif data[0].ndim == 1:
        self.value = np.concatenate(data, axis=0).reshape((np.hstack(
            (len(data), data[0].shape)))).T
    else:
        self.value = np.hstack(data)[0]
    return self
Code example #4
def ajoin(data):
    """
    Join data on last axis.
    
    Args:
        :data:
            | tuple (ndarray, ndarray, ...)
        
    Returns:
        :returns:
            | ndarray (shape[-1] is equal to tuple length)
    """
    if data[0].ndim == 2: #faster implementation
        return np.transpose(np.concatenate(data,axis=0).reshape((np.hstack((len(data),data[0].shape)))),(1,2,0))
    elif data[0].ndim == 1:
        return np.concatenate(data,axis=0).reshape((np.hstack((len(data),data[0].shape)))).T
    else:
        return np.hstack(data)[0]
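
A minimal shape demo (assuming numpy as np); the same logic backs the join() method in code example #3:

import numpy as np

a, b, c = np.zeros((5, 4)), np.ones((5, 4)), 2*np.ones((5, 4))
print(ajoin((a, b, c)).shape)   # (5, 4, 3): last axis indexes the tuple
u, v = np.zeros(5), np.ones(5)
print(ajoin((u, v)).shape)      # (5, 2)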
Code example #5
def ndset(F):
    """
    Finds the nondominated set of a set of objective points.

    Args:
        :F: 
            | a m x mu ndarray with mu points and m objectives

    Returns:
        :ispar: 
            | a mu-length boolean vector, True at the nondominated points
    """
    mu = F.shape[1]  #number of points

    # The idea is to compare each point with the other ones
    f1 = np.transpose(F[..., None], axes=[0, 2, 1])  #puts in the 3D direction
    f1 = np.repeat(f1, mu, axis=1)
    f2 = np.repeat(F[..., None], mu, axis=2)

    # Now, for the ii-th slice, the ii-th individual is compared with all of the
    # others at once. Then, the usual operations of domination are checked
    # Checks where f1 dominates f2
    aux1 = (f1 <= f2).all(axis=0, keepdims=True)
    aux2 = (f1 < f2).any(axis=0, keepdims=True)

    auxf1 = np.logical_and(aux1, aux2)
    # Checks where f1 is dominated by f2
    aux1 = (f1 >= f2).all(axis=0, keepdims=True)
    aux2 = (f1 > f2).any(axis=0, keepdims=True)
    auxf2 = np.logical_and(aux1, aux2)

    # dom will be a 3D matrix (1 x mu x mu) such that, for the ii-th slice, it
    # will contain +1 if fii dominates the current point, -1 if it is dominated
    # by it, and 0 if they are incomparable
    dom = np.zeros((1, mu, mu), dtype=int)

    dom[auxf1] = 1
    dom[auxf2] = -1

    # Finally, the slices with no -1 are nondominated
    ispar = (dom != -1).all(axis=1)
    ispar = ispar.flatten()
    return ispar
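
A small worked example (assuming numpy as np; minimization is implied by the domination checks above):

import numpy as np

# 2 objectives (rows), 4 points (columns); (3,3) is dominated by (2,2):
F = np.array([[1., 2., 3., 3.],
              [3., 2., 1., 3.]])
print(ndset(F))  # [ True  True  True False]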
Code example #6
def cam18sl(data, datab = None, Lb = [100], fov = 10.0, inputtype = 'xyz', direction = 'forward', outin = 'Q,aW,bW', parameters = None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
              or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
              of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used in case datab == None and the background is assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """
    
    if parameters is None:
        parameters = _CAM18SL_PARAMETERS
        
    outin = outin.split(',')    
    
    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, k, naka, unique_hue_data = [parameters[x] for x in sorted(parameters.keys())]
    
    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF['2006_10']['M'])
    MAab = np.array([cAlms,calms,cblms])
    invMAab = np.linalg.inv(MAab)    
    
    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs = '2006_10', relative = False)[...,1:2]
        else:
            Lb = datab[...,1:2]
    else:
        if isinstance(Lb,list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0] # use wlr of stimulus data
        else:
            wlr = datab[0] # use wlr of background data
    datar = np.vstack((wlr,np.ones((Lb.shape[0], wlr.shape[0])))) # create eew
    xyzr = spd_to_xyz(datar, cieobs = '2006_10', relative = False) # get abs. tristimulus values
    datar[1:] = datar[1:]/xyzr[...,1:2]*Lb
    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs = '2006_10', relative = False)
            datar = datab.copy()

 
    # prepare data and datab for loop over backgrounds: 
    # make axis 1 of datab have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if datab.shape[0] == 1: # make datab and datar have the same light-source dimension (axis 0, used to store different backgrounds) as data
            datab = np.repeat(datab,data.shape[1],axis=0)  
            datar = np.repeat(datar,data.shape[1],axis=0)               
    else:
        if datab.shape[0] == 2:
            datab = np.vstack((datab[0],np.repeat(datab[1:], data.shape[1], axis = 0)))
        if datar.shape[0] == 2:
            datar = np.vstack((datar[0],np.repeat(datar[1:], data.shape[1], axis = 0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))

    #-------------------------------------------------
    
    #initialize camout:     
    dshape = list(data.shape)
    dshape[-1] = len(outin) # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)
    
  
    for i in range(data.shape[0]):
       
        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i+1:i+2,:])), cieobs = '2006_10', relative = False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i+1:i+2,:])), cieobs = '2006_10', relative = False)
        else:
            xyzb = datab[i:i+1,:] 
            xyzr = datar[i:i+1,:] 

        lmsb = np.dot(_CMF['2006_10']['M'],xyzb.T).T # convert to l,m,s
        rgbb = (lmsb / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF['2006_10']['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape)*Lb[i] # explicitly equal EEW cone excitations at the same luminance values as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs = '2006_10', relative = False)   
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF['2006_10']['M'],xyz.T).T # convert to l,m,s
            rgb = (lms / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr/rgbb)[0])
            rgba = np.dot(Mcat,rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin
            
            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A,a,b = asplit(Aab)
            a = ca*a
            b = cb*b

            # calculate colorfulness-like signal M:
            M = cM*((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA*(A + cHK[0]*M**cHK[1]) # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0]*(s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q*(fov/10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a,b, htype = 'deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data = unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M*np.cos(h*np.pi/180.0)
                bM = M*np.sin(h*np.pi/180.0)
            
            if 'aS' in outin:
                aS = s*np.cos(h*np.pi/180.0)
                bS = s*np.sin(h*np.pi/180.0)
            
            if 'aW' in outin:
                aW = W*np.cos(h*np.pi/180.0)
                bW = W*np.sin(h*np.pi/180.0)

            if (outin != ['Q','aW','bW']):
                camout[i] =  eval('ajoin(('+','.join(outin)+'))')
            else:
                camout[i] = ajoin((Q,aW,bW))
    
        
        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:        
            if 'aW' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0)/cW[0])**(1.0/cW[1])
                M = s*Q
                
            
            if 'aM' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5
            
            if 'aS' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s*Q
                      
            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                     s = (((1.0 / WsM) - 1.0)/cW[0])**(1.0/cW[1])
                     M = s*Q
                elif 's' in outin:
                     M = WsM*Q
                elif 'M' in outin:
                     M = WsM
            
            # calculate achromatic signal, A from Q and M:
            A = Q/cA - cHK[0]*M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a,b, htype = 'rad')
            
            # calculate a,b from M and h:
            a = (M/cM)*np.cos(h)
            b = (M/cM)*np.sin(h)

            a = a/ca
            b = b/cb

            # create Aab:
            Aab = ajoin((A,a,b))    

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T    

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'], direction = 'inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb/rgbr)[0]),rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb/k*_CMF['2006_10']['K']  
            xyz = np.dot(Mlms2xyz,lms.T).T 
            
            camout[i] = xyz
    
    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
    
    return camout
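
A hedged round-trip sketch (assuming luxpy exposes the function as lx.cam.cam18sl; the absolute CIE 2006 10° tristimulus values are illustrative):

import numpy as np
import luxpy as lx

xyz = np.array([[100., 100., 100.],
                [ 60., 100.,  80.]])
qab = lx.cam.cam18sl(xyz, datab = None, Lb = [100], direction = 'forward', outin = 'Q,aW,bW')
xyz_back = lx.cam.cam18sl(qab, datab = None, Lb = [100], direction = 'inverse', outin = 'Q,aW,bW')
# xyz_back should approximately recover xyz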
Code example #7
def VF_colorshift_model(S, cri_type = _VF_CRI_DEFAULT, model_type = _VF_MODEL_TYPE, \
                        cspace = _VF_CSPACE, sampleset = None, pool = False, \
                        pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),'Cref' : _VF_MAXR, 'sig' : _VF_SIG}, \
                        vfcolor = 'k',verbosity = 0):
    """
    Applies full vector field model calculations to spectral data.
    
    Args:
        :S: 
            | numpy.ndarray with spectral data.
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool: 
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
              modeling the vector field, while False models a different field 
              for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
              for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                 'Cref' : _VF_MAXR, 
            |                 'sig' : _VF_SIG, 
            |                 'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
              summarized by the model parameters themselves OR by calculating the
              dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
            
    Returns:
        :returns: 
            | list[dict] (each list element refers to a different test SPD)
            | with the following keys:
            |   - 'Source': dict with ndarrays of the S, cct and duv of source spd.
            |   - 'metrics': dict with ndarrays for:
            |         * Rf (color fidelity: base + metameric shift)
            |         * Rt (metameric uncertainty index) 
            |         * Rfi (specific color fidelity indices)
            |         * Rti (specific metameric uncertainty indices)
            |         * cri_type (str with cri_type)
            |   - 'Jab': dict with with ndarrays for Jabt, Jabr, DEi
            |   - 'dC/C_dH_x_sig' : 
            |           np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T
            |           See get_poly_model() for more info.
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |      vector-field and circle-fields: 
            |        * 'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 
            |                           'axr' : vfaxr, 'bxr' : vfbxr},
            |        * 'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 
            |                           'axr' : cfaxr, 'bxr' : cfbxr}},
            |   - 'modeldata' : dict with model info:
            |                {'pmodel': pmodel, 
            |                'pcolorshift' : pcolorshift, 
            |                  'dab_model' : dab_model, 
            |                  'dab_res' : dab_res,
            |                  'dab_std' : dab_std,
            |                  'modeltype' : modeltype, 
            |                  'fmodel' : poly_model,
            |                  'Jabtm' : Jabtm, 
            |                  'Jabrm' : Jabrm, 
            |                  'DEim' : DEim},
            |   - 'vshifts' :dict with various vector shifts:
            |        * 'Jabshiftvector_r_to_t' : ndarray with difference vectors
            |                                    between jabt and jabr.
            |        * 'vshift_ab_s' : vshift_ab_s: ab-shift vectors of samples 
            |        * 'vshift_ab_s_vf' : vshift_ab_s_vf: ab-shift vectors of 
            |                             VF model predictions of samples.
            |        * 'vshift_ab_vf' : vshift_ab_vf: ab-shift vectors of VF 
            |                            model predictions of vector field grid.
    """
    
    if type(cri_type) == str:
        cri_type_str = cri_type
    else:
        cri_type_str = None
    
    # Calculate Rf, Rfi and Jabr, Jabt:
    Rf, Rfi, Jabt, Jabr,cct,duv,cri_type  = spd_to_cri(S, cri_type= cri_type,out='Rf,Rfi,jabt,jabr,cct,duv,cri_type', sampleset=sampleset)
    
    # In case of multiple source SPDs, pool:
    if (len(Jabr.shape) == 3) & (Jabr.shape[1]>1) & (pool == True):
        #Nsamples = Jabr.shape[0]
        Jabr = np.transpose(Jabr,(1,0,2)) # set lamps on first dimension
        Jabt = np.transpose(Jabt,(1,0,2))
        Jabr = Jabr.reshape(Jabr.shape[0]*Jabr.shape[1],3) # put all lamp data one after the other
        Jabt = Jabt.reshape(Jabt.shape[0]*Jabt.shape[1],3)
        Jabt = Jabt[:,None,:] # add dim = 1
        Jabr = Jabr[:,None,:]
    

    out = [{} for _ in range(Jabr.shape[1])] #initialize empty list of dicts
    if pool == False:
        N = Jabr.shape[1]
    else:
        N = 1
    for i in range(N):
        
        Jabr_i = Jabr[:,i,:].copy()
        Jabr_i = Jabr_i[:,None,:]
        Jabt_i = Jabt[:,i,:].copy()
        Jabt_i = Jabt_i[:,None,:]

        DEi = np.sqrt((Jabr_i[...,0] - Jabt_i[...,0])**2 + (Jabr_i[...,1] - Jabt_i[...,1])**2 + (Jabr_i[...,2] - Jabt_i[...,2])**2)

        # Determine polynomial model:
        poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = get_poly_model(Jabt_i, Jabr_i, modeltype = model_type)
        
        # Apply model at fixed hues:
        href = pcolorshift['href']
        Cref = pcolorshift['Cref']
        sig = pcolorshift['sig']
        dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(poly_model, pmodel, dCHoverC_res, hx = href, Cxr = Cref, sig = sig)
        
        # Calculate deshifted a,b values on original samples:
        Jt = Jabt_i[...,0].copy()
        at = Jabt_i[...,1].copy()
        bt = Jabt_i[...,2].copy()
        Jr = Jabr_i[...,0].copy()
        ar = Jabr_i[...,1].copy()
        br = Jabr_i[...,2].copy()
        ar = ar + dab_model[:,0:1] # deshift reference to model prediction
        br = br + dab_model[:,1:2] # deshift reference to model prediction
        
        Jabtm = np.hstack((Jt,at,bt))
        Jabrm = np.hstack((Jr,ar,br))
        
        # calculate color differences between test and deshifted ref:
#        DEim = np.sqrt((Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2)
        DEim = np.sqrt(0*(Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2) # J is not used

        # Apply scaling function to convert DEim to Rti:
        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        avg = cri_type['avg']  
        Rfi_deshifted = scale_fcn(DEim,scale_factor)
        Rf_deshifted = scale_fcn(avg(DEim,axis = 0),scale_factor)
        
        rms = lambda x: np.sqrt(np.sum(x**2,axis=0)/x.shape[0])
        Rf_deshifted_rms = scale_fcn(rms(DEim),scale_factor)
    
        # Generate vector field:
        vfaxt,vfbxt,vfaxr,vfbxr = generate_vector_field(poly_model, pmodel,axr = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), bxr = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), limit_grid_radius = _VF_MAXR,color = 0)

        # Calculate ab-shift vectors of samples and VF model predictions:
        vshift_ab_s = calculate_shiftvectors(Jabt_i, Jabr_i, average = False, vtype = 'ab')[:,0,0:3]
        vshift_ab_s_vf = calculate_shiftvectors(Jabtm,Jabrm, average = False, vtype = 'ab')

        # Calculate ab-shift vectors using vector field model:
        Jabt_vf = np.hstack((np.zeros((vfaxt.shape[0],1)), vfaxt, vfbxt))   
        Jabr_vf = np.hstack((np.zeros((vfaxr.shape[0],1)), vfaxr, vfbxr))   
        vshift_ab_vf = calculate_shiftvectors(Jabt_vf,Jabr_vf, average = False, vtype = 'ab')

        # Generate circle field:
        x,y = plotcircle(radii = np.arange(0,_VF_MAXR+_VF_DELTAR,10), angles = np.arange(0,359,1), out = 'x,y')
        cfaxt,cfbxt,cfaxr,cfbxr = generate_vector_field(poly_model, pmodel,make_grid = False,axr = x[:,None], bxr = y[:,None], limit_grid_radius = _VF_MAXR,color = 0)

        out[i] = {'Source' : {'S' : S, 'cct' : cct[i] , 'duv': duv[i]},
               'metrics' : {'Rf':Rf[:,i], 'Rt': Rf_deshifted, 'Rt_rms' : Rf_deshifted_rms, 'Rfi':Rfi[:,i], 'Rti': Rfi_deshifted, 'cri_type' : cri_type_str},
               'Jab' : {'Jabt' : Jabt_i, 'Jabr' : Jabr_i, 'DEi' : DEi},
               'dC/C_dH_x_sig' : np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T,
               'fielddata': {'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 'axr' : vfaxr, 'bxr' : vfbxr},
                             'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 'axr' : cfaxr, 'bxr' : cfbxr}},
               'modeldata' : {'pmodel': pmodel, 'pcolorshift' : pcolorshift, 
                              'dab_model' : dab_model, 'dab_res' : dab_res,'dab_std' : dab_std,
                              'model_type' : model_type, 'fmodel' : poly_model,
                              'Jabtm' : Jabtm, 'Jabrm' : Jabrm, 'DEim' : DEim},
               'vshifts' : {'Jabshiftvector_r_to_t' : np.hstack((Jt-Jr,at-ar,bt-br)),
                            'vshift_ab_s' : vshift_ab_s,
                            'vshift_ab_s_vf' : vshift_ab_s_vf,
                            'vshift_ab_vf' : vshift_ab_vf}}
     
    return out
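
A hedged usage sketch (assuming luxpy is importable as lx and that its _CIE_ILLUMINANTS dict contains an 'F4' entry):

import luxpy as lx

S = lx._CIE_ILLUMINANTS['F4'].copy()   # test SPD: wavelength row + values
results = VF_colorshift_model(S, pool = False)
print(results[0]['metrics']['Rf'], results[0]['metrics']['Rt'])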
Code example #8
File: cam15u.py Project: uhqinli/luxpy
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
              or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan * np.ones(dshape)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms /
                   _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = A + cHK[0] * M**cHK[
                1]  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
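
A hedged round-trip sketch along the same lines as for cam18sl (assuming lx.cam.cam15u is the exposed name):

import numpy as np
import luxpy as lx

xyz = np.array([[100., 100., 100.]])   # CIE 2006 10° tristimulus values
qab = lx.cam.cam15u(xyz, fov = 10.0, direction = 'forward', outin = 'Q,aW,bW')
xyz_back = lx.cam.cam15u(qab, fov = 10.0, direction = 'inverse', outin = 'Q,aW,bW')
# xyz_back should approximately recover xyz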
Code example #9
File: plotters.py Project: husion/luxpy
def plotellipse(v, cspace_in = 'Yxy', cspace_out = None, nsamples = 100, \
                show = True, axh = None, \
                line_color = 'darkgray', line_style = ':', line_width = 1, line_marker = '', line_markersize = 4,\
                plot_center = False, center_marker = 'o', center_color = 'darkgray', center_markersize = 4,\
                show_grid = True, label_fontname = 'Times New Roman', label_fontsize = 12,\
                out = None):
    """
    Plot ellipse(s) given in v-format [Rmax,Rmin,xc,yc,theta].
    
    Args:
        :v: 
            | (Nx5) ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :cspace_in:
            | 'Yxy', optional
            | Color space of v.
            | If None: no color space assumed. Axis labels assumed ('x','y').
        :cspace_out:
            | None, optional
            | Color space to plot ellipse(s) in.
            | If None: plot in cspace_in.
        :nsamples:
            | 100 or int, optional
            | Number of points (samples) in ellipse boundary
        :show:
            | True or boolean, optional
            | Plot ellipse(s) (True) or not (False)
        :axh: 
            | None, optional
            | Ax-handle to plot ellipse(s) in.
            | If None: create new figure with axes.
        :line_color:
            | 'darkgray', optional
            | Color to plot ellipse(s) in.
        :line_style:
            | ':', optional
            | Linestyle of ellipse(s).
        :line_width:
            | 1, optional
            | Width of ellipse boundary line.
        :line_marker:
            | '', optional
            | Marker for ellipse boundary.
        :line_markersize:
            | 4, optional
            | Size of markers in ellipse boundary.
        :plot_center:
            | False, optional
            | Plot center of ellipse: yes (True) or no (False)
        :center_color:
            | 'darkgray', optional
            | Color to plot ellipse center in.
        :center_marker:
            | 'o', optional
            | Marker for ellipse center.
        :center_markersize:
            | 4, optional
            | Size of marker of ellipse center.
        :show_grid:
            | True, optional
            | Show grid (True) or not (False)
        :label_fontname: 
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :out:
            | None, optional
            | Output of function
            | If None: returns None. Can be used to output axh of newly created
            |      figure axes or to return Yxys an ndarray with coordinates of 
            |       ellipse boundaries in cspace_out (shape = (nsamples,3,N)) 
            
        
    Returns:
        :returns: None, or whatever set by :out:.
    """
    Yxys = np.zeros((nsamples,3,v.shape[0]))
    ellipse_vs = np.zeros((v.shape[0],5))
    for i,vi in enumerate(v):
        
        # Set sample density of ellipse boundary:
        t = np.linspace(0, 2*np.pi, nsamples)
        
        a = vi[0] # major axis
        b = vi[1] # minor axis
        xyc = vi[2:4,None] # center
        theta = vi[-1] # rotation angle
        
        # define rotation matrix:
        R = np.hstack(( np.vstack((np.cos(theta), np.sin(theta))), np.vstack((-np.sin(theta), np.cos(theta)))))
 
        # Calculate ellipses:
        Yxyc = np.vstack((1, xyc)).T
        Yxy = np.vstack((np.ones((1,nsamples)), xyc + np.dot(R, np.vstack((a*np.cos(t), b*np.sin(t))) ))).T
        Yxys[:,:,i] = Yxy
        
        # Convert to requested color space:
        if (cspace_out is not None) & (cspace_in is not None):
            Yxy = colortf(Yxy, cspace_in + '>' + cspace_out)
            Yxyc = colortf(Yxyc, cspace_in + '>' + cspace_out)
            Yxys[:,:,i] = Yxy
            
            # get ellipse parameters in requested color space:
            ellipse_vs[i,:] = math.fit_ellipse(Yxy[:,1:])
            #de = np.sqrt((Yxy[:,1]-Yxyc[:,1])**2 + (Yxy[:,2]-Yxyc[:,2])**2)
            #ellipse_vs[i,:] = np.hstack((de.max(),de.min(),Yxyc[:,1],Yxyc[:,2],np.nan)) # nan because orientation is xy, but request is some other color space. Change later to actual angle when fitellipse() has been implemented

        
        # plot ellipses:
        if show == True:
            if (axh is None) & (i == 0):
                fig = plt.figure()
                axh = fig.add_subplot(111)
            
            if (cspace_in is None):
                xlabel = 'x'
                ylabel = 'y'
            else:
                xlabel = _CSPACE_AXES[cspace_in][1]
                ylabel = _CSPACE_AXES[cspace_in][2]
            
            if (cspace_out is not None):
                xlabel = _CSPACE_AXES[cspace_out][1]
                ylabel = _CSPACE_AXES[cspace_out][2]
            
            if plot_center == True:
                plt.plot(Yxyc[:,1],Yxyc[:,2],color = center_color, linestyle = 'none', marker = center_marker, markersize = center_markersize)

            plt.plot(Yxy[:,1],Yxy[:,2],color = line_color, linestyle = line_style, linewidth = line_width, marker = line_marker, markersize = line_markersize)
            plt.xlabel(xlabel, fontname = label_fontname, fontsize = label_fontsize)
            plt.ylabel(ylabel, fontname = label_fontname, fontsize = label_fontsize)
            if show_grid == True:
                plt.grid()
            #plt.show()     
    Yxys = np.transpose(Yxys,axes=(0,2,1))       
    if out is not None:
        return eval(out)
    else:
        return None
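
A minimal usage sketch (assuming numpy as np in the plotters.py context above; the ellipse parameters are made up):

import numpy as np

# [Rmax, Rmin, xc, yc, theta]: one ellipse centered at (0.3, 0.3), rotated by pi/6 rad:
v = np.array([[0.05, 0.02, 0.3, 0.3, np.pi/6]])
Yxys = plotellipse(v, cspace_in = 'Yxy', cspace_out = None, show = True, out = 'Yxys')
print(Yxys.shape)  # (nsamples, 1, 3) after the final transpose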
Code example #10
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [x M] x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy() 
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters,str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters,args)  #overwrite parameters with other (not-None) args input 
      
    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta,cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0  = [parameters[x] for x in sorted(parameters.keys())]
    
    # setup default adaptation field:   
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy() # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs = cieobs,relative=False) # get abs. tristimulus values
        if relative == False: # input is expected to be absolute: rescale white point spd to Lw
            dataw[1:] = Lw*dataw[1:]/xyzw[:,1:2] # make absolute
        else:
            dataw = dataw # leave as is: illuminant C is already relative (Y = 100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs = cieobs, relative = relative)

    # precomputations:
    Mxyz2lms = np.dot(np.diag(cLMS),math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))) # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS   
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda,calpha,cbeta])
    invMAab = np.linalg.inv(MAab)
    
    #initialize data and camout:
    data = np2d(data).copy() # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy() # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if dataw.shape[0] == 1: #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw,data.shape[1],axis=0)                
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0],np.repeat(dataw[1:], data.shape[1], axis = 0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))
  
    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3 # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):            
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
                dataw[i+1] = Lw*dataw[i+1]/xyzw_abs[0,1] # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
            lmsw = 683.0*np.dot(Mxyz2lms,xyzw.T).T/_CMF[cieobs]['K']
            lmsf = (Yb/100.0)*lmsw # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i,1:,:] = Lw*data[i,1:,:]/xyzw_abs[0,1] # make absolute
                xyzt = spd_to_xyz(data[i], cieobs = cieobs, relative = False)/_CMF[cieobs]['K'] 
                lmst = 683.0*np.dot(Mxyz2lms,xyzt.T).T # convert to l,m,s
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True: 
                dataw[i] = Lw*dataw[i]/100.0 # make absolute
            lmsw = 683.0* np.dot(Mxyz2lms, dataw[i].T).T /_CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb/100.0)*lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw*data[i]/100.0 # make absolute
                lmst = 683.0* np.dot(Mxyz2lms, data[i].T).T /_CMF[cieobs]['K'] # convert to lms
            else:
                 lmst = lmsf # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc*(np.log(lmst/lms0) + Cf*np.log(lmsf/lms0)))
        lmsfp = math.erf(Cc*(np.log(lmsf/lms0) + Cf*np.log(lmsf/lms0)))
        lmstp = np.vstack((lmsfp,lmstp)) # add adaptation field lms temporarily to lmsp for quick calculation
        
        
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar,alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0]*alph
        alphp[alph<0] = cga1[1]*alph[alph<0]
        betp = cgb1[0]*bet
        betp[bet<0] = cgb1[1]*bet[bet<0]
        
        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0]*(alphp + betp)
        betpp = cgb2[0]*(alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0]*(lstar + cl_int[1])
        alph_int = cab_int[0]*(np.cos(cab_int[1]*np.pi/180.0)*alphpp - np.sin(cab_int[1]*np.pi/180.0)*betpp)
        bet_int = cab_int[0]*(np.sin(cab_int[1]*np.pi/180.0)*alphpp + np.cos(cab_int[1]*np.pi/180.0)*betpp)
        lstar_out = lstar_int
        
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int -  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_out = alph_int - Ccwb[0]*alph_int[0] # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int -  Ccwb[1]*bet_int[0]
  
            camout[i] = np.vstack((lstar_out[1:],alph_out[1:],bet_out[1:])).T # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))
            
            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])
        
            # stage 5 inverse: 
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out +  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_int = alph_out + Ccwb[0]*alph_int[0] #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out +  Ccwb[1]*bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (np.cos(-cab_int[1]*np.pi/180.0)*alph_int - np.sin(-cab_int[1]*np.pi/180.0)*bet_int)
            betpp = (1.0 / cab_int[0]) * (np.sin(-cab_int[1]*np.pi/180.0)*alph_int + np.cos(-cab_int[1]*np.pi/180.0)*bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int /cl_int[0]) - cl_int[1] 
            
            # stage 4 inverse:
            alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0])  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0]) # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp/cga1[0]
            bet = betp/cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa*alphp)<0.0] = alphp[(sa*alphp)<0] / cga1[1] 
            bet[(sb*betp)<0.0] = betp[(sb*betp)<0] / cgb1[1] 
            lab = ajoin((lstar, alph, bet))
            
            # stage 2 inverse:
            lmstp = np.dot(invMAab,lab.T).T 
            lmstp[lmstp<-1.0] = -1.0
            lmstp[lmstp>1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf*np.log(lmsf/lms0)
            lmst = np.exp(lmstp) * lms0
            
            # stage 1 inverse:
            xyzt =  np.dot(invMxyz2lms,lmst.T).T   
            
            if relative == True:
                xyzt = (100.0/Lw) * xyzt
            
            camout[i] = xyzt
    
    
    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes = (1,0,2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
        
    return camout
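
A hedged forward/inverse sketch (assuming lx.cam.cam_sww16 is the exposed name; relative xyz input against the default CIE illuminant C white point):

import numpy as np
import luxpy as lx

xyz = np.array([[40., 20., 10.]])
lab = lx.cam.cam_sww16(xyz, inputtype = 'xyz', direction = 'forward', relative = True)
xyz_back = lx.cam.cam_sww16(lab, inputtype = 'xyz', direction = 'inverse', relative = True)
# xyz_back should approximately recover xyz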