Example #1
def spd_to_fci(spd, use_cielab=True):
    """
    Calculate Feeling of Contrast Index (FCI).
    
    Args:
        :spd:
            | ndarray with spectral power distribution(s) of the test light source(s).
        :use_cielab:
            |  True, optional
            | True: use original formulation of FCI, which adopts a CIECAT94 
            | chromatic adaptation transform followed by a conversion to 
            | CIELAB coordinates before calculating the gamuts.
            | False: use CIECAM02 coordinates and embedded CAT02 transform.
            
    Returns:
        :fci:
            | ndarray with FCI values.
            
    References:
        1. `Hashimoto, K., Yano, T., Shimizu, M., & Nayatani, Y. (2007). 
        New method for specifying color-rendering properties of light sources 
        based on feeling of contrast. 
        Color Research and Application, 32(5), 361–371. 
        <http://dx.doi.org/10.1002/col.20338>`_
    """

    # get xyz:
    xyz, xyzw = spd_to_xyz(spd,
                           cieobs='1931_2',
                           relative=True,
                           rfl=_RFL_FCI,
                           out=2)

    # set condition parameters:
    D = 1
    Yb = 20
    La = Yb * 1000 / np.pi / 100

    if use_cielab:
        # apply ciecat94 chromatic adaptation transform:
        xyzc = cat.apply_ciecat94(
            xyz, xyzw=xyzw, E=1000, Yb=20, D=D, cat94_old=True
        )  # there is apparently an updated version with an alpha incomplete-adaptation factor and noise = 0.1; however, FCI doesn't use that version.

        # convert to cielab:
        lab = xyz_to_lab(xyzc, xyzw=_XYZW_D65_REF)
        labd65 = np.repeat(xyz_to_lab(_XYZ_D65_REF, xyzw=_XYZW_D65_REF),
                           lab.shape[1],
                           axis=1)
    else:
        f = lambda xyz, xyzw: cam.xyz_to_jabC_ciecam02(
            xyz, xyzw=xyzw, La=1000 * 20 / np.pi / 100, Yb=20, surround='avg')
        lab = f(xyz, xyzw)
        labd65 = np.repeat(f(_XYZ_D65_REF, _XYZW_D65_REF),
                           lab.shape[1],
                           axis=1)

    fci = 100 * (_polyarea3D(lab) / _polyarea3D(labd65))**1.5

    return fci
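A minimal usage sketch, under assumptions not stated in the source: luxpy is installed, this function is exposed as luxpy.spd_to_fci, and _CIE_ILLUMINANTS contains an 'F4' entry (as in recent luxpy releases):

import luxpy as lx

spd = lx._CIE_ILLUMINANTS['F4']            # 2 x wl ndarray: wavelengths + values
fci = lx.spd_to_fci(spd, use_cielab=True)  # original CIELAB-based formulation
fci_cam02 = lx.spd_to_fci(spd, use_cielab=False)  # CIECAM02-based variant
print(fci, fci_cam02)  # FCI values; 100 corresponds to the D65 reference gamut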
Example #2
def _index_combinations(indices):
    """ 
    Get index combinations
    
    Modified from http://stackoverflow.com/a/11144716
    """
    return np.tile(indices, len(indices)), np.repeat(indices, len(indices))
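Since the function is pure NumPy, its behavior is easy to verify: np.tile cycles the indices while np.repeat stretches them, so pairing the two outputs enumerates every ordered index combination.

import numpy as np

i, j = _index_combinations(np.arange(3))
# i = [0 1 2 0 1 2 0 1 2] (tiled), j = [0 0 0 1 1 1 2 2 2] (repeated):
# zipping them yields all 9 ordered pairs (0,0), (1,0), (2,0), (0,1), ...
print(np.column_stack((i, j)))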
Example #3
def ndset(F):
    """
    Finds the nondominated set of a set of objective points.

    Args:
      :F: 
          | a m x mu ndarray with mu points and m objectives

    Returns:
      :ispar: 
          | a mu-length boolean vector, True at the nondominated points
    """
    mu = F.shape[1] #number of points

    # The idea is to compare each point with all the other ones
    f1 = np.transpose(F[...,None], axes = [0, 2, 1]) # move the points into the 3rd dimension
    f1 = np.repeat(f1,mu,axis=1)
    f2 = np.repeat(F[...,None],mu,axis=2)

    # Now, for the ii-th slice, the ii-th individual is compared with all of the
    # others at once. Then, the usual operations of domination are checked
    # Checks where f1 dominates f2
    aux1 = (f1 <= f2).all(axis = 0, keepdims = True)
    aux2 = (f1 < f2).any(axis = 0, keepdims = True)

    auxf1 = np.logical_and(aux1, aux2)
    # Checks where f1 is dominated by f2
    aux1 = (f1 >= f2).all(axis = 0, keepdims = True)
    aux2 = (f1 > f2).any(axis = 0, keepdims = True)
    auxf2 = np.logical_and(aux1, aux2)
    
    # dom will be a 3D matrix (1 x mu x mu) such that, for the ii-th slice, it
    # will contain +1 if fii dominates the current point, -1 if it is dominated 
    # by it, and 0 if they are incomparable
    dom = np.zeros((1, mu, mu), dtype = int)

    dom[auxf1] = 1
    dom[auxf2] = -1
    
    # Finally, the slices with no -1 are nondominated
    ispar = (dom != -1).all(axis = 1)
    ispar = ispar.flatten()
    return ispar
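A small worked example (minimization is implied by the <=/< comparisons above): with three 2-objective points in the columns, the third is dominated by the second.

import numpy as np

F = np.array([[1.0, 2.0, 3.0],    # objective 1 (columns are points)
              [3.0, 2.5, 4.0]])   # objective 2
print(ndset(F))  # -> [ True  True False]: (3.0, 4.0) loses to (2.0, 2.5) on both objectives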
Example #4
def crowdingdistance(F):
    """
    Computes the crowding distance of a nondominated front.
    
    | The crowding distance gives a measure of how close the individuals are
    | with regard to its neighbors. The higher this value, the greater the
    | spacing. This is used to promote better diversity in the population.

    Args:
       :F: 
           | an m x mu ndarray with mu individuals and m objectives

    Returns:
       :cdist: 
           | a mu-length vector with the crowding distance of each individual
    """
    m, mu = F.shape #gets the size of F
    
    if mu == 2:
       cdist = np.vstack((np.inf, np.inf))
       return cdist

    
    #[Fs, Is] = sort(F,2); #sorts the objectives by individuals
    Is = F.argsort(axis = 1)
    Fs = np.sort(F,axis=1)
    
    # Creates the numerator
    C = Fs[:,2:] - Fs[:,:-2]
    C = np.hstack((np.inf*np.ones((m,1)), C, np.inf*np.ones((m,1)))) #complements with inf in the extremes
    
    # Indexing to permute the C matrix in the right ordering
    Aux = np.arange(m).repeat(mu).reshape(m,mu)   
    ind = np.ravel_multi_index((Aux.flatten(),Is.flatten()),(m, mu)) #converts to lin. indexes # ind = sub2ind([m, mu], Aux(:), Is(:));
    C2 = C.flatten().copy()
    C2[ind] = C2.flatten()
    C = C2.reshape((m, mu))

    # Constructs the denominator
    den = np.repeat((Fs[:,-1] - Fs[:,0])[:,None], mu, axis = 1)
    
    # Calculates the crowding distance
    cdist = (C/den).sum(axis=0)
    cdist = cdist.flatten() # ensure a flat 1D vector
    return cdist
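A quick check on an evenly spaced 2-objective front: the boundary points always receive an infinite distance so they are never discarded, while interior points get a finite spacing measure.

import numpy as np

F = np.array([[1.0, 2.0, 3.0, 4.0],   # a nondominated front, columns are individuals
              [4.0, 3.0, 2.0, 1.0]])
print(crowdingdistance(F))  # -> [inf 1.3333 1.3333 inf]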
Example #5
def getUSCensusAgeDist():
    """
    Get US Census Age Distribution
    """
    t_num = _INDVCMF_DATA['USCensus2010population']

    list_AgeCensus = t_num[0]
    freq_AgeCensus = np.round(
        t_num[1] / 1000
    )  # Reduce the population counts to a manageable number; this doesn't change the probabilities

    # Remove ages below 10 and above 70:
    freq_AgeCensus[:10] = 0
    freq_AgeCensus[71:] = 0

    list_Age = []
    for k in range(len(list_AgeCensus)):
        list_Age = np.hstack(
            (list_Age, np.repeat(list_AgeCensus[k], freq_AgeCensus[k])))

    return list_Age
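The per-age hstack loop above can be collapsed into a single call, since np.repeat accepts a per-element repeat count; a minimal sketch of the equivalence (with made-up ages and frequencies):

import numpy as np

ages = np.array([20, 21, 22])
freqs = np.array([2, 0, 3])  # hypothetical frequency counts
# One np.repeat with a count vector replaces the whole loop:
print(np.repeat(ages, freqs))  # -> [20 20 22 22 22]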
Example #6
def _massage_input_and_init_output(data,
                                   dataw,
                                   inputtype='xyz',
                                   direction='forward',
                                   n_out=3):
    """
    Redimension input data to ensure it has the appropriate size for easy and efficient looping.
    |
    | 1. Convert data and dataw to atleast_2d ndarrays
    | 2. Make axis 1 of dataw have 'same' dimensions as data
    | 3. Make dataw have the same light source axis size as data
    | 4. Flip light source axis to axis=0 for efficient looping
    | 5. Initialize output array camout to 'same' shape as data but with camout.shape[-1] == n_out
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :n_out:
            | 3, optional
            | output size of last dimension of camout 
            | (e.g. n_out=3 for j,a,b output or n_out = 5 for J,M,h,a,b output)
            
    Returns:
        :data:
            | ndarray with reshaped data
        :dataw:
            | ndarray with reshaped dataw
        :camout:
            | NaN-filled ndarray for the CAM output (camout.shape[-1] == n_out) 
        :originalshape:
            | original shape of data
            
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    # Convert data and dataw to atleast_2d ndarrays:
    data = np2d(data).copy(
    )  # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy(
    )  # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)
    originalshape = data.shape  # to restore output to same shape

    # Make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    dataw = np.expand_dims(
        dataw, axis=1)  # add extra axis to move light source to axis 0

    # Make dataw have the same light source dimension size as data:
    if inputtype == 'xyz':
        if dataw.shape[0] == 1:
            dataw = np.repeat(dataw, data.shape[0], axis=0)
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)
    else:
        dataw = np.array([
            np.vstack((dataw[:1, 0, :], dataw[i + 1:i + 2, 0, :]))
            for i in range(dataw.shape[0] - 1)
        ])
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)

    # Initialize output array:
    if n_out is not None:
        dshape = list((data).shape)
        dshape[-1] = n_out  # requested number of correlates: e.g. j,a,b
        if (inputtype != 'xyz') & (direction == 'forward'):
            dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only the forward mode accepts spectral input
        camout = np.zeros(dshape)
        camout.fill(np.nan)
    else:
        camout = None
    return data, dataw, camout, originalshape
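A shape-only sketch of what the massaging does for xyz input (an illustration under assumptions: np2d is luxpy's utility wrapper and must be importable where this function lives; the numbers are arbitrary):

import numpy as np

data = np.ones((10, 4, 3))   # 10 samples x 4 light sources x xyz
dataw = np.ones((1, 3))      # one white point, to be repeated per light source
data, dataw, camout, orig = _massage_input_and_init_output(
    data, dataw, inputtype='xyz', direction='forward', n_out=3)
# light source axis flipped to axis 0; camout pre-filled with NaN:
print(data.shape, dataw.shape, camout.shape)  # (4, 10, 3) (4, 1, 3) (4, 10, 3)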
Example #7
def calibrate(rgbcal, xyzcal, L_type = 'lms', tr_type = 'lut', cieobs = '1931_2', 
              nbit = 8, cspace = 'lab', avg = lambda x: ((x**2).mean()**0.5), ensure_increasing_lut_at_low_rgb = 0.2,
              verbosity = 1, sep=',',header=None): 
    """
    Calculate TR parameters/lut and conversion matrices.
    
    Args:
        :rgbcal:
            | ndarray [Nx3] or string with filename of RGB values 
            | rgbcal must contain at least the following type of settings:
            | - pure R,G,B: e.g. for pure R: (R != 0) & (G==0) & (B == 0)
            | - white(s): R = G = B = 2**nbit-1
            | - gray(s): R = G = B
            | - black(s): R = G = B = 0
            | - binary colors: cyan (G = B, R = 0), yellow (G = R, B = 0), magenta (R = B, G = 0)
        :xyzcal:
            | ndarray [Nx3] or string with filename of measured XYZ values for 
            | the RGB settings in rgbcal.
        :L_type:
            | 'lms', optional
            | Type of response to use in the derivation of the Tone-Response curves.
            | options:
            |  - 'lms': use cone fundamental responses: L vs R, M vs G and S vs B 
            |           (reduces noise and generally leads to more accurate characterization) 
            |  - 'Y': use the luminance signal: Y vs R, Y vs G, Y vs B
        :tr_type:
            | 'lut', optional
            | options:
            |  - 'lut': Derive/specify Tone-Response as a look-up-table
            |  - 'gog': Derive/specify Tone-Response as a gain-offset-gamma function
        :cieobs:
            | '1931_2', optional
            | CIE CMF set used to determine the XYZ tristimulus values
            | (needed when L_type == 'lms': determines the conversion matrix to
            | convert xyz to lms values)
        :nbit:
            | 8, optional
            | RGB values in nbit format (e.g. 8, 16, ...)
        :cspace:
            | color space or chromaticity diagram to calculate color differences in
            | when optimizing the xyz_to_rgb and rgb_to_xyz conversion matrices.
        :avg:
            | lambda x: ((x**2).mean()**0.5), optional
            | Function used to average the color differences of the individual RGB settings
            | in the optimization of the xyz_to_rgb and rgb_to_xyz conversion matrices.
        :ensure_increasing_lut_at_low_rgb:
            | 0.2 or float (max = 1.0) or None, optional
            | Ensure an increasing lut by setting to zero all values up to the last
            | zero-crossing of np.diff(lut) that occurs below a normalized RGB value
            | (RGB/RGB.max()) of :ensure_increasing_lut_at_low_rgb:
            | (0.2 is a good rule-of-thumb value).
            | Non-strictly increasing lut values can occur at low RGB values due
            | to noise and a low measurement signal. 
            | If None: don't force the lut to be increasing, but keep it as is.
        :verbosity:
            | 1, optional
            | > 0: print and plot optimization results
        :sep:
            | ',', optional
            | separator in files with rgbcal and xyzcal data
        :header:
            | None, optional
            | header specifier for files with rgbcal and xyzcal data 
            | (see pandas.read_csv)
            
    Returns:
        :M:
            | linear rgb to xyz conversion matrix
        :N:
            | xyz to linear rgb conversion matrix
        :tr:
            | Tone Response function parameters or lut
        :xyz_black:
            | ndarray with XYZ tristimulus values of black
        :xyz_white:
            | ndarray with XYZ tristimulus values of white
    """
    
    # process rgb, xyzcal inputs:
    rgbcal, xyzcal = _parse_rgbxyz_input(rgbcal, xyz = xyzcal, sep = sep, header=header)
    
    # get black-positions and average black xyz (flare):
    p_blacks = (rgbcal[:,0]==0) & (rgbcal[:,1]==0) & (rgbcal[:,2]==0)
    xyz_black = xyzcal[p_blacks,:].mean(axis=0,keepdims=True)
    
    # Calculate flare corrected xyz:
    xyz_fc = xyzcal - xyz_black
    
    # get positions of pure r, g, b values:
    p_pure = [(rgbcal[:,1]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,2]==0), 
              (rgbcal[:,0]==0) & (rgbcal[:,1]==0)] 
    
    # set type of L-response to use: Y for R,G,B or L,M,S for R,G,B:
    if L_type == 'Y':
        L = np.array([xyz_fc[:,1] for i in range(3)]).T
    elif L_type == 'lms':
        lms = (math.normalize_3x3_matrix(_CMF[cieobs]['M'].copy()) @ xyz_fc.T).T
        L = np.array([lms[:,i] for i in range(3)]).T
        
    # Get rgb linearizer parameters or lut and apply to all rgb's:
    if tr_type == 'gog':
        par = np.array([sp.optimize.curve_fit(TR, rgbcal[p_pure[i],i], L[p_pure[i],i]/L[p_pure[i],i].max(), p0=[1,0,1])[0] for i in range(3)]) # calculate parameters of each TR
        tr = par
    elif tr_type == 'lut':
        dac = np.arange(2**nbit)
        # lut = np.array([cie_interp(np.vstack((rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max())), dac, kind ='cubic')[1,:] for i in range(3)]).T
        lut = np.array([sp.interpolate.PchipInterpolator(rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max())(dac) for i in range(3)]).T # use this one to avoid potential overshoot with cubic spline interpolation (but slightly worse performance)
        lut[lut<0] = 0
          
        # ensure monotonically increasing lut values for low signal:
        if ensure_increasing_lut_at_low_rgb is not None:
            #ensure_increasing_lut_at_low_rgb = 0.2 # anything below that has a zero-crossing for diff(lut) will be set to zero
            for i in range(3):
                p0 = np.where((np.diff(lut[dac/dac.max() < ensure_increasing_lut_at_low_rgb,i])<=0))[0]
                if p0.any():
                    p0 = range(0,p0[-1])
                    lut[p0,i] = 0
        tr = lut

    
    # plot:
    if verbosity > 0:
        colors = 'rgb'
        linestyles = ['-','--',':']
        rgball = np.repeat(np.arange(2**nbit)[:,None],3,axis=1)
        Lall = _rgb_linearizer(rgball, tr, tr_type = tr_type)
        plt.figure()
        for i in range(3):
            plt.plot(rgbcal[p_pure[i],i],L[p_pure[i],i]/L[p_pure[i],i].max(),colors[i]+'o')
            plt.plot(rgball[:,i],Lall[:,i],colors[i]+linestyles[i],label=colors[i])
        plt.xlabel('Display RGB')
        plt.ylabel('Linear RGB')
        plt.legend()
        plt.title('Tone response curves')
    
    # linearize all rgb values and clamp to 0
    rgblin = _rgb_linearizer(rgbcal, tr, tr_type = tr_type) 
 
    # get rgblin to xyz_fc matrix:
    M = np.linalg.lstsq(rgblin, xyz_fc, rcond=None)[0].T 
    
    # get xyz_fc to rgblin matrix:
    N = np.linalg.inv(M)
    
    # get better approximation for conversion matrices:
    p_grays = (rgbcal[:,0] == rgbcal[:,1]) & (rgbcal[:,0] == rgbcal[:,2])
    p_whites = (rgbcal[:,0] == (2**nbit-1)) & (rgbcal[:,1] == (2**nbit-1)) & (rgbcal[:,2] == (2**nbit-1))
    xyz_white = xyzcal[p_whites,:].mean(axis=0,keepdims=True) # get xyzw for input into xyz_to_lab() or colortf()
    def optfcn(x, rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,out,verbosity):
        M = x.reshape((3,3))
        xyzest = rgb_to_xyz(rgbcal, M, tr, xyz_black, tr_type)
        xyzw = xyzcal[p_whites,:].mean(axis=0) # get xyzw for input into xyz_to_lab() or colortf()
        labcal, labest = colortf(xyzcal,tf=cspace,xyzw=xyzw), colortf(xyzest,tf=cspace,xyzw=xyzw) # calculate lab coord. of cal. and est.
        DEs = ((labcal-labest)**2).sum(axis=1)**0.5
        DEg = DEs[p_grays]
        DEw = DEs[p_whites]
        F = (avg(DEs)**2 + avg(DEg)**2 + avg(DEw)**2)**0.5
        if verbosity > 1:
            print('\nPerformance of TR + rgb-to-xyz conversion matrix M:')
            print('all: DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEs),np.std(DEs)))
            print('grays: DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEg),np.std(DEg)))
            print('whites(s) DE(jab): avg = {:1.4f}, std = {:1.4f}'.format(avg(DEw),np.std(DEw)))
        if out == 'F':
            return F
        else:
            return eval(out)
    x0 = M.ravel()
    res = math.minimizebnd(optfcn, x0, args =(rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,'F',0), use_bnd=False)
    xf = res['x_final']
    M = optfcn(xf, rgbcal, xyzcal, tr, xyz_black, cspace, p_grays, p_whites,'M',verbosity)
    N = np.linalg.inv(M)
    return M, N, tr, xyz_black, xyz_white
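A hedged usage sketch (the file names and their column layout are hypothetical; rgb_to_xyz/xyz_to_rgb are the companion conversion functions this calibration module is built around):

# Hypothetical calibration files with the settings described in the docstring:
# M, N, tr, xyz_black, xyz_white = calibrate('rgbcal.csv', 'xyzcal.csv',
#                                            L_type='lms', tr_type='lut',
#                                            cieobs='1931_2', cspace='lab')
# xyz_est = rgb_to_xyz(rgb, M, tr, xyz_black, 'lut')  # characterize display RGB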
Example #8
def hue_quadrature(h, unique_hue_data=None):
    """
    Get hue quadrature H from hue h.
    
    Args:
        :h: 
            | float or ndarray [(N,) or (N,1)] with hue data in degrees (!).
        :unique_hue_data:
            | None or dict, optional
            |   - None: defaults to:
            |         {'hues': 'red yellow green blue red'.split(), 
            |        'i': np.arange(5.0), 
            |        'hi':[20.14, 90.0, 164.25,237.53,380.14],
            |        'ei':[0.8,0.7,1.0,1.2,0.8],
            |        'Hi':[0.0,100.0,200.0,300.0,400.0]}
            |   - dict: user specified unique hue data  
            |           (same structure as above)
    
    Returns:
        :H: 
            | ndarray of Hue quadrature value(s).
    """

    if unique_hue_data is None:
        unique_hue_data = {
            'hues': 'red yellow green blue red'.split(),
            'i': [0, 1, 2, 3, 4],
            'hi': [20.14, 90.0, 164.25, 237.53, 380.14],
            'ei': [0.8, 0.7, 1.0, 1.2, 0.8],
            'Hi': [0.0, 100.0, 200.0, 300.0, 400.0]
        }

    ndim = np.array(h).ndim

    hi = unique_hue_data['hi']
    Hi = unique_hue_data['Hi']
    ei = unique_hue_data['ei']

    h = np.atleast_2d(h)
    h[h < hi[0]] += 360.0
    if h.shape[0] == 1:
        h = h.T

    H = np.zeros_like(h)
    for j in range(h.shape[1]):
        h_j = h[..., j:j + 1]
        h_hi = np.repeat(h_j, repeats=len(hi), axis=1)
        hi_h = np.repeat(np.atleast_2d(hi), repeats=h.shape[0], axis=0)
        d = (h_hi - hi_h)
        d[d < 0] = 1000.0
        p = d.argmin(axis=1)
        p[p == (len(hi) - 1)] = 0  # make sure the last unique hue entry is not selected
        H_j = np.array([
            Hi[pi] + (100.0 * (h_j[i] - hi[pi]) / ei[pi]) /
            ((h_j[i] - hi[pi]) / ei[pi] + (hi[pi + 1] - h_j[i]) / ei[pi + 1])
            for (i, pi) in enumerate(p)
        ])
        H[..., j:j + 1] = H_j

    if ndim == 0:
        return H[0][0]
    elif ndim == 1:
        return H[:, 0]
    else:
        return H
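Because the default unique-hue table is self-contained, the function is easy to sanity-check: the unique hues themselves must map onto multiples of 100.

import numpy as np

print(hue_quadrature(20.14))  # -> 0.0   (unique red)
print(hue_quadrature(90.0))   # -> 100.0 (unique yellow)
print(hue_quadrature(np.array([20.14, 164.25, 237.53])))  # -> [  0. 200. 300.]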
Example #9
def apply(data, n_step = 2, catmode = None, cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\
          D = None, mcat = [_MCAT_DEFAULT], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None):
    """
    Calculate corresponding colors by applying a von Kries chromatic adaptation
    transform (CAT), i.e. independent rescaling of 'sensor sensitivity' to data
    to adapt from current adaptation conditions (1) to the new conditions (2).
    
    Args:
        :data: 
            | ndarray of tristimulus values (can be NxMx3)
        :n_step:
            | 2, optional
            | Number of steps in the CAT (1: 1-step, 2: 2-step)
        :catmode: 
            | None, optional
            |    - None: use :n_step: to set mode: 1: '1>2', 2: '1>0>2'
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>2': One-step CAT
            |      from illuminant 1 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :cattype: 
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, depending on :catmode: optional (can be Mx3)
        :xyzw2:
            | None, depending on :catmode: optional (can be Mx3)
        :xyzw0:
            | None, depending on :catmode: optional (can be Mx3)
        :D: 
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0]. 
        :La: 
            | None, optional
            | Adapting luminances. 
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative. 
        :F: 
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations 
            |  (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0]. 
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | [_MCAT_DEFAULT], optional
            | List[str] or List[ndarray] of sensor space matrices for each 
            |  condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0: 
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values 
            |   - 'lms': return corresponding sensor space excitation values 
            |            (e.g. for further calculations) 
      
    Returns:
          :returns: 
              | ndarray with corresponding colors
        
    Reference:
        1. `Smet, K. A. G., & Ma, S. (2020). 
        Some concerns regarding the CAT16 chromatic adaptation transform. 
        Color Research & Application, 45(1), 172–177. 
        <https://doi.org/10.1002/col.22457>`_
    """

    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:
        # Set catmode:
        if catmode is None:
            if n_step == 2:
                catmode = '1>0>2'
            elif n_step == 1:
                catmode = '1>2'
            else:
                raise Exception(
                    'cat.apply(n_step = {:1.0f}, catmode = None): Unknown requested n-step CAT mode !'
                    .format(n_step))

        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)
        target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to ill. E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1 is not None):  # apply one-step CAT: 1-->0
            catmode = '1>0'  #override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, use '1>0' !"
            )

        # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:  (note D20 is degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.zeros(data.shape)
        xyzc.fill(np.nan)
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match data.shape[1]!'
            )

        for i in range(xyzc.shape[1]):
            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T
            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar1)  #get degree of adaptation depending on Dtype
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')

                D20[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar2)  #get degree of adaptation depending on Dtype

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar12)  #get degree of adaptation depending on Dtype

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]

        return xyzc
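A hedged one-step usage sketch (assuming this function is luxpy.cat.apply; the white points are illustrative D65- and A-like values, and the module's default sensor space matrix is used):

import numpy as np
import luxpy as lx

xyz = np.array([[50.0, 40.0, 30.0]])         # test tristimulus values
xyzw1 = np.array([[95.05, 100.0, 108.88]])   # adopted white 1 (~D65)
xyzw2 = np.array([[109.85, 100.0, 35.58]])   # adopted white 2 (~A)
xyzc = lx.cat.apply(xyz, n_step=1, xyzw1=xyzw1, xyzw2=xyzw2)
print(xyzc)  # corresponding colors under illuminant 2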
Example #10
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  # input is expected to be absolute
            dataw[1:] = Lw * dataw[1:] / xyzw[:, 1:2]  # make dataw absolute
        # else: dataw is already relative (Y = 100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy(
    )  # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy(
    )  # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[0] == 1:  # make dataw have the same light source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only the forward mode accepts spectral input
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i + 1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[0]  # white balance shift using adaptation gray background (Yb=20%), with Ccwb the degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[0]  # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb the degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
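A hedged forward-mode sketch (assuming the function is exposed as luxpy.cam.cam_sww16; relative xyz input with an illustrative D65-like white):

import numpy as np
import luxpy as lx

xyz = np.array([[40.0, 35.0, 30.0],
                [20.0, 25.0, 40.0]])           # relative xyz (Yw = 100)
xyzw = np.array([[95.05, 100.0, 108.88]])      # white point
labint = lx.cam.cam_sww16(xyz, dataw=xyzw, Yb=20.0, Lw=400.0, relative=True,
                          inputtype='xyz', direction='forward')
print(labint)  # l_int, a_int, b_int appearance correlates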
Example #11
def cam18sl(data,
            datab=None,
            Lb=[100],
            fov=10.0,
            inputtype='xyz',
            direction='forward',
            outin='Q,aS,bS',
            parameters=None):
    """
    Convert between CIE 2006 10° XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used in case datab == None and the background is assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aS,bS' or str, optional
            | 'Q,aS,bS' (brightness and opponent signals for saturation)
            |  other options: 'Q,aM,bM' (colorfulness) 
            |                 (Note that 'Q,aW,bW' would lead to a Cartesian 
            |                  a,b-coordinate system centered at (1,0))
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)
        | * Default output was 'Q,aW,bW' prior to March 2020, but since this
        |   is an a,b Cartesian system centered on (1,0), the default output
        |   has been changed to 'Q,aS,bS'.

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """

    if parameters is None:
        parameters = _CAM18SL_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cieobs, k, naka, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF[cieobs]['M'])
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs=cieobs, relative=False)[..., 1:2]
        else:
            Lb = datab[..., 1:2]
    else:
        if isinstance(Lb, list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0]  # use wlr of stimulus data
        else:
            wlr = datab[0]  # use wlr of background data
    datar = np.vstack((wlr, np.ones((Lb.shape[0], wlr.shape[0]))))  # create EEW
    xyzr = spd_to_xyz(datar, cieobs=cieobs,
                      relative=False)  # get abs. tristimulus values
    datar[1:] = datar[1:] / xyzr[..., 1:2] * Lb

    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs=cieobs, relative=False)

    # prepare data and datab for loop over backgrounds:
    # make axis 1 of datab have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        datar = spd_to_xyz(datar, cieobs=cieobs,
                           relative=False)  # convert to xyz!!
        if datab.shape[0] == 1:  # make datab and datar have the same light source dimension (used to store different backgrounds) size as data
            datab = np.repeat(datab, data.shape[1], axis=0)
            datar = np.repeat(datar, data.shape[1], axis=0)
    else:
        if datab.shape[0] == 2:
            datab = np.vstack(
                (datab[0], np.repeat(datab[1:], data.shape[1], axis=0)))
        if datar.shape[0] == 2:
            datar = np.vstack(
                (datar[0], np.repeat(datar[1:], data.shape[1], axis=0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    #-------------------------------------------------

    #initialize camout:
    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only the forward mode accepts spectral input
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
        else:
            xyzb = datab[i:i + 1, :]
            xyzr = datar[i:i + 1, :]

        lmsb = np.dot(_CMF[cieobs]['M'], xyzb.T).T  # convert to l,m,s
        rgbb = (lmsb / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF[cieobs]['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF[cieobs]['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape) * Lb[i]  # explicitly set equal EEW cone excitations at the same luminance as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs=cieobs, relative=False)
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF[cieobs]['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr / rgbb)[0])
            rgba = np.dot(Mcat, rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA * (
                A + cHK[0] * M**cHK[1]
            )  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q
            S = s  # make extra variable, just in case 'S' is called

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aS', 'bS']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aS, bS))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((1.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q / cA - cHK[0] * M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)

            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'],
                                direction='inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb / rgbr)[0]), rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb / k * _CMF[cieobs]['K']
            xyz = np.dot(Mlms2xyz, lms.T).T

            camout[i] = xyz

    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[1] == 1:
        camout = np.squeeze(camout, axis=1)

    return camout
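A hedged usage sketch (assuming the function is exposed as luxpy.cam.cam18sl; the stimulus is an illustrative absolute CIE 2006 10° xyz triple seen against a 100 cd/m² equal-energy-white background):

import numpy as np
import luxpy as lx

xyz = np.array([[300.0, 250.0, 150.0]])  # absolute CIE 2006 10° tristimulus values
qab = lx.cam.cam18sl(xyz, datab=None, Lb=[100], fov=10.0, inputtype='xyz',
                     direction='forward', outin='Q,aS,bS')
print(qab)  # brightness Q and saturation opponent signals aS, bS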