Example #1
def meshblock(x, y):
    """
    Create a meshed block from x and y.
    
    | (Similar to meshgrid, but axis = 0 is retained.)
    | Enables fast blockwise calculations.
    
    Args: 
        :x: 
            | ndarray with ndim == 2
        :y: 
            | ndarray with ndim == 2
        
    Returns:
        :X,Y: 
            | 2 ndarrays with ndim == 3 
            |   X.shape = (x.shape[0],y.shape[0],x.shape[1])
            |   Y.shape = (x.shape[0],y.shape[0],y.shape[1])
    """
    Y = np.transpose(np.repeat(y, x.shape[0], axis=0).reshape(
        (y.shape[0], x.shape[0], y.shape[1])),
                     axes=[1, 0, 2])
    X = np.repeat(x, y.shape[0], axis=0).reshape(
        (x.shape[0], y.shape[0], x.shape[1]))
    return X, Y
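A minimal usage sketch (assuming numpy is imported as np, as in the listing above):

import numpy as np

x = np.arange(6.0).reshape(2, 3)    # shape (2, 3)
y = np.arange(12.0).reshape(4, 3)   # shape (4, 3)
X, Y = meshblock(x, y)
print(X.shape, Y.shape)             # (2, 4, 3) (2, 4, 3)
# X[i, j] == x[i] and Y[i, j] == y[j]: every row of x is paired
# with every row of y, enabling blockwise calculations.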
Example #2
def broadcast_shape(data, target_shape = None, expand_2d_to_3d = None, \
                    axis0_repeats = None, axis1_repeats = None):
    """
    Broadcasts shapes of data to a target_shape.
    
    | Useful for block/vector calc. when numpy fails to broadcast correctly.
    
    Args:
        :data: 
            | ndarray 
        :target_shape: 
            | None or tuple with requested shape, optional
            |   - None: returns unchanged :data:
        :expand_2d_to_3d:
            | None (do nothing) or axis position, optional 
            | If data.ndim == 2, expand to 3 dimensions by inserting 
            | a new axis at the specified position (cfr. np.expand_dims)
        :axis0_repeats:
            | None or number of times to repeat axis=0, optional
            |   - None: keep axis=0 same size
        :axis1_repeats:
            | None or number of times to repeat axis=1, optional
            |   - None: keep axis=1 same size

    Returns:
        :returns: 
            | reshaped ndarray
    """
    data = np2d(data)
    
    # expand shape along axis (useful for functions that allow block calculations when the data is only 2d)
    if (expand_2d_to_3d is not None) & (len(data.shape) == 2):
        data = np.expand_dims(data, axis = expand_2d_to_3d)
     
    if target_shape is not None:
        dshape = data.shape
        if dshape != target_shape:
            axis_of_v3 = len(target_shape)-1
            if (dshape[0] != target_shape[0]): # repeat along axis 0
                if axis0_repeats is None:
                    axis0_repeats = (target_shape[0]-dshape[0] + 1)
                data = np.repeat(data,axis0_repeats,axis = 0)
            if (len(target_shape)>2) & (len(data.shape)==2): # repeat along axis 1, create axis if necessary
                data = np.expand_dims(data,axis = axis_of_v3-1) # axis creation
                dshape = data.shape
                if (dshape[1] != target_shape[1]):
                    if axis1_repeats is None:
                        axis1_repeats = (target_shape[1]-dshape[1] + 1) # repetition
                    data = np.repeat(data,axis1_repeats,axis = 1)

        for i in range(2):
            if (data.shape[i] > 1) & (data.shape[i] != target_shape[i]):
                raise Exception('broadcast_shape(): Cannot match dim of data with target: data.shape[i] > 1 and data.shape[i] != target_shape[i]')
    return data
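A minimal usage sketch (assuming numpy as np; np2d is luxpy's helper that promotes input to at least 2 dimensions):

import numpy as np

data = np.array([[1.0, 2.0, 3.0]])                 # shape (1, 3)
out = broadcast_shape(data, target_shape=(5, 3))   # repeats along axis 0
print(out.shape)                                   # (5, 3)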
Example #3
def ndset(F):
    """
    Finds the nondominated set of a set of objective points.

    Args:
        :F: 
            | an m x mu ndarray with mu points and m objectives

    Returns:
        :ispar: 
            | a mu-length boolean vector, True at the nondominated points
    """
    mu = F.shape[1]  #number of points

    # The idea is to compare each point with the other ones
    f1 = np.transpose(F[..., None], axes=[0, 2, 1])  # move the point index into the 3rd dimension
    f1 = np.repeat(f1, mu, axis=1)
    f2 = np.repeat(F[..., None], mu, axis=2)

    # Now, for the ii-th slice, the ii-th individual is compared with all of the
    # others at once. Then, the usual operations of domination are checked
    # Checks where f1 dominates f2
    aux1 = (f1 <= f2).all(axis=0, keepdims=True)
    aux2 = (f1 < f2).any(axis=0, keepdims=True)

    auxf1 = np.logical_and(aux1, aux2)
    # Checks where f1 is dominated by f2
    aux1 = (f1 >= f2).all(axis=0, keepdims=True)
    aux2 = (f1 > f2).any(axis=0, keepdims=True)
    auxf2 = np.logical_and(aux1, aux2)

    # dom will be a 3D matrix (1 x mu x mu) such that, for the ii-th slice, it
    # will contain +1 if fii dominates the current point, -1 if it is dominated
    # by it, and 0 if they are incomparable
    dom = np.zeros((1, mu, mu), dtype=int)

    dom[auxf1] = 1
    dom[auxf2] = -1

    # Finally, the slices with no -1 are nondominated
    ispar = (dom != -1).all(axis=1)
    ispar = ispar.flatten()
    return ispar
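A minimal usage sketch (assuming numpy as np); the third point is dominated by both others and is excluded from the nondominated set:

import numpy as np

F = np.array([[1.0, 2.0, 2.0],
              [2.0, 1.0, 3.0]])   # 2 objectives x 3 points
print(ndset(F))                   # [ True  True False]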
Example #4
def crowdingdistance(F):
    """
    Computes the crowding distance of a nondominated front.
    The crowding distance gives a measure of how close the individuals are
    with regard to its neighbors. The higher this value, the greater the
    spacing. This is used to promote better diversity in the population.

    Args:
       F: 
           | an m x mu ndarray with mu individuals and m objectives

    Returns:
       cdist: 
           | a m-length column vector
    """
    m, mu = F.shape  #gets the size of F

    if mu == 2:
        cdist = np.full(2, np.inf)  # both points are extremes of the front
        return cdist

    # sort the objectives by individuals (MATLAB: [Fs, Is] = sort(F,2))
    Is = F.argsort(axis=1)
    Fs = np.sort(F, axis=1)

    # Creates the numerator
    C = Fs[:, 2:] - Fs[:, :-2]
    C = np.hstack((np.inf * np.ones((m, 1)), C,
                   np.inf * np.ones((m, 1))))  # pad the extremes with inf

    # Indexing to permute the C matrix in the right ordering
    Aux = np.arange(m).repeat(mu).reshape(m, mu)
    ind = np.ravel_multi_index(
        (Aux.flatten(), Is.flatten()),
        (m, mu))  # convert to linear indices (MATLAB: ind = sub2ind([m, mu], Aux(:), Is(:)))
    C2 = C.flatten().copy()
    C2[ind] = C.flatten()  # scatter the sorted differences back into the original ordering
    C = C2.reshape((m, mu))

    # Constructs the denominator
    den = np.repeat((Fs[:, -1] - Fs[:, 0])[:, None], mu, axis=1)

    # Calculates the crowding distance
    cdist = (C / den).sum(axis=0)
    cdist = cdist.flatten()  # ensure a 1-D vector
    return cdist
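A minimal usage sketch (assuming numpy as np); the extreme points of the front get an infinite crowding distance:

import numpy as np

F = np.array([[0.0, 1.0, 2.0, 3.0],
              [3.0, 2.0, 1.0, 0.0]])   # 2 objectives x 4 points
print(crowdingdistance(F))             # [inf 1.33333333 1.33333333 inf]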
Example #5
def getUSCensusAgeDist():
    """
    Get US Census Age Distribution
    """
    t_num = _INDVCMF_DATA['USCensus2010population']

    list_AgeCensus = t_num[0]
    freq_AgeCensus = np.round(
        t_num[1] / 1000
    )  # Scale the population counts down to a manageable number; this doesn't change the age probabilities

    # Remove age < 10 and 70 < age:
    freq_AgeCensus[:10] = 0
    freq_AgeCensus[71:] = 0

    list_Age = []
    for k in range(len(list_AgeCensus)):
        list_Age = np.hstack(
            (list_Age, np.repeat(list_AgeCensus[k], freq_AgeCensus[k])))

    return list_Age
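The loop above grows list_Age with repeated np.hstack calls, copying the growing array each time; np.repeat accepts an array of per-element counts, so the same result can be obtained in a single vectorized call (a sketch, assuming freq_AgeCensus holds non-negative integer counts):

list_Age = np.repeat(list_AgeCensus, freq_AgeCensus.astype(int))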
Example #6
def cam18sl(data, datab = None, Lb = [100], fov = 10.0, inputtype = 'xyz', direction = 'forward', outin = 'Q,aW,bW', parameters = None):
    """
    Convert between CIE 2006 10° XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10° absolute XYZ tristimulus values or spectral data
              or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10° absolute XYZ tristimulus values or spectral data
              of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used when datab is None; the background is then assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """
    
    if parameters is None:
        parameters = _CAM18SL_PARAMETERS
        
    outin = outin.split(',')    
    
    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, k, naka, unique_hue_data = [parameters[x] for x in sorted(parameters.keys())]
    
    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF['2006_10']['M'])
    MAab = np.array([cAlms,calms,cblms])
    invMAab = np.linalg.inv(MAab)    
    
    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs = '2006_10', relative = False)[...,1:2]
        else:
            Lb = datab[...,1:2]
    else:
        if isinstance(Lb,list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0] # use wlr of stimulus data
        else:
            wlr = datab[0] # use wlr of background data
    datar = np.vstack((wlr,np.ones((Lb.shape[0], wlr.shape[0])))) # create eew
    xyzr = spd_to_xyz(datar, cieobs = '2006_10', relative = False) # get abs. tristimulus values
    datar[1:] = datar[1:]/xyzr[...,1:2]*Lb
    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs = '2006_10', relative = False)
            datar = datab.copy()

 
    # prepare data and datab for loop over backgrounds: 
    # make axis 1 of datab have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if datab.shape[0] == 1: # make datab and datar have the same light-source dimension (used to store different backgrounds) as data
            datab = np.repeat(datab,data.shape[1],axis=0)  
            datar = np.repeat(datar,data.shape[1],axis=0)               
    else:
        if datab.shape[0] == 2:
            datab = np.vstack((datab[0],np.repeat(datab[1:], data.shape[1], axis = 0)))
        if datar.shape[0] == 2:
            datar = np.vstack((datar[0],np.repeat(datar[1:], data.shape[1], axis = 0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))

    #-------------------------------------------------
    
    #initialize camout:     
    dshape = list(data.shape)
    dshape[-1] = len(outin) # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)
    
  
    for i in range(data.shape[0]):
       
        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i+1:i+2,:])), cieobs = '2006_10', relative = False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i+1:i+2,:])), cieobs = '2006_10', relative = False)
        else:
            xyzb = datab[i:i+1,:] 
            xyzr = datar[i:i+1,:] 

        lmsb = np.dot(_CMF['2006_10']['M'],xyzb.T).T # convert to l,m,s
        rgbb = (lmsb / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF['2006_10']['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape)*Lb[i] # explicitly equal EEW cone excitations at the same luminance values as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs = '2006_10', relative = False)   
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF['2006_10']['M'],xyz.T).T # convert to l,m,s
            rgb = (lms / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta

            # apply von Kries CAT with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr/rgbb)[0])
            rgba = np.dot(Mcat,rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin
            
            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A,a,b = asplit(Aab)
            a = ca*a
            b = cb*b

            # calculate colorfulness-like signal M:
            M = cM*((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA*(A + cHK[0]*M**cHK[1]) # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0]*(s**cW[1]))

            # adjust Q for size (fov) of stimulus (it is debatable whether to do this before or after calculating s or W; since there was no data on s, M or W for differently sized stimuli, it is done after)
            Q = Q*(fov/10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a,b, htype = 'deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data = unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M*np.cos(h*np.pi/180.0)
                bM = M*np.sin(h*np.pi/180.0)
            
            if 'aS' in outin:
                aS = s*np.cos(h*np.pi/180.0)
                bS = s*np.sin(h*np.pi/180.0)
            
            if 'aW' in outin:
                aW = W*np.cos(h*np.pi/180.0)
                bW = W*np.sin(h*np.pi/180.0)

            if (outin != ['Q','aW','bW']):
                camout[i] =  eval('ajoin(('+','.join(outin)+'))')
            else:
                camout[i] = ajoin((Q,aW,bW))
    
        
        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:        
            if 'aW' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0)/cW[0])**(1.0/cW[1])
                M = s*Q
                
            
            if 'aM' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                M = (a**2.0 + b**2.0)**0.5
            
            if 'aS' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                s = (a**2.0 + b**2.0)**0.5
                M = s*Q
                      
            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) # adjust Q for stimulus size (fov) back to the 10° reference
                if 'W' in outin:
                     s = (((1.0 / WsM) - 1.0)/cW[0])**(1.0/cW[1])
                     M = s*Q
                elif 's' in outin:
                     M = WsM*Q
                elif 'M' in outin:
                     M = WsM
            
            # calculate achromatic signal, A from Q and M:
            A = Q/cA - cHK[0]*M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a,b, htype = 'rad')
            
            # calculate a,b from M and h:
            a = (M/cM)*np.cos(h)
            b = (M/cM)*np.sin(h)

            a = a/ca
            b = b/cb

            # create Aab:
            Aab = ajoin((A,a,b))    

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T    

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'], direction = 'inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb/rgbr)[0]),rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb/k*_CMF['2006_10']['K']  
            xyz = np.dot(Mlms2xyz,lms.T).T 
            
            camout[i] = xyz
    
    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
    
    return camout
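A minimal forward/inverse round-trip sketch (assuming cam18sl and its luxpy dependencies are in scope; the XYZ values are purely illustrative):

import numpy as np

xyz = np.array([[100.0, 80.0, 60.0]])   # absolute CIE 2006 10° XYZ of a stimulus
qab = cam18sl(xyz, datab=None, Lb=[100], fov=10.0, inputtype='xyz',
              direction='forward', outin='Q,aW,bW')
xyz_back = cam18sl(qab, datab=None, Lb=[100], fov=10.0, inputtype='xyz',
                   direction='inverse', outin='Q,aW,bW')
# xyz_back should approximately reproduce xyz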
Example #7
def hue_quadrature(h, unique_hue_data=None):
    """
    Get hue quadrature H from hue h.
    
    Args:
        :h: 
            | float or ndarray [(N,) or (N,1)] with hue data in degrees (!).
        :unique_hue_data:
            | None or dict, optional
            |   - None: defaults to:
            |         {'hues': 'red yellow green blue red'.split(), 
            |          'i': np.arange(5.0), 
            |          'hi': [20.14, 90.0, 164.25, 237.53, 380.14],
            |          'ei': [0.8, 0.7, 1.0, 1.2, 0.8],
            |          'Hi': [0.0, 100.0, 200.0, 300.0, 400.0]}
            |   - dict: user specified unique hue data  
            |           (same structure as above)
    
    Returns:
        :H: 
            | ndarray of Hue quadrature value(s).
    """
    if unique_hue_data is None:
        unique_hue_data = {
            'hues': 'red yellow green blue red'.split(),
            'i': np.arange(5.0),
            'hi': [20.14, 90.0, 164.25, 237.53, 380.14],
            'ei': [0.8, 0.7, 1.0, 1.2, 0.8],
            'Hi': [0.0, 100.0, 200.0, 300.0, 400.0]
        }

    changed_number_to_array = False
    if isinstance(h, float) | isinstance(h, int):
        h = np.atleast_1d(h)
        changed_number_to_array = True

    squeezed = False
    if h.ndim > 1:
        if (h.shape[0] == 1):
            h = np.squeeze(h, axis=0)
            squeezed = True

    hi = unique_hue_data['hi']
    Hi = unique_hue_data['Hi']
    ei = unique_hue_data['ei']
    h = h.copy()  # avoid modifying the caller's array in place
    h[h < hi[0]] += 360.0
    h_tmp = np.atleast_2d(h)
    if h_tmp.shape[0] == 1:
        h_tmp = h_tmp.T
    h_hi = np.repeat(h_tmp, repeats=len(hi), axis=1)
    hi_h = np.repeat(np.atleast_2d(hi), repeats=h.shape[0], axis=0)
    d = (h_hi - hi_h)
    d[d < 0] = 1000.0
    p = d.argmin(axis=1)
    p[p == (len(hi) - 1)] = 0  # make sure last unique hue data is not selected
    H = np.array([
        Hi[pi] + (100.0 * (h[i] - hi[pi]) / ei[pi]) /
        ((h[i] - hi[pi]) / ei[pi] + (hi[pi + 1] - h[i]) / ei[pi + 1])
        for (i, pi) in enumerate(p)
    ])
    if changed_number_to_array:
        H = H[0]
    if squeezed:
        H = np.expand_dims(H, axis=0)
    return H
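A minimal usage sketch (assuming numpy as np); with the default unique hue data, the unique hues themselves map onto multiples of 100:

import numpy as np

print(hue_quadrature(90.0))                             # 100.0 (unique yellow)
print(hue_quadrature(np.array([20.14, 90.0, 164.25])))  # [  0. 100. 200.]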
Example #8
def apply(data, catmode = '1>0>2', cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\
          D = None, mcat = ['cat02'], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None):
    """
    Calculate corresponding colors by applying a von Kries chromatic adaptation
    transform (CAT), i.e. an independent rescaling of the 'sensor sensitivities',
    to adapt data from the current adaptation conditions (1) to the new conditions (2).
    
    Args:
        :data: 
            | ndarray of tristimulus values (can be NxMx3)
        :catmode: 
            | '1>0>2', optional
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :cattype: 
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, optional (whether it is needed depends on :catmode:; can be Mx3)
        :xyzw2:
            | None, optional (whether it is needed depends on :catmode:; can be Mx3)
        :xyzw0:
            | None, optional (whether it is needed depends on :catmode:; can be Mx3)
        :D: 
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0]. 
        :La: 
            | None, optional
            | Adapting luminances. 
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative. 
        :F: 
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations 
              (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0]. 
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | ['cat02'], optional
            | List[str] or List[ndarray] of sensor space matrices for each 
              condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0: 
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values 
            |   - 'lms': return corresponding sensor space excitation values 
            |            (e.g. for further calculations) 
      
    Returns:
          :returns: 
              | ndarray with corresponding colors
    """

    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:

        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)

        target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to iLL.E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations:
        # expansion_axis is 0 if the original data was 2D, else 1:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1 is not None):  # apply one-step CAT: 1-->0
            catmode = '1>0'  #override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, use '1>0' !"
            )

        # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:  (note D20 is degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.ones(data.shape) * np.nan
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match data.shape[1]!'
            )

        for i in range(xyzc.shape[1]):
            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T
            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar1)  #get degree of adaptation depending on Dtype
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')

                D20[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar2)  #get degree of adaptation depending on Dtype

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar12)  #get degree of adaptation depending on Dtype

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]

        return xyzc
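A minimal usage sketch (assuming apply and its luxpy dependencies, e.g. np2d and _MCATS, are in scope; the white points below are illustrative, not exact illuminant values):

import numpy as np

data = np.array([[50.0, 45.0, 40.0]])      # xyz of a sample under white 1
xyzw1 = np.array([[95.0, 100.0, 108.9]])   # adopted white 1
xyzw2 = np.array([[96.4, 100.0, 82.5]])    # adopted white 2
xyzc = apply(data, catmode='1>0>2', cattype='vonkries',
             xyzw1=xyzw1, xyzw2=xyzw2, mcat=['cat02'])
print(xyzc.shape)                          # (1, 3): corresponding colors under white 2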
Example #9
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use the Ccwb value from the :parameters: dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy() 
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters,str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters,args)  #overwrite parameters with other (not-None) args input 
      
    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta,cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0  = [parameters[x] for x in sorted(parameters.keys())]
    
    # setup default adaptation field:   
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy() # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs = cieobs,relative=False) # get abs. tristimulus values
        if relative == False: # input is expected to be absolute
            dataw[1:] = Lw*dataw[1:]/xyzw[:,1:2] # make absolute
        # else: dataw is already relative (Y = 100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs = cieobs, relative = relative)

    # precomputations:
    Mxyz2lms = np.dot(np.diag(cLMS),math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))) # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS   
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda,calpha,cbeta])
    invMAab = np.linalg.inv(MAab)
    
    #initialize data and camout:
    data = np2d(data).copy() # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy() # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if dataw.shape[0] == 1: # make dataw have the same light-source dimension size as data
            dataw = np.repeat(dataw,data.shape[1],axis=0)                
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0],np.repeat(dataw[1:], data.shape[1], axis = 0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))
  
    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3 # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):            
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
                dataw[i+1] = Lw*dataw[i+1]/xyzw_abs[0,1] # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
            lmsw = 683.0*np.dot(Mxyz2lms,xyzw.T).T/_CMF[cieobs]['K']
            lmsf = (Yb/100.0)*lmsw # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i,1:,:] = Lw*data[i,1:,:]/xyzw_abs[0,1] # make absolute
                xyzt = spd_to_xyz(data[i], cieobs = cieobs, relative = False)/_CMF[cieobs]['K'] 
                lmst = 683.0*np.dot(Mxyz2lms,xyzt.T).T # convert to l,m,s
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True: 
                dataw[i] = Lw*dataw[i]/100.0 # make absolute
            lmsw = 683.0* np.dot(Mxyz2lms, dataw[i].T).T /_CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb/100.0)*lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw*data[i]/100.0 # make absolute
                lmst = 683.0* np.dot(Mxyz2lms, data[i].T).T /_CMF[cieobs]['K'] # convert to lms
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc*(np.log(lmst/lms0) + Cf*np.log(lmsf/lms0)))
        lmsfp = math.erf(Cc*(np.log(lmsf/lms0) + Cf*np.log(lmsf/lms0)))
        lmstp = np.vstack((lmsfp,lmstp)) # add adaptation field lms temporarily to lmsp for quick calculation
        
        
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar,alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0]*alph
        alphp[alph<0] = cga1[1]*alph[alph<0]
        betp = cgb1[0]*bet
        betp[bet<0] = cgb1[1]*bet[bet<0]
        
        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0]*(alphp + betp)
        betpp = cgb2[0]*(alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0]*(lstar + cl_int[1])
        alph_int = cab_int[0]*(np.cos(cab_int[1]*np.pi/180.0)*alphpp - np.sin(cab_int[1]*np.pi/180.0)*betpp)
        bet_int = cab_int[0]*(np.sin(cab_int[1]*np.pi/180.0)*alphpp + np.cos(cab_int[1]*np.pi/180.0)*betpp)
        lstar_out = lstar_int
        
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int -  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_out = alph_int - Ccwb[0]*alph_int[0] # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int -  Ccwb[1]*bet_int[0]
  
            camout[i] = np.vstack((lstar_out[1:],alph_out[1:],bet_out[1:])).T # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))
            
            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])
        
            # stage 5 inverse: 
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out +  cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_int = alph_out + Ccwb[0]*alph_int[0] #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out +  Ccwb[1]*bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (np.cos(-cab_int[1]*np.pi/180.0)*alph_int - np.sin(-cab_int[1]*np.pi/180.0)*bet_int)
            betpp = (1.0 / cab_int[0]) * (np.sin(-cab_int[1]*np.pi/180.0)*alph_int + np.cos(-cab_int[1]*np.pi/180.0)*bet_int)
            lstar = (lstar_int /cl_int[0]) - cl_int[1] 
            
            # stage 4 inverse:
            alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0])  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0]) # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp/cga1[0]
            bet = betp/cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa*alphp)<0.0] = alphp[(sa*alphp)<0] / cga1[1] 
            bet[(sb*betp)<0.0] = betp[(sb*betp)<0] / cgb1[1] 
            lab = ajoin((lstar, alph, bet))
            
            # stage 2 inverse:
            lmstp = np.dot(invMAab,lab.T).T 
            lmstp[lmstp<-1.0] = -1.0
            lmstp[lmstp>1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf*np.log(lmsf/lms0)
            lmst = np.exp(lmstp) * lms0
            
            # stage 1 inverse:
            xyzt =  np.dot(invMxyz2lms,lmst.T).T   
            
            if relative == True:
                xyzt = (100.0/Lw) * xyzt
            
            camout[i] = xyzt
    
    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes = (1,0,2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
        
    return camout
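A minimal forward-mode sketch (assuming cam_sww16 and its luxpy dependencies are in scope; relative XYZ input with the default CIE illuminant C white point):

import numpy as np

xyz = np.array([[40.0, 30.0, 20.0]])   # relative xyz (Yw = 100)
lab_int = cam_sww16(xyz, dataw=None, Yb=20.0, Lw=400.0, relative=True,
                    inputtype='xyz', direction='forward')
print(lab_int.shape)                   # (1, 3): l_int, a_int, b_int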