Example #1
import numpy as np
import luxpy as lx

def normalize_to_Lw(Ill, Lw, cieobs, rflM):
    """Normalize illuminant spectra in :Ill: to luminance Lw and build stimulus spectra for the samples in :rflM:."""
    xyzw = lx.spd_to_xyz(Ill, cieobs=cieobs, relative=False)
    for i in range(Ill.shape[0] - 1):
        Ill[i + 1] = Lw * Ill[i + 1] / xyzw[i, 1]
    IllM = []
    for i in range(Ill.shape[0] - 1):
        IllM.append(np.vstack((Ill[0], Ill[i + 1] * rflM[1:, :])))
    IllM = np.transpose(np.array(IllM), (1, 0, 2))
    return Ill, IllM
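A minimal usage sketch for the helper above, assuming luxpy is importable as lx and using its illuminant C and Munsell reflectance set (names as used elsewhere on this page):

import luxpy as lx

Ill = lx._CIE_ILLUMINANTS['C'].copy()
rflM = lx.cie_interp(lx._MUNSELL['R'], Ill[0], kind='rfl')
Ill, IllM = normalize_to_Lw(Ill, Lw=400, cieobs='2006_10', rflM=rflM)
print(Ill.shape, IllM.shape)  # IllM: (n_samples + 1, n_illuminants, n_wavelengths)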
Example #2
def _massage_output_data_to_original_shape(camout, originalshape):
    """
    Massage output data to restore original shape of input.
    """
    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if len(originalshape) < 3:
        if camout.shape[1] == 1:
            camout = np.squeeze(camout, axis=1)

    return camout
Example #3
def join(self, data):
    """
    Join data along last axis and return instance.
    """
    if data[0].ndim == 2:  # faster implementation
        self.value = np.transpose(
            np.concatenate(data, axis=0).reshape((np.hstack(
                (len(data), data[0].shape)))), (1, 2, 0))
    elif data[0].ndim == 1:
        self.value = np.concatenate(data, axis=0).reshape((np.hstack(
            (len(data), data[0].shape)))).T
    else:
        self.value = np.hstack(data)[0]
    return self
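A small sketch of the 2D branch, using a stand-in holder class (hypothetical, only to bind the method above to an object with a .value attribute):

import numpy as np

class _Holder:
    join = join  # bind the method above

h = _Holder()
data = [np.ones((4, 2)), 2 * np.ones((4, 2)), 3 * np.ones((4, 2))]
h.join(data)
print(h.value.shape)  # (4, 2, 3): the list entries are stacked along the last axis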
Example #4
def _massage_output_data_to_original_shape(data, originalshape):
    """
    Massage output data to restore original shape of original CAM input.
    
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    # Flip light source dim back to axis 1:
    data = np.transpose(data, axes=(1, 0, 2))

    if len(originalshape) < 3:
        if data.shape[1] == 1:
            data = np.squeeze(data, axis=1)

    return data
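A pure-numpy round-trip sketch of what this massaging undoes: during the CAM loop the light source dimension sits on axis 0, and a 2D input gains a singleton axis that is squeezed away again:

import numpy as np

originalshape = (8, 3)                # N x 3 input without a light source axis
camout = np.random.rand(1, 8, 3)      # loop output: light source dim on axis 0
restored = _massage_output_data_to_original_shape(camout, originalshape)
print(restored.shape)                 # (8, 3)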
Example #5
def ndset(F):
    """
    Finds the nondominated set of a set of objective points.

    Args:
      :F: 
          | an (m x mu) ndarray with mu points and m objectives

    Returns:
      :ispar: 
          | a mu-length boolean vector, True at the nondominated points
    """
    mu = F.shape[1] #number of points

    # The idea is to compare each point with the other ones
    f1 = np.transpose(F[...,None], axes = [0, 2, 1]) #puts in the 3D direction
    f1 = np.repeat(f1,mu,axis=1)
    f2 = np.repeat(F[...,None],mu,axis=2)

    # Now, for the ii-th slice, the ii-th individual is compared with all of the
    # others at once. Then, the usual operations of domination are checked
    # Checks where f1 dominates f2
    aux1 = (f1 <= f2).all(axis = 0, keepdims = True)
    aux2 = (f1 < f2).any(axis = 0, keepdims = True)

    auxf1 = np.logical_and(aux1, aux2)
    # Checks where f1 is dominated by f2
    aux1 = (f1 >= f2).all(axis = 0, keepdims = True)
    aux2 = (f1 > f2).any(axis = 0, keepdims = True)
    auxf2 = np.logical_and(aux1, aux2)
    
    # dom will be a 3D matrix (1 x mu x mu) such that, for the ii-th slice, it
    # will contain +1 if fii dominates the current point, -1 if it is dominated 
    # by it, and 0 if they are incomparable
    dom = np.zeros((1, mu, mu), dtype = int)

    dom[auxf1] = 1
    dom[auxf2] = -1
    
    # Finally, the slices with no -1 are nondominated
    ispar = (dom != -1).all(axis = 1)
    ispar = ispar.flatten()
    return ispar
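A self-contained check of ndset (assuming minimization, as the domination tests above imply): the fourth point is dominated by the second, and the fifth by the third.

import numpy as np

F = np.array([[1.0, 2.0, 3.0, 2.5, 4.0],    # rows = objectives
              [4.0, 3.0, 1.0, 3.5, 2.0]])   # columns = points
print(ndset(F))  # [ True  True  True False False]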
Example #6
def _cij_to_gij(xyz,C):
    """ Convert from matrix elements describing the discrimination ellipses from Cij (XYZ) to gij (Yxy)"""
    SIG = xyz[...,0] + xyz[...,1] + xyz[...,2]
    M1 = np.array([SIG, -SIG*xyz[...,0]/xyz[...,1], xyz[...,0]/xyz[...,1]])
    M2 = np.array([np.zeros_like(SIG), np.zeros_like(SIG), np.ones_like(SIG)])
    M3 = np.array([-SIG, -SIG*(xyz[...,1] + xyz[...,2])/xyz[...,1], xyz[...,2]/xyz[...,1]])
    M = np.array((M1,M2,M3))
    
    M = _transpose_02(M) # move stimulus dimension to axis = 0
    
    C = _transpose_02(C) # move stimulus dimension to axis = 0
    
    # convert Cij (XYZ) to gij' (xyY):
    AM = np.einsum('ij,kjl->kil', _M_XYZ_TO_PQS, M)
    CAM = np.einsum('kij,kjl->kil', C, AM) 
#    ATCAM = np.einsum('ij,kjl->kil', _M_XYZ_TO_PQS.T, CAM)
#    gij = np.einsum('kij,kjl->kil', np.transpose(M,(0,2,1)), ATCAM) # gij = M.T*A.T*C*A*M = (AM).T*C*(AM)
    gij = np.einsum('kij,kjl->kil', np.transpose(AM,(0,2,1)), CAM) # gij = M.T*A.T*C*A*M = (AM).T*C*(AM)

    # convert gij' (xyY) to gij (Yxy):
    gij = np.roll(np.roll(gij,1,axis=2),1,axis=1)
    
    return gij
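The einsum calls above compute a batched quadratic form gij = (AM).T C (AM), one 3x3 matrix per stimulus. A minimal numpy sketch of that pattern:

import numpy as np

k, n = 4, 3
B = np.random.rand(k, n, n)   # one transform per stimulus (stands in for AM)
C = np.random.rand(k, n, n)   # one Cij matrix per stimulus
CB = np.einsum('kij,kjl->kil', C, B)                           # C @ B per slice
g = np.einsum('kij,kjl->kil', np.transpose(B, (0, 2, 1)), CB)  # B.T @ C @ B per slice
assert np.allclose(g[0], B[0].T @ C[0] @ B[0])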
Example #7
def _massage_input_and_init_output(data,
                                   dataw,
                                   inputtype='xyz',
                                   direction='forward',
                                   n_out=3):
    """
    Redimension input data to ensure they have the appropriate sizes for easy and efficient looping.
    |
    | 1. Convert data and dataw to atleast_2d ndarrays
    | 2. Make axis 1 of dataw have 'same' dimensions as data
    | 3. Make dataw have the same light source axis size as data
    | 4. Flip light source axis to axis=0 for efficient looping
    | 5. Initialize output array camout to 'same' shape as data but with camout.shape[-1] == n_out
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input the shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :n_out:
            | 3, optional
            | output size of last dimension of camout 
            | (e.g. n_out=3 for j,a,b output or n_out = 5 for J,M,h,a,b output)
            
    Returns:
        :data:
            | ndarray with reshaped data
        :dataw:
            | ndarray with reshaped dataw
        :camout:
            | NaN-filled ndarray for CAM output (camout.shape[-1] == n_out) 
        :originalshape:
            | original shape of data
            
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    # Convert data and dataw to atleast_2d ndarrays:
    data = np2d(data).copy(
    )  # stimulus data (can be upto NxMx3 for xyz, or [N x (M+1) x wl] for spd))
    dataw = np2d(dataw).copy(
    )  # white point (can be upto Nx3 for xyz, or [(N+1) x wl] for spd)
    originalshape = data.shape  # to restore output to same shape

    # Make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    dataw = np.expand_dims(
        dataw, axis=1)  # add extra axis to move light source to axis 0

    # Make dataw have the same light source dimension size as data:
    if inputtype == 'xyz':
        if dataw.shape[0] == 1:
            dataw = np.repeat(dataw, data.shape[0], axis=0)
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)
    else:
        dataw = np.array([
            np.vstack((dataw[:1, 0, :], dataw[i + 1:i + 2, 0, :]))
            for i in range(dataw.shape[0] - 1)
        ])
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)

    # Initialize output array:
    if n_out is not None:
        dshape = list((data).shape)
        dshape[-1] = n_out  # requested number of correlates: e.g. j,a,b
        if (inputtype != 'xyz') & (direction == 'forward'):
            dshape[-2] = dshape[
                -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
        camout = np.zeros(dshape)
        camout.fill(np.nan)
    else:
        camout = None
    return data, dataw, camout, originalshape
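A minimal call sketch (assumes the helper above is in scope together with luxpy's np2d utility):

import numpy as np

xyz = np.array([[19.01, 20.0, 21.78],
                [57.06, 43.06, 31.96]])    # N x 3 stimuli
xyzw = np.array([[95.05, 100.0, 108.88]])  # 1 x 3 white point
data, dataw, camout, originalshape = _massage_input_and_init_output(
    xyz, xyzw, inputtype='xyz', direction='forward', n_out=3)
print(data.shape, dataw.shape, camout.shape, originalshape)
# (1, 2, 3) (1, 1, 3) (1, 2, 3) (2, 3)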
Example #8
def plotellipse(v, cspace_in = 'Yxy', cspace_out = None, nsamples = 100, \
                show = True, axh = None, \
                line_color = 'darkgray', line_style = ':', line_width = 1, line_marker = '', line_markersize = 4,\
                plot_center = False, center_marker = 'o', center_color = 'darkgray', center_markersize = 4,\
                show_grid = False, llabel = '', label_fontname = 'Times New Roman', label_fontsize = 12,\
                out = None):
    """
    Plot ellipse(s) given in v-format [Rmax,Rmin,xc,yc,theta].
    
    Args:
        :v: 
            | (Nx5) ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :cspace_in:
            | 'Yxy', optional
            | Color space of v.
            | If None: no color space assumed. Axis labels assumed ('x','y').
        :cspace_out:
            | None, optional
            | Color space to plot ellipse(s) in.
            | If None: plot in cspace_in.
        :nsamples:
            | 100 or int, optional
            | Number of points (samples) in ellipse boundary
        :show:
            | True or boolean, optional
            | Plot ellipse(s) (True) or not (False)
        :axh: 
            | None, optional
            | Ax-handle to plot ellipse(s) in.
            | If None: create new figure with axes.
        :line_color:
            | 'darkgray', optional
            | Color to plot ellipse(s) in.
        :line_style:
            | ':', optional
            | Linestyle of ellipse(s).
        :line_width:
            | 1, optional
            | Width of ellipse boundary line.
        :line_marker:
            | '', optional
            | Marker for ellipse boundary.
        :line_markersize:
            | 4, optional
            | Size of markers in ellipse boundary.
        :plot_center:
            | False, optional
            | Plot center of ellipse: yes (True) or no (False)
        :center_color:
            | 'darkgray', optional
            | Color to plot ellipse center in.
        :center_marker:
            | 'o', optional
            | Marker for ellipse center.
        :center_markersize:
            | 4, optional
            | Size of marker of ellipse center.
        :show_grid:
            | False, optional
            | Show grid (True) or not (False)
        :llabel:
            | None, optional
            | Legend label for ellipse boundary.
        :label_fontname: 
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :out:
            | None, optional
            | Output of function.
            | If None: returns None. Can be used to output the axh of the newly 
            |      created figure axes, or to return Yxys, an ndarray with the 
            |      coordinates of the ellipse boundaries in cspace_out 
            |      (shape = (nsamples, N, 3)) 
            
        
    Returns:
        :returns: None, or whatever set by :out:.
    """
    Yxys = np.zeros((nsamples, 3, v.shape[0]))
    ellipse_vs = np.zeros((v.shape[0], 5))
    for i, vi in enumerate(v):

        # Set sample density of ellipse boundary:
        t = np.linspace(0, 2 * np.pi, int(nsamples))

        a = vi[0]  # major axis
        b = vi[1]  # minor axis
        xyc = vi[2:4, None]  # center
        theta = vi[-1]  # rotation angle

        # define rotation matrix:
        R = np.hstack((np.vstack((np.cos(theta), np.sin(theta))),
                       np.vstack((-np.sin(theta), np.cos(theta)))))

        # Calculate ellipses:
        Yxyc = np.vstack((1, xyc)).T
        Yxy = np.vstack(
            (np.ones((1, nsamples)),
             xyc + np.dot(R, np.vstack((a * np.cos(t), b * np.sin(t)))))).T
        Yxys[:, :, i] = Yxy

        # Convert to requested color space:
        if (cspace_out is not None) & (cspace_in is not None):
            Yxy = colortf(Yxy, cspace_in + '>' + cspace_out)
            Yxyc = colortf(Yxyc, cspace_in + '>' + cspace_out)
            Yxys[:, :, i] = Yxy

            # get ellipse parameters in requested color space:
            ellipse_vs[i, :] = math.fit_ellipse(Yxy[:, 1:])
            #de = np.sqrt((Yxy[:,1]-Yxyc[:,1])**2 + (Yxy[:,2]-Yxyc[:,2])**2)
            #ellipse_vs[i,:] = np.hstack((de.max(),de.min(),Yxyc[:,1],Yxyc[:,2],np.nan)) # nan because orientation is xy, but request is some other color space. Change later to actual angle when fitellipse() has been implemented

        # plot ellipses:
        if show == True:
            if (axh is None) & (i == 0):
                fig = plt.figure()
                axh = fig.add_subplot(111)

            if (cspace_in is None):
                xlabel = 'x'
                ylabel = 'y'
            else:
                xlabel = _CSPACE_AXES[cspace_in][1]
                ylabel = _CSPACE_AXES[cspace_in][2]

            if (cspace_out is not None):
                xlabel = _CSPACE_AXES[cspace_out][1]
                ylabel = _CSPACE_AXES[cspace_out][2]

            if plot_center == True:
                axh.plot(Yxyc[:, 1],
                         Yxyc[:, 2],
                         color=center_color,
                         linestyle='none',
                         marker=center_marker,
                         markersize=center_markersize)
            if llabel is None:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize)
            else:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize,
                         label=llabel)

            axh.set_xlabel(xlabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            axh.set_ylabel(ylabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            if show_grid == True:
                plt.grid(True)
            #plt.show()
    Yxys = np.transpose(Yxys, axes=(0, 2, 1))
    if out is not None:
        return eval(out)
    else:
        return None
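A minimal usage sketch (assumes plotellipse and its luxpy/matplotlib dependencies are in scope):

import numpy as np

v = np.array([[0.02, 0.01, 0.33, 0.35, np.pi / 6]])  # [Rmax, Rmin, xc, yc, theta]
Yxys = plotellipse(v, cspace_in='Yxy', nsamples=200, show=True,
                   plot_center=True, llabel='ellipse 1', out='Yxys')
print(Yxys.shape)  # (200, 1, 3)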
Example #9
def VF_colorshift_model(S, cri_type = _VF_CRI_DEFAULT, model_type = _VF_MODEL_TYPE, \
                        cspace = _VF_CSPACE, sampleset = None, pool = False, \
                        pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),'Cref' : _VF_MAXR, 'sig' : _VF_SIG}, \
                        vfcolor = 'k',verbosity = 0):
    """
    Applies full vector field model calculations to spectral data.
    
    Args:
        :S: 
            | numpy.ndarray with spectral data.
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool: 
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            | for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            | for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                 'Cref' : _VF_MAXR, 
            |                 'sig' : _VF_SIG, 
            |                 'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
            
    Returns:
        :returns: 
            | list[dict] (each list element refers to a different test SPD)
            | with the following keys:
            |   - 'Source': dict with ndarrays of the S, cct and duv of source spd.
            |   - 'metrics': dict with ndarrays for:
            |         * Rf (color fidelity: base + metameric shift)
            |         * Rt (metameric uncertainty index) 
            |         * Rfi (specific color fidelity indices)
            |         * Rti (specific metameric uncertainty indices)
            |         * cri_type (str with cri_type)
            |   - 'Jab': dict with ndarrays for Jabt, Jabr, DEi
            |   - 'dC/C_dH_x_sig' : 
            |           np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T
            |           See get_poly_model() for more info.
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |      vector-field and circle-fields: 
            |        * 'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 
            |                           'axr' : vfaxr, 'bxr' : vfbxr},
            |        * 'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 
            |                           'axr' : cfaxr, 'bxr' : cfbxr}},
            |   - 'modeldata' : dict with model info:
            |                {'pmodel': pmodel, 
            |                'pcolorshift' : pcolorshift, 
            |                  'dab_model' : dab_model, 
            |                  'dab_res' : dab_res,
            |                  'dab_std' : dab_std,
            |                  'modeltype' : modeltype, 
            |                  'fmodel' : poly_model,
            |                  'Jabtm' : Jabtm, 
            |                  'Jabrm' : Jabrm, 
            |                  'DEim' : DEim},
            |   - 'vshifts' :dict with various vector shifts:
            |        * 'Jabshiftvector_r_to_t' : ndarray with difference vectors
            |                                    between jabt and jabr.
            |        * 'vshift_ab_s' : vshift_ab_s: ab-shift vectors of samples 
            |        * 'vshift_ab_s_vf' : vshift_ab_s_vf: ab-shift vectors of 
            |                             VF model predictions of samples.
            |        * 'vshift_ab_vf' : vshift_ab_vf: ab-shift vectors of VF 
            |                            model predictions of vector field grid.
    """

    if type(cri_type) == str:
        cri_type_str = cri_type
    else:
        cri_type_str = None

    # Calculate Rf, Rfi and Jabr, Jabt:
    Rf, Rfi, Jabt, Jabr, cct, duv, cri_type = spd_to_cri(
        S,
        cri_type=cri_type,
        out='Rf,Rfi,jabt,jabr,cct,duv,cri_type',
        sampleset=sampleset)

    # In case of multiple source SPDs, pool:
    if (len(Jabr.shape) == 3) & (Jabr.shape[1] > 1) & (pool == True):
        #Nsamples = Jabr.shape[0]
        Jabr = np.transpose(Jabr, (1, 0, 2))  # set lamps on first dimension
        Jabt = np.transpose(Jabt, (1, 0, 2))
        Jabr = Jabr.reshape(Jabr.shape[0] * Jabr.shape[1],
                            3)  # put all lamp data one after the other
        Jabt = Jabt.reshape(Jabt.shape[0] * Jabt.shape[1], 3)
        Jabt = Jabt[:, None, :]  # add dim = 1
        Jabr = Jabr[:, None, :]

    out = [{} for _ in range(Jabr.shape[1])]  #initialize empty list of dicts
    if pool == False:
        N = Jabr.shape[1]
    else:
        N = 1
    for i in range(N):

        Jabr_i = Jabr[:, i, :].copy()
        Jabr_i = Jabr_i[:, None, :]
        Jabt_i = Jabt[:, i, :].copy()
        Jabt_i = Jabt_i[:, None, :]

        DEi = np.sqrt((Jabr_i[..., 0] - Jabt_i[..., 0])**2 +
                      (Jabr_i[..., 1] - Jabt_i[..., 1])**2 +
                      (Jabr_i[..., 2] - Jabt_i[..., 2])**2)

        # Determine polynomial model:
        poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = get_poly_model(
            Jabt_i, Jabr_i, modeltype=model_type)

        # Apply model at fixed hues:
        href = pcolorshift['href']
        Cref = pcolorshift['Cref']
        sig = pcolorshift['sig']
        dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(
            poly_model, pmodel, dCHoverC_res, hx=href, Cxr=Cref, sig=sig)

        # Calculate deshifted a,b values on original samples:
        Jt = Jabt_i[..., 0].copy()
        at = Jabt_i[..., 1].copy()
        bt = Jabt_i[..., 2].copy()
        Jr = Jabr_i[..., 0].copy()
        ar = Jabr_i[..., 1].copy()
        br = Jabr_i[..., 2].copy()
        ar = ar + dab_model[:, 0:1]  # deshift reference to model prediction
        br = br + dab_model[:, 1:2]  # deshift reference to model prediction

        Jabtm = np.hstack((Jt, at, bt))
        Jabrm = np.hstack((Jr, ar, br))

        # calculate color differences between test and deshifted ref:
        #        DEim = np.sqrt((Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2)
        DEim = np.sqrt(0 * (Jr - Jt)**2 + (at - ar)**2 +
                       (bt - br)**2)  # J is not used

        # Apply scaling function to convert DEim to Rti:
        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        avg = cri_type['avg']
        Rfi_deshifted = scale_fcn(DEim, scale_factor)
        Rf_deshifted = scale_fcn(avg(DEim, axis=0), scale_factor)

        rms = lambda x: np.sqrt(np.sum(x**2, axis=0) / x.shape[0])
        Rf_deshifted_rms = scale_fcn(rms(DEim), scale_factor)

        # Generate vector field:
        vfaxt, vfbxt, vfaxr, vfbxr = generate_vector_field(
            poly_model,
            pmodel,
            axr=np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            bxr=np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            limit_grid_radius=_VF_MAXR,
            color=0)

        # Calculate ab-shift vectors of samples and VF model predictions:
        vshift_ab_s = calculate_shiftvectors(Jabt_i,
                                             Jabr_i,
                                             average=False,
                                             vtype='ab')[:, 0, 0:3]
        vshift_ab_s_vf = calculate_shiftvectors(Jabtm,
                                                Jabrm,
                                                average=False,
                                                vtype='ab')

        # Calculate ab-shift vectors using vector field model:
        Jabt_vf = np.hstack((np.zeros((vfaxt.shape[0], 1)), vfaxt, vfbxt))
        Jabr_vf = np.hstack((np.zeros((vfaxr.shape[0], 1)), vfaxr, vfbxr))
        vshift_ab_vf = calculate_shiftvectors(Jabt_vf,
                                              Jabr_vf,
                                              average=False,
                                              vtype='ab')

        # Generate circle field:
        x, y = plotcircle(radii=np.arange(0, _VF_MAXR + _VF_DELTAR, 10),
                          angles=np.arange(0, 359, 1),
                          out='x,y')
        cfaxt, cfbxt, cfaxr, cfbxr = generate_vector_field(
            poly_model,
            pmodel,
            make_grid=False,
            axr=x[:, None],
            bxr=y[:, None],
            limit_grid_radius=_VF_MAXR,
            color=0)

        out[i] = {
            'Source': {
                'S': S,
                'cct': cct[i],
                'duv': duv[i]
            },
            'metrics': {
                'Rf': Rf[:, i],
                'Rt': Rf_deshifted,
                'Rt_rms': Rf_deshifted_rms,
                'Rfi': Rfi[:, i],
                'Rti': Rfi_deshifted,
                'cri_type': cri_type_str
            },
            'Jab': {
                'Jabt': Jabt_i,
                'Jabr': Jabr_i,
                'DEi': DEi
            },
            'dC/C_dH_x_sig':
            np.vstack((dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig)).T,
            'fielddata': {
                'vectorfield': {
                    'axt': vfaxt,
                    'bxt': vfbxt,
                    'axr': vfaxr,
                    'bxr': vfbxr
                },
                'circlefield': {
                    'axt': cfaxt,
                    'bxt': cfbxt,
                    'axr': cfaxr,
                    'bxr': cfbxr
                }
            },
            'modeldata': {
                'pmodel': pmodel,
                'pcolorshift': pcolorshift,
                'dab_model': dab_model,
                'dab_res': dab_res,
                'dab_std': dab_std,
                'model_type': model_type,
                'fmodel': poly_model,
                'Jabtm': Jabtm,
                'Jabrm': Jabrm,
                'DEim': DEim
            },
            'vshifts': {
                'Jabshiftvector_r_to_t': np.hstack(
                    (Jt - Jr, at - ar, bt - br)),
                'vshift_ab_s': vshift_ab_s,
                'vshift_ab_s_vf': vshift_ab_s_vf,
                'vshift_ab_vf': vshift_ab_vf
            }
        }

    return out
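A minimal usage sketch (assumes luxpy is importable as lx; illuminant C as used elsewhere on this page):

import luxpy as lx

S = lx._CIE_ILLUMINANTS['C'].copy()
results = VF_colorshift_model(S, pool=False, verbosity=0)
print(results[0]['metrics']['Rf'], results[0]['metrics']['Rt'])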
Example #10
def _transpose_02(C):
    """Move the stimulus dimension (last axis) to axis 0; add a singleton axis 0 if C is 2D."""
    if C.ndim == 3:
        C = np.transpose(C, (2, 0, 1))
    else:
        C = C[None, ...]
    return C
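Shape behavior of this helper, sketched with random data:

import numpy as np

C = np.random.rand(3, 3, 5)             # 5 stimuli along the last axis
print(_transpose_02(C).shape)           # (5, 3, 3)
print(_transpose_02(C[..., 0]).shape)   # (1, 3, 3): 2D input gains axis 0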
Example #11
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
            |  or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral

    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms /
                   _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = A + cHK[0] * M**cHK[
                1]  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
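A forward/inverse round-trip sketch (assumes cam15u and its luxpy dependencies are in scope; the inverse should approximately recover the input):

import numpy as np

xyz = np.array([[100.0, 100.0, 100.0],
                [60.0, 80.0, 40.0]])  # absolute CIE 2006 10° XYZ
qab = cam15u(xyz, fov=10.0, direction='forward', outin='Q,aW,bW')
xyz_back = cam15u(qab, fov=10.0, direction='inverse', outin='Q,aW,bW')
print(qab)
print(xyz_back)  # ~ xyz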
Example #12
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input the shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  #input is expected to be absolute
            dataw[1:] = Lw * dataw[
                1:] / xyzw[:, 1:2]  #dataw = Lw*dataw # make absolute
        else:
            dataw = dataw  # make relative (Y=100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy(
    )  # stimulus data (can be upto NxMx3 for xyz, or [N x (M+1) x wl] for spd))
    dataw = np2d(dataw).copy(
    )  # white point (can be upto Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[
                0] == 1:  #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i +
                      1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[
                        0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[
                    0]  # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[
                    0]  #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 invers:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
Example #13
    # (start of snippet truncated; C is presumably built by stacking illuminant spectra, e.g.:)
    C = _CIE_ILLUMINANTS['C'].copy()
    C = np.vstack((C, cie_interp(_CIE_ILLUMINANTS['D65'], C[0],
                                 kind='spd')[1:], C[1:, :] * 2, C[1:, :] * 3))
    M = _MUNSELL.copy()
    rflM = M['R']
    rflM = cie_interp(rflM, C[0], kind='rfl')
    cieobs = '2006_10'
    Lw = 400
    Yb = 20

    # Normalize to Lw:
    xyzw2 = spd_to_xyz(C, cieobs=cieobs, relative=False)
    for i in range(C.shape[0] - 1):
        C[i + 1] = Lw * C[i + 1] / xyzw2[i, 1]
    CM = []
    for i in range(C.shape[0] - 1):
        CM.append(np.vstack((C[0], C[i + 1] * rflM[1:, :])))
    CM = np.transpose(np.array(CM), (1, 0, 2))

    xyz, xyzw = spd_to_xyz(C, cieobs=cieobs, relative=True, rfl=rflM, out=2)
    xyz = xyz[:4, 0, :]
    CM = CM[:5, 0, :]
    #xyzw = np.vstack((xyzw[:1,:],xyzw[:1,:]))
    xyzw = xyzw[:1, ...]
    C = C[:2, :]

    print('xyz in:')
    lab = cam_sww16(xyz, dataw = xyzw, Yb = Yb, Lw = Lw, Ccwb = 1, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = cieobs)
    print(lab)

    print('spd in:')
Example #14
def spd_to_xyz(data,
               relative=True,
               rfl=None,
               cieobs=_CIEOBS,
               K=None,
               out=None,
               cie_std_dev_obs=None):
    """
    Calculates xyz tristimulus values from spectral data.
       
    Args: 
        :data: 
            | ndarray or pandas.dataframe with spectral data
            | (.shape = (number of spectra + 1, number of wavelengths))
            | Note that :data: is never interpolated, only CMFs and RFLs. 
            | This way interpolation errors due to peaky spectra are avoided. 
            | Conform CIE15-2018.
        :relative: 
            | True or False, optional
            | Calculate relative XYZ (Yw = 100) or absolute XYZ (Y = Luminance)
        :rfl: 
            | ndarray with spectral reflectance functions.
            | Will be interpolated if wavelengths do not match those of :data:
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines the color matching functions to be used in the 
            | calculation of XYZ.
        :K: 
            | None, optional
            |   e.g.  K  = 683 lm/W for '1931_2' (relative == False) 
            |   or K = 100/sum(spd*dl)        (relative == True)
        :out:
            | None or 1 or 2, optional
            | Determines number and shape of output. (see :returns:)
        :cie_std_dev_obs: 
            | None or str, optional
            | - None: don't use CIE Standard Deviate Observer function.
            | - 'f1': use F1 function.
    
    Returns:
        :returns:
            | If rfl is None:
            |    If out is None: ndarray of xyz values 
            |        (.shape = (data.shape[0],3))
            |    If out == 1: ndarray of xyz values 
            |        (.shape = (data.shape[0],3))
            |    If out == 2: (ndarray of xyz, ndarray of xyzw) values
            |        Note that xyz == xyzw, with (.shape = (data.shape[0],3))
            | If rfl is not None:
            |   If out is None: ndarray of xyz values 
            |         (.shape = (rfl.shape[0],data.shape[0],3))
            |   If out == 1: ndarray of xyz values 
            |       (.shape = (rfl.shape[0]+1,data.shape[0],3))
            |        The xyzw values of the light source spd are the first set 
            |        of values of the first dimension. The following values 
            |       along this dimension are the sample (rfl) xyz values.
            |    If out == 2: (ndarray of xyz, ndarray of xyzw) values
            |        with xyz.shape = (rfl.shape[0],data.shape[0],3)
            |        and with xyzw.shape = (data.shape[0],3)
             
    References:
        1. `CIE15:2018, “Colorimetry,” CIE, Vienna, Austria, 2018. <https://doi.org/10.25039/TR.015.2018>`_
    """

    data = getdata(data,
                   kind='np') if isinstance(data, pd.DataFrame) else np2d(
                       data)  # convert to np format and ensure 2D-array

    # get wl spacing:
    dl = getwld(data[0])

    # get cmf,k for cieobs:
    if isinstance(cieobs, str):
        if K is None: K = _CMF[cieobs]['K']
        scr = 'dict'
    else:
        scr = 'cieobs'
        if (K is None) & (relative == False): K = 1

    # Interpolate to wl of data:
    cmf = xyzbar(cieobs=cieobs, scr=scr, wl_new=data[0], kind='np')

    # Add CIE standard deviate observer function to cmf if requested:
    if cie_std_dev_obs is not None:
        cmf_cie_std_dev_obs = xyzbar(cieobs='cie_std_dev_obs_' +
                                     cie_std_dev_obs.lower(),
                                     scr=scr,
                                     wl_new=data[0],
                                     kind='np')
        cmf[1:] = cmf[1:] + cmf_cie_std_dev_obs[1:]

    # Rescale xyz using k or 100/Yw:
    if relative == True: K = 100.0 / np.dot(data[1:], cmf[2, :] * dl)

    # Interpolate rfls to lambda range of spd and calculate xyz:
    if rfl is not None:
        rfl = cie_interp(data=np2d(rfl), wl_new=data[0], kind='rfl')
        rfl = np.concatenate((np.ones((1, data.shape[1])),
                              rfl[1:]))  #add rfl = 1 for light source spectrum
        xyz = K * np.array(
            [np.dot(rfl, (data[1:] * cmf[i + 1, :] * dl).T)
             for i in range(3)])  #calculate tristimulus values
        rflwasnotnone = 1
    else:
        rfl = np.ones((1, data.shape[1]))
        xyz = (K * (np.dot((cmf[1:] * dl), data[1:].T))[:, None, :])
        rflwasnotnone = 0
    xyz = np.transpose(xyz, [1, 2, 0])  #order [rfl,spd,xyz]

    # Setup output:
    if out == 2:
        xyzw = xyz[0, ...]
        xyz = xyz[rflwasnotnone:, ...]
        if rflwasnotnone == 0: xyz = np.squeeze(xyz, axis=0)
        return xyz, xyzw
    elif out == 1:
        if rflwasnotnone == 0: xyz = np.squeeze(xyz, axis=0)
        return xyz
    else:
        xyz = xyz[rflwasnotnone:, ...]
        if rflwasnotnone == 0: xyz = np.squeeze(xyz, axis=0)
        return xyz
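A minimal usage sketch (assumes luxpy is importable as lx; _MUNSELL as used elsewhere on this page):

import luxpy as lx

spd = lx._CIE_ILLUMINANTS['D65'].copy()
xyz = lx.spd_to_xyz(spd)                      # relative XYZ (Yw = 100)
xyz_abs = lx.spd_to_xyz(spd, relative=False)  # absolute XYZ
xyz_s, xyzw = lx.spd_to_xyz(spd, rfl=lx._MUNSELL['R'], out=2)  # per-sample + white point
print(xyz.shape, xyz_s.shape, xyzw.shape)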
Example #15
def cam18sl(data,
            datab=None,
            Lb=[100],
            fov=10.0,
            inputtype='xyz',
            direction='forward',
            outin='Q,aS,bS',
            parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
            |  of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used in case datab == None and the background is assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aS,bS' or str, optional
            | 'Q,aS,bS' (brightness and opponent signals for saturation)
            |  other options: 'Q,aM,bM' (colorfulness) 
            |                 (Note that 'Q,aW,bW' would lead to a Cartesian 
            |                  a,b-coordinate system centered at (1,0))
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)
        | * Default output was 'Q,aW,bW' prior to March 2020, but since this
        |   is an a,b Cartesian system centered on (1,0), the default output
        |   has been changed to 'Q,aS,bS'.

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """

    if parameters is None:
        parameters = _CAM18SL_PARAMETERS

    outin = outin.split(',')

    # unpack model parameters (keys sorted alphabetically to match the variable list):
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cieobs, k, naka, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF[cieobs]['M'])
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs=cieobs, relative=False)[..., 1:2]
        else:
            Lb = datab[..., 1:2]
    else:
        if isinstance(Lb, list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0]  # use wlr of stimulus data
        else:
            wlr = datab[0]  # use wlr of background data
    datar = np.vstack((wlr, np.ones((Lb.shape[0], wlr.shape[0]))))  # create EEW
    xyzr = spd_to_xyz(datar, cieobs=cieobs, relative=False)  # get abs. tristimulus values
    datar[1:] = datar[1:] / xyzr[..., 1:2] * Lb
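    # (the division by xyzr[..., 1:2], the luminance of the unit-amplitude EEW,
    #  rescales each EEW spectrum so its luminance equals the corresponding Lb)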

    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs=cieobs, relative=False)

    # prepare data and datab for loop over backgrounds:
    # make axis 1 of datab have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        datar = spd_to_xyz(datar, cieobs=cieobs, relative=False)  # convert to xyz!!
        if datab.shape[0] == 1:  # make datab and datar have the same light source dimension (axis 0, used to store different backgrounds) as data
            datab = np.repeat(datab, data.shape[1], axis=0)
            datar = np.repeat(datar, data.shape[1], axis=0)
    else:
        if datab.shape[0] == 2:
            datab = np.vstack(
                (datab[0], np.repeat(datab[1:], data.shape[1], axis=0)))
        if datar.shape[0] == 2:
            datar = np.vstack(
                (datar[0], np.repeat(datar[1:], data.shape[1], axis=0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    #-------------------------------------------------

    #initialize camout:
    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only in forward mode can the input be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
        else:
            xyzb = datab[i:i + 1, :]
            xyzr = datar[i:i + 1, :]

        lmsb = np.dot(_CMF[cieobs]['M'], xyzb.T).T  # convert to l,m,s
        rgbb = (lmsb / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF[cieobs]['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF[cieobs]['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape) * Lb[i]  # explicitly set EEW cone excitations equal to the background luminance values

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs=cieobs, relative=False)
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF[cieobs]['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta

            # apply von Kries CAT with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr / rgbb)[0])
            rgba = np.dot(Mcat, rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'])
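            # (a note on the compression above: luxpy's naka_rushton applies the usual
            #  compressive response, roughly scaling * x**n / (x**n + sig**n) + noise,
            #  with the semi-saturation sig derived here from the mean reference excitation)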

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA * (A + cHK[0] * M**cHK[1])  # last term is the contribution of the Helmholtz-Kohlrausch effect to brightness

            # calculate saturation, s:
            s = M / Q
            S = s  # make extra variable, just in case 'S' is called

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0] * (s**cW[1]))

            # adjust Q for stimulus size (fov); whether to do this before or after calculating s or W is debatable (there was no data on s, M or W for differently sized stimuli), so it is done after:
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aS', 'bS']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aS, bS))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                if 'W' in outin:
                    s = (((1.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q / cA - cHK[0] * M**cHK[1]
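            # (this inverts the forward brightness equation Q = cA*(A + cHK[0]*M**cHK[1]) for A)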

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)

            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'],
                                direction='inverse')

            # apply inverse von Kries CAT with D = 1:
            rgb = np.dot(np.diag((rgbb / rgbr)[0]), rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb / k * _CMF[cieobs]['K']
            xyz = np.dot(Mlms2xyz, lms.T).T

            camout[i] = xyz

    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[1] == 1:
        camout = np.squeeze(camout, axis=1)

    return camout
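
A minimal round-trip sketch for this example (hedged: it assumes cam18sl is reachable as lx.cam.cam18sl, though it may also be exposed at luxpy's top level, and uses hypothetical stimulus values; absolute CIE 2006 10° XYZ are required):

import numpy as np
import luxpy as lx

xyz = np.array([[200.0, 180.0, 120.0]])  # hypothetical absolute CIE 2006 10° XYZ of a self-luminous stimulus
qab = lx.cam.cam18sl(xyz, datab=None, Lb=[100], fov=10.0,
                     inputtype='xyz', direction='forward', outin='Q,aS,bS')
xyz_back = lx.cam.cam18sl(qab, datab=None, Lb=[100], fov=10.0,
                          inputtype='xyz', direction='inverse', outin='Q,aS,bS')
# xyz_back should reproduce xyz up to numerical precision.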
Example No. 16
def spd_to_ies_tm30_metrics(St, cri_type = None, \
                            hbins = 16, start_hue = 0.0,\
                            scalef = 100, \
                            vf_model_type = _VF_MODEL_TYPE, \
                            vf_pcolorshift = _VF_PCOLORSHIFT,\
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculates IES TM30 metrics from spectral data.      
      
      Args:
        :St:
            | numpy.ndarray with spectral data 
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Non-None values of :hbins:, :start_hue: and :scalef: overwrite 
            | the corresponding entries in cri_type['rg_pars'] 
        :hbins:
            | 16 or numpy.ndarray with sorted hue bin centers (°), optional
        :start_hue: 
            | 0.0, optional
        :scalef:
            | 100, optional
            | Scale factor for reference circle.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at respectively 5 and 6 hues. :VF_pcolorshift: specifies 
            | these hues and chroma level.
        :scale_vf_chroma_to_sample_chroma: 
            | False, optional
            | Scale chroma of reference and test vf fields such that average of 
            | binned reference chroma equals that of the binned sample chroma
            | before calculating hue bin metrics.
            
    Returns:
        :data: 
            | Dictionary with color rendering data:
            | 
            | - 'St, Sr'  : ndarray of test SPDs and corresponding ref. illuminants.
            | - 'xyz_cct': xyz of white point calculated with the cieobs defined for cct calculations in cri_type['cieobs']
            | - 'cct, duv': CCT and Duv obtained with cieobs in cri_type['cieobs']['cct']
            | - 'xyzti, xyzri': ndarray tristimulus values of test and ref. samples (obtained with cieobs in cri_type['cieobs']['xyz'])
            | - 'xyztw, xyzrw': ndarray tristimulus values of test and ref. white points (obtained with cieobs in cri_type['cieobs']['xyz'])
            | - 'DEi, DEa': ndarray with individual sample color differences DEi and average DEa between test and ref.       
            | - 'Rf'  : ndarray with general color fidelity index values
            | - 'Rg'  : ndarray with color gamut area index values
            | - 'Rfi'  : ndarray with specific (sample) color fidelity indices
            | - 'Rfhj' : ndarray with local (hue binned) fidelity indices
            | - 'DEhj' : ndarray with local (hue binned) color differences
            | - 'Rcshj': ndarray with local chroma shift indices
            | - 'Rhshj': ndarray with local hue shift indices
            | - 'hue_bin_data': dict with output from _get_hue_bin_data() [see its help for more info]
            | - 'cri_type': same as input (for reference purposes)
            | - 'vf' : dictionary with vector field measures and data.
            |         Keys:
            |           - 'Rt'  : ndarray with general metameric uncertainty index Rt
            |           - 'Rti' : ndarray with specific metameric uncertainty indices Rti
            |           - 'Rfhj' : ndarray with local (hue binned) fidelity indices 
            |                            obtained from VF model predictions at color space
            |                            pixel coordinates
            |           - 'DEhj' : ndarray with local (hue binned) color differences
            |                           (same as above)
            |           - 'Rcshj': ndarray with local chroma shift indices for vector field coordinates
            |                           (same as above)
            |           - 'Rhshj': ndarray with local hue shift indices for vector field coordinates
            |                           (same as above)
            |           - 'Rfi': ndarray with sample fidelity indices for vectorfield coordinates
            |                           (same as above)
            |           - 'DEi': ndarray with sample color differences for vectorfield coordinates
            |                           (same as above)
            |           - 'hue_bin_data': dict with output from _get_hue_bin_data() for vectorfield coordinates
            |           - 'dataVF': dictionary with output of cri.VFPX.VF_colorshift_model()
    """
    if cri_type is None:
        cri_type = 'iesrf'

    if isinstance(cri_type,str): # get dict 
        cri_type = copy.deepcopy(_CRI_DEFAULTS[cri_type])
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins 
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    
    #Calculate color rendering measures for SPDs in St:      
    data,_ = spd_to_cri(St, cri_type = cri_type, out = 'data,hue_bin_data', 
                        fit_gamut_ellipse = True)
    hdata = data['hue_bin_data']
    Rfhj, Rcshj, Rhshj = data['Rfhj'], data['Rcshj'], data['Rhshj']
    cct = data['cct']
    
    #Calculate Metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(St, cri_type = cri_type, 
                                 model_type = vf_model_type, 
                                 cspace = cri_type['cspace'], 
                                 sampleset = eval(cri_type['sampleset']), 
                                 pool = False, 
                                 pcolorshift = vf_pcolorshift, 
                                 vfcolor = 0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti'] for i in range(len(dataVF))][0])
    _data_vf = {'Rt' : Rt, 'Rti' : Rti, 'Rf_' : Rf_} # add to dict for output

    # Get normalized and sliced hue-bin _hj data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [rg_pars[x] for x in sorted(rg_pars.keys())]
    
    # Get chroma of samples:    
    if scale_vf_chroma_to_sample_chroma == True:
        jabt_hj_closed, jabr_hj_closed = hdata['jabt_hj_closed'], hdata['jabr_hj_closed']
        Cr_hj_s = (np.sqrt(jabr_hj_closed[:-1,...,1]**2 + jabr_hj_closed[:-1,...,2]**2)).mean(axis=0) # for rescaling vector field average reference chroma

    #jabtn_hj_closed, jabrn_hj_closed = hdata['jabtn_hj_closed'], hdata['jabrn_hj_closed']
    
    # get vector field data for each source (must be on 2nd dim):
    _vf = [dataVF[i]['fielddata']['vectorfield'] for i in range(cct.shape[0])]
    jabt_vf = np.transpose(np.array([np.hstack((np.ones(v['axt'].shape), v['axt'], v['bxt'])) for v in _vf]), (1, 0, 2))
    jabr_vf = np.transpose(np.array([np.hstack((np.ones(v['axr'].shape), v['axr'], v['bxr'])) for v in _vf]), (1, 0, 2))
    
    # Get hue bin data for vector field data:
    hue_bin_data_vf = _get_hue_bin_data(jabt_vf, jabr_vf, 
                                        start_hue = start_hue, nhbins = nhbins,
                                        normalized_chroma_ref = normalized_chroma_ref )
    
    # Rescale chroma of vector field such that it is on average equal to that of the binned samples:
    if scale_vf_chroma_to_sample_chroma == True:
        Cr_vf_hj, Cr_vf, Ct_vf = hue_bin_data_vf['Cr_hj'], hue_bin_data_vf['Cr'], hue_bin_data_vf['Ct']
        hr_vf, ht_vf = hue_bin_data_vf['hr'], hue_bin_data_vf['ht']
        fC = np.nanmean(Cr_hj_s)/np.nanmean(Cr_vf_hj)
        jabr_vf[...,1], jabr_vf[...,2] = fC * Cr_vf*np.cos(hr_vf), fC * Cr_vf*np.sin(hr_vf)
        jabt_vf[...,1], jabt_vf[...,2] = fC * Ct_vf*np.cos(ht_vf), fC * Ct_vf*np.sin(ht_vf)
        
        # Get new hue bin data for rescaled vector field data:
        hue_bin_data_vf = _get_hue_bin_data(jabt_vf, jabr_vf, 
                                            start_hue = start_hue, nhbins = nhbins,
                                            normalized_chroma_ref = normalized_chroma_ref )
    
    # Get scale factor and scaling function for Rfx:
    scale_factor = cri_type['scale']['cfactor']
    scale_fcn = cri_type['scale']['fcn']

    # Calculate Local color fidelity, chroma and hue shifts for vector field data:
    (Rcshj_vf, Rhshj_vf,
     Rfhj_vf, DEhj_vf) = _hue_bin_data_to_rxhj(hue_bin_data_vf, 
                                               cri_type = cri_type,
                                               scale_factor = scale_factor,
                                               scale_fcn = scale_fcn) 
                                               
    # Get sample color fidelity for vector field data:
    (Rfi_vf, DEi_vf) = _hue_bin_data_to_rfi(hue_bin_data_vf, 
                                            cri_type = cri_type,
                                            scale_factor = scale_factor,
                                            scale_fcn = scale_fcn)
    # Store in dict:
    _data_vf.update({'Rfi' : Rfi_vf, 'DEi' : DEi_vf,
                     'Rcshj' : Rcshj_vf, 'Rhshj' : Rhshj_vf,
                     'Rfhj' : Rfhj_vf, 'DEhj': DEhj_vf,
                     'dataVF' : dataVF, 'hue_bin_data' : hue_bin_data_vf})
    
    # Add to main dictionary:
    data['vf'] = _data_vf
    return data
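
A minimal usage sketch (hedged: it assumes luxpy's _CIE_ILLUMINANTS dict provides an 'F4' test SPD; any ndarray with a wavelength row followed by spectral value rows should work):

import luxpy as lx

St = lx._CIE_ILLUMINANTS['F4']  # assumption: F4 is available as a packaged test SPD
data = spd_to_ies_tm30_metrics(St, hbins=16, start_hue=0.0, scalef=100)
print('Rf =', data['Rf'], 'Rg =', data['Rg'], 'CCT =', data['cct'])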