Example #1
def parse_x1x2_parameters(x,
                          target_shape,
                          catmode,
                          expand_2d_to_3d=None,
                          default=[1.0, 1.0]):
    """
   Parse input parameters x and make them the target_shape for easy calculation. 
   
   | Input in main function can now be a single value valid for all xyzw or 
     an array with a different value for each xyzw.
   
   Args:
        :x: 
            | list[float, float] or ndarray
        :target_shape: 
            | tuple with shape information
        :catmode: 
            | '1>0>2, optional
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :expand_2d_to_3d: 
            | None, optional 
            | [will be removed in future, serves no purpose]
            | Expand :x: from 2 to 3 dimensions.
        :default:
            | [1.0,1.0], optional
            | Default values for :x:
    
   Returns:
       :returns: 
           | (ndarray, ndarray) for x10 and x20

   """
    if x is None:
        x10 = np.ones(target_shape) * default[0]
        if (catmode == '1>0>2') | (catmode == '1>2'):
            x20 = np.ones(target_shape) * default[1]
        else:
            x20 = np.zeros(target_shape)
            x20.fill(np.nan)
    else:
        x = np2d(x)
        if (catmode == '1>0>2') | (catmode == '1>2'):
            if x.shape[-1] == 2:
                x10 = np.ones(target_shape) * x[..., 0]
                x20 = np.ones(target_shape) * x[..., 1]
            else:
                x10 = np.ones(target_shape) * x
                x20 = x10.copy()
        elif catmode == '1>0':
            x10 = np.ones(target_shape) * x[..., 0]
            x20 = np.zeros(target_shape)
            x20.fill(np.nan)
    return x10, x20
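
# Usage sketch (not part of the original source): with x = None only numpy is needed,
# since the np2d() helper (assumed to come from luxpy's utils) is only called when x is given.
# The call below broadcasts the hypothetical defaults 0.9 and 0.7 to the target shape.
import numpy as np
D10, D20 = parse_x1x2_parameters(None, target_shape=(5, 1), catmode='1>0>2',
                                 default=[0.9, 0.7])
# D10 is filled with 0.9 and D20 with 0.7; for catmode '1>0', D20 would be NaN instead.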
Example #2
def plot(v,
         origin=None,
         ax=None,
         color='k',
         marker='.',
         linestyle='-',
         **kwargs):
    """
    Plot a vector from origin.
    
    Args:
        :v:
            | vec3 vector.
        :origin:
            | None, optional
            | vec3 vector with same size attributes as in :v:.
            | If None: the origin defaults to the zero vector.
        :ax: 
            | None, optional
            | axes handle.
            | If None, create new figure with axes ax.
        :color:
            | 'k', optional
            | color specifier.
        :marker:
            | '.', optional
            | marker specifier.
        :linestyle:
            | '-', optional
            | linestyle specifier
        :**kwargs:
            | other keyword specifiers for plot.
          
    Returns:
        :ax:
            | handle to figure axes.          
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
    if origin is None:
        origin = vec3(np.zeros(v.x.shape), np.zeros(v.x.shape),
                      np.zeros(v.x.shape))
    ax.plot(np.hstack([origin.x, v.x]),
            np.hstack([origin.y, v.y]),
            np.hstack([origin.z, v.z]),
            color=color,
            marker=marker,
            linestyle=linestyle,
            **kwargs)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    return ax
Example #3
def cik_to_v(cik, xyc = None, inverse = False):
    """
    Calculate v-format ellipse descriptor from 2x2 'covariance matrix'^-1 cik 
    
    Args:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
        :xyc:
            | None, optional
            | (Nx2) ndarray with ellipse center coordinates to fill in to v[:,2:4].
        :inverse:
            | If True: input is inverse of cik.
              
            
    Returns:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]

    Notes:
        | cik corresponds to the true inverse covariance matrix only for a 
        | Gaussian or normal distribution!

    """
    if cik.ndim < 3:
        cik = cik[None,...]
    
    if inverse == True:
        for i in range(cik.shape[0]):
            cik[i,:,:] = np.linalg.inv(cik[i,:,:])
            
    g11 = cik[:,0,0]
    g22 = cik[:,1,1] 
    g12 = cik[:,0,1]

    theta = 0.5*np.arctan2(2*g12,(g11-g22)) + (np.pi/2)*(g12<0)
    #theta = theta2 + (np.pi/2)*(g12<0)
    #theta2 = theta
    cottheta = np.cos(theta)/np.sin(theta) #np.cot(theta)
    cottheta[np.isinf(cottheta)] = 0

    a = 1/np.sqrt((g22 + g12*cottheta))
    b = 1/np.sqrt((g11 - g12*cottheta))

    # ensure largest ellipse axis is first (correct angle):
    c = b>a; a[c], b[c], theta[c] = b[c],a[c],theta[c]+np.pi/2

    v = np.vstack((a, b, np.zeros(a.shape), np.zeros(a.shape), theta)).T
    
    # add center coordinates:
    if xyc is not None:
        v[:,2:4] = xyc
    
    return v
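
# Usage sketch (not part of the original source): the (covariance matrix)^-1 of an ellipse
# with Rmax = 1, Rmin = 0.5 rotated over 45 degrees has g11 = g22 = 2.5 and g12 = -1.5,
# so cik_to_v should recover those ellipse parameters (center taken from xyc).
import numpy as np
cik = np.array([[[2.5, -1.5],
                 [-1.5, 2.5]]])                      # shape (1, 2, 2)
v = cik_to_v(cik, xyc=np.array([[0.1, 0.2]]))
# v -> [[1.0, 0.5, 0.1, 0.2, 0.7854]]  i.e. [Rmax, Rmin, xc, yc, theta = pi/4]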
Example #4
def _cri_ref(ccts,
             wl3=_WL,
             ref_type='iestm30',
             mix_range=[4000, 5000],
             cieobs='1931_2',
             force_daylight_below4000K=False,
             n=None,
             daylight_locus=None,
             wl=[360, 830, 1]):
    """
    Calculates multiple reference illuminant spectra based on ccts 
    for color rendering index calculations.
    """
    if mix_range is None:
        mix_range = _CRI_REF_TYPES[ref_type]
    if isinstance(ccts, float): ccts = [ccts]
    wlr = getwlr(wl3)
    Srs = np.zeros((len(ccts) + 1, len(wlr)))
    Srs[0] = wlr
    for i, cct in enumerate(ccts):
        Srs[i + 1, :] = _cri_ref_i(
            cct,
            wl3=wl3,
            ref_type=ref_type,
            mix_range=mix_range,
            cieobs=cieobs,
            force_daylight_below4000K=force_daylight_below4000K,
            n=n,
            daylight_locus=daylight_locus)[1:]

    return Srs
Example #5
def v_to_cik(v, inverse = False):
    """
    Calculate 2x2 '(covariance matrix)^-1' elements cik 
    
    Args:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :inverse:
            | If True: return inverse of cik.
    
    Returns:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
    
    Notes:
        | cik corresponds to a true (inverse) covariance matrix only for a 
        | Gaussian or normal distribution!

    """
    v = np.atleast_2d(v)
    g11 = (1/v[:,0]*np.cos(v[:,4]))**2 + (1/v[:,1]*np.sin(v[:,4]))**2
    g22 = (1/v[:,0]*np.sin(v[:,4]))**2 + (1/v[:,1]*np.cos(v[:,4]))**2
    g12 = (1/v[:,0]**2 - 1/v[:,1]**2)*np.sin(v[:,4])*np.cos(v[:,4])
    cik = np.zeros((g11.shape[0],2,2))

    for i in range(g11.shape[0]):
        cik[i,:,:] = np.vstack((np.hstack((g11[i],g12[i])), np.hstack((g12[i],g22[i]))))
        if inverse == True:
            cik[i,:,:] = np.linalg.inv(cik[i,:,:])
    return cik
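
# Round-trip sketch (not part of the original source): converting ellipse parameters to cik
# with v_to_cik and back with cik_to_v (Example #3) should recover Rmax, Rmin and theta.
import numpy as np
v = np.array([[1.0, 0.5, 0.1, 0.2, np.pi / 4]])      # [Rmax, Rmin, xc, yc, theta]
cik = v_to_cik(v)                                    # shape (1, 2, 2)
v_back = cik_to_v(cik, xyc=v[:, 2:4])
# v_back matches v up to floating-point error.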
Example #6
def _complete_ldt_lid(LDT, Isym=4):
    """
    Convert LDT LID map with Isym symmetry to a 'full' map with phi: [0,360] and theta: [0,180].
    """
    cangles = LDT['h_angs']
    tangles = LDT['v_angs']
    candela_2d = LDT['candela_2d']
    if Isym == 4:
        # complete cangles:
        a = candela_2d.copy().T
        b = np.hstack((a, a[:, (a.shape[1] - 2)::-1]))
        c = np.hstack((b, b[:, (b.shape[1] - 2):0:-1]))
        candela_2d_0C360 = np.hstack((c, c[:, :1]))
        cangles = np.hstack(
            (cangles, cangles[1:] + 90, cangles[1:] + 180, cangles[1:] + 270))
        # complete  tangles:
        a = candela_2d_0C360.copy()
        b = np.vstack((a, np.zeros(a.shape)[1:, :]))
        tangles = np.hstack((tangles, tangles[1:] + 90))
        candela_2d = b
    elif Isym == -4:
        # complete cangles:
        a = candela_2d.copy().T
        b = np.hstack((a, a[:, (a.shape[1] - 2)::-1]))
        c = np.hstack((b, b[:, (b.shape[1] - 2):0:-1]))
        candela_2d_0C360 = np.hstack((c, c[:, :1]))
        cangles = np.hstack(
            (cangles, -cangles[(cangles.shape[0] - 2)::-1] + 180))
        cangles = np.hstack(
            (cangles, -cangles[(cangles.shape[0] - 2):0:-1] + 360))
        cangles = np.hstack((cangles, cangles[:1]))
        # complete  tangles:
        a = candela_2d_0C360.copy()
        b = np.vstack((a, np.zeros(a.shape)[1:, :]))
        tangles = np.hstack(
            (tangles, -tangles[(tangles.shape[0] - 2)::-1] + 180))
        candela_2d = b
    else:
        raise Exception(
            'complete_ldt_lid(): Other "Isym" than "4", not yet implemented (31/10/2018).'
        )

    LDT['map'] = {'thetas': tangles}
    LDT['map']['phis'] = cangles
    LDT['map']['values'] = candela_2d.T
    return LDT
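
# Usage sketch (not part of the original source): a minimal quarter-sphere LID with
# hypothetical angle grids and uniform intensities, completed with Isym = 4.
# The 'h_angs', 'v_angs' and 'candela_2d' keys are the ones this helper reads.
import numpy as np
LDT = {'h_angs': np.array([0., 45., 90.]),           # C-plane angles of one quarter
       'v_angs': np.array([0., 45., 90.]),           # gamma angles of the upper hemisphere
       'candela_2d': np.ones((3, 3))}                # shape: (n_h_angs, n_v_angs)
LDT = _complete_ldt_lid(LDT, Isym=4)
# LDT['map']['phis'].shape -> (9,), ['thetas'].shape -> (5,), ['values'].shape -> (9, 5)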
Example #7
File: plotters.py  Project: simongr2/luxpy
def plot_rgb_color_patches(rgb,
                           patch_shape=(100, 100),
                           patch_layout=None,
                           ax=None,
                           show=True):
    """
    Create (and plot) an image with patches with specified rgb values.
    
    Args:
        :rgb:
            | ndarray with rgb values for each of the patches
        :patch_shape:
            | (100,100), optional
            | shape of each of the patches in the image
        :patch_layout:
            | None, optional
            | If None: layout is calculated automatically to give a 'good' aspect ratio
        :ax:
            | None, optional
            | Axes to plot the image in. If None: a new axes is created.
        :show:
            | True, optional
            | If True: plot image in axes and return axes handle; else: return ndarray with image.
            
    Returns:
        :ax: or :image: 
            | Axes is returned if show == True, else: ndarray with rgb image is returned.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)

    if patch_layout is None:
        patch_layout = get_subplot_layout(rgb.shape[0])

    image = np.zeros(
        np.hstack((np.array(patch_shape) * np.array(patch_layout), 3)))
    for i in range(rgb.shape[0]):
        r, c = np.unravel_index(i, patch_layout)
        R = int(r * patch_shape[0])
        C = int(c * patch_shape[1])
        image[R:R + patch_shape[0],
              C:C + patch_shape[1], :] = np.ones(np.hstack(
                  (patch_shape, 3))) * rgb[i, None, :]

    if show == False:
        return image
    else:
        ax.imshow(image.astype('uint8'))
        ax.axis('off')
        return ax
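
# Usage sketch (not part of the original source): with an explicit patch_layout and
# show = False the function returns the raw image array, so get_subplot_layout is not
# needed (a matplotlib axes is still created but left unused).
import numpy as np
rgb = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0]])
image = plot_rgb_color_patches(rgb, patch_shape=(50, 50), patch_layout=(2, 2), show=False)
# image.shape -> (100, 100, 3): a 2 x 2 grid of 50 x 50 colored patches.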
Example #8
File: demo_opt.py  Project: simongr2/luxpy
def ndset(F):
    """
    Finds the nondominated set of a set of objective points.

    Args:
        :F: 
            | an m x mu ndarray with mu points and m objectives

    Returns:
        :ispar: 
            | a mu-length boolean vector, True for the nondominated points
    """
    mu = F.shape[1] #number of points

    # The idea is to compare each point with the other ones
    f1 = np.transpose(F[...,None], axes = [0, 2, 1]) #puts in the 3D direction
    f1 = np.repeat(f1,mu,axis=1)
    f2 = np.repeat(F[...,None],mu,axis=2)

    # Now, for the ii-th slice, the ii-th individual is compared with all of the
    # others at once. Then, the usual operations of domination are checked
    # Checks where f1 dominates f2
    aux1 = (f1 <= f2).all(axis = 0, keepdims = True)
    aux2 = (f1 < f2).any(axis = 0, keepdims = True)

    auxf1 = np.logical_and(aux1, aux2)
    # Checks where f1 is dominated by f2
    aux1 = (f1 >= f2).all(axis = 0, keepdims = True)
    aux2 = (f1 > f2).any(axis = 0, keepdims = True)
    auxf2 = np.logical_and(aux1, aux2)
    
    # dom will be a 3D matrix (1 x mu x mu) such that, for the ii-th slice, it
    # will contain +1 if fii dominates the current point, -1 if it is dominated 
    # by it, and 0 if they are incomparable
    dom = np.zeros((1, mu, mu), dtype = int)

    dom[auxf1] = 1
    dom[auxf2] = -1
    
    # Finally, the slices with no -1 are nondominated
    ispar = (dom != -1).all(axis = 1)
    ispar = ispar.flatten()
    return ispar
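
# Usage sketch (not part of the original source): four points of a 2-objective minimization
# problem; only the point (3, 5) is dominated (by (2, 3)), so it is flagged False.
import numpy as np
F = np.array([[1.0, 2.0, 3.0, 0.5],                  # objective 1 (columns are points)
              [4.0, 3.0, 5.0, 6.0]])                 # objective 2
ispar = ndset(F)
# ispar -> [ True  True False  True]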
Example #9
def xtransform(x, params):
    """
    Converts unconstrained variables into their original domains.
    """

    xtrans = np.zeros((params['n']))

    # k allows some variables to be fixed, thus dropped from the optimization.
    k = 0
    for i in range(params['n']):
        if params['BoundClass'][i] == 1:
            # lower bound only
            xtrans[i] = params['LB'][i] + x[k]**2

        elif params['BoundClass'][i] == 2:
            # upper bound only
            xtrans[i] = params['UB'][i] - x[k]**2

        elif params['BoundClass'][i] == 3:
            # lower and upper bounds
            xtrans[i] = (np.sin(x[k]) + 1) / 2
            xtrans[i] = xtrans[i] * (params['UB'][i] -
                                     params['LB'][i]) + params['LB'][i]

            # just in case of any floating point problems
            xtrans[i] = np.hstack(
                (params['LB'][i], np.hstack(
                    (params['UB'][i], xtrans[i])).min())).max()

        elif params['BoundClass'][i] == 4:
            # fixed variable, bounds are equal, set it at either bound
            xtrans[i] = params['LB'][i]

        elif params['BoundClass'][i] == 0:
            # unconstrained variable.
            xtrans[i] = x[k]

        if params['BoundClass'][i] != 4:
            k += 1

    return xtrans
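
# Usage sketch (not part of the original source): a hypothetical params dict with one
# variable bounded in [0, 1] (BoundClass 3) and one unconstrained variable (BoundClass 0),
# matching the keys this helper reads ('n', 'BoundClass', 'LB', 'UB').
import numpy as np
params = {'n': 2,
          'BoundClass': [3, 0],
          'LB': [0.0, 0.0],
          'UB': [1.0, 0.0]}
xtrans = xtransform(np.array([0.0, 5.0]), params)
# xtrans -> [0.5, 5.0]: (sin(0) + 1)/2 scaled to [0, 1], and the unconstrained value as-is.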
Example #10
def rgb_to_spec_smits(rgb, intent='rfl', bitdepth=8, wlr=_WL3, rgb2spec=None):
    """
    Convert an array of RGB values to a spectrum using a Smits like conversion as implemented in Mitsuba.
    
    Args:
        :rgb: 
            | ndarray or list of rgb values
        :intent:
            | 'rfl' (or 'spd'), optional
            | type of requested spectrum conversion.
        :bitdepth:
            | 8, optional
            | bit depth of rgb values
        :wlr: 
            | _WL3, optional
            | desired wavelength (nm) range of spectrum.
        :rgb2spec:
            | None, optional
            | Dict with base spectra for white, cyan, magenta, yellow, blue, green and red for each intent.
            | If None: use _BASESPEC_SMITS.
        
    Returns:
        :spec: 
            | ndarray with spectrum or spectra (one for each rgb value, first row are the wavelengths)
    """
    if isinstance(rgb, list):
        rgb = np.atleast_2d(rgb)
    if rgb.max() > 1:
        rgb = rgb / (2**bitdepth - 1)
    if rgb2spec is None:
        rgb2spec = _BASESPEC_SMITS
    if not np.array_equal(rgb2spec['wlr'], getwlr(wlr)):
        rgb2spec = _convert_to_wlr(entries=copy.deepcopy(rgb2spec), wlr=wlr)
    spec = np.zeros((rgb.shape[0], rgb2spec['wlr'].shape[0]))
    for i in range(rgb.shape[0]):
        spec[i, :] = _fromLinearRGB(rgb[i, :],
                                    intent=intent,
                                    rgb2spec=rgb2spec,
                                    wlr=wlr)
    return np.vstack((rgb2spec['wlr'], spec))
Example #11
File: demo_opt.py  Project: simongr2/luxpy
def dtlz_range_(fname, M):
    """
    Returns the decision range of a DTLZ function
    
    | The range is simply [0,1] for all variables. What varies is the number 
    | of decision variables in each problem. The equation for that is
    | n = (M-1) + k
    | wherein k = 5 for DTLZ1, 10 for DTLZ2-6, and 20 for DTLZ7.
    
    Args:
        :fname: 
            | a string with the name of the function ('dtlz1', 'dtlz2' etc.)
        :M: 
            | a scalar with the number of objectives
    
    Returns:
        :lim: 
            | an n x 2 matrix wherein the first column is the lower limit (0) 
            | and the second column the upper limit of search (1)
    """
    # Check whether the string has the 'dtlz' prefix and whether the number 
    # after it is greater than 7:
    fname = fname.lower()
    if (len(fname) < 5) or (fname[:4] != 'dtlz') or (float(fname[4]) > 7) :
       raise Exception('Sorry, the function {:s} is not implemented.'.format(fname))


    # If the name is o.k., defines the value of k
    if fname ==  'dtlz1':
       k = 5
    elif fname == 'dtlz7':
       k = 20
    else: # any other function
       k = 10

    
    n = (M-1) + k #number of decision variables
    
    lim = np.hstack((np.zeros((n,1)), np.ones((n,1))))
    return lim
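
# Usage sketch (not part of the original source): for DTLZ2 with M = 3 objectives,
# k = 10 and thus n = (M - 1) + k = 12 decision variables, each bounded in [0, 1].
lim = dtlz_range_('DTLZ2', 3)
# lim.shape -> (12, 2); every row is [0., 1.]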
Example #12
def _fromLinearRGB(rgb, intent='rfl', rgb2spec=_BASESPEC_SMITS, wlr=_WL3):
    """
    Convert a single linear rgb triplet to a spectrum using the Smits-like basis spectra in rgb2spec.
    """
    r, g, b = rgb
    result = np.zeros((rgb2spec['wlr'].shape[0], ))

    if (r <= g) & (r <= b):
        # Compute reflectance spectrum with 'r' as minimum
        result += r * rgb2spec[intent]['white']
        if (g <= b):
            result += (g - r) * rgb2spec[intent]['cyan']
            result += (b - g) * rgb2spec[intent]['blue']
        else:
            result += (b - r) * rgb2spec[intent]['cyan']
            result += (g - b) * rgb2spec[intent]['green']

    elif (g <= r) & (g <= b):
        # Compute reflectance spectrum with 'g' as minimum
        result += g * rgb2spec[intent]['white']
        if (r <= b):
            result += (r - g) * rgb2spec[intent]['magenta']
            result += (b - r) * rgb2spec[intent]['blue']
        else:
            result += (b - g) * rgb2spec[intent]['magenta']
            result += (r - b) * rgb2spec[intent]['red']

    else:
        # Compute reflectance spectrum with 'b' as minimum
        result += b * rgb2spec[intent]['white']
        if (r <= g):
            result += (r - b) * rgb2spec[intent]['yellow']
            result += (g - r) * rgb2spec[intent]['green']
        else:
            result += (g - b) * rgb2spec[intent]['yellow']
            result += (r - g) * rgb2spec[intent]['red']
    result *= rgb2spec[intent]['scalefactor']

    return np.clip(result, 0, None)  # no negative values allowed
Example #13
def apply(data, n_step = 2, catmode = None, cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\
          D = None, mcat = [_MCAT_DEFAULT], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None):
    """
    Calculate corresponding colors by applying a von Kries chromatic adaptation
    transform (CAT), i.e. independent rescaling of 'sensor sensitivity' to data
    to adapt from current adaptation conditions (1) to the new conditions (2).
    
    Args:
        :data: 
            | ndarray of tristimulus values (can be NxMx3)
        :n_step:
            | 2, optional
            | Number of steps in the CAT (1: one-step, 2: two-step)
        :catmode: 
            | None, optional
            |    - None: use :n_step: to set mode: 1 = '1>2', 2:'1>0>2'
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>2': One-step CAT
            |      from illuminant 1 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :cattype: 
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, optional (depending on :catmode:; can be Mx3)
        :xyzw2:
            | None, optional (depending on :catmode:; can be Mx3)
        :xyzw0:
            | None, optional (depending on :catmode:; can be Mx3)
        :D: 
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0]. 
        :La: 
            | None, optional
            | Adapting luminances. 
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative. 
        :F: 
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations 
            |  (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0]. 
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | [_MCAT_DEFAULT], optional
            | List[str] or List[ndarray] of sensor space matrices for each 
            |  condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0: 
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values 
            |   - 'lms': return corresponding sensor space excitation values 
            |            (e.g. for further calculations) 
      
    Returns:
          :returns: 
              | ndarray with corresponding colors
        
    Reference:
        1. `Smet, K. A. G., & Ma, S. (2020). 
        Some concerns regarding the CAT16 chromatic adaptation transform. 
        Color Research & Application, 45(1), 172–177. 
        <https://doi.org/10.1002/col.22457>`_
    """

    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:
        # Set catmode:
        if catmode is None:
            if n_step == 2:
                catmode = '1>0>2'
            elif n_step == 1:
                catmode = '1>2'
            else:
                raise Exception(
                    'cat.apply(n_step = {:1.0f}, catmode = None): Unknown requested n-step CAT mode !'
                    .format(n_step))

        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)
        else:
            target_shape = data.shape

        target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to iLL.E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1
                                is not None):  # apply one-step CAT: 1-->0
            catmode = '1>0'  #override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, use '1>0' !"
            )

        # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:  (note D20 is degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.zeros(data.shape)
        xyzc.fill(np.nan)
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match data.shape[1]!'
            )

        for i in range(xyzc.shape[1]):
            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T
            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar1)  #get degree of adaptation depending on Dtype
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')

                D20[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar2)  #get degree of adaptation depending on Dtype

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar12)  #get degree of adaptation depending on Dtype

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]

        return xyzc
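
# Usage sketch (not part of the original source), assuming the luxpy package is installed
# and exposes this function as luxpy.cat.apply: a one-step von Kries CAT of a relative XYZ
# sample from an approximately D65 white (xyzw1) to an approximately illuminant A white (xyzw2).
import numpy as np
from luxpy import cat
xyz = np.array([[50.0, 40.0, 30.0]])
xyzw1 = np.array([[95.04, 100.0, 108.88]])           # ~D65 white
xyzw2 = np.array([[109.85, 100.0, 35.58]])           # ~illuminant A white
xyz_c = cat.apply(xyz, n_step=1, xyzw1=xyzw1, xyzw2=xyzw2)
# xyz_c contains the corresponding colors under illuminant A.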
Example #14
def mahalanobis2(x, y = None, z = None, mu = None, sigmainv = None):
    """
    Evaluate the squared mahalanobis distance
    
    Args: 
        :x: 
            | scalar or list or ndarray (.ndim = 1 or 2) with x(y)-coordinates 
              at which to evaluate the mahalanobis distance squared.
        :y: 
            | None or scalar or list or ndarray (.ndim = 1) with y-coordinates 
              at which to evaluate the mahalanobis distance squared, optional.
            | If :y: is None, :x: should be a 2d array.
        :z: 
            | None or scalar or list or ndarray (.ndim = 1) with z-coordinates 
              at which to evaluate the mahalanobis distance squared, optional.
            | If :z: is None & :y: is None, then :x: should be a 2d array.
        :mu: 
            | None or ndarray (.ndim = 1) with center coordinates of the 
              mahalanobis ellipse, optional. 
            | None defaults to zeros(2) or zeros(3).
        :sigmainv:
            | None or ndarray with 'inverse covariance matrix', optional 
            | Determines the shape and orientation of the probability density.
            | None defaults to np.eye(2) or np.eye(3).
    Returns:
         :returns: 
             | ndarray with magnitude of mahalanobis2(x,y[,z])

    """
    if (y is None) & (z is None):
        p = x.shape[-1]
    elif (z is None):
        p = x.shape[-1] if (y is None) else 2
    elif (z is not None):
        p = 3 if (y is not None) else 2
    
    if mu is None:
        mu = np.zeros(p)
    if sigmainv is None:
        sigmainv = np.eye(p)
    
    x = np2d(x)
    mu = np2d(mu)

    if (y is None) & (z is None):
        x = x - mu
        if p == 2:
            x, y = asplit(x)
        elif p==3:
            x, y, z = asplit(x)
    elif (z is None):
        if y is None:
            x = x - mu
            x, y = asplit(x)
        else:
            x = x - mu[...,0] # center data on mu 
            y = np2d(y) - mu[...,1] # center data on mu 
    elif (z is not None):
        if (y is not None):
            x = x - mu[0] # center data on mu 
            y = np2d(y) - mu[...,1] # center data on mu 
            z = np2d(z) - mu[...,2] # center data on mu 
        else:
            x = x - mu[...,0] # center data on mu 
            y = np2d(z) - mu[...,1] # center data on mu 
            
    if p == 2:
        return (sigmainv[0,0] * (x**2.0) + sigmainv[1,1] * (y**2.0) + 2.0*sigmainv[0,1]*(x*y))
    else:
        return (sigmainv[0,0] * (x**2.0) + sigmainv[1,1] * (y**2.0) + 2.0*sigmainv[0,1]*(x*y) + 
                sigmainv[2,2] * (z**2.0) + 2.0*sigmainv[0,2]*(x*z) +  2.0*sigmainv[1,2]*(y*z))
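
# Usage sketch (not part of the original source): evaluate the squared Mahalanobis distance
# at a few (x, y) points; np2d is given a plain numpy stand-in here so the example runs
# without importing luxpy's utils (for this use it just promotes the inputs to 2D arrays).
import numpy as np
np2d = np.atleast_2d                                 # stand-in for luxpy's np2d helper
x = np.array([1.0, 0.0, 2.0])
y = np.array([0.0, 1.0, 2.0])
sigmainv = np.array([[1.0, 0.0], [0.0, 0.25]])       # inverse of cov = diag(1, 4)
d2 = mahalanobis2(x, y, sigmainv=sigmainv)
# d2 -> squared distances 1.0, 0.25 and 5.0 for the three points.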
Example #15
def cie_interp(data,
               wl_new,
               kind=None,
               negative_values_allowed=False,
               extrap_values=None):
    """
    Interpolate / extrapolate spectral data following standard CIE15-2018.
    
    | The kind of interpolation depends on the spectrum type defined in :kind:. 
    | Extrapolation is always done by replicating the closest known values.
    
    Args:
        :data: 
            | ndarray with spectral data 
            | (.shape = (number of spectra + 1, number of original wavelengths))
        :wl_new: 
            | ndarray with new wavelengths
        :kind: 
            | None, optional
            |   - If :kind: is None, return original data.
            |   - If :kind: is a spectrum type (see _INTERP_TYPES), the correct 
            |     interpolation type is automatically chosen.
            |   - Or :kind: can be any interpolation type supported by 
            |     scipy.interpolate.interp1d (math.interp1d if nan's are present!!)
        :negative_values_allowed: 
            | False, optional
            | If False: negative values are clipped to zero.
        :extrap_values:
            | None, optional
            | If None: use CIE recommended 'closest value' approach when extrapolating.
            | If float or list or ndarray, use those values to fill extrapolated value(s).
            | If 'ext': use normal extrapolated values by scipy.interpolate.interp1d
    
    Returns:
        :returns: 
            | ndarray of interpolated spectral data.
            | (.shape = (number of spectra + 1, number of wavelengths in wl_new))
    """
    if (kind is not None):
        # Wavelength definition:
        wl_new = getwlr(wl_new)

        if (not np.array_equal(data[0], wl_new)) | np.isnan(data).any():

            extrap_values = np.atleast_1d(extrap_values)

            # Set interpolation type based on data type:
            if kind in _INTERP_TYPES['linear']:
                kind = 'linear'
            elif kind in _INTERP_TYPES['cubic']:
                kind = 'cubic'

            # define wl, S, wl_new:
            wl = np.array(data[0])
            S = data[1:]
            wl_new = np.array(wl_new)

            # Interpolate each spectrum in S:
            N = S.shape[0]
            nan_indices = np.isnan(S)

            # Interpolate all (if not all rows have nan):
            rows_with_nans = np.where(nan_indices.sum(axis=1))[0]
            if not (rows_with_nans.size == N):
                #allrows_nans = False
                if extrap_values[0] is None:
                    fill_value = (0, 0)
                elif (((type(extrap_values[0]) == np.str_) |
                       (type(extrap_values[0]) == str))
                      and (extrap_values[0][:3] == 'ext')):
                    fill_value = 'extrapolate'
                else:
                    fill_value = (extrap_values[0], extrap_values[-1])
                Si = sp.interpolate.interp1d(wl,
                                             S,
                                             kind=kind,
                                             bounds_error=False,
                                             fill_value=fill_value)(wl_new)

                #extrapolate by replicating closest known (in source data!) value (conform CIE15-2004 recommendation)
                if extrap_values[0] is None:
                    Si[:, wl_new < wl[0]] = S[:, :1]
                    Si[:, wl_new > wl[-1]] = S[:, -1:]

            else:
                #allrows_nans = True
                Si = np.zeros([N, wl_new.shape[0]])
                Si.fill(np.nan)

            # Re-interpolate the rows that contain nan's:
            if nan_indices.any():
                #looping required as some values are NaN's
                for i in rows_with_nans:

                    nonan_indices = np.logical_not(nan_indices[i])
                    wl_nonan = wl[nonan_indices]
                    S_i_nonan = S[i][nonan_indices]
                    Si_nonan = math.interp1(wl_nonan,
                                            S_i_nonan,
                                            wl_new,
                                            kind=kind,
                                            ext='extrapolate')
                    #                    Si_nonan = sp.interpolate.interp1d(wl_nonan, S_i_nonan, kind = kind, bounds_error = False, fill_value = 'extrapolate')(wl_new)

                    #extrapolate by replicating closest known (in source data!) value (conform CIE15-2004 recommendation)
                    if extrap_values[0] is None:
                        Si_nonan[wl_new < wl_nonan[0]] = S_i_nonan[0]
                        Si_nonan[wl_new > wl_nonan[-1]] = S_i_nonan[-1]
                    elif (((type(extrap_values[0]) == np.str_) |
                           (type(extrap_values[0]) == str))
                          and (extrap_values[0][:3] == 'ext')):
                        pass
                    else:
                        Si_nonan[wl_new < wl_nonan[0]] = extrap_values[0]
                        Si_nonan[wl_new > wl_nonan[-1]] = extrap_values[-1]

                    Si[i] = Si_nonan

            # No negative values allowed for spectra:
            if negative_values_allowed == False:
                if np.any(Si): Si[Si < 0.0] = 0.0

            # Add wavelengths to data array:
            return np.vstack((wl_new, Si))

    return data
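
# Usage sketch (not part of the original source), assuming luxpy is installed and exposes
# this function as luxpy.cie_interp: interpolate a coarse spectrum onto a 1 nm grid with
# linear interpolation (the first row of the array holds the wavelengths).
import numpy as np
import luxpy as lx
spd = np.array([[400.0, 500.0, 600.0, 700.0],        # wavelengths (nm)
                [0.2,   0.8,   1.0,   0.6]])         # spectral values
spd_i = lx.cie_interp(spd, wl_new=[400, 700, 1], kind='linear')
# spd_i.shape -> (2, 301)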
Example #16
File: plotters.py  Project: simongr2/luxpy
def plotellipse(v, cspace_in = 'Yxy', cspace_out = None, nsamples = 100, \
                show = True, axh = None, \
                line_color = 'darkgray', line_style = ':', line_width = 1, line_marker = '', line_markersize = 4,\
                plot_center = False, center_marker = 'o', center_color = 'darkgray', center_markersize = 4,\
                show_grid = False, llabel = '', label_fontname = 'Times New Roman', label_fontsize = 12,\
                out = None):
    """
    Plot ellipse(s) given in v-format [Rmax,Rmin,xc,yc,theta].
    
    Args:
        :v: 
            | (Nx5) ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :cspace_in:
            | 'Yxy', optional
            | Color space of v.
            | If None: no color space assumed. Axis labels assumed ('x','y').
        :cspace_out:
            | None, optional
            | Color space to plot ellipse(s) in.
            | If None: plot in cspace_in.
        :nsamples:
            | 100 or int, optional
            | Number of points (samples) in ellipse boundary
        :show:
            | True or boolean, optional
            | Plot ellipse(s) (True) or not (False)
        :axh: 
            | None, optional
            | Ax-handle to plot ellipse(s) in.
            | If None: create new figure with axes.
        :line_color:
            | 'darkgray', optional
            | Color to plot ellipse(s) in.
        :line_style:
            | ':', optional
            | Linestyle of ellipse(s).
        :line_width:
            | 1, optional
            | Width of ellipse boundary line.
        :line_marker:
            | '', optional
            | Marker for ellipse boundary.
        :line_markersize:
            | 4, optional
            | Size of markers in ellipse boundary.
        :plot_center:
            | False, optional
            | Plot center of ellipse: yes (True) or no (False)
        :center_color:
            | 'darkgray', optional
            | Color to plot ellipse center in.
        :center_marker:
            | 'o', optional
            | Marker for ellipse center.
        :center_markersize:
            | 4, optional
            | Size of marker of ellipse center.
        :show_grid:
            | False, optional
            | Show grid (True) or not (False)
        :llabel:
            | '', optional
            | Legend label for ellipse boundary.
        :label_fontname: 
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :out:
            | None, optional
            | Output of function.
            | If None: returns None. Can be used to output the axh of a newly created
            |      figure's axes or to return Yxys, an ndarray with the coordinates of 
            |      the ellipse boundaries in cspace_out (shape = (nsamples, N, 3)) 
            
        
    Returns:
        :returns: None, or whatever set by :out:.
    """
    Yxys = np.zeros((nsamples, 3, v.shape[0]))
    ellipse_vs = np.zeros((v.shape[0], 5))
    for i, vi in enumerate(v):

        # Set sample density of ellipse boundary:
        t = np.linspace(0, 2 * np.pi, int(nsamples))

        a = vi[0]  # major axis
        b = vi[1]  # minor axis
        xyc = vi[2:4, None]  # center
        theta = vi[-1]  # rotation angle

        # define rotation matrix:
        R = np.hstack((np.vstack((np.cos(theta), np.sin(theta))),
                       np.vstack((-np.sin(theta), np.cos(theta)))))

        # Calculate ellipses:
        Yxyc = np.vstack((1, xyc)).T
        Yxy = np.vstack(
            (np.ones((1, nsamples)),
             xyc + np.dot(R, np.vstack((a * np.cos(t), b * np.sin(t)))))).T
        Yxys[:, :, i] = Yxy

        # Convert to requested color space:
        if (cspace_out is not None) & (cspace_in is not None):
            Yxy = colortf(Yxy, cspace_in + '>' + cspace_out)
            Yxyc = colortf(Yxyc, cspace_in + '>' + cspace_out)
            Yxys[:, :, i] = Yxy

            # get ellipse parameters in requested color space:
            ellipse_vs[i, :] = math.fit_ellipse(Yxy[:, 1:])
            #de = np.sqrt((Yxy[:,1]-Yxyc[:,1])**2 + (Yxy[:,2]-Yxyc[:,2])**2)
            #ellipse_vs[i,:] = np.hstack((de.max(),de.min(),Yxyc[:,1],Yxyc[:,2],np.nan)) # nan because orientation is xy, but request is some other color space. Change later to actual angle when fitellipse() has been implemented

        # plot ellipses:
        if show == True:
            if (axh is None) & (i == 0):
                fig = plt.figure()
                axh = fig.add_subplot(111)

            if (cspace_in is None):
                xlabel = 'x'
                ylabel = 'y'
            else:
                xlabel = _CSPACE_AXES[cspace_in][1]
                ylabel = _CSPACE_AXES[cspace_in][2]

            if (cspace_out is not None):
                xlabel = _CSPACE_AXES[cspace_out][1]
                ylabel = _CSPACE_AXES[cspace_out][2]

            if plot_center == True:
                axh.plot(Yxyc[:, 1],
                         Yxyc[:, 2],
                         color=center_color,
                         linestyle='none',
                         marker=center_marker,
                         markersize=center_markersize)
            if llabel is None:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize)
            else:
                axh.plot(Yxy[:, 1],
                         Yxy[:, 2],
                         color=line_color,
                         linestyle=line_style,
                         linewidth=line_width,
                         marker=line_marker,
                         markersize=line_markersize,
                         label=llabel)

            axh.set_xlabel(xlabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            axh.set_ylabel(ylabel,
                           fontname=label_fontname,
                           fontsize=label_fontsize)
            if show_grid == True:
                plt.grid(True)
            #plt.show()
    Yxys = np.transpose(Yxys, axes=(0, 2, 1))
    if out is not None:
        return eval(out)
    else:
        return None
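
# Usage sketch (not part of the original source): with show = False and no color-space
# conversion only the boundary coordinates are computed, so nothing beyond numpy is touched.
import numpy as np
v = np.array([[1.0, 0.5, 0.1, 0.2, np.pi / 4]])      # [Rmax, Rmin, xc, yc, theta]
Yxys = plotellipse(v, cspace_in=None, cspace_out=None, show=False, out='Yxys')
# Yxys.shape -> (100, 1, 3): 100 boundary samples, 1 ellipse, (Y, x, y) coordinates.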
Example #17
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW16_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(
        parameters,
        args)  #overwrite parameters with other (not-None) args input

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  #input is expected to be absolute
            dataw[1:] = Lw * dataw[
                1:] / xyzw[:, 1:2]  #dataw = Lw*dataw # make absolute
        else:
            dataw = dataw  # make relative (Y=100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy(
    )  # stimulus data (can be upto NxMx3 for xyz, or [N x (M+1) x wl] for spd))
    dataw = np2d(dataw).copy(
    )  # white point (can be upto Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[
                0] == 1:  #make dataw have same lights source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i +
                      1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0
                    ) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[
                        0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(
                Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(
                    Mxyz2lms,
                    data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack(
            (lmsfp, lmstp)
        )  # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[
                    0]  # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack(
                (lstar_out[1:], alph_out[1:], bet_out[1:])
            ).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[
                    0]  #  inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            lstar_int = lstar_out
            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

#    if flipaxis0and1 == True: # loop over shortest dim.
#        camout = np.transpose(camout, axes = (1,0,2))

# Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
Example #18
def getCatObs(n_cat=10,
              fieldsize=2,
              out='LMS',
              wl=None,
              allow_negative_values=False):
    """
    Generate cone fundamentals for categorical observers.
    
    Args: 
        :n_cat: 
            | 10, optional
            | Number of observer CMFs to generate.
        :fieldsize:
            | 2, optional
            | Field size in degrees (between 2° and 10°).
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            |  None: output original _WL = np.array([390,780,5])
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions 
            |  should not have negative values.
            |     If False: X[X<0] = 0.
    
    Returns:
        :returns:
            | LMS [,var_age, vAll] 
            |   - LMS: ndarray with population LMS functions.
            |   - var_age: ndarray with population observer ages.
            |   - vAll: dict with population physiological factors (see .keys()) 
    
    Notes:
        1. Categorical observers are observer functions that represent 
        color-normal populations. They are finite and discrete, as opposed to 
        observer functions generated from the individual colorimetric observer 
        model, and thus offer a more convenient and practical approach for 
        personalized color imaging workflows and color matching analyses.
        Categorical observers were derived in two steps. 
        In the first step, 10000 observer functions were generated from the 
        individual colorimetric observer model using Monte Carlo simulation. 
        In the second step, a cluster analysis (a modified k-medoids 
        algorithm) was applied to these 10000 observers, minimizing the squared 
        Euclidean distance in cone-fundamental space, and the categorical 
        observers were derived iteratively. Since the proposed categorical 
        observers are defined by their physiological parameters and ages, their
        CMFs can be derived for any target field size.

        2. Categorical observers are ordered by importance: 
        the first categorical observer is the average observer, equivalent to 
        CIEPO06 for a 38-year-old observer at the given field size, followed by
        the second most important categorical observer, the third, and so on.
        
        3. see: https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php
    """
    # Use Iteratively Derived Cat.Obs.:
    var_age = _INDVCMF_CATOBSPFCTR['age'].copy()
    vAll = _INDVCMF_CATOBSPFCTR.copy()
    vAll.pop('age')

    # Set requested wavelength range:
    if wl is not None:
        wl = getwlr(wl3=wl)
    else:
        wl = _WL

    LMS_All = np.zeros((3 + 1, wl.shape[0], n_cat))  # use the requested wavelength range (cfr. genMonteCarloObs)
    LMS_All.fill(np.nan)
    for k in range(n_cat):
        t_LMS = cie2006cmfsEx(age = var_age[k],fieldsize = fieldsize, wl = wl,\
                              var_od_lens = vAll['od_lens'][k],\
                              var_od_macula = vAll['od_macula'][k],\
                              var_od_L = vAll['od_L'][k],\
                              var_od_M = vAll['od_M'][k],\
                              var_od_S = vAll['od_S'][k],\
                              var_shft_L = vAll['shft_L'][k],\
                              var_shft_M = vAll['shft_M'][k],\
                              var_shft_S = vAll['shft_S'][k],\
                              out = 'LMS')

        LMS_All[:, :, k] = t_LMS

    LMS_All[np.where(LMS_All < 0)] = 0

    if n_cat == 1:
        LMS_All = np.squeeze(LMS_All, axis=2)

    if ('xyz' in out.lower().split(',')):
        LMS_All = lmsb_to_xyzb(LMS_All,
                               fieldsize,
                               out='xyz',
                               allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    if (out == 'LMS'):
        return LMS_All
    elif (out == 'LMS,var_age,vAll'):
        return LMS_All, var_age, vAll
    else:
        return eval(out)
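A minimal usage sketch for getCatObs (illustrative arguments; the commented import path is an assumption and may differ between luxpy versions):

# from luxpy.toolboxes.indvcmf import getCatObs   # assumed import path

LMS, var_age, vAll = getCatObs(n_cat=5, fieldsize=2, out='LMS,var_age,vAll')
print(LMS.shape)   # (4, n_wavelengths, 5): wavelength row + L, M, S per categorical observer
print(var_age)     # ages of the 5 categorical observers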
Example #19
def rotate(v,
           vecA=None,
           vecB=None,
           rot_axis=None,
           rot_angle=None,
           deg=True,
           norm=False):
    """
    Rotate vector around rotation axis over angle.
    
    Args:
        :v: 
            | vec3 vector.
        :rot_axis:
            | None, optional
            | vec3 vector specifying rotation axis.
        :rot_angle:
            | None, optional
            | float or int rotation angle.
        :deg:
            | True, optional
            | If False, rot_angle is in radians.
        :vecA:, :vecB:
            | None, optional
            | vec3 vectors defining a normal direction (cross(vecA, vecB)) around 
            | which to rotate the vector in :v:. If rot_angle is None: rotation
            | angle is defined by the in-plane angle between vecA and vecB.
        :norm:
            | False, optional
            | Normalize rotated vector.

    Returns:
        :returns:
            | rotated (and optionally normalized) vec3 vector.
    """

    if (vecA is not None) & (vecB is not None):
        rot_axis = cross(vecA, vecB)  # rotation axis
        if rot_angle is None:
            costheta = dot(vecA, vecB, norm=True)  # rotation angle
            costheta[costheta > 1] = 1
            costheta[costheta < -1] = -1
            rot_angle = np.arccos(costheta)
    elif (rot_angle is not None):
        if deg == True:
            rot_angle = np.deg2rad(rot_angle)
    else:
        raise Exception('vec3.rotate: insufficient not-None input args.')

    # normalize rot_axis
    rot_axis = rot_axis / rot_axis.norm()

    # Create short-hand variables:
    u = rot_axis
    cost = np.cos(rot_angle)
    sint = np.sin(rot_angle)

    # Setup rotation matrix:
    R = np.asarray([[np.zeros(u.x.shape) for j in range(3)] for i in range(3)])
    R[0, 0] = cost + u.x * u.x * (1 - cost)
    R[0, 1] = u.x * u.y * (1 - cost) - u.z * sint
    R[0, 2] = u.x * u.z * (1 - cost) + u.y * sint
    R[1, 0] = u.x * u.y * (1 - cost) + u.z * sint
    R[1, 1] = cost + u.y * u.y * (1 - cost)
    R[1, 2] = u.y * u.z * (1 - cost) - u.x * sint
    R[2, 0] = u.z * u.x * (1 - cost) - u.y * sint
    R[2, 1] = u.z * u.y * (1 - cost) + u.x * sint
    R[2, 2] = cost + u.z * u.z * (1 - cost)

    # calculate dot product of matrix M with vector v:
    v3 = vec3(R[0,0]*v.x + R[0,1]*v.y + R[0,2]*v.z, \
                R[1,0]*v.x + R[1,1]*v.y + R[1,2]*v.z, \
                R[2,0]*v.x + R[2,1]*v.y + R[2,2]*v.z)
    if norm == True:
        v3 = v3 / v3.norm()
    return v3
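A quick sanity-check sketch for rotate(): rotating the x unit vector by 90° around the z axis should give approximately the y unit vector (vec3 is assumed to be the vector class used throughout this module):

import numpy as np

v = vec3(np.array([1.0]), np.array([0.0]), np.array([0.0]))
z_axis = vec3(np.array([0.0]), np.array([0.0]), np.array([1.0]))
v_rot = rotate(v, rot_axis=z_axis, rot_angle=90.0, deg=True)
print(v_rot.x, v_rot.y, v_rot.z)   # approximately [0.] [1.] [0.]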
def genMonteCarloObs(n_obs=1,
                     fieldsize=10,
                     list_Age=[32],
                     out='LMS',
                     wl=None,
                     allow_negative_values=False):
    """
    Monte-Carlo generation of individual observer cone fundamentals.
    
    Args: 
        :n_obs: 
            | 1, optional
            | Number of observer CMFs to generate.
        :list_Age:
            | list of observer ages or str, optional
            | Defaults to 32 (cfr. CIE2006 CMFs)
            | If 'us_census': use US population census of 2010 
              to generate list_Age.
        :fieldsize: 
            | fieldsize in degrees (between 2° and 10°), optional
            | Defaults to 10°.
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :allow_negative_values: 
            | False, optional
            | Cone fundamentals or color matching functions 
            |   should not have negative values.
            |     If False: X[X<0] = 0.
    
    Returns:
        :returns: 
            | LMS [,var_age, vAll] 
            |   - LMS: ndarray with population LMS functions.
            |   - var_age: ndarray with population observer ages.
            |   - vAll: dict with population physiological factors (see .keys()) 
            
    References:
         1. `Asano Y, Fairchild MD, and Blondé L (2016). 
         Individual Colorimetric Observer Model. 
         PLoS One 11, 1–19. 
         <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
         
         2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). 
         Color matching experiment for highlighting interobserver variability. 
         Color Res. Appl. 41, 530–539. 
         <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
         
         3. `CIE, and CIE (2006). 
         Fundamental Chromaticity Diagram with Physiological Axes - Part I 
         (Vienna: CIE). 
         <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 
         
         4. `Asano's Individual Colorimetric Observer Model 
         <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """

    # Scale down StdDev by scalars optimized using Asano's 75 observers
    # collected in Germany:
    stdDevAllParam = _INDVCMF_STD_DEV_ALL_PARAM.copy()
    scale_factors = [0.98, 0.98, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    scale_factors = dict(zip(list(stdDevAllParam.keys()), scale_factors))
    stdDevAllParam = {
        k: v * scale_factors[k]
        for (k, v) in stdDevAllParam.items()
    }

    # Get Normally-distributed Physiological Factors:
    vAll = getMonteCarloParam(n_obs=n_obs)

    if list_Age == 'us_census':
        list_Age = getUSCensusAgeDist()

    # Generate Random Ages with the same probability density distribution
    # as color matching experiment:
    sz_interval = 1
    list_AgeRound = np.round(np.array(list_Age) / sz_interval) * sz_interval
    h = math.histogram(list_AgeRound,
                       bins=np.unique(list_AgeRound),
                       bin_center=True)[0]
    p = h / h.sum()  # probability density distribution

    var_age = np.random.choice(np.unique(list_AgeRound), \
                               size = n_obs, replace = True,\
                               p = p)

    # Set requested wavelength range:
    if wl is not None:
        wl = getwlr(wl3=wl)
    else:
        wl = _WL

    LMS_All = np.zeros((3 + 1, wl.shape[0], n_obs))
    LMS_All.fill(np.nan)
    for k in range(n_obs):
        t_LMS, t_trans_lens, t_trans_macula, t_sens_photopig = cie2006cmfsEx(age = var_age[k], fieldsize = fieldsize, wl = wl,\
                                                                          var_od_lens = vAll['od_lens'][k], var_od_macula = vAll['od_macula'][k], \
                                                                          var_od_L = vAll['od_L'][k], var_od_M = vAll['od_M'][k], var_od_S = vAll['od_S'][k],\
                                                                          var_shft_L = vAll['shft_L'][k], var_shft_M = vAll['shft_M'][k], var_shft_S = vAll['shft_S'][k],\
                                                                          out = 'LMS,trans_lens,trans_macula,sens_photopig')
        LMS_All[:, :, k] = t_LMS


#        listout = out.split(',')
#        if ('trans_lens' in listout) | ('trans_macula' in listout) | ('trans_photopig' in listout):
#            trans_lens[:,k] = t_trans_lens
#            trans_macula[:,k] = t_trans_macula
#            sens_photopig[:,:,k] = t_sens_photopig

    if n_obs == 1:
        LMS_All = np.squeeze(LMS_All, axis=2)

    if ('xyz' in out.lower().split(',')):
        LMS_All = lmsb_to_xyzb(LMS_All,
                               fieldsize,
                               out='xyz',
                               allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    if (out == 'LMS'):
        return LMS_All
    elif (out == 'LMS,var_age,vAll'):
        return LMS_All, var_age, vAll
    else:
        return eval(out)
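A minimal usage sketch for genMonteCarloObs (illustrative arguments; assumes the surrounding luxpy individual-observer toolbox is importable):

LMS, var_age, vAll = genMonteCarloObs(n_obs=3, fieldsize=10, out='LMS,var_age,vAll')
print(LMS.shape)   # (4, n_wavelengths, 3): wavelength row + L, M, S per simulated observer
print(var_age)     # drawn ages (all 32 here, since the default list_Age is [32])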
Example #21
File: cam15u.py Project: simongr2/luxpy
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
            |  or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        # wavelength row doesn't count & only with forward can the input data be spectral:
        dshape[-2] = dshape[-2] - 1

    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms /
                   _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            # last term is contribution of Helmholtz-Kohlrausch effect on brightness:
            Q = A + cHK[0] * M**cHK[1]

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
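A forward/inverse round-trip sketch for cam15u (the XYZ values below are illustrative; cam15u expects absolute tristimulus values of unrelated, self-luminous stimuli):

import numpy as np

xyz = np.array([[100.0, 100.0, 100.0],
                [ 60.0,  50.0,  30.0]])
qab = cam15u(xyz, fov=10.0, inputtype='xyz', direction='forward', outin='Q,aW,bW')
xyz_back = cam15u(qab, fov=10.0, inputtype='xyz', direction='inverse', outin='Q,aW,bW')
print(np.allclose(xyz, xyz_back))   # the forward/inverse pair should round-trip (True)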
Example #22
File: helpers.py Project: simongr2/luxpy
def _massage_input_and_init_output(data,
                                   dataw,
                                   inputtype='xyz',
                                   direction='forward',
                                   n_out=3):
    """
    Redimension input data to ensure it has the appropriate sizes for easy and efficient looping.
    |
    | 1. Convert data and dataw to atleast_2d ndarrays
    | 2. Make axis 1 of dataw have 'same' dimensions as data
    | 3. Make dataw have same light source axis size as data
    | 4. Flip light source axis to axis=0 for efficient looping
    | 5. Initialize output array camout to 'same' shape as data but with camout.shape[-1] == n_out
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :n_out:
            | 3, optional
            | output size of last dimension of camout 
            | (e.g. n_out=3 for j,a,b output or n_out = 5 for J,M,h,a,b output)
            
    Returns:
        :data:
            | ndarray with reshaped data
        :dataw:
            | ndarray with reshaped dataw
        :camout:
            | NaN-filled ndarray for the CAM output (camout.shape[-1] == n_out) 
        :originalshape:
            | original shape of data
            
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    # Convert data and dataw to atleast_2d ndarrays:
    data = np2d(data).copy()    # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy()  # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)
    originalshape = data.shape  # to restore output to same shape

    # Make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    dataw = np.expand_dims(
        dataw, axis=1)  # add extra axis to move light source to axis 0

    # Make dataw have same light source dimension size as data:
    if inputtype == 'xyz':
        if dataw.shape[0] == 1:
            dataw = np.repeat(dataw, data.shape[0], axis=0)
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)
    else:
        dataw = np.array([
            np.vstack((dataw[:1, 0, :], dataw[i + 1:i + 2, 0, :]))
            for i in range(dataw.shape[0] - 1)
        ])
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)

    # Initialize output array:
    if n_out is not None:
        dshape = list((data).shape)
        dshape[-1] = n_out  # requested number of correlates: e.g. j,a,b
        if (inputtype != 'xyz') & (direction == 'forward'):
            # wavelength row doesn't count & only with forward can the input data be spectral:
            dshape[-2] = dshape[-2] - 1
        camout = np.zeros(dshape)
        camout.fill(np.nan)
    else:
        camout = None
    return data, dataw, camout, originalshape
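A small shape-handling sketch (hypothetical input values) showing what this helper returns for plain XYZ input:

import numpy as np

data = np.random.rand(5, 3) * 100            # 5 samples of xyz
dataw = np.array([[100.0, 100.0, 100.0]])    # a single white point
data_, dataw_, camout, origshape = _massage_input_and_init_output(
    data, dataw, inputtype='xyz', direction='forward', n_out=3)
print(data_.shape, dataw_.shape, camout.shape, origshape)
# expected: (1, 5, 3) (1, 1, 3) (1, 5, 3) (5, 3)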
Example #23
def calculate_VF_PX_models(S, cri_type = _VF_CRI_DEFAULT, sampleset = None, pool = False, \
                           pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),\
                                          'Cref' : _VF_MAXR, 'sig' : _VF_SIG, 'labels' : '#'},\
                           vfcolor = 'k', verbosity = 0):
    """
    Calculate Vector Field and Pixel color shift models.
    
    Args:
        :S:
            | ndarray with spectral data of the test source(s).
        :cri_type: 
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool:
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            |  for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            |  for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                'Cref' : _VF_MAXR, 
            |                'sig' : _VF_SIG, 
            |                'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
    
    Returns:
        :returns:
            | :dataVF:, :dataPX: 
            | Dicts, for more info, see output description of resp.: 
            | luxpy.cri.VF_colorshift_model() and luxpy.cri.PX_colorshift_model()
    """
    # calculate VectorField cri_color_shift model:
    dataVF = VF_colorshift_model(S,
                                 cri_type=cri_type,
                                 sampleset=sampleset,
                                 vfcolor=vfcolor,
                                 pcolorshift=pcolorshift,
                                 pool=pool,
                                 verbosity=verbosity)

    # Set jab_ranges and _deltas for PX-model pixel calculations:
    PX_jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR
                              ])  #set same as for vectorfield generation
    PX_jab_ranges = np.vstack(
        ([0, 100, _VF_DELTAR], [-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR],
         [-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR]))  #IES4880 gamut

    # Calculate shift vectors using vectorfield and pixel methods:
    delta_SvsVF_vshift_ab_mean = np.zeros((len(dataVF), 1))
    delta_SvsVF_vshift_ab_mean.fill(np.nan)
    delta_SvsVF_vshift_ab_mean_normalized = delta_SvsVF_vshift_ab_mean.copy()
    delta_PXvsVF_vshift_ab_mean = np.zeros((len(dataVF), 1))
    delta_PXvsVF_vshift_ab_mean.fill(np.nan)
    delta_PXvsVF_vshift_ab_mean_normalized = delta_PXvsVF_vshift_ab_mean.copy()
    dataPX = [[] for k in range(len(dataVF))]
    for Snr in range(len(dataVF)):

        # Calculate shifts using pixel method, PX:
        dataPX[Snr] = PX_colorshift_model(dataVF[Snr]['Jab']['Jabt'][:, 0, :],
                                          dataVF[Snr]['Jab']['Jabr'][:, 0, :],
                                          jab_ranges=PX_jab_ranges,
                                          jab_deltas=PX_jab_deltas,
                                          limit_grid_radius=_VF_MAXR)

        # Calculate shift difference between Samples (S) and VectorField model predictions (VF):
        delta_SvsVF_vshift_ab = dataVF[Snr]['vshifts']['vshift_ab_s'] - dataVF[
            Snr]['vshifts']['vshift_ab_s_vf']
        delta_SvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt(
            (delta_SvsVF_vshift_ab[..., 1:3]**2).sum(
                axis=delta_SvsVF_vshift_ab[..., 1:3].ndim - 1)),
                                                     axis=0)
        delta_SvsVF_vshift_ab_mean_normalized[
            Snr] = delta_SvsVF_vshift_ab_mean[Snr] / dataVF[Snr]['Jab'][
                'DEi'].mean(axis=0)

        # Calculate shift difference between PiXel method (PX) and VectorField (VF):
        delta_PXvsVF_vshift_ab = dataPX[Snr]['vshifts'][
            'vectorshift_ab_J0'] - dataVF[Snr]['vshifts']['vshift_ab_vf']
        delta_PXvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt(
            (delta_PXvsVF_vshift_ab[..., 1:3]**2).sum(
                axis=delta_PXvsVF_vshift_ab[..., 1:3].ndim - 1)),
                                                      axis=0)
        delta_PXvsVF_vshift_ab_mean_normalized[
            Snr] = delta_PXvsVF_vshift_ab_mean[Snr] / dataVF[Snr]['Jab'][
                'DEi'].mean(axis=0)

        dataVF[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean'] = delta_PXvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts'][
            'delta_SvsVF_vshift_ab_mean'] = delta_SvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts'][
            'delta_SvsVF_vshift_ab_mean_normalized'] = delta_SvsVF_vshift_ab_mean_normalized[
                Snr]
        dataVF[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean_normalized'] = delta_PXvsVF_vshift_ab_mean_normalized[
                Snr]
        dataPX[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean'] = dataVF[Snr][
            'vshifts']['delta_PXvsVF_vshift_ab_mean']
        dataPX[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean_normalized'] = dataVF[Snr]['vshifts'][
                'delta_PXvsVF_vshift_ab_mean_normalized']

    return dataVF, dataPX
Example #24
def fit_ellipse(xy, center_on_mean_xy = False):
    """
    Fit an ellipse to supplied data points.

    Args:
        :xy: 
            | coordinates of points to fit (Nx2 array)
        :center_on_mean_xy:
            | False, optional
            | Center ellipse on mean of xy 
            | (otherwise it might be offset due to solving 
            | the constrained minimization problem: aT*S*a, see ref below.)
            
    Returns:
        :v:
            | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta (rad.)]
            
    Reference:
        1. Fitzgibbon, A.W., Pilu, M., and Fischer R.B., 
        Direct least squares fitting of ellipses, 
        Proc. of the 13th International Conference on Pattern Recognition, 
        pp 253–257, Vienna, 1996.
    """
    # remove centroid:
#    center = xy.mean(axis=0)
#    xy = xy - center
    
    # Fit ellipse:
    x, y = xy[:,0:1], xy[:,1:2]
    D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))
    S, C = np.dot(D.T, D), np.zeros([6, 6])
    C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1
    U, s, V = np.linalg.svd(np.dot(np.linalg.inv(S), C))
    e = U[:, 0]
#    E, V =  np.linalg.eig(np.dot(np.linalg.inv(S), C))
#    n = np.argmax(np.abs(E))
#    e = V[:,n]
        
    # get ellipse axis lengths, center and orientation:
    b, c, d, f, g, a = e[1] / 2, e[2], e[3] / 2, e[4] / 2, e[5], e[0]
    
    # get ellipse center:
    num = b * b - a * c
    if num == 0:
        xc = 0
        yc = 0
    else:
        xc = ((c * d - b * f) / num) 
        yc = ((a * f - b * d) / num) 
    
    # get ellipse orientation:
    theta = np.arctan2(np.array(2 * b), np.array((a - c))) / 2
#    if b == 0:
#        if a > c:
#            theta = 0
#        else:
#            theta = np.pi/2
#    else:
#        if a > c:
#            theta = np.arctan2(2*b,(a-c))/2
#        else:
#            theta =  np.arctan2(2*b,(a-c))/2 + np.pi/2
        
    # axis lengths:
    up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
    down1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    down2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    a, b  = np.sqrt((up / down1)), np.sqrt((up / down2))


    # assert that a is the major axis (otherwise swap and correct angle)
    if(b > a):
        b, a = a, b
        # ensure the angle is between 0 and 2*pi
        theta = np.fmod(theta, 2.0 * np.pi)
        
    if center_on_mean_xy == True:
        xc,yc = xy.mean(axis=0)

    return np.hstack((a, b, xc, yc, theta))
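A quick self-check sketch: sample (slightly noisy) points from a known ellipse and verify that fit_ellipse recovers its parameters (values below are illustrative):

import numpy as np

t = np.linspace(0, 2 * np.pi, 100)
a0, b0, xc0, yc0, th0 = 3.0, 1.0, 0.5, -0.2, np.pi / 6
rng = np.random.default_rng(1)
x = xc0 + a0 * np.cos(t) * np.cos(th0) - b0 * np.sin(t) * np.sin(th0) + 0.001 * rng.normal(size=t.shape)
y = yc0 + a0 * np.cos(t) * np.sin(th0) + b0 * np.sin(t) * np.cos(th0) + 0.001 * rng.normal(size=t.shape)
v = fit_ellipse(np.vstack((x, y)).T)
print(v)   # first four entries ~ [3.0, 1.0, 0.5, -0.2]; last entry is the fitted orientation (rad)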
Example #25
def xyz_to_rfl(xyz, CSF = None, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {},\
                 interp_type = 'nd', k_neighbours = 4, verbosity = 0):
    """
    Approximate spectral reflectance of xyz values based on n-dimensional linear interpolation 
    or k nearest neighbour interpolation of samples from a standard reflectance set.
    
    Args:
        :xyz: 
            | ndarray with xyz values of target points.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If not None: input :xyz: contains raw rgb (float) values; the :cspace:
            | argument is then overridden and estimation is done directly in raw rgb space.
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'rfl_est' or str, optional
        :refspd: 
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65.
        :cieobs:
            | _CIEOBS, optional
            | CMF set used for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.

    Returns:
        :returns: 
            | :rfl_est:
            | ndarrays with estimated reflectance spectra.
    """

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']

    wlr = rfl[0]

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(
        refspd, wlr,
        kind='linear')  # force spd to same wavelength range as rfl

    # Calculate rgb values of standard rfl set under refspd:
    if CSF is None:
        # Calculate lab coordinates:
        xyz_rr, xyz_wr = spd_to_xyz(refspd,
                                    relative=True,
                                    rfl=rfl,
                                    cieobs=cieobs,
                                    out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy[
            'xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_rr = colortf(xyz_rr,
                         tf=cspace,
                         fwtf=cspace_tf_copy,
                         bwtf=cspace_tf_copy)[:, 0, :]
    else:
        # Calculate rgb coordinates from camera sensitivity functions
        rgb_rr = rfl_to_rgb(rfl, spd=refspd, CSF=CSF, wl=None)
        lab_rr = rgb_rr
        xyz = xyz
        lab_rr = np.round(lab_rr, _ROUNDING)  # speed up search

    # Convert xyz to lab-type values under refspd:
    if CSF is None:
        lab = colortf(xyz, tf=cspace, fwtf=cspace_tf_copy, bwtf=cspace_tf_copy)
    else:
        lab = xyz  # xyz contained rgb values !!!
        rgb = xyz
        lab = np.round(lab, _ROUNDING)  # speed up search

    if interp_type == 'nearest':
        # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
        # color coordinates for each value in lab_ur (i.e. smallest DE):
        # Construct cKDTree:
        tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

        # Interpolate rfls using k nearest neighbours and inverse distance weighting:
        d, inds = tree.query(lab, k=k_neighbours)
        if k_neighbours > 1:
            d += _EPS
            w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
            rfl_est = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(w, axis=1)
        else:
            rfl_est = rfl[inds + 1, :].copy()
    elif interp_type == 'nd':

        rfl_est = math.ndinterp1_scipy(lab_rr, rfl[1:], lab)

        _isnan = np.isnan(rfl_est[:, 0])

        if _isnan.any():  # do nearest neighbour method for those that fail using Delaunay (i.e. ndinterp1_scipy)

            # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
            # color coordinates for each value in lab_ur (i.e. smallest DE):
            # Construct cKDTree:
            tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

            # Interpolate rfls using k nearest neighbours and inverse distance weighting:
            d, inds = tree.query(lab[_isnan, ...], k=k_neighbours)

            if k_neighbours > 1:
                d += _EPS
                w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
                rfl_est_isnan = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(
                    w, axis=1)
            else:
                rfl_est_isnan = rfl[inds + 1, :].copy()
            rfl_est[_isnan, :] = rfl_est_isnan

    else:
        raise Exception('xyz_to_rfl(): unsupported interp_type!')

    rfl_est[rfl_est < 0] = 0  # can occur for points outside the convex hull of the standard rfl set.

    rfl_est = np.vstack((rfl[0], rfl_est))

    if ((verbosity > 0) | ('xyz_est' in out.split(',')) |
        ('lab_est' in out.split(',')) | ('DEi_ab' in out.split(',')) |
        ('DEa_ab' in out.split(','))) & (CSF is None):
        xyz_est, _ = spd_to_xyz(refspd,
                                rfl=rfl_est,
                                relative=True,
                                cieobs=cieobs,
                                out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy[
            'xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_est = colortf(xyz_est, tf=cspace, fwtf=cspace_tf_copy)[:, 0, :]
        DEi_ab = np.sqrt(((lab_est[:, 1:3] - lab[:, 1:3])**2).sum(axis=1))
        DEa_ab = DEi_ab.mean()
    elif ((verbosity > 0) | ('xyz_est' in out.split(',')) |
          ('rgb_est' in out.split(',')) | ('DEi_rgb' in out.split(',')) |
          ('DEa_rgb' in out.split(','))) & (CSF is not None):
        rgb_est = rfl_to_rgb(rfl_est[1:], spd=refspd, CSF=CSF, wl=wlr)
        xyz_est = rgb_est
        DEi_rgb = np.sqrt(((rgb_est - rgb)**2).sum(axis=1))
        DEa_rgb = DEi_rgb.mean()

    if verbosity > 0:
        if CSF is None:
            ax = plot_color_data(lab[...,1], lab[...,2], z = lab[...,0], \
                            show = False, cieobs = cieobs, cspace = cspace, \
                            formatstr = 'ro', label = 'Original')
            plot_color_data(lab_est[...,1], lab_est[...,2], z = lab_est[...,0], \
                            show = True, axh = ax, cieobs = cieobs, cspace = cspace, \
                            formatstr = 'bd', label = 'Rendered')
        else:
            n = 100  #min(rfl.shape[0]-1,rfl_est.shape[0]-1)
            s = np.random.permutation(rfl.shape[0] -
                                      1)[:min(n, rfl.shape[0] - 1)]
            st = np.random.permutation(rfl_est.shape[0] -
                                       1)[:min(n, rfl_est.shape[0] - 1)]
            fig = plt.figure()
            ax = np.zeros((3, ), dtype=object)
            ax[0] = fig.add_subplot(131)
            ax[1] = fig.add_subplot(132)
            ax[2] = fig.add_subplot(133, projection='3d')
            ax[0].plot(rfl[0], rfl[1:][s].T, linestyle='-')
            ax[0].set_title('Original RFL set (random selection of all)')
            ax[0].set_ylim([0, 1])
            ax[1].plot(rfl_est[0], rfl_est[1:][st].T, linestyle='--')
            ax[1].set_title('Estimated RFL set (random selection of targets)')
            ax[1].set_ylim([0, 1])
            ax[2].plot(rgb[st, 0],
                       rgb[st, 1],
                       rgb[st, 2],
                       'ro',
                       label='Original')
            ax[2].plot(rgb_est[st, 0],
                       rgb_est[st, 1],
                       rgb_est[st, 2],
                       'bd',
                       label='Rendered')
            ax[2].legend()
    if out == 'rfl_est':
        return rfl_est
    elif out == 'rfl_est,xyz_est':
        return rfl_est, xyz_est
    else:
        return eval(out)
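A minimal usage sketch for xyz_to_rfl (hypothetical target values; relies on the default IES TM30 reflectance set and D65 reference, as in the function defaults):

import numpy as np

xyz_targets = np.array([[40.0, 35.0, 30.0],
                        [25.0, 30.0, 45.0]])
rfl_est = xyz_to_rfl(xyz_targets, cieobs='1964_10', cspace='xyz',
                     interp_type='nd', k_neighbours=4)
print(rfl_est.shape)   # (3, n_wavelengths): wavelength row + one estimated spectrum per target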
Example #26
File: graphics.py Project: simongr2/luxpy
def plot_hue_bins(hbins = 16, start_hue = 0.0, scalef = 100, \
        plot_axis_labels = False, bin_labels = '#', plot_edge_lines = True, \
        plot_center_lines = False, plot_bin_colors = True, \
        plot_10_20_circles = False,\
        axtype = 'polar', ax = None, force_CVG_layout = False):
    """
    Makes basis plot for Color Vector Graphic (CVG).
    
    Args:
        :hbins:
            | 16 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :scalef:
            | 100, optional
            | Scale factor for graphic.
        :plot_axis_labels:
            | False, optional
            | Turns axis ticks on/off (True/False).
        :bin_labels:
            | None or list[str] or '#', optional
            | Plots labels at the bin center hues.
            |   - None: don't plot.
            |   - list[str]: list with str for each bin. 
            |                (len(:bin_labels:) = :nhbins:)
            |   - '#': plots number.
        :plot_edge_lines:
            | True or False, optional
            | Plot grey bin edge lines with '--'.
        :plot_center_lines:
            | False or True, optional
            | Plot colored lines at 'center' of hue bin.
        :plot_bin_colors:
            | True, optional
            | Colorize hue bins.
        :plot_10_20_circles:
            | False, optional
            | If True and :axtype: == 'cart': Plot white circles at 
            | 80%, 90%, 100%, 110% and 120% of :scalef: 
        :axtype: 
            | 'polar' or 'cart', optional
            | Make polar or Cartesian plot.
        :ax: 
            | None or 'new' or 'same', optional
            |   - None or 'new' creates new plot
            |   - 'same': continue plot on same axes.
            |   - axes handle: plot on specified axes.
        :force_CVG_layout:
            | False or True, optional
            | True: Force plot of basis of CVG on first encounter.
            
    Returns:
        :returns: 
            | gcf(), gca(), list with rgb colors for hue bins (for use in 
              other plotting fcns)
        
    """

    # Setup hbincenters and hsv_hues:
    if isinstance(hbins, float) | isinstance(hbins, int):
        nhbins = hbins
        dhbins = 360 / (nhbins)  # hue bin width
        hbincenters = np.arange(start_hue + dhbins / 2, 360, dhbins)
        hbincenters = np.sort(hbincenters)

    else:
        hbincenters = hbins
        idx = np.argsort(hbincenters)
        if isinstance(bin_labels, list) | isinstance(bin_labels, np.ndarray):
            bin_labels = bin_labels[idx]
        hbincenters = hbincenters[idx]
        nhbins = hbincenters.shape[0]
    hbincenters = hbincenters * np.pi / 180

    # Setup hbin labels:
    if bin_labels == '#':
        bin_labels = ['#{:1.0f}'.format(i + 1) for i in range(nhbins)]
    elif isinstance(bin_labels, str):
        bin_labels = [
            bin_labels + '{:1.0f}'.format(i + 1) for i in range(nhbins)
        ]

    # initializing the figure
    cmap = None
    if (ax is None) or (ax == 'new'):
        fig = plt.figure()
        newfig = True
    else:
        fig = plt.gcf()
        newfig = False
    rect = [0.1, 0.1, 0.8,
            0.8]  # setting the axis limits in [left, bottom, width, height]

    if axtype == 'polar':
        # the polar axis:
        if newfig == True:
            ax = fig.add_axes(rect, polar=True, frameon=False)
    else:
        #cartesian axis:
        if newfig == True:
            ax = fig.add_axes(rect)

    if (newfig == True) | (force_CVG_layout == True):

        # Calculate hue-bin boundaries:
        r = np.vstack((np.zeros(hbincenters.shape),
                       1. * scalef * np.ones(hbincenters.shape)))
        theta = np.vstack((np.zeros(hbincenters.shape), hbincenters))
        #t = hbincenters.copy()
        dU = np.roll(hbincenters.copy(), -1)
        dL = np.roll(hbincenters.copy(), 1)
        dtU = dU - hbincenters
        dtL = hbincenters - dL
        dtU[dtU < 0] = dtU[dtU < 0] + 2 * np.pi
        dtL[dtL < 0] = dtL[dtL < 0] + 2 * np.pi
        dL = hbincenters - dtL / 2
        dU = hbincenters + dtU / 2
        dt = (dU - dL)
        dM = dL + dt / 2

        # Setup color for plotting hue bins:
        hsv_hues = hbincenters - 30 * np.pi / 180
        hsv_hues = hsv_hues / hsv_hues.max()

        edges = np.vstack(
            (np.zeros(hbincenters.shape), dL))  # setup hue bin edges array

        if axtype == 'cart':
            if plot_center_lines == True:
                hx = r * np.cos(theta) * 1.2
                hy = r * np.sin(theta) * 1.2
            if bin_labels is not None:
                hxv = np.vstack((np.zeros(hbincenters.shape),
                                 1.4 * scalef * np.cos(hbincenters)))
                hyv = np.vstack((np.zeros(hbincenters.shape),
                                 1.4 * scalef * np.sin(hbincenters)))
            if plot_edge_lines == True:
                #hxe = np.vstack((np.zeros(hbincenters.shape),1.2*scalef*np.cos(dL)))
                #hye = np.vstack((np.zeros(hbincenters.shape),1.2*scalef*np.sin(dL)))
                hxe = np.vstack(
                    (0.1 * scalef * np.cos(dL), 1.5 * scalef * np.cos(dL)))
                hye = np.vstack(
                    (0.1 * scalef * np.sin(dL), 1.5 * scalef * np.sin(dL)))

        # Plot hue-bins:
        for i in range(nhbins):

            # Create color from hue angle:
            #c = np.abs(np.array(colorsys.hsv_to_rgb(hsv_hues[i], 0.75, 0.85)))
            c = np.abs(np.array(colorsys.hls_to_rgb(hsv_hues[i], 0.45, 0.5)))
            if i == 0:
                cmap = [c]
            else:
                cmap.append(c)

            if axtype == 'polar':
                if plot_edge_lines == True:
                    ax.plot(edges[:, i],
                            r[:, i] * 1.,
                            color='grey',
                            marker='None',
                            linestyle='--',
                            linewidth=1,
                            markersize=2)
                if plot_center_lines == True:
                    if np.mod(i, 2) == 1:
                        ax.plot(theta[:, i],
                                r[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1)
                    else:
                        ax.plot(theta[:, i],
                                r[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1,
                                markersize=10)
                if plot_bin_colors == True:
                    bar = ax.bar(dM[i],
                                 r[1, i],
                                 width=dt[i],
                                 color=c,
                                 alpha=0.25)
                if bin_labels is not None:
                    ax.text(hbincenters[i],
                            1.3 * scalef,
                            bin_labels[i],
                            fontsize=10,
                            horizontalalignment='center',
                            verticalalignment='center',
                            color=np.array([1, 1, 1]) * 0.45)
                if plot_axis_labels == False:
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
            else:
                axis_ = 1. * np.array(
                    [-scalef * 1.5, scalef * 1.5, -scalef * 1.5, scalef * 1.5])
                if plot_edge_lines == True:
                    ax.plot(hxe[:, i],
                            hye[:, i],
                            color='grey',
                            marker='None',
                            linestyle='--',
                            linewidth=1,
                            markersize=2)

                if plot_center_lines == True:
                    if np.mod(i, 2) == 1:
                        ax.plot(hx[:, i],
                                hy[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1)
                    else:
                        ax.plot(hx[:, i],
                                hy[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1,
                                markersize=10)
                if bin_labels is not None:
                    ax.text(hxv[1, i],
                            hyv[1, i],
                            bin_labels[i],
                            fontsize=10,
                            horizontalalignment='center',
                            verticalalignment='center',
                            color=np.array([1, 1, 1]) * 0.45)
                ax.axis(axis_)

        if plot_axis_labels == False:
            ax.set_xticklabels([])
            ax.set_yticklabels([])
        else:
            ax.set_xlabel("a'")
            ax.set_ylabel("b'")

        ax.plot(0, 0, color='grey', marker='+', linestyle=None, markersize=6)

        if (axtype != 'polar') & (plot_10_20_circles == True):
            r = np.array([0.8, 0.9, 1.1, 1.2]) * scalef  # plot white circles at 80, 90, 110 and 120 % of scalef (the 100 % circle is plotted in black below)
            plotcircle(radii=r,
                       angles=np.arange(0, 365, 5),
                       color='w',
                       linestyle='-',
                       axh=ax,
                       linewidth=0.5)
            plotcircle(radii=[scalef],
                       angles=np.arange(0, 365, 5),
                       color='k',
                       linestyle='-',
                       axh=ax,
                       linewidth=1)
            ax.text(0,
                    -0.75 * scalef,
                    '-20%',
                    fontsize=8,
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='w')
            ax.text(0,
                    -1.25 * scalef,
                    '+20%',
                    fontsize=8,
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='w')

        if (axtype != 'polar') & (plot_bin_colors == True) & (_CVG_BG
                                                              is not None):
            ax.imshow(_CVG_BG, origin='upper', extent=axis_)

    return fig, ax, cmap
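A minimal plotting sketch for plot_hue_bins (assumes matplotlib is available, as in the rest of this module):

fig, ax, cmap = plot_hue_bins(hbins=16, scalef=100, axtype='polar',
                              plot_bin_colors=True, bin_labels='#')
# plt.show()   # uncomment when running outside an interactive backend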
Example #27
def subsample_RFL_set(rfl, rflpath = '', samplefcn = 'rand', S = _CIE_ILLUMINANTS['E'], \
                      jab_ranges = None, jab_deltas = None, cieobs = _VF_CIEOBS, cspace = _VF_CSPACE, \
                      ax = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                      bx = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                      jx = None, limit_grid_radius = 0):
    """
    Sub-samples a spectral reflectance set by pixelization of color space.
    
    Args:
        :rfl: 
            | ndarray or str
            | Array with of str referring to a set of spectral reflectance 
            |  functions to be subsampled.
            | If str to file: file must contain data as columns, with first 
            |  column the wavelengths.
        :rflpath:
            | '' or str, optional
            | Path to folder with rfl-set specified in a str :rfl: filename.
        :samplefcn:
            | 'rand' or 'mean', optional
            |   -'rand': selects a random sample from the samples within each pixel
            |   -'mean': returns the mean spectral reflectance in each pixel.
        :S: 
            | _CIE_ILLUMINANTS['E'], optional
            | Illuminant used to calculate the color coordinates of the spectral 
            |  reflectance samples.
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            |  (ndarray.shape = (3,3), with  first axis: J,a,b, and second 
            |   axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. A ndarray of 3 deltas, uses
            | a city block sampling around each pixel center.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :cieobs:
            | _VF_CIEOBS or str, optional
            | Specifies CMF set used to calculate color coordinates.
        :ax: 
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :bx: 
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :jx: 
            | None, optional
            | Note that a not-None :jab_ranges: overrides :ax:, :bx: and :jx: input.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by axr,bxr.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
   
    Returns:
        :returns:
            | rflsampled, jabp
            | ndarrays with resp. the subsampled set of spectral reflectance 
            | functions and the pixel coordinate centers.
    """
    # Testing effects of sample set, pixel size and gamut size:
    if type(rfl) == str:
        rfl = pd.read_csv(os.path.join(rflpath, rfl),
                          header=None).values.T

    # Calculate Jab coordinates of samples:
    xyz, xyzw = spd_to_xyz(S, cieobs=cieobs, rfl=rfl.copy(), out=2)
    cspace_pars = cspace.copy()
    cspace_pars.pop('type')
    cspace_pars['xyzw'] = xyzw
    jab = colortf(xyz, tf=cspace['type'], fwtf=cspace_pars)

    # Generate grid and get samples in each grid:
    gridp, idxp, jabp, pixelsamplenrs, pixelIDs = get_pixel_coordinates(
        jab,
        jab_ranges=jab_ranges,
        jab_deltas=jab_deltas,
        limit_grid_radius=limit_grid_radius)

    # Get rfls from set using sampling function (mean or rand):
    W = rfl[:1]
    R = rfl[1:]
    rflsampled = np.zeros((len(idxp), R.shape[1]))
    rflsampled.fill(np.nan)
    for i in range(len(idxp)):
        if samplefcn == 'mean':
            rfl_i = np.nanmean(rfl[pixelsamplenrs[i], :], axis=0)
        else:
            samplenr_i = np.random.randint(len(pixelsamplenrs[i]))
            rfl_i = rfl[pixelsamplenrs[i][samplenr_i], :]
        rflsampled[i, :] = rfl_i
    rflsampled = np.vstack((W, rflsampled))
    return rflsampled, jabp
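A hedged usage sketch for subsample_RFL_set (assumes luxpy's IES TM30 reflectance set _CRI_RFL and the vector-field constants used above are available in this namespace):

rfl_set = _CRI_RFL['ies-tm30']['4880']['5nm']   # example reflectance set from luxpy (assumed importable)
rfl_sub, jabp = subsample_RFL_set(rfl_set, samplefcn='mean', limit_grid_radius=_VF_MAXR)
print(rfl_sub.shape, jabp.shape)   # subsampled spectra (incl. wavelength row) and pixel centers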
Example #28
    def __init__(self, spd = None, wl = None, ax0iswl = True, dtype = 'S', \
                 wl_new = None, interp_method = 'auto', negative_values_allowed = False, extrap_values = None,\
                 norm_type = None, norm_f = 1,\
                 header = None, sep = ','):
        """
        Initialize instance of SPD.
        
        Args:
            :spd: 
                | None or ndarray or str, optional
                | If None: self.value is initialized with zeros.
                | If str: spd contains filename.
                | If ndarray: ((wavelength, spectra)) or (spectra). 
                |     If latter, :wl: should contain the wavelengths.
            :wl: 
                | None or ndarray, optional
                | Wavelengths.
                | Either specified as a 3-vector ([start, stop, spacing]) 
                | or as full wavelength array.
            :ax0iswl:
                | True, optional
                | Signals that first axis of :spd: contains wavelengths.
            :dtype:
                | 'S', optional
                | Type of spectral object (e.g. 'S' for source spectrum, 'R' for
                  reflectance spectra, etc.)
                | See SPD._INTERP_TYPES for more options. 
                | This is used to automatically determine the correct kind of
                  interpolation method according to CIE15-2018.
            :wl_new: 
                | None or ndarray with wavelength range, optional
                | If None: don't interpolate, else perform interpolation.
            :interp_method:
                | - 'auto', optional
                | If 'auto': method is determined based on :dtype:
            :negative_values_allowed:
                | False, optional (for cie_interp())
                | Spectral data cannot be negative. Values < 0 are therefore 
                  clipped when set to False.
            :extrap_values:
                | None, optional
                | float or list or ndarray with values to extrapolate
                | If None: use CIE recommended 'closest value' approach.
            :norm_type:
                | None or str, optional
                | - 'lambda': make lambda in norm_f equal to 1
                | - 'area': area-normalization times norm_f
                | - 'max': max-normalization times norm_f
            :norm_f:
                | 1, optional
                | Normalization factor that determines the size of the normalization 
                | for 'max' and 'area', 
                | or which wavelength is normalized to 1 for the 'lambda' option.
        """
        if spd is not None:
            if isinstance(spd, str):
                spd = SPD.read_csv_(self, file=spd, header=header, sep=sep)
            if ax0iswl == True:
                self.wl = spd[0]
                self.value = spd[1:]
            else:
                self.wl = wl
                if (self.wl.size == 3):
                    self.wl = np.arange(self.wl[0], self.wl[1] + 1, self.wl[2])
                self.value = spd
            if self.value.shape[1] != self.wl.shape[0]:
                raise Exception(
                    'SPD.__init__(): Dimensions of wl and spd do not match.')
        else:
            if (wl is None):
                self.wl = SPD._WL3
            else:
                self.wl = wl
            if (self.wl.size == 3):
                self.wl = np.arange(self.wl[0], self.wl[1] + 1, self.wl[2])
            self.value = np.zeros((1, self.wl.size))

        self.dtype = dtype
        self.shape = self.value.shape
        self.N = self.shape[0]

        if wl_new is not None:
            if interp_method == 'auto':
                interp_method = dtype
            self.cie_interp(wl_new,
                            kind=interp_method,
                            negative_values_allowed=negative_values_allowed,
                            extrap_values=extrap_values)
        if norm_type is not None:
            self.normalize(norm_type=norm_type, norm_f=norm_f)
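A minimal usage sketch for the constructor documented above, with made-up spectral data (it assumes the class is exposed as luxpy.SPD):

import numpy as np
import luxpy as lx

wl = np.arange(380, 781, 5)                          # 5 nm wavelength grid
values = np.exp(-0.5 * ((wl - 560.0) / 80.0)**2)     # made-up smooth spectrum

# ax0iswl=True: the first row of :spd: holds the wavelengths.
S = lx.SPD(spd=np.vstack((wl, values)), ax0iswl=True, dtype='S',
           wl_new=np.arange(380, 781, 1),            # interpolate to a 1 nm grid
           interp_method='auto',                     # pick interpolation method from dtype
           norm_type='max', norm_f=1)                # max-normalize to 1
print(S.wl.shape, S.value.shape, S.N)                # inspect the interpolated spectrum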
Example #29
def PX_colorshift_model(Jabt,
                        Jabr,
                        jab_ranges=None,
                        jab_deltas=None,
                        limit_grid_radius=0):
    """
    Pixelates the color space and calculates the color shifts in each pixel.
    
    Args:
        :Jabt: 
            | ndarray with color coordinates under the (single) test SPD.
        :Jabr: 
            | ndarray with color coordinates under the (single) reference SPD.  
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            | (ndarray.shape = (3,3), with first axis: J,a,b, and second 
            | axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. An ndarray of 3 deltas uses
            | city-block sampling around each pixel center.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by axr, bxr.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
            
    Returns:
        :returns: 
            | dict with the following keys:
            |   - 'Jab': dict with ndarrays for:
            |        Jabt, Jabr, DEi, DEi_ab (only ab-coordinates), DEa (mean) 
            |         and DEa_ab
            |   - 'vshifts': dict with:
            |      * 'vectorshift': ndarray with vector shifts between average
            |                       Jabt and Jabr for each pixel
            |      * 'vectorshift_ab': ndarray with vector shifts averaged 
            |                          over J for each pixel
            |      * 'vectorshift_ab_J0': ndarray with vector shifts averaged 
            |                             over J for each pixel of J=0 plane.
            |      * 'vectorshift_len': length of 'vectorshift'
            |      * 'vectorshift_ab_len': length of 'vectorshift_ab'
            |      * 'vectorshift_ab_J0_len': length of 'vectorshift_ab_J0'
            |      * 'vectorshift_len_DEnormed': length of 'vectorshift' 
            |                                    normalized to 'DEa'
            |      * 'vectorshift_ab_len_DEnormed': length of 'vectorshift_ab' 
            |                                       normalized to 'DEa_ab'
            |      * 'vectorshift_ab_J0_len_DEnormed': length of 'vectorshift_ab_J0' 
            |                                          normalized to 'DEa_ab'
            |   - 'pixeldata': dict with pixel info:
            |      * 'grid': ndarray with coordinates of all pixel centers.
            |      * 'idx': list[int] with pixel index for each non-empty pixel
            |      * 'Jab': ndarray with center coordinates of non-empty pixels
            |      * 'samplenrs': list[list[int]] with sample numbers belonging to 
            |                     each non-empty pixel
            |      * 'IDs': summarizing list, 
            |              with column order: idxp, jabp, samplenrs
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |                  vector-field and circle-fields 
            |      * 'vectorfield': dict with ndarrays for the ab-coordinates 
            |         under the ref. (axr, bxr) and test (axt, bxt) illuminants,
            |         centered at the pixel centers corresponding to the 
            |         ab-coordinates of the reference illuminant.
    """

    # get pixelIDs of all samples under ref. conditions:
    gridp, idxp, jabp, pixelsamplenrs, pixelIDs = get_pixel_coordinates(
        Jabr,
        jab_ranges=jab_ranges,
        jab_deltas=jab_deltas,
        limit_grid_radius=limit_grid_radius)

    # get average Jab coordinates for each pixel:
    Npixels = len(idxp)  # number of non-empty pixels
    Jabr_avg = np.zeros((gridp.shape[0], 3))
    Jabr_avg.fill(np.nan)
    Jabt_avg = Jabr_avg.copy()
    for i in range(Npixels):
        Jabr_avg[idxp[i], :] = Jabr[pixelsamplenrs[i], :].mean(axis=0)
        Jabt_avg[idxp[i], :] = Jabt[pixelsamplenrs[i], :].mean(axis=0)
        #jabtemp = Jabr[pixelsamplenrs[i],:]
        #jabtempm = Jabr_avg[idxp[i],:]

    # calculate Jab vector shift:
    vectorshift = Jabt_avg - Jabr_avg

    # calculate ab vector shift:
    uabs = gridp[gridp[:, 0] == 0, 1:3]  #np.unique(gridp[:,1:3],axis=0)
    vectorshift_ab_J0 = np.zeros((uabs.shape[0], 2))
    vectorshift_ab_J0.fill(np.nan)
    vectorshift_ab = np.zeros((vectorshift.shape[0], 2))
    vectorshift_ab.fill(np.nan)
    for i in range(uabs.shape[0]):
        cond = (gridp[:, 1:3] == uabs[i, :]).all(axis=1)
        if cond.any() & np.logical_not(
                np.isnan(vectorshift[cond, 1:3]).all()
        ):  #last condition is to avoid warning of taking nanmean of empty slice when all are NaNs
            vectorshift_ab_J0[i, :] = np.nanmean(vectorshift[cond, 1:3],
                                                 axis=0)
            vectorshift_ab[cond, :] = np.nanmean(vectorshift[cond, 1:3],
                                                 axis=0)

    # Calculate length of shift vectors:
    vectorshift_len = np.sqrt((vectorshift**2).sum(axis=vectorshift.ndim - 1))
    vectorshift_ab_len = np.sqrt(
        (vectorshift_ab**2).sum(axis=vectorshift_ab.ndim - 1))
    vectorshift_ab_J0_len = np.sqrt(
        (vectorshift_ab_J0**2).sum(axis=vectorshift_ab_J0.ndim - 1))

    # Calculate average DE for normalization of vectorshifts
    DEi_Jab_avg = np.sqrt(((Jabt - Jabr)**2).sum(axis=Jabr.ndim - 1))
    DE_Jab_avg = DEi_Jab_avg.mean(axis=0)
    DEi_ab_avg = np.sqrt(
        ((Jabt[..., 1:3] - Jabr[..., 1:3])**2).sum(axis=Jabr[..., 1:3].ndim -
                                                   1))
    DE_ab_avg = DEi_ab_avg.mean(axis=0)

    # calculate vectorfield:
    axr = uabs[:, 0, None]
    bxr = uabs[:, 1, None]
    axt = axr + vectorshift_ab_J0[:, 0, None]
    bxt = bxr + vectorshift_ab_J0[:, 1, None]

    data = {
        'Jab': {
            'Jabr': Jabr_avg,
            'Jabt': Jabt_avg,
            'DEi': DEi_Jab_avg,
            'DEi_ab': DEi_ab_avg,
            'DEa': DE_Jab_avg,
            'DEa_ab': DE_ab_avg
        },
        'vshifts': {
            'vectorshift': vectorshift,
            'vectorshift_ab': vectorshift_ab,
            'vectorshift_ab_J0': vectorshift_ab_J0,
            'vectorshift_len': vectorshift_len,
            'vectorshift_ab_len': vectorshift_ab_len,
            'vectorshift_ab_J0_len': vectorshift_ab_J0_len,
            'vectorshift_len_DEnormed': vectorshift_len / DE_Jab_avg,
            'vectorshift_ab_len_DEnormed': vectorshift_ab_len / DE_ab_avg,
            'vectorshift_ab_J0_len_DEnormed': vectorshift_ab_J0_len / DE_ab_avg
        },
        'pixeldata': {
            'grid': gridp,
            'idx': idxp,
            'Jab': jabp,
            'samplenrs': pixelsamplenrs,
            'IDs': pixelIDs
        },
        'fielddata': {
            'vectorfield': {
                'axr': axr,
                'bxr': bxr,
                'axt': axt,
                'bxt': bxt
            }
        }
    }
    return data
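A minimal sketch of calling the pixelation model above on synthetic Jab data (random coordinates and a small uniform shift, purely illustrative; it relies on the module-level defaults _VF_DELTAR and _VF_MAXR used elsewhere in this file):

import numpy as np

np.random.seed(1)
Jabr = np.hstack((70 + 5 * np.random.randn(500, 1),   # J around 70
                  30 * np.random.randn(500, 2)))      # a, b around 0
Jabt = Jabr + np.array([0.0, 2.0, -1.0])              # small uniform color shift

px = PX_colorshift_model(Jabt, Jabr,
                         jab_deltas=np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR]),
                         limit_grid_radius=_VF_MAXR)
print(px['Jab']['DEa'], px['vshifts']['vectorshift_ab'].shape)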
Example #30
def VF_colorshift_model(S, cri_type = _VF_CRI_DEFAULT, model_type = _VF_MODEL_TYPE, \
                        cspace = _VF_CSPACE, sampleset = None, pool = False, \
                        pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),'Cref' : _VF_MAXR, 'sig' : _VF_SIG}, \
                        vfcolor = 'k',verbosity = 0):
    """
    Applies full vector field model calculations to spectral data.
    
    Args:
        :S: 
            | numpy.ndarray with spectral data.
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies a degree 5 or degree 6 polynomial model in ab-coordinates.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool: 
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            | for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            | for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                 'Cref' : _VF_MAXR, 
            |                 'sig' : _VF_SIG, 
            |                 'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
            
    Returns:
        :returns: 
            | list[dict] (each list element refers to a different test SPD)
            | with the following keys:
            |   - 'Source': dict with ndarrays of the S, cct and duv of source spd.
            |   - 'metrics': dict with ndarrays for:
            |         * Rf (color fidelity: base + metameric shift)
            |         * Rt (metameric uncertainty index) 
            |         * Rfi (specific color fidelity indices)
            |         * Rti (specific metameric uncertainty indices)
            |         * cri_type (str with cri_type)
            |   - 'Jab': dict with ndarrays for Jabt, Jabr, DEi
            |   - 'dC/C_dH_x_sig' : 
            |           np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T
            |           See get_poly_model() for more info.
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |      vector-field and circle-fields: 
            |        * 'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 
            |                           'axr' : vfaxr, 'bxr' : vfbxr},
            |        * 'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 
            |                           'axr' : cfaxr, 'bxr' : cfbxr}},
            |   - 'modeldata' : dict with model info:
            |                {'pmodel': pmodel, 
            |                'pcolorshift' : pcolorshift, 
            |                  'dab_model' : dab_model, 
            |                  'dab_res' : dab_res,
            |                  'dab_std' : dab_std,
            |                  'model_type' : model_type, 
            |                  'fmodel' : poly_model,
            |                  'Jabtm' : Jabtm, 
            |                  'Jabrm' : Jabrm, 
            |                  'DEim' : DEim},
            |   - 'vshifts': dict with various vector shifts:
            |        * 'Jabshiftvector_r_to_t' : ndarray with difference vectors
            |                                    between jabt and jabr.
            |        * 'vshift_ab_s' : vshift_ab_s: ab-shift vectors of samples 
            |        * 'vshift_ab_s_vf' : vshift_ab_s_vf: ab-shift vectors of 
            |                             VF model predictions of samples.
            |        * 'vshift_ab_vf' : vshift_ab_vf: ab-shift vectors of VF 
            |                            model predictions of vector field grid.
    """

    if type(cri_type) == str:
        cri_type_str = cri_type
    else:
        cri_type_str = None

    # Calculate Rf, Rfi and Jabr, Jabt:
    Rf, Rfi, Jabt, Jabr, cct, duv, cri_type = spd_to_cri(
        S,
        cri_type=cri_type,
        out='Rf,Rfi,jabt,jabr,cct,duv,cri_type',
        sampleset=sampleset)

    # In case of multiple source SPDs, pool:
    if (len(Jabr.shape) == 3) & (Jabr.shape[1] > 1) & (pool == True):
        #Nsamples = Jabr.shape[0]
        Jabr = np.transpose(Jabr, (1, 0, 2))  # set lamps on first dimension
        Jabt = np.transpose(Jabt, (1, 0, 2))
        Jabr = Jabr.reshape(Jabr.shape[0] * Jabr.shape[1],
                            3)  # put all lamp data one after the other
        Jabt = Jabt.reshape(Jabt.shape[0] * Jabt.shape[1], 3)
        Jabt = Jabt[:, None, :]  # add dim = 1
        Jabr = Jabr[:, None, :]

    out = [{} for _ in range(Jabr.shape[1])]  #initialize empty list of dicts
    if pool == False:
        N = Jabr.shape[1]
    else:
        N = 1
    for i in range(N):

        Jabr_i = Jabr[:, i, :].copy()
        Jabr_i = Jabr_i[:, None, :]
        Jabt_i = Jabt[:, i, :].copy()
        Jabt_i = Jabt_i[:, None, :]

        DEi = np.sqrt((Jabr_i[..., 0] - Jabt_i[..., 0])**2 +
                      (Jabr_i[..., 1] - Jabt_i[..., 1])**2 +
                      (Jabr_i[..., 2] - Jabt_i[..., 2])**2)

        # Determine polynomial model:
        poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = get_poly_model(
            Jabt_i, Jabr_i, modeltype=_VF_MODEL_TYPE)

        # Apply model at fixed hues:
        href = pcolorshift['href']
        Cref = pcolorshift['Cref']
        sig = pcolorshift['sig']
        dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(
            poly_model, pmodel, dCHoverC_res, hx=href, Cxr=Cref, sig=sig)

        # Calculate deshifted a,b values on original samples:
        Jt = Jabt_i[..., 0].copy()
        at = Jabt_i[..., 1].copy()
        bt = Jabt_i[..., 2].copy()
        Jr = Jabr_i[..., 0].copy()
        ar = Jabr_i[..., 1].copy()
        br = Jabr_i[..., 2].copy()
        ar = ar + dab_model[:, 0:1]  # deshift reference to model prediction
        br = br + dab_model[:, 1:2]  # deshift reference to model prediction

        Jabtm = np.hstack((Jt, at, bt))
        Jabrm = np.hstack((Jr, ar, br))

        # calculate color differences between test and deshifted ref:
        #        DEim = np.sqrt((Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2)
        DEim = np.sqrt(0 * (Jr - Jt)**2 + (at - ar)**2 +
                       (bt - br)**2)  # J is not used

        # Apply scaling function to convert DEim to Rti:
        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        avg = cri_type['avg']
        Rfi_deshifted = scale_fcn(DEim, scale_factor)
        Rf_deshifted = scale_fcn(avg(DEim, axis=0), scale_factor)

        rms = lambda x: np.sqrt(np.sum(x**2, axis=0) / x.shape[0])
        Rf_deshifted_rms = scale_fcn(rms(DEim), scale_factor)

        # Generate vector field:
        vfaxt, vfbxt, vfaxr, vfbxr = generate_vector_field(
            poly_model,
            pmodel,
            axr=np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            bxr=np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            limit_grid_radius=_VF_MAXR,
            color=0)

        # Calculate ab-shift vectors of samples and VF model predictions:
        vshift_ab_s = calculate_shiftvectors(Jabt_i,
                                             Jabr_i,
                                             average=False,
                                             vtype='ab')[:, 0, 0:3]
        vshift_ab_s_vf = calculate_shiftvectors(Jabtm,
                                                Jabrm,
                                                average=False,
                                                vtype='ab')

        # Calculate ab-shift vectors using vector field model:
        Jabt_vf = np.hstack((np.zeros((vfaxt.shape[0], 1)), vfaxt, vfbxt))
        Jabr_vf = np.hstack((np.zeros((vfaxr.shape[0], 1)), vfaxr, vfbxr))
        vshift_ab_vf = calculate_shiftvectors(Jabt_vf,
                                              Jabr_vf,
                                              average=False,
                                              vtype='ab')

        # Generate circle field:
        x, y = plotcircle(radii=np.arange(0, _VF_MAXR + _VF_DELTAR, 10),
                          angles=np.arange(0, 359, 1),
                          out='x,y')
        cfaxt, cfbxt, cfaxr, cfbxr = generate_vector_field(
            poly_model,
            pmodel,
            make_grid=False,
            axr=x[:, None],
            bxr=y[:, None],
            limit_grid_radius=_VF_MAXR,
            color=0)

        out[i] = {
            'Source': {
                'S': S,
                'cct': cct[i],
                'duv': duv[i]
            },
            'metrics': {
                'Rf': Rf[:, i],
                'Rt': Rf_deshifted,
                'Rt_rms': Rf_deshifted_rms,
                'Rfi': Rfi[:, i],
                'Rti': Rfi_deshifted,
                'cri_type': cri_type_str
            },
            'Jab': {
                'Jabt': Jabt_i,
                'Jabr': Jabr_i,
                'DEi': DEi
            },
            'dC/C_dH_x_sig':
            np.vstack((dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig)).T,
            'fielddata': {
                'vectorfield': {
                    'axt': vfaxt,
                    'bxt': vfbxt,
                    'axr': vfaxr,
                    'bxr': vfbxr
                },
                'circlefield': {
                    'axt': cfaxt,
                    'bxt': cfbxt,
                    'axr': cfaxr,
                    'bxr': cfbxr
                }
            },
            'modeldata': {
                'pmodel': pmodel,
                'pcolorshift': pcolorshift,
                'dab_model': dab_model,
                'dab_res': dab_res,
                'dab_std': dab_std,
                'model_type': model_type,
                'fmodel': poly_model,
                'Jabtm': Jabtm,
                'Jabrm': Jabrm,
                'DEim': DEim
            },
            'vshifts': {
                'Jabshiftvector_r_to_t': np.hstack(
                    (Jt - Jr, at - ar, bt - br)),
                'vshift_ab_s': vshift_ab_s,
                'vshift_ab_s_vf': vshift_ab_s_vf,
                'vshift_ab_vf': vshift_ab_vf
            }
        }

    return out
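A minimal usage sketch of the vector-field model above on a single test SPD (it assumes luxpy exposes the F4 illuminant under _CIE_ILLUMINANTS; all other keys and defaults are left as documented above):

import luxpy as lx

S = lx._CIE_ILLUMINANTS['F4']                 # assumed illuminant key: one test SPD
vf = VF_colorshift_model(S, pool=False, verbosity=0)

res = vf[0]                                   # one dict per test SPD
print(res['Source']['cct'], res['metrics']['Rf'], res['metrics']['Rt'])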