Example #1
def todim(x, tshape, add_axis=1, equal_shape=False):
    """
    Expand x to dims that are broadcast-compatible with the shape of another array.
    
    Args:
        :x: 
            | ndarray
        :tshape: 
            | tuple with target shape
        :add_axis:
            | 1, optional
            | Determines where in x.shape an axis should be added
        :equal_shape:
            | False or True, optional
            | True: expand :x: to identical dimensions (specified by :tshape:)
            
    Returns:
        :returns:
            | ndarray broadcast-compatible with tshape.
    """
    if x is None:
        return np.broadcast_arrays(x, np.ones(tshape))[0]
    else:
        x = np2d(x)
        sx = x.shape
        lsx = len(sx)
        ltshape = len(tshape)
        if (sx == tshape):
            pass
        else:

            if ((lsx == 1) | (sx == (1, tshape[-1])) |
                (sx == (tshape[-1], 1))):
                if (sx == (tshape[-1], 1)):
                    x = x.T
                if lsx != ltshape:
                    x = np.expand_dims(x, 0)
            elif (lsx == 2):
                if (ltshape == 3):
                    sd = np.setdiff1d(tshape, sx, assume_unique=True)
                    if len(sd) == 0:
                        ax = add_axis
                    else:
                        ax = np.where(tshape == sd)[0][0]
                    x = np.expand_dims(x, ax)
                else:
                    raise Exception(
                        "todim(x,tshape): dimensions do not match for 2d arrays."
                    )
            else:
                raise Exception(
                    "todim(x,tshape): no matching dimensions between 3d x and tshape."
                )
        if equal_shape == False:
            return x
        else:
            return np.ones(
                tshape) * x  #make dims of x equal to those of a (tshape)
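A minimal usage sketch (not from the source), assuming the function above is pasted into a script where numpy is imported as np and np2d behaves like np.atleast_2d (which is what luxpy's helper does):

import numpy as np
np2d = np.atleast_2d          # hypothetical stand-in for luxpy's np2d helper

x = np.array([0.1, 0.2, 0.3])                        # 1d input
print(todim(x, (10, 3)).shape)                       # (1, 3): broadcast-compatible with (10, 3)
print(todim(x, (10, 3), equal_shape=True).shape)     # (10, 3): fully expanded to the target shape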
Example #2
def crowdingdistance(F):
    """
    Computes the crowding distance of a nondominated front.
    The crowding distance gives a measure of how close the individuals are
    with regard to their neighbors. The higher this value, the greater the
    spacing. This is used to promote better diversity in the population.

    Args:
       F: 
           | an m x mu ndarray with mu individuals and m objectives

    Returns:
       cdist: 
           | a mu-length vector with the crowding distance of each individual
    """
    m, mu = F.shape  #gets the size of F

    if mu == 2:
        cdist = np.vstack((np.inf, np.inf))
        return cdist

    #[Fs, Is] = sort(F,2); #sorts the objectives by individuals
    Is = F.argsort(axis=1)
    Fs = np.sort(F, axis=1)

    # Creates the numerator
    C = Fs[:, 2:] - Fs[:, :-2]
    C = np.hstack((np.inf * np.ones((m, 1)), C, np.inf * np.ones(
        (m, 1))))  #complements with inf in the extremes

    # Indexing to permute the C matrix in the right ordering
    Aux = np.arange(m).repeat(mu).reshape(m, mu)
    ind = np.ravel_multi_index(
        (Aux.flatten(), Is.flatten()),
        (m, mu
         ))  #converts to lin. indexes # ind = sub2ind([m, mu], Aux(:), Is(:));
    C2 = C.flatten().copy()
    C2[ind] = C2.flatten()
    C = C2.reshape((m, mu))

    # Constructs the denominator
    den = np.repeat((Fs[:, -1] - Fs[:, 0])[:, None], mu, axis=1)

    # Calculates the crowding distance
    cdist = (C / den).sum(axis=0)
    cdist = cdist.flatten()  # ensure a flat (1D) vector
    return cdist
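A small self-contained check (not from the source): individuals at the extremes of the front get an infinite crowding distance, interior individuals a finite one.

import numpy as np

F = np.array([[1.0, 2.0, 3.0, 4.0],       # m = 2 objectives, mu = 4 individuals
              [4.0, 3.0, 2.0, 1.0]])
print(crowdingdistance(F))                # [inf, finite, finite, inf]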
Example #3
def parse_x1x2_parameters(x,
                          target_shape,
                          catmode,
                          expand_2d_to_3d=None,
                          default=[1.0, 1.0]):
    """
   Parse input parameters x and broadcast them to target_shape for easy calculation. 
   
   | Input in main function can now be a single value valid for all xyzw or 
     an array with a different value for each xyzw.
   
   Args:
        :x: 
            | list[float, float] or ndarray
        :target_shape: 
            | tuple with shape information
        :catmode: 
            | '1>0>2', optional
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :expand_2d_to_3d: 
            | None, optional 
            | [will be removed in future, serves no purpose]
            | Expand :x: from 2 to 3 dimensions.
        :default:
            | [1.0,1.0], optional
            | Default values for :x:
    
   Returns:
       :returns: 
           | (ndarray, ndarray) for x10 and x20

   """
    if x is None:
        x10 = np.ones(target_shape) * default[0]
        if (catmode == '1>0>2') | (catmode == '1>2'):
            x20 = np.ones(target_shape) * default[1]
        else:
            x20 = np.ones(target_shape) * np.nan
    else:
        x = np2d(x)
        if (catmode == '1>0>2') | (catmode == '1>2'):
            if x.shape[-1] == 2:
                x10 = np.ones(target_shape) * x[..., 0]
                x20 = np.ones(target_shape) * x[..., 1]
            else:
                x10 = np.ones(target_shape) * x
                x20 = x10.copy()
        elif catmode == '1>0':
            x10 = np.ones(target_shape) * x[..., 0]
            x20 = np.ones(target_shape) * np.nan
        elif catmode == '0>2':
            x10 = np.ones(target_shape) * np.nan
            x20 = np.ones(target_shape) * x[..., 0]
    return x10, x20
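A minimal sketch (not from the source) of the x is None branch, which needs nothing beyond numpy; it shows how the defaults are broadcast to target_shape.

import numpy as np

x10, x20 = parse_x1x2_parameters(None, target_shape=(5, 3), catmode='1>0>2')
print(x10.shape, x20.shape)    # (5, 3) (5, 3), filled with the defaults [1.0, 1.0]
x10, x20 = parse_x1x2_parameters(None, target_shape=(5, 3), catmode='1>0')
print(np.isnan(x20).all())     # True: x20 is unused in a one-step '1>0' CAT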
Example #4
def plotDL(ccts = None, cieobs =_CIEOBS, cspace = _CSPACE, axh = None, \
           show = True, force_daylight_below4000K = False, cspace_pars = {}, \
           formatstr = 'k-',  **kwargs):
    """
    Plot daylight locus.
    
    Args: 
        :ccts: 
            | None or list[float], optional
            | None defaults to [4000 K to 1e19 K] in 100 steps on a log10 scale.
        :force_daylight_below4000K: 
            | False or True, optional
            | CIE daylight phases are not defined below 4000 K. 
            | If True plot anyway.
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show: 
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :formatstr:
            | 'k-' or str, optional
            | Format str for plotting (see ?matplotlib.pyplot.plot)
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace: 
              (for use with luxpy.colortf())
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
    
    Returns:
        :returns: 
            | None (:show: == True) 
            |  or 
            | handle to current axes (:show: == False)
    """
    
    if ccts is None:
        ccts = 10**np.linspace(np.log10(4000.0), np.log10(10.0**19.0), 100)
        
    xD,yD = daylightlocus(ccts, force_daylight_below4000K = force_daylight_below4000K)
    Y = 100*np.ones(xD.shape)
    DL =  Yxy_to_xyz(np.vstack((Y, xD,yD)).T)
    DL = colortf(DL, tf = cspace, tfa0 = cspace_pars)
    Y,x,y = asplit(DL)
    
    axh = plot_color_data(x,y,axh = axh, cieobs = cieobs, cspace = cspace, show=show, formatstr=formatstr, **kwargs)    
    
    if show == False:
        return axh
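A hedged usage sketch, assuming the function and its helpers (daylightlocus, colortf, plot_color_data) are available through the luxpy package; the packaging path is an assumption, only the parameters documented above are used.

import luxpy as lx

axh = lx.plotDL(ccts=[4000, 6500, 20000], cspace='Yxy', formatstr='b--', show=False)
# With show=False the axes handle is returned, so more data can be added to the same diagram.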
Example #5
def xyz_to_cct_HA(xyzw):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT). 
    
    Args:
        :xyzw: 
            | ndarray of tristimulus values
        
    Returns:
        :cct: 
            | ndarray of correlated color temperatures estimates
    
    References:
        1. `Hernández-Andrés, Javier; Lee, RL; Romero, J (September 20, 1999). 
        Calculating Correlated Color Temperatures Across the Entire Gamut 
        of Daylight and Skylight Chromaticities.
        Applied Optics. 38 (27), 5703–5709.
        <https://www.osapublishing.org/ao/abstract.cfm?uri=ao-38-27-5703>`_
            
    Notes: 
        According to the paper the error is small from 3000 K to 800 000 K, but a test 
        with Planckian radiators showed errors up to 20% around 500 000 K; 
        e > 0.05 for T > 200 000 K, e > 0.1 for T > 300 000 K, ...
    """
    if len(xyzw.shape)>2:
        raise Exception('xyz_to_cct_HA(): Input xyzw.ndim must be <= 2 !')
        
    out_of_range_code = np.nan
    xe = [0.3366, 0.3356]
    ye = [0.1735, 0.1691]
    A0 = [-949.86315, 36284.48953]
    A1 = [6253.80338, 0.00228]
    t1 = [0.92159, 0.07861]
    A2 = [28.70599, 5.4535*1e-36]
    t2 = [0.20039, 0.01543]
    A3 = [0.00004, 0.0]
    t3 = [0.07125,1.0]
    cct_ranges = np.array([[3000.0,50000.0],[50000.0,800000.0]])
    
    Yxy = xyz_to_Yxy(xyzw)
    CCT = np.ones((1,Yxy.shape[0]))*out_of_range_code
    for i in range(2):
        n = (Yxy[:,1]-xe[i])/(Yxy[:,2]-ye[i])
        CCT_i = np2d(np.array(A0[i] + A1[i]*np.exp(np.divide(-n,t1[i])) + A2[i]*np.exp(np.divide(-n,t2[i])) + A3[i]*np.exp(np.divide(-n,t3[i]))))
        p = (CCT_i >= (1.0-0.05*(i == 0))*cct_ranges[i][0]) & (CCT_i < (1.0+0.05*(i == 0))*cct_ranges[i][1])
        CCT[p] = CCT_i[p]
        p = (CCT_i < (1.0-0.05)*cct_ranges[0][0]) #smaller than smallest valid CCT value
        CCT[p] = -1
   
    if (np.isnan(CCT.sum()) == True) | (np.any(CCT == -1)):
        print("Warning: xyz_to_cct_HA(): one or more CCTs out of range! --> (CCT < 3 kK,  CCT >800 kK) coded as (-1, NaN) 's")
    return CCT.T
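A standalone numeric check (not from the source) of the first exponential branch, using the CIE 1931 chromaticity of D65 (x = 0.3127, y = 0.3290, assumed here for illustration); the constants are copied from the function above.

import numpy as np

x, y = 0.3127, 0.3290                      # D65 chromaticity
n = (x - 0.3366) / (y - 0.1735)            # inverse slope w.r.t. epicentre (xe, ye)
cct = (-949.86315 + 6253.80338 * np.exp(-n / 0.92159)
       + 28.70599 * np.exp(-n / 0.20039)
       + 0.00004 * np.exp(-n / 0.07125))
print(round(cct))                          # ~6500 K, as expected for D65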
Example #6
def get_wavelength_params(dvc, out = "wlsFit,Errors", Errors = {}, verbosity = _VERBOSITY):
    """
    Get wavelength calibration parameters of polynomial of order 5.
    
    Args:
        :dvc:
            | Device handle (class ctypes) or int.
        :out:
            | "wlsFit,Errors", optional
            | Requested return.
        :Errors:
            | Dict with error messages.
        :verbosity:
            | 1, optional
            | 0: no printed error message output.
    
    Returns:
        :wlsFit:
            | ndarray with parameter values of wavelength calibration.
        :Errors:
            | Dict with error messages.
    """
    out = out.replace(' ','')
    wlsFit = np.nan*np.ones((5,),dtype=np.float32) # initialize parameter array with nan's
    try:
        Errors["GetFit"] = None
        
        # create and initialize buffer for polynomial parameters:
        fFit = (FLOAT * 5)(*[0,0,0,0,0]) 
        
        # get pointer to start of parameter array: 
        dwError = jtc.JETI_GetFit(dvc, ctypes.byref(fFit))
        Errors["GetFit"] = dwError
        if (dwError != 0):
            if verbosity == 1:
                print("Could not get the wavelength calibration parameters.")
        else:
            # Read parameters from buffer:
            wlsFit= np.frombuffer(fFit, np.float32)
    except:
        Errors["GetFit"] = "get_wavelength_params() fails."
    finally:
        if out == "wlsFit,Errors":
            return wlsFit,Errors
        elif out == "wlsFit":
            return wlsFit
        elif out == "Errors":
            return Errors
        else:
            raise Exception("Requested output error.")
Example #7
def __init__(self, *args, argtype='xyz', vtype='xyz', _TINY=1e-15):
    self._TINY = _TINY
    self.vtype = vtype
    if len(args) == 0:
        args = [0.0, 0.0, 0.0]
    args = [np.atleast_1d(args[i])
            for i in range(len(args))]  # make atleast_1d ndarray
    if vtype == 'xyz':
        self.x = args[0]
        self.y = args[1]
        self.z = args[2]
    elif vtype == 'tpr':
        if len(args) == 2:
            args.append(np.ones(args[0].shape))
        self.set_tpr(*args)
    self.shape = self.x.shape
Example #8
def lab_to_xyz(lab, xyzw = None, cieobs = _CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*a*b* (CIELAB) color coordinates to XYZ tristimulus values.

    Args:
        :lab: 
            | ndarray with CIE 1976 L*a*b* (CIELAB) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    lab = np2d(lab)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'],cieobs = cieobs)

    # make xyzw same shape as data:
    xyzw = xyzw*np.ones(lab.shape)

    # set knee point of function:
    k = (24/116)   # knee point: f = 24/116, i.e. t = (24/116)**3

    # get L*, a*, b* and Xw, Yw, Zw:
    L,a,b = asplit(lab)
    Xw,Yw,Zw = asplit(xyzw)

    fy = (L + 16.0) / 116.0
    fx = a / 500.0 + fy
    fz = fy - b/200.0

    # apply 3rd power:
    X,Y,Z = [xw*(x**3.0) for (x,xw) in ((fx,Xw),(fy,Yw),(fz,Zw))]

    # Now calculate T where T/Tn is below the knee point:
    p,q,r = [np.where(x<k) for x in (fx,fy,fz)]
    X[p],Y[q],Z[r] = [np.squeeze(xw[xp]*((x[xp] - 16.0/116.0) / (841/108))) for (x,xw,xp) in ((fx,Xw,p),(fy,Yw,q),(fz,Zw,r))]

    return ajoin((X,Y,Z))
Example #9
def lab_to_xyz(lab, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*a*b* (CIELAB) color coordinates to XYZ tristimulus values.

    Args:
        :lab: 
            | ndarray with CIE 1976 L*a*b* (CIELAB) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    lab = np2d(lab)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # make xyzw same shape as data:
    xyzw = xyzw * np.ones(lab.shape)

    # get L*, a*, b* and Xw, Yw, Zw:
    fXYZ = np.empty(lab.shape)
    fXYZ[..., 1] = (lab[..., 0] + 16.0) / 116.0
    fXYZ[..., 0] = lab[..., 1] / 500.0 + fXYZ[..., 1]
    fXYZ[..., 2] = fXYZ[..., 1] - lab[..., 2] / 200.0

    # apply 3rd power:
    xyz = (fXYZ**3.0) * xyzw

    # Now calculate T where T/Tn is below the knee point:
    pqr = fXYZ <= (24 / 116)  # knee point: f = 24/116, i.e. t = (24/116)**3
    xyz[pqr] = np.squeeze(xyzw[pqr] * ((fXYZ[pqr] - 16.0 / 116.0) /
                                       (841 / 108)))

    return xyz
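A scalar sanity check (not part of the source) of the inverse transform: L* = 100, a* = b* = 0 must map back to the white point; the D65 white shown is assumed for illustration only.

import numpy as np

Xw, Yw, Zw = 95.047, 100.0, 108.883     # CIE D65 white point (2° observer), for illustration
fy = (100.0 + 16.0) / 116.0             # = 1.0 for L* = 100
fx = 0.0 / 500.0 + fy
fz = fy - 0.0 / 200.0
print(np.array([Xw, Yw, Zw]) * np.array([fx, fy, fz])**3.0)   # -> [95.047 100. 108.883]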
Example #10
def calculate_lut(ccts=None, cieobs=None, add_to_lut=True):
    """
    Function that calculates LUT for the ccts stored in 
    ./data/cctluts/cct_lut_cctlist.dat or given as input argument.
    Calculation is performed for CMF set specified in cieobs. 
    Adds a new (temporary) field to the _CCT_LUT dict.
    
    Args:
        :ccts: 
            | ndarray or str, optional
            | list of ccts for which to (re-)calculate the LUTs.
            | If str, ccts contains path/filename.dat to list.
        :cieobs: 
            | None or str, optional
            | str specifying cmf set.
            
    Returns:
        :returns: 
            | ndarray with cct and duv.
        
    Note:
        Function changes the global variable: _CCT_LUT!
    """
    if ccts is None:
        ccts = getdata('{}cct_lut_cctlist.dat'.format(_CCT_LUT_PATH))
    elif isinstance(ccts, str):
        ccts = getdata(ccts)

    Yuv = np.ones((ccts.shape[0], 2)) * np.nan
    for i, cct in enumerate(ccts):
        Yuv[i, :] = xyz_to_Yuv(
            spd_to_xyz(blackbody(cct, wl3=[360, 830, 1]), cieobs=cieobs))[:,
                                                                          1:3]
    u = Yuv[:, 0, None]  # get CIE 1960 u
    v = (2.0 / 3.0) * Yuv[:, 1, None]  # get CIE 1960 v
    cctuv = np.hstack((ccts, u, v))
    if add_to_lut == True:
        _CCT_LUT[cieobs] = cctuv
    return cctuv
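A usage sketch mirroring how Example #20 below fills the LUT on demand; it assumes the surrounding luxpy module (which provides getdata, blackbody, spd_to_xyz, xyz_to_Yuv and the _CCT_LUT dict) is imported.

cctuv = calculate_lut(ccts=None, cieobs='1931_2', add_to_lut=False)
print(cctuv.shape)   # (N, 3): columns are cct, CIE 1960 u, CIE 1960 v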
Example #11
def dtlz_range(fname, M):
    """
    Returns the decision range of a DTLZ function
    The range is simply [0,1] for all variables. What varies is the number 
    of decision variables in each problem. The equation for that is
    n = (M-1) + k
    wherein k = 5 for DTLZ1, 10 for DTLZ2-6, and 20 for DTLZ7.
    
    Args:
        :fname: 
            | a string with the name of the function ('dtlz1', 'dtlz2' etc.)
        :M: 
            | a scalar with the number of objectives
    
    Returns:
        :lim: 
            | an n x 2 matrix wherein the first column is the lower limit 
              (0), and the second column, the upper limit of search (1)
    """
    #Checks if the string has or not the prefix 'dtlz', or if the number later
    #is greater than 7:
    fname = fname.lower()
    if (len(fname) < 5) or (fname[:4] != 'dtlz') or (float(fname[4]) > 7):
        raise Exception(
            'Sorry, the function {:s} is not implemented.'.format(fname))

    # If the name is o.k., defines the value of k
    if fname == 'dtlz1':
        k = 5
    elif fname == 'dtlz7':
        k = 20
    else:  #any other function
        k = 10

    n = (M - 1) + k  #number of decision variables

    lim = np.hstack((np.zeros((n, 1)), np.ones((n, 1))))
    return lim
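A quick check (not from the source): DTLZ2 with M = 3 objectives gives n = (3 - 1) + 10 = 12 decision variables, each bounded to [0, 1].

import numpy as np

lim = dtlz_range('dtlz2', M=3)
print(lim.shape)    # (12, 2)
print(lim[0])       # [0. 1.]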
Example #12
        C[i+1] = Lw*C[i+1]/xyzw2[i,1]

    
    xyz, xyzw = spd_to_xyz(C, cieobs = cieobs, relative = True, rfl = rflM, out = 2)
    qab = xyz_to_qabW_cam18sl(xyzw, xyzb = None, Lb = [100], fov = 10.0)
    print('qab: ',qab)
    qab2 = cam18sl(C, datab = None, Lb = [100], fov = 10.0, direction = 'forward', inputtype = 'spd', outin = 'Q,aW,bW', parameters = None)
    print('qab2: ',qab2)       
    xyz_ = qabW_cam18sl_to_xyz(qab, xyzb = None, Lb = [100], fov = 10.0)
    print('delta: ', xyzw-xyz_)
    
    # test 2:
    cieobs = '2006_10'
    Lb = np2d([100])
    wlr = getwlr(_CAM18SL_WL3)
    EEW = np.vstack((wlr,np.ones((Lb.shape[1], wlr.shape[0])))) 
    E = cie_interp(_CIE_ILLUMINANTS['E'],EEW[0],kind='spd')
    D65 = cie_interp(_CIE_ILLUMINANTS['D65'],EEW[0],kind='spd')
    A = cie_interp(_CIE_ILLUMINANTS['A'],EEW[0],kind='spd')
    C = cie_interp(_CIE_ILLUMINANTS['C'],EEW[0],kind='spd')
    
    STIM = np.vstack((EEW, E[1:], C[1:], D65[1,:], A[1:]))
    xyz = spd_to_xyz(STIM, cieobs = cieobs, relative = False)
    STIM[1:] = STIM[1:]/xyz[...,1:2]*Lw 
    xyz = spd_to_xyz(STIM, cieobs = cieobs, relative = False)
    
    BG = EEW
    qab = cam18sl(EEW, datab = EEW, Lb = [100], fov = 10.0, direction = 'forward', inputtype = 'spd', outin = 'Q,aW,bW', parameters = None)
    print('test 2 qab: ',qab)
    
    
Example #13
def render_image(img = None, spd = None, rfl = None, out = 'img_hyp', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'ipt', cspace_tf = {},\
                 k_neighbours = 4, show = True,
                 verbosity = 0, show_ref_img = True,\
                 stack_test_ref = 12,\
                 write_to_file = None):
    """
    Render image under specified light source spd.
    
    Args:
        :img: 
            | None or str or ndarray with uint8 rgb image.
            | None load a default image.
        :spd: 
            | ndarray, optional
            | Light source spectrum for rendering
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'img_hyp' or str, optional
            |  (other option: 'img_ren': rendered image under :spd:)
        :refspd:
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65 (srgb has a D65 white point)
        :D: 
            | None, optional
            | Degree of (von Kries) adaptation from spd to refspd. 
        :cieobs:
            | _CIEOBS, optional
            | CMF set for calculation of xyz from spectral data.
        :cspace:
            | 'ipt',  optional
            | Color space for color coordinate to rfl mapping.
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.cKDTree
        :show: 
            | True, optional
            |  Show images.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
              rendered image pixels.
        :show_ref_img:
            | True, optional
            | True: shows rendered image under reference spd. False: shows
              original image.
        :write_to_file:
            | None, optional
            | None: do nothing, else: write to filename(+path) in :write_to_file:
        :stack_test_ref: 
            | 12, optional
            |   - 12: left (test), right (ref) format for show and imwrite
            |   - 21: top (test), bottom (ref)
            |   - 1: only show/write test
            |   - 2: only show/write ref
            |   - 0: show both, write test

    Returns:
        :returns: 
            | img_hyp, img_ren, 
            | ndarrays with hyperspectral image and rendered images 
    """

    # Get image:
    #imread = lambda x: plt.imread(x) #matplotlib.pyplot

    if img is not None:
        if isinstance(img, str):
            img = plt.imread(img)  # use matplotlib.pyplot's imread
    else:
        img = plt.imread(_HYPSPCIM_DEFAULT_IMAGE)

    # Convert to 2D format:
    rgb = img.reshape(img.shape[0] * img.shape[1], 3) * 1.0  # *1.0: make float
    rgb[rgb == 0] = _EPS  # avoid division by zero for pure blacks.

    # Get unique rgb values and positions:
    rgb_u, rgb_indices = np.unique(rgb, return_inverse=True, axis=0)

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()

    # Convert rgb_u to xyz and lab-type values under assumed refspd:
    xyz_wr = spd_to_xyz(refspd, cieobs=cieobs, relative=True)
    xyz_ur = colortf(rgb_u, tf='srgb>xyz')

    # Estimate rfl's for xyz_ur:
    rfl_est, xyzri = xyz_to_rfl(xyz_ur, rfl = rfl, out = 'rfl_est,xyz_est', \
                 refspd = refspd, D = D, cieobs = cieobs, \
                 cspace = cspace, cspace_tf = cspace_tf,\
                 k_neighbours = k_neighbours, verbosity = verbosity)

    # Get default test spd if none supplied:
    if spd is None:
        spd = _CIE_ILLUMINANTS['F4']

    # calculate xyz values under test spd:
    xyzti, xyztw = spd_to_xyz(spd, rfl=rfl_est, cieobs=cieobs, out=2)

    # Chromatic adaptation from test spd to refspd:
    if D is not None:
        xyzti = cat.apply(xyzti, xyzw1=xyztw, xyzw2=xyz_wr, D=D)

    # Convert xyzti under test spd to srgb:
    rgbti = colortf(xyzti, tf='srgb') / 255

    # Reconstruct original locations for rendered image rgbs:
    img_ren = rgbti[rgb_indices]
    img_ren.shape = img.shape  # reshape back to 3D size of original

    # For output:
    if show_ref_img == True:
        rgb_ref = colortf(xyzri, tf='srgb') / 255
        img_ref = rgb_ref[rgb_indices]
        img_ref.shape = img.shape  # reshape back to 3D size of original
        img_str = 'Rendered (under ref. spd)'
        img = img_ref
    else:
        img_str = 'Original'
        img = img / 255

    if (stack_test_ref > 0) | (show == True):
        if stack_test_ref == 21:
            img_original_rendered = np.vstack(
                (img_ren, np.ones((4, img.shape[1], 3)), img))
            img_original_rendered_str = 'Rendered (under test spd)\n ' + img_str
        elif stack_test_ref == 12:
            img_original_rendered = np.hstack(
                (img_ren, np.ones((img.shape[0], 4, 3)), img))
            img_original_rendered_str = 'Rendered (under test spd) | ' + img_str
        elif stack_test_ref == 1:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'
        elif stack_test_ref == 2:
            img_original_rendered = img
            img_original_rendered_str = img_str
        elif stack_test_ref == 0:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'

    if write_to_file is not None:
        # Write (stacked) image to file:
        #print('Writing rendering results to image file: {}'.format(write_to_file))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(write_to_file, img_original_rendered)

    if show == True:
        # show images using pyplot.show():
        plt.figure()
        plt.imshow(img_original_rendered)
        plt.title(img_original_rendered_str)
        plt.gca().get_xaxis().set_ticklabels([])
        plt.gca().get_yaxis().set_ticklabels([])

        if stack_test_ref == 0:
            plt.figure()
            plt.imshow(img)
            plt.title(img_str)
            plt.axis('off')

    if 'img_hyp' in out.split(','):
        # Create hyper_spectral image:
        rfl_image_2D = rfl_est[
            rgb_indices +
            1, :]  # create array with all rfls required for each pixel
        img_hyp = rfl_image_2D.reshape(img.shape[0], img.shape[1],
                                       rfl_image_2D.shape[1])

    # Setup output:
    if out == 'img_hyp':
        return img_hyp
    elif out == 'img_ren':
        return img_ren
    else:
        return eval(out)
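A hedged usage sketch; it assumes the function is available through luxpy's hyperspectral image toolbox (the module path is an assumption), and uses only parameters documented in the docstring above.

import luxpy as lx
from luxpy.toolboxes import hypspcim as hc   # assumed packaging, e.g. in luxpy

img_hyp, img_ren = hc.render_image(img=None,                       # default test image
                                   spd=lx._CIE_ILLUMINANTS['F4'],  # render under F4
                                   out='img_hyp,img_ren',
                                   show=False, write_to_file=None)
print(img_hyp.shape, img_ren.shape)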
Example #14
def read_spectral_radiance(dvc, wlstart = 360, wlend = 830, wlstep = 1, out = "spd,Errors", Errors = {}, verbosity = _VERBOSITY):
    """
    Read measured spectral radiance (W/m².sr.nm) from device.
    
    Args:
        :dvc:
            | Device handle (of class ctypes).
        :wlstart:
            | 360 or Int, optional
            | Start wavelength in nm. (min = 350 nm)
        :wlend:
            | 830 or Int, optional
            | End wavelength in nm. (max = 1000 nm)
        :out:
            | "status,Errors", optional
            | Requested return.
        :Errors:
            | Dict with error messages.
        :verbosity:
            | 1, optional
            | 0: no printed error message output.
    
    Returns:
        :spd:
            | ndarray with wavelengths (1st row) and spectral radiance (2nd row; nan's if error).
        :Errors:
            | Dict with error messages.

    """
    out = out.replace(' ','')
    
    # Get wavelength range:
    wls = np.arange(int(wlstart), int(wlend)+int(wlstep), int(wlstep), dtype=np.float32)
    
    # Initialize spd filled with nan's:
    spd = np.vstack((wls, np.nan*np.ones(wls.shape)))
    
#    try:
    Errors["SpecRadEx"] = None
    
    # Convert measurement parameters to ctypes:
    dwBeg = DWORD(int(wlstart)) # wavelength start in nm
    dwEnd = DWORD(int(wlend)) # wavelength end in nm
    
    # create buffer for spectral radiance data:
    fSprad = (FLOAT * wls.shape[0])() 
    
    # get pointer to start of spectral radiance 
    dwError = jtre.JETI_SpecRadEx(dvc, dwBeg, dwEnd, ctypes.byref(fSprad)) 
    Errors["SpecRadEx"] = dwError
    if (dwError != 0):
        if (verbosity == 1):
            print("Could not read spectral radiance data from device. Error code = {}".format(dwError))
    else:
        # Read spectral radiance from buffer:
        Sprad= np.frombuffer(fSprad, np.float32)
            
        # Overwrite 2nd row of spd array with measured spectral radiance values:
        spd[1,:] = Sprad  
#    except:
#        Errors["SpecRadEx"] = "read_spectral_radiance() fails."
#    finally:
    # Generate requested return:
    if out == "spd,Errors":
        return spd, Errors
    elif out == "spd":
        return spd
    elif out == "Errors":
        return Errors
    else:
        raise Exception("Requested output error.")
Example #15
def get_spd(dvc = 0, Tint = 0.0, autoTint_max = _TINT_MAX, Nscans = 1, wlstep = 1, 
            wlstart = 360, wlend = 830, 
            twait = _TWAIT_STATUS, out = "spd", close_device = True, 
            laser_on = 0, laser_intensity = 1000, verbosity = _VERBOSITY):
    """
    Measure spectral radiance (W/nm.sr.m²).
    
    Args:
        :dvc:
            | 0 or Int or ctypes.wintypes.LP_c_ulong, optional
            | Number of the spectrometer device to load (0 = 1st) or handle (ctypes) to pre_initialized device.
        :Tint:
            | 0 or Float, optional
            | Integration time in seconds. (if 0: find best integration time, but < autoTint_max).
        :autoTint_max:
            | Limit Tint to this value when Tint = 0.
        :Nscans:
            | 1 or Int, optional
            | Number of scans to average.
        :wlstep: 
            | 1 or Int, optional
            | Wavelength step size in nm.
        :wlstart:
            | 360 or Int, optional
            | Start wavelength in nm. (min = 350 nm)
        :wlend:
            | 830 or Int, optional
            | End wavelength in nm. (max = 1000 nm)
        :twait:
            | 0.1 or Float, optional
            | Time in seconds to wait before checking status of device. 
            | (If 0: wait :Tint: seconds, unless :Tint: == 0, then wait _TWAIT_STATUS seconds)
        :out:
            | "spd" [",dvc, Errors"], optional
            | Requested return. If "spd" in out.split(","):do spectral measurement, else: initialize dvc handle [and turn laser ON or OFF].
        :close_device:
            | True or False, optional
            | Close device at the end of the measurement.
            | If 'dvc' not in out.split(','): always close!!!
        :laser_on:
            | 0: OFF, >0: ON -> 1: PWM 7Hz, 2: PWM 28 Hz, 3: 255 Hz, optional
            | True (>0): turn laser on to select measurement area; False (0): turn off. 
            | (Can only be ON when "spd" is not in out.split(",") | if Tint is None)
        :laser_intensity: 
            | 1000.0, optional
            | Laser intensity in ‰ (pro-mille).
        :verbosity:
            | 1, optional
            | 0: no printed error message output.
    Returns:
        :returns: 
            | spd [,dvc, Errors] (as specified in :out:)
            | - "spd": ndarray with wavelengths (1st row) and spectral radiance (2nd row).
            | - "dvc": ctypes handle to device (if open) or nan (if closed).
            | - "Errors": dict with error message returned by device during various steps of the spectral measurement process.
    """
    # Initialize dict with errors messages for each of the different measurement steps:
    Errors = {} 
    Errors["get_spd"] = None
    out = out.replace(' ','')
    
    # Get wavelength range:
    wls = np.arange(int(wlstart), int(wlend)+int(wlstep), int(wlstep), dtype=np.float32)
    
    # Initialize spd filled with nan's:
    spd = np.vstack((wls, np.nan*np.ones(wls.shape)))

    try:
        # Initialize device :
        dvc, Errors = dvc_open(dvc = dvc, Errors = Errors, out = "dvc,Errors", verbosity = verbosity)    
        
        if (_check_dvc_open(dvc)) & (("spd" in out.split(",")) & (Tint is not None)):
            
            # Turn off laser before starting measurement:
            Errors = set_laser(dvc = dvc, laser_on = False, laser_intensity = laser_intensity, Errors = Errors, verbosity = verbosity)
                    
                            
            # Start measurement:
            Tint, Errors = start_meas(dvc, Tint = Tint, autoTint_max = autoTint_max, Nscans = Nscans, wlstep = wlstep, Errors = Errors, out = "Tint, Errors", verbosity = verbosity)
            
            # wait until measurement is finished (check intermediate status every twait seconds):
            status, Errors = wait_until_meas_is_finished(dvc, Tint = Tint, twait = twait, out = "status,Errors", Errors = Errors, verbosity = verbosity)
            
            if status == False:
                # Read measured spectral radiance from device:
                spd, Errors = read_spectral_radiance(dvc, wlstart = wlstart, wlend = wlend, wlstep = wlstep, out = "spd,Errors", Errors = Errors, verbosity = verbosity)    
            
        elif (("spd" not in out.split(",")) | (Tint is None)): # only dvc handle was requested or to turn laser ON.
            Errors = set_laser(dvc = dvc, laser_on = laser_on, laser_intensity = laser_intensity, Errors = Errors, verbosity = verbosity)
        
        # Close device:
        dvc, Errors = dvc_close(dvc, Errors = Errors, close_device = (close_device) | ('dvc' not in out.split(',')), out = "dvc,Errors", verbosity = verbosity)
    
        
        Errors["get_spd"] = int(np.sum([int(bool(x)) for x in Errors.values() if (x is not None)]) > 0)
        
    except:
        Errors["get_spd"] = "get_spd fails."
        
    finally:
        # Generate requested return:
        if out == "spd":
            return spd
        elif out == "dvc":
            return dvc
        elif out == "Errors":
            return Errors
        elif out == "spd,Errors":
            return spd, Errors
        elif out == "spd,dvc":
            return spd, dvc
        elif out == "spd,Errors,dvc":
            return spd, Errors, dvc
        elif out == "spd,dvc,Errors":
            return spd, dvc, Errors
        else:
            raise Exception("Requested output error.")
Example #16
def initialize_VF_hue_angles(hx = None, Cxr = _VF_MAXR, \
                             cri_type = _VF_CRI_DEFAULT, \
                             modeltype = _VF_MODEL_TYPE,\
                             determine_hue_angles = _DETERMINE_HUE_ANGLES):
    """
    Initialize the hue angles that will be used to 'summarize' 
    the VF model fitting parameters.
    
    Args:       
        :hx: 
            | None or ndarray, optional
            | None defaults to Munsell H5 hues.
        :Cxr: 
            | _VF_MAXR, optional
        :cri_type: 
            | _VF_CRI_DEFAULT or str or dict, optional,
            | Cri_type parameters for cri and VF model.
        :modeltype:
            | _VF_MODEL_TYPE or 'M5' or 'M6', optional
            | Determines the type of polynomial model.
        :determine_hue_angles:
            | _DETERMINE_HUE_ANGLES or True or False, optional
            | True: determines the 10 primary / secondary Munsell hues ('5..').
            | Note that for 'M6', an additional 
            
    Returns:
        :pcolorshift: 
            | {'href': href,
            |           'Cref' : _VF_MAXR, 
            |           'sig' : _VF_SIG, 
            |           'labels' : list[str]}
    """
    
    ###########################################
    # Get Munsell H5 hues:
    ###########################################

    rflM = _MUNSELL['R']
    hn = _MUNSELL['H'] # all Munsell hues
    rH5 = np.where([_MUNSELL['H'][:,0][x][0]=='5' for x in range(_MUNSELL['H'][:,0].shape[0])])[0] #all Munsell H5 hues
    hns5 = np.unique(_MUNSELL['H'][rH5]) 
    #------------------------------------------------------------------------------
    # Determine Munsell hue angles in cam02ucs:
    pool = False  
    IllC = _CIE_ILLUMINANTS['C'] # for determining Munsell hue angles in cam02ucs
    outM = VF_colorshift_model(IllC, cri_type = cri_type, sampleset = rflM, vfcolor = 'g',pool = pool)
    #------------------------------------------------------------------------------
    if (determine_hue_angles == True) | (hx is None):
        # find samples at major Munsell hue angles:
        all_h5_Munsell_cam02ucs = np.ones(hns5.shape)
        Jabt_IllC = outM[0]['Jab']['Jabt']
        for i,v in enumerate(hns5):
            hm = np.where(hn == v)[0]
            all_h5_Munsell_cam02ucs[i] = math.positive_arctan([Jabt_IllC[hm,0,1].mean()],[Jabt_IllC[hm,0,2].mean()],htype = 'rad')[0]
        hx = all_h5_Munsell_cam02ucs
        

    #------------------------------------------------------------------------------
    # Set up color shift parameters:
    pcolorshift = {'href': hx,'Cref' : Cxr, 'sig' : _VF_SIG, 'labels' : hns5}
    return pcolorshift
Example #17
def generate_grid(jab_ranges = None, out = 'grid', \
                  ax = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR),\
                  bx = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                  jx = None, limit_grid_radius = 0):
    """
    Generate a grid of color coordinates.
    
    Args:
        :out:
            | 'grid' or 'vectors', optional
            |   - 'grid': outputs a single 2d ndarray with the grid coordinates
            |   - 'vectors': outputs each dimension separately.
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
              (ndarray.shape = (3,3), with  first axis: J,a,b, and second 
              axis: min, max, delta)
        :ax:
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :bx:
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :jx:
            | None, optional
            | Note that not-None :jab_ranges: override :ax:, :bx: and :jx input.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by :ax:,:bx:.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
            
    Returns:
        :returns: 
            | single ndarray with ax,bx [,jx] 
            |  or
            | separate ndarrays for each dimension specified.
    """
    # generate grid from jab_ranges array input, otherwise use ax, bx, jx input:
    if jab_ranges is not None:
        if jab_ranges.shape[0] == 3:
            jx = np.arange(jab_ranges[0][0],jab_ranges[0][1],jab_ranges[0][2])
            ax = np.arange(jab_ranges[1][0],jab_ranges[1][1],jab_ranges[1][2])
            bx = np.arange(jab_ranges[2][0],jab_ranges[2][1],jab_ranges[2][2])
        else:
            jx = None
            ax = np.arange(jab_ranges[0][0],jab_ranges[0][1],jab_ranges[0][2])
            bx = np.arange(jab_ranges[1][0],jab_ranges[1][1],jab_ranges[1][2])
   
    # Generate grid from (jx), ax, bx:
    Ax,Bx = np.meshgrid(ax,bx)
    grid = np.dstack((Ax,Bx))
    grid = np.reshape(grid,(np.array(grid.shape[:-1]).prod(),grid.ndim-1))
    if jx is not None:
        for i,v in enumerate(jx):
            gridi = np.hstack((np.ones((grid.shape[0],1))*v,grid))
            if i == 0:
                gridwithJ = gridi
            else:
                gridwithJ = np.vstack((gridwithJ,gridi))
        grid = gridwithJ
    
    if jx is None:
        ax = grid[:,0:1]
        bx = grid[:,1:2]
    else:
        jx = grid[:,0:1]
        ax = grid[:,1:2]
        bx = grid[:,2:3] 
    
    if limit_grid_radius > 0:# limit radius of grid:
        Cr = (ax**2+bx**2)**0.5
        ax = ax[Cr<=limit_grid_radius,None]
        bx = bx[Cr<=limit_grid_radius,None]
        if jx is not None:
            jx = jx[Cr<=limit_grid_radius,None]
    
    # create output:
    if out == 'grid':
        if jx is None:
            return np.hstack((ax,bx))
        else:
            return np.hstack((jx,ax,bx))
    else:
        if jx is None:
            return ax, bx
        else:
            return jx, ax, bx
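A standalone sketch (not from the source): the default arguments above reference the module constants _VF_MAXR and _VF_DELTAR, so hypothetical values are assigned here so that the function definition can be evaluated outside its own module.

import numpy as np

_VF_MAXR, _VF_DELTAR = 40, 10    # hypothetical values, for illustration only

ax = bx = np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR)
grid = generate_grid(ax=ax, bx=bx, jx=np.array([50.0]), out='grid',
                     limit_grid_radius=_VF_MAXR)
print(grid.shape)   # (n, 3): columns J, a, b, with (a, b) kept inside the radius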
Example #18
def calculate_VF_PX_models(S, cri_type = _VF_CRI_DEFAULT, sampleset = None, pool = False, \
                           pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),\
                                          'Cref' : _VF_MAXR, 'sig' : _VF_SIG, 'labels' : '#'},\
                           vfcolor = 'k', verbosity = 0):
    """
    Calculate Vector Field and Pixel color shift models.
    
    Args:
        :cri_type: 
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool:
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
              modeling the vector field, while False models a different field 
              for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
              for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                'Cref' : _VF_MAXR, 
            |                'sig' : _VF_SIG, 
            |                'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
              summarized by the model parameters themselves OR by calculating the
              dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
    
    Returns:
        :returns:
            | :dataVF:, :dataPX: 
            | Dicts, for more info, see output description of resp.: 
              luxpy.cri.VF_colorshift_model() and luxpy.cri.PX_colorshift_model()
    """
    # calculate VectorField cri_color_shift model:
    dataVF = VF_colorshift_model(S, cri_type = cri_type, sampleset = sampleset, vfcolor = vfcolor, pcolorshift = pcolorshift, pool = pool, verbosity = verbosity)
    
    # Set jab_ranges and _deltas for PX-model pixel calculations:
    PX_jab_deltas = np.array([_VF_DELTAR,_VF_DELTAR,_VF_DELTAR]) #set same as for vectorfield generation
    PX_jab_ranges = np.vstack(([0,100,_VF_DELTAR],[-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR], [-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR]))#IES4880 gamut
   
    # Calculate shift vectors using vectorfield and pixel methods:
    delta_SvsVF_vshift_ab_mean = np.nan*np.ones((len(dataVF),1))
    delta_SvsVF_vshift_ab_mean_normalized = delta_SvsVF_vshift_ab_mean.copy()
    delta_PXvsVF_vshift_ab_mean = np.nan*np.ones((len(dataVF),1))
    delta_PXvsVF_vshift_ab_mean_normalized = delta_PXvsVF_vshift_ab_mean.copy()
    dataPX = [[] for k in range(len(dataVF))]
    for Snr in range(len(dataVF)):

        # Calculate shifts using pixel method, PX:
        dataPX[Snr] = PX_colorshift_model(dataVF[Snr]['Jab']['Jabt'][:,0,:],dataVF[Snr]['Jab']['Jabr'][:,0,:], jab_ranges = PX_jab_ranges, jab_deltas = PX_jab_deltas,limit_grid_radius = _VF_MAXR)
        
        # Calculate shift difference between Samples (S) and VectorField model predictions (VF):
        delta_SvsVF_vshift_ab = dataVF[Snr]['vshifts']['vshift_ab_s'] - dataVF[Snr]['vshifts']['vshift_ab_s_vf']
        delta_SvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt((delta_SvsVF_vshift_ab[...,1:3]**2).sum(axis = delta_SvsVF_vshift_ab[...,1:3].ndim-1)), axis=0)
        delta_SvsVF_vshift_ab_mean_normalized[Snr] = delta_SvsVF_vshift_ab_mean[Snr]/dataVF[Snr]['Jab']['DEi'].mean(axis=0)
        
        # Calculate shift difference between PiXel method (PX) and VectorField (VF):
        delta_PXvsVF_vshift_ab = dataPX[Snr]['vshifts']['vectorshift_ab_J0'] - dataVF[Snr]['vshifts']['vshift_ab_vf']
        delta_PXvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt((delta_PXvsVF_vshift_ab[...,1:3]**2).sum(axis = delta_PXvsVF_vshift_ab[...,1:3].ndim-1)), axis=0)
        delta_PXvsVF_vshift_ab_mean_normalized[Snr] = delta_PXvsVF_vshift_ab_mean[Snr]/dataVF[Snr]['Jab']['DEi'].mean(axis=0)

        dataVF[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean'] = delta_PXvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts']['delta_SvsVF_vshift_ab_mean'] = delta_SvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts']['delta_SvsVF_vshift_ab_mean_normalized'] = delta_SvsVF_vshift_ab_mean_normalized[Snr]
        dataVF[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean_normalized'] = delta_PXvsVF_vshift_ab_mean_normalized[Snr]
        dataPX[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean'] = dataVF[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean']
        dataPX[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean_normalized'] = dataVF[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean_normalized']

    return dataVF, dataPX
Example #19
def getCatObs(n_cat=10,
              fieldsize=2,
              out='LMS',
              wl=None,
              allow_negative_values=False):
    """
    Generate cone fundamentals for categorical observers.
    
    Args: 
        :n_cat: 
            | 10, optional
            | Number of observer CMFs to generate.
        :fieldsize:
            | fieldsize in degrees (between 2° and 10°), optional
            | Defaults to 2°.
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            |  None: output original _WL = np.array([390,780,5])
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions 
            |  should not have negative values.
            |  If False: negative values are clipped to zero (X[X<0] = 0).
    
    Returns:
        :returns:
            | LMS [,var_age, vAll] 
            |   - LMS: ndarray with population LMS functions.
            |   - var_age: ndarray with population observer ages.
            |   - vAll: dict with population physiological factors (see .keys()) 
    
    Notes:
        1. Categorical observers are observer functions that would represent 
        color-normal populations. They are finite and discrete as opposed to 
        observer functions generated from the individual colorimetric observer 
        model. Thus, they would offer more convenient and practical approaches
        for the personalized color imaging workflow and color matching analyses.
        Categorical observers were derived in two steps. 
        At the first step, 10000 observer functions were generated from the 
        individual colorimetric observer model using Monte Carlo simulation. 
        At the second step, the cluster analysis, a modified k-medoids 
        algorithm, was applied to the 10000 observers minimizing the squared 
        Euclidean distance in cone fundamentals space, and categorical 
        observers were derived iteratively. Since the proposed categorical 
        observers are defined by their physiological parameters and ages, their
        CMFs can be derived for any target field size.

        2. Categorical observers were ordered by importance; 
        the first categorical observer was the average observer, equivalent to 
        CIEPO06 with a 38-year-old observer for a given field size, followed by the second
        most important categorical observer, the third, and so on.
        
        3. see: https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php
    """
    # Use Iteratively Derived Cat.Obs.:
    var_age = _INDVCMF_CATOBSPFCTR['age'].copy()
    vAll = _INDVCMF_CATOBSPFCTR.copy()
    vAll.pop('age')

    # Set requested wavelength range:
    if wl is not None:
        wl = getwlr(wl3=wl)
    else:
        wl = _WL

    LMS_All = np.nan * np.ones((3 + 1, _WL.shape[0], n_cat))
    for k in range(n_cat):
        t_LMS = cie2006cmfsEx(age = var_age[k],fieldsize = fieldsize, wl = wl,\
                              var_od_lens = vAll['od_lens'][k],\
                              var_od_macula = vAll['od_macula'][k],\
                              var_od_L = vAll['od_L'][k],\
                              var_od_M = vAll['od_M'][k],\
                              var_od_S = vAll['od_S'][k],\
                              var_shft_L = vAll['shft_L'][k],\
                              var_shft_M = vAll['shft_M'][k],\
                              var_shft_S = vAll['shft_S'][k],\
                              out = 'LMS')

        LMS_All[:, :, k] = t_LMS

    LMS_All[np.where(LMS_All < 0)] = 0

    if n_cat == 1:
        LMS_All = np.squeeze(LMS_All, axis=2)

    if ('xyz' in out.lower().split(',')):
        LMS_All = lmsb_to_xyzb(LMS_All,
                               fieldsize,
                               out='xyz',
                               allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    if (out == 'LMS'):
        return LMS_All
    elif (out == 'LMS,var_age,vAll'):
        return LMS_All, var_age, vAll
    else:
        return eval(out)
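A hedged usage sketch; it assumes the module defining getCatObs and its data (e.g. luxpy's individual observer CMF toolbox, an assumed packaging) is imported, and uses only the documented out options.

LMS, var_age, vAll = getCatObs(n_cat=5, fieldsize=10, out='LMS,var_age,vAll')
print(LMS.shape)    # (4, number of wavelengths, 5): wavelength row + L, M, S per observer
print(var_age)      # ages of the 5 categorical observers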
Example #20
def xyz_to_cct_ohno(xyzw,
                    cieobs=_CIEOBS,
                    out='cct',
                    wl=None,
                    accuracy=0.1,
                    force_out_of_lut=True,
                    upper_cct_max=10.0**20,
                    approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and 
    Duv (distance above (>0) or below (<0) the Planckian locus) 
    using Ohno's method. 
    
    Args:
        :xyzw: 
            | ndarray of tristimulus values
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculate xyzw.
        :out: 
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct 
              to speed up search.
        :force_out_of_lut: 
            | True, optional
            | If True and cct is out of range of the LUT, then switch to 
              brute-force search method, else return numpy.nan values.
        
    Returns:
        :returns: 
            | ndarray with:
            |    cct: out == 'cct' (or 1)
            |    duv: out == 'duv' (or -1)
            |    cct, duv: out == 'cct,duv' (or 2)
            |    [cct,duv]: out == "[cct,duv]" (or -2) 
            
    Note:
        LUTs are stored in ./data/cctluts/
        
    Reference:
        1. `Ohno Y. Practical use and calculation of CCT and Duv. 
        Leukos. 2014 Jan 2;10(1):47-55.
        <http://www.tandfonline.com/doi/abs/10.1080/15502724.2014.839020>`_
    """

    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_ohno(): Input xyzwa.ndim must be <= 2 !')

    # get 1960 u,v of test source:
    Yuv = xyz_to_Yuv(
        xyzw)  # remove possible 1-dim + convert xyzw to CIE 1976 u',v'
    axis_of_v3 = len(Yuv.shape) - 1  # axis containing color components
    u = Yuv[:, 1, None]  # get CIE 1960 u
    v = (2.0 / 3.0) * Yuv[:, 2, None]  # get CIE 1960 v

    uv = np2d(np.concatenate((u, v), axis=axis_of_v3))

    # load cct & uv from LUT:
    if cieobs not in _CCT_LUT:
        _CCT_LUT[cieobs] = calculate_lut(ccts=None,
                                         cieobs=cieobs,
                                         add_to_lut=False)
    cct_LUT = _CCT_LUT[cieobs][:, 0, None]
    uv_LUT = _CCT_LUT[cieobs][:, 1:3]

    # calculate CCT of each uv:
    CCT = np.ones(uv.shape[0]) * np.nan  # initialize with NaN's
    Duv = CCT.copy()  # initialize with NaN's
    idx_m = 0
    idx_M = uv_LUT.shape[0] - 1
    for i in range(uv.shape[0]):
        out_of_lut = False
        delta_uv = (((uv_LUT - uv[i])**2.0).sum(
            axis=1))**0.5  # calculate distance of uv with uv_LUT
        idx_min = delta_uv.argmin()  # find index of minimum distance

        # find Tm, delta_uv and u,v for 2 points surrounding uv corresponding to idx_min:
        if idx_min == idx_m:
            idx_min_m1 = idx_min
            out_of_lut = True
        else:
            idx_min_m1 = idx_min - 1
        if idx_min == idx_M:
            idx_min_p1 = idx_min
            out_of_lut = True
        else:
            idx_min_p1 = idx_min + 1

        if (out_of_lut == True) & (force_out_of_lut
                                   == True):  # calculate using search-function
            cct_i, Duv_i = xyz_to_cct_search(xyzw[i],
                                             cieobs=cieobs,
                                             wl=wl,
                                             accuracy=accuracy,
                                             out='cct,duv',
                                             upper_cct_max=upper_cct_max,
                                             approx_cct_temp=approx_cct_temp)
            CCT[i] = cct_i
            Duv[i] = Duv_i
            continue
        elif (out_of_lut == True) & (force_out_of_lut == False):
            CCT[i] = np.nan
            Duv[i] = np.nan
            continue

        cct_m1 = cct_LUT[idx_min_m1]  # - 2*_EPS
        delta_uv_m1 = delta_uv[idx_min_m1]
        uv_m1 = uv_LUT[idx_min_m1]
        cct_p1 = cct_LUT[idx_min_p1]
        delta_uv_p1 = delta_uv[idx_min_p1]
        uv_p1 = uv_LUT[idx_min_p1]

        cct_0 = cct_LUT[idx_min]
        delta_uv_0 = delta_uv[idx_min]

        # calculate uv distance between Tm_m1 & Tm_p1:
        delta_uv_p1m1 = ((uv_p1[0] - uv_m1[0])**2.0 +
                         (uv_p1[1] - uv_m1[1])**2.0)**0.5

        # Triangular solution:
        x = ((delta_uv_m1**2) - (delta_uv_p1**2) +
             (delta_uv_p1m1**2)) / (2 * delta_uv_p1m1)
        Tx = cct_m1 + ((cct_p1 - cct_m1) * (x / delta_uv_p1m1))
        #uBB = uv_m1[0] + (uv_p1[0] - uv_m1[0]) * (x / delta_uv_p1m1)
        vBB = uv_m1[1] + (uv_p1[1] - uv_m1[1]) * (x / delta_uv_p1m1)

        Tx_corrected_triangular = Tx * 0.99991
        signDuv = np.sign(uv[i][1] - vBB)
        Duv_triangular = signDuv * np.atleast_1d(
            ((delta_uv_m1**2.0) - (x**2.0))**0.5)

        # Parabolic solution:
        a = delta_uv_m1 / (cct_m1 - cct_0 + _EPS) / (cct_m1 - cct_p1 + _EPS)
        b = delta_uv_0 / (cct_0 - cct_m1 + _EPS) / (cct_0 - cct_p1 + _EPS)
        c = delta_uv_p1 / (cct_p1 - cct_0 + _EPS) / (cct_p1 - cct_m1 + _EPS)
        A = a + b + c
        B = -(a * (cct_p1 + cct_0) + b * (cct_p1 + cct_m1) + c *
              (cct_0 + cct_m1))
        C = (a * cct_p1 * cct_0) + (b * cct_p1 * cct_m1) + (c * cct_0 * cct_m1)
        Tx = -B / (2 * A + _EPS)
        Tx_corrected_parabolic = Tx * 0.99991
        Duv_parabolic = signDuv * (A * np.power(Tx_corrected_parabolic, 2) +
                                   B * Tx_corrected_parabolic + C)

        Threshold = 0.002
        if np.abs(Duv_triangular) < Threshold:
            CCT[i] = Tx_corrected_triangular
            Duv[i] = Duv_triangular
        else:
            CCT[i] = Tx_corrected_parabolic
            Duv[i] = Duv_parabolic

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2dT(CCT)
    elif (out == 'duv') | (out == -1):
        return np2dT(Duv)
    elif (out == 'cct,duv') | (out == 2):
        return np2dT(CCT), np2dT(Duv)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((CCT, Duv)).T
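A hedged usage sketch, assuming luxpy exposes these functions at the top level (import luxpy as lx), which is an assumption about packaging; D65 should give a CCT close to 6500 K and a small Duv.

import luxpy as lx

xyzw = lx.spd_to_xyz(lx._CIE_ILLUMINANTS['D65'], cieobs='1931_2')
cct, duv = lx.xyz_to_cct_ohno(xyzw, cieobs='1931_2', out='cct,duv')
print(cct, duv)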
Example #21
# load TM30 spd data base:
_IESTM30 = {
    'S': {
        'data': getdata(_S_PATH + 'IESTM30_Sspds.dat', kind='np').transpose()
    }
}
_IESTM30['S']['info'] = getdata(_S_PATH + 'IESTM30_Sinfo.txt',
                                kind='np',
                                header='infer',
                                verbosity=False)
_IESTM30_S = _IESTM30['S']

#------------------------------------------------------------------------------
# Illuminant library: set some typical CIE illuminants:
E = np.array([np.linspace(380, 780, 401), np.ones(401)])
D65 = np.array(
    [[
        380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393,
        394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407,
        408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421,
        422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435,
        436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449,
        450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463,
        464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
        478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491,
        492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
        506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519,
        520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533,
        534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547,
        548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561,
Example #22
def subsample_RFL_set(rfl, rflpath = '', samplefcn = 'rand', S = _CIE_ILLUMINANTS['E'], \
                      jab_ranges = None, jab_deltas = None, cieobs = _VF_CIEOBS, cspace = _VF_CSPACE, \
                      ax = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                      bx = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                      jx = None, limit_grid_radius = 0):
    """
    Sub-samples a spectral reflectance set by pixelization of color space.
    
    Args:
        :rfl: 
            | ndarray or str
            | Array with a set of spectral reflectance functions to be 
              subsampled, or str referring to a file containing such a set.
            | If str to file: file must contain data as columns, with first 
              column the wavelengths.
        :rflpath:
            | '' or str, optional
            | Path to folder with rfl-set specified in a str :rfl: filename.
        :samplefcn:
            | 'rand' or 'mean', optional
            |   -'rand': selects a random sample from the samples within each pixel
            |   -'mean': returns the mean spectral reflectance in each pixel.
        :S: 
            | _CIE_ILLUMINANTS['E'], optional
            | Illuminant used to calculate the color coordinates of the spectral 
              reflectance samples.
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
              (ndarray.shape = (3,3), with  first axis: J,a,b, and second 
               axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
              samples around each pixel center. An ndarray of 3 deltas uses
              a city-block sampling around each pixel center.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :cieobs:
            | _VF_CIEOBS or str, optional
            | Specifies CMF set used to calculate color coordinates.
        :ax: 
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :bx: 
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :jx: 
            | None, optional
            | Note that a not-None :jab_ranges: overrides the :ax:, :bx: and :jx: input.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by :ax:, :bx:.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
   
    Returns:
        :returns:
            | rflsampled, jabp
            | ndarrays with resp. the subsampled set of spectral reflectance 
              functions and the pixel coordinate centers.
    """
    # Testing effects of sample set, pixel size and gamut size:
    if isinstance(rfl, str):
        rfl = pd.read_csv(os.path.join(rflpath,rfl),header = None).values.T # .get_values() was removed in newer pandas versions
  
    # Calculate Jab coordinates of samples:
    xyz,xyzw = spd_to_xyz(S, cieobs = cieobs, rfl = rfl.copy(), out = 2)
    cspace_pars = cspace.copy()
    cspace_pars.pop('type')
    cspace_pars['xyzw'] = xyzw
    jab = colortf(xyz,tf = cspace['type'],fwtf = cspace_pars)

    # Generate grid and get samples in each grid:
    gridp,idxp, jabp, pixelsamplenrs, pixelIDs = get_pixel_coordinates(jab, jab_ranges = jab_ranges, jab_deltas = jab_deltas, limit_grid_radius = limit_grid_radius)

    # Get rfls from set using sampling function (mean or rand):
    W = rfl[:1]
    R = rfl[1:]
    rflsampled = np.nan*np.ones((len(idxp),R.shape[1]))
    for i in range(len(idxp)):
        if samplefcn == 'mean':
            rfl_i = np.nanmean(R[pixelsamplenrs[i],:],axis = 0) # index into R: pixelsamplenrs refer to sample rows (wavelength row excluded)
        else:
            samplenr_i = np.random.randint(len(pixelsamplenrs[i]))
            rfl_i = R[pixelsamplenrs[i][samplenr_i],:]
        rflsampled[i,:] = rfl_i        
    rflsampled = np.vstack((W,rflsampled))
    return rflsampled, jabp
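
The core of the subsampling above is the pixelization of color space: samples are binned on a regular (J,a,b) grid and one representative per occupied pixel is kept. A toy, self-contained sketch of that idea on 2-D (a,b) coordinates (random stand-in data; keeping the first sample per cell stands in for the 'rand'/'mean' options of :samplefcn:):

import numpy as np

rng = np.random.default_rng(0)
ab = rng.normal(scale=30.0, size=(500, 2))              # stand-in for (a,b) of 500 samples
delta = 10.0                                            # pixel (grid cell) size
cells = np.floor(ab / delta).astype(int)                # grid cell index of each sample
_, first_idx = np.unique(cells, axis=0, return_index=True)
subsampled = ab[np.sort(first_idx)]                     # one representative per occupied pixel
print(ab.shape, '->', subsampled.shape)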
Example #23
def cam18sl(data, datab = None, Lb = [100], fov = 10.0, inputtype = 'xyz', direction = 'forward', outin = 'Q,aW,bW', parameters = None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
              or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10°  absolute XYZ tristimulus values or spectral data
              of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used in case datab == None and the background is assumed to be an Equal-Energy-White)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making the model more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """
    
    if parameters is None:
        parameters = _CAM18SL_PARAMETERS
        
    outin = outin.split(',')    
    
    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, k, naka, unique_hue_data = [parameters[x] for x in sorted(parameters.keys())]
    
    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF['2006_10']['M'])
    MAab = np.array([cAlms,calms,cblms])
    invMAab = np.linalg.inv(MAab)    
    
    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs = '2006_10', relative = False)[...,1:2]
        else:
            Lb = datab[...,1:2]
    else:
        if isinstance(Lb,list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0] # use wlr of stimulus data
        else:
            wlr = datab[0] # use wlr of background data
    datar = np.vstack((wlr,np.ones((Lb.shape[0], wlr.shape[0])))) # create eew
    xyzr = spd_to_xyz(datar, cieobs = '2006_10', relative = False) # get abs. tristimulus values
    datar[1:] = datar[1:]/xyzr[...,1:2]*Lb
    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs = '2006_10', relative = False)
            datar = datab.copy()

 
    # prepare data and datab for loop over backgrounds: 
    # make axis 1 of datab have 'same' dimensions as data:         
    if (data.ndim == 2): 
        data = np.expand_dims(data, axis = 1)  # add light source axis 1     

    if inputtype == 'xyz': 
        if datab.shape[0] == 1: #make datab and datar have same lights source dimension (used to store different backgrounds) size as data
            datab = np.repeat(datab,data.shape[1],axis=0)  
            datar = np.repeat(datar,data.shape[1],axis=0)               
    else:
        if datab.shape[0] == 2:
            datab = np.vstack((datab[0],np.repeat(datab[1:], data.shape[1], axis = 0)))
        if datar.shape[0] == 2:
            datar = np.vstack((datar[0],np.repeat(datar[1:], data.shape[1], axis = 0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes = (1,0,2))

    #-------------------------------------------------
    
    #initialize camout:     
    dshape = list(data.shape)
    dshape[-1] = len(outin) # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)
    
  
    for i in range(data.shape[0]):
       
        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i+1:i+2,:])), cieobs = '2006_10', relative = False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i+1:i+2,:])), cieobs = '2006_10', relative = False)
        else:
            xyzb = datab[i:i+1,:] 
            xyzr = datar[i:i+1,:] 

        lmsb = np.dot(_CMF['2006_10']['M'],xyzb.T).T # convert to l,m,s
        rgbb = (lmsb / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF['2006_10']['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape)*Lb[i] # explicitely equal EEW cone excitations at same luminance values as background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs = '2006_10', relative = False)   
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF['2006_10']['M'],xyz.T).T # convert to l,m,s
            rgb = (lms / _CMF['2006_10']['K']) * k # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr/rgbb)[0])
            rgba = np.dot(Mcat,rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'])

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin
            
            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A,a,b = asplit(Aab)
            a = ca*a
            b = cb*b

            # calculate colorfullness like signal M:
            M = cM*((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA*(A + cHK[0]*M**cHK[1]) # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0]*(s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q*(fov/10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a,b, htype = 'deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data = unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M*np.cos(h*np.pi/180.0)
                bM = M*np.sin(h*np.pi/180.0)
            
            if 'aS' in outin:
                aS = s*np.cos(h*np.pi/180.0)
                bS = s*np.sin(h*np.pi/180.0)
            
            if 'aW' in outin:
                aW = W*np.cos(h*np.pi/180.0)
                bW = W*np.sin(h*np.pi/180.0)

            if (outin != ['Q','aW','bW']):
                camout[i] =  eval('ajoin(('+','.join(outin)+'))')
            else:
                camout[i] = ajoin((Q,aW,bW))
    
        
        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:        
            if 'aW' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0)/cW[0])**(1.0/cW[1])
                M = s*Q
                
            
            if 'aM' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5
            
            if 'aS' in outin:
                Q,a,b = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s*Q
                      
            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov/10.0)**cfov) #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                     s = (((1.0 / WsM) - 1.0)/cW[0])**(1.0/cW[1])
                     M = s*Q
                elif 's' in outin:
                     M = WsM*Q
                elif 'M' in outin:
                     M = WsM
            
            # calculate achromatic signal, A from Q and M:
            A = Q/cA - cHK[0]*M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a,b, htype = 'rad')
            
            # calculate a,b from M and h:
            a = (M/cM)*np.cos(h)
            b = (M/cM)*np.sin(h)

            a = a/ca
            b = b/cb

            # create Aab:
            Aab = ajoin((A,a,b))    

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T    

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc, n = naka['n'], sig = naka['sig'](rgbr.mean()), noise = naka['noise'], scaling = naka['scaling'], direction = 'inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb/rgbr)[0]),rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb/k*_CMF['2006_10']['K']  
            xyz = np.dot(Mlms2xyz,lms.T).T 
            
            camout[i] = xyz
    
    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)
    
    return camout
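
A hypothetical forward/inverse round trip with cam18sl, assuming the function above and the luxpy machinery it relies on are importable; the XYZ values are made up:

import numpy as np

xyz_stim = np.array([[120.0, 100.0, 80.0]])    # made-up absolute CIE 2006 10° XYZ values
qab = cam18sl(xyz_stim, datab=None, Lb=[100], fov=10.0,
              inputtype='xyz', direction='forward', outin='Q,aW,bW')
xyz_back = cam18sl(qab, datab=None, Lb=[100], fov=10.0,
                   inputtype='xyz', direction='inverse', outin='Q,aW,bW')
# xyz_back should closely reproduce xyz_stim (round-trip check)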
Example #24
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
              or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan * np.ones(dshape)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms /
                   _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfullness like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = A + cHK[0] * M**cHK[
                1]  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
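
A similar hypothetical round trip for cam15u, again assuming the function and its luxpy dependencies are in scope and using made-up XYZ values:

import numpy as np

xyz_stim = np.array([[120.0, 100.0, 80.0]])    # made-up absolute CIE 2006 10° XYZ values
qab = cam15u(xyz_stim, fov=10.0, inputtype='xyz', direction='forward', outin='Q,aW,bW')
xyz_back = cam15u(qab, fov=10.0, inputtype='xyz', direction='inverse', outin='Q,aW,bW')
# xyz_back should closely reproduce xyz_stim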
Example #25
def xyz_to_cct_search(xyzw,
                      cieobs=_CIEOBS,
                      out='cct',
                      wl=None,
                      accuracy=0.1,
                      upper_cct_max=10.0**20,
                      approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and 
    Duv(distance above (> 0) or below ( < 0) the Planckian locus) by a 
    brute-force search. 

    | The algorithm uses an approximate cct_temp (HA approx., see xyz_to_cct_HA) 
      as starting point or uses the middle of the allowed cct-range 
      (1e2 K - 1e20 K, higher causes overflow) on a log-scale, then constructs 
      a 4-step section of the blackbody (Planckian) locus on which to find the
      minimum distance to the 1960 uv chromaticity of the test source.

    Args:
        :xyzw: 
            | ndarray of tristimulus values
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculated xyzw.
        :out: 
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct to 
              speed up search.

    Returns:
        :returns: 
            | ndarray with:
            |    cct: out == 'cct' (or 1)
            |    duv: out == 'duv' (or -1)
            |    cct, duv: out == 'cct,duv' (or 2)
            |    [cct,duv]: out == "[cct,duv]" (or -2) 
    
    Notes:
        This program is more accurate, but slower than xyz_to_cct_ohno!
        Note that cct must be between 1e2 K - 1e20 K 
        (very large cct values take a long time!)
    """

    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_search(): Input xyzw.shape must be <= 2 !')

    # get 1960 u,v of test source:
    Yuvt = xyz_to_Yuv(np.squeeze(
        xyzw))  # remove possible 1-dim + convert xyzw to CIE 1976 u',v'
    #axis_of_v3t = len(Yuvt.shape)-1 # axis containing color components
    ut = Yuvt[:, 1, None]  #.take([1],axis = axis_of_v3t) # get CIE 1960 u
    vt = (2 / 3) * Yuvt[:, 2,
                        None]  #.take([2],axis = axis_of_v3t) # get CIE 1960 v

    # Initialize arrays:
    ccts = np.ones((xyzw.shape[0], 1)) * np.nan
    duvs = ccts.copy()

    #calculate preliminary solution(s):
    if (approx_cct_temp == True):
        ccts_est = xyz_to_cct_HA(xyzw)
        procent_estimates = np.array([[3000.0, 100000.0, 0.05],
                                      [100000.0, 200000.0, 0.1],
                                      [200000.0, 300000.0, 0.25],
                                      [300000.0, 400000.0, 0.4],
                                      [400000.0, 600000.0, 0.4],
                                      [600000.0, 800000.0, 0.4],
                                      [800000.0, np.inf, 0.25]])
    else:
        upper_cct = np.array(upper_cct_max)
        lower_cct = np.array(10.0**2)
        cct_scale_fun = lambda x: np.log10(x)
        cct_scale_ifun = lambda x: np.power(10.0, x)
        dT = (cct_scale_fun(upper_cct) - cct_scale_fun(lower_cct)) / 2
        ccttemp = np.array([cct_scale_ifun(cct_scale_fun(lower_cct) + dT)])
        ccts_est = np2d(ccttemp * np.ones((xyzw.shape[0], 1)))
        dT_approx_cct_False = dT.copy()

    # Loop through all ccts:
    for i in range(xyzw.shape[0]):

        #initialize CCT search parameters:
        cct = np.nan
        duv = np.nan
        ccttemp = ccts_est[i].copy()

        # Take care of (-1, NaN)'s from xyz_to_cct_HA signifying (CCT < lower, CCT > upper) bounds:
        approx_cct_temp_temp = approx_cct_temp
        if (approx_cct_temp == True):
            cct_scale_fun = lambda x: x
            cct_scale_ifun = lambda x: x
            if (ccttemp != -1) & (
                    np.isnan(ccttemp) == False
            ):  # within validity range of CCT estimator-function
                for ii in range(procent_estimates.shape[0]):
                    if (ccttemp >=
                        (1.0 - 0.05 *
                         (ii == 0)) * procent_estimates[ii, 0]) & (
                             ccttemp < (1.0 + 0.05 *
                                        (ii == 0)) * procent_estimates[ii, 1]):
                        procent_estimate = procent_estimates[ii, 2]
                        break

                dT = np.multiply(
                    ccttemp, procent_estimate
                )  # determines range around CCTtemp (25% around estimate) or 100 K
            elif (ccttemp == -1) & (np.isnan(ccttemp) == False):
                ccttemp = np.array([procent_estimates[0, 0] / 2])
                procent_estimate = 1  # cover 0 K to min_CCT of estimator
                dT = np.multiply(ccttemp, procent_estimate)
            elif (np.isnan(ccttemp) == True):
                upper_cct = np.array(upper_cct_max)
                lower_cct = np.array(10.0**2)
                cct_scale_fun = lambda x: np.log10(x)
                cct_scale_ifun = lambda x: np.power(10.0, x)
                dT = (cct_scale_fun(upper_cct) - cct_scale_fun(lower_cct)) / 2
                ccttemp = np.array(
                    [cct_scale_ifun(cct_scale_fun(lower_cct) + dT)])
                approx_cct_temp = False
        else:
            dT = dT_approx_cct_False

        nsteps = 3
        signduv = 1.0
        ccttemp = ccttemp[0]
        delta_cct = dT
        while ((delta_cct > accuracy)):  # keep converging on CCT

            #generate range of ccts:
            ccts_i = cct_scale_ifun(
                np.linspace(
                    cct_scale_fun(ccttemp) - dT,
                    cct_scale_fun(ccttemp) + dT, nsteps + 1))

            ccts_i[ccts_i < 100.0] = 100.0  # avoid nan's in calculation

            # Generate BB:
            BB = cri_ref(ccts_i, wl3=wl, ref_type=['BB'], cieobs=cieobs)

            # Calculate xyz:
            xyz = spd_to_xyz(BB, cieobs=cieobs)

            # Convert to CIE 1960 u,v:
            Yuv = xyz_to_Yuv(np.squeeze(
                xyz))  # remove possible 1-dim + convert xyz to CIE 1976 u',v'
            #axis_of_v3 = len(Yuv.shape)-1 # axis containing color components
            u = Yuv[:, 1, None]  # get CIE 1960 u
            v = (2.0 / 3.0) * Yuv[:, 2, None]  # get CIE 1960 v

            # Calculate distance between list of uv's and uv of test source:
            dc = ((ut[i] - u)**2 + (vt[i] - v)**2)**0.5
            if np.isnan(dc.min()) == False:
                #eps = _EPS
                q = dc.argmin()

                if np.size(
                        q
                ) > 1:  #to minimize calculation time: only calculate median when necessary
                    cct = np.median(ccts_i[q]) # median of the candidate ccts (not of the output array)
                    duv = np.median(dc[q])
                    q = np.median(q)
                    q = int(q)  #must be able to serve as index

                else:
                    cct = ccts_i[q]
                    duv = dc[q]

                if (q == 0):
                    ccttemp = cct_scale_ifun(
                        np.array(cct_scale_fun([cct])) + 2 * dT / nsteps)
                    #dT = 2.0*dT/nsteps
                    continue  # look in higher section of planckian locus

                if (q == np.size(ccts_i) - 1): # q is an index, so compare against the last index
                    ccttemp = cct_scale_ifun(
                        np.array(cct_scale_fun([cct])) - 2 * dT / nsteps)
                    #dT = 2.0*dT/nsteps
                    continue  # look in lower section of planckian locus

                if (q > 0) & (q < np.size(ccts_i) - 1):
                    dT = 2 * dT / nsteps
                    # get Duv sign:
                    d_p1m1 = ((u[q + 1] - u[q - 1])**2.0 +
                              (v[q + 1] - v[q - 1])**2.0)**0.5

                    x = (dc[q - 1]**2.0 - dc[q + 1]**2.0 +
                         d_p1m1**2.0) / (2.0 * d_p1m1)
                    vBB = v[q - 1] + ((v[q + 1] - v[q - 1]) * (x / d_p1m1))
                    signduv = np.sign(vt[i] - vBB)

                #calculate difference with previous intermediate solution:
                delta_cct = abs(cct - ccttemp)

                ccttemp = np.array(cct)  #%set new intermediate CCT
                approx_cct_temp = approx_cct_temp_temp
            else:
                ccttemp = np.nan
                cct = np.nan
                duv = np.nan

        duvs[i] = signduv * abs(duv)
        ccts[i] = cct

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2d(ccts)
    elif (out == 'duv') | (out == -1):
        return np2d(duvs)
    elif (out == 'cct,duv') | (out == 2):
        return np2d(ccts), np2d(duvs)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((ccts, duvs)).T
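
A hypothetical call of the brute-force search, assuming xyz_to_cct_search and its luxpy dependencies are in scope; the white point below is only roughly D65-like:

import numpy as np

xyzw = np.array([[95.05, 100.0, 108.88]])
cct, duv = xyz_to_cct_search(xyzw, out='cct,duv', accuracy=0.1, approx_cct_temp=True)
print(cct, duv)    # expect a cct in the vicinity of 6500 K and a small duv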
Example #26
def spd_to_ies_tm30_metrics(SPD, cri_type = None, \
                            hbins = 16, start_hue = 0.0,\
                            scalef = 100, \
                            vf_model_type = _VF_MODEL_TYPE, \
                            vf_pcolorshift = _VF_PCOLORSHIFT,\
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculates IES TM30 metrics from spectral data.      
      
      Args:
        :SPD:
            | numpy.ndarray with spectral data 
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Non-None values of :hbins:, :start_hue: and :scalef: overwrite 
              the input in cri_type['rg_pars'].
        :hbins:
            | None or numpy.ndarray with sorted hue bin centers (°), optional
        :start_hue: 
            | None, optional
        :scalef:
            | None, optional
            | Scale factor for reference circle.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified or 
              summarized by the model parameters themselves OR by calculating the
              dCoverC and dH at resp. 5 and 6 hues. :vf_pcolorshift: specifies 
              these hues and chroma level.
        :scale_vf_chroma_to_sample_chroma: 
            | False, optional
            | Scale chroma of reference and test vf fields such that average of 
              binned reference chroma equals that of the binned sample chroma
              before calculating hue bin metrics.
            
    Returns:
        :data: 
            | dict with color rendering data:
            | - 'SPD'  : ndarray test SPDs
            | - 'bjabt': ndarray with binned jab data under test SPDs
            | - 'bjabr': ndarray with binned jab data under reference SPDs
            | - 'cct'  : ndarray with CCT of test SPD
            | - 'duv'  : ndarray with distance to blackbody locus of test SPD
            | - 'Rf'   : ndarray with general color fidelity indices
            | - 'Rg'   : ndarray with gamut area indices
            | - 'Rfi'  : ndarray with specific color fidelity indices
            | - 'Rfhi' : ndarray with local (hue binned) fidelity indices
            | - 'Rcshi': ndarray with local chroma shifts indices
            | - 'Rhshi': ndarray with local hue shifts indices
            | - 'Rt'  : ndarray with general metameric uncertainty index Rt
            | - 'Rti' : ndarray with specific metameric uncertainty indices Rti
            | - 'Rfhi_vf' : ndarray with local (hue binned) fidelity indices 
            |               obtained from VF model predictions at color space
            |               pixel coordinates
            | - 'Rcshi_vf': ndarray with local chroma shifts indices 
            |               (same as above)
            | - 'Rhshi_vf': ndarray with local hue shifts indices 
            |               (same as above)
    """
    if cri_type is None:
        cri_type = 'iesrf'

    #Calculate color rendering measures for SPDs in data:
    out = 'Rf,Rg,cct,duv,Rfi,jabt,jabr,Rfhi,Rcshi,Rhshi,cri_type'
    if isinstance(cri_type, str):  # get dict
        cri_type = _CRI_DEFAULTS[cri_type].copy()
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    Rf, Rg, cct, duv, Rfi, jabt, jabr, Rfhi, Rcshi, Rhshi, cri_type = spd_to_cri(
        SPD, cri_type=cri_type, out=out)
    rg_pars = cri_type['rg_pars']

    #Calculate Metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(SPD,
                                 cri_type=cri_type,
                                 model_type=vf_model_type,
                                 cspace=cri_type['cspace'],
                                 sampleset=eval(cri_type['sampleset']),
                                 pool=False,
                                 pcolorshift=vf_pcolorshift,
                                 vfcolor=0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti']
                    for i in range(len(dataVF))][0])

    # Get normalized and sliced sample data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
        rg_pars[x] for x in sorted(rg_pars.keys())
    ]
    normalized_chroma_ref = scalef
    # np.sqrt((jabr[...,1]**2 + jabr[...,2]**2)).mean(axis = 0).mean()

    if scale_vf_chroma_to_sample_chroma == True:
        normalize_gamut = False
        bjabt, bjabr = gamut_slicer(
            jabt,
            jabr,
            out='jabt,jabr',
            nhbins=nhbins,
            start_hue=start_hue,
            normalize_gamut=normalize_gamut,
            normalized_chroma_ref=normalized_chroma_ref,
            close_gamut=True)
        Cr_s = (np.sqrt(bjabr[:-1, ..., 1]**2 + bjabr[:-1, ..., 2]**2)).mean(
            axis=0)  # for rescaling vector field average reference chroma

    normalize_gamut = True  #(for plotting)
    bjabt, bjabr = gamut_slicer(jabt,
                                jabr,
                                out='jabt,jabr',
                                nhbins=nhbins,
                                start_hue=start_hue,
                                normalize_gamut=normalize_gamut,
                                normalized_chroma_ref=normalized_chroma_ref,
                                close_gamut=True)

    Rfhi_vf = np.empty(Rfhi.shape)
    Rcshi_vf = np.empty(Rcshi.shape)
    Rhshi_vf = np.empty(Rhshi.shape)
    for i in range(cct.shape[0]):

        # Get normalized and sliced VF data for hue specific metrics:
        vfjabt = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axt'].shape),
             dataVF[i]['fielddata']['vectorfield']['axt'],
             dataVF[i]['fielddata']['vectorfield']['bxt']))
        vfjabr = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axr'].shape),
             dataVF[i]['fielddata']['vectorfield']['axr'],
             dataVF[i]['fielddata']['vectorfield']['bxr']))
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())
        ]
        vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
            vfjabt,
            vfjabr,
            out='jabt,jabr,DEi',
            nhbins=nhbins,
            start_hue=start_hue,
            normalize_gamut=normalize_gamut,
            normalized_chroma_ref=normalized_chroma_ref,
            close_gamut=False)

        if scale_vf_chroma_to_sample_chroma == True:
            #rescale vfbjabt and vfbjabr to same chroma level as bjabr.
            Cr_vfb = np.sqrt(vfbjabr[..., 1]**2 + vfbjabr[..., 2]**2)
            Cr_vf = np.sqrt(vfjabr[..., 1]**2 + vfjabr[..., 2]**2)
            hr_vf = np.arctan2(vfjabr[..., 2], vfjabr[..., 1])
            Ct_vf = np.sqrt(vfjabt[..., 1]**2 + vfjabt[..., 2]**2)
            ht_vf = np.arctan2(vfjabt[..., 2], vfjabt[..., 1])
            fC = Cr_s.mean() / Cr_vfb.mean()
            vfjabr[..., 1] = fC * Cr_vf * np.cos(hr_vf)
            vfjabr[..., 2] = fC * Cr_vf * np.sin(hr_vf)
            vfjabt[..., 1] = fC * Ct_vf * np.cos(ht_vf)
            vfjabt[..., 2] = fC * Ct_vf * np.sin(ht_vf)
            vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
                vfjabt,
                vfjabr,
                out='jabt,jabr,DEi',
                nhbins=nhbins,
                start_hue=start_hue,
                normalize_gamut=normalize_gamut,
                normalized_chroma_ref=normalized_chroma_ref,
                close_gamut=False)

        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        vfRfhi, vfRcshi, vfRhshi = jab_to_rhi(
            jabt=vfbjabt,
            jabr=vfbjabr,
            DEi=vfbDEi,
            cri_type=cri_type,
            scale_factor=scale_factor,
            scale_fcn=scale_fcn,
            use_bin_avg_DEi=True
        )  # [:-1,...] removes last row from jab as this was added to close the gamut.

        Rfhi_vf[:, i:i + 1] = vfRfhi
        Rhshi_vf[:, i:i + 1] = vfRhshi
        Rcshi_vf[:, i:i + 1] = vfRcshi

    # Create dict with CRI info:
    data = {'SPD' : SPD, 'cct' : cct, 'duv' : duv, 'bjabt' : bjabt, 'bjabr' : bjabr,\
           'Rf' : Rf, 'Rg' : Rg, 'Rfi': Rfi, 'Rfhi' : Rfhi, 'Rcshi' : Rcshi, 'Rhshi' : Rhshi, \
           'Rt' : Rt, 'Rti' : Rti,  'Rfhi_vf' : Rfhi_vf, 'Rcshi_vf' : Rcshi_vf, 'Rhshi_vf' : Rhshi_vf, \
           'dataVF' : dataVF,'cri_type' : cri_type}
    return data
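
A hypothetical call, assuming spd_to_ies_tm30_metrics and its luxpy dependencies are in scope and that SPD is an ndarray with wavelengths in its first row and one or more spectra in the rows below:

tm30 = spd_to_ies_tm30_metrics(SPD, hbins=16, start_hue=0.0, scalef=100)
print(tm30['Rf'], tm30['Rg'])     # general color fidelity and gamut area indices
print(tm30['Rcshi'].shape)        # one local chroma-shift value per hue bin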
Example #27
def cct_to_xyz(ccts,
               duv=None,
               cieobs=_CIEOBS,
               wl=None,
               mode='lut',
               out=None,
               accuracy=0.1,
               force_out_of_lut=True,
               upper_cct_max=10.0**20,
               approx_cct_temp=True):
    """
    Convert correlated color temperature (CCT) and Duv (distance above (>0) or 
    below (<0) the Planckian locus) to XYZ tristimulus values.
    
    | Finds xyzw_estimated by minimization of:
    |    
    |    F = numpy.sqrt(((100.0*(cct_min - cct)/(cct))**2.0) 
    |         + (((duv_min - duv)/(duv))**2.0))
    |    
    | with cct,duv the input values and cct_min, duv_min calculated using 
    | luxpy.xyz_to_cct(xyzw_estimated,...).
    
    Args:
        :ccts: 
            | ndarray of cct values
        :duv: 
            | None or ndarray of duv values, optional
            | Note that duv can be supplied together with cct values in :ccts: 
              as ndarray with shape (N,2)
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculated xyzw.
        :mode: 
            | 'lut' or 'search', optional
            | Determines what method to use.
        :out: 
            | None (or 1), optional
            | If not None or 1: output a ndarray that contains estimated 
              xyz and minimization results: 
            | (cct_min, duv_min, F_min (objective fcn value))
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct to 
              speed up search.
        :force_out_of_lut: 
            | True, optional
            | If True and cct is out of range of the LUT, then switch to 
              brute-force search method, else return numpy.nan values.
        
    Returns:
        :returns: 
            | ndarray with estimated XYZ tristimulus values
    
    Note:
        If duv is not supplied (:ccts:.shape is (N,1) and :duv: is None), 
        source is assumed to be on the Planckian locus.
	 """
    # make ccts a min. 2d np.array:
    if isinstance(ccts, list):
        ccts = np2dT(np.array(ccts))
    else:
        ccts = np2d(ccts)

    if len(ccts.shape) > 2:
        raise Exception('cct_to_xyz(): Input ccts.shape must be <= 2 !')

    # get cct and duv arrays from :ccts:
    cct = np2d(ccts[:, 0, None])

    if (duv is None) & (ccts.shape[1] == 2):
        duv = np2d(ccts[:, 1, None])
    elif duv is not None:
        duv = np2d(duv)

    #get estimates of approximate xyz values in case duv = None:
    BB = cri_ref(ccts=cct, wl3=wl, ref_type=['BB'])
    xyz_est = spd_to_xyz(data=BB, cieobs=cieobs, out=1)
    results = np.ones([ccts.shape[0], 3]) * np.nan

    if duv is not None:

        # optimization/minimization setup:
        def objfcn(uv_offset,
                   uv0,
                   cct,
                   duv,
                   out=1):  #, cieobs = cieobs, wl = wl, mode = mode):
            uv0 = np2d(uv0 + uv_offset)
            Yuv0 = np.concatenate((np2d([100.0]), uv0), axis=1)
            cct_min, duv_min = xyz_to_cct(Yuv_to_xyz(Yuv0),
                                          cieobs=cieobs,
                                          out='cct,duv',
                                          wl=wl,
                                          mode=mode,
                                          accuracy=accuracy,
                                          force_out_of_lut=force_out_of_lut,
                                          upper_cct_max=upper_cct_max,
                                          approx_cct_temp=approx_cct_temp)
            F = np.sqrt(((100.0 * (cct_min[0] - cct[0]) / (cct[0]))**2.0) +
                        (((duv_min[0] - duv[0]) / (duv[0]))**2.0))
            if out == 'F':
                return F
            else:
                return np.concatenate((cct_min, duv_min, np2d(F)), axis=1)

        # loop through each xyz_est:
        for i in range(xyz_est.shape[0]):
            xyz0 = xyz_est[i]
            cct_i = cct[i]
            duv_i = duv[i]
            cct_min, duv_min = xyz_to_cct(xyz0,
                                          cieobs=cieobs,
                                          out='cct,duv',
                                          wl=wl,
                                          mode=mode,
                                          accuracy=accuracy,
                                          force_out_of_lut=force_out_of_lut,
                                          upper_cct_max=upper_cct_max,
                                          approx_cct_temp=approx_cct_temp)

            if np.abs(duv[i]) > _EPS:
                # find xyz:
                Yuv0 = xyz_to_Yuv(xyz0)
                uv0 = Yuv0[0][1:3]

                OptimizeResult = minimize(fun=objfcn,
                                          x0=np.zeros((1, 2)),
                                          args=(uv0, cct_i, duv_i, 'F'),
                                          method='Nelder-Mead',
                                          options={
                                              "maxiter": np.inf,
                                              "maxfev": np.inf,
                                              'xatol': 0.000001,
                                              'fatol': 0.000001
                                          })
                betas = OptimizeResult['x']
                #betas = np.zeros(uv0.shape)
                if out is not None:
                    results[i] = objfcn(betas, uv0, cct_i, duv_i, out=3)

                uv0 = np2d(uv0 + betas)
                Yuv0 = np.concatenate((np2d([100.0]), uv0), axis=1)
                xyz_est[i] = Yuv_to_xyz(Yuv0)

            else:
                xyz_est[i] = xyz0

    if (out is None) | (out == 1):
        return xyz_est
    else:
        # Also output results of minimization:
        return np.concatenate((xyz_est, results), axis=1)
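
A hypothetical round trip, assuming cct_to_xyz, xyz_to_cct and their luxpy dependencies are in scope:

import numpy as np

ccts = np.array([[3000.0, 0.01], [6500.0, -0.005]])    # (cct, duv) pairs
xyz = cct_to_xyz(ccts, mode='lut')
# feeding xyz back through xyz_to_cct(xyz, out='cct,duv', mode='lut')
# should approximately recover the (cct, duv) pairs above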
Example #28
def cie2006cmfsEx(age = 32,fieldsize = 10, wl = None,\
                  var_od_lens = 0, var_od_macula = 0, \
                  var_od_L = 0, var_od_M = 0, var_od_S = 0,\
                  var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,\
                  out = 'LMS', allow_negative_values = False):
    """
    Generate Individual Observer CMFs (cone fundamentals) 
    based on CIE2006 cone fundamentals and published literature 
    on observer variability in color matching and in physiological parameters.
    
    Args:
        :age: 
            | 32 or float or int, optional
            | Observer age
        :fieldsize:
            | 10, optional
            | Field size of stimulus in degrees (between 2° and 10°).
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :var_od_lens:
            | 0, optional
            | Std Dev. in peak optical density [%] of lens.
        :var_od_macula:
            | 0, optional
            | Std Dev. in peak optical density [%] of macula.
        :var_od_L:
            | 0, optional
            | Std Dev. in peak optical density [%] of L-cone.
        :var_od_M:
            | 0, optional
            | Std Dev. in peak optical density [%] of M-cone.
        :var_od_S:
            | 0, optional
            | Std Dev. in peak optical density [%] of S-cone.
        :var_shft_L:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of L-cone. 
        :var_shft_M:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of M-cone.  
        :var_shft_S:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of S-cone. 
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | If False: negative values in the cone fundamentals or color 
              matching functions are clipped: X[X<0] = 0.
            
    Returns:
        :returns: 
            | - 'LMS' : ndarray with individual observer area-normalized 
            |           cone fundamentals. Wavelengths have been added.
                
            | [- 'trans_lens': ndarray with lens transmission 
            |      (no wavelengths added, no interpolation)
            |  - 'trans_macula': ndarray with macula transmission 
            |      (no wavelengths added, no interpolation)
            |  - 'sens_photopig' : ndarray with photopigment sens. 
            |      (no wavelengths added, no interpolation)]
            
    References:
         1. `Asano Y, Fairchild MD, and Blondé L (2016). 
         Individual Colorimetric Observer Model. 
         PLoS One 11, 1–19. 
         <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
        
         2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). 
         Color matching experiment for highlighting interobserver variability. 
         Color Res. Appl. 41, 530–539. 
         <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
         
         3. `CIE, and CIE (2006). 
         Fundamental Chromaticity Diagram with Physiological Axes - Part I 
         (Vienna: CIE). 
         <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 
         
         4. `Asano's Individual Colorimetric Observer Model 
         <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """
    fs = fieldsize
    rmd = _INDVCMF_DATA['rmd'].copy()
    LMSa = _INDVCMF_DATA['LMSa'].copy()
    docul = _INDVCMF_DATA['docul'].copy()

    # field size corrected macular density:
    pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * (
        1 + var_od_macula / 100)  # varied peak optical density of macula
    corrected_rmd = rmd * pkOd_Macula

    # age corrected lens/ocular media density:
    if (age <= 60):
        correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2]
    else:
        correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2]
    correct_lomd = correct_lomd * (1 + var_od_lens / 100
                                   )  # varied overall optical density of lens

    # Peak Wavelength Shift:
    wl_shifted = np.empty(LMSa.shape)
    wl_shifted[0] = _WL + var_shft_L
    wl_shifted[1] = _WL + var_shft_M
    wl_shifted[2] = _WL + var_shft_S

    LMSa_shft = np.empty(LMSa.shape)
    kind = 'cubic'
    LMSa_shft[0] = interpolate.interp1d(wl_shifted[0],
                                        LMSa[0],
                                        kind=kind,
                                        bounds_error=False,
                                        fill_value="extrapolate")(_WL)
    LMSa_shft[1] = interpolate.interp1d(wl_shifted[1],
                                        LMSa[1],
                                        kind=kind,
                                        bounds_error=False,
                                        fill_value="extrapolate")(_WL)
    LMSa_shft[2] = interpolate.interp1d(wl_shifted[2],
                                        LMSa[2],
                                        kind=kind,
                                        bounds_error=False,
                                        fill_value="extrapolate")(_WL)
    #    LMSa[2,np.where(_WL >= _WL_CRIT)] = 0 #np.nan # Not defined above 620nm
    #    LMSa_shft[2,np.where(_WL >= _WL_CRIT)] = 0

    ssw = np.hstack(
        (0, np.sign(np.diff(LMSa_shft[2, :]))
         ))  #detect poor interpolation (sign switch due to instability)
    LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan

    # corrected LMS (no age correction):
    pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (
        1 + var_od_L / 100)  # varied peak optical density of L-cone
    pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (
        1 + var_od_M / 100)  # varied peak optical density of M-cone
    pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * (
        1 + var_od_S / 100)  # varied peak optical density of S-cone

    alpha_lms = 0. * LMSa_shft
    alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0]))
    alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1]))
    alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2]))

    # this fix is required because the above math fails for alpha_lms[2,:]==0
    alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0

    # Corrected to Corneal Incidence:
    lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones(
        alpha_lms.shape)

    # Corrected to Energy Terms:
    lms_bar = lms_barq * _WL

    # Set NaN values to zero:
    lms_bar[np.isnan(lms_bar)] = 0

    # normalized:
    LMS = 100 * lms_bar / np.nansum(lms_bar, axis=1, keepdims=True)

    # Output extra:
    trans_lens = 10**(-correct_lomd)
    trans_macula = 10**(-corrected_rmd)
    sens_photopig = alpha_lms * _WL

    # Add wavelengths:
    LMS = np.vstack((_WL, LMS))

    if ('xyz' in out.lower().split(',')):
        LMS = lmsb_to_xyzb(LMS,
                           fieldsize,
                           out='xyz',
                           allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    # Interpolate/extrapolate:
    if wl is None:
        interpolation = None
    else:
        interpolation = 'cubic'
    LMS = spd(LMS, wl=wl, interpolation=interpolation, norm_type='area')

    if (out == 'LMS'):
        return LMS
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'):
        return LMS, trans_lens, trans_macula, sens_photopig
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'):
        return LMS, trans_lens, trans_macula, sens_photopig, LMSa
    else:
        return eval(out)
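A minimal usage sketch for the function above; it assumes cie2006cmfsEx and the module-level constants it relies on (_WL, _WL_CRIT, the CIE2006 data) are importable from this code, and the parameter values are purely illustrative:

# Nominal 10° observer of age 32 with no physiological deviations
# (the keyword names mirror the call made in Example #30 below):
LMS, trans_lens, trans_macula, sens_photopig = cie2006cmfsEx(
    age=32, fieldsize=10,
    var_od_lens=0, var_od_macula=0,
    var_od_L=0, var_od_M=0, var_od_S=0,
    var_shft_L=0, var_shft_M=0, var_shft_S=0,
    out='LMS,trans_lens,trans_macula,sens_photopig')
# LMS[0] holds the wavelengths; LMS[1:] the L, M and S cone fundamentals.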
Example #29
0
def spd_to_mcri(SPD, D=0.9, E=None, Yb=20.0, out='Rm', wl=None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs, 
            |   first axis contains the wavelengths)
        :D: 
            | 0.9, optional
            | Degree of adaptation.
        :E: 
            | None, optional
            | Illuminance in lux 
            |  (used to calculate La = (Yb/100)*(E/pi) to then calculate D 
            |  following the 'cat02' model). 
            | If None: the degree is determined by :D:
            |  If (:E: is not None) & (:Yb: is None):  :E: is assumed to contain 
            |    the adapting field luminance La (cd/m²).
        :Yb: 
            | 20.0, optional
            | Luminance factor of background. (used when calculating La from E)
            | If None, E contains La (cd/m²).
        :out: 
            | 'Rm' or str, optional
            | Specifies requested output (e.g. 'Rm,Rmi,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns: 
            | float or ndarray with MCRI Rm for :out: 'Rm'
            | Other output is also possible by changing the :out: str value.        
          
    References:
        1. `K.A.G. Smet, W.R. Ryckaert, M.R. Pointer, G. Deconinck, P. Hanselaer,(2012)
        “A memory colour quality metric for white light sources,” 
        Energy Build., vol. 49, no. C, pp. 216–225.
        <http://www.sciencedirect.com/science/article/pii/S0378778812000837>`_
    """
    SPD = np2d(SPD)

    if wl is not None:
        SPD = spd(data=SPD, interpolation=_S_INTERP_TYPE, kind='np', wl=wl)

    # unpack metric default values:
    avg, catf, cieobs, cri_specific_pars, cspace, ref_type, rg_pars, sampleset, scale = [
        _MCRI_DEFAULTS[x] for x in sorted(_MCRI_DEFAULTS.keys())
    ]
    similarity_ai = cri_specific_pars['similarity_ai']
    Mxyz2lms = cspace['Mxyz2lms']
    scale_fcn = scale['fcn']
    scale_factor = scale['cfactor']
    sampleset = eval(sampleset)

    # A. calculate xyz:
    xyzti, xyztw = spd_to_xyz(SPD, cieobs=cieobs['xyz'], rfl=sampleset, out=2)
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyztw,
                              cieobs=cieobs['cct'],
                              out='cct,duv',
                              mode='lut')

    # B. perform chromatic adaptation to adopted whitepoint of ipt color space, i.e. D65:
    if catf is not None:
        Dtype_cat, F, Yb_cat, catmode_cat, cattype_cat, mcat_cat, xyzw_cat = [
            catf[x] for x in sorted(catf.keys())
        ]

        # calculate degree of adaptation D:
        if E is not None:
            if Yb is not None:
                La = (Yb / 100.0) * (E / np.pi)
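                # e.g. E = 500 lx with Yb = 20 gives La = 0.2 * 500 / pi ~ 31.8 cd/m²
                # (illustrative values, not from the source)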
            else:
                La = E
            D = cat.get_degree_of_adaptation(Dtype=Dtype_cat, F=F, La=La)
        else:
            Dtype_cat = None  # direct input of D

        if (E is None) and (D is None):
            D = 1.0  # set degree of adaptation to 1 !
        if D > 1.0: D = 1.0
        if D < 0.6: D = 0.6  # put a limit on the lowest D

        # apply cat:
        xyzti = cat.apply(xyzti,
                          cattype=cattype_cat,
                          catmode=catmode_cat,
                          xyzw1=xyztw,
                          xyzw0=None,
                          xyzw2=xyzw_cat,
                          D=D,
                          mcat=[mcat_cat],
                          Dtype=Dtype_cat)
        xyztw = cat.apply(xyztw,
                          cattype=cattype_cat,
                          catmode=catmode_cat,
                          xyzw1=xyztw,
                          xyzw0=None,
                          xyzw2=xyzw_cat,
                          D=D,
                          mcat=[mcat_cat],
                          Dtype=Dtype_cat)

    # C. convert xyz to ipt and split:
    ipt = xyz_to_ipt(
        xyzti, cieobs=cieobs['xyz'], M=Mxyz2lms
    )  #input matrix as published in Smet et al. 2012, Energy and Buildings
    I, P, T = asplit(ipt)

    # D. calculate specific (hue dependent) similarity indicators, Si:
    if len(xyzti.shape) == 3:
        ai = np.expand_dims(similarity_ai, axis=1)
    else:
        ai = similarity_ai
    a1, a2, a3, a4, a5 = asplit(ai)
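    # Bivariate quadratic form centred on the memory colour (a1, a2) in the P-T
    # plane; a3, a4 and a5 act as the inverse-covariance terms of a Mahalanobis distance.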
    mahalanobis_d2 = (a3 * np.power((P - a1), 2.0) + a4 * np.power(
        (T - a2), 2.0) + 2.0 * a5 * (P - a1) * (T - a2))
    if (len(mahalanobis_d2.shape) == 3) & (mahalanobis_d2.shape[-1] == 1):
        mahalanobis_d2 = mahalanobis_d2[:, :, 0].T
    Si = np.exp(-0.5 * mahalanobis_d2)

    # E. calculate general similarity indicator, Sa:
    Sa = avg(Si, axis=0, keepdims=True)

    # F. rescale similarity indicators (Si, Sa) with a 0-1 scale to memory color rendition indices (Rmi, Rm) with a 0 - 100 scale:
    Rmi = scale_fcn(np.log(Si), scale_factor=scale_factor)
    Rm = np2d(scale_fcn(np.log(Sa), scale_factor=scale_factor))

    # G. calculate Rg (polyarea of test / polyarea of memory colours):
    if 'Rg' in out.split(','):
        I = I[
            ...,
            None]  #broadcast_shape(I, target_shape = None,expand_2d_to_3d = 0)
        a1 = a1[:, None] * np.ones(
            I.shape
        )  #broadcast_shape(a1, target_shape = None,expand_2d_to_3d = 0)
        a2 = a2[:, None] * np.ones(
            I.shape
        )  #broadcast_shape(a2, target_shape = None,expand_2d_to_3d = 0)
        a12 = np.concatenate(
            (a1, a2), axis=2
        )  #broadcast_shape(np.hstack((a1,a2)), target_shape = ipt.shape,expand_2d_to_3d = 0)
        ipt_mc = np.concatenate((I, a12), axis=2)
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())
        ]

        Rg = jab_to_rg(ipt,
                       ipt_mc,
                       ordered_and_sliced=False,
                       nhbins=nhbins,
                       start_hue=start_hue,
                       normalize_gamut=normalize_gamut)

    if (out != 'Rm'):
        return eval(out)
    else:
        return Rm
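A minimal, hedged usage sketch for spd_to_mcri; the input spectrum below is a hypothetical ramp (first row wavelengths in nm, second row radiant power), not data from the source:

import numpy as np

wl = np.arange(360, 831, 1)
my_spd = np.vstack((wl, np.linspace(0.5, 1.5, wl.shape[0])))  # illustrative SPD

Rm = spd_to_mcri(my_spd)                                       # general index Rm only
Rm, Rmi, cct, duv = spd_to_mcri(my_spd, out='Rm,Rmi,cct,duv')  # per-sample indices plus CCT/Duv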
Example #30
0
def genMonteCarloObs(n_obs=1,
                     fieldsize=10,
                     list_Age=[32],
                     out='LMS',
                     wl=None,
                     allow_negative_values=False):
    """
    Monte-Carlo generation of individual observer cone fundamentals.
    
    Args: 
        :n_obs: 
            | 1, optional
            | Number of observer CMFs to generate.
        :list_Age:
            | list of observer ages or str, optional
            | Defaults to 32 (cf. CIE 2006 CMFs)
            | If 'us_census': use the 2010 US population census
            |   to generate list_Age.
        :fieldsize: 
            | fieldsize in degrees (between 2° and 10°), optional
            | Defaults to 10°.
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :allow_negative_values: 
            | False, optional
            | If False: negative values in the cone fundamentals or color
            |   matching functions are clipped to zero (X[X<0] = 0).
    
    Returns:
        :returns: 
            | LMS [,var_age, vAll] 
            |   - LMS: ndarray with population LMS functions.
            |   - var_age: ndarray with population observer ages.
            |   - vAll: dict with population physiological factors (see .keys()) 
            
    References:
         1. `Asano Y, Fairchild MD, and Blondé L (2016). 
         Individual Colorimetric Observer Model. 
         PLoS One 11, 1–19. 
         <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
         
         2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). 
         Color matching experiment for highlighting interobserver variability. 
         Color Res. Appl. 41, 530–539. 
         <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
         
         3. `CIE, and CIE (2006). 
         Fundamental Chromaticity Diagram with Physiological Axes - Part I 
         (Vienna: CIE). 
         <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 
         
         4. `Asano's Individual Colorimetric Observer Model 
         <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """

    # Scale down StdDev by scalars optimized using Asano's 75 observers
    # collected in Germany:
    stdDevAllParam = _INDVCMF_STD_DEV_ALL_PARAM.copy()
    scale_factors = [0.98, 0.98, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    scale_factors = dict(zip(list(stdDevAllParam.keys()), scale_factors))
    stdDevAllParam = {
        k: v * scale_factors[k]
        for (k, v) in stdDevAllParam.items()
    }

    # Get Normally-distributed Physiological Factors:
    vAll = getMonteCarloParam(n_obs=n_obs)

    if list_Age == 'us_census':
        list_Age = getUSCensusAgeDist()

    # Generate Random Ages with the same probability density distribution
    # as color matching experiment:
    sz_interval = 1
    list_AgeRound = np.round(np.array(list_Age) / sz_interval) * sz_interval
    h = math.histogram(list_AgeRound,
                       bins=np.unique(list_AgeRound),
                       bin_center=True)[0]
    p = h / h.sum()  # probability density distribution

    var_age = np.random.choice(np.unique(list_AgeRound), \
                               size = n_obs, replace = True,\
                               p = p)

    # Set requested wavelength range:
    if wl is not None:
        wl = getwlr(wl3=wl)
    else:
        wl = _WL

    LMS_All = np.nan * np.ones((3 + 1, wl.shape[0], n_obs))
    for k in range(n_obs):
        t_LMS, t_trans_lens, t_trans_macula, t_sens_photopig = cie2006cmfsEx(age = var_age[k], fieldsize = fieldsize, wl = wl,\
                                                                          var_od_lens = vAll['od_lens'][k], var_od_macula = vAll['od_macula'][k], \
                                                                          var_od_L = vAll['od_L'][k], var_od_M = vAll['od_M'][k], var_od_S = vAll['od_S'][k],\
                                                                          var_shft_L = vAll['shft_L'][k], var_shft_M = vAll['shft_M'][k], var_shft_S = vAll['shft_S'][k],\
                                                                          out = 'LMS,trans_lens,trans_macula,sens_photopig')
        LMS_All[:, :, k] = t_LMS


#        listout = out.split(',')
#        if ('trans_lens' in listout) | ('trans_macula' in listout) | ('trans_photopig' in listout):
#            trans_lens[:,k] = t_trans_lens
#            trans_macula[:,k] = t_trans_macula
#            sens_photopig[:,:,k] = t_sens_photopig

    if n_obs == 1:
        LMS_All = np.squeeze(LMS_All, axis=2)

    if ('xyz' in out.lower().split(',')):
        LMS_All = lmsb_to_xyzb(LMS_All,
                               fieldsize,
                               out='xyz',
                               allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    if (out == 'LMS'):
        return LMS_All
    elif (out == 'LMS,var_age,vAll'):
        return LMS_All, var_age, vAll
    else:
        return eval(out)
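A minimal, hedged usage sketch for genMonteCarloObs; the number of observers, ages and field size are illustrative values only:

# Draw 10 simulated observers for a 2° field and also return the sampled ages
# and the physiological factors used for each draw:
LMS_pop, var_age, vAll = genMonteCarloObs(n_obs=10,
                                          fieldsize=2,
                                          list_Age=[25, 32, 45],
                                          out='LMS,var_age,vAll')
# LMS_pop has shape (4, n_wavelengths, 10): wavelength row + L, M, S rows per observer.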