Example #1
def line_intersect(a1, a2, b1, b2):
    """
    Line intersections of two series of line segments a and b.
        
    Args:
        :a1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line a
        :a2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line a
        :b1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line b
        :b2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line b
    
    Note: 
        N is the number of line segments a and b.
    
    Returns:
        :returns: 
            | ndarray with line-intersections (.shape = (N,2))
    
    References:
        1. https://stackoverflow.com/questions/3252194/numpy-and-line-intersections
    """
    T = np.array([[0.0, -1.0], [1.0, 0.0]])
    da = np.atleast_2d(a2 - a1)
    db = np.atleast_2d(b2 - b1)
    dp = np.atleast_2d(a1 - b1)
    dap = np.dot(da, T)
    denom = np.sum(dap * db, axis=1)
    num = np.sum(dap * dp, axis=1)
    return np.atleast_2d(num / denom).T * db + b1
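# A minimal usage sketch (assumption: numpy available as np). The two segment
# pairs below intersect at (0.5, 0.5) and (2.0, 1.0), respectively:
import numpy as np

a1 = np.array([[0.0, 0.0], [1.0, 0.0]])   # end-point 1 of segments a
a2 = np.array([[1.0, 1.0], [3.0, 2.0]])   # end-point 2 of segments a
b1 = np.array([[0.0, 1.0], [2.0, 0.0]])   # end-point 1 of segments b
b2 = np.array([[1.0, 0.0], [2.0, 2.0]])   # end-point 2 of segments b
print(line_intersect(a1, a2, b1, b2))     # -> [[0.5, 0.5], [2.0, 1.0]]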
Example #2
def angle_v1v2(v1,v2,htype = 'deg'):
    """
    Calculates angle between two vectors.
    
    Args:
        :v1: 
            | ndarray with vector 1
        :v2: 
            | ndarray with vector 2
        :htype:
            | 'deg' or 'rad', optional
            | Requested angle type.
    
    Returns:
        :ang: 
            | ndarray with angle(s) between v1 and v2 (in units set by :htype:).
    """
    denom = magnitude_v(v1)*magnitude_v(v2)
    denom[denom==0.] = np.nan
    ang = np.arccos(np.sum(v1*v2,axis=1)/denom)
    if htype == 'deg':
        ang = ang*180/np.pi
    return ang
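# Usage sketch. angle_v1v2 relies on the module's magnitude_v helper; when
# pasting this snippet together with the function into one standalone script,
# a row-wise Euclidean norm can stand in for it (assumption):
import numpy as np

magnitude_v = lambda v: np.sqrt((v**2).sum(axis=1))

v1 = np.array([[1.0, 0.0], [1.0, 1.0]])
v2 = np.array([[0.0, 1.0], [1.0, 0.0]])
print(angle_v1v2(v1, v2))  # -> approximately [90., 45.] (degrees)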
Example #3
def get_spd(dvc = 0, Tint = 0.0, autoTint_max = _TINT_MAX, Nscans = 1, wlstep = 1, 
            wlstart = 360, wlend = 830, 
            twait = _TWAIT_STATUS, out = "spd", close_device = True, 
            laser_on = 0, laser_intensity = 1000, verbosity = _VERBOSITY):
    """
    Measure spectral radiance (W/(nm·sr·m²)).
    
    Args:
        :dvc:
            | 0 or Int or ctypes.wintypes.LP_c_ulong, optional
            | Number of the spectrometer device to load (0 = 1st) or handle (ctypes) to pre_initialized device.
        :Tint:
            | 0 or Float, optional
            | Integration time in seconds. (if 0: find best integration time, but < autoTint_max).
        :autoTint_max:
            | Limit Tint to this value when Tint = 0.
        :Nscans:
            | 1 or Int, optional
            | Number of scans to average.
        :wlstep: 
            | 1 or Int, optional
            | Wavelength step size in nm.
        :wlstart:
            | 360 or Int, optional
            | Start wavelength in nm. (min = 350 nm)
        :wlend:
            | 830 or Int, optional
            | End wavelength in nm. (max = 1000 nm)
        :twait:
            | 0.1 or Float, optional
            | Time in seconds to wait before checking status of device. 
            | (If 0: wait :Tint: seconds, unless :Tint: == 0, then wait _TWAIT_STATUS seconds)
        :out:
            | "spd" [",dvc, Errors"], optional
            | Requested return. If "spd" in out.split(","):do spectral measurement, else: initialize dvc handle [and turn laser ON or OFF].
        :close_device:
            | True or False, optional
            | Close device at the end of the measurement.
            | If 'dvc' not in out.split(','): always close!!!
        :laser_on:
            | 0: OFF, >0: ON -> 1: PWM 7Hz, 2: PWM 28 Hz, 3: 255 Hz, optional
            | True (>0): turn laser on to select measurement area; False (0): turn off. 
            | (Can only be ON when "spd" is not in out.split(",") or when :Tint: is None.)
        :laser_intensity: 
            | 1000.0, optional
            | Laser intensity in per mille (‰).
        :verbosity:
            | 1, optional
            | 0: no printed error message output.
    Returns:
        :returns: 
            | spd [,dvc, Errors] (as specified in :out:)
            | - "spd": ndarray with wavelengths (1st row) and spectral radiance (2nd row).
            | - "dvc": ctypes handle to device (if open) or nan (if closed).
            | - "Errors": dict with error message returned by device during various steps of the spectral measurement process.
    """
    # Initialize dict with errors messages for each of the different measurement steps:
    Errors = {} 
    Errors["get_spd"] = None
    out = out.replace(' ','')
    
    # Get wavelength range:
    wls = np.arange(int(wlstart), int(wlend)+int(wlstep), int(wlstep), dtype=np.float32)
    
    # Initialize spd filled with nan's:
    spd = np.vstack((wls, np.nan*np.ones(wls.shape)))

    try:
        # Initialize device :
        dvc, Errors = dvc_open(dvc = dvc, Errors = Errors, out = "dvc,Errors", verbosity = verbosity)    
        
        if (_check_dvc_open(dvc)) & (("spd" in out.split(",")) & (Tint is not None)):
            
            # Turn off laser before starting measurement:
            Errors = set_laser(dvc = dvc, laser_on = False, laser_intensity = laser_intensity, Errors = Errors, verbosity = verbosity)
                    
                            
            # Start measurement:
            Tint, Errors = start_meas(dvc, Tint = Tint, autoTint_max = autoTint_max, Nscans = Nscans, wlstep = wlstep, Errors = Errors, out = "Tint, Errors", verbosity = verbosity)
            
            # wait until measurement is finished (check intermediate status every twait seconds):
            status, Errors = wait_until_meas_is_finished(dvc, Tint = Tint, twait = twait, out = "status,Errors", Errors = Errors, verbosity = verbosity)
            
            if status == False:
                # Read measured spectral radiance from device:
                spd, Errors = read_spectral_radiance(dvc, wlstart = wlstart, wlend = wlend, wlstep = wlstep, out = "spd,Errors", Errors = Errors, verbosity = verbosity)    
            
        elif (("spd" not in out.split(",")) | (Tint is None)): # only dvc handle was requested or to turn laser ON.
            Errors = set_laser(dvc = dvc, laser_on = laser_on, laser_intensity = laser_intensity, Errors = Errors, verbosity = verbosity)
        
        # Close device:
        dvc, Errors = dvc_close(dvc, Errors = Errors, close_device = (close_device) | ('dvc' not in out.split(',')), out = "dvc,Errors", verbosity = verbosity)
    
        
        Errors["get_spd"] = int(np.sum([int(bool(x)) for x in Errors.values() if (x is not None)]) > 0)
        
    except:
        Errors["get_spd"] = "get_spd fails."
        
    finally:
        # Generate requested return:
        if out == "spd":
            return spd
        elif out == "dvc":
            return dvc
        elif out == "Errors":
            return Errors
        elif out == "spd,Errors":
            return spd, Errors
        elif out == "spd,dvc":
            return spd, dvc
        elif out == "spd,Errors,dvc":
            return spd, Errors, dvc
        elif out == "spd,dvc,Errors":
            return spd, dvc, Errors
        else:
            raise Exception("Requested output error.")
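# Hypothetical call sketch: requires a connected spectrometer and the device
# helpers (dvc_open, start_meas, ...) from the surrounding module.
spd, Errors = get_spd(dvc=0, Tint=0.0, out="spd,Errors", verbosity=1)
if not Errors["get_spd"]:
    print(spd.shape)  # (2, N): wavelengths on row 0, spectral radiance on row 1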
def get_poly_model(jabt, jabr, modeltype = _VF_MODEL_TYPE):
    """
    Setup base color shift model (delta_a, delta_b), 
    determine model parameters and accuracy.
    
    | Calculates a base color shift (delta) from the ref. chromaticity ar, br.
    
    Args:
        :jabt: 
            | ndarray with jab color coordinates under the test SPD.
        :jabr: 
            | ndarray with jab color coordinates under the reference SPD.
        :modeltype:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
              (see notes below)
            
    Returns:
        :returns: 
            | (poly_model, 
            |       pmodel, 
            |       dab_model, 
            |        dab_res, 
            |        dCHoverC_res, 
            |        dab_std, 
            |        dCHoverC_std)
            |
            | :poly_model: function handle to model
            | :pmodel: ndarray with model parameters
            | :dab_model: ndarray with ab model predictions from ar, br.
            | :dab_res: ndarray with residuals between 'da,db' of samples and 
            |            'da,db' predicted by the model.
            | :dCHoverC_res: ndarray with residuals between 'dCoverC,dH' 
            |                 of samples and 'dCoverC,dH' predicted by the model.
            |     Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |         (predicted from model, see notes below)
            | :dab_std: ndarray with std of :dab_res:
            | :dCHoverC_std: ndarray with std of :dCHoverC_res: 

    Notes: 
        1. Model types:
            | poly5_model = lambda a,b,p:         p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
            | poly6_model = lambda a,b,p:  p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
        
        2. Calculation of dCoverC and dH:
            | dCoverC = (np.cos(hr)*da + np.sin(hr)*db)/Cr
            | dHoverC = (np.cos(hr)*db - np.sin(hr)*da)/Cr    
    """
    at = jabt[...,1]
    bt = jabt[...,2]
    ar = jabr[...,1]
    br = jabr[...,2]
    
    # A. Calculate da, db:
    da = at - ar
    db = bt - br
    
    # B.1 Calculate model matrix:
    # 5-parameter model:
    M5 = np.array([[np.sum(ar*ar), np.sum(ar*br), np.sum(ar*ar**2),np.sum(ar*ar*br),np.sum(ar*br**2)],
            [np.sum(br*ar), np.sum(br*br), np.sum(br*ar**2),np.sum(br*ar*br),np.sum(br*br**2)],
            [np.sum((ar**2)*ar), np.sum((ar**2)*br), np.sum((ar**2)*ar**2),np.sum((ar**2)*ar*br),np.sum((ar**2)*br**2)],
            [np.sum(ar*br*ar), np.sum(ar*br*br), np.sum(ar*br*ar**2),np.sum(ar*br*ar*br),np.sum(ar*br*br**2)],
            [np.sum((br**2)*ar), np.sum((br**2)*br), np.sum((br**2)*ar**2),np.sum((br**2)*ar*br),np.sum((br**2)*br**2)]])
    #6-parameters model
    M6 = np.array([[ar.size,np.sum(1.0*ar), np.sum(1.0*br), np.sum(1.0*ar**2),np.sum(1.0*ar*br),np.sum(1.0*br**2)],
            [np.sum(ar*1.0),np.sum(ar*ar), np.sum(ar*br), np.sum(ar*ar**2),np.sum(ar*ar*br),np.sum(ar*br**2)],
            [np.sum(br*1.0),np.sum(br*ar), np.sum(br*br), np.sum(br*ar**2),np.sum(br*ar*br),np.sum(br*br**2)],
            [np.sum((ar**2)*1.0),np.sum((ar**2)*ar), np.sum((ar**2)*br), np.sum((ar**2)*ar**2),np.sum((ar**2)*ar*br),np.sum((ar**2)*br**2)],
            [np.sum(ar*br*1.0),np.sum(ar*br*ar), np.sum(ar*br*br), np.sum(ar*br*ar**2),np.sum(ar*br*ar*br),np.sum(ar*br*br**2)],
            [np.sum((br**2)*1.0),np.sum((br**2)*ar), np.sum((br**2)*br), np.sum((br**2)*ar**2),np.sum((br**2)*ar*br),np.sum((br**2)*br**2)]])
    
    # B.2 Define model function:
    poly5_model = lambda a,b,p: p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
    poly6_model = lambda a,b,p: p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
    
    if modeltype == 'M5':
        M = M5
        poly_model = poly5_model
    else:
        M = M6
        poly_model = poly6_model

    M = np.linalg.inv(M)


    # C.1 Data a,b analysis output:
    if modeltype == 'M5':
        da_model_parameters = np.dot(M, np.array([np.sum(da*ar), np.sum(da*br), np.sum(da*ar**2),np.sum(da*ar*br),np.sum(da*br**2)]))
        db_model_parameters = np.dot(M, np.array([np.sum(db*ar), np.sum(db*br), np.sum(db*ar**2),np.sum(db*ar*br),np.sum(db*br**2)]))
    else:
        da_model_parameters = np.dot(M, np.array([np.sum(da*1.0),np.sum(da*ar), np.sum(da*br), np.sum(da*ar**2),np.sum(da*ar*br),np.sum(da*br**2)]))
        db_model_parameters = np.dot(M, np.array([np.sum(db*1.0),np.sum(db*ar), np.sum(db*br), np.sum(db*ar**2),np.sum(db*ar*br),np.sum(db*br**2)]))
    pmodel = np.vstack((da_model_parameters,db_model_parameters))

    # D.1 Calculate model da, db:
    da_model = poly_model(ar,br,pmodel[0])
    db_model = poly_model(ar,br,pmodel[1])
    dab_model = np.hstack((da_model,db_model))

    # D.2 Calculate residuals for da & db:
    da_res = da - da_model
    db_res = db - db_model
    dab_res = np.hstack((da_res,db_res))
    dab_std = np.vstack((np.std(da_res,axis=0),np.std(db_res,axis=0)))

    # E Calculate href, Cref:
    href = np.arctan2(br,ar)
    Cref = (ar**2 + br**2)**0.5
    
    # F Calculate dC/C, dH/C for data and model and calculate residuals:
    dCoverC = (np.cos(href)*da + np.sin(href)*db)/Cref
    dHoverC = (np.cos(href)*db - np.sin(href)*da)/Cref
    dCoverC_model = (np.cos(href)*da_model + np.sin(href)*db_model)/Cref
    dHoverC_model = (np.cos(href)*db_model - np.sin(href)*da_model)/Cref
    dCoverC_res = dCoverC - dCoverC_model
    dHoverC_res = dHoverC - dHoverC_model
    dCHoverC_std = np.vstack((np.std(dCoverC_res,axis = 0),np.std(dHoverC_res,axis = 0)))
    
    dCHoverC_res = np.hstack((href,dCoverC_res,dHoverC_res))

    return poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std
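# Usage sketch on synthetic data (jab arrays shaped (N, 1, 3), as used by
# VF_colorshift_model below); a purely constant ab-shift should be fit almost
# exactly by the degree-6 model:
import numpy as np

np.random.seed(1)
jabr = np.random.randn(100, 1, 3) * np.array([10., 20., 20.]) + np.array([50., 0., 0.])
jabt = jabr + np.array([0., 2.0, -1.0])   # constant shift: da = 2, db = -1
poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = \
    get_poly_model(jabt, jabr, modeltype='M6')
print(pmodel.shape)      # (2, 6): parameters of the da- and db-models
print(dab_std.ravel())   # residual stds, ~0 for a constant shift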
def VF_colorshift_model(S, cri_type = _VF_CRI_DEFAULT, model_type = _VF_MODEL_TYPE, \
                        cspace = _VF_CSPACE, sampleset = None, pool = False, \
                        pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),'Cref' : _VF_MAXR, 'sig' : _VF_SIG}, \
                        vfcolor = 'k',verbosity = 0):
    """
    Applies full vector field model calculations to spectral data.
    
    Args:
        :S: 
            | numpy.ndarray with spectral data.
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool: 
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
              modeling the vector field, while False models a different field 
              for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
              for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                 'Cref' : _VF_MAXR, 
            |                 'sig' : _VF_SIG, 
            |                 'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
              summarized by the model parameters themselves OR by calculating the
              dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
            
    Returns:
        :returns: 
            | list[dict] (each list element refers to a different test SPD)
            | with the following keys:
            |   - 'Source': dict with ndarrays of the S, cct and duv of source spd.
            |   - 'metrics': dict with ndarrays for:
            |         * Rf (color fidelity: base + metameric shift)
            |         * Rt (metameric uncertainty index) 
            |         * Rfi (specific color fidelity indices)
            |         * Rti (specific metameric uncertainty indices)
            |         * cri_type (str with cri_type)
            |   - 'Jab': dict with ndarrays for Jabt, Jabr, DEi
            |   - 'dC/C_dH_x_sig' : 
            |           np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T
            |           See get_poly_model() for more info.
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |      vector-field and circle-fields: 
            |        * 'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 
            |                           'axr' : vfaxr, 'bxr' : vfbxr},
            |        * 'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 
            |                           'axr' : cfaxr, 'bxr' : cfbxr}},
            |   - 'modeldata' : dict with model info:
            |                {'pmodel': pmodel, 
            |                'pcolorshift' : pcolorshift, 
            |                  'dab_model' : dab_model, 
            |                  'dab_res' : dab_res,
            |                  'dab_std' : dab_std,
            |                  'model_type' : model_type, 
            |                  'fmodel' : poly_model,
            |                  'Jabtm' : Jabtm, 
            |                  'Jabrm' : Jabrm, 
            |                  'DEim' : DEim},
            |   - 'vshifts' :dict with various vector shifts:
            |        * 'Jabshiftvector_r_to_t' : ndarray with difference vectors
            |                                    between jabt and jabr.
            |        * 'vshift_ab_s' : vshift_ab_s: ab-shift vectors of samples 
            |        * 'vshift_ab_s_vf' : vshift_ab_s_vf: ab-shift vectors of 
            |                             VF model predictions of samples.
            |        * 'vshift_ab_vf' : vshift_ab_vf: ab-shift vectors of VF 
            |                            model predictions of vector field grid.
    """
    
    if isinstance(cri_type, str):
        cri_type_str = cri_type
    else:
        cri_type_str = None
    
    # Calculate Rf, Rfi and Jabr, Jabt:
    Rf, Rfi, Jabt, Jabr,cct,duv,cri_type  = spd_to_cri(S, cri_type= cri_type,out='Rf,Rfi,jabt,jabr,cct,duv,cri_type', sampleset=sampleset)
    
    # In case of multiple source SPDs, pool:
    if (len(Jabr.shape) == 3) & (Jabr.shape[1]>1) & (pool == True):
        #Nsamples = Jabr.shape[0]
        Jabr = np.transpose(Jabr,(1,0,2)) # set lamps on first dimension
        Jabt = np.transpose(Jabt,(1,0,2))
        Jabr = Jabr.reshape(Jabr.shape[0]*Jabr.shape[1],3) # put all lamp data one after the other
        Jabt = Jabt.reshape(Jabt.shape[0]*Jabt.shape[1],3)
        Jabt = Jabt[:,None,:] # add dim = 1
        Jabr = Jabr[:,None,:]
    

    out = [{} for _ in range(Jabr.shape[1])] #initialize empty list of dicts
    if pool == False:
        N = Jabr.shape[1]
    else:
        N = 1
    for i in range(N):
        
        Jabr_i = Jabr[:,i,:].copy()
        Jabr_i = Jabr_i[:,None,:]
        Jabt_i = Jabt[:,i,:].copy()
        Jabt_i = Jabt_i[:,None,:]

        DEi = np.sqrt((Jabr_i[...,0] - Jabt_i[...,0])**2 + (Jabr_i[...,1] - Jabt_i[...,1])**2 + (Jabr_i[...,2] - Jabt_i[...,2])**2)

        # Determine polynomial model:
        poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = get_poly_model(Jabt_i, Jabr_i, modeltype = model_type)
        
        # Apply model at fixed hues:
        href = pcolorshift['href']
        Cref = pcolorshift['Cref']
        sig = pcolorshift['sig']
        dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(poly_model, pmodel, dCHoverC_res, hx = href, Cxr = Cref, sig = sig)
        
        # Calculate deshifted a,b values on original samples:
        Jt = Jabt_i[...,0].copy()
        at = Jabt_i[...,1].copy()
        bt = Jabt_i[...,2].copy()
        Jr = Jabr_i[...,0].copy()
        ar = Jabr_i[...,1].copy()
        br = Jabr_i[...,2].copy()
        ar = ar + dab_model[:,0:1] # deshift reference to model prediction
        br = br + dab_model[:,1:2] # deshift reference to model prediction
        
        Jabtm = np.hstack((Jt,at,bt))
        Jabrm = np.hstack((Jr,ar,br))
        
        # calculate color differences between test and deshifted ref:
#        DEim = np.sqrt((Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2)
        DEim = np.sqrt(0*(Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2) # J is not used

        # Apply scaling function to convert DEim to Rti:
        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        avg = cri_type['avg']  
        Rfi_deshifted = scale_fcn(DEim,scale_factor)
        Rf_deshifted = scale_fcn(avg(DEim,axis = 0),scale_factor)
        
        rms = lambda x: np.sqrt(np.sum(x**2,axis=0)/x.shape[0])
        Rf_deshifted_rms = scale_fcn(rms(DEim),scale_factor)
    
        # Generate vector field:
        vfaxt,vfbxt,vfaxr,vfbxr = generate_vector_field(poly_model, pmodel,axr = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), bxr = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), limit_grid_radius = _VF_MAXR,color = 0)

        # Calculate ab-shift vectors of samples and VF model predictions:
        vshift_ab_s = calculate_shiftvectors(Jabt_i, Jabr_i, average = False, vtype = 'ab')[:,0,0:3]
        vshift_ab_s_vf = calculate_shiftvectors(Jabtm,Jabrm, average = False, vtype = 'ab')

        # Calculate ab-shift vectors using vector field model:
        Jabt_vf = np.hstack((np.zeros((vfaxt.shape[0],1)), vfaxt, vfbxt))   
        Jabr_vf = np.hstack((np.zeros((vfaxr.shape[0],1)), vfaxr, vfbxr))   
        vshift_ab_vf = calculate_shiftvectors(Jabt_vf,Jabr_vf, average = False, vtype = 'ab')

        # Generate circle field:
        x,y = plotcircle(radii = np.arange(0,_VF_MAXR+_VF_DELTAR,10), angles = np.arange(0,359,1), out = 'x,y')
        cfaxt,cfbxt,cfaxr,cfbxr = generate_vector_field(poly_model, pmodel,make_grid = False,axr = x[:,None], bxr = y[:,None], limit_grid_radius = _VF_MAXR,color = 0)

        out[i] = {'Source' : {'S' : S, 'cct' : cct[i] , 'duv': duv[i]},
               'metrics' : {'Rf':Rf[:,i], 'Rt': Rf_deshifted, 'Rt_rms' : Rf_deshifted_rms, 'Rfi':Rfi[:,i], 'Rti': Rfi_deshifted, 'cri_type' : cri_type_str},
               'Jab' : {'Jabt' : Jabt_i, 'Jabr' : Jabr_i, 'DEi' : DEi},
               'dC/C_dH_x_sig' : np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T,
               'fielddata': {'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 'axr' : vfaxr, 'bxr' : vfbxr},
                             'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 'axr' : cfaxr, 'bxr' : cfbxr}},
               'modeldata' : {'pmodel': pmodel, 'pcolorshift' : pcolorshift, 
                              'dab_model' : dab_model, 'dab_res' : dab_res,'dab_std' : dab_std,
                              'model_type' : model_type, 'fmodel' : poly_model,
                              'Jabtm' : Jabtm, 'Jabrm' : Jabrm, 'DEim' : DEim},
               'vshifts' : {'Jabshiftvector_r_to_t' : np.hstack((Jt-Jr,at-ar,bt-br)),
                            'vshift_ab_s' : vshift_ab_s,
                            'vshift_ab_s_vf' : vshift_ab_s_vf,
                            'vshift_ab_vf' : vshift_ab_vf}}
     
    return out
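# Hypothetical call sketch, assuming the module-level _CIE_ILLUMINANTS dict
# (also used in the xyz_to_rfl example further down) is available:
S = _CIE_ILLUMINANTS['D65'].copy()   # (2 x nwl) spectrum: wavelengths + values
results = VF_colorshift_model(S, pool=False, verbosity=0)
print(results[0]['metrics']['Rf'], results[0]['metrics']['Rt'])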
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw: 
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axes 0 and 1 of Ydlep to speed up the loop in this function.
            |          (a single xyzw is not flipped!)
    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom:
        #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:])) #abs because dom<0--> complemtary wl
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = np.abs(wlslb - wlib)
        q1 = dwl.argmin(axis=0)  # index of closest wl
        dwl[q1] = 10000.0
        q2 = dwl.argmin(axis=0)  # index of second closest wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    else:
        Yxy = Yxy.transpose((0, 1, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
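# Usage sketch (default D65 white point assumed): Y = 50, dominant wavelength
# 550 nm, excitation purity 0.5:
import numpy as np

Ydlep = np.array([[50.0, 550.0, 0.5]])
print(Ydlep_to_xyz(Ydlep))   # -> corresponding XYZ tristimulus values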
def xyz_to_Ydlep(xyz,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axes 0 and 1 of xyz to speed up the loop in this function.
            |          (a single xyzw is not flipped!)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)

    # flip axes so that the longest dim is on axis 0 (saves time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']

    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] >= hsl_max) & (h[:, i] <= hsl_min + 360.0)
        )  # hue's requiring complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue

        dominantwavelength[:, i] = wlsl[q1] + np.divide(
            np.multiply((wlsl[q2] - wlsl[q1]),
                        (h[:, i] - hsl[q1, 0])), (hsl[q2, 0] - hsl[q1, 0])
        )  # calculate wl corresponding to h: y = y1 + (y2-y1)*(x-x1)/(x2-x1)
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
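# Round-trip sketch against Ydlep_to_xyz above (default D65 white point assumed;
# agreement is approximate because the spectrum locus is interpolated):
import numpy as np

xyz = np.array([[40.0, 45.0, 20.0], [30.0, 30.0, 35.0]])
Ydlep = xyz_to_Ydlep(xyz)
print(Ydlep)                                             # Y, dominant wavelength, purity per row
print(np.allclose(xyz, Ydlep_to_xyz(Ydlep), rtol=1e-2))  # expected: True (approximately)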
Example #8
def xyz_to_rfl(xyz, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'ipt', cspace_tf = {},\
                 k_neighbours = 4, verbosity = 0):
    """
    Approximate spectral reflectance of xyz based on k nearest neighbour 
    interpolation of samples from a standard reflectance set.
    
    Args:
        :xyz: 
            | ndarray with tristimulus values of target points.
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'rfl_est' or str, optional
        :refspd: 
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65.
        :cieobs:
            | _CIEOBS, optional
            | CMF set used for calculation of xyz from spectral data.
        :cspace:
            | 'ipt',  optional
            | Color space for color coordinate to rfl mapping.
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_... and ..._to_xyz transform.
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
              rendered image pixels.

    Returns:
        :returns: 
            | :rfl_est:
            | ndarrays with estimated reflectance spectra.
    """

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()

    # Calculate lab-type coordinates of standard rfl set under refspd:
    xyz_rr, xyz_wr = spd_to_xyz(refspd,
                                relative=True,
                                rfl=rfl,
                                cieobs=cieobs,
                                out=2)
    cspace_tf_copy = cspace_tf.copy()
    cspace_tf_copy['xyzw'] = xyz_wr  # put correct white point in param. dict
    lab_rr = colortf(xyz_rr,
                     tf=cspace,
                     fwtf=cspace_tf_copy,
                     bwtf=cspace_tf_copy)[:, 0, :]

    # Convert xyz to lab-type values under refspd:
    lab = colortf(xyz, tf=cspace, fwtf=cspace_tf_copy, bwtf=cspace_tf_copy)

    # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
    # color coordinates for each value in lab_ur (i.e. smallest DE):
    # Construct cKDTree:
    tree = cKDTree(lab_rr, copy_data=True)

    # Interpolate rfls using k nearest neighbours and inverse distance weighting:
    d, inds = tree.query(lab, k=k_neighbours)
    if k_neighbours > 1:
        w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
        rfl_est = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(w, axis=1)
    else:
        rfl_est = rfl[inds + 1, :].copy()
    rfl_est = np.vstack((rfl[0], rfl_est))

    if (verbosity > 0) | ('xyz_est' in out.split(',')) | (
            'lab_est' in out.split(',')) | ('DEi_ab' in out.split(',')) | (
                'DEa_ab' in out.split(',')):
        xyz_est, _ = spd_to_xyz(refspd,
                                rfl=rfl_est,
                                relative=True,
                                cieobs=cieobs,
                                out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy[
            'xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_est = colortf(xyz_est, tf=cspace, fwtf=cspace_tf_copy)[:, 0, :]
        DEi_ab = np.sqrt(((lab_est[:, 1:3] - lab[:, 1:3])**2).sum(axis=1))
        DEa_ab = DEi_ab.mean()

    if verbosity > 0:
        ax = plot_color_data(lab[...,1], lab[...,2], z = lab[...,0], \
                        show = False, cieobs = cieobs, cspace = cspace, \
                        formatstr = 'ro', label = 'Original')
        plot_color_data(lab_est[...,1], lab_est[...,2], z = lab_est[...,0], \
                        show = True, axh = ax, cieobs = cieobs, cspace = cspace, \
                        formatstr = 'bd', label = 'Rendered')

    if out == 'rfl_est':
        return rfl_est
    elif out == 'rfl_est,xyz_est':
        return rfl_est, xyz_est
    else:
        return eval(out)
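# Usage sketch for two xyz targets; relies on the module-level _CRI_RFL and
# _CIE_ILLUMINANTS data sets loaded by the surrounding package:
import numpy as np

xyz = np.array([[40.0, 45.0, 20.0], [30.0, 30.0, 35.0]])
rfl_est = xyz_to_rfl(xyz, k_neighbours=4, verbosity=0)
print(rfl_est.shape)   # (3, nwl): wavelengths on row 0, one estimated spectrum per target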
Example #9
def get_macadam_ellipse(xy=None, k_neighbours=3, nsteps=10, average_cik=True):
    """
    Estimate n-step MacAdam ellipse at CIE x,y coordinates xy by calculating 
    average inverse covariance ellipse of the k_neighbours closest ellipses.
    
    Args:
        :xy:
            | None or ndarray, optional
            | If None: output MacAdam ellipses, if not None: xy are the 
            | CIE xy coordinates for which ellipses will be estimated.
            
        :k_neighbours:
            | 3, optional
            | Number of nearest ellipses to use to calculate ellipse at xy
        :nsteps:
            | 10, optional
            | Set number of MacAdam steps of ellipse.
        :average_cik:
            | True, optional
            | If True: take distance weighted average of inverse 
            |   'covariance ellipse' elements cik. 
            | If False: average major & minor axis lengths and 
            |   ellipse orientation angles directly.
            
    Returns:
        :v_mac_est:
            | estimated MacAdam ellipse(s) in v-format [Rmax,Rmin,xc,yc,theta]
            
    """
    # list of MacAdam ellipses (x10)
    v_mac = np.atleast_2d([[0.16, 0.057, 0.0085, 0.0035, 62.5],
                           [0.187, 0.118, 0.022, 0.0055, 77],
                           [0.253, 0.125, 0.025, 0.005, 55.5],
                           [0.15, 0.68, 0.096, 0.023, 105],
                           [0.131, 0.521, 0.047, 0.02, 112.5],
                           [0.212, 0.55, 0.058, 0.023, 100],
                           [0.258, 0.45, 0.05, 0.02, 92],
                           [0.152, 0.365, 0.038, 0.019, 110],
                           [0.28, 0.385, 0.04, 0.015, 75.5],
                           [0.38, 0.498, 0.044, 0.012, 70],
                           [0.16, 0.2, 0.021, 0.0095, 104],
                           [0.228, 0.25, 0.031, 0.009, 72],
                           [0.305, 0.323, 0.023, 0.009, 58],
                           [0.385, 0.393, 0.038, 0.016, 65.5],
                           [0.472, 0.399, 0.032, 0.014, 51],
                           [0.527, 0.35, 0.026, 0.013, 20],
                           [0.475, 0.3, 0.029, 0.011, 28.5],
                           [0.51, 0.236, 0.024, 0.012, 29.5],
                           [0.596, 0.283, 0.026, 0.013, 13],
                           [0.344, 0.284, 0.023, 0.009, 60],
                           [0.39, 0.237, 0.025, 0.01, 47],
                           [0.441, 0.198, 0.028, 0.0095, 34.5],
                           [0.278, 0.223, 0.024, 0.0055, 57.5],
                           [0.3, 0.163, 0.029, 0.006, 54],
                           [0.365, 0.153, 0.036, 0.0095, 40]])

    # convert to v-format ([a,b, xc, yc, theta]):
    v_mac = v_mac[:, [2, 3, 0, 1, 4]]

    # convert last column to rad.:
    v_mac[:, -1] = v_mac[:, -1] * np.pi / 180

    # convert to desired number of MacAdam-steps:
    v_mac[:, 0:2] = v_mac[:, 0:2] / 10 * nsteps

    if xy is not None:
        #calculate inverse covariance matrices:
        cik = math.v_to_cik(v_mac, inverse=True)
        if average_cik == True:
            cik_long = np.hstack((cik[:, 0, :], cik[:, 1, :]))

        # Calculate k_neighbours closest ellipses to xy:
        tree = cKDTree(v_mac[:, 2:4], copy_data=True)
        d, inds = tree.query(xy, k=k_neighbours)

        if k_neighbours > 1:
            pd = 1
            w = (1.0 / np.abs(d)**pd)[:, :, None]  # inverse distance weighting
            if average_cik == True:
                cik_long_est = np.sum(w * cik_long[inds, :], axis=1) / np.sum(
                    w, axis=1)
            else:
                v_mac_est = np.sum(w * v_mac[inds, :], axis=1) / np.sum(
                    w, axis=1)  # for average xyc

        else:
            v_mac_est = v_mac[inds, :].copy()

        # convert cik back to v:
        if (average_cik == True) & (k_neighbours > 1):
            cik_est = np.dstack((cik_long_est[:, 0:2], cik_long_est[:, 2:4]))
            v_mac_est = math.cik_to_v(cik_est, inverse=True)
        v_mac_est[:, 2:4] = xy
    else:
        v_mac_est = v_mac

    return v_mac_est
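# Usage sketch: 10-step MacAdam ellipses estimated at two chromaticities:
import numpy as np

xy = np.array([[0.31, 0.32], [0.45, 0.40]])
v_mac_est = get_macadam_ellipse(xy=xy, k_neighbours=3, nsteps=10)
print(v_mac_est)   # one row [Rmax, Rmin, xc, yc, theta] per input chromaticity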
Example #10
hr_idx = np.ones_like(hr) * np.nan
n = hr_idx.shape[-1]

for j in range(nhbins):
    cndt_hj = (ht >= hue_bin_edges[j]) & (ht < hue_bin_edges[j + 1])
    cndr_hj = (hr >= hue_bin_edges[j]) & (hr < hue_bin_edges[j + 1])

    ht_idx[cndt_hj] = j  # store hue bin indices for all samples
    hr_idx[cndr_hj] = j
    #wt = np.sum(cndt_hj,axis=0,keepdims=True).astype(np.float)
    wr = np.nansum(cndr_hj, axis=0, keepdims=True).astype(float)

    #wt[wt==0] = np.nan
    wr[wr == 0] = np.nan

    jabt_hj[j, ...] = np.sum(
        (jabt * cndr_hj[..., None]), axis=0) / wr.T  # must use ref. bins !!!
    jabr_hj[j, ...] = np.sum((jabr * cndr_hj[..., None]), axis=0) / wr.T
    DE_hj[j, ...] = np.nansum(
        (DEi * cndr_hj[..., None]) / wr.T,
        axis=0).T  # local color difference is average of DEi per hue bin !!

# calculate normalized hue-bin averages for jabt, jabr:
ht_hj = cam.hue_angle(jabt_hj[..., 1], jabt_hj[..., 2], htype='rad')
hr_hj = cam.hue_angle(jabr_hj[..., 1], jabr_hj[..., 2], htype='rad')
Ct_hj = ((jabt_hj[..., 1]**2 + jabt_hj[..., 2]**2))**0.5
Cr_hj = ((jabr_hj[..., 1]**2 + jabr_hj[..., 2]**2))**0.5
Ctn_hj = normalized_chroma_ref * Ct_hj / (
    Cr_hj + 1e-308)  # calculate normalized chroma for samples under test
Ctn_hj[Cr_hj == 0.0] = np.inf
jabtn_hj = jabt_hj.copy()
jabrn_hj = jabr_hj.copy()
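# Context sketch for the fragment above (assumed, not part of the original):
# nhbins equally spaced hue bins over [0, 2*pi); ht/hr are the hue angles (rad)
# of the test/reference samples, and jabt_hj, jabr_hj, DE_hj are pre-allocated
# (nhbins, ...) arrays filled in the loop.
import numpy as np

nhbins = 16
hue_bin_edges = np.arange(nhbins + 1) * 2 * np.pi / nhbins   # hue-bin edges in rad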
Example #11
def spd_to_CS_CLa_lrc(El = None, E = None, \
                          sum_sources = False, interpolate_sources = True):
    """
    Calculate Circadian Stimulus (CS) and Circadian Light [LRC: Rea et al 2012].
    
    
    Args:
        :El:
            | ndarray, optional
            | Defaults to D65
            | Light source spectral irradiance distribution.
        :E: 
            | None, float or ndarray, optional
            | Illuminance of light sources.
            | If None: El is used as is, otherwise El is renormalized to have
              an illuminance equal to E.
        :sum_sources:
            | False, optional
            |   - False: calculate CS and CLa for all sources in El array.
            |   - True: sum sources in El to a single source and perform calc.
        :interpolate_sources:
            | True, optional
            |  - True: El is interpolated to wavelength range of efficiency 
            |          functions (as in LRC calculator). 
            |  - False: interpolate efficiency functions to source range. 
            |           Source interpolation is not recommended due to possible
            |           errors for peaky spectra. 
            |           (see CIE15-2004, "Colorimetry").
            
    Returns:
        :CS:
            | ndarray with Circadian stimulus values
        :CLa:
            | ndarray with Circadian Light values
            
    Notes:
        1. The original 2012 model (Eq. 1) set the peak wavelength of 
        melanopsin at 480 nm. Rea et al. later published a corrigendum with 
        updated model parameters for k, a_{b-y} and a_rod. A comparison of 
        values calculated for a number of sources with the old and updated 
        parameters showed very close agreement (~1 unit for CLa). 
        
        2. That correction paper did not mention a change in the factor (1622) 
        that multiplies the (sum of the) integral(s) in Eq. 1. However, the 
        Excel calculator released in 2017 and the online calculator use a 
        value of 1547.9 for that factor. The change in values due to the new 
        factor is much larger than that due to the parameter update mentioned 
        in note 1.
        
        3. For consistency, this calculator uses the latest model parameters, 
        as read from the Excel calculator. The values adopted are: 
        multiplier 1547.9, k = 0.2616, a_{b-y} = 0.7 and a_rod = 3.3. 
        
        4. The parameter values to convert CLa to CS were also taken from the 
        2017 Excel calculator.
        
    References:
        
        1. `LRC Online Circadian stimulus calculator <http://www.lrc.rpi.edu/cscalculator/>`_
        
        2. `LRC Excel based Circadian stimulus calculator. <http://www.lrc.rpi.edu/resources/CSCalculator_2017_10_03_Mac.xlsm>`_
        
        3. `Rea MS, Figueiro MG, Bierman A, and Hamner R (2012). 
        Modelling the spectral sensitivity of the human circadian system. 
        Light. Res. Technol. 44, 386–396.  
        <http://journals.sagepub.com/doi/full/10.1177/1477153512467607>`_
            
        4. `Rea MS, Figueiro MG, Bierman A, and Hamner R (2012). 
        Erratum: Modeling the spectral sensitivity of the human circadian system 
        (Lighting Research and Technology (2012) 44:4 (386-396) 
        DOI: 10.1177/1477153511430474)). 
        Light. Res. Technol. 44, 516.
        <http://journals.sagepub.com/doi/10.1177/1477153512467607>`_
    """
    # Create copy of dict with model parameters and spectral data:
    cs_cl_lrs = _LRC_CLA_CS_CONST['CLa'].copy()
    
 
    # Interpolate efficiency functions to light source wl-range:
    if interpolate_sources is False:
        cs_cl_lrs = interpolate_efficiency_functions(El[0], cs_cl_lrs)
    else:
        El = cie_interp(El, cs_cl_lrs['WL'], kind = 'spd')
    
    # Get wavelength spacing:
    dl = getwld(El[0])          
    
    # Separate wavelengths and data:
    wl = El[0]
    Elv = El[1:].copy()
      
    # define integral function:
    #integral = lambda x: sp.integrate.trapz(x, x = wl, axis = -1)  # trapezoidal rule (unused alternative)
    integral = lambda x: np.sum(x, axis = -1)  # simple sum over the last (wavelength) axis
    
    # Rescale El to E (if not None):
    if E is not None:

        # Calculate current E value of El:
        E_cv = np.atleast_2d(683 * integral(cs_cl_lrs['Vphotl']*Elv*dl))

        # Rescale El to supplied E:
        Elv = (E/E_cv).T*Elv
            
                
    # Sum all sources in array if requested:
    if sum_sources == True:
        Elv = Elv.sum(axis = 0, keepdims = True)  
    
    # Calculate Circadian light using model param. and spectral data:
    CLa = fCLa(wl, Elv, integral,  **cs_cl_lrs)
    
    # Calculate Circadian stimulus:
    CS = 0.7 * (1 - (1/(1 + (CLa/355.7)**1.1026)))
    
    return CS, CLa
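# Hypothetical usage sketch, again assuming the module-level _CIE_ILLUMINANTS
# dict is available:
El = _CIE_ILLUMINANTS['D65'].copy()    # (2 x nwl) spectral irradiance data
CS, CLa = spd_to_CS_CLa_lrc(El=El, E=300, interpolate_sources=True)   # renormalize to 300 lx
print(CS, CLa)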