Example 1
def line_intersect(a1, a2, b1, b2):
    """
    Line intersections of a series of paired line segments a and b. 
        
    Args:
        :a1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line a
        :a2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line a
        :b1: 
            | ndarray (.shape  = (N,2)) specifying end-point 1 of line b
        :b2: 
            | ndarray (.shape  = (N,2)) specifying end-point 2 of line b
    
    Note: 
        N is the number of line segments a and b.
    
    Returns:
        :returns: 
            | ndarray with line-intersections (.shape = (N,2))
    
    References:
        1. https://stackoverflow.com/questions/3252194/numpy-and-line-intersections
    """
    T = np.array([[0.0, -1.0], [1.0, 0.0]])
    da = np.atleast_2d(a2 - a1)
    db = np.atleast_2d(b2 - b1)
    dp = np.atleast_2d(a1 - b1)
    dap = np.dot(da, T)
    denom = np.sum(dap * db, axis=1)
    num = np.sum(dap * dp, axis=1)
    return np.atleast_2d(num / denom).T * db + b1
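# Usage sketch (added for illustration; assumes numpy as np and the function
# above are in scope): intersect segment a = (0,0)->(2,2) with segment
# b = (0,2)->(2,0); the expected crossing point is (1,1).
import numpy as np
a1, a2 = np.array([[0., 0.]]), np.array([[2., 2.]])
b1, b2 = np.array([[0., 2.]]), np.array([[2., 0.]])
print(line_intersect(a1, a2, b1, b2))   # -> [[1. 1.]]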
Example 2
def _cri_ref_i(cct,
               wl3=_WL,
               ref_type='iestm30',
               mix_range=[4000, 5000],
               cieobs='1931_2',
               force_daylight_below4000K=False,
               n=None,
               daylight_locus=None):
    """
    Calculates a reference illuminant spectrum based on cct 
    for color rendering index calculations.
    """
    if mix_range is None:
        mix_range = _CRI_REF_TYPES[ref_type]
    if (cct < mix_range[0]) | (ref_type == 'BB'):
        return blackbody(cct, wl3, n=n)
    elif (cct > mix_range[1]) | (ref_type == 'DL'):
        return daylightphase(
            cct,
            wl3,
            force_daylight_below4000K=force_daylight_below4000K,
            cieobs=cieobs,
            daylight_locus=daylight_locus)
    else:
        SrBB = blackbody(cct, wl3, n=n)
        SrDL = daylightphase(
            cct,
            wl3,
            verbosity=None,
            force_daylight_below4000K=force_daylight_below4000K,
            cieobs=cieobs,
            daylight_locus=daylight_locus)
        cmf = _CMF[cieobs]['bar'] if isinstance(cieobs, str) else cieobs
        wl = SrBB[0]
        ld = getwld(wl)

        SrBB = 100.0 * SrBB[1] / np.array(np.sum(SrBB[1] * cmf[2] * ld))
        SrDL = 100.0 * SrDL[1] / np.array(np.sum(SrDL[1] * cmf[2] * ld))
        Tb, Te = float(mix_range[0]), float(mix_range[1])
        cBB, cDL = (Te - cct) / (Te - Tb), (cct - Tb) / (Te - Tb)
        if cBB < 0.0:
            cBB = 0.0
        elif cBB > 1:
            cBB = 1.0
        if cDL < 0.0:
            cDL = 0.0
        elif cDL > 1:
            cDL = 1.0

        Sr = SrBB * cBB + SrDL * cDL
        Sr[np.isnan(Sr)] = 0.0
        Sr = np.vstack((wl, (Sr / Sr[_POS_WL560])))

        return Sr
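# Illustrative sketch (added; only numpy assumed): the blackbody/daylight
# crossfade weights used in the mixing branch above. Inside mix_range the
# weights vary linearly with cct and always sum to 1.
import numpy as np
Tb, Te = 4000.0, 5000.0
for cct in (4200.0, 4500.0, 4900.0):
    cBB = np.clip((Te - cct) / (Te - Tb), 0.0, 1.0)   # blackbody weight
    cDL = np.clip((cct - Tb) / (Te - Tb), 0.0, 1.0)   # daylight-phase weight
    print(cct, cBB, cDL)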
Example 3
def spd_to_ler(data, cieobs=_CIEOBS, K=None):
    """
    Calculates Luminous efficacy of radiation (LER) from spectral data.
       
    Args: 
        :data: 
            | ndarray or pandas.dataframe with spectral data
            | (.shape = (number of spectra + 1, number of wavelengths))
            | Note that :data: is never interpolated, only CMFs and RFLs. 
            | This way interpolation errors due to peaky spectra are avoided. 
            | This conforms to CIE 15:2018.
        :cieobs: 
            | luxpy._CIEOBS, optional
            | Determines the color matching function set used in the 
            | calculation of LER. For cieobs = '1931_2' the ybar CMF curve equals
            | the CIE 1924 Vlambda curve.
        :K: 
            | None, optional
            | Luminous efficacy scaling factor (lm/W). None defaults to the value
            | stored in _CMF for the chosen :cieobs:, e.g. K = 683 lm/W for '1931_2'.
      
    Returns:
        :ler: 
            | ndarray of LER values. 
             
    References:
        1. `CIE15:2018, “Colorimetry,” CIE, Vienna, Austria, 2018. <https://doi.org/10.25039/TR.015.2018>`_
    """

    if isinstance(cieobs, str):
        if K is None: K = _CMF[cieobs]['K']
        Vl = vlbar(cieobs=cieobs, scr='dict', wl_new=data[0],
                   kind='np')[1:2]  #also interpolate to wl of data
    else:
        Vl = spd(wl=data[0], data=cieobs, interpolation='cmf', kind='np')[1:2]
        if K is None:
            raise Exception(
                "spd_to_ler: User defined Vlambda, but no K scaling factor has been supplied."
            )
    dl = getwld(data[0])
    return ((K * np.dot(
        (Vl * dl), data[1:].T)) / np.sum(data[1:] * dl, axis=data.ndim - 1)).T
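# Toy sketch (added; only numpy assumed) of the LER formula applied above:
# LER = K * sum(V * S * dl) / sum(S * dl). The Gaussian below is only a crude
# stand-in for the CIE 1924 V(lambda) curve used by the real function.
import numpy as np
wl = np.arange(380., 781., 1.)
S = np.ones_like(wl)                          # equal-energy test spectrum
V = np.exp(-0.5 * ((wl - 555.) / 45.)**2)     # stand-in for V(lambda)
K, dl = 683.0, 1.0                            # lm/W and wavelength step
print(K * np.sum(V * S * dl) / np.sum(S * dl))   # LER in lm/W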
Example 4
def angle_v1v2(v1,v2,htype = 'deg'):
    """
    Calculates angle between two vectors.
    
    Args:
        :v1: 
            | ndarray with vector 1
        :v2: 
            | ndarray with vector 2
        :htype:
            | 'deg' or 'rad', optional
            | Requested angle type.
    
    Returns:
        :ang: 
            | ndarray 
    """
    denom = magnitude_v(v1)*magnitude_v(v2)
    denom[denom==0.] = np.nan
    ang = np.arccos(np.sum(v1*v2,axis=1)/denom)
    if htype == 'deg':
        ang = ang*180/np.pi
    return ang
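# Quick numerical check (added; magnitude_v is assumed to be a row-wise
# Euclidean norm, replaced here by np.linalg.norm): the angle between (1,0)
# and (0,1) is 90 deg, and between (1,1) and (1,1) it is 0 deg.
import numpy as np
v1 = np.array([[1., 0.], [1., 1.]])
v2 = np.array([[0., 1.], [1., 1.]])
denom = np.linalg.norm(v1, axis=1) * np.linalg.norm(v2, axis=1)
print(np.degrees(np.arccos(np.sum(v1 * v2, axis=1) / denom)))   # -> [90.  0.]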
Example 5
def xyz_to_rfl(xyz, CSF = None, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {},\
                 interp_type = 'nd', k_neighbours = 4, verbosity = 0):
    """
    Approximate spectral reflectance of xyz values based on nd-dimensional linear interpolation 
    or k nearest neighbour interpolation of samples from a standard reflectance set.
    
    Args:
        :xyz: 
            | ndarray with xyz values of target points.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If None: the input :xyz: is assumed to contain raw rgb (float) values;
            | the :cspace: argument is then overridden and the estimation is
            | performed directly in raw rgb space.
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'rfl_est' or str, optional
        :refspd: 
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65.
        :cieobs:
            | _CIEOBS, optional
            | CMF set used for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.

    Returns:
        :returns: 
            | :rfl_est:
            | ndarrays with estimated reflectance spectra.
    """

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']

    wlr = rfl[0]

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(
        refspd, wlr,
        kind='linear')  # force spd to same wavelength range as rfl

    # Calculate rgb values of standard rfl set under refspd:
    if CSF is None:
        # Calculate lab coordinates:
        xyz_rr, xyz_wr = spd_to_xyz(refspd,
                                    relative=True,
                                    rfl=rfl,
                                    cieobs=cieobs,
                                    out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy[
            'xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_rr = colortf(xyz_rr,
                         tf=cspace,
                         fwtf=cspace_tf_copy,
                         bwtf=cspace_tf_copy)[:, 0, :]
    else:
        # Calculate rgb coordinates from camera sensitivity functions
        rgb_rr = rfl_to_rgb(rfl, spd=refspd, CSF=CSF, wl=None)
        lab_rr = rgb_rr
        xyz = xyz
        lab_rr = np.round(lab_rr, _ROUNDING)  # speed up search

    # Convert xyz to lab-type values under refspd:
    if CSF is None:
        lab = colortf(xyz, tf=cspace, fwtf=cspace_tf_copy, bwtf=cspace_tf_copy)
    else:
        lab = xyz  # xyz contained rgb values !!!
        rgb = xyz
        lab = np.round(lab, _ROUNDING)  # speed up search

    if interp_type == 'nearest':
        # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
        # color coordinates for each value in lab_ur (i.e. smallest DE):
        # Construct cKDTree:
        tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

        # Interpolate rfls using k nearest neighbours and inverse distance weighting:
        d, inds = tree.query(lab, k=k_neighbours)
        if k_neighbours > 1:
            d += _EPS
            w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
            rfl_est = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(w, axis=1)
        else:
            rfl_est = rfl[inds + 1, :].copy()
    elif interp_type == 'nd':

        rfl_est = math.ndinterp1_scipy(lab_rr, rfl[1:], lab)

        _isnan = np.isnan(rfl_est[:, 0])

        if (
                _isnan.any()
        ):  # do nearest neighbour method for those that fail using Delaunay (i.e. ndinterp1_scipy)

            # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
            # color coordinates for each value in lab_ur (i.e. smallest DE):
            # Construct cKDTree:
            tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

            # Interpolate rfls using k nearest neighbours and inverse distance weighting:
            d, inds = tree.query(lab[_isnan, ...], k=k_neighbours)

            if k_neighbours > 1:
                d += _EPS
                w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
                rfl_est_isnan = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(
                    w, axis=1)
            else:
                rfl_est_isnan = rfl[inds + 1, :].copy()
            rfl_est[_isnan, :] = rfl_est_isnan

    else:
        raise Exception('xyz_to_rfl(): unsupported interp_type!')

    rfl_est[
        rfl_est <
        0] = 0  #can occur for points outside convexhull of standard rfl set.

    rfl_est = np.vstack((rfl[0], rfl_est))

    if ((verbosity > 0) | ('xyz_est' in out.split(',')) |
        ('lab_est' in out.split(',')) | ('DEi_ab' in out.split(',')) |
        ('DEa_ab' in out.split(','))) & (CSF is None):
        xyz_est, _ = spd_to_xyz(refspd,
                                rfl=rfl_est,
                                relative=True,
                                cieobs=cieobs,
                                out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy[
            'xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_est = colortf(xyz_est, tf=cspace, fwtf=cspace_tf_copy)[:, 0, :]
        DEi_ab = np.sqrt(((lab_est[:, 1:3] - lab[:, 1:3])**2).sum(axis=1))
        DEa_ab = DEi_ab.mean()
    elif ((verbosity > 0) | ('xyz_est' in out.split(',')) |
          ('rgb_est' in out.split(',')) | ('DEi_rgb' in out.split(',')) |
          ('DEa_rgb' in out.split(','))) & (CSF is not None):
        rgb_est = rfl_to_rgb(rfl_est[1:], spd=refspd, CSF=CSF, wl=wlr)
        xyz_est = rgb_est
        DEi_rgb = np.sqrt(((rgb_est - rgb)**2).sum(axis=1))
        DEa_rgb = DEi_rgb.mean()

    if verbosity > 0:
        if CSF is None:
            ax = plot_color_data(lab[...,1], lab[...,2], z = lab[...,0], \
                            show = False, cieobs = cieobs, cspace = cspace, \
                            formatstr = 'ro', label = 'Original')
            plot_color_data(lab_est[...,1], lab_est[...,2], z = lab_est[...,0], \
                            show = True, axh = ax, cieobs = cieobs, cspace = cspace, \
                            formatstr = 'bd', label = 'Rendered')
        else:
            n = 100  #min(rfl.shape[0]-1,rfl_est.shape[0]-1)
            s = np.random.permutation(rfl.shape[0] -
                                      1)[:min(n, rfl.shape[0] - 1)]
            st = np.random.permutation(rfl_est.shape[0] -
                                       1)[:min(n, rfl_est.shape[0] - 1)]
            fig = plt.figure()
            ax = np.zeros((3, ), dtype=object)
            ax[0] = fig.add_subplot(131)
            ax[1] = fig.add_subplot(132)
            ax[2] = fig.add_subplot(133, projection='3d')
            ax[0].plot(rfl[0], rfl[1:][s].T, linestyle='-')
            ax[0].set_title('Original RFL set (random selection of all)')
            ax[0].set_ylim([0, 1])
            ax[1].plot(rfl_est[0], rfl_est[1:][st].T, linestyle='--')
            ax[1].set_title('Estimated RFL set (random selection of targets)')
            ax[1].set_ylim([0, 1])
            ax[2].plot(rgb[st, 0],
                       rgb[st, 1],
                       rgb[st, 2],
                       'ro',
                       label='Original')
            ax[2].plot(rgb_est[st, 0],
                       rgb_est[st, 1],
                       rgb_est[st, 2],
                       'bd',
                       label='Rendered')
            ax[2].legend()
    if out == 'rfl_est':
        return rfl_est
    elif out == 'rfl_est,xyz_est':
        return rfl_est, xyz_est
    else:
        return eval(out)
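# Sketch (added; only numpy and scipy assumed) of the k-nearest-neighbour /
# inverse-distance-weighting step used in the 'nearest' branch above, on
# made-up 2D color coordinates and toy reflectance spectra.
import numpy as np
import scipy.spatial as spsp
rng = np.random.default_rng(0)
lab_rr = rng.random((50, 2))      # coordinates of the reflectance set
rfls = rng.random((50, 10))       # corresponding toy reflectance spectra
lab = rng.random((3, 2))          # target coordinates
d, inds = spsp.cKDTree(lab_rr).query(lab, k=4)
w = (1.0 / (d + 1e-12)**2)[:, :, None]    # inverse distance weights
rfl_est = np.sum(w * rfls[inds], axis=1) / np.sum(w, axis=1)
print(rfl_est.shape)              # -> (3, 10)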
Example 6
def VF_colorshift_model(S, cri_type = _VF_CRI_DEFAULT, model_type = _VF_MODEL_TYPE, \
                        cspace = _VF_CSPACE, sampleset = None, pool = False, \
                        pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),'Cref' : _VF_MAXR, 'sig' : _VF_SIG}, \
                        vfcolor = 'k',verbosity = 0):
    """
    Applies full vector field model calculations to spectral data.
    
    Args:
        :S: 
            | nump.ndarray with spectral data.
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies the 5- or 6-parameter polynomial model in ab-coordinates.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool: 
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            | for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            | for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                 'Cref' : _VF_MAXR, 
            |                 'sig' : _VF_SIG, 
            |                 'labels' : '#'} 
            | The 5- and 6-parameter polynomial models can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at, respectively, 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
            
    Returns:
        :returns: 
            | list[dict] (each list element refers to a different test SPD)
            | with the following keys:
            |   - 'Source': dict with ndarrays of the S, cct and duv of source spd.
            |   - 'metrics': dict with ndarrays for:
            |         * Rf (color fidelity: base + metameric shift)
            |         * Rt (metameric uncertainty index) 
            |         * Rfi (specific color fidelity indices)
            |         * Rti (specific metameric uncertainty indices)
            |         * cri_type (str with cri_type)
            |   - 'Jab': dict with with ndarrays for Jabt, Jabr, DEi
            |   - 'dC/C_dH_x_sig' : 
            |           np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T
            |           See get_poly_model() for more info.
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |      vector-field and circle-fields: 
            |        * 'vectorfield' : {'axt': vfaxt, 'bxt' : vfbxt, 
            |                           'axr' : vfaxr, 'bxr' : vfbxr},
            |        * 'circlefield' : {'axt': cfaxt, 'bxt' : cfbxt, 
            |                           'axr' : cfaxr, 'bxr' : cfbxr}},
            |   - 'modeldata' : dict with model info:
            |                {'pmodel': pmodel, 
            |                'pcolorshift' : pcolorshift, 
            |                  'dab_model' : dab_model, 
            |                  'dab_res' : dab_res,
            |                  'dab_std' : dab_std,
            |                  'model_type' : model_type, 
            |                  'fmodel' : poly_model,
            |                  'Jabtm' : Jabtm, 
            |                  'Jabrm' : Jabrm, 
            |                  'DEim' : DEim},
            |   - 'vshifts' :dict with various vector shifts:
            |        * 'Jabshiftvector_r_to_t' : ndarray with difference vectors
            |                                    between jabt and jabr.
            |        * 'vshift_ab_s' : vshift_ab_s: ab-shift vectors of samples 
            |        * 'vshift_ab_s_vf' : vshift_ab_s_vf: ab-shift vectors of 
            |                             VF model predictions of samples.
            |        * 'vshift_ab_vf' : vshift_ab_vf: ab-shift vectors of VF 
            |                            model predictions of vector field grid.
    """

    if type(cri_type) == str:
        cri_type_str = cri_type
    else:
        cri_type_str = None

    # Calculate Rf, Rfi and Jabr, Jabt:
    Rf, Rfi, Jabt, Jabr, cct, duv, cri_type = spd_to_cri(
        S,
        cri_type=cri_type,
        out='Rf,Rfi,jabt,jabr,cct,duv,cri_type',
        sampleset=sampleset)

    # In case of multiple source SPDs, pool:
    if (len(Jabr.shape) == 3) & (Jabr.shape[1] > 1) & (pool == True):
        #Nsamples = Jabr.shape[0]
        Jabr = np.transpose(Jabr, (1, 0, 2))  # set lamps on first dimension
        Jabt = np.transpose(Jabt, (1, 0, 2))
        Jabr = Jabr.reshape(Jabr.shape[0] * Jabr.shape[1],
                            3)  # put all lamp data one after the other
        Jabt = Jabt.reshape(Jabt.shape[0] * Jabt.shape[1], 3)
        Jabt = Jabt[:, None, :]  # add dim = 1
        Jabr = Jabr[:, None, :]

    out = [{} for _ in range(Jabr.shape[1])]  #initialize empty list of dicts
    if pool == False:
        N = Jabr.shape[1]
    else:
        N = 1
    for i in range(N):

        Jabr_i = Jabr[:, i, :].copy()
        Jabr_i = Jabr_i[:, None, :]
        Jabt_i = Jabt[:, i, :].copy()
        Jabt_i = Jabt_i[:, None, :]

        DEi = np.sqrt((Jabr_i[..., 0] - Jabt_i[..., 0])**2 +
                      (Jabr_i[..., 1] - Jabt_i[..., 1])**2 +
                      (Jabr_i[..., 2] - Jabt_i[..., 2])**2)

        # Determine polynomial model:
        poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = get_poly_model(
            Jabt_i, Jabr_i, modeltype=_VF_MODEL_TYPE)

        # Apply model at fixed hues:
        href = pcolorshift['href']
        Cref = pcolorshift['Cref']
        sig = pcolorshift['sig']
        dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(
            poly_model, pmodel, dCHoverC_res, hx=href, Cxr=Cref, sig=sig)

        # Calculate deshifted a,b values on original samples:
        Jt = Jabt_i[..., 0].copy()
        at = Jabt_i[..., 1].copy()
        bt = Jabt_i[..., 2].copy()
        Jr = Jabr_i[..., 0].copy()
        ar = Jabr_i[..., 1].copy()
        br = Jabr_i[..., 2].copy()
        ar = ar + dab_model[:, 0:1]  # deshift reference to model prediction
        br = br + dab_model[:, 1:2]  # deshift reference to model prediction

        Jabtm = np.hstack((Jt, at, bt))
        Jabrm = np.hstack((Jr, ar, br))

        # calculate color differences between test and deshifted ref:
        #        DEim = np.sqrt((Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2)
        DEim = np.sqrt(0 * (Jr - Jt)**2 + (at - ar)**2 +
                       (bt - br)**2)  # J is not used

        # Apply scaling function to convert DEim to Rti:
        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        avg = cri_type['avg']
        Rfi_deshifted = scale_fcn(DEim, scale_factor)
        Rf_deshifted = scale_fcn(avg(DEim, axis=0), scale_factor)

        rms = lambda x: np.sqrt(np.sum(x**2, axis=0) / x.shape[0])
        Rf_deshifted_rms = scale_fcn(rms(DEim), scale_factor)

        # Generate vector field:
        vfaxt, vfbxt, vfaxr, vfbxr = generate_vector_field(
            poly_model,
            pmodel,
            axr=np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            bxr=np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            limit_grid_radius=_VF_MAXR,
            color=0)

        # Calculate ab-shift vectors of samples and VF model predictions:
        vshift_ab_s = calculate_shiftvectors(Jabt_i,
                                             Jabr_i,
                                             average=False,
                                             vtype='ab')[:, 0, 0:3]
        vshift_ab_s_vf = calculate_shiftvectors(Jabtm,
                                                Jabrm,
                                                average=False,
                                                vtype='ab')

        # Calculate ab-shift vectors using vector field model:
        Jabt_vf = np.hstack((np.zeros((vfaxt.shape[0], 1)), vfaxt, vfbxt))
        Jabr_vf = np.hstack((np.zeros((vfaxr.shape[0], 1)), vfaxr, vfbxr))
        vshift_ab_vf = calculate_shiftvectors(Jabt_vf,
                                              Jabr_vf,
                                              average=False,
                                              vtype='ab')

        # Generate circle field:
        x, y = plotcircle(radii=np.arange(0, _VF_MAXR + _VF_DELTAR, 10),
                          angles=np.arange(0, 359, 1),
                          out='x,y')
        cfaxt, cfbxt, cfaxr, cfbxr = generate_vector_field(
            poly_model,
            pmodel,
            make_grid=False,
            axr=x[:, None],
            bxr=y[:, None],
            limit_grid_radius=_VF_MAXR,
            color=0)

        out[i] = {
            'Source': {
                'S': S,
                'cct': cct[i],
                'duv': duv[i]
            },
            'metrics': {
                'Rf': Rf[:, i],
                'Rt': Rf_deshifted,
                'Rt_rms': Rf_deshifted_rms,
                'Rfi': Rfi[:, i],
                'Rti': Rfi_deshifted,
                'cri_type': cri_type_str
            },
            'Jab': {
                'Jabt': Jabt_i,
                'Jabr': Jabr_i,
                'DEi': DEi
            },
            'dC/C_dH_x_sig':
            np.vstack((dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig)).T,
            'fielddata': {
                'vectorfield': {
                    'axt': vfaxt,
                    'bxt': vfbxt,
                    'axr': vfaxr,
                    'bxr': vfbxr
                },
                'circlefield': {
                    'axt': cfaxt,
                    'bxt': cfbxt,
                    'axr': cfaxr,
                    'bxr': cfbxr
                }
            },
            'modeldata': {
                'pmodel': pmodel,
                'pcolorshift': pcolorshift,
                'dab_model': dab_model,
                'dab_res': dab_res,
                'dab_std': dab_std,
                'model_type': model_type,
                'fmodel': poly_model,
                'Jabtm': Jabtm,
                'Jabrm': Jabrm,
                'DEim': DEim
            },
            'vshifts': {
                'Jabshiftvector_r_to_t': np.hstack(
                    (Jt - Jr, at - ar, bt - br)),
                'vshift_ab_s': vshift_ab_s,
                'vshift_ab_s_vf': vshift_ab_s_vf,
                'vshift_ab_vf': vshift_ab_vf
            }
        }

    return out
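# Hedged usage sketch (added): assumes this function and a luxpy-style
# spectral array S (wavelengths in row 0, one or more spectra below it) are
# available in the current namespace; the keys follow the docstring above.
results = VF_colorshift_model(S, pool=False, verbosity=0)
for r in results:                               # one dict per test SPD
    print(r['Source']['cct'], r['metrics']['Rf'], r['metrics']['Rt'])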
Example 7
def get_poly_model(jabt, jabr, modeltype=_VF_MODEL_TYPE):
    """
    Set up the base color shift model (delta_a, delta_b), 
    determine model parameters and accuracy.
    
    | Calculates a base color shift (delta) from the ref. chromaticity ar, br.
    
    Args:
        :jabt: 
            | ndarray with jab color coordinates under the test SPD.
        :jabr: 
            | ndarray with jab color coordinates under the reference SPD.
        :modeltype:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies the 5- or 6-parameter polynomial model in ab-coordinates.
            | (see notes below)
            
    Returns:
        :returns: 
            | (poly_model, 
            |       pmodel, 
            |       dab_model, 
            |        dab_res, 
            |        dCHoverC_res, 
            |        dab_std, 
            |        dCHoverC_std)
            |
            | :poly_model: function handle to model
            | :pmodel: ndarray with model parameters
            | :dab_model: ndarray with ab model predictions from ar, br.
            | :dab_res: ndarray with residuals between 'da,db' of samples and 
            |            'da,db' predicted by the model.
            | :dCHoverC_res: ndarray with residuals between 'dCoverC,dH' 
            |                 of samples and 'dCoverC,dH' predicted by the model.
            |     Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |         (predicted from model, see notes below)
            | :dab_std: ndarray with std of :dab_res:
            | :dCHoverC_std: ndarray with std of :dCHoverC_res: 

    Notes: 
        1. Model types:
            | poly5_model = lambda a,b,p:         p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
            | poly6_model = lambda a,b,p:  p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
        
        2. Calculation of dCoverC and dH:
            | dCoverC = (np.cos(hr)*da + np.sin(hr)*db)/Cr
            | dHoverC = (np.cos(hr)*db - np.sin(hr)*da)/Cr    
    """
    at = jabt[..., 1]
    bt = jabt[..., 2]
    ar = jabr[..., 1]
    br = jabr[..., 2]

    # A. Calculate da, db:
    da = at - ar
    db = bt - br

    # B.1 Calculate model matrix:
    # 5-parameter model:
    M5 = np.array([[
        np.sum(ar * ar),
        np.sum(ar * br),
        np.sum(ar * ar**2),
        np.sum(ar * ar * br),
        np.sum(ar * br**2)
    ],
                   [
                       np.sum(br * ar),
                       np.sum(br * br),
                       np.sum(br * ar**2),
                       np.sum(br * ar * br),
                       np.sum(br * br**2)
                   ],
                   [
                       np.sum((ar**2) * ar),
                       np.sum((ar**2) * br),
                       np.sum((ar**2) * ar**2),
                       np.sum((ar**2) * ar * br),
                       np.sum((ar**2) * br**2)
                   ],
                   [
                       np.sum(ar * br * ar),
                       np.sum(ar * br * br),
                       np.sum(ar * br * ar**2),
                       np.sum(ar * br * ar * br),
                       np.sum(ar * br * br**2)
                   ],
                   [
                       np.sum((br**2) * ar),
                       np.sum((br**2) * br),
                       np.sum((br**2) * ar**2),
                       np.sum((br**2) * ar * br),
                       np.sum((br**2) * br**2)
                   ]])
    #6-parameters model
    M6 = np.array([[
        ar.size,
        np.sum(1.0 * ar),
        np.sum(1.0 * br),
        np.sum(1.0 * ar**2),
        np.sum(1.0 * ar * br),
        np.sum(1.0 * br**2)
    ],
                   [
                       np.sum(ar * 1.0),
                       np.sum(ar * ar),
                       np.sum(ar * br),
                       np.sum(ar * ar**2),
                       np.sum(ar * ar * br),
                       np.sum(ar * br**2)
                   ],
                   [
                       np.sum(br * 1.0),
                       np.sum(br * ar),
                       np.sum(br * br),
                       np.sum(br * ar**2),
                       np.sum(br * ar * br),
                       np.sum(br * br**2)
                   ],
                   [
                       np.sum((ar**2) * 1.0),
                       np.sum((ar**2) * ar),
                       np.sum((ar**2) * br),
                       np.sum((ar**2) * ar**2),
                       np.sum((ar**2) * ar * br),
                       np.sum((ar**2) * br**2)
                   ],
                   [
                       np.sum(ar * br * 1.0),
                       np.sum(ar * br * ar),
                       np.sum(ar * br * br),
                       np.sum(ar * br * ar**2),
                       np.sum(ar * br * ar * br),
                       np.sum(ar * br * br**2)
                   ],
                   [
                       np.sum((br**2) * 1.0),
                       np.sum((br**2) * ar),
                       np.sum((br**2) * br),
                       np.sum((br**2) * ar**2),
                       np.sum((br**2) * ar * br),
                       np.sum((br**2) * br**2)
                   ]])

    # B.2 Define model function:
    poly5_model = lambda a, b, p: p[0] * a + p[1] * b + p[2] * (a**2) + p[
        3] * a * b + p[4] * (b**2)
    poly6_model = lambda a, b, p: p[0] + p[1] * a + p[2] * b + p[3] * (
        a**2) + p[4] * a * b + p[5] * (b**2)

    if modeltype == 'M5':
        M = M5
        poly_model = poly5_model
    else:
        M = M6
        poly_model = poly6_model

    M = np.linalg.inv(M)

    # C.1 Data a,b analysis output:
    if modeltype == 'M5':
        da_model_parameters = np.dot(
            M,
            np.array([
                np.sum(da * ar),
                np.sum(da * br),
                np.sum(da * ar**2),
                np.sum(da * ar * br),
                np.sum(da * br**2)
            ]))
        db_model_parameters = np.dot(
            M,
            np.array([
                np.sum(db * ar),
                np.sum(db * br),
                np.sum(db * ar**2),
                np.sum(db * ar * br),
                np.sum(db * br**2)
            ]))
    else:
        da_model_parameters = np.dot(
            M,
            np.array([
                np.sum(da * 1.0),
                np.sum(da * ar),
                np.sum(da * br),
                np.sum(da * ar**2),
                np.sum(da * ar * br),
                np.sum(da * br**2)
            ]))
        db_model_parameters = np.dot(
            M,
            np.array([
                np.sum(db * 1.0),
                np.sum(db * ar),
                np.sum(db * br),
                np.sum(db * ar**2),
                np.sum(db * ar * br),
                np.sum(db * br**2)
            ]))
    pmodel = np.vstack((da_model_parameters, db_model_parameters))

    # D.1 Calculate model da, db:
    da_model = poly_model(ar, br, pmodel[0])
    db_model = poly_model(ar, br, pmodel[1])
    dab_model = np.hstack((da_model, db_model))

    # D.2 Calculate residuals for da & db:
    da_res = da - da_model
    db_res = db - db_model
    dab_res = np.hstack((da_res, db_res))
    dab_std = np.vstack((np.std(da_res, axis=0), np.std(db_res, axis=0)))

    # E Calculate href, Cref:
    href = np.arctan2(br, ar)
    Cref = (ar**2 + br**2)**0.5

    # F Calculate dC/C, dH/C for data and model and calculate residuals:
    dCoverC = (np.cos(href) * da + np.sin(href) * db) / Cref
    dHoverC = (np.cos(href) * db - np.sin(href) * da) / Cref
    dCoverC_model = (np.cos(href) * da_model + np.sin(href) * db_model) / Cref
    dHoverC_model = (np.cos(href) * db_model - np.sin(href) * da_model) / Cref
    dCoverC_res = dCoverC - dCoverC_model
    dHoverC_res = dHoverC - dHoverC_model
    dCHoverC_std = np.vstack((np.std(dCoverC_res,
                                     axis=0), np.std(dHoverC_res, axis=0)))

    dCHoverC_res = np.hstack((href, dCoverC_res, dHoverC_res))

    return poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std
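# Added sketch (only numpy assumed): the explicit normal-equation solve above
# for the 6-parameter model is equivalent to an ordinary least-squares fit of
# da (and db) on the design matrix [1, a, b, a^2, a*b, b^2], shown here with
# np.linalg.lstsq on synthetic data.
import numpy as np
rng = np.random.default_rng(1)
ar, br = rng.random(200), rng.random(200)
da = 0.5 * ar - 0.2 * br + 0.1 * ar * br + 0.01 * rng.standard_normal(200)
X = np.column_stack((np.ones_like(ar), ar, br, ar**2, ar * br, br**2))
p, *_ = np.linalg.lstsq(X, da, rcond=None)
print(np.round(p, 2))   # close to [0, 0.5, -0.2, 0, 0.1, 0]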
Example 8
def get_macadam_ellipse(xy = None, k_neighbours = 3, nsteps = 10, average_cik = True):
    """
    Estimate n-step MacAdam ellipse at CIE x,y coordinates xy by calculating 
    average inverse covariance ellipse of the k_neighbours closest ellipses.
    
    Args:
        :xy:
            | None or ndarray, optional
            | If None: output the raw MacAdam ellipses; if not None: xy are the 
            | CIE xy coordinates for which ellipses will be estimated.
        :k_neighbours:
            | 3, optional
            | Number of nearest ellipses to use to calculate ellipse at xy
        :nsteps:
            | 10, optional
            | Set number of MacAdam steps of ellipse.
        :average_cik:
            | True, optional
            | If True: take distance weighted average of inverse 
            |   'covariance ellipse' elements cik. 
            | If False: average major & minor axis lengths and 
            |   ellipse orientation angles directly.
            
    Returns:
        :v_mac_est:
            | estimated MacAdam ellipse(s) in v-format [Rmax,Rmin,xc,yc,theta]
    
    References:
        1. MacAdam DL. Visual Sensitivities to Color Differences in Daylight*. J Opt Soc Am. 1942;32(5):247-274.
    """
    # list of MacAdam ellipses (x10)
    v_mac = np.atleast_2d([
         [0.16, 0.057, 0.0085, 0.0035, 62.5],
         [0.187, 0.118, 0.022, 0.0055, 77],
         [0.253, 0.125, 0.025, 0.005, 55.5],
         [0.15, 0.68, 0.096, 0.023, 105],
         [0.131, 0.521, 0.047, 0.02, 112.5],
         [0.212, 0.55, 0.058, 0.023, 100],
         [0.258, 0.45, 0.05, 0.02, 92],
         [0.152, 0.365, 0.038, 0.019, 110],
         [0.28, 0.385, 0.04, 0.015, 75.5],
         [0.38, 0.498, 0.044, 0.012, 70],
         [0.16, 0.2, 0.021, 0.0095, 104],
         [0.228, 0.25, 0.031, 0.009, 72],
         [0.305, 0.323, 0.023, 0.009, 58],
         [0.385, 0.393, 0.038, 0.016, 65.5],
         [0.472, 0.399, 0.032, 0.014, 51],
         [0.527, 0.35, 0.026, 0.013, 20],
         [0.475, 0.3, 0.029, 0.011, 28.5],
         [0.51, 0.236, 0.024, 0.012, 29.5],
         [0.596, 0.283, 0.026, 0.013, 13],
         [0.344, 0.284, 0.023, 0.009, 60],
         [0.39, 0.237, 0.025, 0.01, 47],
         [0.441, 0.198, 0.028, 0.0095, 34.5],
         [0.278, 0.223, 0.024, 0.0055, 57.5],
         [0.3, 0.163, 0.029, 0.006, 54],
         [0.365, 0.153, 0.036, 0.0095, 40]
         ])
    
    # convert to v-format ([a,b, xc, yc, theta]):
    v_mac = v_mac[:,[2,3,0,1,4]]
    
    # convert last column to rad.:
    v_mac[:,-1] = v_mac[:,-1]*np.pi/180
    
    # convert to desired number of MacAdam-steps:
    v_mac[:,0:2] = v_mac[:,0:2]/10*nsteps
    
    if xy is not None:
        #calculate inverse covariance matrices:
        cik = math.v_to_cik(v_mac, inverse = True)
        if average_cik == True:
            cik_long = np.hstack((cik[:,0,:],cik[:,1,:]))
        
        # Calculate k_neighbours closest ellipses to xy:
        tree = sp.spatial.cKDTree(v_mac[:,2:4], copy_data = True)
        d, inds = tree.query(xy, k = k_neighbours)
    
        if k_neighbours  > 1:
            pd = 1
            w = (1.0 / np.abs(d)**pd)[:,:,None] # inverse distance weighting
            if average_cik == True:
                cik_long_est = np.sum(w * cik_long[inds,:], axis=1) / np.sum(w, axis=1)
            else:
                v_mac_est = np.sum(w * v_mac[inds,:], axis=1) / np.sum(w, axis=1) # for average xyc

        else:
            v_mac_est = v_mac[inds,:].copy()
        
        # convert cik back to v:
        if (average_cik == True) & (k_neighbours >1):
            cik_est = np.dstack((cik_long_est[:,0:2],cik_long_est[:,2:4]))
            v_mac_est = math.cik_to_v(cik_est, inverse = True)
        v_mac_est[:,2:4] = xy
    else:
        v_mac_est = v_mac
        
    return v_mac_est
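# Usage sketch (added; assumes the function above, and the numpy it relies on,
# is in scope): with xy=None the tabulated MacAdam (1942) ellipses are returned
# directly in v-format [a, b, xc, yc, theta_rad], scaled to the requested steps.
v_mac = get_macadam_ellipse(xy=None, nsteps=10)
print(v_mac.shape)   # -> (25, 5)
print(v_mac[0])      # first ellipse: semi-axes, center x,y, orientation (rad)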
Example 9
def xyz_to_Ydlep(xyz,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase the speed of the loop in the function.
            |          (A single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of the spectrum locus before it turns back on itself in the high wavelength range (~700 nm)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)

    # flip axis so that shortest dim is on axis0 (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)

    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hues requiring complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest enclosing hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = (hslb - hib)
        q1 = np.abs(dh).argmin(axis=0)  # index of closest hue
        sign_q1 = np.sign(dh[q1])[0]
        dh[np.sign(dh) ==
           sign_q1] = 1000000  # set all dh on the same side as q1 to a very large value
        q2 = np.abs(dh).argmin(
            axis=0)  # index of second  closest (enclosing) hue

        # # Test changes to code:
        # print('wls',i, wlsl[q1],wlsl[q2])
        # import matplotlib.pyplot as plt
        # plt.figure()
        # plt.plot(wlsl[:-1],np.diff(xsl[:,0]),'k.-')
        # plt.figure()
        # plt.plot(x[0,i],y[0,i],'k.'); plt.plot(xsl,ysl,'r.-');plt.plot(xsl[q1],ysl[q1],'b.');plt.plot(xsl[q2],ysl[q2],'g.');plt.plot(xsl[-1],ysl[-1],'c+')

        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
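# Added sketch (only numpy assumed) of the excitation-purity step above:
# purity is the ratio of the white-point-to-sample distance to the
# white-point-to-spectrum-locus distance along the same hue direction
# (white point at the origin after centering).
import numpy as np
xy_sample = np.array([0.10, 0.05])   # sample, centred on the white point
xy_locus = np.array([0.40, 0.20])    # locus intersection on the same hue line
print(np.hypot(*xy_sample) / np.hypot(*xy_locus))   # -> 0.25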
Example 10
def xyz_to_Ydlep_(xyz,
                  cieobs=_CIEOBS,
                  xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                  flip_axes=False,
                  **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase the speed of the loop in the function.
            |          (A single xyzw is not flipped!)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)

    # flip axis so that shortest dim is on axis0 (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]
    pmaxlambda = Yxysl[..., 1].argmax()
    maxlambda = wlsl[pmaxlambda]
    maxlambda = 700
    print(np.where(wlsl == maxlambda))
    pmaxlambda = np.where(wlsl == maxlambda)[0][0]
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')
    print(h)
    print('rh', h[0, 0] - h[0, 1])
    print(wlsl[0], wlsl[-1])

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    print('xyz:', xyz)
    for i in range(xyz3.shape[1]):
        print('\ni:', i, h[:, i], hsl_max, hsl_min)
        print(h)
        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hues requiring complementary wavelength (purple line)
        print('pc', (h[:, i] > hsl_max) & (h[:, i] < hsl_min))
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue
        print('q1q2', q2, q1)

        print('wls:', h[:, i], wlsl[q1], wlsl[q2])
        print('hsls:', hsl[q2, 0], hsl[q1, 0])
        print('d', (wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]),
              (wlsl[q2] - wlsl[q1]) / (hsl[q2, 0] - hsl[q1, 0]))
        print('(h[:,i] - hsl[q1,0])', (h[:, i] - hsl[q1, 0]))
        print('div', np.divide((wlsl[q2] - wlsl[q1]),
                               (hsl[q2, 0] - hsl[q1, 0])))
        print(
            'mult(...)',
            np.multiply((h[:, i] - hsl[q1, 0]),
                        np.divide((wlsl[q2] - wlsl[q1]),
                                  (hsl[q2, 0] - hsl[q1, 0]))))
        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        print('dom', dominantwavelength[:, i])
        dominantwavelength[(dominantwavelength[:,
                                               i] > max(wlsl[q1], wlsl[q2])),
                           i] = max(wlsl[q1], wlsl[q2])
        dominantwavelength[(dominantwavelength[:,
                                               i] < min(wlsl[q1], wlsl[q2])),
                           i] = min(wlsl[q1], wlsl[q2])

        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
Example 11
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw: 
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase the speed of the loop in the function.
            |          (A single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of the spectrum locus before it turns back on itself in the high wavelength range (~700 nm)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom:
        #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:])) #abs because dom<0--> complemtary wl
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) ==
            sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(
            axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    else:
        Yxy = Yxy.transpose((0, 1, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
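# Hedged round-trip sketch (added): assumes xyz_to_Ydlep and Ydlep_to_xyz above
# are importable together with their luxpy helpers (_CMF, xyz_to_Yxy, ...).
# Converting forward and back should approximately recover the input xyz.
import numpy as np
xyz = np.array([[50., 40., 30.], [30., 45., 60.]])
ydlep = xyz_to_Ydlep(xyz, cieobs='1931_2')
print(ydlep)                                  # Y, dominant wavelength, purity
print(Ydlep_to_xyz(ydlep, cieobs='1931_2'))   # ~ original xyz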
Example 12
def spd_normalize(data, norm_type=None, norm_f=1, wl=True, cieobs=_CIEOBS):
    """
    Normalize a spectral power distribution (SPD).
    
    Args:
        :data: 
            | ndarray
        :norm_type: 
            | None, optional 
            |       - 'lambda': make the spectral value at the wavelength specified by :norm_f: equal to 1
            |       - 'area': area-normalization times norm_f
            |       - 'max': max-normalization times norm_f
            |       - 'ru': to :norm_f: radiometric units 
            |       - 'pu': to :norm_f: photometric units 
            |       - 'pusa': to :norm_f: photometric units (with Km corrected
            |                             to standard air, cfr. CIE TN003-2015)
            |       - 'qu': to :norm_f: quantal energy units
        :norm_f:
            | 1, optional
            | Normalization factor that determines the size of normalization 
            | for 'max' and 'area' 
            | or which wavelength is normalized to 1 for 'lambda' option.
        :wl: 
            | True or False, optional 
            | If True, the first column of data contains wavelengths.
        :cieobs:
            | _CIEOBS or str, optional
            | Type of cmf set to use for normalization using photometric units 
            | (norm_type == 'pu')
    
    Returns:
        :returns: 
            | ndarray with normalized data.
    """
    if norm_type is not None:
        if not isinstance(norm_type, list): norm_type = [norm_type]

        if norm_f is not None:
            if not isinstance(norm_f, list): norm_f = [norm_f]

        if ('lambda' in norm_type) | ('qu' in norm_type):
            wl = True  # for lambda & 'qu' normalization wl MUST be first column
            wlr = data[0]

        if (('area' in norm_type) | ('ru' in norm_type) | ('pu' in norm_type) |
            ('pusa' in norm_type)) & (wl == True):
            dl = getwld(data[0])
        else:
            dl = 1  #no wavelengths provided

        offset = int(wl)
        for i in range(data.shape[0] - offset):
            norm_type_ = norm_type[i] if (len(norm_type) > 1) else norm_type[0]

            if norm_f is not None:
                norm_f_ = norm_f[i] if (len(norm_f) > 1) else norm_f[0]
            else:
                norm_f_ = 560.0 if (norm_type_ == 'lambda') else 1.0

            if norm_type_ == 'max':
                data[i + offset] = norm_f_ * data[i + offset] / np.max(data[i + offset])
            elif norm_type_ == 'area':
                data[i + offset] = norm_f_ * data[i + offset] / (np.sum(data[i + offset]) * dl)
            elif norm_type_ == 'lambda':
                wl_index = np.abs(wlr - norm_f_).argmin()
                data[i + offset] = data[i + offset] / data[i + offset][wl_index]
            elif (norm_type_ == 'ru') | (norm_type_ == 'pu') | (
                    norm_type_ == 'pusa') | (norm_type_ == 'qu'):
                rpq_power = spd_to_power(data[[0, i + offset], :],
                                         cieobs=cieobs,
                                         ptype=norm_type_)
                data[i + offset] = (norm_f_ / rpq_power) * data[i + offset]
            else:
                data[i + offset] = data[i + offset] / norm_f_
    return data
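A minimal usage sketch for spd_normalize follows; it assumes numpy and the getwld helper used in the listing are available, and the toy spectrum is made up for illustration. Note that spd_normalize writes the normalized values back into its input array, hence the .copy() calls.

# Minimal usage sketch for spd_normalize (toy spectrum, made up for illustration).
# spd_normalize modifies its input in place, hence the .copy() calls.
import numpy as np

wl = np.arange(380.0, 781.0, 5.0)                      # wavelengths (nm)
values = np.exp(-0.5 * ((wl - 550.0) / 50.0)**2)       # toy spectral values
spd = np.vstack((wl, values))                          # row 0 = wavelengths

spd_max1 = spd_normalize(spd.copy(), norm_type='max')                  # peak -> 1
spd_area1 = spd_normalize(spd.copy(), norm_type='area', norm_f=1)      # area -> 1
spd_at560 = spd_normalize(spd.copy(), norm_type='lambda', norm_f=560)  # value at 560 nm -> 1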
def xyz_to_rfl(xyz, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {},\
                 interp_type = 'nd', k_neighbours = 4, verbosity = 0):
    """
    Approximate spectral reflectance of xyz based on nd-dimensional linear interpolation 
    or k nearest neighbour interpolation of samples from a standard reflectance set.
    
    Args:
        :xyz: 
            | ndarray with tristimulus values of target points.
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'rfl_est' or str, optional
        :refspd: 
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65.
        :cieobs:
            | _CIEOBS, optional
            | CMF set used for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.

    Returns:
        :returns: 
            | :rfl_est:
            | ndarrays with estimated reflectance spectra.
    """

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()

    # Calculate lab-type coordinates of standard rfl set under refspd:
    xyz_rr, xyz_wr = spd_to_xyz(refspd,
                                relative=True,
                                rfl=rfl,
                                cieobs=cieobs,
                                out=2)
    cspace_tf_copy = cspace_tf.copy()
    cspace_tf_copy['xyzw'] = xyz_wr  # put correct white point in param. dict
    lab_rr = colortf(xyz_rr,
                     tf=cspace,
                     fwtf=cspace_tf_copy,
                     bwtf=cspace_tf_copy)[:, 0, :]

    # Convert xyz to lab-type values under refspd:
    lab = colortf(xyz, tf=cspace, fwtf=cspace_tf_copy, bwtf=cspace_tf_copy)

    if interp_type == 'nearest':
        # Find rfl (cfr. lab_rr) from the rfl set that results in 'near' metameric
        # color coordinates for each value in lab (i.e. smallest DE):
        # Construct cKDTree:
        tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

        # Interpolate rfls using k nearest neighbours and inverse distance weighting:
        d, inds = tree.query(lab, k=k_neighbours)
        if k_neighbours > 1:
            d += _EPS
            w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
            rfl_est = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(w, axis=1)
        else:
            rfl_est = rfl[inds + 1, :].copy()
    elif interp_type == 'nd':
        rfl_est = math.ndinterp1_scipy(lab_rr, rfl[1:], lab)

        _isnan = np.isnan(rfl_est[:, 0])

        if _isnan.any():  # fall back to nearest neighbour for points that fail Delaunay interpolation (i.e. ndinterp1_scipy)

            # Find rfl (cfr. lab_rr) from the rfl set that results in 'near' metameric
            # color coordinates for each value in lab[_isnan] (i.e. smallest DE):
            # Construct cKDTree:
            tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

            # Interpolate rfls using k nearest neighbours and inverse distance weighting:
            d, inds = tree.query(lab[_isnan, ...], k=k_neighbours)
            if k_neighbours > 1:
                d += _EPS
                w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
                rfl_est_isnan = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(w, axis=1)
            else:
                rfl_est_isnan = rfl[inds + 1, :].copy()
            rfl_est[_isnan, :] = rfl_est_isnan
    else:
        raise Exception('xyz_to_rfl(): unsupported interp_type!')

    rfl_est[rfl_est < 0] = 0  # can occur for points outside the convex hull of the standard rfl set

    rfl_est = np.vstack((rfl[0], rfl_est))

    if (verbosity > 0) | ('xyz_est' in out.split(',')) | ('lab_est' in out.split(',')) | \
       ('DEi_ab' in out.split(',')) | ('DEa_ab' in out.split(',')):
        xyz_est, _ = spd_to_xyz(refspd,
                                rfl=rfl_est,
                                relative=True,
                                cieobs=cieobs,
                                out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy['xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_est = colortf(xyz_est, tf=cspace, fwtf=cspace_tf_copy)[:, 0, :]
        DEi_ab = np.sqrt(((lab_est[:, 1:3] - lab[:, 1:3])**2).sum(axis=1))
        DEa_ab = DEi_ab.mean()

    if verbosity > 0:
        ax = plot_color_data(lab[...,1], lab[...,2], z = lab[...,0], \
                        show = False, cieobs = cieobs, cspace = cspace, \
                        formatstr = 'ro', label = 'Original')
        plot_color_data(lab_est[...,1], lab_est[...,2], z = lab_est[...,0], \
                        show = True, axh = ax, cieobs = cieobs, cspace = cspace, \
                        formatstr = 'bd', label = 'Rendered')

    if out == 'rfl_est':
        return rfl_est
    elif out == 'rfl_est,xyz_est':
        return rfl_est, xyz_est
    else:
        return eval(out)
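A minimal usage sketch for xyz_to_rfl follows, assuming the luxpy-style helpers referenced in the listing (_CRI_RFL, _CIE_ILLUMINANTS, spd_to_xyz, colortf, ...) are in scope; the tristimulus values are made up for illustration.

# Minimal usage sketch for xyz_to_rfl; the xyz values are illustrative and the
# luxpy-style helpers used in the listing are assumed to be importable.
import numpy as np

xyz_targets = np.array([[40.0, 45.0, 30.0],
                        [20.0, 18.0, 35.0]])   # made-up tristimulus values

# n-dimensional (Delaunay) interpolation over the default TM30 reflectance set:
rfl_est = xyz_to_rfl(xyz_targets, interp_type='nd')

# nearest-neighbour interpolation, also returning the rendered xyz so the
# metameric match can be checked:
rfl_est, xyz_est = xyz_to_rfl(xyz_targets, out='rfl_est,xyz_est',
                              interp_type='nearest', k_neighbours=4)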
Example n. 14
0
def cri_ref(ccts, wl3 = None, ref_type = _CRI_REF_TYPE, mix_range = None, 
            cieobs = None, norm_type = None, norm_f = None, 
            force_daylight_below4000K = False, n = None,
            daylight_locus = None):
    """
    Calculates a reference illuminant spectrum based on cct 
    for color rendering index calculations.
    
    Args:
        :ccts: 
            | list of int/floats or ndarray with ccts.
        :wl3: 
            | None, optional
            | New wavelength range for interpolation. 
            | Defaults to wavelengths specified by luxpy._WL3.
        :ref_type:
            | str or list[str], optional
            | Specifies the type of reference spectrum to be calculated.
            | Defaults to luxpy._CRI_REF_TYPE. 
            | If :ref_type: is list of strings, then for each cct in :ccts: 
            | a different reference illuminant can be specified. 
            | If :ref_type: == 'spd', then :ccts: is assumed to be an ndarray
            | of reference illuminant spectra.
        :mix_range: 
            | None or ndarray, optional
            | Determines the cct range between which the reference illuminant is
            | a weighted mean of a Planckian and Daylight Phase spectrum.
            | Weighting is done as described in IES TM30:
            |    SPDreference = (Te-T)/(Te-Tb)*Planckian+(T-Tb)/(Te-Tb)*daylight
            |    where Tb and Te are respectively the start and end CCTs of the
            |    mixing range and whereby the Planckian and Daylight SPDs
            |    have been normalized for equal luminous flux.
            | If None: use the default specified for :ref_type:.
            | Can be an ndarray with shape[0] > 1, in which case a different
            | mixing range will be used for each cct in :ccts:.
        :cieobs: 
            | None, optional
            | Required for the normalization of the Planckian and Daylight SPDs 
            | when calculating a 'mixed' reference illuminant.
            | Required when calculating daylightphase (adjust locus parameters to cieobs)
            | If None: _CIEOBS will be used.
        :norm_type: 
            | None, optional 
            |       - 'lambda': make the value at the wavelength specified in :norm_f: equal to 1
            |       - 'area': area-normalization times norm_f
            |       - 'max': max-normalization times norm_f
            |       - 'ru': to :norm_f: radiometric units 
            |       - 'pu': to :norm_f: photometric units 
            |       - 'pusa': to :norm_f: photometric units (with Km corrected
            |                             to standard air, cfr. CIE TN003-2015)
            |       - 'qu': to :norm_f: quantal energy units
        :norm_f:
            | 1, optional
            | Normalization factor that determines the size of normalization 
            | for 'max' and 'area' 
            | or which wavelength is normalized to 1 for 'lambda' option.
        :force_daylight_below4000K: 
            | False or True, optional
            | Daylight locus approximation is not defined below 4000 K, 
            | but by setting this to True, the calculation can be forced to 
            | calculate it anyway.
        :n:
            | None, optional
            | Refractive index (for use in calculation of blackbody radiators).
            | If None: use the one stored in _BB['n']
        :daylight_locus:
            | None, optional
            | dict with xD(T) and yD(xD) parameters to calculate daylight locus 
            | for specified cieobs.
            | If None: use pre-calculated values.
            | If 'calc': calculate them on the fly.
    
    Returns:
        :returns: 
            | ndarray with reference illuminant spectra.
            | (:returns:[0] contains wavelengths)

    Note: 
        Future versions will have the ability to take a dict as input 
        for ref_type. This way other reference illuminants can be specified 
        than the ones in _CRI_REF_TYPES. 
    """
    if ref_type == 'spd':
        
        # ccts already contains spectrum of reference:
        return spd(ccts, wl = wl3, norm_type = norm_type, norm_f = norm_f)

    else:
        if mix_range is not None: mix_range = np2d(mix_range)

        if not (isinstance(ref_type,list) | isinstance(ref_type,dict)): ref_type = [ref_type]
   
        for i in range(len(ccts)):
            cct = ccts[i]

            # get ref_type and mix_range:
            if isinstance(ref_type,dict):
                raise Exception("cri_ref(): dictionary ref_type: Not yet implemented")
            else:

                ref_type_ = ref_type[i] if (len(ref_type)>1) else ref_type[0]

                if mix_range is None:
                    mix_range_ =  _CRI_REF_TYPES[ref_type_]

                else:
                    mix_range_ = mix_range[i] if (mix_range.shape[0]>1) else mix_range[0]  #must be np2d !!!            
      
            if (mix_range_[0] == mix_range_[1]) | (ref_type_[0:2] == 'BB') | (ref_type_[0:2] == 'DL'):
                if ((cct < mix_range_[0]) & (not (ref_type_[0:2] == 'DL'))) | (ref_type_[0:2] == 'BB'):
                    Sr = blackbody(cct, wl3, n = n)
                elif ((cct >= mix_range_[0]) & (not (ref_type_[0:2] == 'BB'))) | (ref_type_[0:2] == 'DL') :
                    Sr = daylightphase(cct,wl3,force_daylight_below4000K = force_daylight_below4000K, cieobs = cieobs, daylight_locus = daylight_locus)
            else:
                SrBB = blackbody(cct, wl3, n = n)
                SrDL = daylightphase(cct,wl3,verbosity = None,force_daylight_below4000K = force_daylight_below4000K, cieobs = cieobs, daylight_locus = daylight_locus)
                cieobs_ = _CIEOBS if cieobs is None else cieobs
                cmf = xyzbar(cieobs = cieobs_, scr = 'dict', wl_new = wl3)
                wl = SrBB[0]
                ld = getwld(wl)

                SrBB = 100.0*SrBB[1]/np.array(np.sum(SrBB[1]*cmf[2]*ld))
                SrDL = 100.0*SrDL[1]/np.array(np.sum(SrDL[1]*cmf[2]*ld))
                Tb, Te = float(mix_range_[0]), float(mix_range_[1])
                cBB, cDL = (Te-cct)/(Te-Tb), (cct-Tb)/(Te-Tb)
                if cBB < 0.0:
                    cBB = 0.0
                elif cBB > 1:
                    cBB = 1.0
                if cDL < 0.0:
                    cDL = 0.0
                elif cDL > 1:
                    cDL = 1.0

                Sr = SrBB*cBB + SrDL*cDL
                Sr[np.isnan(Sr)] = 0.0  # NaN == NaN is never True, so use np.isnan to zero out NaNs
                Sr560 = Sr[np.where(np.abs(wl - 560.0) == np.min(np.abs(wl - 560.0)))[0]]
                Sr = np.vstack((wl,(Sr/Sr560)))
                     
            if i == 0:
                Srs = Sr[1]
            else:
                Srs = np.vstack((Srs,Sr[1]))
                    
        Srs = np.vstack((Sr[0],Srs))

        return  spd(Srs, wl = None, norm_type = norm_type, norm_f = norm_f)
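
A minimal usage sketch for cri_ref follows, assuming blackbody, daylightphase and the other luxpy-style helpers used in the listing are in scope.

# Minimal usage sketch for cri_ref (helpers from the listing assumed in scope).
Sref = cri_ref([3000, 5000, 6500], ref_type='iestm30')
# Sref[0] holds the wavelengths; each following row is the reference spectrum
# for the corresponding cct: a Planckian radiator below the mixing range, a
# daylight phase above it, and a luminous-flux-normalized mixture inside it.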