Code example #1
import numpy as np  # module-level import assumed by this snippet

def psy_scale(data,
              scale_factor=[1.0 / 55.0, 3.0 / 2.0, 2.0],
              scale_max=100.0):  # defaults for cri2012
    """
    Psychometric color rendering index scale from CRI2012:

    | Rfi,a = 100 * (2 / (exp(c1*abs(DEi,a)**c2) + 1)) ** c3
        
    Args:
        :data: 
            | float or list[floats] or ndarray 
        :scale_factor:
            | [1/55, 3/2, 2.0] or list[float] or ndarray, optional
            | Rescales color differences before subtracting them from :scale_max:
            | Note that the default value is the one from (Smet et al. 2013, LRT).
        :scale_max: 
            | 100.0, optional
            | Maximum value of linear scale
    
    Returns:
        :returns: 
            | float or list[floats] or ndarray
        
    References:
        1. `Smet, K., Schanda, J., Whitehead, L., & Luo, R. (2013). 
        CRI2012: A proposal for updating the CIE colour rendering index. 
        Lighting Research and Technology, 45, 689–709. 
        <http://lrt.sagepub.com/content/45/6/689>`_  
        
    """
    return scale_max * np.power(
        2.0 /
        (np.exp(scale_factor[0] * np.power(np.abs(data), scale_factor[1])) +
         1.0), scale_factor[2])
Code example #2
File: basics.py Project: hohlraum/luxpy
def geomean(data, axis=0, keepdims=False):
    """
    Calculate geometric mean along axis.
    
    Args:
        :data:
            | list of values or ndarray
        :axis:
            | 0, optional
            | Axis along which to calculate geomean.
        :keepdims:
            | False or True, optional
            | Keep original dimensions of array.
    
    Returns:
        :returns:
            | ndarray with geomean values. 
    """
    data = np2d(data)  # np2d: luxpy helper that ensures at least a 2D ndarray
    return np.power(data.prod(axis=axis, keepdims=keepdims), 1 / data.shape[axis])
Code example #3
File: basics.py Project: hohlraum/luxpy
def rms(data, axis=0, keepdims=False):
    """
    Calculate root-mean-square along axis.
    
    Args:
        :data: 
            | list of values or ndarray
        :axis:
            | 0, optional
            | Axis along which to calculate rms.
        :keepdims:
            | False or True, optional
            | Keep original dimensions of array.
    
    Returns:
        :returns:
            | ndarray with rms values.
    """
    data = np2d(data)
    return np.sqrt(np.power(data, 2).mean(axis=axis, keepdims=keepdims))
Code example #4
def spd_to_mcri(SPD, D=0.9, E=None, Yb=20.0, out='Rm', wl=None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs; 
              first axis holds the wavelengths)
        :D: 
            | 0.9, optional
            | Degree of adaptation.
        :E: 
            | None, optional
            | Illuminance in lux 
            |  (used to calculate La = (Yb/100)*(E/pi) to then calculate D 
            |  following the 'cat02' model). 
            | If None: the degree is determined by :D:
            |  If (:E: is not None) & (:Yb: is None):  :E: is assumed to contain 
               the adapting field luminance La (cd/m²).
        :Yb: 
            | 20.0, optional
            | Luminance factor of background. (used when calculating La from E)
            | If None, E contains La (cd/m²).
        :out: 
            | 'Rm' or str, optional
            | Specifies requested output (e.g. 'Rm,Rmi,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns: 
            | float or ndarray with MCRI Rm for :out: 'Rm'
            | Other output is also possible by changing the :out: str value.        
          
    References:
        1. `K.A.G. Smet, W.R. Ryckaert, M.R. Pointer, G. Deconinck, P. Hanselaer,(2012)
        “A memory colour quality metric for white light sources,” 
        Energy Build., vol. 49, no. C, pp. 216–225.
        <http://www.sciencedirect.com/science/article/pii/S0378778812000837>`_
    """
    SPD = np2d(SPD)

    if wl is not None:
        SPD = spd(data=SPD, interpolation=_S_INTERP_TYPE, kind='np', wl=wl)

    # unpack metric default values:
    avg, catf, cieobs, cri_specific_pars, cspace, ref_type, rg_pars, sampleset, scale = [
        _MCRI_DEFAULTS[x] for x in sorted(_MCRI_DEFAULTS.keys())
    ]
    similarity_ai = cri_specific_pars['similarity_ai']
    Mxyz2lms = cspace['Mxyz2lms']
    scale_fcn = scale['fcn']
    scale_factor = scale['cfactor']
    sampleset = eval(sampleset)

    # A. calculate xyz:
    xyzti, xyztw = spd_to_xyz(SPD, cieobs=cieobs['xyz'], rfl=sampleset, out=2)
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyztw,
                              cieobs=cieobs['cct'],
                              out='cct,duv',
                              mode='lut')

    # B. perform chromatic adaptation to adopted whitepoint of ipt color space, i.e. D65:
    if catf is not None:
        Dtype_cat, F, Yb_cat, catmode_cat, cattype_cat, mcat_cat, xyzw_cat = [
            catf[x] for x in sorted(catf.keys())
        ]

        # calculate degree of adaptation D:
        if E is not None:
            if Yb is not None:
                La = (Yb / 100.0) * (E / np.pi)
            else:
                La = E
            D = cat.get_degree_of_adaptation(Dtype=Dtype_cat, F=F, La=La)
        else:
            Dtype_cat = None  # direct input of D

        if (E is None) and (D is None):
            D = 1.0  # set degree of adaptation to 1 !
        if D > 1.0: D = 1.0
        if D < 0.6: D = 0.6  # put a limit on the lowest D

        # apply cat:
        xyzti = cat.apply(xyzti,
                          cattype=cattype_cat,
                          catmode=catmode_cat,
                          xyzw1=xyztw,
                          xyzw0=None,
                          xyzw2=xyzw_cat,
                          D=D,
                          mcat=[mcat_cat],
                          Dtype=Dtype_cat)
        xyztw = cat.apply(xyztw,
                          cattype=cattype_cat,
                          catmode=catmode_cat,
                          xyzw1=xyztw,
                          xyzw0=None,
                          xyzw2=xyzw_cat,
                          D=D,
                          mcat=[mcat_cat],
                          Dtype=Dtype_cat)

    # C. convert xyz to ipt and split:
    ipt = xyz_to_ipt(xyzti, cieobs=cieobs['xyz'],
                     M=Mxyz2lms)  # input matrix as published in Smet et al. (2012), Energy and Buildings
    I, P, T = asplit(ipt)

    # D. calculate specific (hue dependent) similarity indicators, Si:
    if len(xyzti.shape) == 3:
        ai = np.expand_dims(similarity_ai, axis=1)
    else:
        ai = similarity_ai
    a1, a2, a3, a4, a5 = asplit(ai)
    mahalanobis_d2 = (a3 * np.power((P - a1), 2.0) +
                      a4 * np.power((T - a2), 2.0) +
                      2.0 * a5 * (P - a1) * (T - a2))
    if (len(mahalanobis_d2.shape) == 3) & (mahalanobis_d2.shape[-1] == 1):
        mahalanobis_d2 = mahalanobis_d2[:, :, 0].T
    Si = np.exp(-0.5 * mahalanobis_d2)

    # E. calculate general similarity indicator, Sa:
    Sa = avg(Si, axis=0, keepdims=True)

    # F. rescale similarity indicators (Si, Sa) with a 0-1 scale to memory color rendition indices (Rmi, Rm) with a 0 - 100 scale:
    Rmi = scale_fcn(np.log(Si), scale_factor=scale_factor)
    Rm = np2d(scale_fcn(np.log(Sa), scale_factor=scale_factor))

    # G. calculate Rg (polyarea of test / polyarea of memory colours):
    if 'Rg' in out.split(','):
        I = I[..., None]                        # broadcast I to 3D
        a1 = a1[:, None] * np.ones(I.shape)     # tile memory-colour centers along P
        a2 = a2[:, None] * np.ones(I.shape)     # tile memory-colour centers along T
        a12 = np.concatenate((a1, a2), axis=2)
        ipt_mc = np.concatenate((I, a12), axis=2)
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())
        ]

        Rg = jab_to_rg(ipt,
                       ipt_mc,
                       ordered_and_sliced=False,
                       nhbins=nhbins,
                       start_hue=start_hue,
                       normalize_gamut=normalize_gamut)

    if (out != 'Rm'):
        return eval(out)
    else:
        return Rm
Code example #5
File: cct.py Project: insurgentmedtech/luxpy
def xyz_to_cct_ohno(xyzw,
                    cieobs=_CIEOBS,
                    out='cct',
                    wl=None,
                    accuracy=0.1,
                    force_out_of_lut=True,
                    upper_cct_max=10.0**20,
                    approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and 
    Duv (distance above (>0) or below (<0) the Planckian locus) 
    using Ohno's method. 
    
    Args:
        :xyzw: 
            | ndarray of tristimulus values
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculate xyzw.
        :out: 
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct 
              to speed up search.
        :force_out_of_lut: 
            | True, optional
            | If True and cct is out of range of the LUT, then switch to 
              brute-force search method, else return numpy.nan values.
        
    Returns:
        :returns: 
            | ndarray with:
            |    cct: out == 'cct' (or 1)
            |    duv: out == 'duv' (or -1)
            |    cct, duv: out == 'cct,duv' (or 2)
            |    [cct,duv]: out == "[cct,duv]" (or -2) 
            
    Note:
        LUTs are stored in ./data/cctluts/
        
    Reference:
        1. `Ohno Y. Practical use and calculation of CCT and Duv. 
        Leukos. 2014 Jan 2;10(1):47-55.
        <http://www.tandfonline.com/doi/abs/10.1080/15502724.2014.839020>`_
    """

    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_ohno(): Input xyzw.ndim must be <= 2 !')

    # get CIE 1960 u,v of test source:
    Yuv = xyz_to_Yuv(xyzw)  # convert xyzw to CIE 1976 u'v'
    axis_of_v3 = len(Yuv.shape) - 1  # axis containing color components
    u = Yuv[:, 1, None]  # CIE 1960 u equals u'
    v = (2.0 / 3.0) * Yuv[:, 2, None]  # CIE 1960 v = (2/3) * v'

    uv = np2d(np.concatenate((u, v), axis=axis_of_v3))

    # load cct & uv from LUT:
    if cieobs not in _CCT_LUT:
        _CCT_LUT[cieobs] = calculate_lut(ccts=None,
                                         cieobs=cieobs,
                                         add_to_lut=False)
    cct_LUT = _CCT_LUT[cieobs][:, 0, None]
    uv_LUT = _CCT_LUT[cieobs][:, 1:3]

    # calculate CCT of each uv:
    CCT = np.ones(uv.shape[0]) * np.nan  # initialize with NaN's
    Duv = CCT.copy()  # initialize with NaN's
    idx_m = 0
    idx_M = uv_LUT.shape[0] - 1
    for i in range(uv.shape[0]):
        out_of_lut = False
        delta_uv = (((uv_LUT - uv[i])**2.0).sum(axis=1))**0.5  # distance from uv to each LUT entry
        idx_min = delta_uv.argmin()  # index of minimum distance

        # find Tm, delta_uv and u,v for 2 points surrounding uv corresponding to idx_min:
        if idx_min == idx_m:
            idx_min_m1 = idx_min
            out_of_lut = True
        else:
            idx_min_m1 = idx_min - 1
        if idx_min == idx_M:
            idx_min_p1 = idx_min
            out_of_lut = True
        else:
            idx_min_p1 = idx_min + 1

        if (out_of_lut == True) & (force_out_of_lut == True):  # fall back to brute-force search
            cct_i, Duv_i = xyz_to_cct_search(xyzw[i],
                                             cieobs=cieobs,
                                             wl=wl,
                                             accuracy=accuracy,
                                             out='cct,duv',
                                             upper_cct_max=upper_cct_max,
                                             approx_cct_temp=approx_cct_temp)
            CCT[i] = cct_i
            Duv[i] = Duv_i
            continue
        elif (out_of_lut == True) & (force_out_of_lut == False):
            CCT[i] = np.nan
            Duv[i] = np.nan
            continue  # keep NaN output rather than extrapolating from the LUT edge

        cct_m1 = cct_LUT[idx_min_m1]  # - 2*_EPS
        delta_uv_m1 = delta_uv[idx_min_m1]
        uv_m1 = uv_LUT[idx_min_m1]
        cct_p1 = cct_LUT[idx_min_p1]
        delta_uv_p1 = delta_uv[idx_min_p1]
        uv_p1 = uv_LUT[idx_min_p1]

        cct_0 = cct_LUT[idx_min]
        delta_uv_0 = delta_uv[idx_min]

        # calculate uv distance between Tm_m1 & Tm_p1:
        delta_uv_p1m1 = ((uv_p1[0] - uv_m1[0])**2.0 +
                         (uv_p1[1] - uv_m1[1])**2.0)**0.5

        # Triangular solution (Ohno 2014): interpolate along the chord between the two LUT points bracketing the minimum:
        x = ((delta_uv_m1**2) - (delta_uv_p1**2) +
             (delta_uv_p1m1**2)) / (2 * delta_uv_p1m1)
        Tx = cct_m1 + ((cct_p1 - cct_m1) * (x / delta_uv_p1m1))
        #uBB = uv_m1[0] + (uv_p1[0] - uv_m1[0]) * (x / delta_uv_p1m1)
        vBB = uv_m1[1] + (uv_p1[1] - uv_m1[1]) * (x / delta_uv_p1m1)

        Tx_corrected_triangular = Tx * 0.99991
        signDuv = np.sign(uv[i][1] - vBB)
        Duv_triangular = signDuv * np.atleast_1d(
            ((delta_uv_m1**2.0) - (x**2.0))**0.5)

        # Parabolic solution (Ohno 2014): fit a parabola through the three closest LUT points:
        a = delta_uv_m1 / (cct_m1 - cct_0 + _EPS) / (cct_m1 - cct_p1 + _EPS)
        b = delta_uv_0 / (cct_0 - cct_m1 + _EPS) / (cct_0 - cct_p1 + _EPS)
        c = delta_uv_p1 / (cct_p1 - cct_0 + _EPS) / (cct_p1 - cct_m1 + _EPS)
        A = a + b + c
        B = -(a * (cct_p1 + cct_0) + b * (cct_p1 + cct_m1) + c *
              (cct_0 + cct_m1))
        C = (a * cct_p1 * cct_0) + (b * cct_p1 * cct_m1) + (c * cct_0 * cct_m1)
        Tx = -B / (2 * A + _EPS)
        Tx_corrected_parabolic = Tx * 0.99991
        Duv_parabolic = signDuv * (A * np.power(Tx_corrected_parabolic, 2) +
                                   B * Tx_corrected_parabolic + C)

        Threshold = 0.002
        if np.abs(Duv_triangular) < Threshold:  # triangular solution is used for small |Duv| (Ohno 2014)
            CCT[i] = Tx_corrected_triangular
            Duv[i] = Duv_triangular
        else:
            CCT[i] = Tx_corrected_parabolic
            Duv[i] = Duv_parabolic

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2dT(CCT)
    elif (out == 'duv') | (out == -1):
        return np2dT(Duv)
    elif (out == 'cct,duv') | (out == 2):
        return np2dT(CCT), np2dT(Duv)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((CCT, Duv)).T
Code example #6
File: cct.py Project: insurgentmedtech/luxpy
def xyz_to_cct_search(xyzw,
                      cieobs=_CIEOBS,
                      out='cct',
                      wl=None,
                      accuracy=0.1,
                      upper_cct_max=10.0**20,
                      approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and 
    Duv (distance above (>0) or below (<0) the Planckian locus) by a 
    brute-force search. 

    | The algorithm uses an approximate cct_temp (HA approx., see xyz_to_cct_HA) 
      as starting point or uses the middle of the allowed cct-range 
      (1e2 K - 1e20 K, higher causes overflow) on a log-scale, then constructs 
      a 4-step section of the blackbody (Planckian) locus on which to find the
      minimum distance to the 1960 uv chromaticity of the test source.

    Args:
        :xyzw: 
            | ndarray of tristimulus values
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set used to calculate xyzw.
        :out: 
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl: 
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy: 
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max: 
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp: 
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct to 
              speed up search.

    Returns:
        :returns: 
            | ndarray with:
            |    cct: out == 'cct' (or 1)
            |    duv: out == 'duv' (or -1)
            |    cct, duv: out == 'cct,duv' (or 2)
            |    [cct,duv]: out == "[cct,duv]" (or -2) 
    
    Notes:
        This method is more accurate, but slower, than xyz_to_cct_ohno!
        Note that cct must be between 1e2 K - 1e20 K 
        (very large cct values take a long time!!!)
    """

    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_search(): Input xyzw.ndim must be <= 2 !')

    # get CIE 1960 u,v of test source:
    Yuvt = xyz_to_Yuv(np.squeeze(xyzw))  # remove possible 1-dim + convert to CIE 1976 u'v'
    ut = Yuvt[:, 1, None]  # CIE 1960 u equals u'
    vt = (2 / 3) * Yuvt[:, 2, None]  # CIE 1960 v = (2/3) * v'

    # Initialize arrays:
    ccts = np.ones((xyzw.shape[0], 1)) * np.nan
    duvs = ccts.copy()

    #calculate preliminary solution(s):
    if (approx_cct_temp == True):
        ccts_est = xyz_to_cct_HA(xyzw)
        procent_estimates = np.array([[3000.0, 100000.0, 0.05],
                                      [100000.0, 200000.0, 0.1],
                                      [200000.0, 300000.0, 0.25],
                                      [300000.0, 400000.0, 0.4],
                                      [400000.0, 600000.0, 0.4],
                                      [600000.0, 800000.0, 0.4],
                                      [800000.0, np.inf, 0.25]])
    else:
        upper_cct = np.array(upper_cct_max)
        lower_cct = np.array(10.0**2)
        cct_scale_fun = lambda x: np.log10(x)
        cct_scale_ifun = lambda x: np.power(10.0, x)
        dT = (cct_scale_fun(upper_cct) - cct_scale_fun(lower_cct)) / 2
        ccttemp = np.array([cct_scale_ifun(cct_scale_fun(lower_cct) + dT)])
        ccts_est = np2d(ccttemp * np.ones((xyzw.shape[0], 1)))
        dT_approx_cct_False = dT.copy()

    # Loop through all ccts:
    for i in range(xyzw.shape[0]):

        #initialize CCT search parameters:
        cct = np.nan
        duv = np.nan
        ccttemp = ccts_est[i].copy()

        # Take care of (-1, NaN)'s from xyz_to_cct_HA signifying (CCT < lower, CCT > upper) bounds:
        approx_cct_temp_temp = approx_cct_temp
        if (approx_cct_temp == True):
            cct_scale_fun = lambda x: x
            cct_scale_ifun = lambda x: x
            if (ccttemp != -1) & (np.isnan(ccttemp) == False):  # within validity range of the CCT estimator
                for ii in range(procent_estimates.shape[0]):
                    if (ccttemp >= (1.0 - 0.05 * (ii == 0)) * procent_estimates[ii, 0]) & \
                       (ccttemp < (1.0 + 0.05 * (ii == 0)) * procent_estimates[ii, 1]):
                        procent_estimate = procent_estimates[ii, 2]
                        break

                dT = np.multiply(ccttemp, procent_estimate)  # search range around the ccttemp estimate (fraction from procent_estimates)
            elif (ccttemp == -1) & (np.isnan(ccttemp) == False):
                ccttemp = np.array([procent_estimates[0, 0] / 2])
                procent_estimate = 1  # cover 0 K to min_CCT of estimator
                dT = np.multiply(ccttemp, procent_estimate)
            elif (np.isnan(ccttemp) == True):
                upper_cct = np.array(upper_cct_max)
                lower_cct = np.array(10.0**2)
                cct_scale_fun = lambda x: np.log10(x)
                cct_scale_ifun = lambda x: np.power(10.0, x)
                dT = (cct_scale_fun(upper_cct) - cct_scale_fun(lower_cct)) / 2
                ccttemp = np.array(
                    [cct_scale_ifun(cct_scale_fun(lower_cct) + dT)])
                approx_cct_temp = False
        else:
            dT = dT_approx_cct_False

        nsteps = 3
        signduv = 1.0
        ccttemp = ccttemp[0]
        delta_cct = dT
        while ((delta_cct > accuracy)):  # keep converging on CCT

            #generate range of ccts:
            ccts_i = cct_scale_ifun(
                np.linspace(
                    cct_scale_fun(ccttemp) - dT,
                    cct_scale_fun(ccttemp) + dT, nsteps + 1))

            ccts_i[ccts_i < 100.0] = 100.0  # avoid nan's in calculation

            # Generate BB:
            BB = cri_ref(ccts_i, wl3=wl, ref_type=['BB'], cieobs=cieobs)

            # Calculate xyz:
            xyz = spd_to_xyz(BB, cieobs=cieobs)

            # Convert to CIE 1960 u,v:
            Yuv = xyz_to_Yuv(np.squeeze(xyz))  # remove possible 1-dim + convert to CIE 1976 u'v'
            u = Yuv[:, 1, None]  # CIE 1960 u
            v = (2.0 / 3.0) * Yuv[:, 2, None]  # CIE 1960 v

            # Calculate distance between list of uv's and uv of test source:
            dc = ((ut[i] - u)**2 + (vt[i] - v)**2)**0.5
            if np.isnan(dc.min()) == False:
                q = dc.argmin()

                if np.size(q) > 1:  # to minimize calculation time: only compute the median when necessary
                    cct = np.median(ccts_i[q])  # fixed: index the candidate list ccts_i, not the output array ccts
                    duv = np.median(dc[q])
                    q = int(np.median(q))  # must be able to serve as an index
                else:
                    cct = ccts_i[q]
                    duv = dc[q]

                if (q == 0):
                    ccttemp = cct_scale_ifun(
                        np.array(cct_scale_fun([cct])) + 2 * dT / nsteps)
                    continue  # minimum at lower edge: look in higher section of Planckian locus

                if (q == np.size(ccts_i) - 1):  # fixed: the last index is size - 1
                    ccttemp = cct_scale_ifun(
                        np.array(cct_scale_fun([cct])) - 2 * dT / nsteps)
                    continue  # minimum at upper edge: look in lower section of Planckian locus

                if (q > 0) & (q < np.size(ccts_i) - 1):
                    dT = 2 * dT / nsteps
                    # get Duv sign:
                    d_p1m1 = ((u[q + 1] - u[q - 1])**2.0 +
                              (v[q + 1] - v[q - 1])**2.0)**0.5

                    x = (dc[q - 1]**2.0 - dc[q + 1]**2.0 +
                         d_p1m1**2.0) / (2.0 * d_p1m1)  # fixed: divide by (2*d_p1m1), cf. the triangular solution in xyz_to_cct_ohno
                    vBB = v[q - 1] + ((v[q + 1] - v[q - 1]) * (x / d_p1m1))
                    signduv = np.sign(vt[i] - vBB)

                #calculate difference with previous intermediate solution:
                delta_cct = abs(cct - ccttemp)

                ccttemp = np.array(cct)  # set new intermediate CCT
                approx_cct_temp = approx_cct_temp_temp
            else:
                ccttemp = np.nan
                cct = np.nan
                duv = np.nan

        duvs[i] = signduv * abs(duv)
        ccts[i] = cct

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2d(ccts)
    elif (out == 'duv') | (out == -1):
        return np2d(duvs)
    elif (out == 'cct,duv') | (out == 2):
        return np2d(ccts), np2d(duvs)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((ccts, duvs)).T
Code example #7
def spd_to_cqs(SPD, version='v9.0', out='Qa', wl=None):
    """
    Calculates CQS Qa (Qai) or Qf (Qfi) or Qp (Qpi) for versions v9.0 or v7.5.
    
    Args:
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs; 
              first axis holds the wavelengths)
        :version: 
            | 'v9.0' or 'v7.5', optional
        :out: 
            | 'Qa' or str, optional
            | Specifies requested output (e.g. 'Qa,Qai,Qf,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns:
            | float or ndarray with CQS Qa for :out: 'Qa'
            | Other output is also possible by changing the :out: str value. 
    
    References:
        1. `W. Davis and Y. Ohno, 
        “Color quality scale,” (2010), 
        Opt. Eng., vol. 49, no. 3, pp. 33602–33616.
        <http://spie.org/Publications/Journal/10.1117/1.3360335>`_
    
    """
    outlist = out.split(',')  # fixed: outputs are comma-separated (e.g. 'Qa,Qai,Qf,cct,duv')
    if isinstance(version, str):
        cri_type = 'cqs-' + version
    elif isinstance(version, dict):
        cri_type = version

    # calculate DEI, labti, labri and get cspace_pars and rg_pars:
    DEi, labti, labri, cct, duv, cri_type = spd_to_DEi(
        SPD, cri_type=cri_type, out='DEi,jabt,jabr,cct,duv,cri_type', wl=wl)

    # further unpack cri_type:
    scale_fcn = cri_type['scale']['fcn']
    scale_factor = cri_type['scale']['cfactor']
    avg = cri_type['avg']
    cri_specific_pars = cri_type['cri_specific_pars']
    rg_pars = cri_type['rg_pars']

    # get maxC: to limit chroma-enhancement:
    maxC = cri_specific_pars['maxC']

    # make 3d:
    test_original_shape = labti.shape
    if len(test_original_shape) < 3:
        labti = labti[:, None]
        labri = labri[:, None]
        DEi = DEi[:, None]
        cct = cct[:, None]

    # calculate Rg for each spd:
    Qf = np.zeros((1, labti.shape[1]))
    Qfi = np.zeros((labti.shape[0], labti.shape[1]))

    if version == 'v7.5':
        GA = (9.2672 * (1.0e-11)) * cct**3.0 - (
            8.3959 * (1.0e-7)) * cct**2.0 + 0.00255 * cct - 1.612
    elif version == 'v9.0':
        GA = np.ones(cct.shape)
    else:
        raise Exception('.cri.spd_to_cqs(): Unrecognized CQS version.')

    if ('Qf' in outlist) | ('Qfi' in outlist):

        # loop of light source spds
        for ii in range(labti.shape[1]):
            Qfi[:, ii] = GA[ii] * scale_fcn(DEi[:, ii], [scale_factor[0]])
            Qf[:, ii] = GA[ii] * scale_fcn(avg(DEi[:, ii, None], axis=0),
                                           [scale_factor[0]])

    if ('Qa' in outlist) | ('Qai' in outlist) | ('Qp' in outlist) | (
            'Qpi' in outlist):

        Qa = Qf.copy()
        Qai = Qfi.copy()
        Qp = Qf.copy()
        Qpi = Qfi.copy()

        # loop of light source spds
        for ii in range(labti.shape[1]):

            # calculate deltaC (chroma of test minus chroma of reference):
            deltaC = (np.sqrt(np.power(labti[:, ii, 1:3], 2).sum(axis=1, keepdims=True)) -
                      np.sqrt(np.power(labri[:, ii, 1:3], 2).sum(axis=1, keepdims=True)))
            # limit chroma increase:
            DEi_Climited = DEi[:, ii, None].copy()
            deltaC_Climited = deltaC.copy()
            if maxC is None:
                maxC = 10000.0
            limitC = np.where(deltaC >= maxC)[0]
            deltaC_Climited[limitC] = maxC
            p_deltaC_pos = np.where(deltaC > 0.0)[0]
            DEi_Climited[p_deltaC_pos] = np.sqrt(
                DEi_Climited[p_deltaC_pos]**2.0 - deltaC_Climited[p_deltaC_pos]
                **2.0)  # increase in chroma is not penalized!

            if ('Qa' in outlist) | ('Qai' in outlist):
                Qai[:, ii,
                    None] = GA[ii] * scale_fcn(DEi_Climited, [scale_factor[1]])
                Qa[:, ii] = GA[ii] * scale_fcn(avg(DEi_Climited, axis=0),
                                               [scale_factor[1]])

            if ('Qp' in outlist) | ('Qpi' in outlist):
                deltaC_pos = deltaC_Climited * (deltaC_Climited >= 0.0)
                deltaCmu = np.mean(deltaC_Climited * (deltaC_Climited >= 0.0))
                Qpi[:, ii, None] = GA[ii] * scale_fcn(
                    (DEi_Climited - deltaC_pos),
                    [scale_factor[2]])  # or ?? np.sqrt(DEi_Climited**2 - deltaC_pos**2) ??
                Qp[:, ii] = GA[ii] * scale_fcn(
                    (avg(DEi_Climited, axis=0) - deltaCmu), [scale_factor[2]])

    if ('Qg' in outlist):
        Qg = Qf.copy()
        for ii in range(labti.shape[1]):
            # Qg = gamut area ratio of test vs. reference samples:
            Qg[:, ii] = 100.0 * math.polyarea(
                labti[:, ii, 1], labti[:, ii, 2]) / math.polyarea(
                    labri[:, ii, 1], labri[:, ii, 2])

    if out == 'Qa':
        return Qa
    else:
        return eval(out)
Code example #8
    _hbins = np.floor(((_hr - start_hue * np.pi / 180) / 2 / np.pi) * nhbins)  # [opening reconstructed: the snippet starts mid-statement]
    # because of start_hue the bin numbers can fall outside the 0 : n-1 range:
    _hbins[_hbins >= _nhbins] = _hbins[_hbins >= _nhbins] - _nhbins  # wrap back into 0 : n-1
    _hbins[_hbins < 0] = (_nhbins - 2) - _hbins[_hbins < 0]  # wrap negatives into 0 : n-1

    _jabtii = np.zeros((_nhbins, 3))
    _jabrii = np.zeros((_nhbins, 3))
    for i in range(nhbins):
        if i in _hbins:
            _jabtii[i, :] = _jab_test[_hbins == i, ii, :].mean(axis=0)
            _jabrii[i, :] = _jab_ref[_hbins == i, ii, :].mean(axis=0)
            _DEi[i, ii] = np.sqrt(
                np.power(_jab_test[_hbins == i, ii, :] -
                         _jab_ref[_hbins == i, ii, :], 2).sum(axis=-1)).mean(axis=0)
    _jabt_hj = _jabtii.copy()
    _jabr_hj = _jabrii.copy()

    if normalize_gamut == True:

        #renormalize jab_test, jab_ref using jabrii:
        # if ('jabti' in out) | ('jabri' in out):
        _Cti = np.sqrt(_jab_test[:, ii, 1]**2 + _jab_test[:, ii, 2]**2)
        _Cri = np.sqrt(_jab_ref[:, ii, 1]**2 + _jab_ref[:, ii, 2]**2)
        _hti = _ht.copy()
        _hri = _hr.copy()

        #renormalize jabtii using jabrii:
        _Ct_hj = np.sqrt(_jabtii[:, 1]**2 + _jabtii[:, 2]**2)