Code Example #1
def add_to_cmf_dict(bar=None, cieobs='indv', K=683, M=np.eye(3)):
    """
    Add set of cmfs to _CMF dict.
    
    Args:
        :bar: 
            | None, optional
            | Set of CMFs. None: initializes to empty ndarray.
        :cieobs:
            | 'indv' or str, optional
            | Name of CMF set.
        :K: 
            | 683 (lm/W), optional
            | Conversion factor from radiometric to photometric quantity.
        :M: 
            | np.eye(3), optional
            | Matrix for lms to xyz conversion.

    """
    if bar is None:
        wl3 = getwlr(_WL3)
        bar = np.vstack((wl3, np.empty((3, wl3.shape[0]))))
    _CMF['types'].append(cieobs)
    _CMF[cieobs] = {'bar': bar}
    _CMF[cieobs]['K'] = K
    _CMF[cieobs]['M'] = M
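A brief usage sketch (in the same module context as above, where add_to_cmf_dict and _CMF are available); the flat CMFs below are purely illustrative:

import numpy as np

# wavelengths plus three illustrative (here: flat) colour matching functions:
wl = np.arange(360, 831, 1.0)
bar = np.vstack((wl, np.ones((3, wl.shape[0]))))

add_to_cmf_dict(bar=bar, cieobs='my_cmf', K=683, M=np.eye(3))
print('my_cmf' in _CMF['types'])  # True: the new set is registered under 'my_cmf'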
Code Example #2
def Vrb_mb_to_xyz(Vrb,
                  cieobs=_CIEOBS,
                  scaling=[1, 1],
                  M=None,
                  Minverted=False,
                  **kwargs):
    """
    Convert V,r,b (Macleod-Boynton) color coordinates to XYZ tristimulus values.

    | Macleod Boynton: V = R+G, r = R/V, b = B/V
    | Note that R,G,B ~ L,M,S

    Args:
        :Vrb: 
            | ndarray with V,r,b (Macleod-Boynton) color coordinates
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when getting the default M, which is
            | the xyz to lms conversion matrix.
        :scaling:
            | list of scaling factors for r and b dimensions.
        :M: 
            | None, optional
            | Conversion matrix for going from XYZ to RGB (LMS)
            |   If None, :cieobs: determines the M (function does inversion)
        :Minverted:
            | False, optional
            | Bool that determines whether M should be inverted.

    Returns:
        :xyz: 
            | ndarray with tristimulus values

    Reference:
        1. `MacLeod DI, and Boynton RM (1979).
           Chromaticity diagram showing cone excitation by stimuli of equal luminance.
           J. Opt. Soc. Am. 69, 1183–1186.
           <https://www.osapublishing.org/josa/abstract.cfm?uri=josa-69-8-1183>`_
    """
    Vrb = np2d(Vrb)
    RGB = np.empty(Vrb.shape)
    RGB[..., 0] = Vrb[..., 1] * Vrb[..., 0] / scaling[0]
    RGB[..., 2] = Vrb[..., 2] * Vrb[..., 0] / scaling[1]
    RGB[..., 1] = Vrb[..., 0] - RGB[..., 0]
    if M is None:
        M = _CMF[cieobs]['M']
    if Minverted == False:
        M = np.linalg.inv(M)

    if len(RGB.shape) == 3:
        return np.einsum('ij,klj->kli', M, RGB)
    else:
        return np.einsum('ij,lj->li', M, RGB)
Code Example #3
def xyz_to_Vrb_mb(xyz, cieobs=_CIEOBS, scaling=[1, 1], M=None, **kwargs):
    """
    Convert XYZ tristimulus values to V,r,b (Macleod-Boynton) color coordinates.

    | Macleod Boynton: V = R+G, r = R/V, b = B/V
    | Note that R,G,B ~ L,M,S

    Args:
        :xyz: 
            | ndarray with tristimulus values
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set to use when getting the default M, which is
            | the xyz to lms conversion matrix.
        :scaling:
            | list of scaling factors for r and b dimensions.
        :M: 
            | None, optional
            | Conversion matrix for going from XYZ to RGB (LMS)
            |   If None, :cieobs: determines the M (function does inversion)

    Returns:
        :Vrb: 
            | ndarray with V,r,b (Macleod-Boynton) color coordinates

    Reference:
        1. `MacLeod DI, and Boynton RM (1979).
           Chromaticity diagram showing cone excitation by stimuli of equal luminance.
           J. Opt. Soc. Am. 69, 1183–1186.
           <https://www.osapublishing.org/josa/abstract.cfm?uri=josa-69-8-1183>`_
    """
    xyz = np2d(xyz)

    if M is None:
        M = _CMF[cieobs]['M']

    if len(xyz.shape) == 3:
        RGB = np.einsum('ij,klj->kli', M, xyz)
    else:
        RGB = np.einsum('ij,lj->li', M, xyz)
    Vrb = np.empty(xyz.shape)
    Vrb[..., 0] = RGB[..., 0] + RGB[..., 1]
    Vrb[..., 1] = RGB[..., 0] / Vrb[..., 0] * scaling[0]
    Vrb[..., 2] = RGB[..., 2] / Vrb[..., 0] * scaling[1]
    return Vrb
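The two functions above implement the MacLeod-Boynton relations quoted in their docstrings (V = R+G, r = R/V, b = B/V) and the direct inverse (R = r·V, B = b·V, G = V − R). A minimal standalone NumPy sketch of that roundtrip, independent of the luxpy helpers (np2d, _CMF), with arbitrary illustrative cone excitations:

import numpy as np

RGB = np.array([[40.0, 30.0, 5.0]])  # illustrative R,G,B (~L,M,S) cone excitations

# forward: V = R+G, r = R/V, b = B/V
V = RGB[:, 0] + RGB[:, 1]
r = RGB[:, 0] / V
b = RGB[:, 2] / V

# inverse: R = r*V, B = b*V, G = V - R
R, B = r * V, b * V
G = V - R

print(np.allclose(np.stack([R, G, B], axis=-1), RGB))  # True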
Code Example #4
def Yxy_to_xyz(Yxy, **kwargs):
    """
    Convert CIE Yxy chromaticity values to XYZ tristimulus values.

    Args:
        :Yxy: 
            | ndarray with Yxy chromaticity values
            |  (Y value refers to luminance or luminance factor)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    Yxy = np2d(Yxy)
    xyz = np.empty(Yxy.shape)
    xyz[..., 1] = Yxy[..., 0]
    xyz[..., 0] = Yxy[..., 0] * Yxy[..., 1] / Yxy[..., 2]
    xyz[..., 2] = Yxy[..., 0] * (1.0 - Yxy[..., 1] - Yxy[..., 2]) / Yxy[..., 2]
    return xyz
Code Example #5
def xyz_to_lab(xyz, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert XYZ tristimulus values to CIE 1976 L*a*b* (CIELAB) coordinates.

    Args:
        :xyz: 
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :lab: 
            | ndarray with CIE 1976 L*a*b* (CIELAB) color coordinates
    """
    xyz = np2d(xyz)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # get and normalize (X,Y,Z) to white point:
    XYZr = xyz / xyzw

    # Apply cube-root compression:
    fXYZr = XYZr**(1.0 / 3.0)

    # Check for T/Tn <= 0.008856: (Note (24/116)**3 = 0.008856)
    pqr = XYZr <= (24 / 116)**3

    # calculate f(T) for T/Tn <= 0.008856: (Note:(1/3)*((116/24)**2) = 841/108 = 7.787)
    fXYZr[pqr] = ((841 / 108) * XYZr[pqr] + 16.0 / 116.0)

    # calculate L*, a*, b*:
    Lab = np.empty(xyz.shape)
    Lab[..., 0] = 116.0 * (fXYZr[..., 1]) - 16.0
    Lab[pqr[..., 1], 0] = 841 / 108 * 116 * XYZr[pqr[..., 1], 1]
    Lab[..., 1] = 500.0 * (fXYZr[..., 0] - fXYZr[..., 1])
    Lab[..., 2] = 200.0 * (fXYZr[..., 1] - fXYZr[..., 2])
    return Lab
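For reference, the compression used above is f(t) = t^(1/3) for t > (24/116)^3 and f(t) = (841/108)·t + 16/116 otherwise, with L* = 116·f(Y/Yn) − 16, a* = 500·(f(X/Xn) − f(Y/Yn)), b* = 200·(f(Y/Yn) − f(Z/Zn)). A small standalone sketch with illustrative tristimulus values and an approximate D65 white point:

import numpy as np

def f(t):
    # CIELAB compression: cube root above the knee, linear segment below
    return np.where(t > (24/116)**3, t**(1/3), (841/108)*t + 16/116)

xyz  = np.array([41.2, 21.3, 1.9])       # illustrative tristimulus values
xyzw = np.array([95.04, 100.0, 108.88])  # approximate D65 white point

fX, fY, fZ = f(xyz / xyzw)
L, a, b = 116*fY - 16, 500*(fX - fY), 200*(fY - fZ)
print(L, a, b)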
Code Example #6
def Yuv_to_xyz(Yuv, **kwargs):
    """
    Convert CIE 1976 Yu'v' chromaticity values to XYZ tristimulus values.

    Args:
        :Yuv: 
            | ndarray with CIE 1976 Yu'v' chromaticity values
            |  (Y value refers to luminance or luminance factor)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    Yuv = np2d(Yuv)
    xyz = np.empty(Yuv.shape)
    xyz[..., 1] = Yuv[..., 0]
    xyz[..., 0] = Yuv[..., 0] * (9.0 * Yuv[..., 1]) / (4.0 * Yuv[..., 2])
    xyz[..., 2] = Yuv[..., 0] * (12.0 - 3.0 * Yuv[..., 1] -
                                 20.0 * Yuv[..., 2]) / (4.0 * Yuv[..., 2])
    return xyz
Code Example #7
def xyz_to_Yuv(xyz, **kwargs):
    """
    Convert XYZ tristimulus values to CIE 1976 Yu'v' chromaticity values.

    Args:
        :xyz: 
            | ndarray with tristimulus values

    Returns:
        :Yuv: 
            | ndarray with CIE 1976 Yu'v' chromaticity values
            |  (Y value refers to luminance or luminance factor)
    """
    xyz = np2d(xyz)
    Yuv = np.empty(xyz.shape)
    denom = xyz[..., 0] + 15.0 * xyz[..., 1] + 3.0 * xyz[..., 2]
    Yuv[..., 0] = xyz[..., 1]
    Yuv[..., 1] = 4.0 * xyz[..., 0] / denom
    Yuv[..., 2] = 9.0 * xyz[..., 1] / denom
    return Yuv
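Together with Yuv_to_xyz above, this implements the CIE 1976 UCS relations u' = 4X/(X + 15Y + 3Z), v' = 9Y/(X + 15Y + 3Z) and their inverse X = Y·9u'/(4v'), Z = Y·(12 − 3u' − 20v')/(4v'). A quick standalone roundtrip check with illustrative numbers:

import numpy as np

X, Y, Z = 41.2, 21.3, 1.9  # illustrative tristimulus values

# forward: XYZ -> Y, u', v'
denom = X + 15.0*Y + 3.0*Z
u, v = 4.0*X/denom, 9.0*Y/denom

# inverse: Y, u', v' -> XYZ
X2 = Y * 9.0*u / (4.0*v)
Z2 = Y * (12.0 - 3.0*u - 20.0*v) / (4.0*v)

print(np.allclose([X2, Z2], [X, Z]))  # True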
Code Example #8
def xyz_to_Yxy(xyz, **kwargs):
    """
    Convert XYZ tristimulus values to CIE Yxy chromaticity values.

    Args:
        :xyz: 
            | ndarray with tristimulus values

    Returns:
        :Yxy: 
            | ndarray with Yxy chromaticity values
            |  (Y value refers to luminance or luminance factor)
    """
    xyz = np2d(xyz)
    Yxy = np.empty(xyz.shape)
    sumxyz = xyz[..., 0] + xyz[..., 1] + xyz[..., 2]
    Yxy[..., 0] = xyz[..., 1]
    Yxy[..., 1] = xyz[..., 0] / sumxyz
    Yxy[..., 2] = xyz[..., 1] / sumxyz
    return Yxy
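xyz_to_Yxy and Yxy_to_xyz (shown earlier) are mutual inverses for y ≠ 0: x = X/(X+Y+Z), y = Y/(X+Y+Z), and back X = Y·x/y, Z = Y·(1 − x − y)/y. A minimal standalone check with illustrative values:

import numpy as np

xyz = np.array([41.2, 21.3, 1.9])  # illustrative tristimulus values

s = xyz.sum()
Y, x, y = xyz[1], xyz[0]/s, xyz[1]/s

xyz_back = np.array([Y*x/y, Y, Y*(1.0 - x - y)/y])
print(np.allclose(xyz_back, xyz))  # True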
Code Example #9
def lab_to_xyz(lab, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*a*b* (CIELAB) color coordinates to XYZ tristimulus values.

    Args:
        :lab: 
            | ndarray with CIE 1976 L*a*b* (CIELAB) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    lab = np2d(lab)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # make xyzw same shape as data:
    xyzw = xyzw * np.ones(lab.shape)

    # get L*, a*, b* and Xw, Yw, Zw:
    fXYZ = np.empty(lab.shape)
    fXYZ[..., 1] = (lab[..., 0] + 16.0) / 116.0
    fXYZ[..., 0] = lab[..., 1] / 500.0 + fXYZ[..., 1]
    fXYZ[..., 2] = fXYZ[..., 1] - lab[..., 2] / 200.0

    # apply 3rd power:
    xyz = (fXYZ**3.0) * xyzw

    # Now calculate T where T/Tn is below the knee point:
    pqr = fXYZ <= (24 / 116)  # knee point: ((24/116)**3)**(1/3) = 24/116
    xyz[pqr] = np.squeeze(xyzw[pqr] * ((fXYZ[pqr] - 16.0 / 116.0) /
                                       (841 / 108)))

    return xyz
Code Example #10
def _get_distance_matrix_grouping(*X, metric='euclidean', Dscale=1):
    """ Get distance matrix (skbio format) and grouping indexing array from raw data"""
    # Create long format data array and grouping indices:
    ni = np.empty((len(X), ), dtype=int)
    for i, Xi in enumerate(X):
        ni[i] = Xi.shape[0]
        if i == 0:
            XY = Xi
        else:
            XY = np.vstack((XY, Xi))
    grouping = [[i] * n for i, n in enumerate(ni)]
    grouping = list(itertools.chain(*grouping))

    # Calculate pairwise distances:
    D = scipy.spatial.distance.pdist(XY, metric=metric) * Dscale
    Dsq = scipy.spatial.distance.squareform(D)

    # Get skbio distance matrix:
    Dm = skbio.stats.distance.DistanceMatrix(Dsq)

    return Dm, grouping
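A minimal sketch of the same idea for two groups using only SciPy (the final wrapping in skbio.stats.distance.DistanceMatrix is omitted); the data are random and purely illustrative:

import itertools
import numpy as np
import scipy.spatial.distance

rng = np.random.default_rng(0)
X1, X2 = rng.normal(size=(5, 3)), rng.normal(size=(7, 3))  # two groups of samples

# long-format data and group labels (0 for rows of X1, 1 for rows of X2):
XY = np.vstack((X1, X2))
grouping = list(itertools.chain(*[[i] * Xi.shape[0] for i, Xi in enumerate((X1, X2))]))

# condensed pairwise distances -> square distance matrix:
Dsq = scipy.spatial.distance.squareform(
    scipy.spatial.distance.pdist(XY, metric='euclidean'))
print(Dsq.shape, len(grouping))  # (12, 12) 12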
Code Example #11
def xyz_to_wuv(xyz, xyzw=_COLORTF_DEFAULT_WHITE_POINT, **kwargs):
    """
    Convert XYZ tristimulus values to CIE 1964 U*V*W* color space coordinates.

    Args:
        :xyz: 
            | ndarray with tristimulus values
        :xyzw: 
            | ndarray with tristimulus values of white point, optional
            |  (Defaults to luxpy._COLORTF_DEFAULT_WHITE_POINT)

    Returns:
        :wuv: 
            | ndarray with W*U*V* values
    """
    Yuv = xyz_to_Yuv(np2d(xyz))  # convert to cie 1976 u'v'
    Yuvw = xyz_to_Yuv(np2d(xyzw))
    wuv = np.empty(xyz.shape)
    wuv[..., 0] = 25.0 * (Yuv[..., 0]**(1 / 3)) - 17.0
    wuv[..., 1] = 13.0 * wuv[..., 0] * (Yuv[..., 1] - Yuvw[..., 1])
    wuv[..., 2] = 13.0 * wuv[..., 0] * (Yuv[..., 2] - Yuvw[..., 2]) * (
        2.0 / 3.0)  #*(2/3) to convert to cie 1960 u, v
    return wuv
Code Example #12
def wuv_to_xyz(wuv, xyzw=_COLORTF_DEFAULT_WHITE_POINT, **kwargs):
    """
    Convert CIE 1964 U*V*W* color space coordinates to XYZ tristimulus values.

    Args:
        :wuv: 
            | ndarray with W*U*V* values
        :xyzw: 
            | ndarray with tristimulus values of white point, optional
            |  (Defaults to luxpy._COLORTF_DEFAULT_WHITE_POINT)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    wuv = np2d(wuv)
    Yuvw = xyz_to_Yuv(xyzw)  # convert to cie 1976 u'v'
    Yuv = np.empty(wuv.shape)
    Yuv[..., 0] = ((wuv[..., 0] + 17.0) / 25.0)**3.0
    Yuv[..., 1] = Yuvw[..., 1] + wuv[..., 1] / (13.0 * wuv[..., 0])
    Yuv[..., 2] = Yuvw[..., 2] + wuv[..., 2] / (13.0 * wuv[..., 0]) * (
        3.0 / 2.0)  # convert to cie 1960 u, v
    return Yuv_to_xyz(Yuv)
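The 1964 U*V*W* space uses the CIE 1960 (u, v) diagram, where u = u' and v = (2/3)·v'; that is where the 2/3 and 3/2 factors in the two functions above come from. A scalar sketch of the forward relations W* = 25·Y^(1/3) − 17, U* = 13·W*·(u − uw), V* = 13·W*·(v − vw), with illustrative values:

Y = 21.3                                       # illustrative luminance factor
up, vp, upw, vpw = 0.45, 0.52, 0.1978, 0.4683  # illustrative u'v' of test; approx. D65 white

u, v = up, (2.0/3.0)*vp                        # CIE 1960 u,v of the test point
uw, vw = upw, (2.0/3.0)*vpw                    # CIE 1960 u,v of the white point

W = 25.0*Y**(1.0/3.0) - 17.0
U = 13.0*W*(u - uw)
V = 13.0*W*(v - vw)
print(W, U, V)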
Code Example #13
def _run_monte_carlo_stats(test_stat_function, grouping, subjects,
                           permutations, paired):
    """Run stat test and compute significance with Monte Carlo permutations."""
    if permutations < 0:
        raise ValueError(
            "Number of permutations must be greater than or equal to zero.")

    stat, effect_sizes = test_stat_function(grouping, subjects, paired)

    p_value = np.nan
    if permutations > 0:
        perm_stats = np.empty(permutations, dtype=np.float64)

        for i in range(permutations):
            perm_grouping, perm_subjects = _permutate_grouping(grouping,
                                                               subjects,
                                                               paired=paired)
            perm_stats[i], _ = test_stat_function(perm_grouping, perm_subjects,
                                                  paired)
        stat, effect_sizes = test_stat_function(grouping, subjects, paired)
        p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)

    return stat, p_value, effect_sizes
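The p-value above is the standard Monte Carlo estimate p = (#{permuted statistics ≥ observed} + 1) / (permutations + 1). A self-contained toy version of the same idea, using a difference of group means as a stand-in for test_stat_function (which is not shown in this snippet):

import numpy as np

rng = np.random.default_rng(1)
a = rng.normal(0.0, 1.0, 30)   # group A
b = rng.normal(0.5, 1.0, 30)   # group B (shifted mean)

stat = b.mean() - a.mean()     # observed statistic
pooled = np.concatenate([a, b])

permutations = 999
perm_stats = np.empty(permutations)
for i in range(permutations):
    perm = rng.permutation(pooled)                      # shuffle group labels
    perm_stats[i] = perm[30:].mean() - perm[:30].mean()

p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)
print(stat, p_value)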
Code Example #14
File: demo_opt.py  Project: simongr2/luxpy
def dtlz2_(x, M):
    """
    DTLZ2 multi-objective function
    
    | This function represents a hyper-sphere.
    | Using k = 10, the number of dimensions must be n = (M - 1) + k.
    | The Pareto optimal solutions are obtained when the last k variables of x
    | are equal to 0.5.
    
    Args:
        :x: 
            | an n x mu ndarray with mu points and n dimensions
        :M: 
            | a scalar with the number of objectives
    
    Returns:
        :f: 
            | an M x mu ndarray with mu points and their M objectives computed at
            | the input
    """
    k = 10
    
    # Error check: the number of dimensions must be M-1+k
    n = (M-1) + k  # this is the default
    if x.shape[0] != n:
       raise Exception('Using k = 10, it is required that the number of dimensions be n = (M - 1) + k = {:1.0f} in this case.'.format(n))
    
    xm = x[(n-k):,:].copy() #xm contains the last k variables
    g = ((xm - 0.5)**2).sum(axis = 0)
    
    # Computes the functions:
    f = np.empty((M,x.shape[1]))
    f[0,:] = (1 + g)*np.prod(np.cos(np.pi/2*x[:(M-1),:]), axis = 0)
    for ii in range(1,M-1):
        f[ii,:] = (1 + g) * np.prod(np.cos(np.pi/2*x[:(M-ii-1),:]), axis = 0) * np.sin(np.pi/2*x[M-ii-1,:])
    f[M-1,:] = (1 + g) * np.sin(np.pi/2*x[0,:])
    return f
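A quick usage sketch of dtlz2_ as defined above (it assumes numpy is imported as np, as in the snippets): with M = 3 objectives and k = 10, points need n = 12 dimensions, and setting the last k variables to 0.5 places them on the Pareto front, where the objective vectors lie on the unit hyper-sphere.

M, k = 3, 10
n = (M - 1) + k                  # = 12 dimensions required by dtlz2_
mu = 5                           # number of points

x = np.random.rand(n, mu)
x[(n - k):, :] = 0.5             # last k variables at 0.5 -> Pareto-optimal points
f = dtlz2_(x, M)

print(f.shape)                               # (3, 5)
print(np.allclose((f**2).sum(axis=0), 1.0))  # True: objectives lie on the unit sphere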
Code Example #15
def xyz_to_luv(xyz, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert XYZ tristimulus values to CIE 1976 L*u*v* (CIELUV) coordinates.

    Args:
        :xyz: 
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :luv: 
            | ndarray with CIE 1976 L*u*v* (CIELUV) color coordinates
    """
    xyz = np2d(xyz)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # Calculate u',v' of test and white:
    Yuv = xyz_to_Yuv(xyz)
    Yuvw = xyz_to_Yuv(todim(xyzw,
                            xyz.shape))  # todim: make xyzw same shape as xyz

    #uv1976 to CIELUV
    luv = np.empty(xyz.shape)
    YdivYw = Yuv[..., 0] / Yuvw[..., 0]
    luv[..., 0] = 116.0 * YdivYw**(1.0 / 3.0) - 16.0
    p = np.where(YdivYw <= (6.0 / 29.0)**3.0)
    luv[..., 0][p] = ((29.0 / 3.0)**3.0) * YdivYw[p]
    luv[..., 1] = 13.0 * luv[..., 0] * (Yuv[..., 1] - Yuvw[..., 1])
    luv[..., 2] = 13.0 * luv[..., 0] * (Yuv[..., 2] - Yuvw[..., 2])
    return luv
Code Example #16
def luv_to_xyz(luv, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*u*v* (CIELUV) coordinates to XYZ tristimulus values.

    Args:
        :luv: 
            | ndarray with CIE 1976 L*u*v* (CIELUV) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs: 
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """
    luv = np2d(luv)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # Make xyzw same shape as luv and convert to Yuv:
    Yuvw = todim(xyz_to_Yuv(xyzw), luv.shape, equal_shape=True)

    # calculate u'v' from u*,v*:
    Yuv = np.empty(luv.shape)
    Yuv[..., 1:3] = (luv[..., 1:3] / (13 * luv[..., :1])) + Yuvw[..., 1:3]
    Yuv[Yuv[..., 0] == 0, 1:3] = 0

    Yuv[..., 0] = Yuvw[..., 0] * (((luv[..., 0] + 16.0) / 116.0)**3.0)
    p = np.where((Yuv[..., 0] / Yuvw[..., 0]) < ((6.0 / 29.0)**3.0))
    Yuv[..., 0][p] = Yuvw[..., 0][p] * (luv[..., 0][p] / ((29.0 / 3.0)**3.0))

    return Yuv_to_xyz(Yuv)
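As in CIELAB, L* above follows the 116·(Y/Yn)^(1/3) − 16 cube-root law (with a linear segment below (6/29)^3), while u* = 13·L*·(u' − u'w) and v* = 13·L*·(v' − v'w). A scalar roundtrip sketch with illustrative values (all above the knee point):

Y, Yw = 21.3, 100.0
up, vp, upw, vpw = 0.45, 0.52, 0.1978, 0.4683  # illustrative u'v' of test; approx. D65 white

L = 116.0*(Y/Yw)**(1.0/3.0) - 16.0
u = 13.0*L*(up - upw)
v = 13.0*L*(vp - vpw)

# invert:
Y2  = Yw*((L + 16.0)/116.0)**3.0
up2 = u/(13.0*L) + upw
vp2 = v/(13.0*L) + vpw
print(abs(Y2 - Y) < 1e-9, abs(up2 - up) < 1e-12, abs(vp2 - vp) < 1e-12)  # True True True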
Code Example #17
def cie2006cmfsEx(age = 32,fieldsize = 10, wl = None,\
                  var_od_lens = 0, var_od_macula = 0, \
                  var_od_L = 0, var_od_M = 0, var_od_S = 0,\
                  var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,\
                  out = 'LMS', allow_negative_values = False):
    """
    Generate Individual Observer CMFs (cone fundamentals) 
    based on CIE2006 cone fundamentals and published literature 
    on observer variability in color matching and in physiological parameters.
    
    Args:
        :age: 
            | 32 or float or int, optional
            | Observer age
        :fieldsize:
            | 10, optional
            | Field size of stimulus in degrees (between 2° and 10°).
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :var_od_lens:
            | 0, optional
            | Std Dev. in peak optical density [%] of lens.
        :var_od_macula:
            | 0, optional
            | Std Dev. in peak optical density [%] of macula.
        :var_od_L:
            | 0, optional
            | Std Dev. in peak optical density [%] of L-cone.
        :var_od_M:
            | 0, optional
            | Std Dev. in peak optical density [%] of M-cone.
        :var_od_S:
            | 0, optional
            | Std Dev. in peak optical density [%] of S-cone.
        :var_shft_L:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of L-cone. 
        :var_shft_M:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of M-cone.  
        :var_shft_S:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of S-cone. 
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions
            | should not have negative values.
            | If False: negative values are clipped to zero (X[X<0] = 0).
            
    Returns:
        :returns: 
            | - 'LMS' : ndarray with individual observer area-normalized 
            |           cone fundamentals. Wavelengths have been added.
                
            | [- 'trans_lens': ndarray with lens transmission 
            |      (no wavelengths added, no interpolation)
            |  - 'trans_macula': ndarray with macula transmission 
            |      (no wavelengths added, no interpolation)
            |  - 'sens_photopig' : ndarray with photopigment sens. 
            |      (no wavelengths added, no interpolation)]
            
    References:
         1. `Asano Y, Fairchild MD, and Blondé L (2016). 
         Individual Colorimetric Observer Model. 
         PLoS One 11, 1–19. 
         <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
        
         2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). 
         Color matching experiment for highlighting interobserver variability. 
         Color Res. Appl. 41, 530–539. 
         <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
         
         3. `CIE (2006). 
         Fundamental Chromaticity Diagram with Physiological Axes - Part I 
         (Vienna: CIE). 
         <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 
         
         4. `Asano's Individual Colorimetric Observer Model 
         <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """
    fs = fieldsize
    rmd = _INDVCMF_DATA['rmd'].copy()
    LMSa = _INDVCMF_DATA['LMSa'].copy()
    docul = _INDVCMF_DATA['docul'].copy()

    # field size corrected macular density:
    pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * (
        1 + var_od_macula / 100)  # varied peak optical density of macula
    corrected_rmd = rmd * pkOd_Macula

    # age corrected lens/ocular media density:
    if (age <= 60):
        correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2]
    else:
        correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2]
    correct_lomd = correct_lomd * (1 + var_od_lens / 100
                                   )  # varied overall optical density of lens

    # Peak Wavelength Shift:
    wl_shifted = np.empty(LMSa.shape)
    wl_shifted[0] = _WL + var_shft_L
    wl_shifted[1] = _WL + var_shft_M
    wl_shifted[2] = _WL + var_shft_S

    LMSa_shft = np.empty(LMSa.shape)
    kind = 'cubic'
    LMSa_shft[0] = sp.interpolate.interp1d(wl_shifted[0],
                                           LMSa[0],
                                           kind=kind,
                                           bounds_error=False,
                                           fill_value="extrapolate")(_WL)
    LMSa_shft[1] = sp.interpolate.interp1d(wl_shifted[1],
                                           LMSa[1],
                                           kind=kind,
                                           bounds_error=False,
                                           fill_value="extrapolate")(_WL)
    LMSa_shft[2] = sp.interpolate.interp1d(wl_shifted[2],
                                           LMSa[2],
                                           kind=kind,
                                           bounds_error=False,
                                           fill_value="extrapolate")(_WL)
    #    LMSa[2,np.where(_WL >= _WL_CRIT)] = 0 #np.nan # Not defined above 620nm
    #    LMSa_shft[2,np.where(_WL >= _WL_CRIT)] = 0

    ssw = np.hstack(
        (0, np.sign(np.diff(LMSa_shft[2, :]))
         ))  #detect poor interpolation (sign switch due to instability)
    LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan

    # corrected LMS (no age correction):
    pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (
        1 + var_od_L / 100)  # varied peak optical density of L-cone
    pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (
        1 + var_od_M / 100)  # varied peak optical density of M-cone
    pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * (
        1 + var_od_S / 100)  # varied peak optical density of S-cone

    alpha_lms = 0. * LMSa_shft
    alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0]))
    alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1]))
    alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2]))

    # this fix is required because the above math fails for alpha_lms[2,:]==0
    alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0

    # Corrected to Corneal Incidence:
    lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones(
        alpha_lms.shape)

    # Corrected to Energy Terms:
    lms_bar = lms_barq * _WL

    # Set NaN values to zero:
    lms_bar[np.isnan(lms_bar)] = 0

    # normalized:
    LMS = 100 * lms_bar / np.nansum(lms_bar, axis=1, keepdims=True)

    # Output extra:
    trans_lens = 10**(-correct_lomd)
    trans_macula = 10**(-corrected_rmd)
    sens_photopig = alpha_lms * _WL

    # Add wavelengths:
    LMS = np.vstack((_WL, LMS))

    if ('xyz' in out.lower().split(',')):
        LMS = lmsb_to_xyzb(LMS,
                           fieldsize,
                           out='xyz',
                           allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    # Interpolate/extrapolate:
    if wl is None:
        interpolation = None
    else:
        interpolation = 'cubic'
    LMS = spd(LMS, wl=wl, interpolation=interpolation, norm_type='area')

    if (out == 'LMS'):
        return LMS
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'):
        return LMS, trans_lens, trans_macula, sens_photopig
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'):
        return LMS, trans_lens, trans_macula, sens_photopig, LMSa
    else:
        return eval(out)
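A hedged usage sketch: the function above belongs to luxpy's individual-observer toolbox; the import path below is an assumption and may differ between luxpy versions.

# assumed import path (check against your installed luxpy version):
from luxpy.toolboxes import indvcmf

# LMS cone fundamentals for a 32-year-old observer, 10-degree field,
# with a +2 nm shift of the L-cone peak wavelength:
LMS = indvcmf.cie2006cmfsEx(age=32, fieldsize=10, var_shft_L=2)
print(LMS.shape)  # first row holds wavelengths, the next three rows L, M and S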
Code Example #18
def xyz_to_Ydlep(xyz,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase the speed of the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of the spectrum locus before it turns back on itself in the high wavelength range (~700 nm).
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # (np.float is removed in modern NumPy)

    # flip axes so that the longest dim ends up on axis 0 (the loop below runs over the shorter axis 1):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)

    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hues requiring a complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest enclosing hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = (hslb - hib)
        q1 = np.abs(dh).argmin(axis=0)  # index of closest hue
        sign_q1 = np.sign(dh[q1])[0]
        dh[np.sign(dh) ==
           sign_q1] = 1000000  # set all dh on the same side as q1 to a very large value
        q2 = np.abs(dh).argmin(
            axis=0)  # index of second  closest (enclosing) hue

        # # Test changes to code:
        # print('wls',i, wlsl[q1],wlsl[q2])
        # import matplotlib.pyplot as plt
        # plt.figure()
        # plt.plot(wlsl[:-1],np.diff(xsl[:,0]),'k.-')
        # plt.figure()
        # plt.plot(x[0,i],y[0,i],'k.'); plt.plot(xsl,ysl,'r.-');plt.plot(xsl[q1],ysl[q1],'b.');plt.plot(xsl[q2],ysl[q2],'g.');plt.plot(xsl[-1],ysl[-1],'c+')

        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
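A usage sketch of xyz_to_Ydlep, run in the same module context as the function above (it relies on module globals such as _CMF); a negative dominant wavelength in the result marks a complementary wavelength. Values are illustrative.

import numpy as np

xyz  = np.array([[15.0, 20.0, 40.0]])      # illustrative bluish stimulus
xyzw = np.array([[95.04, 100.0, 108.88]])  # approximate D65 white point

Ydlep = xyz_to_Ydlep(xyz, xyzw=xyzw, cieobs='1931_2')
Y, dom_wl, purity = Ydlep[0]
print(dom_wl, purity)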
Code Example #19
def xyz_to_Ydlep_(xyz,
                  cieobs=_CIEOBS,
                  xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                  flip_axes=False,
                  **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase the speed of the loop in the function.
            |          (a single xyzw is not flipped!)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # (np.float is removed in modern NumPy)

    # flip axes so that the longest dim ends up on axis 0 (the loop below runs over the shorter axis 1):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]
    pmaxlambda = Yxysl[..., 1].argmax()
    maxlambda = wlsl[pmaxlambda]
    maxlambda = 700
    print(np.where(wlsl == maxlambda))
    pmaxlambda = np.where(wlsl == maxlambda)[0][0]
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')
    print(h)
    print('rh', h[0, 0] - h[0, 1])
    print(wlsl[0], wlsl[-1])

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    print('xyz:', xyz)
    for i in range(xyz3.shape[1]):
        print('\ni:', i, h[:, i], hsl_max, hsl_min)
        print(h)
        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hues requiring a complementary wavelength (purple line)
        print('pc', (h[:, i] > hsl_max) & (h[:, i] < hsl_min))
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue
        print('q1q2', q2, q1)

        print('wls:', h[:, i], wlsl[q1], wlsl[q2])
        print('hsls:', hsl[q2, 0], hsl[q1, 0])
        print('d', (wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]),
              (wlsl[q2] - wlsl[q1]) / (hsl[q2, 0] - hsl[q1, 0]))
        print('(h[:,i] - hsl[q1,0])', (h[:, i] - hsl[q1, 0]))
        print('div', np.divide((wlsl[q2] - wlsl[q1]),
                               (hsl[q2, 0] - hsl[q1, 0])))
        print(
            'mult(...)',
            np.multiply((h[:, i] - hsl[q1, 0]),
                        np.divide((wlsl[q2] - wlsl[q1]),
                                  (hsl[q2, 0] - hsl[q1, 0]))))
        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        print('dom', dominantwavelength[:, i])
        dominantwavelength[(dominantwavelength[:,
                                               i] > max(wlsl[q1], wlsl[q2])),
                           i] = max(wlsl[q1], wlsl[q2])
        dominantwavelength[(dominantwavelength[:,
                                               i] < min(wlsl[q1], wlsl[q2])),
                           i] = min(wlsl[q1], wlsl[q2])

        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
Code Example #20
File: ies_tm30_metrics.py  Project: simongr2/luxpy
def spd_to_ies_tm30_metrics(SPD, cri_type = None, \
                            hbins = 16, start_hue = 0.0,\
                            scalef = 100, \
                            vf_model_type = _VF_MODEL_TYPE, \
                            vf_pcolorshift = _VF_PCOLORSHIFT,\
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculates IES TM30 metrics from spectral data.      
      
      Args:
        :SPD:
            | numpy.ndarray with spectral data 
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Non-None values of :hbins:, :start_hue: and :scalef: overwrite 
            | input in cri_type['rg_pars'] 
        :hbins:
            | None or numpy.ndarray with sorted hue bin centers (°), optional
        :start_hue: 
            | None, optional
        :scalef:
            | None, optional
            | Scale factor for reference circle.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues. :VF_pcolorshift: specifies 
            | these hues and chroma level.
        :scale_vf_chroma_to_sample_chroma: 
            | False, optional
            | Scale chroma of reference and test vf fields such that average of 
            | binned reference chroma equals that of the binned sample chroma
            | before calculating hue bin metrics.
            
    Returns:
        :data: 
            | dict with color rendering data:
            | - 'SPD'  : ndarray test SPDs
            | - 'bjabt': ndarray with binned jab data under test SPDs
            | - 'bjabr': ndarray with binned jab data under reference SPDs
            | - 'jabti': ndarray with individual jab data under test SPDs (scaled such that bjabr are on a circle)
            | - 'jabri': ndarray with individual jab data under reference SPDs (scaled such that bjabr are on a circle)
            | - 'hbinnr': ndarray with the hue bin number the samples belong to.
            | - 'cct'  : ndarray with CCT of test SPD
            | - 'duv'  : ndarray with distance to blackbody locus of test SPD
            | - 'Rf'   : ndarray with general color fidelity indices
            | - 'Rg'   : ndarray with gamut area indices
            | - 'Rfi'  : ndarray with specific color fidelity indices
            | - 'Rfhi' : ndarray with local (hue binned) fidelity indices
            | - 'Rcshi': ndarray with local chroma shifts indices
            | - 'Rhshi': ndarray with local hue shifts indices
            | - 'Rt'  : ndarray with general metameric uncertainty index Rt
            | - 'Rti' : ndarray with specific metameric uncertainty indices Rti
            | - 'Rfhi_vf' : ndarray with local (hue binned) fidelity indices 
            |               obtained from VF model predictions at color space
            |               pixel coordinates
            | - 'Rcshi_vf': ndarray with local chroma shifts indices 
            |               (same as above)
            | - 'Rhshi_vf': ndarray with local hue shifts indices 
            |               (same as above)
    """
    if cri_type is None:
        cri_type = 'iesrf'

    #Calculate color rendering measures for SPDs in data:
    out = 'Rf,Rg,cct,duv,Rfi,jabt,jabr,Rfhi,Rcshi,Rhshi,cri_type'
    if isinstance(cri_type, str):  # get dict
        cri_type = copy.deepcopy(_CRI_DEFAULTS[cri_type])
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    Rf, Rg, cct, duv, Rfi, jabt, jabr, Rfhi, Rcshi, Rhshi, cri_type = spd_to_cri(
        SPD, cri_type=cri_type, out=out)
    rg_pars = cri_type['rg_pars']

    #Calculate Metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(SPD,
                                 cri_type=cri_type,
                                 model_type=vf_model_type,
                                 cspace=cri_type['cspace'],
                                 sampleset=eval(cri_type['sampleset']),
                                 pool=False,
                                 pcolorshift=vf_pcolorshift,
                                 vfcolor=0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti']
                    for i in range(len(dataVF))][0])

    # Get normalized and sliced sample data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
        rg_pars[x] for x in sorted(rg_pars.keys())
    ]
    normalized_chroma_ref = scalef
    # np.sqrt((jabr[...,1]**2 + jabr[...,2]**2)).mean(axis = 0).mean()

    if scale_vf_chroma_to_sample_chroma == True:
        normalize_gamut = False
        bjabt, bjabr = gamut_slicer(
            jabt,
            jabr,
            out='jabt,jabr',
            nhbins=nhbins,
            start_hue=start_hue,
            normalize_gamut=normalize_gamut,
            normalized_chroma_ref=normalized_chroma_ref,
            close_gamut=True)
        Cr_s = (np.sqrt(bjabr[:-1, ..., 1]**2 + bjabr[:-1, ..., 2]**2)).mean(
            axis=0)  # for rescaling vector field average reference chroma

    normalize_gamut = True  #(for plotting)
    bjabt, bjabr, binnrs, jabti, jabri = gamut_slicer(
        jabt,
        jabr,
        out='jabt,jabr,binnr,jabti,jabri',
        nhbins=nhbins,
        start_hue=start_hue,
        normalize_gamut=normalize_gamut,
        normalized_chroma_ref=normalized_chroma_ref,
        close_gamut=True)

    Rfhi_vf = np.empty(Rfhi.shape)
    Rcshi_vf = np.empty(Rcshi.shape)
    Rhshi_vf = np.empty(Rhshi.shape)
    for i in range(cct.shape[0]):

        # Get normalized and sliced VF data for hue specific metrics:
        vfjabt = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axt'].shape),
             dataVF[i]['fielddata']['vectorfield']['axt'],
             dataVF[i]['fielddata']['vectorfield']['bxt']))
        vfjabr = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axr'].shape),
             dataVF[i]['fielddata']['vectorfield']['axr'],
             dataVF[i]['fielddata']['vectorfield']['bxr']))
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())
        ]
        vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
            vfjabt,
            vfjabr,
            out='jabt,jabr,DEi',
            nhbins=nhbins,
            start_hue=start_hue,
            normalize_gamut=normalize_gamut,
            normalized_chroma_ref=normalized_chroma_ref,
            close_gamut=False)

        if scale_vf_chroma_to_sample_chroma == True:
            #rescale vfbjabt and vfbjabr to same chroma level as bjabr.
            Cr_vfb = np.sqrt(vfbjabr[..., 1]**2 + vfbjabr[..., 2]**2)
            Cr_vf = np.sqrt(vfjabr[..., 1]**2 + vfjabr[..., 2]**2)
            hr_vf = np.arctan2(vfjabr[..., 2], vfjabr[..., 1])
            Ct_vf = np.sqrt(vfjabt[..., 1]**2 + vfjabt[..., 2]**2)
            ht_vf = np.arctan2(vfjabt[..., 2], vfjabt[..., 1])
            fC = Cr_s.mean() / Cr_vfb.mean()
            vfjabr[..., 1] = fC * Cr_vf * np.cos(hr_vf)
            vfjabr[..., 2] = fC * Cr_vf * np.sin(hr_vf)
            vfjabt[..., 1] = fC * Ct_vf * np.cos(ht_vf)
            vfjabt[..., 2] = fC * Ct_vf * np.sin(ht_vf)
            vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
                vfjabt,
                vfjabr,
                out='jabt,jabr,DEi',
                nhbins=nhbins,
                start_hue=start_hue,
                normalize_gamut=normalize_gamut,
                normalized_chroma_ref=normalized_chroma_ref,
                close_gamut=False)

        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        vfRfhi, vfRcshi, vfRhshi = jab_to_rhi(
            jabt=vfbjabt,
            jabr=vfbjabr,
            DEi=vfbDEi,
            cri_type=cri_type,
            scale_factor=scale_factor,
            scale_fcn=scale_fcn,
            use_bin_avg_DEi=True
        )  # [:-1,...] removes last row from jab as this was added to close the gamut.

        Rfhi_vf[:, i:i + 1] = vfRfhi
        Rhshi_vf[:, i:i + 1] = vfRhshi
        Rcshi_vf[:, i:i + 1] = vfRcshi

    # Create dict with CRI info:
    data = {'SPD' : SPD, 'cct' : cct, 'duv' : duv, 'bjabt' : bjabt, 'bjabr' : bjabr,\
            'jabti':jabti, 'jabri':jabri, 'hbinnr':binnrs,\
           'Rf' : Rf, 'Rg' : Rg, 'Rfi': Rfi, 'Rfhi' : Rfhi, 'Rcshi' : Rcshi, 'Rhshi' : Rhshi, \
           'Rt' : Rt, 'Rti' : Rti,  'Rfhi_vf' : Rfhi_vf, 'Rfcshi_vf' : Rcshi_vf, 'Rfhshi_vf' : Rhshi_vf, \
           'dataVF' : dataVF,'cri_type' : cri_type,
           # 'jabt_':jabt_,'jabr_':jabr_
           }
    return data
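A hedged usage sketch, assuming luxpy is installed and that this function is exposed in the color-rendition subpackage as shown (the exact access path may differ between versions); D65 is used as test SPD only because it ships with luxpy.

import luxpy as lx

spd = lx._CIE_ILLUMINANTS['D65']                      # an example SPD shipped with luxpy
data = lx.cri.spd_to_ies_tm30_metrics(spd, hbins=16)  # dict with TM30 data (see docstring above)
print(data['Rf'], data['Rg'])                         # general fidelity and gamut-area indices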
Code Example #21
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
        :xyzw: 
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase the speed of the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of the spectrum locus before it turns back on itself in the high wavelength range (~700 nm).

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)  # (np.float is removed in modern NumPy)

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom:
        #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:])) #abs because dom<0 --> complementary wl
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) ==
            sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(
            axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    else:
        Yxy = Yxy.transpose((0, 1, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
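Ydlep_to_xyz is the inverse of xyz_to_Ydlep shown earlier, so a roundtrip through both (same cieobs and xyzw, same module context) should closely recover the input; a small hedged sketch:

import numpy as np

xyz  = np.array([[30.0, 40.0, 20.0]])      # illustrative tristimulus values
xyzw = np.array([[95.04, 100.0, 108.88]])  # approximate D65 white point

Ydlep    = xyz_to_Ydlep(xyz, xyzw=xyzw, cieobs='1931_2')
xyz_back = Ydlep_to_xyz(Ydlep, xyzw=xyzw, cieobs='1931_2')

print(xyz)
print(xyz_back)  # should be close to the input xyz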