Example #1
def _polyarea3D(xyz):
    # Gamut area spanned by 4 points (R, Y, G, B) in 3D color space:
    # the summed Heron areas of triangles (R, Y, G) and (R, G, B).

    x, y, z = asplit(xyz)  # coordinate vectors of the 4 points

    # edge lengths:
    RY = np.sqrt((x[0] - x[1])**2 + (y[0] - y[1])**2 + (z[0] - z[1])**2)
    YG = np.sqrt((x[1] - x[2])**2 + (y[1] - y[2])**2 + (z[1] - z[2])**2)
    GR = np.sqrt((x[2] - x[0])**2 + (y[2] - y[0])**2 + (z[2] - z[0])**2)
    RB = np.sqrt((x[0] - x[3])**2 + (y[0] - y[3])**2 + (z[0] - z[3])**2)
    BG = np.sqrt((x[2] - x[3])**2 + (y[2] - y[3])**2 + (z[2] - z[3])**2)

    # semi-perimeters and Heron areas of the two triangles:
    S1 = (RY + YG + GR) / 2
    S2 = (RB + BG + GR) / 2
    GA1 = np.sqrt(S1 * (S1 - RY) * (S1 - YG) * (S1 - GR))
    GA2 = np.sqrt(S2 * (S2 - RB) * (S2 - BG) * (S2 - GR))
    GA = GA1 + GA2
    return GA
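
For context, a minimal standalone sketch of the same computation, assuming four 3D points ordered R, Y, G, B and plain NumPy in place of luxpy's asplit helper:

import numpy as np

def polyarea3D(points):
    # Summed Heron areas of triangles (R, Y, G) and (R, G, B),
    # i.e. the quadrilateral split along the R-G diagonal.
    R, Y, G, B = points
    def tri_area(p, q, r):
        a = np.linalg.norm(p - q)
        b = np.linalg.norm(q - r)
        c = np.linalg.norm(r - p)
        s = (a + b + c) / 2  # semi-perimeter
        return np.sqrt(s * (s - a) * (s - b) * (s - c))
    return tri_area(R, Y, G) + tri_area(R, G, B)

pts = np.array([[1.0, 0.0, 0.0], [1.0, 1.0, 0.0],
                [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])  # hypothetical R, Y, G, B
print(polyarea3D(pts))
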
Example #2
def smet2017_D(xyzw, Dmax=None):
    """
    Calculate the degree of adaptation based on chromaticity following 
    Smet et al. (2017) 
    
    Args:
        :xyzw: 
            | ndarray with white point data (CIE 1964 10° XYZs!!)
        :Dmax:
            | None or float, optional
            | Defaults to 0.6539 (max D obtained under experimental conditions, 
            | but probably too low due to dark surround leading to incomplete 
            | chromatic adaptation even for neutral illuminants 
            | resulting in a background luminance (fov ~50°) of 760 cd/m²)
            
    Returns:
        :D: 
            | ndarray with degrees of adaptation
    
    References: 
        1. `Smet, K.A.G.*, Zhai, Q., Luo, M.R., Hanselaer, P., (2017), 
        Study of chromatic adaptation using memory color matches, 
        Part II: colored illuminants, 
        Opt. Express, 25(7), pp. 8350-8365.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-25-7-8350&origin=search>`_

    """

    # Convert xyzw to log-compressed MacLeod-Boynton coordinates:
    Vl, rl, bl = asplit(
        np.log(xyz_to_Vrb_mb(xyzw, M=_MCATS['hpe']))
    )  # force use of HPE matrix (which was the one used when deriving the model parameters!!)

    # apply Dmodel (technically only for cieobs = '1964_10')
    pD = (1.0e7) * np.array([
        0.021081326530436, 4.751255762876845, -0.000000071025181,
        -0.000000063627042, -0.146952821492957, 3.117390441655821
    ])  #D model parameters for gaussian model in log(MB)-space (july 2016)
    if Dmax is None:
        Dmax = 0.6539  # max D obtained under experimental conditions (probably too low due to dark surround leading to incomplete chromatic adaptation even for neutral illuminants resulting in background luminance (fov~50°) of 760 cd/m²)
    return Dmax * math.bvgpdf(x=rl,
                              y=bl,
                              mu=pD[2:4],
                              sigmainv=np.linalg.inv(
                                  np.array([[pD[0], pD[4]], [pD[4], pD[1]]
                                            ])))**pD[5]
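
The core of the model is a scaled, exponentiated bivariate Gaussian. A hedged standalone sketch, assuming math.bvgpdf(x, y, mu, sigmainv) evaluates the unnormalized Gaussian exp(-0.5*d2), with d2 the squared mahalanobis distance (cf. mahalanobis2 in Example #4):

import numpy as np

def degree_of_adaptation(rl, bl, pD, Dmax=0.6539):
    # pD layout as used above: [s_rr, s_bb, mu_r, mu_b, s_rb, power]
    mu = pD[2:4]
    sigmainv = np.linalg.inv(np.array([[pD[0], pD[4]], [pD[4], pD[1]]]))
    dx, dy = rl - mu[0], bl - mu[1]
    d2 = (sigmainv[0, 0] * dx**2 + sigmainv[1, 1] * dy**2
          + 2.0 * sigmainv[0, 1] * dx * dy)  # squared mahalanobis distance
    return Dmax * np.exp(-0.5 * d2)**pD[5]   # equals Dmax at the Gaussian's center
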
Example #3
def spd_to_mcri(SPD, D = 0.9, E = None, Yb = 20.0, out = 'Rm', wl = None):
    """
    Calculates the MCRI or Memory Color Rendition Index, Rm
    
    Args: 
        :SPD: 
            | ndarray with spectral data (can be multiple SPDs, 
            |  first axis are the wavelengths)
        :D: 
            | 0.9, optional
            | Degree of adaptation.
        :E: 
            | None, optional
            | Illuminance in lux 
            |  (used to calculate La = (Yb/100)*(E/pi) to then calculate D 
            |  following the 'cat02' model). 
            | If None: the degree is determined by :D:
            |  If (:E: is not None) & (:Yb: is None):  :E: is assumed to contain 
            |  the adapting field luminance La (cd/m²).
        :Yb: 
            | 20.0, optional
            | Luminance factor of background. (used when calculating La from E)
            | If None, E contains La (cd/m²).
        :out: 
            | 'Rm' or str, optional
            | Specifies requested output (e.g. 'Rm,Rmi,cct,duv') 
        :wl: 
            | None, optional
            | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. 
            | None: default to no interpolation   
    
    Returns:
        :returns: 
            | float or ndarray with MCRI Rm for :out: 'Rm'
            | Other output is also possible by changing the :out: str value.        
          
    References:
        1. `K.A.G. Smet, W.R. Ryckaert, M.R. Pointer, G. Deconinck, P. Hanselaer, (2012),
        “A memory colour quality metric for white light sources,” 
        Energy Build., vol. 49, no. C, pp. 216–225.
        <http://www.sciencedirect.com/science/article/pii/S0378778812000837>`_
    """
    SPD = np2d(SPD)
    
    if wl is not None: 
        SPD = spd(data = SPD, interpolation = _S_INTERP_TYPE, kind = 'np', wl = wl)
    
    
    # unpack metric default values:
    avg, catf, cieobs, cri_specific_pars, cspace, ref_type, rg_pars, sampleset, scale = [_MCRI_DEFAULTS[x] for x in sorted(_MCRI_DEFAULTS.keys())] 
    similarity_ai = cri_specific_pars['similarity_ai']
    Mxyz2lms = cspace['Mxyz2lms'] 
    scale_fcn = scale['fcn']
    scale_factor = scale['cfactor']
    sampleset = eval(sampleset)
    
    # A. calculate xyz:
    xyzti, xyztw = spd_to_xyz(SPD, cieobs = cieobs['xyz'],  rfl = sampleset, out = 2)
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyztw, cieobs = cieobs['cct'], out = 'cct,duv',mode = 'lut')
        
    # B. perform chromatic adaptation to adopted whitepoint of ipt color space, i.e. D65:
    if catf is not None:
        Dtype_cat, F, Yb_cat, catmode_cat, cattype_cat, mcat_cat, xyzw_cat = [catf[x] for x in sorted(catf.keys())]
        
        # calculate degree of adaptation, D:
        if E is not None:
            if Yb is not None:
                La = (Yb/100.0)*(E/np.pi)
            else:
                La = E
            D = cat.get_degree_of_adaptation(Dtype = Dtype_cat, F = F, La = La)
        else:
            Dtype_cat = None # direct input of D

        if (E is None) and (D is None):
            D = 1.0 # set degree of adaptation to 1 !
        if D > 1.0: D = 1.0
        if D < 0.6: D = 0.6 # put a limit on the lowest D

        # apply cat:
        xyzti = cat.apply(xyzti, cattype = cattype_cat, catmode = catmode_cat, xyzw1 = xyztw,xyzw0 = None, xyzw2 = xyzw_cat, D = D, mcat = [mcat_cat], Dtype = Dtype_cat)
        xyztw = cat.apply(xyztw, cattype = cattype_cat, catmode = catmode_cat, xyzw1 = xyztw,xyzw0 = None, xyzw2 = xyzw_cat, D = D, mcat = [mcat_cat], Dtype = Dtype_cat)
     
    # C. convert xyz to ipt and split:
    ipt = xyz_to_ipt(xyzti, cieobs = cieobs['xyz'], M = Mxyz2lms) #input matrix as published in Smet et al. 2012, Energy and Buildings
    I,P,T = asplit(ipt)  

    # D. calculate specific (hue dependent) similarity indicators, Si:
    if len(xyzti.shape) == 3:
        ai = np.expand_dims(similarity_ai, axis = 1)
    else: 
        ai = similarity_ai
    a1,a2,a3,a4,a5 = asplit(ai)
    mahalanobis_d2 = (a3*np.power((P - a1),2.0) + a4*np.power((T - a2),2.0) + 2.0*a5*(P-a1)*(T-a2))
    if (len(mahalanobis_d2.shape)==3) & (mahalanobis_d2.shape[-1]==1):
        mahalanobis_d2 = mahalanobis_d2[:,:,0].T
    Si = np.exp(-0.5*mahalanobis_d2)

    # E. calculate general similarity indicator, Sa:
    Sa = avg(Si, axis = 0,keepdims = True)

    # F. rescale similarity indicators (Si, Sa) with a 0-1 scale to memory color rendition indices (Rmi, Rm) with a 0 - 100 scale:
    Rmi = scale_fcn(np.log(Si),scale_factor = scale_factor)
    Rm = np2d(scale_fcn(np.log(Sa),scale_factor = scale_factor))

    # G. calculate Rg (polyarea of test / polyarea of memory colours):
    if 'Rg' in out.split(','):
        I = I[..., None]
        a1 = a1[:, None] * np.ones(I.shape)
        a2 = a2[:, None] * np.ones(I.shape)
        a12 = np.concatenate((a1, a2), axis=2)
        ipt_mc = np.concatenate((I,a12),axis=2)
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue  = [rg_pars[x] for x in sorted(rg_pars.keys())]
    
        hue_bin_data = _get_hue_bin_data(ipt, ipt_mc, 
                                         start_hue = start_hue, nhbins = nhbins,
                                         normalized_chroma_ref = normalized_chroma_ref)
        Rg = _hue_bin_data_to_rg(hue_bin_data)

    if (out != 'Rm'):
        return  eval(out)
    else:
        return Rm
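
Steps D-E are the heart of the metric: a mahalanobis-type distance of each sample's (P, T) coordinates to its memory color, mapped through a Gaussian to a 0-1 similarity. A minimal standalone sketch for a single spectrum, with the arithmetic mean standing in for the metric's avg function and hypothetical ai coefficients:

import numpy as np

def similarity_indicators(P, T, ai):
    # ai: (N x 5) ndarray with per-sample coefficients a1..a5
    a1, a2, a3, a4, a5 = ai.T
    d2 = (a3 * (P - a1)**2 + a4 * (T - a2)**2
          + 2.0 * a5 * (P - a1) * (T - a2))  # squared mahalanobis distance
    Si = np.exp(-0.5 * d2)                   # per-sample similarity in (0, 1]
    Sa = Si.mean(axis=0, keepdims=True)      # general similarity indicator
    return Si, Sa

P = np.array([0.02, -0.01])
T = np.array([0.01, 0.03])
ai = np.array([[0.00, 0.00, 400.0, 400.0,  50.0],
               [0.01, 0.02, 300.0, 500.0, -40.0]])  # hypothetical values
Si, Sa = similarity_indicators(P, T, ai)
print(Si, Sa)
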
Example #4
def mahalanobis2(x, y = None, z = None, mu = None, sigmainv = None):
    """
    Evaluate the squared mahalanobis distance
    
    Args: 
        :x: 
            | scalar or list or ndarray (.ndim = 1 or 2) with x(y)-coordinates 
              at which to evaluate the mahalanobis distance squared.
        :y: 
            | None or scalar or list or ndarray (.ndim = 1) with y-coordinates 
              at which to evaluate the mahalanobis distance squared, optional.
            | If :y: is None, :x: should be a 2d array.
        :z: 
            | None or scalar or list or ndarray (.ndim = 1) with z-coordinates 
              at which to evaluate the mahalanobis distance squared, optional.
            | If :z: is None & :y: is None, then :x: should be a 2d array.
        :mu: 
            | None or ndarray (.ndim = 1) with center coordinates of the 
              mahalanobis ellipse, optional. 
            | None defaults to zeros(2) or zeros(3).
        :sigmainv:
            | None or ndarray with 'inverse covariance matrix', optional 
            | Determines the shape and orientation of the mahalanobis ellipse.
            | None defaults to np.eye(2) or np.eye(3).
    Returns:
         :returns: 
             | ndarray with magnitude of mahalanobis2(x,y[,z])

    """
    if (y is None) & (z is None):
        p = x.shape[-1]
    elif (z is None):
        p = 2  # y is given (the y-is-None case is caught above)
    else:
        p = 3 if (y is not None) else 2
    
    if mu is None:
        mu = np.zeros(p)
    if sigmainv is None:
        sigmainv = np.eye(p)
    
    x = np2d(x)
    mu = np2d(mu)

    if (y is None) & (z is None):
        x = x - mu
        if p == 2:
            x, y = asplit(x)
        elif p==3:
            x, y, z = asplit(x)
    elif (z is None):
        if y is None:
            x = x - mu
            x, y = asplit(x)
        else:
            x = x - mu[...,0] # center data on mu 
            y = np2d(y) - mu[...,1] # center data on mu 
    elif (z is not None):
        if (y is not None):
            x = x - mu[..., 0] # center data on mu 
            y = np2d(y) - mu[..., 1] # center data on mu 
            z = np2d(z) - mu[..., 2] # center data on mu 
        else:
            x = x - mu[..., 0] # center data on mu 
            y = np2d(z) - mu[..., 1] # 2D case: treat z as the 2nd coordinate
            
    if p == 2:
        return (sigmainv[0,0] * (x**2.0) + sigmainv[1,1] * (y**2.0) + 2.0*sigmainv[0,1]*(x*y))
    else:
        return (sigmainv[0,0] * (x**2.0) + sigmainv[1,1] * (y**2.0) + 2.0*sigmainv[0,1]*(x*y) + 
                sigmainv[2,2] * (z**2.0) + 2.0*sigmainv[0,2]*(x*z) +  2.0*sigmainv[1,2]*(y*z))
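
A quick standalone check that the expanded quadratic form returned above matches the matrix definition d2 = (v - mu)^T · sigmainv · (v - mu) in the 2D case:

import numpy as np

v = np.array([0.3, -0.2])
mu = np.array([0.1, 0.1])
sigmainv = np.linalg.inv(np.array([[2.0, 0.5], [0.5, 1.0]]))

d = v - mu
d2_matrix = d @ sigmainv @ d
d2_expanded = (sigmainv[0, 0] * d[0]**2 + sigmainv[1, 1] * d[1]**2
               + 2.0 * sigmainv[0, 1] * d[0] * d[1])
assert np.isclose(d2_matrix, d2_expanded)
print(d2_matrix)
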
Example #5
def plot_chromaticity_diagram_colors(diagram_samples = 256, diagram_opacity = 1.0, diagram_lightness = 0.25,\
                                      cieobs = _CIEOBS, cspace = 'Yxy', cspace_pars = {},\
                                      show = True, axh = None,\
                                      show_grid = False, label_fontname = 'Times New Roman', label_fontsize = 12,\
                                      **kwargs):
    """
    Plot the chromaticity diagram colors.
    
    Args:
        :diagram_samples:
            | 256, optional
            | Sampling resolution of color space.
        :diagram_opacity:
            | 1.0, optional
            | Sets opacity of chromaticity diagram
        :diagram_lightness:
            | 0.25, optional
            | Sets lightness of chromaticity diagram
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show:
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace: 
            | (for use with luxpy.colortf())
        :show_grid:
            | False, optional
            | Show grid (True) or not (False)
        :label_fontname: 
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
        
    Returns:
        :returns:
            | handle to axes (:show: == True)
            |  or
            | None (:show: == False)
    """

    if isinstance(cieobs, str):
        SL = _CMF[cieobs]['bar'][1:4].T
    else:
        SL = cieobs[1:4].T
    SL = 100.0 * SL / (SL[:, 1, None] + _EPS)
    SL = SL[SL.sum(axis=1) >
            0, :]  # avoid div by zero in xyz-to-Yxy conversion
    SL = colortf(SL, tf=cspace, tfa0=cspace_pars)
    plambdamax = SL[:, 1].argmax()
    SL = np.vstack(
        (SL[:(plambdamax + 1), :], SL[0])
    )  # add lowest wavelength data and go to max of gamut in x (there is a reversal for some cmf set wavelengths >~700 nm!)
    Y, x, y = asplit(SL)
    SL = np.vstack((x, y)).T

    # create grid for conversion to srgb
    offset = _EPS
    min_x = min(offset, x.min())
    max_x = max(1, x.max())
    min_y = min(offset, y.min())
    max_y = max(1, y.max())
    ii, jj = np.meshgrid(
        np.linspace(min_x - offset, max_x + offset, int(diagram_samples)),
        np.linspace(max_y + offset, min_y - offset, int(diagram_samples)))
    ij = np.dstack((ii, jj))
    ij[ij == 0] = offset

    ij2D = ij.reshape((diagram_samples**2, 2))
    ij2D = np.hstack((diagram_lightness * 100 * np.ones(
        (ij2D.shape[0], 1)), ij2D))
    xyz = colortf(ij2D, tf=cspace + '>xyz', tfa0=cspace_pars)

    xyz[xyz < 0] = 0
    xyz[np.isinf(xyz.sum(axis=1)), :] = np.nan
    xyz[np.isnan(xyz.sum(axis=1)), :] = offset

    srgb = xyz_to_srgb(xyz)
    srgb = srgb / srgb.max()
    srgb = srgb.reshape((diagram_samples, diagram_samples, 3))

    if show == True:
        if axh is None:
            fig = plt.figure()
            axh = fig.add_subplot(111)
        polygon = Polygon(SL, facecolor='none', edgecolor='none')
        axh.add_patch(polygon)
        image = axh.imshow(srgb,
                           interpolation='bilinear',
                           extent=(min_x, max_x, min_y - 0.05, max_y),
                           clip_path=None,
                           alpha=diagram_opacity)
        image.set_clip_path(polygon)
        axh.plot(x, y, color='darkgray')
        if (cspace == 'Yxy') & (isinstance(cieobs, str)):
            axh.set_xlim([0, 1])
            axh.set_ylim([0, 1])
        elif (cspace == 'Yuv') & (isinstance(cieobs, str)):
            axh.set_xlim([0, 0.6])
            axh.set_ylim([0, 0.6])
        if (cspace is not None):
            xlabel = _CSPACE_AXES[cspace][1]
            ylabel = _CSPACE_AXES[cspace][2]
            if (label_fontname is not None) & (label_fontsize is not None):
                axh.set_xlabel(xlabel,
                               fontname=label_fontname,
                               fontsize=label_fontsize)
                axh.set_ylabel(ylabel,
                               fontname=label_fontname,
                               fontsize=label_fontsize)

        if show_grid == True:
            axh.grid(True)
        #plt.show()

        return axh
    else:
        return None
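
The fill works by drawing the full sRGB grid as an image and then clipping it to the spectrum-locus polygon. A minimal standalone sketch of that matplotlib technique, with a random image standing in for the srgb grid:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon

fig, ax = plt.subplots()
img = np.random.rand(64, 64, 3)                       # stand-in for the srgb grid
poly = Polygon([[0.1, 0.1], [0.9, 0.2], [0.5, 0.9]],
               facecolor='none', edgecolor='none')
ax.add_patch(poly)                                    # patch must live on the axes
im = ax.imshow(img, extent=(0, 1, 0, 1), interpolation='bilinear')
im.set_clip_path(poly)                                # keep only pixels inside poly
plt.show()
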
Example #6
def plotSL(cieobs =_CIEOBS, cspace = _CSPACE, DL = False, BBL = True, D65 = False,\
           EEW = False, cctlabels = False, axh = None, show = True,\
           cspace_pars = {}, formatstr = 'k-',\
           diagram_colors = False, diagram_samples = 100, diagram_opacity = 1.0,\
           diagram_lightness = 0.25,\
           **kwargs):
    """
    Plot spectrum locus for cieobs in cspace.
    
    Args: 
        :DL: 
            | True or False, optional
            | True plots Daylight Locus as well.
        :BBL: 
            | True or False, optional
            | True plots BlackBody Locus as well. 
        :D65: 
            | False or True, optional
            | True plots D65 chromaticity as well. 
        :EEW: 
            | False or True, optional
            | True plots Equi-Energy-White chromaticity as well. 
        :cctlabels:
            | False or True, optional
            | Add cct text labels at various points along the blackbody locus.
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show:
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :formatstr:
            | 'k-' or str, optional
            | Format str for plotting (see ?matplotlib.pyplot.plot)
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace: 
            | (for use with luxpy.colortf())
        :diagram_colors:
            | False, optional
            | True: plot colored chromaticity diagram.
        :diagram_samples:
            | 100, optional
            | Sampling resolution of color space.
        :diagram_opacity:
            | 1.0, optional
            | Sets opacity of chromaticity diagram
        :diagram_lightness:
            | 0.25, optional
            | Sets lightness of chromaticity diagram
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
    
    Returns:
        :returns: 
            | None (:show: == True) 
            |  or 
            | handle to current axes (:show: == False)
    """
    if isinstance(cieobs, str):
        SL = _CMF[cieobs]['bar'][1:4].T
    else:
        SL = cieobs[1:4].T
    SL = 100.0 * SL / (SL[:, 1, None] + _EPS)
    SL = SL[SL.sum(axis=1) >
            0, :]  # avoid div by zero in xyz-to-Yxy conversion
    SL = colortf(SL, tf=cspace, tfa0=cspace_pars)
    plambdamax = SL[:, 1].argmax()
    SL = np.vstack(
        (SL[:(plambdamax + 1), :], SL[0])
    )  # add lowest wavelength data and go to max of gamut in x (there is a reversal for some cmf set wavelengths >~700 nm!)
    Y, x, y = asplit(SL)

    showcopy = show
    if np.any([DL, BBL, D65, EEW]):
        show = False

    if diagram_colors == True:
        axh_ = plot_chromaticity_diagram_colors(axh = axh, show = diagram_colors, cspace = cspace, cieobs = cieobs,\
                                                cspace_pars = cspace_pars,\
                                                diagram_samples = diagram_samples,\
                                                diagram_opacity = diagram_opacity,\
                                                diagram_lightness = diagram_lightness,\
                                                label_fontname = None, label_fontsize = None)
    else:
        axh_ = axh

    axh_ = plot_color_data(x,
                           y,
                           axh=axh_,
                           cieobs=cieobs,
                           cspace=cspace,
                           show=show,
                           formatstr=formatstr,
                           **kwargs)

    if DL == True:
        if 'label' in kwargs.keys():  # avoid label also being used for DL
            kwargs.pop('label')
        plotDL(ccts=None,
               cieobs=cieobs,
               cspace=cspace,
               axh=axh_,
               show=show,
               cspace_pars=cspace_pars,
               formatstr='k:',
               **kwargs)

    if BBL == True:
        if 'label' in kwargs.keys():  # avoid label also being used for BB
            kwargs.pop('label')
        plotBB(ccts=None,
               cieobs=cieobs,
               cspace=cspace,
               axh=axh_,
               show=show,
               cspace_pars=cspace_pars,
               cctlabels=cctlabels,
               formatstr='k-.',
               **kwargs)

    if D65 == True:
        YxyD65 = colortf(spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs),
                         tf=cspace,
                         tfa0=cspace_pars)
        axh_.plot(YxyD65[..., 1], YxyD65[..., 2], 'bo')  # plot into the axes actually used (axh may be None)
    if EEW == True:
        YxyEEW = colortf(spd_to_xyz(_CIE_ILLUMINANTS['E'], cieobs=cieobs),
                         tf=cspace,
                         tfa0=cspace_pars)
        axh_.plot(YxyEEW[..., 1], YxyEEW[..., 2], 'ko')

    if showcopy == False:
        return axh_
    else:
        plt.show()
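
A hedged usage sketch, assuming the luxpy package (from which these examples are drawn) is installed and exports plotSL at the top level:

import luxpy as lx
import matplotlib.pyplot as plt

axh = lx.plotSL(cspace='Yxy', BBL=True, D65=True, EEW=True,
                cctlabels=True, show=False)  # show=False returns the axes
axh.set_title('Spectrum locus with blackbody locus, D65 and EEW')
plt.show()
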
Example #7
def plotBB(ccts=None,
           cieobs=_CIEOBS,
           cspace=_CSPACE,
           axh=None,
           cctlabels=True,
           show=True,
           cspace_pars={},
           formatstr='k-',
           **kwargs):
    """
    Plot blackbody locus.
        
    Args: 
        :ccts: 
            | None or list[float], optional
            | None defaults to a range from 1000 K to 1e19 K:
            |     [1000,1500,2000,2500,3000,3500,4000,5000,6000,8000,10000] 
            |    + [15000 K to 1e19 K] in 100 steps on a log10 scale
        :cctlabels:
            | True or False, optional
            | Add cct text labels at various points along the blackbody locus.
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show:
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :formatstr:
            | 'k-' or str, optional
            | Format str for plotting (see ?matplotlib.pyplot.plot)
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace: 
            | (for use with luxpy.colortf())
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
    
    Returns:
        :returns: 
            | None (:show: == True) 
            |  or 
            | handle to current axes (:show: == False)
    """
    if ccts is None:
        ccts1 = np.array([
            1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 3500.0, 4000.0, 5000.0,
            6000.0, 8000.0, 10000.0
        ])
        ccts2 = 10**np.linspace(np.log10(15000.0), np.log10(10.0**19.0), 100)
        ccts = np.hstack((ccts1, ccts2))
    else:
        ccts1 = None

    BB = cri_ref(ccts, ref_type='BB')
    xyz = spd_to_xyz(BB, cieobs=cieobs)
    Yxy = colortf(xyz, tf=cspace, tfa0=cspace_pars)
    Y, x, y = asplit(Yxy)

    axh = plot_color_data(x,
                          y,
                          axh=axh,
                          cieobs=cieobs,
                          cspace=cspace,
                          show=show,
                          formatstr=formatstr,
                          **kwargs)

    if (cctlabels == True) & (ccts1 is not None):
        for i in range(ccts1.shape[0]):
            if ccts1[i] >= 3000.0:
                if i % 2 == 0.0:
                    axh.plot(x[i], y[i], 'k+', color='0.5')
                    axh.text(x[i] * 1.05,
                             y[i] * 0.95,
                             '{:1.0f}K'.format(ccts1[i]),
                             color='0.5')
        axh.plot(x[-1], y[-1], 'k+', color='0.5')
        axh.text(x[-1] * 1.05,
                 y[-1] * 0.95,
                 '{:1.0e}K'.format(ccts[-1]),
                 color='0.5')
    if show == False:
        return axh
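
The default CCT grid above is 11 hand-picked values from 1000 to 10000 K plus 100 log10-spaced values from 15000 K up to 1e19 K; a standalone check:

import numpy as np

ccts1 = np.array([1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 3500.0,
                  4000.0, 5000.0, 6000.0, 8000.0, 10000.0])
ccts2 = 10**np.linspace(np.log10(15000.0), np.log10(10.0**19.0), 100)
ccts = np.hstack((ccts1, ccts2))
print(ccts.size, ccts[0], ccts[-1])  # 111 1000.0 1e+19
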
Example #8
def plotDL(ccts = None, cieobs =_CIEOBS, cspace = _CSPACE, axh = None, \
           show = True, force_daylight_below4000K = False, cspace_pars = {}, \
           formatstr = 'k-',  **kwargs):
    """
    Plot daylight locus.
    
    Args: 
        :ccts: 
            | None or list[float], optional
            | None defaults to [4000 K to 1e19 K] in 100 steps on a log10 scale.
        :force_daylight_below4000K: 
            | False or True, optional
            | CIE daylight phases are not defined below 4000 K. 
            | If True plot anyway.
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show: 
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :formatstr:
            | 'k-' or str, optional
            | Format str for plotting (see ?matplotlib.pyplot.plot)
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace: 
            | (for use with luxpy.colortf())
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
    
    Returns:
        :returns: 
            | None (:show: == True) 
            |  or 
            | handle to current axes (:show: == False)
    """

    if ccts is None:
        ccts = 10**np.linspace(np.log10(4000.0), np.log10(10.0**19.0), 100)

    xD, yD = daylightlocus(ccts,
                           cieobs=cieobs,
                           force_daylight_below4000K=force_daylight_below4000K)
    Y = 100 * np.ones(xD.shape)
    DL = Yxy_to_xyz(np.vstack((Y, xD, yD)).T)
    DL = colortf(DL, tf=cspace, tfa0=cspace_pars)
    Y, x, y = asplit(DL)

    axh = plot_color_data(x,
                          y,
                          axh=axh,
                          cieobs=cieobs,
                          cspace=cspace,
                          show=show,
                          formatstr=formatstr,
                          **kwargs)

    if show == False:
        return axh
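
For reference, a standalone sketch of the CIE daylight-locus chromaticity that daylightlocus() computes (the standard CIE polynomial, valid for 4000 K <= T <= 25000 K):

import numpy as np

def daylight_xy(T):
    T = np.asarray(T, dtype=float)
    xD = np.where(T <= 7000.0,
                  0.244063 + 0.09911e3/T + 2.9678e6/T**2 - 4.6070e9/T**3,
                  0.237040 + 0.24748e3/T + 1.9018e6/T**2 - 2.0064e9/T**3)
    yD = -3.000*xD**2 + 2.870*xD - 0.275
    return xD, yD

print(daylight_xy(6500.0))  # ~(0.3128, 0.3292): essentially D65
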
Example #9
def xyz_to_Ydlep(xyz,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to speed up the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of the spectrum locus before it turns back on itself in the long-wavelength range (~700 nm)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # use builtin float (np.float is deprecated)

    # flip axes so that the shortest dim ends up on axis 1 (fewer loop iterations below):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)

    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hues requiring a complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get the hue of the complementary wavelength

        # find 2 closest enclosing hues in sl:
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = (hslb - hib)
        q1 = np.abs(dh).argmin(axis=0)  # index of closest hue
        sign_q1 = np.sign(dh[q1])[0]
        dh[np.sign(dh) ==
           sign_q1] = 1000000  # set all dh on the same side as q1 to a very large value
        q2 = np.abs(dh).argmin(
            axis=0)  # index of second closest (enclosing) hue


        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
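
The q1/q2 step in the loop is a generic "find the two closest enclosing grid values" search: take the nearest value, mask everything on the same side of the target, then take the nearest remaining one. A standalone illustration:

import numpy as np

grid = np.array([10.0, 20.0, 30.0, 40.0])    # e.g. spectrum-locus hue angles
target = 24.0
d = grid - target
q1 = np.abs(d).argmin()                      # closest value: index 1 (20.0)
d_masked = d.copy()
d_masked[np.sign(d) == np.sign(d[q1])] = 1e6 # discard same-side values
q2 = np.abs(d_masked).argmin()               # enclosing value: index 2 (30.0)
print(grid[q1], grid[q2])                    # 20.0 30.0
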
Example #10
def xyz_to_Ydlep_(xyz,
                  cieobs=_CIEOBS,
                  xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                  flip_axes=False,
                  **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to speed up the loop in the function.
            |          (a single xyzw is not flipped!)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # use builtin float (np.float is deprecated)

    # flip axes so that the shortest dim ends up on axis 1 (fewer loop iterations below):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]
    pmaxlambda = Yxysl[..., 1].argmax()
    maxlambda = wlsl[pmaxlambda]
    maxlambda = 700  # hard-coded override (this is the debug variant of xyz_to_Ydlep)
    print(np.where(wlsl == maxlambda))
    pmaxlambda = np.where(wlsl == maxlambda)[0][0]
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')
    print(h)
    print('rh', h[0, 0] - h[0, 1])
    print(wlsl[0], wlsl[-1])

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    print('xyz:', xyz)
    for i in range(xyz3.shape[1]):
        print('\ni:', i, h[:, i], hsl_max, hsl_min)
        print(h)
        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hues requiring a complementary wavelength (purple line)
        print('pc', (h[:, i] > hsl_max) & (h[:, i] < hsl_min))
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get the hue of the complementary wavelength

        # find 2 closest hues in sl:
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue
        print('q1q2', q2, q1)

        print('wls:', h[:, i], wlsl[q1], wlsl[q2])
        print('hsls:', hsl[q2, 0], hsl[q1, 0])
        print('d', (wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]),
              (wlsl[q2] - wlsl[q1]) / (hsl[q2, 0] - hsl[q1, 0]))
        print('(h[:,i] - hsl[q1,0])', (h[:, i] - hsl[q1, 0]))
        print('div', np.divide((wlsl[q2] - wlsl[q1]),
                               (hsl[q2, 0] - hsl[q1, 0])))
        print(
            'mult(...)',
            np.multiply((h[:, i] - hsl[q1, 0]),
                        np.divide((wlsl[q2] - wlsl[q1]),
                                  (hsl[q2, 0] - hsl[q1, 0]))))
        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        print('dom', dominantwavelength[:, i])
        dominantwavelength[(dominantwavelength[:,
                                               i] > max(wlsl[q1], wlsl[q2])),
                           i] = max(wlsl[q1], wlsl[q2])
        dominantwavelength[(dominantwavelength[:,
                                               i] < min(wlsl[q1], wlsl[q2])),
                           i] = min(wlsl[q1], wlsl[q2])

        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
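
The purple-line correction in both variants intersects the line from the white point through the test point with the purple line, via the perpendicular-dot-product form. A standalone check with hypothetical centered coordinates:

import numpy as np

xyw   = np.array([0.0, 0.0])        # white point (already centered)
xy    = np.array([0.1, -0.2])       # test point below the purple line
xypl1 = np.array([-0.14, -0.32])    # centered spectrum-locus endpoints
xypl2 = np.array([0.42, -0.06])

da, db, dp = xy - xyw, xypl2 - xypl1, xyw - xypl1
T = np.array([[0.0, -1.0], [1.0, 0.0]])
dap = da @ T                            # da rotated by 90 degrees
t = (dap @ dp) / (dap @ db)             # parameter along the purple line
cross = t * db + xypl1
print(cross)                            # intersection point
print(cross[0]*da[1] - cross[1]*da[0])  # ~0: cross lies on the ray through xy
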
Example #11
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw: 
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to speed up the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of the spectrum locus before it turns back on itself in the long-wavelength range (~700 nm)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)  # use builtin float (np.float is deprecated)

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find the two closest spectrum-locus wavelengths to dom
        # (abs() because dom < 0 marks a complementary wavelength):
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) ==
            sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(
            axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    else:
        Yxy = Yxy.transpose((0, 1, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
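
The inverse reconstruction is polar at heart: a point with purity p and dominant-wavelength direction hdom lies at distance p * d_wl from the white point. A tiny standalone sketch with hypothetical values:

import numpy as np

d_wl = 0.25      # white point -> spectrum locus distance (hypothetical)
purity = 0.6
hdom = 130.0     # hue angle of the dominant wavelength (deg)
d = purity * d_wl
x = d * np.cos(np.radians(hdom))
y = d * np.sin(np.radians(hdom))
print(x, y)      # centered chromaticity offsets; add the white point back
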
Example #12
def cam15u(data,
           fov=10.0,
           inputtype='xyz',
           direction='forward',
           outin='Q,aW,bW',
           parameters=None):
    """
    Convert between CIE 2006 10°  XYZ tristimulus values (or spectral data) 
    and CAM15u color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10°  XYZ tristimulus values or spectral data
            |  or color appearance attributes
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam15u
            |   -'inverse': cam15u -> xyz 
        :outin:
            | 'Q,aW,bW' or str, optional
            | 'Q,aW,bW' (brightness and opponent signals for amount-of-neutral)
            |  other options: 'Q,aM,bM' (colorfulness) and 'Q,aS,bS' (saturation)
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM15U_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')

    References: 
        1. `M. Withouck, K. A. G. Smet, W. R. Ryckaert, and P. Hanselaer, 
        “Experimental driven modelling of the color appearance of 
        unrelated self-luminous stimuli: CAM15u,” 
        Opt. Express, vol. 23, no. 9, pp. 12045–12064, 2015.
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-9-12045&origin=search>`_
        2. `M. Withouck, K. A. G. Smet, and P. Hanselaer, (2015), 
        “Brightness prediction of different sized unrelated self-luminous stimuli,” 
        Opt. Express, vol. 23, no. 10, pp. 13455–13466. 
        <https://www.osapublishing.org/oe/abstract.cfm?uri=oe-23-10-13455&origin=search>`_  
     """

    if parameters is None:
        parameters = _CAM15U_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    Mxyz2rgb, cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cp, k, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    invMxyz2rgb = np.linalg.inv(Mxyz2rgb)
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data)
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=0)  # avoid looping if not necessary

    if (data.shape[0] > data.shape[1]):  # loop over shortest dim.
        flipaxis0and1 = True
        data = np.transpose(data, axes=(1, 0, 2))
    else:
        flipaxis0and1 = False

    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[
            -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral

    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        if (inputtype != 'xyz') & (direction == 'forward'):
            xyz = spd_to_xyz(data[i], cieobs='2006_10', relative=False)
            lms = np.dot(_CMF['2006_10']['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms /
                   _CMF['2006_10']['K']) * k  # convert to rho, gamma, beta
        elif (inputtype == 'xyz') & (direction == 'forward'):
            rgb = np.dot(Mxyz2rgb, data[i].T).T

        if direction == 'forward':

            # apply cube-root compression:
            rgbc = rgb**(cp)

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            A = cA * A
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = A + cHK[0] * M**cHK[
                1]  # last term is contribution of Helmholtz-Kohlrausch effect on brightness

            # calculate saturation, s:
            s = M / Q

            # calculate amount of white, W:
            W = 100.0 / (1.0 + cW[0] * (s**cW[1]))

            #  adjust Q for size (fov) of stimulus (matter of debate whether to do this before or after calculation of s or W, there was no data on s, M or W for different sized stimuli: after)
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')

            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aW', 'bW']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aW, bW))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                W = (a**2.0 + b**2.0)**0.5
                s = (((100 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / (
                    (fov / 10.0)**cfov
                )  #adjust Q for size (fov) of stimulus back to that 10° ref
                if 'W' in outin:
                    s = (((100.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q - cHK[0] * M**cHK[1]
            A = A / cA

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)
            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to rgb:
            rgb = rgbc**(1 / cp)

            # convert rgb to xyz:
            xyz = np.dot(invMxyz2rgb, rgb.T).T

            camout[i] = xyz

    if flipaxis0and1 == True:  # loop over shortest dim.
        camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
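
A compressed numeric sketch of the forward path (compression, opponent signals, colorfulness, brightness with the Helmholtz-Kohlrausch term, saturation and whiteness), using hypothetical parameter values in place of luxpy.cam._CAM15U_PARAMETERS:

import numpy as np

rgb = np.array([60.0, 55.0, 40.0])            # rho, gamma, beta (hypothetical)
cp, cA, ca, cb, cM = 1.0/3, 1.0, 1.0, 1.0, 1.0
cHK, cW, cfov, fov = (1.0, 0.5), (1.0, 1.0), 0.5, 10.0
MAab = np.eye(3)                              # stand-in for [cAlms; calms; cblms]

A, a, b = MAab @ rgb**cp                      # compression + opponent signals
A, a, b = cA*A, ca*a, cb*b
M = cM * np.hypot(a, b)                       # colorfulness-like signal
Q = (A + cHK[0]*M**cHK[1]) * (fov/10.0)**cfov # brightness incl. H-K effect and fov
s = M / Q                                     # saturation
W = 100.0 / (1.0 + cW[0]*s**cW[1])            # amount of white
print(Q, s, W)
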
Example #13
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping 
    of the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
              is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
          published model. 
        | With:
        |    1. A correction for the parameter 
        |         in Eq.4 of Fig. 11: 0.952 --> -0.952 
        |         
        |     2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |         should be: -0.028 & 0.821 
        |  
        |     (cfr. Ccwb = 0.66 in: 
        |         ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """

    # get model parameters
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters, args)  # overwrite parameters with other (not-None) input args

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]
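    # (Note: this unpacking depends on sorted() returning the parameter keys
    #  in exactly the alphabetical order of the names on the left.)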

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs=cieobs,
                          relative=False)  # get abs. tristimulus values
        if relative == False:  # input is expected to be absolute: rescale white to Lw
            dataw[1:] = Lw * dataw[1:] / xyzw[:, 1:2]  # make absolute
        # else: dataw is already relative (Y = 100), nothing to do
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative)

    # precomputations:
    Mxyz2lms = np.dot(
        np.diag(cLMS),
        math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]]))
    )  # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    #initialize data and camout:
    data = np2d(data).copy()  # stimulus data (can be up to N x M x 3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy()  # white point (can be up to N x 3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[0] == 1:  # make dataw have the same light source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis=0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack(
                (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count; only forward mode accepts spectral input
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                                      cieobs=cieobs,
                                      relative=False)
                dataw[i + 1] = Lw * dataw[i + 1] / xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])),
                              cieobs=cieobs,
                              relative=False)
            lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']
            lmsf = (Yb / 100.0) * lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs=cieobs,
                                  relative=False) / _CMF[cieobs]['K']
                lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw * dataw[i] / 100.0  # make absolute
            lmsw = 683.0 * np.dot(Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb / 100.0) * lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw * data[i] / 100.0  # make absolute
                lmst = 683.0 * np.dot(Mxyz2lms, data[i].T).T / _CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))
        lmstp = np.vstack((lmsfp, lmstp))  # add adaptation field lms temporarily to lmstp for quick calculation
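        # (Descriptive note: erf() squashes the Cc-scaled log photon rates into
        #  the [-1, 1] cone-output range; Cf weights the contribution of the
        #  adapting field lmsf. The inverse model undoes this with erfinv(),
        #  which is why lmstp is clipped to [-1, 1] there.)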

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)
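        # (Sum/difference recoding; the inverse model recovers alphp and betp
        #  via alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0]) and
        #  betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0]).)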

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_out = alph_int - Ccwb[0] * alph_int[0]  # white-balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T  # stack together and remove adaptation field from vertical stack
        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))  # (currently unused)

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                alph_int = alph_out + Ccwb[0] * alph_int[0]  # inverse white-balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0])  # <-- alphpp = cga2[0]*(alphp + betp)
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0])  # <-- betpp = cgb2[0]*(alphp - betp)

            # stage 3 inverse:
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)  # undo the 683/K photon-rate scaling applied in forward mode (cfr. refactored version below)

            if relative == True:
                xyzt = (100.0 / Lw) * xyzt

            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis=0)

    return camout
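
# Hedged usage sketch (added for illustration; not part of the library source).
# It assumes the cam_sww16 defined above is in scope together with its luxpy
# module-level dependencies (_CAM_SWW16_PARAMETERS, _CIE_ILLUMINANTS,
# spd_to_xyz, ...); the XYZ values and the tolerance are arbitrary choices.
import numpy as np

xyz_demo = np.array([[40.0, 30.0, 20.0]])  # one sample, relative XYZ (Yw = 100)
lab_demo = cam_sww16(xyz_demo, Yb=20.0, Lw=400.0,
                     inputtype='xyz', direction='forward')
xyz_back = cam_sww16(lab_demo, Yb=20.0, Lw=400.0,
                     inputtype='xyz', direction='inverse')
print(np.allclose(xyz_demo, xyz_back, atol=1e-3))  # expected: True (approximate round-trip)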
Example #14
def cam_sww16(data,
              dataw=None,
              Yb=20.0,
              Lw=400.0,
              Ccwb=None,
              relative=True,
              inputtype='xyz',
              direction='forward',
              parameters=None,
              cieobs='2006_10',
              match_to_conversionmatrix_to_cieobs=True):
    """
    A simple principled color appearance model based on a mapping of 
    the Munsell color system.
    
    | This function implements the JOSA A (parameters = 'JOSA') published model. 
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [x M] x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input the shape is (N x (M+1) x wl).
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb: 
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None,  optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW16_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters 
            |            (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz-to-lms
            | matrix to the one corresponding to that observer. 
            | If False: use the one set in :parameters: or _CAM_SWW16_PARAMETERS.
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
    
    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') 
        | published model, with:
        |    1. A correction for the parameter in Eq. 4 of Fig. 11: 
        |       0.952 --> -0.952
        |    2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f 
        |       should be: -0.028 & 0.821 
        |    (cfr. Ccwb = 0.66 in: 
        |       ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)
             
    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). 
        A simple principled approach for modeling and understanding uniform color metrics. 
        Journal of the Optical Society of America A, 33(3), A319–A331. 
        <https://doi.org/10.1364/JOSAA.33.00A319>`_

    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()
    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        match_to_conversionmatrix_to_cieobs=match_to_conversionmatrix_to_cieobs
    )

    #unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    #--------------------------------------------------------------------------
    # Setup default adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    data, dataw, camout, originalshape = _massage_input_and_init_output(
        data, dataw, inputtype=inputtype, direction=direction)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    Mxyz2lms = np.dot(
        np.diag(cLMS), Mxyz2lms
    )  # weight the xyz-to-lms conversion matrix with cLMS (cfr. stage 1 calculations)
    invMxyz2lms = np.linalg.inv(
        Mxyz2lms)  # Calculate the inverse lms-to-xyz conversion matrix
    MAab = np.array(
        [clambda, calpha, cbeta]
    )  # Create matrix with scale factors for L, M, S for quick matrix multiplications
    invMAab = np.linalg.inv(
        MAab)  # Pre-calculate its inverse to avoid repeat in loop.

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get absolute tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #-----------------------------------------------------------------------------
        # stage 1: calculate photon rates of stimulus, white point, and
        # adapting field: i.e. lmst, lmsw and lmsf
        #-----------------------------------------------------------------------------
        # Convert to white point l,m,s:
        lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']

        # Calculate adaptation field and convert to l,m,s:
        lmsf = (Yb / 100.0) * lmsw

        # Calculate lms of stimulus
        # or put adaptation lmsf in test field lmst for later use in inverse-mode (no xyz in 'inverse' mode!!!):
        lmst = (683.0 * np.dot(Mxyz2lms, xyzt.T).T /
                _CMF[cieobs]['K']) if (direction == 'forward') else lmsf

        #-----------------------------------------------------------------------------
        # stage 2: calculate cone outputs of stimulus lmstp
        #-----------------------------------------------------------------------------
        lmstp = math.erf(Cc *
                         (np.log(lmst / lms0) +
                          Cf * np.log(lmsf / lms0)))  # stimulus test field
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) +
                               Cf * np.log(lmsf / lms0)))  # adaptation field

        # add adaptation field lms temporarily to lmstp for quick calculation
        lmstp = np.vstack((lmsfp, lmstp))

        #-----------------------------------------------------------------------------
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        #-----------------------------------------------------------------------------
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)

        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        #-----------------------------------------------------------------------------
        #  stage 4: calculate recoded nerve signals, alphapp, betapp:
        #-----------------------------------------------------------------------------
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        #-----------------------------------------------------------------------------
        #  stage 5: calculate conscious color perception:
        #-----------------------------------------------------------------------------
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  stage 5 continued but SPLIT IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #--------------------------------------
        # FORWARD MODE TO PERCEPTUAL SIGNALS:
        #--------------------------------------
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]

            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                # white-balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation:
                alph_out = alph_int - Ccwb[0] * alph_int[0]
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            # stack together and remove adaptation field from vertical stack
            # camout is an ndarray with perceptual signals:
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':

            # stack cognitive pre-adapted adaptation field signals (first on stack) together:
            #labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data
            #(contains model perceptual signals in inverse mode!!!):
            lstar_out, alph_out, bet_out = asplit(data[i])

            #------------------------------------------------------------------------
            #  Inverse stage 5: undo cortical white-balance:
            #------------------------------------------------------------------------
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0

                # inverse white-balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                alph_int = alph_out + Ccwb[0] * alph_int[0]
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (
                np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (
                np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            #---------------------------------------------------------------------------
            #  Inverse stage 4: pre-adapted perceptual signals to recoded nerve signals:
            #---------------------------------------------------------------------------
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0]
                           )  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0]
                          )  # <-- betpp = (Cgb2.*(alphp-betp));

            #---------------------------------------------------------------------------
            #  Inverse stage 3: recoded nerve signals to optic nerve signals:
            #---------------------------------------------------------------------------
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            #---------------------------------------------------------------------------
            #  Inverse stage 2: optic nerve signals to cone outputs:
            #---------------------------------------------------------------------------
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            #---------------------------------------------------------------------------
            #  Inverse stage 1: cone outputs to photon rates:
            #---------------------------------------------------------------------------
            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            #---------------------------------------------------------------------------
            #  Photon rates to absolute or relative tristimulus values:
            #---------------------------------------------------------------------------
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)
            if relative == True:
                xyzt = (100 / Lw) * xyzt

            # store in same named variable as forward mode:
            camout[i] = xyzt

            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            #  END inverse mode
            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    return _massage_output_data_to_original_shape(camout, originalshape)
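
# Hedged usage sketch for this refactored version (illustration only, not part
# of the library source). It assumes this cam_sww16 and its helper functions
# (_update_parameter_dict, _setup_default_adaptation_field, ...) are in scope;
# the sample values and tolerance are arbitrary.
import numpy as np

xyz_demo = np.array([[40.0, 30.0, 20.0],
                     [20.0, 25.0, 35.0]])  # two samples, relative XYZ (Yw = 100)
lab_demo = cam_sww16(xyz_demo, inputtype='xyz', direction='forward')  # defaults: Yb=20, Lw=400
xyz_back = cam_sww16(lab_demo, inputtype='xyz', direction='inverse')
print(np.allclose(xyz_demo, xyz_back, atol=1e-3))  # expected: True (approximate round-trip)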
Example #15
def cam18sl(data,
            datab=None,
            Lb=[100],
            fov=10.0,
            inputtype='xyz',
            direction='forward',
            outin='Q,aS,bS',
            parameters=None):
    """
    Convert between CIE 2006 10° XYZ tristimulus values (or spectral data) 
    and CAM18sl color appearance correlates.
    
    Args:
        :data: 
            | ndarray of CIE 2006 10° absolute XYZ tristimulus values or spectral data
            |  or color appearance attributes of stimulus
        :datab: 
            | ndarray of CIE 2006 10° absolute XYZ tristimulus values or spectral data
            |  of stimulus background
        :Lb: 
            | [100], optional
            | Luminance (cd/m²) value(s) of background(s) calculated using the CIE 2006 10° CMFs 
            | (only used when datab is None, in which case the background is assumed to be an equal-energy white)
        :fov: 
            | 10.0, optional
            | Field-of-view of stimulus (for size effect on brightness)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam18sl
            |   -'inverse': cam18sl -> xyz 
        :outin:
            | 'Q,aS,bS' or str, optional
            | 'Q,aS,bS' (brightness and opponent signals for saturation)
            |  other options: 'Q,aM,bM' (colorfulness) 
            |                 (Note that 'Q,aW,bW' would lead to a Cartesian 
            |                  a,b-coordinate system centered at (1,0))
            | Str specifying the type of 
            |     input (:direction: == 'inverse') and 
            |     output (:direction: == 'forward')
        :parameters:
            | None or dict, optional
            | Set of model parameters.
            |   - None: defaults to luxpy.cam._CAM18SL_PARAMETERS 
            |    (see references below)
    
    Returns:
        :returns: 
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or 
            | XYZ tristimulus values (:direction: == 'inverse')
            
    Notes:
        | * Instead of using the CIE 1964 10° CMFs in some places of the model,
        |   the CIE 2006 10° CMFs are used throughout, making it more self-consistent.
        |   This has an effect on the k scaling factors (now different from those in CAM15u) 
        |   and the illuminant E normalization for use in the chromatic adaptation transform.
        |   (see future erratum to Hermans et al., 2018)
        | * The paper also used an equation for the amount of white W, which is
        |   based on a Q value not expressed in 'bright' ('cA' = 0.937 instead of 123). 
        |   This has been corrected for in the luxpy version of the model, i.e.
        |   _CAM18SL_PARAMETERS['cW'][0] has been changed from 2.29 to 1/11672.
        |   (see future erratum to Hermans et al., 2018)
        | * Default output was 'Q,aW,bW' prior to March 2020, but since this
        |   is an a,b Cartesian system centered on (1,0), the default output
        |   has been changed to 'Q,aS,bS'.

    References: 
        1. `Hermans, S., Smet, K. A. G., & Hanselaer, P. (2018). 
        "Color appearance model for self-luminous stimuli."
        Journal of the Optical Society of America A, 35(12), 2000–2009. 
        <https://doi.org/10.1364/JOSAA.35.002000>`_ 
     """

    if parameters is None:
        parameters = _CAM18SL_PARAMETERS

    outin = outin.split(',')

    #unpack model parameters:
    cA, cAlms, cHK, cM, cW, ca, calms, cb, cblms, cfov, cieobs, k, naka, unique_hue_data = [
        parameters[x] for x in sorted(parameters.keys())
    ]

    # precomputations:
    Mlms2xyz = np.linalg.inv(_CMF[cieobs]['M'])
    MAab = np.array([cAlms, calms, cblms])
    invMAab = np.linalg.inv(MAab)

    #-------------------------------------------------
    # setup EEW reference field and default background field (Lr should be equal to Lb):
    # Get Lb values:
    if datab is not None:
        if inputtype != 'xyz':
            Lb = spd_to_xyz(datab, cieobs=cieobs, relative=False)[..., 1:2]
        else:
            Lb = datab[..., 1:2]
    else:
        if isinstance(Lb, list):
            Lb = np2dT(Lb)

    # Setup EEW ref of same luminance as datab:
    if inputtype == 'xyz':
        wlr = getwlr(_CAM18SL_WL3)
    else:
        if datab is None:
            wlr = data[0]  # use wlr of stimulus data
        else:
            wlr = datab[0]  # use wlr of background data
    datar = np.vstack((wlr, np.ones((Lb.shape[0], wlr.shape[0]))))  # create EEW spectra
    xyzr = spd_to_xyz(datar, cieobs=cieobs, relative=False)  # get abs. tristimulus values

    # Create datab if None:
    if (datab is None):
        if inputtype != 'xyz':
            datab = datar.copy()
        else:
            datab = spd_to_xyz(datar, cieobs=cieobs, relative=False)

    # prepare data and datab for loop over backgrounds:
    # make axis 1 of datab have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    if inputtype == 'xyz':
        datar = spd_to_xyz(datar, cieobs=cieobs, relative=False)  # convert to xyz!!
        if datab.shape[0] == 1:  # make datab and datar have the same light source dimension size (used to store different backgrounds) as data
            datab = np.repeat(datab, data.shape[1], axis=0)
            datar = np.repeat(datar, data.shape[1], axis=0)
    else:
        if datab.shape[0] == 2:
            datab = np.vstack(
                (datab[0], np.repeat(datab[1:], data.shape[1], axis=0)))
        if datar.shape[0] == 2:
            datar = np.vstack(
                (datar[0], np.repeat(datar[1:], data.shape[1], axis=0)))

    # Flip light source/ background dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    #-------------------------------------------------

    #initialize camout:
    dshape = list(data.shape)
    dshape[-1] = len(outin)  # requested number of correlates
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count; only forward mode accepts spectral input
    camout = np.zeros(dshape)
    camout.fill(np.nan)

    for i in range(data.shape[0]):

        # get rho, gamma, beta of background and reference white:
        if (inputtype != 'xyz'):
            xyzb = spd_to_xyz(np.vstack((datab[0], datab[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
            xyzr = spd_to_xyz(np.vstack((datar[0], datar[i + 1:i + 2, :])),
                              cieobs=cieobs,
                              relative=False)
        else:
            xyzb = datab[i:i + 1, :]
            xyzr = datar[i:i + 1, :]

        lmsb = np.dot(_CMF[cieobs]['M'], xyzb.T).T  # convert to l,m,s
        rgbb = (lmsb / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta
        #lmsr = np.dot(_CMF[cieobs]['M'],xyzr.T).T # convert to l,m,s
        #rgbr = (lmsr / _CMF[cieobs]['K']) * k # convert to rho, gamma, beta
        #rgbr = rgbr/rgbr[...,1:2]*Lb[i] # calculated EEW cone excitations at same luminance values as background
        rgbr = np.ones(xyzr.shape) * Lb[i]  # explicitly make EEW cone excitations equal, at the same luminance as the background

        if direction == 'forward':
            # get rho, gamma, beta of stimulus:
            if (inputtype != 'xyz'):
                xyz = spd_to_xyz(data[i], cieobs=cieobs, relative=False)
            elif (inputtype == 'xyz'):
                xyz = data[i]
            lms = np.dot(_CMF[cieobs]['M'], xyz.T).T  # convert to l,m,s
            rgb = (lms / _CMF[cieobs]['K']) * k  # convert to rho, gamma, beta

            # apply von-kries cat with D = 1:
            if (rgbb == 0).any():
                Mcat = np.eye(3)
            else:
                Mcat = np.diag((rgbr / rgbb)[0])
            rgba = np.dot(Mcat, rgb.T).T

            # apply naka-rushton compression:
            rgbc = naka_rushton(rgba,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'])
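            # (Assumed general form of the compression, for reference only:
            #  R = scaling * x**n / (x**n + sig**n) + noise, with sig set
            #  adaptively from the mean reference cone excitation rgbr; see
            #  luxpy's naka_rushton for the authoritative definition.)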

            #rgbc = np.ones(rgbc.shape)*rgbc.mean() # test if eew ends up at origin

            # calculate achromatic and color difference signals, A, a, b:
            Aab = np.dot(MAab, rgbc.T).T
            A, a, b = asplit(Aab)
            a = ca * a
            b = cb * b

            # calculate colorfulness-like signal M:
            M = cM * ((a**2.0 + b**2.0)**0.5)

            # calculate brightness Q:
            Q = cA * (A + cHK[0] * M**cHK[1])  # last term: contribution of the Helmholtz-Kohlrausch effect to brightness

            # calculate saturation, s:
            s = M / Q
            S = s  # make extra variable, just in case 'S' is called

            # calculate amount of white, W:
            W = 1 / (1.0 + cW[0] * (s**cW[1]))

            # adjust Q for size (fov) of stimulus (debatable whether to do this
            # before or after calculating s or W; there was no data on s, M or W
            # for different stimulus sizes, so: after):
            Q = Q * (fov / 10.0)**cfov

            # calculate hue, h and Hue quadrature, H:
            h = hue_angle(a, b, htype='deg')
            if 'H' in outin:
                H = hue_quadrature(h, unique_hue_data=unique_hue_data)
            else:
                H = None

            # calculate cart. co.:
            if 'aM' in outin:
                aM = M * np.cos(h * np.pi / 180.0)
                bM = M * np.sin(h * np.pi / 180.0)

            if 'aS' in outin:
                aS = s * np.cos(h * np.pi / 180.0)
                bS = s * np.sin(h * np.pi / 180.0)

            if 'aW' in outin:
                aW = W * np.cos(h * np.pi / 180.0)
                bW = W * np.sin(h * np.pi / 180.0)

            if (outin != ['Q', 'aS', 'bS']):
                camout[i] = eval('ajoin((' + ','.join(outin) + '))')
            else:
                camout[i] = ajoin((Q, aS, bS))

        elif direction == 'inverse':

            # get Q, M and a, b depending on input type:
            if 'aW' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                W = (a**2.0 + b**2.0)**0.5
                s = (((1.0 / W) - 1.0) / cW[0])**(1.0 / cW[1])
                M = s * Q

            if 'aM' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                M = (a**2.0 + b**2.0)**0.5

            if 'aS' in outin:
                Q, a, b = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                s = (a**2.0 + b**2.0)**0.5
                M = s * Q

            if 'h' in outin:
                Q, WsM, h = asplit(data[i])
                Q = Q / ((fov / 10.0)**cfov)  # adjust Q for stimulus size (fov) back to the 10° reference
                if 'W' in outin:
                    s = (((1.0 / WsM) - 1.0) / cW[0])**(1.0 / cW[1])
                    M = s * Q
                elif 's' in outin:
                    M = WsM * Q
                elif 'M' in outin:
                    M = WsM

            # calculate achromatic signal, A from Q and M:
            A = Q / cA - cHK[0] * M**cHK[1]

            # calculate hue angle:
            h = hue_angle(a, b, htype='rad')

            # calculate a,b from M and h:
            a = (M / cM) * np.cos(h)
            b = (M / cM) * np.sin(h)

            a = a / ca
            b = b / cb

            # create Aab:
            Aab = ajoin((A, a, b))

            # calculate rgbc:
            rgbc = np.dot(invMAab, Aab.T).T

            # decompress rgbc to (adapted) rgba :
            rgba = naka_rushton(rgbc,
                                n=naka['n'],
                                sig=naka['sig'](rgbr.mean()),
                                noise=naka['noise'],
                                scaling=naka['scaling'],
                                direction='inverse')

            # apply inverse von-kries cat with D = 1:
            rgb = np.dot(np.diag((rgbb / rgbr)[0]), rgba.T).T

            # convert rgb to lms to xyz:
            lms = rgb / k * _CMF[cieobs]['K']
            xyz = np.dot(Mlms2xyz, lms.T).T

            camout[i] = xyz

    camout = np.transpose(camout, axes=(1, 0, 2))

    if camout.shape[1] == 1:
        camout = np.squeeze(camout, axis=1)

    return camout
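
# Hedged usage sketch (illustration only, not part of the library source).
# It assumes the cam18sl defined above is in scope inside luxpy; the absolute
# XYZ values, background luminance and tolerance are arbitrary choices.
import numpy as np

xyz_demo = np.array([[300.0, 350.0, 250.0]])  # absolute CIE 2006 10° XYZ (cd/m² scale)
qab_demo = cam18sl(xyz_demo, datab=None, Lb=[100], fov=10.0,
                   inputtype='xyz', direction='forward', outin='Q,aS,bS')
xyz_back = cam18sl(qab_demo, datab=None, Lb=[100], fov=10.0,
                   inputtype='xyz', direction='inverse', outin='Q,aS,bS')
print(np.allclose(xyz_demo, xyz_back, atol=1e-3))  # expected: True (approximate round-trip)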