Example #1
def psy_scale(data,
              scale_factor=[1.0 / 55.0, 3.0 / 2.0, 2.0],
              scale_max=100.0):  # defaults for cri2012
    """
    Psychometric based color rendering index scale from CRI2012: 
    
    | Rfi,a = 100 * (2 / (exp(c1*abs(DEi,a)**(c2)) + 1)) ** c3
        
    Args:
        :data: 
            | float or list[floats] or ndarray 
        :scale_factor:
            | [1/55, 3/2, 2.0] or list[float] or ndarray, optional
            | Rescales color differences before subtracting them from :scale_max:
            | Note that the default value is the one from (Smet et al. 2013, LRT).
        :scale_max: 
            | 100.0, optional
            | Maximum value of linear scale
    
    Returns:
        :returns: 
            | float or list[floats] or ndarray
        
    References:
        1. `Smet, K., Schanda, J., Whitehead, L., & Luo, R. (2013). 
        CRI2012: A proposal for updating the CIE colour rendering index. 
        Lighting Research and Technology, 45, 689–709. 
        <http://lrt.sagepub.com/content/45/6/689>`_  
        
    """
    return scale_max * np.power(
        2.0 /
        (np.exp(scale_factor[0] * np.power(np.abs(data), scale_factor[1])) +
         1.0), scale_factor[2])
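A minimal usage sketch (not part of the original listing): it assumes numpy is imported as np and that psy_scale is defined as above, and evaluates the scale for a few hypothetical color differences.

import numpy as np

DEi = np.array([0.0, 2.0, 5.0, 10.0])  # hypothetical color differences
Rfi = psy_scale(DEi)                   # defaults reproduce the CRI2012 scale
# DEi = 0 maps to scale_max (100); larger differences give lower Rfi values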
Example #2
 def get_xyz(self, *args):
     """ get cartesian coordinates """
     theta, phi, r = args
     x = r * np.sin(theta) * np.cos(phi)
     y = r * np.sin(theta) * np.sin(phi)
     z = r * np.cos(theta)
     z[np.abs(z) < self._TINY] = 0.0
     return x, y, z
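The method above relies on a class attribute self._TINY. A standalone sketch of the same spherical-to-Cartesian conversion (assuming numpy as np; the 1e-15 threshold is a stand-in for _TINY):

import numpy as np

theta = np.array([0.0, np.pi / 2])  # polar angles
phi = np.array([0.0, np.pi / 4])    # azimuths
r = np.array([1.0, 2.0])            # radii
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
z[np.abs(z) < 1e-15] = 0.0          # snap near-zero z to exactly 0, as _TINY does above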
Example #3
def naka_rushton(data, sig=2.0, n=0.73, scaling=1.0, noise=0.0, forward=True):
    """
    Apply a Naka-Rushton response compression (n) and an adaptive shift (sig).
    
    | NK(x) = sign(x) * scaling * ((abs(x)**n) / ((abs(x)**n) + (sig**n))) + noise
    
    Args:
        :data:
            | float or ndarray
        :sig: 
            | 2.0, optional
            | Semi-saturation constant. Value for which NK(:data:) is 1/2
        :n: 
            | 0.73, optional
            | Compression power.
        :scaling:
            | 1.0, optional
            | Maximum value of NK-function.
        :noise:
            | 0.0, optional
            | Cone excitation noise.
        :forward:
            | True, optional
            | True: do NK(x) 
            | False: do NK(x)**(-1).
    
    Returns:
        :returns: 
            | float or ndarray with NK-(de)compressed input :data:
    """
    if forward:
        return np.sign(data) * scaling * ((np.abs(data)**n) /
                                          ((np.abs(data)**n) +
                                           (sig**n))) + noise
    elif forward == False:
        Ip = sig * (((np.abs(np.abs(data) - noise)) /
                     (scaling - np.abs(np.abs(data) - noise))))**(1 / n)
        if not np.isscalar(Ip):
            p = np.where(np.abs(data) < noise)
            Ip[p] = -Ip[p]
        else:
            if np.abs(data) < noise:
                Ip = -Ip
        return Ip
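A roundtrip sketch (assuming numpy as np and naka_rushton defined as above): compressing and then decompressing should approximately recover the input.

import numpy as np

x = np.array([0.5, 1.0, 5.0, 20.0])
y = naka_rushton(x)                      # forward compression
x_rec = naka_rushton(y, forward=False)   # inverse (decompression)
# np.allclose(x, x_rec) should hold up to floating-point error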
Example #4
def polyarea(x,y):
    """
    Calculates area of polygon. 
    
    | First coordinate should also be last.
    
    Args:
        :x: 
            | ndarray of x-coordinates of polygon vertices.
        :y: 
            | ndarray of y-coordinates of polygon vertices.
    
    Returns:
        :returns:
            | float (area of polygon)
    
    """
    return 0.5*np.abs(np.dot(x,np.roll(y,1).T)-np.dot(y,np.roll(x,1).T))
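A quick sanity check of the shoelace formula above on a unit square (assuming numpy as np and polyarea defined as above; the first vertex is repeated as the last, per the docstring):

import numpy as np

x = np.array([0.0, 1.0, 1.0, 0.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0, 0.0])
print(polyarea(x, y))  # expected: 1.0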
Example #5
def xyz_to_cct_ohno2011(xyz):
    """
    Calculate cct and Duv from CIE 1931 2° xyz following Ohno (2011).
    
    Args:
        :xyz:
            | ndarray with CIE 1931 2° X,Y,Z tristimulus values
            
    Returns:
        :cct, duv:
            | ndarrays with correlated color temperatures and distance to blackbody locus in CIE 1960 uv
            
    References:
        1. Ohno, Y. (2011). Calculation of CCT and Duv and Practical Conversion Formulae. 
        CORM 2011 Conference, Gaithersburg, MD, May 3-5, 2011
    """
    uvp = xyz_to_Yuv(xyz)[..., 1:]
    uv = uvp * np.array([[1, 2 / 3]])
    Lfp = ((uv[..., 0] - 0.292)**2 + (uv[..., 1] - 0.24)**2)**0.5
    a = np.arctan((uv[..., 1] - 0.24) / (uv[..., 0] - 0.292))
    a[a < 0] = a[a < 0] + np.pi
    Lbb = np.polyval(_KIJ[0, :], a)
    Duv = Lfp - Lbb

    T1 = 1 / np.polyval(_KIJ[1, :], a)
    T1[a >= 2.54] = 1 / np.polyval(_KIJ[2, :], a[a >= 2.54])
    dTc1 = np.polyval(_KIJ[3, :], a) * (Lbb + 0.01) / Lfp * Duv / 0.01
    dTc1[a >= 2.54] = 1 / np.polyval(_KIJ[4, :], a[a >= 2.54]) * (
        Lbb[a >= 2.54] + 0.01) / Lfp[a >= 2.54] * Duv[a >= 2.54] / 0.01
    T2 = T1 - dTc1
    c = np.log10(T2)
    c[T2 == 0] = -np.inf
    dTc2 = np.polyval(_KIJ[5, :], c)
    dTc2[Duv < 0] = np.polyval(_KIJ[6, :], c[Duv < 0]) * np.abs(
        Duv[Duv < 0] / 0.03)**2
    Tfinal = T2 - dTc2
    return Tfinal, Duv
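A usage sketch, assuming the luxpy internals used above (xyz_to_Yuv and the _KIJ polynomial coefficient table) are in scope, e.g. when calling the function from within luxpy:

import numpy as np

xyz_d65 = np.array([[95.047, 100.0, 108.883]])  # approximate CIE D65 tristimulus values
cct, duv = xyz_to_cct_ohno2011(xyz_d65)
# cct should come out near 6500 K; duv is small and positive (D65 lies slightly above the Planckian locus)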
Example #6
def xyz_to_Ydlep(xyz,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 of the input to increase the speed of the loop in the function.
            |          (a single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on itself in the high wavelength range (~700 nm)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # (np.float is deprecated; the builtin float is equivalent here)

    # flip axis so that shortest dim is on axis0 (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)

    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hue's requiring complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest enclosing hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = (hslb - hib)
        q1 = np.abs(dh).argmin(axis=0)  # index of closest hue
        sign_q1 = np.sign(dh[q1])[0]
        dh[np.sign(dh) ==
           sign_q1] = 1000000  # set all dh on the same side as q1 to a very large value
        q2 = np.abs(dh).argmin(
            axis=0)  # index of second  closest (enclosing) hue

        # # Test changes to code:
        # print('wls',i, wlsl[q1],wlsl[q2])
        # import matplotlib.pyplot as plt
        # plt.figure()
        # plt.plot(wlsl[:-1],np.diff(xsl[:,0]),'k.-')
        # plt.figure()
        # plt.plot(x[0,i],y[0,i],'k.'); plt.plot(xsl,ysl,'r.-');plt.plot(xsl[q1],ysl[q1],'b.');plt.plot(xsl[q2],ysl[q2],'g.');plt.plot(xsl[-1],ysl[-1],'c+')

        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
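A usage sketch, assuming the function runs inside luxpy (it uses _CMF, _CIEOBS, np3d, asplit, xyz_to_Yxy and math.positive_arctan) and numpy is imported as np:

import numpy as np

xyz = np.array([[41.24, 21.26, 1.93]])       # roughly the xyz of the sRGB red primary
xyzw = np.array([[95.047, 100.0, 108.883]])  # D65 white point
Ydlep = xyz_to_Ydlep(xyz, xyzw=xyzw)
# Ydlep[..., 0] = Y, Ydlep[..., 1] = dominant wavelength (negative = complementary),
# Ydlep[..., 2] = excitation purity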
Example #7
def DE2000(xyzt,
           xyzr,
           dtype='xyz',
           DEtype='jab',
           avg=None,
           avg_axis=0,
           out='DEi',
           xyzwt=None,
           xyzwr=None,
           KLCH=None):
    """
    Calculate DE2000 color difference.
    
    Args:
        :xyzt: 
            | ndarray with tristimulus values of test data.
        :xyzr:
            | ndarray with tristimulus values of reference data.
        :dtype:
            | 'xyz' or 'lab', optional
            | Specifies data type in :xyzt: and :xyzr:.
        :xyzwt:
            | None or ndarray, optional
            |   White point tristimulus values of test data
            |   None defaults to the one set in lx.xyz_to_lab()
        :xyzwr:
            | None or ndarray, optional
            |    Whitepoint tristimulus values of reference data
            |    None defaults to the one set in lx.xyz_to_lab()
        :DEtype:
            | 'jab' or str, optional
            | Options: 
            |    - 'jab' : calculates full color difference over all 3 dimensions.
            |    - 'ab'  : calculates chromaticity difference.
            |    - 'j'   : calculates lightness or brightness difference 
            |             (depending on :outin:).
            |    - 'j,ab': calculates both 'j' and 'ab' options 
            |              and returns them as a tuple.
        :KLCH: 
            | None, optional
            | Weights for L, C, H
            | None: default to [1,1,1] 
        :avg:
            | None, optional
            | None: don't calculate average DE, 
            |       otherwise use function handle in :avg:.
        :avg_axis:
            | axis to calculate average over, optional
        :out: 
            | 'DEi' or str, optional
            | Requested output.
        
    Note:
        For the other input arguments, see specific color space used.
        
    Returns:
        :returns: 
            | ndarray with DEi [, DEa] or other as specified by :out:
            
    References:
        1. `Sharma, G., Wu, W., & Dalal, E. N. (2005). 
        The CIEDE2000 color‐difference formula: Implementation notes, 
        supplementary test data, and mathematical observations. 
        Color Research & Application, 30(1), 21–30. 
        <https://doi.org/10.1002/col.20070>`_
    """

    if KLCH is None:
        KLCH = [1, 1, 1]

    if dtype == 'xyz':
        labt = xyz_to_lab(xyzt, xyzw=xyzwt)
        labr = xyz_to_lab(xyzr, xyzw=xyzwr)
    else:
        labt = xyzt
        labr = xyzr

    Lt = labt[..., 0:1]
    at = labt[..., 1:2]
    bt = labt[..., 2:3]
    Ct = np.sqrt(at**2 + bt**2)
    #ht = cam.hue_angle(at,bt,htype = 'rad')

    Lr = labr[..., 0:1]
    ar = labr[..., 1:2]
    br = labr[..., 2:3]
    Cr = np.sqrt(ar**2 + br**2)
    #hr = cam.hue_angle(at,bt,htype = 'rad')

    # Step 1:
    Cavg = (Ct + Cr) / 2
    G = 0.5 * (1 - np.sqrt((Cavg**7.0) / ((Cavg**7.0) + (25.0**7))))
    apt = (1 + G) * at
    apr = (1 + G) * ar

    Cpt = np.sqrt(apt**2 + bt**2)
    Cpr = np.sqrt(apr**2 + br**2)
    Cpprod = Cpt * Cpr

    hpt = cam.hue_angle(apt, bt, htype='deg')
    hpr = cam.hue_angle(apr, br, htype='deg')
    hpt[(apt == 0) * (bt == 0)] = 0
    hpr[(apr == 0) * (br == 0)] = 0

    # Step 2:
    dL = np.abs(Lr - Lt)
    dCp = np.abs(Cpr - Cpt)
    dhp_ = hpr - hpt

    dhp = dhp_.copy()
    # wrap the hue difference to [-180, 180], as in the CIEDE2000 definition:
    dhp[np.where(dhp_ > 180)] = dhp[np.where(dhp_ > 180)] - 360
    dhp[np.where(dhp_ < -180)] = dhp[np.where(dhp_ < -180)] + 360
    dhp[np.where(Cpprod == 0)] = 0

    #dH = 2*np.sqrt(Cpprod)*np.sin(dhp/2*np.pi/180)
    dH = deltaH(dhp, Cpprod, htype='deg')

    # Step 3:
    Lp = (Lr + Lt) / 2
    Cp = (Cpr + Cpt) / 2

    hps = hpt + hpr
    hp = (hpt + hpr) / 2
    hp[np.where((np.abs(dhp_) > 180)
                & (hps < 360))] = hp[np.where((np.abs(dhp_) > 180)
                                              & (hps < 360))] + 180
    hp[np.where((np.abs(dhp_) > 180)
                & (hps >= 360))] = hp[np.where((np.abs(dhp_) > 180)
                                               & (hps >= 360))] - 180
    hp[np.where(Cpprod == 0)] = 0

    T = 1 - 0.17*np.cos((hp - 30)*np.pi/180) + 0.24*np.cos(2*hp*np.pi/180) +\
        0.32*np.cos((3*hp + 6)*np.pi/180) - 0.20*np.cos((4*hp - 63)*np.pi/180)
    dtheta = 30 * np.exp(-((hp - 275) / 25)**2)
    RC = 2 * np.sqrt((Cp**7) / ((Cp**7) + (25**7)))
    SL = 1 + ((0.015 * (Lp - 50)**2) / np.sqrt(20 + (Lp - 50)**2))
    SC = 1 + 0.045 * Cp
    SH = 1 + 0.015 * Cp * T
    RT = -np.sin(2 * dtheta * np.pi / 180) * RC

    kL, kC, kH = KLCH

    DEi = ((dL / (kL * SL))**2, (dCp / (kC * SC))**2 + (dH / (kH * SH))**2 +
           RT * (dCp / (kC * SC)) * (dH / (kH * SH)))

    return _process_DEi(DEi,
                        DEtype=DEtype,
                        avg=avg,
                        avg_axis=avg_axis,
                        out=out)
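A usage sketch, assuming the luxpy helpers used above (xyz_to_lab, cam.hue_angle, deltaH, _process_DEi) are available; passing dtype='lab' skips the white-point conversion step. The Lab pair and its published ΔE00 value are taken from the Sharma et al. (2005) test data.

import numpy as np

labt = np.array([[50.0, 2.6772, -79.7751]])  # test Lab values (pair 1 of the published test set)
labr = np.array([[50.0, 0.0, -82.7485]])     # reference Lab values
dE = DE2000(labt, labr, dtype='lab')
# the published value for this pair is approximately 2.0425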
Example #8
def _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=None):
    """ 
    Calculate CAM02-UCS J'a'b' coordinates from xyz tristimulus values of sample and white point.
    
    Args:
        :xyz:
            | ndarray with sample tristimulus values
        :xyzw:
            | ndarray with white point tristimulus values  
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info, see luxpy.cam.ciecam02().
    
    Returns:
        :jab:
            | ndarray with J'a'b' coordinates.
    """
    #--------------------------------------------
    # Get/ set conditions parameters:
    if conditions is not None:
        surround_parameters = {
            'surrounds': ['avg', 'dim', 'dark'],
            'avg': {
                'c': 0.69,
                'Nc': 1.0,
                'F': 1.0,
                'FLL': 1.0
            },
            'dim': {
                'c': 0.59,
                'Nc': 0.9,
                'F': 0.9,
                'FLL': 1.0
            },
            'dark': {
                'c': 0.525,
                'Nc': 0.8,
                'F': 0.8,
                'FLL': 1.0
            }
        }
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69

    #--------------------------------------------
    # Define sensor space and cat matrices:
    mhpe = np.array([[0.38971, 0.68898, -0.07868], [-0.22981, 1.1834, 0.04641],
                     [0.0, 0.0, 1.0]
                     ])  # Hunt-Pointer-Estevez sensors (cone fundamentals)

    mcat = np.array([[0.7328, 0.4296, -0.1624], [-0.7036, 1.6975, 0.0061],
                     [0.0030, 0.0136, 0.9834]])  # CAT02 sensor space

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1:2].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5

    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #--------------------------------------------
    # transform from xyz, xyzw to cat sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T

    #--------------------------------------------
    # apply von Kries cat:
    rgbc = (
        (D * Yw / rgbw)[..., None] + (1 - D)
    ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat, rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400 * x**0.42 / (x**0.42 + 27.13) + 0.1

    rgbpa = naka_rushton(FL * rgbp / 100.0)
    p = np.where(rgbp < 0)
    rgbpa[p] = 0.1 - (naka_rushton(FL * np.abs(rgbp[p]) / 100.0) - 0.1)

    rgbwpa = naka_rushton(FL * rgbwp / 100.0)
    pw = np.where(rgbwp < 0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL * np.abs(rgbwp[pw]) / 100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
         (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
    b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b, a)
    et = (1.0 / 4.0) * (np.cos(h + 2.0) + 3.8)

    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0 * (A / Aw)**(c * z)

    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0 / 13.0) * Nc * Ncb * et *
         ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                      (21.0 / 20.0 * rgbpa[..., 2]))
    C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

    #--------------------------------------------
    # Calculate colorfulness, M:
    M = C * FL**0.25

    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    if ucs == True:
        KL, c1, c2 = 1.0, 0.007, 0.0228
        Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)
        Mp = (1.0 / c2) * np.log(1.0 + c2 * M)
    else:
        Jp = J
        Mp = M
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)

    return np.dstack((Jp, aMp, bMp))
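A usage sketch, assuming numpy as np and the luxpy helper math.dot23 used above are available:

import numpy as np

xyz = np.array([[19.01, 20.0, 21.78]])     # sample tristimulus values
xyzw = np.array([[95.05, 100.0, 108.88]])  # white point (approximately D65)
conditions = {'La': 100, 'Yb': 20, 'D': 1, 'surround': 'avg'}
jab = _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=conditions)
# jab[..., 0] = J', jab[..., 1] = a'M, jab[..., 2] = b'M (CAM02-UCS coordinates)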
Example #9
def plot_spectrum_colors(spd = None, spdmax = None,\
                         wavelength_height = -0.05, wavelength_opacity = 1.0, wavelength_lightness = 1.0,\
                         cieobs = _CIEOBS, show = True, axh = None,\
                         show_grid = False,ylabel = 'Spectral intensity (a.u.)',xlim=None,\
                         **kwargs):
    """
    Plot the spectrum colors.
    
    Args:
        :spd:
            | None, optional
            | Spectrum
        :spdmax:
            | None, optional
            | Max ylim is set at 1.05*spdmax or (1 + abs(wavelength_height))*spdmax.
        :wavelength_opacity:
            | 1.0, optional
            | Sets opacity of wavelength rectangle.
        :wavelength_lightness:
            | 1.0, optional
            | Sets lightness of wavelength rectangle.
        :wavelength_height:
            | -0.05 or 'spd', optional
            | Determines the wavelength bar height.
            | If not 'spd': height is wavelength_height * spdmax (a fraction of the spectrum maximum).
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show:
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :show_grid:
            | False, optional
            | Show grid (True) or not (False)
        :ylabel:
            | 'Spectral intensity (a.u.)' or str, optional
            | Set y-axis label.
        :xlim:
            | None, optional
            | list or ndarray with xlimits.
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
        
    Returns:
        :axh:
            | handle to the axes when :show: == True, else None.
    """

    if isinstance(cieobs, str):
        cmfs = _CMF[cieobs]['bar']
    else:
        cmfs = cieobs
    cmfs = cmfs[:, cmfs[1:].sum(axis=0) >
                0]  # avoid div by zero in xyz-to-Yxy conversion

    wavs = cmfs[0:1].T
    SL = cmfs[1:4].T

    srgb = xyz_to_srgb(wavelength_lightness * 100 * SL)
    srgb = srgb / srgb.max()

    if show == True:
        if axh is None:
            fig = plt.figure()
            axh = fig.add_subplot(111)

        if (wavelength_height == 'spd') & (spd is not None):
            if spdmax is None:
                spdmax = np.nanmax(spd[1:, :])
            y_min, y_max = 0.0, spdmax * (1.05)
            if xlim is None:
                x_min, x_max = spd[0, :].min(), spd[0, :].max()
            else:
                x_min, x_max = xlim

            SLrect = np.vstack([
                (x_min, 0.0),
                spd.T,
                (x_max, 0.0),
            ])
            wavelength_height = y_max
            spdmax = 1
        else:
            if (spdmax is None) & (spd is not None):
                spdmax = np.nanmax(spd[1:, :])
                y_min, y_max = wavelength_height * spdmax, spdmax * (
                    1 + np.abs(wavelength_height))

            elif (spdmax is None) & (spd is None):
                spdmax = 1
                y_min, y_max = wavelength_height, 0

            elif (spdmax is not None):
                y_min, y_max = wavelength_height * spdmax, spdmax  #*(1 + np.abs(wavelength_height))

            if xlim is None:
                x_min, x_max = wavs.min(), wavs.max()
            else:
                x_min, x_max = xlim

            SLrect = np.vstack([
                (x_min, 0.0),
                (x_min, wavelength_height * spdmax),
                (x_max, wavelength_height * spdmax),
                (x_max, 0.0),
            ])

        axh.set_xlim([x_min, x_max])
        axh.set_ylim([y_min, y_max])

        polygon = Polygon(SLrect, facecolor=None, edgecolor=None)
        axh.add_patch(polygon)
        padding = 0.1
        axh.bar(x=wavs - padding,
                height=wavelength_height * spdmax,
                width=1 + padding,
                color=srgb,
                align='edge',
                linewidth=0,
                clip_path=polygon)

        if spd is not None:
            axh.plot(spd[0:1, :].T, spd[1:, :].T, color='k', label='spd')

        if show_grid == True:
            plt.grid(True)
        axh.set_xlabel('Wavelength (nm)', kwargs)
        axh.set_ylabel(ylabel, kwargs)

        #plt.show()

        return axh
    else:
        return None
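A usage sketch with a toy Gaussian spectrum, assuming the module-level imports used above (matplotlib.pyplot as plt, Polygon, _CMF, xyz_to_srgb) are in place:

import numpy as np
import matplotlib.pyplot as plt

wl = np.arange(380.0, 781.0, 1.0)
spd = np.vstack((wl, np.exp(-0.5 * ((wl - 550.0) / 50.0)**2)))  # toy Gaussian spectrum
axh = plot_spectrum_colors(spd=spd, wavelength_height='spd')
plt.show()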
Example #10
def get_macadam_ellipse(xy = None, k_neighbours = 3, nsteps = 10, average_cik = True):
    """
    Estimate n-step MacAdam ellipse at CIE x,y coordinates xy by calculating 
    average inverse covariance ellipse of the k_neighbours closest ellipses.
    
    Args:
        :xy:
            | None or ndarray, optional
            | If None: output the MacAdam ellipses themselves; if not None: xy are the 
            | CIE xy coordinates for which ellipses will be estimated.
        :k_neighbours:
            | 3, optional
            | Number of nearest ellipses to use to calculate ellipse at xy
        :nsteps:
            | 10, optional
            | Set number of MacAdam steps of ellipse.
        :average_cik:
            | True, optional
            | If True: take distance weighted average of inverse 
            |   'covariance ellipse' elements cik. 
            | If False: average major & minor axis lengths and 
            |   ellipse orientation angles directly.
            
    Returns:
        :v_mac_est:
            | estimated MacAdam ellipse(s) in v-format [Rmax,Rmin,xc,yc,theta]
    
    References:
        1. MacAdam DL. Visual Sensitivities to Color Differences in Daylight*. J Opt Soc Am. 1942;32(5):247-274.
    """
    # list of MacAdam ellipses (x10)
    v_mac = np.atleast_2d([
         [0.16, 0.057, 0.0085, 0.0035, 62.5],
         [0.187, 0.118, 0.022, 0.0055, 77],
         [0.253, 0.125, 0.025, 0.005, 55.5],
         [0.15, 0.68, 0.096, 0.023, 105],
         [0.131, 0.521, 0.047, 0.02, 112.5],
         [0.212, 0.55, 0.058, 0.023, 100],
         [0.258, 0.45, 0.05, 0.02, 92],
         [0.152, 0.365, 0.038, 0.019, 110],
         [0.28, 0.385, 0.04, 0.015, 75.5],
         [0.38, 0.498, 0.044, 0.012, 70],
         [0.16, 0.2, 0.021, 0.0095, 104],
         [0.228, 0.25, 0.031, 0.009, 72],
         [0.305, 0.323, 0.023, 0.009, 58],
         [0.385, 0.393, 0.038, 0.016, 65.5],
         [0.472, 0.399, 0.032, 0.014, 51],
         [0.527, 0.35, 0.026, 0.013, 20],
         [0.475, 0.3, 0.029, 0.011, 28.5],
         [0.51, 0.236, 0.024, 0.012, 29.5],
         [0.596, 0.283, 0.026, 0.013, 13],
         [0.344, 0.284, 0.023, 0.009, 60],
         [0.39, 0.237, 0.025, 0.01, 47],
         [0.441, 0.198, 0.028, 0.0095, 34.5],
         [0.278, 0.223, 0.024, 0.0055, 57.5],
         [0.3, 0.163, 0.029, 0.006, 54],
         [0.365, 0.153, 0.036, 0.0095, 40]
         ])
    
    # convert to v-format ([a,b, xc, yc, theta]):
    v_mac = v_mac[:,[2,3,0,1,4]]
    
    # convert last column to rad.:
    v_mac[:,-1] = v_mac[:,-1]*np.pi/180
    
    # convert to desired number of MacAdam-steps:
    v_mac[:,0:2] = v_mac[:,0:2]/10*nsteps
    
    if xy is not None:
        #calculate inverse covariance matrices:
        cik = math.v_to_cik(v_mac, inverse = True)
        if average_cik == True:
            cik_long = np.hstack((cik[:,0,:],cik[:,1,:]))
        
        # Calculate k_neighbours closest ellipses to xy:
        tree = sp.spatial.cKDTree(v_mac[:,2:4], copy_data = True)
        d, inds = tree.query(xy, k = k_neighbours)
    
        if k_neighbours  > 1:
            pd = 1
            w = (1.0 / np.abs(d)**pd)[:,:,None] # inverse distance weighting
            if average_cik == True:
                cik_long_est = np.sum(w * cik_long[inds,:], axis=1) / np.sum(w, axis=1)
            else:
                v_mac_est = np.sum(w * v_mac[inds,:], axis=1) / np.sum(w, axis=1) # for average xyc

        else:
            v_mac_est = v_mac[inds,:].copy()
        
        # convert cik back to v:
        if (average_cik == True) & (k_neighbours >1):
            cik_est = np.dstack((cik_long_est[:,0:2],cik_long_est[:,2:4]))
            v_mac_est = math.cik_to_v(cik_est, inverse = True)
        v_mac_est[:,2:4] = xy
    else:
        v_mac_est = v_mac
        
    return v_mac_est
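A usage sketch, assuming numpy as np, scipy as sp and luxpy's math module (v_to_cik, cik_to_v) are available as in the function above:

import numpy as np

xy = np.array([[0.3127, 0.3290]])  # approximate D65 chromaticity
v_mac_est = get_macadam_ellipse(xy=xy, k_neighbours=3, nsteps=10)
# each row is [a, b, xc, yc, theta] with the centre (xc, yc) forced to the requested xy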
Example #11
 def __eq__(self, other):
     return np.abs(self - other) < self._TINY
Example #12
 def __pow__(self, x):
     return (self * self) if x == 2 else np.abs(self)**x
Example #13
def spd_normalize(data, norm_type=None, norm_f=1, wl=True, cieobs=_CIEOBS):
    """
    Normalize a spectral power distribution (SPD).
    
    Args:
        :data: 
            | ndarray
        :norm_type: 
            | None, optional 
            |       - 'lambda': make lambda in norm_f equal to 1
            |       - 'area': area-normalization times norm_f
            |       - 'max': max-normalization times norm_f
            |       - 'ru': to :norm_f: radiometric units 
            |       - 'pu': to :norm_f: photometric units 
            |       - 'pusa': to :norm_f: photometric units (with Km corrected
            |                             to standard air, cfr. CIE TN003-2015)
            |       - 'qu': to :norm_f: quantal energy units
        :norm_f:
            | 1, optional
            | Normalization factor that determines the size of normalization 
            | for 'max' and 'area' 
            | or which wavelength is normalized to 1 for 'lambda' option.
        :wl: 
            | True or False, optional 
            | If True, the first column of data contains wavelengths.
        :cieobs:
            | _CIEOBS or str, optional
            | Type of cmf set to use for normalization using photometric units 
            | (norm_type == 'pu')
    
    Returns:
        :returns: 
            | ndarray with normalized data.
    """
    if norm_type is not None:
        if not isinstance(norm_type, list): norm_type = [norm_type]

        if norm_f is not None:
            if not isinstance(norm_f, list): norm_f = [norm_f]

        if ('lambda' in norm_type) | ('qu' in norm_type):
            wl = True  # for lambda & 'qu' normalization wl MUST be first column
            wlr = data[0]

        if (('area' in norm_type) | ('ru' in norm_type) | ('pu' in norm_type) |
            ('pusa' in norm_type)) & (wl == True):
            dl = getwld(data[0])
        else:
            dl = 1  #no wavelengths provided

        offset = int(wl)
        for i in range(data.shape[0] - offset):
            norm_type_ = norm_type[i] if (len(norm_type) > 1) else norm_type[0]

            if norm_f is not None:
                norm_f_ = norm_f[i] if (len(norm_f) > 1) else norm_f[0]
            else:
                norm_f_ = 560.0 if (norm_type_ == 'lambda') else 1.0

            if norm_type_ == 'max':
                data[i + offset] = norm_f_ * data[i + offset] / np.max(
                    data[i + offset])
            elif norm_type_ == 'area':
                data[i + offset] = norm_f_ * data[i + offset] / (
                    np.sum(data[i + offset]) * dl)
            elif norm_type_ == 'lambda':
                wl_index = np.abs(wlr - norm_f_).argmin()
                data[i +
                     offset] = data[i + offset] / data[i + offset][wl_index]
            elif (norm_type_ == 'ru') | (norm_type_ == 'pu') | (
                    norm_type_ == 'pusa') | (norm_type_ == 'qu'):
                rpq_power = spd_to_power(data[[0, i + offset], :],
                                         cieobs=cieobs,
                                         ptype=norm_type_)
                data[i + offset] = (norm_f_ / rpq_power) * data[i + offset]
            else:
                data[i + offset] = data[i + offset] / norm_f_
    return data
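A usage sketch (assuming numpy as np and the luxpy helpers getwld and spd_to_power used above). Note that the function modifies :data: in place, so copies are passed here.

import numpy as np

wl = np.arange(380.0, 781.0, 1.0)
spd = np.vstack((wl, 2.0 * np.ones_like(wl)))  # flat toy spectrum
spd_max1 = spd_normalize(spd.copy(), norm_type='max')                  # peak scaled to 1
spd_l560 = spd_normalize(spd.copy(), norm_type='lambda', norm_f=560)   # value at 560 nm scaled to 1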
Example #14
def cri_ref(ccts, wl3 = None, ref_type = _CRI_REF_TYPE, mix_range = None, 
            cieobs = None, norm_type = None, norm_f = None, 
            force_daylight_below4000K = False, n = None,
            daylight_locus = None):
    """
    Calculates a reference illuminant spectrum based on cct 
    for color rendering index calculations.
    
    Args:
        :ccts: 
            | list of int/floats or ndarray with ccts.
        :wl3: 
            | None, optional
            | New wavelength range for interpolation. 
            | Defaults to wavelengths specified by luxpy._WL3.
        :ref_type:
            | str or list[str], optional
            | Specifies the type of reference spectrum to be calculated.
            | Defaults to luxpy._CRI_REF_TYPE. 
            | If :ref_type: is list of strings, then for each cct in :ccts: 
            | a different reference illuminant can be specified. 
            | If :ref_type: == 'spd', then :ccts: is assumed to be an ndarray
            | of reference illuminant spectra.
        :mix_range: 
            | None or ndarray, optional
            | Determines the cct range between which the reference illuminant is
            | a weighted mean of a Planckian and Daylight Phase spectrum. 
            | Weighting is done as described in IES TM30:
            |    SPDreference = (Te-T)/(Te-Tb)*Planckian+(T-Tb)/(Te-Tb)*daylight
            |    with Tb and Te are resp. the starting and end CCTs of the 
            |    mixing range and whereby the Planckian and Daylight SPDs 
            |    have been normalized for equal luminous flux.
            | If None: use the default specified for :ref_type:.
            | Can be an ndarray with shape[0] > 1, in which case different mixing
            | ranges will be used for each cct in :ccts:.
        :cieobs: 
            | None, optional
            | Required for the normalization of the Planckian and Daylight SPDs 
            | when calculating a 'mixed' reference illuminant.
            | Required when calculating daylightphase (adjust locus parameters to cieobs)
            | If None: _CIEOBS will be used.
        :norm_type: 
            | None, optional 
            |       - 'lambda': make lambda in norm_f equal to 1
            |       - 'area': area-normalization times norm_f
            |       - 'max': max-normalization times norm_f
            |       - 'ru': to :norm_f: radiometric units 
            |       - 'pu': to :norm_f: photometric units 
            |       - 'pusa': to :norm_f: photometric units (with Km corrected
            |                             to standard air, cfr. CIE TN003-2015)
            |       - 'qu': to :norm_f: quantal energy units
        :norm_f:
            | 1, optional
            | Normalization factor that determines the size of normalization 
            | for 'max' and 'area' 
            | or which wavelength is normalized to 1 for 'lambda' option.
        :force_daylight_below4000K: 
            | False or True, optional
            | Daylight locus approximation is not defined below 4000 K, 
            | but by setting this to True, the calculation can be forced to 
            | calculate it anyway.
        :n:
            | None, optional
            | Refractive index (for use in calculation of blackbody radiators).
            | If None: use the one stored in _BB['n']
        :daylight_locus:
            | None, optional
            | dict with xD(T) and yD(xD) parameters to calculate daylight locus 
            | for specified cieobs.
            | If None: use pre-calculated values.
            | If 'calc': calculate them on the fly.
    
    Returns:
        :returns: 
            | ndarray with reference illuminant spectra.
            | (:returns:[0] contains wavelengths)

    Note: 
        Future versions will have the ability to take a dict as input 
        for ref_type. This way other reference illuminants can be specified 
        than the ones in _CRI_REF_TYPES. 
    """
    if ref_type == 'spd':
        
        # ccts already contains spectrum of reference:
        return spd(ccts, wl = wl3, norm_type = norm_type, norm_f = norm_f)

    else:
        if mix_range is not None: mix_range = np2d(mix_range)

        if not (isinstance(ref_type,list) | isinstance(ref_type,dict)): ref_type = [ref_type]
   
        for i in range(len(ccts)):
            cct = ccts[i]

            # get ref_type and mix_range:
            if isinstance(ref_type,dict):
                raise Exception("cri_ref(): dictionary ref_type: Not yet implemented")
            else:

                ref_type_ = ref_type[i] if (len(ref_type)>1) else ref_type[0]

                if mix_range is None:
                    mix_range_ =  _CRI_REF_TYPES[ref_type_]

                else:
                    mix_range_ = mix_range[i] if (mix_range.shape[0]>1) else mix_range[0]  #must be np2d !!!            
      
            if (mix_range_[0] == mix_range_[1]) | (ref_type_[0:2] == 'BB') | (ref_type_[0:2] == 'DL'):
                if ((cct < mix_range_[0]) & (not (ref_type_[0:2] == 'DL'))) | (ref_type_[0:2] == 'BB'):
                    Sr = blackbody(cct, wl3, n = n)
                elif ((cct >= mix_range_[0]) & (not (ref_type_[0:2] == 'BB'))) | (ref_type_[0:2] == 'DL') :
                    Sr = daylightphase(cct,wl3,force_daylight_below4000K = force_daylight_below4000K, cieobs = cieobs, daylight_locus = daylight_locus)
            else:
                SrBB = blackbody(cct, wl3, n = n)
                SrDL = daylightphase(cct,wl3,verbosity = None,force_daylight_below4000K = force_daylight_below4000K, cieobs = cieobs, daylight_locus = daylight_locus)
                cieobs_ = _CIEOBS if cieobs is None else cieobs
                cmf = xyzbar(cieobs = cieobs_, scr = 'dict', wl_new = wl3)
                wl = SrBB[0]
                ld = getwld(wl)

                SrBB = 100.0*SrBB[1]/np.array(np.sum(SrBB[1]*cmf[2]*ld))
                SrDL = 100.0*SrDL[1]/np.array(np.sum(SrDL[1]*cmf[2]*ld))
                Tb, Te = float(mix_range_[0]), float(mix_range_[1])
                cBB, cDL = (Te-cct)/(Te-Tb), (cct-Tb)/(Te-Tb)
                if cBB < 0.0:
                    cBB = 0.0
                elif cBB > 1:
                    cBB = 1.0
                if cDL < 0.0:
                    cDL = 0.0
                elif cDL > 1:
                    cDL = 1.0

                Sr = SrBB*cBB + SrDL*cDL
                Sr[np.isnan(Sr)] = 0.0  # (comparison with float('NaN') is never True)
                Sr560 = Sr[np.where(np.abs(wl - 560.0) == np.min(np.abs(wl - 560.0)))[0]]
                Sr = np.vstack((wl,(Sr/Sr560)))
                     
            if i == 0:
                Srs = Sr[1]
            else:
                Srs = np.vstack((Srs,Sr[1]))
                    
        Srs = np.vstack((Sr[0],Srs))

        return  spd(Srs, wl = None, norm_type = norm_type, norm_f = norm_f)
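A usage sketch, assuming the luxpy context of the function above (blackbody, daylightphase, xyzbar, getwld, spd, np2d and the _CRI_REF_* constants are module-level there):

ccts = [3000, 4500, 6500]
ref = cri_ref(ccts)  # default ref_type (_CRI_REF_TYPE), one reference spectrum per cct
# ref[0] contains the wavelengths, ref[1:] the reference illuminant spectra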
Example #15
def daylightphase(cct, wl3 = None, nominal_cct = False, force_daylight_below4000K = False, verbosity = None, 
                  n = None, cieobs = None, daylight_locus = None, daylight_Mi_coeffs = None):
    """
    Calculate daylight phase spectrum for correlated color temperature (cct).
        
    Args:
        :cct: 
            | int or float 
            | (for list of cct values, use cri_ref() with ref_type = 'DL')
        :wl3: 
            | None, optional
            | New wavelength range for interpolation. 
            | Defaults to wavelengths specified by luxpy._WL3.
        :nominal_cct:
            | False, optional
            | If cct is nominal (e.g. when calculating D65): multiply cct first
            | by 1.4388/1.4380 to account for change in 'c2' in definition of Planckian.
        :cieobs:
            | None or str or ndarray, optional
            | CMF set to use when calculating coefficients for daylight locus and for M1, M2 weights.
            | If None: use standard coefficients for CIE 1931 2° CMFs (for Si at 10 nm).
            | Else: calculate coefficients following Appendix C of CIE15-2004 and Judd (1964).
        :force_daylight_below4000K: 
            | False or True, optional
            | Daylight locus approximation is not defined below 4000 K, 
            | but by setting this to True, the calculation can be forced to 
            | calculate it anyway.
        :verbosity: 
            | None, optional
            |   If None: do not print warning when CCT < 4000 K.
        :n:
            | None, optional
            | Refractive index (for use in calculation of blackbody radiators).
            | If None: use the one stored in _BB['n']
        :daylight_locus:
            | None, optional
            | dict with xD(T) and yD(xD) parameters to calculate daylight locus 
            | for specified cieobs.
            | If None: use pre-calculated values.
            | If 'calc': calculate them on the fly.
        :daylight_Mi_coeffs:
            | None, optional
            | dict with coefficients for M1 & M2 weights for specified cieobs.
            | If None: use pre-calculated values.
            | If 'calc': calculate them on the fly.

    Returns:
        :returns: 
            | ndarray with daylight phase spectrum
            | (:returns:[0] contains wavelengths)

    References:
        1. `CIE15:2018, “Colorimetry,” CIE, Vienna, Austria, 2018. <https://doi.org/10.25039/TR.015.2018>`_
        
        2. `Judd, MacAdam, Wyszecki, Budde, Condit, Henderson, & Simonds (1964). 
        Spectral Distribution of Typical Daylight as a Function of Correlated Color Temperature. 
        J. Opt. Soc. Am., 54(8), 1031–1040. 
        <https://doi.org/10.1364/JOSA.54.001031>`_
    """
    cct = float(cct)
    if wl3 is None: wl3 = _WL3 
    if (cct < (4000.0)) & (force_daylight_below4000K == False):
        if verbosity is not None:
            print('Warning daylightphase spd not defined below 4000 K. Using blackbody radiator instead.')
        return blackbody(cct,wl3, n = n)
    else:
        if nominal_cct: cct*=(1.4388/1.4380) # account for change in c2 in def. of Planckian

        wl = getwlr(wl3) 
        
        #interpolate _S012_DAYLIGHTPHASE first to wl range:
        if  not np.array_equal(_S012_DAYLIGHTPHASE[0],wl):
            S012_daylightphase = cie_interp(data = _S012_DAYLIGHTPHASE, wl_new = wl, kind = 'linear',negative_values_allowed = True)
        else:
            S012_daylightphase = _S012_DAYLIGHTPHASE

        # Get coordinates of daylight locus corresponding to cct:
        xD, yD = daylightlocus(cct, force_daylight_below4000K = force_daylight_below4000K, cieobs = cieobs, daylight_locus = daylight_locus)
        
        # Get M1 & M2 component weights:
        if (cieobs is None): # original M1,M2 for Si at 10 nm spacing and CIE 1931 xy
            Mcoeffs = {'i':0.0241,'j':0.2562,'k':-0.7341,
            'i1':-1.3515,'j1':-1.7703,'k1':5.9114,
            'i2':0.0300,'j2':-31.4424,'k2':30.0717}
        else:
            Mcoeffs = daylight_Mi_coeffs
        M1, M2, _ = _get_daylightphase_Mi_values(xD, yD, Mcoeffs = Mcoeffs, cieobs = cieobs, S012_daylightphase = S012_daylightphase) 
        
        # Calculate weighted combination of S0, S1 & S2 components:
        Sr = S012_daylightphase[1,:] + M1*S012_daylightphase[2,:] + M2*S012_daylightphase[3,:]
        
        # Normalize to 1 at (or near) 560 nm:
        Sr560 = Sr[:,np.where(np.abs(S012_daylightphase[0,:] - 560.0) == np.min(np.abs(S012_daylightphase[0,:] - 560)))[0]]
        Sr /= Sr560
        Sr[np.isnan(Sr)] = 0  # (comparison with float('NaN') is never True)
        return np.vstack((wl,Sr))
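A usage sketch, again assuming the luxpy context (_WL3, _S012_DAYLIGHTPHASE, cie_interp, getwlr, daylightlocus and blackbody available):

S = daylightphase(6500, nominal_cct=True)  # nominal 6500 K is rescaled by 1.4388/1.4380 internally
# S[0] holds the wavelengths, S[1] the daylight phase spectrum normalized to 1 near 560 nm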
Example #16
def plot_hue_bins(hbins = 16, start_hue = 0.0, scalef = 100, \
        plot_axis_labels = False, bin_labels = '#', plot_edge_lines = True, \
        plot_center_lines = False, plot_bin_colors = True, \
        plot_10_20_circles = False,\
        axtype = 'polar', ax = None, force_CVG_layout = False):
    """
    Makes basis plot for Color Vector Graphic (CVG).
    
    Args:
        :hbins:
            | 16 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :scalef:
            | 100, optional
            | Scale factor for graphic.
        :plot_axis_labels:
            | False, optional
            | Turns axis ticks on/off (True/False).
        :bin_labels:
            | None or list[str] or '#', optional
            | Plots labels at the bin center hues.
            |   - None: don't plot.
            |   - list[str]: list with str for each bin. 
            |                (len(:bin_labels:) = :nhbins:)
            |   - '#': plots number.
        :plot_edge_lines:
            | True or False, optional
            | Plot grey bin edge lines with '--'.
        :plot_center_lines:
            | False or True, optional
            | Plot colored lines at 'center' of hue bin.
        :plot_bin_colors:
            | True, optional
            | Colorize hue bins.
        :plot_10_20_circles:
            | False, optional
            | If True and :axtype: == 'cart': Plot white circles at 
            | 80%, 90%, 100%, 110% and 120% of :scalef: 
        :axtype: 
            | 'polar' or 'cart', optional
            | Make polar or Cartesian plot.
        :ax: 
            | None or 'new' or 'same', optional
            |   - None or 'new' creates new plot
            |   - 'same': continue plot on same axes.
            |   - axes handle: plot on specified axes.
        :force_CVG_layout:
            | False or True, optional
            | True: Force plot of basis of CVG on first encounter.
            
    Returns:
        :returns: 
            | gcf(), gca(), list with rgb colors for hue bins (for use in 
            |  other plotting functions)
        
    """

    # Setup hbincenters and hsv_hues:
    if isinstance(hbins, float) | isinstance(hbins, int):
        nhbins = hbins
        dhbins = 360 / (nhbins)  # hue bin width
        hbincenters = np.arange(start_hue + dhbins / 2, 360, dhbins)
        hbincenters = np.sort(hbincenters)

    else:
        hbincenters = hbins
        idx = np.argsort(hbincenters)
        if isinstance(bin_labels, list) | isinstance(bin_labels, np.ndarray):
            bin_labels = bin_labels[idx]
        hbincenters = hbincenters[idx]
        nhbins = hbincenters.shape[0]
    hbincenters = hbincenters * np.pi / 180

    # Setup hbin labels:
    if bin_labels == '#':
        bin_labels = ['#{:1.0f}'.format(i + 1) for i in range(nhbins)]
    elif isinstance(bin_labels, str):
        bin_labels = [
            bin_labels + '{:1.0f}'.format(i + 1) for i in range(nhbins)
        ]

    # initializing the figure
    cmap = None
    if (ax is None) or (ax == 'new'):
        fig = plt.figure()
        newfig = True
    else:
        fig = plt.gcf()
        newfig = False
    rect = [0.1, 0.1, 0.8,
            0.8]  # setting the axis limits in [left, bottom, width, height]

    if axtype == 'polar':
        # the polar axis:
        if newfig == True:
            ax = fig.add_axes(rect, polar=True, frameon=False)
    else:
        #cartesian axis:
        if newfig == True:
            ax = fig.add_axes(rect)

    if (newfig == True) | (force_CVG_layout == True):

        # Calculate hue-bin boundaries:
        r = np.vstack((np.zeros(hbincenters.shape),
                       1. * scalef * np.ones(hbincenters.shape)))
        theta = np.vstack((np.zeros(hbincenters.shape), hbincenters))
        #t = hbincenters.copy()
        dU = np.roll(hbincenters.copy(), -1)
        dL = np.roll(hbincenters.copy(), 1)
        dtU = dU - hbincenters
        dtL = hbincenters - dL
        dtU[dtU < 0] = dtU[dtU < 0] + 2 * np.pi
        dtL[dtL < 0] = dtL[dtL < 0] + 2 * np.pi
        dL = hbincenters - dtL / 2
        dU = hbincenters + dtU / 2
        dt = (dU - dL)
        dM = dL + dt / 2

        # Setup color for plotting hue bins:
        hsv_hues = hbincenters - 30 * np.pi / 180
        hsv_hues = hsv_hues / hsv_hues.max()

        edges = np.vstack(
            (np.zeros(hbincenters.shape), dL))  # setup hue bin edges array

        if axtype == 'cart':
            if plot_center_lines == True:
                hx = r * np.cos(theta) * 1.2
                hy = r * np.sin(theta) * 1.2
            if bin_labels is not None:
                hxv = np.vstack((np.zeros(hbincenters.shape),
                                 1.4 * scalef * np.cos(hbincenters)))
                hyv = np.vstack((np.zeros(hbincenters.shape),
                                 1.4 * scalef * np.sin(hbincenters)))
            if plot_edge_lines == True:
                #hxe = np.vstack((np.zeros(hbincenters.shape),1.2*scalef*np.cos(dL)))
                #hye = np.vstack((np.zeros(hbincenters.shape),1.2*scalef*np.sin(dL)))
                hxe = np.vstack(
                    (0.1 * scalef * np.cos(dL), 1.5 * scalef * np.cos(dL)))
                hye = np.vstack(
                    (0.1 * scalef * np.sin(dL), 1.5 * scalef * np.sin(dL)))

        # Plot hue-bins:
        for i in range(nhbins):

            # Create color from hue angle:
            #c = np.abs(np.array(colorsys.hsv_to_rgb(hsv_hues[i], 0.75, 0.85)))
            c = np.abs(np.array(colorsys.hls_to_rgb(hsv_hues[i], 0.45, 0.5)))
            if i == 0:
                cmap = [c]
            else:
                cmap.append(c)

            if axtype == 'polar':
                if plot_edge_lines == True:
                    ax.plot(edges[:, i],
                            r[:, i] * 1.,
                            color='grey',
                            marker='None',
                            linestyle='--',
                            linewidth=1,
                            markersize=2)
                if plot_center_lines == True:
                    if np.mod(i, 2) == 1:
                        ax.plot(theta[:, i],
                                r[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1)
                    else:
                        ax.plot(theta[:, i],
                                r[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1,
                                markersize=10)
                if plot_bin_colors == True:
                    bar = ax.bar(dM[i],
                                 r[1, i],
                                 width=dt[i],
                                 color=c,
                                 alpha=0.25)
                if bin_labels is not None:
                    ax.text(hbincenters[i],
                            1.3 * scalef,
                            bin_labels[i],
                            fontsize=10,
                            horizontalalignment='center',
                            verticalalignment='center',
                            color=np.array([1, 1, 1]) * 0.45)
                if plot_axis_labels == False:
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
            else:
                axis_ = 1. * np.array(
                    [-scalef * 1.5, scalef * 1.5, -scalef * 1.5, scalef * 1.5])
                if plot_edge_lines == True:
                    ax.plot(hxe[:, i],
                            hye[:, i],
                            color='grey',
                            marker='None',
                            linestyle='--',
                            linewidth=1,
                            markersize=2)

                if plot_center_lines == True:
                    if np.mod(i, 2) == 1:
                        ax.plot(hx[:, i],
                                hy[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1)
                    else:
                        ax.plot(hx[:, i],
                                hy[:, i],
                                color=c,
                                marker=None,
                                linestyle='--',
                                linewidth=1,
                                markersize=10)
                if bin_labels is not None:
                    ax.text(hxv[1, i],
                            hyv[1, i],
                            bin_labels[i],
                            fontsize=10,
                            horizontalalignment='center',
                            verticalalignment='center',
                            color=np.array([1, 1, 1]) * 0.45)
                ax.axis(axis_)

        if plot_axis_labels == False:
            ax.set_xticklabels([])
            ax.set_yticklabels([])
        else:
            ax.set_xlabel("a'")
            ax.set_ylabel("b'")

        ax.plot(0, 0, color='grey', marker='+', linestyle=None, markersize=6)

        if (axtype != 'polar') & (plot_10_20_circles == True):
            r = np.array([
                0.8, 0.9, 1.1, 1.2
            ]) * scalef  # white circles at 80, 90, 110 and 120% of scalef (the 100% circle is plotted in black below)
            plotcircle(radii=r,
                       angles=np.arange(0, 365, 5),
                       color='w',
                       linestyle='-',
                       axh=ax,
                       linewidth=0.5)
            plotcircle(radii=[scalef],
                       angles=np.arange(0, 365, 5),
                       color='k',
                       linestyle='-',
                       axh=ax,
                       linewidth=1)
            ax.text(0,
                    -0.75 * scalef,
                    '-20%',
                    fontsize=8,
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='w')
            ax.text(0,
                    -1.25 * scalef,
                    '+20%',
                    fontsize=8,
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='w')

        if (axtype != 'polar') & (plot_bin_colors == True) & (_CVG_BG
                                                              is not None):
            ax.imshow(_CVG_BG, origin='upper', extent=axis_)

    return fig, ax, cmap
Example #17
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        outin='J,aM,bM',
        conditions=None,
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values 
        :Yw: 
            | None, optional
            | Luminance factor of white point.
            | If None: xyz (in data) and xyzw are entered as relative tristimulus values 
            |          (normalized to Yw = 100). 
            | If not None: input tristimulus are absolute and Yw is used to
            |              rescale the absolute values to relative ones 
            |              (relative to a reference perfect white diffuser 
            |               with Ywr = 100). 
            | Yw can be < 100 for e.g. paper as white point. If Yw is None, it 
            | is assumed that the relative Y-tristimulus value in xyzw 
            | represents the luminance factor Yw.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
        
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman (2002), 
        "The CIECAM02 color appearance model," 
        IS&T/SID Tenth Color Imaging Conference, p. 23.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS
    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals)
    mhpe = cat._MCATS['hpe']

    # chromatic adaptation sensors:
    if (mcat is None) | (mcat == 'cat02'):
        mcat = cat._MCATS['cat02']
        if yellowbluepurplecorrect == 'brill-suss':
            mcat = cat._MCATS[
                'cat02-bs']  # for yellow-blue problem, Brill [Color Res Appl 2006;31:142-145] and Brill and Süsstrunk [Color Res Appl 2008;33:424-426]
        elif yellowbluepurplecorrect == 'jiang-luo':
            mcat = cat._MCATS[
                'cat02-jiang-luo']  # for yellow-blue problem + purple line problem
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat, np.linalg.inv(mhpe))

    #--------------------------------------------
    # Set Yw:
    if Yw is not None:
        Yw = (Yw * np.ones_like(xyzw[..., 1:2]).T)
    else:
        Yw = xyzw[..., 1:2].T

    #--------------------------------------------
    # calculate condition dependent parameters:
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5
    yw = xyzw[..., 1:2].T  # original Y in xyzw (pre-transposed)

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Normalize white point (keep transpose for next step):
    xyzw = Yw * xyzw.T / yw

    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = math.dot23(mcat, xyzw)

    #--------------------------------------------
    # apply von Kries cat:
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = math.dot23(mhpe_x_invmcat, rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x,
                                         scaling=400,
                                         n=0.42,
                                         sig=27.13**(1 / 0.42),
                                         noise=0.1,
                                         forward=forward)

    pw = np.where(rgbwp < 0)

    # if requested, apply yellow-blue correction:
    if (yellowbluepurplecorrect == 'brill-suss'
        ):  # Brill & Süsstrunk approach, for yellow-blue problem
        rgbwp[pw] = 0.0
    rgbwpa = NK(FL * rgbwp / 100.0, True)
    rgbwpa[pw] = 0.1 - (NK(FL * np.abs(rgbwp[pw]) / 100.0, True) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    #===================================================================
    # STIMULUS transformations
    if forward:

        #--------------------------------------------
        # Normalize xyz (keep transpose for matrix multiplication in next step):
        xyz = (Yw / yw)[..., None] * data.T

        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, xyz)

        #--------------------------------------------
        # apply von Kries cat:
        rgbc = (
            (D * Yw / rgbw)[..., None] + (1 - D)
        ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat, rgbc).T

        #--------------------------------------------
        # apply Naka-Rushton response compression:
        p = np.where(rgbp < 0)
        if (yellowbluepurplecorrect == 'brill-suss'
            ):  # Brill & Süsstrunk approach, for yellow-blue problem
            rgbp[p] = 0.0
        rgbpa = NK(FL * rgbp / 100.0, forward)
        rgbpa[p] = 0.1 - (NK(FL * np.abs(rgbp[p]) / 100.0, forward) - 0.1)

        #--------------------------------------------
        # Calculate achromatic signal:
        A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
             (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb

        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
        b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a, b, htype='deg')
        et = (1.0 / 4.0) * (np.cos(h * np.pi / 180 + 2.0) + 3.8)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (A / Aw)**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)

        #--------------------------------------------
        # calculate chroma, C:
        t = ((50000.0 / 13.0) * Nc * Ncb * et *
             ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                          (21.0 / 20.0 * rgbpa[..., 2]))
        C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

        #--------------------------------------------
        # calculate colorfulness, M:
        M = C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)**0.5
        S = s  # make extra variable, just in case 'S' is requested in outin

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    elif forward == False:

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / ((Aw + 4.0) * (FL**0.25) * (4.0 / c)))**2.0
        else:
            raise Exception(
                'No lightness or brightness values in data. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('S' in outin[1]):
            Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)
            M = Q * (MCs / 100.0)**2.0
            C = M / (FL**0.25)

        if ('M' in outin[1]):  # convert M to C:
            C = MCs / (FL**0.25)

        if ('C' in outin[1]):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J / 100.0)**(1.0 / 2.0) * (1.64 - 0.29**n)**0.73))**(1.0 /
                                                                        0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h * np.pi / 180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h * np.pi / 180.0)
        bt = np.sin(h * np.pi / 180.0)
        p1 = (50000.0 / 13.0) * Nc * Ncb * et / t
        p2 = A / Nbb + 0.305
        p3 = 21.0 / 20.0
        p4 = p1 / bt
        p5 = p1 / at

        #--------------------------------------------
        #q = np.where(np.abs(bt) < np.abs(at))[0]
        q = (np.abs(bt) < np.abs(at))

        b = p2 * (2.0 + p3) * (460.0 / 1403.0) / (p4 + (2.0 + p3) *
                                                  (220.0 / 1403.0) *
                                                  (at / bt) -
                                                  (27.0 / 1403.0) + p3 *
                                                  (6300.0 / 1403.0))
        a = b * (at / bt)

        a[q] = p2[q] * (2.0 + p3) * (460.0 / 1403.0) / (p5[q] + (2.0 + p3) *
                                                        (220.0 / 1403.0) -
                                                        ((27.0 / 1403.0) - p3 *
                                                         (6300.0 / 1403.0)) *
                                                        (bt[q] / at[q]))
        b[q] = a[q] * (bt[q] / at[q])

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0
        gpa = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0
        bpa = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa, gpa, bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0 / FL) * NK(rgbpa, forward)

        # apply yellow-blue correction:
        if (yellowbluepurplecorrect == 'brill-suss'
            ):  # Brill & Süsstrunk approach, for yellow-blue problem
            p = np.where(rgbp < 0.0)
            rgbp[p] = 0.0

        #--------------------------------------------
        # convert from to cone sensors (hpe) cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D * Yw / rgbw)[..., None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb)

        #--------------------------------------------
        # unnormalize xyz:
        xyz = ((yw / Yw)[..., None] * xyz).T

        return xyz
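# --- Usage sketch (added; not part of the original luxpy example) ---
# Minimal forward/inverse round trip with the `run` function above. The white
# point and sample values below are illustrative assumptions (a D65-like white);
# the default viewing conditions (La=100, Yb=20, D=1, average surround) are used.
import numpy as np

xyzw_ex = np.array([[95.05, 100.0, 108.88]])      # relative white point (Yw = 100)
xyz_ex = np.array([[19.01, 20.0, 21.78],
                   [57.06, 43.06, 31.96]])        # relative sample tristimulus values

jab_ex = run(xyz_ex, xyzw=xyzw_ex, forward=True)                 # -> J, aM, bM
xyz_rec = run(jab_ex, xyzw=xyzw_ex, forward=False)               # back to xyz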
Exemplo n.º 18
0
def _get_hue_map(hbins = 16, start_hue = 0.0, 
                 hbinnrs = None, xyzri = None, xyzrw = None, cri_type = None):
    """
    Generate color map for hue bins.
    
    Args:
        :hbins:
            | 16 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :hbinnrs: 
            | None, optional
            | ndarray with hue bin number of each sample.
            | If hbinnrs, xyzri, xyzrw and cri_type are all not-None: 
            |    use these to calculate color map, otherwise just use number of
            |    hue bins :hbins: and :start_hue:
        :xyzri:
            | None, optional
            | relative xyz tristimulus values of samples under ref. illuminant.
            | see :hbinnrs: for more info when this is used.
        :xyzrw:
            | None, optional
            | relative xyz tristimulus values of ref. illuminant white point.
            | see :hbinnrs: for more info when this is used.
        :cri_type:
            | None, optional
            | Specifies dict with default cri model parameters 
            | (needed to get correct :cieobs:) 
            | see :hbinnrs: for more info when this is used.
    
    Returns:
        :cmap:
            | list with rgb values (one for each hue bin) for plotting.
    """
    # Setup hbincenters and hsv_hues:
    if isinstance(hbins, (float, int)):
        nhbins = hbins
        dhbins = 360/(nhbins) # hue bin width
        hbincenters = np.arange(start_hue + dhbins/2, 360, dhbins)
        hbincenters = np.sort(hbincenters)
    else:
        hbincenters = hbins
        idx = np.argsort(hbincenters)
        hbincenters = hbincenters[idx]
        nhbins = hbincenters.shape[0]
    
    cmap = []
    if (hbinnrs is not None) & (xyzri is not None) & (xyzrw is not None) & (cri_type is not None):
        xyzw = spd_to_xyz(_CIE_D65, relative = True, cieobs = cri_type['cieobs']['xyz'])
        xyzri = cat.apply(xyzri[:,0,:],xyzw1 = xyzrw, xyzw2 = xyzw)
                
        # Create color from xyz average:
        for i in range(nhbins):
            xyzrhi = xyzri[hbinnrs[:,0] == i,:].mean(axis=0,keepdims=True)
            rgbrhi = xyz_to_srgb(xyzrhi)/255
            cmap.append(rgbrhi)
    else:
        # Create color from hue angle:
            
        # Setup color for plotting hue bins:
        hbincenters = hbincenters*np.pi/180
        hsv_hues = hbincenters - 30*np.pi/180
        hsv_hues = hsv_hues/hsv_hues.max()
            
        for i in range(nhbins):   
            #c = np.abs(np.array(colorsys.hsv_to_rgb(hsv_hues[i], 0.75, 0.85)))
            c = np.abs(np.array(colorsys.hls_to_rgb(hsv_hues[i], 0.45, 0.5)))
            cmap.append(c)
    
    return cmap
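# --- Usage sketch (added; not part of the original example) ---
# When only the number of hue bins is given, the color map is derived purely
# from the hue angles (second branch above):
cmap16 = _get_hue_map(hbins=16, start_hue=0.0)   # list of 16 rgb triplets for plotting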
Exemplo n.º 19
0
def apply_poly_model_at_hue_x(poly_model, pmodel, dCHoverC_res, \
                              hx = None, Cxr = 40, sig = _VF_SIG):
    """
    Applies base color shift model at (hue,chroma) coordinates
    
    Args:
        :poly_model: 
            | function handle to model
        :pmodel:
            | ndarray with model parameters.
        :dCHoverC_res:
            | ndarray with residuals between 'dCoverC,dH' of samples 
            | and 'dCoverC,dH' predicted by the model.
            | Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |      (predicted from model, see notes luxpy.cri.get_poly_model())
        :hx:
            | None or ndarray, optional
            | None defaults to np.arange(np.pi/10.0,2*np.pi,2*np.pi/10.0)
        :Cxr:
            | 40, optional
        :sig: 
            | _VF_SIG or float, optional
            | Determines smooth transition between hue-bin-boundaries (no hard 
            | cutoff at hue bin boundary).
        
    Returns:
        :returns: 
            | ndarrays with dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
            | Note '_sig' denotes the uncertainty: 
            |     e.g.  dH_x_sig is the uncertainty of dH at input (hue/chroma).
    """

    if hx is None:
        dh = 2 * np.pi / 10.0
        hx = np.arange(
            dh / 2, 2 * np.pi, dh
        )  #hue angles at which to apply model, i.e. calculate 'average' measures

    # A calculate reference coordinates:
    axr = Cxr * np.cos(hx)
    bxr = Cxr * np.sin(hx)

    # B apply model at reference coordinates to obtain test coordinates:
    axt, bxt, Cxt, hxt, axr, bxr, Cxr, hxr = apply_poly_model_at_x(
        poly_model, pmodel, axr, bxr)

    # C Calculate dC/C, dH for test and ref at fixed hues:
    dCoverC_x = (Cxt - Cxr) / (np.hstack((Cxr + Cxt)).max())
    dH_x = (180 / np.pi) * (hxt - hxr)
    #    dCoverC_x = np.round(dCoverC_x,decimals = 2)
    #    dH_x = np.round(dH_x,decimals = 0)

    # D calculate 'average' noise measures using sig-value:
    href = dCHoverC_res[:, 0:1]
    dCoverC_res = dCHoverC_res[:, 1:2]
    dHoverC_res = dCHoverC_res[:, 2:3]
    dHsigi = np.exp((np.dstack(
        (np.abs(hx - href), np.abs(hx - href - 2 * np.pi),
         np.abs(hx - href + 2 * np.pi))).min(axis=2)**2) / (-2) / sig)
    dH_x_sig = (180 / np.pi) * (np.sqrt(
        (dHsigi * (dHoverC_res**2)).sum(axis=0, keepdims=True) /
        dHsigi.sum(axis=0, keepdims=True)))
    #dH_x_sig_avg = np.sqrt(np.sum(dH_x_sig**2,axis=1)/hx.shape[0])
    dCoverC_x_sig = (np.sqrt(
        (dHsigi * (dCoverC_res**2)).sum(axis=0, keepdims=True) /
        dHsigi.sum(axis=0, keepdims=True)))
    #dCoverC_x_sig_avg = np.sqrt(np.sum(dCoverC_x_sig**2,axis=1)/hx.shape[0])

    return dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
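# --- Note (added; not part of the original example) ---
# Typical call pattern (hypothetical variable names): the :poly_model:/:pmodel:
# pair and the residuals come from the base color-shift model fit referenced in
# the docstring (luxpy.cri.get_poly_model()); :hx: then defaults to ten evenly
# spaced hue angles at which the local dC/C and dH shifts are evaluated:
#   dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(
#       poly_model, pmodel, dCHoverC_res, Cxr=40)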
Exemplo n.º 20
0
def plot_tm30_Rhshj(spd, cri_type = 'ies-tm30', axh = None, 
                    xlabel = True, y_offset = 0, 
                    font_size = _TM30_FONT_SIZE, **kwargs):
    """
    Plot Local Hue Shift values (Rhshj) (one for each hue-bin).
    
    Args:
        :spd:
            | ndarray or dict
            | If ndarray: single spectral power distribution.
            | If dict: dictionary with pre-computed parameters (using _tm30_process_spd()).
            |  required keys:
            |   'Rf','Rg','cct','duv','Sr','cri_type','xyzri','xyzrw',
            |   'hbinnrs','Rfi','Rfhi','Rcshi','Rhshi',
            |   'jabt_binned','jabr_binned',
            |   'nhbins','start_hue','normalize_gamut','normalized_chroma_ref'
            | see cri.spd_to_cri() for more info on parameters.
        :cri_type:
            | _CRI_TYPE_DEFAULT or str or dict, optional
            |   -'str: specifies dict with default cri model parameters 
            |     (for supported types, see luxpy.cri._CRI_DEFAULTS['cri_types'])
            |   - dict: user defined model parameters 
            |     (see e.g. luxpy.cri._CRI_DEFAULTS['cierf'] 
            |     for required structure)
            | Note that any non-None input arguments (in kwargs) 
            | to the function will override default values in cri_type dict.
        :axh: 
            | None, optional
            | If None: create new figure with single axes, else plot on specified axes. 
        :xlabel:
            | True, optional
            | If False: don't add label and numbers to x-axis 
            | (useful when plotting all 'Local Rfhi, Rcshi, Rhshi' 
            |  values in 3x1 subplots with 'shared x-axis': saves vertical space)
        :y_offset:
            | 0, optional
            | text-offset from top of bars in barplot.
        :font_size:
            | _TM30_FONT_SIZE, optional
            | Font size of text, axis labels and axis values.
        :kwargs:
            | Additional optional keyword arguments, 
            | the same as in cri.spd_to_cri()
            
    Returns:
        :axh: 
            | handle to figure axes.
        :data:
            | dictionary with required parameters for plotting functions.     
    """

    
    data = _tm30_process_spd(spd, cri_type = cri_type, **kwargs)
    Rhshi = data['Rhshi']

    # Get color map based on sample colors:
    cmap = _get_hue_map(hbins = data['nhbins'], start_hue = data['start_hue'], 
                        hbinnrs = data['hbinnrs'], 
                        xyzri = data['xyzri'], 
                        xyzrw = data['xyzrw'], 
                        cri_type = data['cri_type'])
    
    # Plot local hue shift, Rhshi:
    hbins = range(data['nhbins'])
    if axh is None:
        fig, axh = plt.subplots(nrows = 1, ncols = 1)
    for j in hbins:
        axh.bar(hbins[j],Rhshi[j,0], color = cmap[j], width = 1,edgecolor = 'k', alpha = 1)
        ypos = ((np.abs(Rhshi[j,0]) + 0.05 + y_offset))*np.sign(Rhshi[j,0])
        axh.text(hbins[j],ypos, '{:1.2f}'.format(Rhshi[j,0]) ,fontsize = font_size,horizontalalignment='center',verticalalignment='center',color = np.array([1,1,1])*0.3, rotation = 90)
    
    xticks = np.array(hbins)
    axh.set_xticks(xticks)
    if xlabel == True:
        xtickslabels = ['{:1.0f}'.format(ii+1) for ii in hbins]
        axh.set_xlabel('Hue-Angle Bin (j)', fontsize = font_size)
    else:
        xtickslabels = ['' for _ in hbins]
    axh.set_xticklabels(xtickslabels, fontsize = font_size)
    axh.set_xlim([-0.5,data['nhbins']-0.5])
    
    axh.set_ylabel(r'Local Hue Shift $(R_{hs,hj})$', fontsize = font_size)
    axh.set_ylim([min([-0.5,Rhshi.min()]),max([0.5,Rhshi.max()])])
    
    return axh, data
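# --- Usage sketch (added; not part of the original example) ---
# `spd` is expected to be a 2xN ndarray (row 0: wavelengths in nm, row 1:
# spectral values) or a dict of pre-computed TM30 parameters. With a spectrum
# loaded from elsewhere (hypothetical variable `my_spd`):
#   axh, data = plot_tm30_Rhshj(my_spd, y_offset=0.05)
# `data` then also holds the processed TM30 quantities (Rf, Rg, Rhshi, ...)
# for further use.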
Exemplo n.º 21
0
import copy

from luxpy import (math, spd_to_xyz, xyz_to_cct, getwld, getwlr, _CMF,
                   blackbody, daylightphase, _CRI_RFL, _CRI_REF_TYPES,
                   _CRI_REF_TYPE, _CIEOBS, xyzbar, cie_interp)
from luxpy.utils import np, plt
from luxpy.color.cri.utils.DE_scalers import log_scale
from luxpy.color.cri.utils.helpers import _get_hue_bin_data

__all__ = ['_cri_ref', '_xyz_to_jab_cam02ucs',
           'spd_to_tm30']  # new or redefined

_DL = 1
_WL3 = [360, 830, _DL]
_WL = getwlr(_WL3)
_POS_WL560 = np.where(np.abs(_WL - 560.0) == np.min(np.abs(_WL - 560.0)))[0]
_TM30_SAMPLE_SET = _CRI_RFL['ies-tm30-18']['99']['{:1.0f}nm'.format(_DL)]


def _cri_ref_i(cct,
               wl3=_WL,
               ref_type='iestm30',
               mix_range=[4000, 5000],
               cieobs='1931_2',
               force_daylight_below4000K=False,
               n=None,
               daylight_locus=None):
    """
    Calculates a reference illuminant spectrum based on cct 
    for color rendering index calculations.
    """
Exemplo n.º 22
0
def apply(data, n_step = 2, catmode = None, cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\
          D = None, mcat = [_MCAT_DEFAULT], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None):
    """
    Calculate corresponding colors by applying a von Kries chromatic adaptation
    transform (CAT), i.e. independent rescaling of 'sensor sensitivity' to data
    to adapt from current adaptation conditions (1) to the new conditions (2).
    
    Args:
        :data: 
            | ndarray of tristimulus values (can be NxMx3)
        :n_step:
            | 2, optional
            | Number of step in CAT (1: 1-step, 2: 2-step)
        :catmode: 
            | None, optional
            |    - None: use :n_step: to set mode: 1 = '1>2', 2:'1>0>2'
            |    -'1>0>2': Two-step CAT 
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>2': One-step CAT
            |      from illuminant 1 to illuminant 2.
            |    -'1>0': One-step CAT 
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT 
            |      from baseline illuminant 0 to illuminant 2. 
        :cattype: 
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, optional (required or not depending on :catmode:; can be Mx3)
            | White point tristimulus values of illuminant 1.
        :xyzw2:
            | None, optional (required or not depending on :catmode:; can be Mx3)
            | White point tristimulus values of illuminant 2.
        :xyzw0:
            | None, optional (can be Mx3)
            | White point tristimulus values of baseline illuminant 0 (defaults to an equi-energy white).
        :D: 
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0]. 
        :La: 
            | None, optional
            | Adapting luminances. 
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative. 
        :F: 
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations 
            |  (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0]. 
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | [_MCAT_DEFAULT], optional
            | List[str] or List[ndarray] of sensor space matrices for each 
            |  condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0: 
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values 
            |   - 'lms': return corresponding sensor space excitation values 
            |            (e.g. for further calculations) 
      
    Returns:
          :returns: 
              | ndarray with corresponding colors
        
    Reference:
        1. `Smet, K. A. G., & Ma, S. (2020). 
        Some concerns regarding the CAT16 chromatic adaptation transform. 
        Color Research & Application, 45(1), 172–177. 
        <https://doi.org/10.1002/col.22457>`_
    """

    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:
        # Set catmode:
        if catmode is None:
            if n_step == 2:
                catmode = '1>0>2'
            elif n_step == 1:
                catmode = '1>2'
            else:
                raise Exception(
                    'cat.apply(n_step = {:1.0f}, catmode = None): Unknown requested n-step CAT mode !'
                    .format(n_step))

        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)
        else:
            target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to iLL.E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1
                                is not None):  # apply one-step CAT: 1-->0
            catmode = '1>0'  #override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, use '1>0' !"
            )

        # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:  (note D20 is degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.zeros(data.shape)
        xyzc.fill(np.nan)
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match data.shape[0]!'
            )

        for i in range(xyzc.shape[1]):
            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T
            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar1)  #get degree of adaptation depending on Dtype
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')

                D20[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar2)  #get degree of adaptation depending on Dtype

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                D10[:, i] = get_degree_of_adaptation(
                    Dtype=Dtype,
                    **Dpar12)  #get degree of adaptation depending on Dtype

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]

        return xyzc
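# --- Usage sketch (added; not part of the original example) ---
# One-step von Kries CAT from an incandescent-like white (1) to a D65-like
# white (2); the tristimulus values below are illustrative assumptions.
import numpy as np

xyz_1 = np.array([[50.0, 45.0, 30.0]])        # sample under illuminant 1
xyzw_1 = np.array([[109.85, 100.0, 35.58]])   # white point of illuminant 1 (CIE A-like)
xyzw_2 = np.array([[95.05, 100.0, 108.88]])   # white point of illuminant 2 (CIE D65-like)

xyz_2 = apply(xyz_1, catmode='1>2', xyzw1=xyzw_1, xyzw2=xyzw_2)   # corresponding colors under illuminant 2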
Exemplo n.º 23
0
def _polyarea(x, y):
    return 0.5 * np.abs(
        np.dot(x, np.roll(y, 1, axis=0)) - np.dot(y, np.roll(x, 1, axis=0)))
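# --- Usage sketch (added; not part of the original example) ---
# Shoelace formula via np.roll: the polygon is closed implicitly, so the unit
# square below evaluates to an area of 1.0.
import numpy as np

x_sq = np.array([0.0, 1.0, 1.0, 0.0])
y_sq = np.array([0.0, 0.0, 1.0, 1.0])
area = _polyarea(x_sq, y_sq)   # -> 1.0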
Exemplo n.º 24
0
def Ydlep_to_xyz(Ydlep,
                 cieobs=_CIEOBS,
                 xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False,
                 SL_max_lambda=None,
                 **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to XYZ
    tristimulus values.

    Args:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw: 
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in :Ydlep: to increase speed of loop in function.
            |          (single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on itself in the high wavelength range (~700 nm)

    Returns:
        :xyz: 
            | ndarray with tristimulus values
    """

    Ydlep3 = np3d(Ydlep).copy().astype(float)  # note: np.float is deprecated in newer numpy versions

    # flip axis so that longest dim is on first axis  (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself)
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(
            Yxysl[:, 0,
                  1])  # spectrumlocus in that range should have increasing x
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(
            dwl <= 0)[0][0]  # Take first element with zero or <zero slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom:
        #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:])) #abs because dom<0--> complemtary wl
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)

        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) ==
            sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(
            axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (
            np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0]
                                                )  # calculate y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] -
                                      180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(
            hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(
            hdom[pc] * np.pi / 180)
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    else:
        Yxy = Yxy.transpose((0, 1, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
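# --- Usage sketch (added; not part of the original example) ---
# Convert a Y, dominant-wavelength, excitation-purity triplet back to XYZ.
# The triplet below is an illustrative assumption; the module defaults are used
# for the CMF set and white point.
import numpy as np

Ydlep_ex = np.array([[50.0, 560.0, 0.6]])   # Y, dominant wavelength (nm), excitation purity
xyz_ex = Ydlep_to_xyz(Ydlep_ex)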
Exemplo n.º 25
0
def ipt_to_xyz(ipt, cieobs=_CIEOBS, xyzw=None, M=None, **kwargs):
    """
    Convert IPT color coordinates to XYZ tristimulus values.

    | I: lightness axis, P: red-green axis, T: yellow-blue axis.

    Args:
        :ipt: 
            | ndarray with IPT color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw for rescaling Mxyz2lms
            | (only when not None).
        :M: 
            | None, optional
            | None defaults to the xyz-to-lms conversion matrix determined by :cieobs:

    Returns:
        :xyz: 
            | ndarray with tristimulus values

    Note:
        The output :xyz: corresponds to D65 viewing conditions! If necessary, perform chromatic adaptation!

    Reference:
        1. `Ebner F, and Fairchild MD (1998).
           Development and testing of a color space (IPT) with improved hue uniformity.
           In IS&T 6th Color Imaging Conference, (Scottsdale, Arizona, USA), pp. 8–13.
           <http://www.ingentaconnect.com/content/ist/cic/1998/00001998/00000001/art00003?crawler=true>`_
    """
    ipt = np2d(ipt)

    # get M to convert xyz to lms and apply normalization to matrix or input your own:
    if M is None:
        M = _IPT_M['xyz2lms'][cieobs].copy(
        )  # matrix conversions from xyz to lms
        if xyzw is None:
            xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs,
                              out=1) / 100.0
        else:
            xyzw = xyzw / 100.0
        M = math.normalize_3x3_matrix(M, xyzw)

    # convert from ipt to lms':
    if len(ipt.shape) == 3:
        lmsp = np.einsum('ij,klj->kli', np.linalg.inv(_IPT_M['lms2ipt']), ipt)
    else:
        lmsp = np.einsum('ij,lj->li', np.linalg.inv(_IPT_M['lms2ipt']), ipt)

    # reverse response compression: lms' to lms
    lms = lmsp**(1.0 / 0.43)
    p = np.where(lmsp < 0.0)
    lms[p] = -np.abs(lmsp[p])**(1.0 / 0.43)

    # convert from lms to xyz:
    if np.ndim(M) == 2:
        if len(ipt.shape) == 3:
            xyz = np.einsum('ij,klj->kli', np.linalg.inv(M), lms)
        else:
            xyz = np.einsum('ij,lj->li', np.linalg.inv(M), lms)
    else:
        if len(
                ipt.shape
        ) == 3:  # second dim of lms must match dim of 1st of M and 1st dim of xyzw
            xyz = np.concatenate([
                np.einsum('ij,klj->kli', np.linalg.inv(M[i]),
                          lms[:, i:i + 1, :]) for i in range(M.shape[0])
            ],
                                 axis=1)
        else:  # first dim of lms must match dim of 1st of M and 1st dim of xyzw
            xyz = np.concatenate([
                np.einsum('ij,lj->li', np.linalg.inv(M[i]), lms[i:i + 1, :])
                for i in range(M.shape[0])
            ],
                                 axis=0)

    #xyz = np.dot(np.linalg.inv(M),lms.T).T
    xyz = xyz * 100.0
    xyz[np.where(xyz < 0.0)] = 0.0

    return xyz
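# --- Usage sketch (added; not part of the original example) ---
# Back-transform an (illustrative) IPT triplet to D65-adapted tristimulus
# values using the default observer and normalization.
import numpy as np

ipt_ex = np.array([[0.5, 0.1, -0.05]])
xyz_ex = ipt_to_xyz(ipt_ex)   # xyz rescaled to a Y range of roughly 0-100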
Exemplo n.º 26
0
def xyz_to_Ydlep_(xyz,
                  cieobs=_CIEOBS,
                  xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                  flip_axes=False,
                  **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in :xyz: to increase speed of loop in function.
            |          (single xyzw is not flipped!)
    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            |  and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # note: np.float is deprecated in newer numpy versions

    # flip axis so that longest dim is on first axis (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) >
            0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]
    # limit the spectrum locus to 700 nm (before it turns back on itself):
    maxlambda = 700
    pmaxlambda = np.where(wlsl == maxlambda)[0][0]
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')

    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max: hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    for i in range(xyz3.shape[1]):
        # find index of complementary wavelengths/hues:
        pc = np.where(
            (h[:, i] > hsl_max) & (h[:, i] < hsl_min)
        )  # hue's requiring complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(
            h[:, i][pc] - 180.0
        ) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest hues in sl:
        #hslb,hib = meshblock(hsl,h[:,i:i+1])
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue

        dominantwavelength[:, i] = wlsl[q1] + np.multiply(
            (h[:, i] - hsl[q1, 0]),
            np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))
        )  # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[(dominantwavelength[:,
                                               i] > max(wlsl[q1], wlsl[q2])),
                           i] = max(wlsl[q1], wlsl[q2])
        dominantwavelength[(dominantwavelength[:,
                                               i] < min(wlsl[q1], wlsl[q2])),
                           i] = min(wlsl[q1], wlsl[q2])

        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][
            pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[
            q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 +
                y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 +
             y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]
    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
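# --- Usage sketch (added; not part of the original example) ---
# Forward counterpart of Ydlep_to_xyz above: dominant wavelength and excitation
# purity of an (illustrative) stimulus relative to the default white point.
import numpy as np

xyz_test = np.array([[30.0, 40.0, 20.0]])
Ydlep_test = xyz_to_Ydlep_(xyz_test)   # -> Y, dominant wavelength (nm; negative = complementary), purity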
Exemplo n.º 27
0
def get_pixel_coordinates(jab,
                          jab_ranges=None,
                          jab_deltas=None,
                          limit_grid_radius=0):
    """
    Get pixel coordinates corresponding to array of jab color coordinates.
    
    Args:
        :jab: 
            | ndarray of color coordinates
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            |    (ndarray.shape = (3,3), with  first axis: J,a,b, and second 
                 axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. A ndarray of 3 deltas, uses
            | a city block sampling around each pixel center.
        :limit_grid_radius: 
            | 0, optional
            | A value of zero keeps the grid as specified by :jab_ranges:.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius: 
    
    Returns:
        :returns:
            | gridp, idxp, jabp, samplenrs, samplesIDs
            |   - :gridp: ndarray with coordinates of all pixel centers.
            |   - :idxp: list[int] with pixel index for each non-empty pixel
            |   - :jabp: ndarray with center color coordinates of non-empty pixels
            |   - :samplenrs: list[list[int]] with sample numbers belonging to each 
            |                 non-empty pixel
            |   - :sampleIDs: summarizing list, 
            |                 with column order: 'idxp, jabp, samplenrs'
    """
    if jab_deltas is None:
        jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR])
    if jab_ranges is None:
        jab_ranges = np.vstack(
            ([0, 100, jab_deltas[0]
              ], [-_VF_MAXR, _VF_MAXR + jab_deltas[1], jab_deltas[1]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[2], jab_deltas[2]]))

    # Get pixel grid:
    gridp = generate_grid(jab_ranges=jab_ranges,
                          limit_grid_radius=limit_grid_radius)

    # determine pixel coordinates of each sample in jab:
    samplesIDs = []
    for idx in range(gridp.shape[0]):

        # get pixel coordinates:
        jp = gridp[idx, 0]
        ap = gridp[idx, 1]
        bp = gridp[idx, 2]
        #Cp = np.sqrt(ap**2+bp**2)

        if isinstance(jab_deltas, np.ndarray):
            sampleID = np.where(
                ((np.abs(jab[..., 0] - jp) <= jab_deltas[0] / 2) &
                 (np.abs(jab[..., 1] - ap) <= jab_deltas[1] / 2) &
                 (np.abs(jab[..., 2] - bp) <= jab_deltas[2] / 2)))
        else:
            sampleID = np.where(
                (np.sqrt((jab[..., 0] - jp)**2 + (jab[..., 1] - ap)**2 +
                         (jab[..., 2] - bp)**2) <= jab_deltas / 2))

        if (sampleID[0].shape[0] > 0):
            samplesIDs.append(
                np.hstack((idx, np.array([jp, ap, bp]), sampleID[0])))

    idxp = [int(samplesIDs[i][0]) for i in range(len(samplesIDs))]  # note: np.int is deprecated in newer numpy versions
    jabp = np.vstack([samplesIDs[i][1:4] for i in range(len(samplesIDs))])
    samplenrs = [
        np.array(samplesIDs[i][4:], dtype=int).tolist()
        for i in range(len(samplesIDs))
    ]

    return gridp, idxp, jabp, samplenrs, samplesIDs
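# --- Usage sketch (added; not part of the original example) ---
# Pixelate a few (illustrative) jab samples with a coarse city-block grid
# (passing a 3-element jab_deltas triggers the city-block branch above):
import numpy as np

jab_ex = np.array([[50.0, 10.0, -5.0],
                   [52.0, 12.0, -4.0],
                   [80.0, -30.0, 40.0]])
gridp, idxp, jabp, samplenrs, samplesIDs = get_pixel_coordinates(
    jab_ex, jab_deltas=np.array([10.0, 10.0, 10.0]))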
Exemplo n.º 28
0
def _compute_f_stat(sample_size, num_groups, tri_idxs, distances, group_sizes,
                    s_T, grouping, subjects, paired):
    """Compute PERMANOVA Pseudo-F."""
    s_WG, s_WS, s_WG_V = _compute_s_W_S(sample_size, num_groups, tri_idxs,
                                        distances, group_sizes, grouping,
                                        subjects, paired)

    # for pseudo-F1:
    s_BG = s_T - s_WG  # = s_Effect
    dfBG = (num_groups - 1)

    if (paired == True):
        s_BS = s_T - s_WS
        s_Error = s_WS - s_BG
        dfErr = (num_groups - 1) * (len(np.unique(subjects)) - 1)
        if np.isclose(s_Error, 0, atol=1e-9):
            s_Error = np.abs(s_Error)
        if (s_Error < 0):
            print('WARNING: s_Error = {:1.4f} < 0!'.format(s_Error))
            print(
                '         s_BG = {:1.4f}, s_WG = {:1.4f}, s_BS = {:1.4f}, s_WS = {:1.4f}.'
                .format(s_BG, s_WG, s_BS, s_WS))
            print(
                '         Setting s_Error to s_WG (s_S -> 0) (cfr. paired = False)!'
            )
            s_Error = s_WG
            s_BS = np.nan
            dfErr = (sample_size - num_groups)

    else:
        s_Error = s_WG  # for pseudo-F1
        s_Error2 = s_WG_V  # for pseudo-F2
        s_BS = np.nan
        dfErr = (sample_size - num_groups)

    # test statistic, pseudo-F1:
    stat_ = (s_BG / dfBG) / (s_Error / dfErr)

    if paired == True:
        # test statistic, pseudo-F2 (equals pseudo-F1 for equal sample sizes!):
        stat = stat_
    else:
        # test statistic, pseudo-F2:
        stat = (s_BG) / (s_Error2)

    # effect sizes:
    p_eta2 = s_BG / (s_BG + s_Error)
    omega2 = (s_BG - dfBG * (s_Error / dfErr)) / (s_T - (s_Error / dfErr))
    R2 = 1.0 - 1 / (1 + stat * (dfBG / dfErr))
    #print('t:',sample_size, num_groups, (sample_size - num_groups - 1))
    R2adj = 1.0 - ((1 - R2) * (sample_size - 1) /
                   (sample_size - num_groups - 1))
    effect_sizes = {
        'p_eta2': p_eta2,
        'omega2': omega2,
        'R2': R2,
        'R2adj': R2adj
    }

    #    print('s_BG = {:1.2f}, s_WG = {:1.2f}, s_BS = {:1.2f}, s_WS = {:1.2f}, s_Err = {:1.2f} -- > s_T = {:1.2f}(Sum={:1.2f}:{:1.2f}).'.format(s_BG, s_WG, s_BS, s_WS, s_Error, s_T, s_BG + s_WG, s_BS + s_WS))

    if s_Error < 0:
        print('WARNING: s_Error = {:1.4f} <= 0!'.format(s_Error))
        print(
            '         s_BG = {:1.4f}, s_WG = {:1.4f}, s_BS = {:1.4f}, s_WS = {:1.4f}.'
            .format(s_BG, s_WG, s_BS, s_WS))
        print('         Setting F to NaN.')
        stat = np.nan
    return stat, effect_sizes
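# --- Note (added; not part of the original example) ---
# Pseudo-F1 above follows the usual PERMANOVA form
#   F = (s_BG / dfBG) / (s_Error / dfErr),
# i.e. the between-group sum of squares over its degrees of freedom divided by
# the error term over its degrees of freedom; for unpaired designs a pseudo-F2
# variant based on s_WG_V is returned instead (the code notes both coincide for
# equal sample sizes).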