def positive_arctan(x, y, htype='deg'):
    """
    Calculate positive angle (0°-360° or 0 - 2*pi rad.) from x and y.

    Args:
        :x:
            | ndarray of x-coordinates
        :y:
            | ndarray of y-coordinates
        :htype:
            | 'deg' or 'rad', optional
            |   - 'deg': hue angle between 0° and 360°
            |   - 'rad': hue angle between 0 and 2pi radians

    Returns:
        :returns:
            | ndarray of positive angles.
    """
    if htype == 'deg':
        r2d = 180.0 / np.pi
        h360 = 360.0
    else:
        r2d = 1.0
        h360 = 2.0 * np.pi
    h = np.atleast_1d((np.arctan2(y, x) * r2d))
    h[np.where(h < 0)] = h[np.where(h < 0)] + h360
    return h
def positive_arctan(x, y, htype='deg'):
    if htype == 'deg':
        r2d = 180.0 / np.pi
        h360 = 360.0
    else:
        r2d = 1.0
        h360 = 2.0 * np.pi
    h = np.atleast_1d((np.arctan2(y, x) * r2d))
    h[np.where(h < 0)] = h[np.where(h < 0)] + h360
    return h
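# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only, not part of the original
# module): a quick check of the angle-wrapping behaviour of positive_arctan()
# defined above. Assumes only numpy.
def _demo_positive_arctan():
    import numpy as np
    x = np.array([1.0, 0.0, -1.0, 0.0])
    y = np.array([0.0, 1.0, 0.0, -1.0])
    # degrees: angles are wrapped into [0, 360):
    print(positive_arctan(x, y, htype='deg'))   # -> [  0.  90. 180. 270.]
    # radians: angles are wrapped into [0, 2*pi):
    print(positive_arctan(x, y, htype='rad'))   # -> [0, pi/2, pi, 3*pi/2]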
def xyz_to_srgb(xyz, gamma=2.4, **kwargs):
    """
    Calculates IEC:61966 sRGB values from xyz.

    Args:
        :xyz:
            | ndarray with relative tristimulus values.
        :gamma:
            | 2.4, optional
            | compression in sRGB

    Returns:
        :rgb:
            | ndarray with R,G,B values (uint8).
    """
    xyz = np2d(xyz)

    # define 3x3 matrix
    M = np.array([[3.2404542, -1.5371385, -0.4985314],
                  [-0.9692660, 1.8760108, 0.0415560],
                  [0.0556434, -0.2040259, 1.0572252]])

    if len(xyz.shape) == 3:
        srgb = np.einsum('ij,klj->kli', M, xyz / 100)
    else:
        srgb = np.einsum('ij,lj->li', M, xyz / 100)

    # perform clipping:
    srgb[np.where(srgb > 1)] = 1
    srgb[np.where(srgb < 0)] = 0

    # test for the dark colours in the non-linear part of the function:
    dark = np.where(srgb <= 0.0031308)

    # apply gamma function:
    g = 1 / gamma

    # and scale to range 0-255:
    rgb = srgb.copy()
    rgb = (1.055 * rgb**g - 0.055) * 255

    # non-linear bit for dark colours
    rgb[dark] = (srgb[dark].copy() * 12.92) * 255

    # clip to range:
    rgb[rgb > 255] = 255
    rgb[rgb < 0] = 0

    return rgb
def getMonteCarloParam(n_obs=1, stdDevAllParam=_INDVCMF_STD_DEV_ALL_PARAM.copy()):
    """
    Get dict with normally-distributed physiological factors
    for a population of observers.

    Args:
        :n_obs:
            | 1, optional
            | Number of individual observers in population.
        :stdDevAllParam:
            | _INDVCMF_STD_DEV_ALL_PARAM, optional
            | Dict with parameters for:
            |     ['od_lens', 'od_macula',
            |      'od_L', 'od_M', 'od_S',
            |      'shft_L', 'shft_M', 'shft_S']

    Returns:
        :returns:
            | dict with n_obs randomly drawn parameters.
    """
    varParam = {}
    for k in list(stdDevAllParam.keys()):
        varParam[k] = stdDevAllParam[k] * np.random.randn(n_obs)

        # limit varAllParam so that it doesn't create negative val for
        # lens, macula, pkod_LMS:
        if (k == 'od_lens') | (k == 'od_macula') | (k == 'od_L') | (
                k == 'od_M') | (k == 'od_S'):
            varParam[k][np.where(varParam[k] < -100)] = -100

    return varParam
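# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): draw physiological factors for
# a small population with getMonteCarloParam(). A custom std-dev dict is passed
# in, so the module constant _INDVCMF_STD_DEV_ALL_PARAM is not needed; the
# numbers below are placeholders, not necessarily the library defaults.
def _demo_getMonteCarloParam():
    import numpy as np
    std_devs = {'od_lens': 18.7, 'od_macula': 36.5,
                'od_L': 9.0, 'od_M': 9.0, 'od_S': 7.4,
                'shft_L': 2.0, 'shft_M': 1.5, 'shft_S': 1.3}
    np.random.seed(0)  # reproducible draws
    varParam = getMonteCarloParam(n_obs=5, stdDevAllParam=std_devs)
    print(varParam['od_lens'].shape)   # -> (5,): one draw per simulated observer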
def todim(x, tshape, add_axis=1, equal_shape=False):
    """
    Expand x to dims that are broadcast-compatible with the shape of another array.

    Args:
        :x:
            | ndarray
        :tshape:
            | tuple with target shape
        :add_axis:
            | 1, optional
            | Determines where in x.shape an axis should be added
        :equal_shape:
            | False or True, optional
            | True: expand :x: to identical dimensions (specified by :tshape:)

    Returns:
        :returns:
            | ndarray broadcast-compatible with tshape.
    """
    if x is None:
        return np.broadcast_arrays(x, np.ones(tshape))[0]
    else:
        x = np2d(x)
        sx = x.shape
        lsx = len(sx)
        ltshape = len(tshape)
        if (sx == tshape):
            pass
        else:
            if ((lsx == 1) | (sx == (1, tshape[-1])) | (sx == (tshape[-1], 1))):
                if (sx == (tshape[-1], 1)):
                    x = x.T
                if lsx != ltshape:
                    x = np.expand_dims(x, 0)
            elif (lsx == 2):
                if (ltshape == 3):
                    sd = np.setdiff1d(tshape, sx, assume_unique=True)
                    if len(sd) == 0:
                        ax = add_axis
                    else:
                        ax = np.where(tshape == sd)[0][0]
                    x = np.expand_dims(x, ax)
                else:
                    raise Exception(
                        "todim(x,tshape): dimensions do not match for 2d arrays.")
            else:
                raise Exception(
                    "todim(x,tshape): no matching dimensions between 3d x and tshape.")
        if equal_shape == False:
            return x
        else:
            return np.ones(tshape) * x  # make dims of x equal to those of tshape
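# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): broadcasting a single white
# point to the shape of a data array with todim(). Assumes numpy and the np2d
# helper used inside todim() are available.
def _demo_todim():
    import numpy as np
    xyzw = np.array([[95.04, 100.0, 108.88]])              # single white point, shape (1, 3)
    xyz = np.ones((5, 3))                                   # target data, shape (5, 3)
    print(todim(xyzw, xyz.shape).shape)                     # -> (1, 3): broadcast-compatible
    print(todim(xyzw, xyz.shape, equal_shape=True).shape)   # -> (5, 3): fully expanded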
def luv_to_xyz(luv, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*u*v* (CIELUV) coordinates to XYZ tristimulus values.

    Args:
        :luv:
            | ndarray with CIE 1976 L*u*v* (CIELUV) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz:
            | ndarray with tristimulus values
    """
    luv = np2d(luv)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # Make xyzw same shape as luv:
    Yuvw = todim(xyz_to_Yuv(xyzw), luv.shape, equal_shape=True)

    # Get Yw, uw, vw:
    Yw, uw, vw = asplit(Yuvw)

    # calculate u'v' from u*,v*:
    L, u, v = asplit(luv)
    up, vp = [(x / (13 * L)) + xw for (x, xw) in ((u, uw), (v, vw))]
    up[np.where(L == 0.0)] = 0.0
    vp[np.where(L == 0.0)] = 0.0

    fy = (L + 16.0) / 116.0
    Y = Yw * (fy**3.0)

    p = np.where((Y / Yw) < ((6.0 / 29.0)**3.0))
    Y[p] = Yw[p] * (L[p] / ((29.0 / 3.0)**3.0))

    return Yuv_to_xyz(ajoin((Y, up, vp)))
def lab_to_xyz(lab, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*a*b* (CIELAB) color coordinates to XYZ tristimulus values.

    Args:
        :lab:
            | ndarray with CIE 1976 L*a*b* (CIELAB) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz:
            | ndarray with tristimulus values
    """
    lab = np2d(lab)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # make xyzw same shape as data:
    xyzw = xyzw * np.ones(lab.shape)

    # set knee point of function:
    k = (24 / 116)  # knee point: ((24/116)**3)**(1/3) = 24/116

    # get L*, a*, b* and Xw, Yw, Zw:
    L, a, b = asplit(lab)
    Xw, Yw, Zw = asplit(xyzw)

    fy = (L + 16.0) / 116.0
    fx = a / 500.0 + fy
    fz = fy - b / 200.0

    # apply 3rd power:
    X, Y, Z = [xw * (x**3.0) for (x, xw) in ((fx, Xw), (fy, Yw), (fz, Zw))]

    # Now calculate T where T/Tn is below the knee point:
    p, q, r = [np.where(x < k) for x in (fx, fy, fz)]
    X[p], Y[q], Z[r] = [np.squeeze(xw[xp] * ((x[xp] - 16.0 / 116.0) / (841 / 108)))
                        for (x, xw, xp) in ((fx, Xw, p), (fy, Yw, q), (fz, Zw, r))]

    return ajoin((X, Y, Z))
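# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): CIELAB to XYZ with an explicit
# white point, so the spd_to_xyz / _CIE_ILLUMINANTS defaults are not needed.
# Assumes numpy and the module helpers np2d, asplit and ajoin.
def _demo_lab_to_xyz():
    import numpy as np
    xyzw = np.array([[95.047, 100.0, 108.883]])     # approximate D65 white point
    lab = np.array([[50.0, 20.0, -30.0],
                    [96.0, -1.0, 2.0]])
    xyz = lab_to_xyz(lab, xyzw=xyzw)
    print(xyz.shape)                                # -> (2, 3)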
def naka_rushton(data, sig=2.0, n=0.73, scaling=1.0, noise=0.0, forward=True):
    """
    Apply a Naka-Rushton response compression (n) and an adaptive shift (sig).

    | NK(x) = sign(x) * scaling * ((abs(x)**n) / ((abs(x)**n) + (sig**n))) + noise

    Args:
        :data:
            | float or ndarray
        :sig:
            | 2.0, optional
            | Semi-saturation constant. Value for which NK(:data:) is 1/2
        :n:
            | 0.73, optional
            | Compression power.
        :scaling:
            | 1.0, optional
            | Maximum value of NK-function.
        :noise:
            | 0.0, optional
            | Cone excitation noise.
        :forward:
            | True, optional
            | True: do NK(x)
            | False: do NK(x)**(-1).

    Returns:
        :returns:
            | float or ndarray with NK-(de)compressed input :data:
    """
    if forward:
        return np.sign(data) * scaling * ((np.abs(data)**n) /
                                          ((np.abs(data)**n) + (sig**n))) + noise
    elif forward == False:
        Ip = sig * (((np.abs(np.abs(data) - noise)) /
                     (scaling - np.abs(np.abs(data) - noise))))**(1 / n)
        if not np.isscalar(Ip):
            p = np.where(np.abs(data) < noise)
            Ip[p] = -Ip[p]
        else:
            if np.abs(data) < noise:
                Ip = -Ip
        return Ip
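# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): forward/inverse consistency of
# naka_rushton(). Assumes only numpy.
def _demo_naka_rushton():
    import numpy as np
    x = np.array([0.1, 1.0, 5.0, 50.0])
    y = naka_rushton(x, sig=2.0, n=0.73, scaling=1.0, noise=0.0, forward=True)
    x_back = naka_rushton(y, sig=2.0, n=0.73, scaling=1.0, noise=0.0, forward=False)
    print(np.allclose(x, x_back))   # -> True: decompression inverts compression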
def recombination(Xp, Xm, options):
    """
    Performs recombination in the individuals.

    The recombination combines the information of the parents and the mutated
    individuals (also called "trial vectors") to create the offspring. Assuming
    x represents the i-th parent, and u the i-th trial vector (obtained from
    the mutation), the offspring xo will have the following j-th coordinate:
        xo_j = u_j if rand_j <= CR,
               x_j otherwise
    wherein rand_j is a number drawn from a uniform distribution from 0 to 1,
    and CR is called the crossover factor. To prevent mere copies, at least one
    coordinate is guaranteed to belong to the trial vector.

    Args:
        :Xp:
            | an n x mu ndarray with the mu parents
        :Xm:
            | an n x mu ndarray with the mu mutated points
        :options:
            | the dict with the internal parameters

    Returns:
        :Xo:
            | an n x mu ndarray with the recombined points (offspring)
    """
    # Draws random numbers and checks whether they are smaller or
    # greater than CR:
    n = Xp.shape[0]  # dimension of the problem
    aux = np.random.rand(n, options['mu']) <= options['CR']

    # Now assures at least one coordinate will be changed, that is,
    # there is at least one 'true' in each column:
    auxs = aux.sum(axis=0) == 0  # gets the columns with no trues
    indc = np.where(auxs)[0]  # get the number of the columns
    indr = np.random.randint(0, n, auxs.sum())  # define random indexes of rows
    #ind = np.ravel_multi_index((indr,indc),(n, options['mu'])) #converts to lin. indexes
    aux[indr, indc] = True

    # Finally, creates the offspring
    # (copy, so the parent population is not overwritten in place):
    Xo = Xp.copy()
    Xo[aux] = Xm[aux]
    return Xo
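# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): one recombination step for a
# small differential-evolution population. The 'mu' and 'CR' keys follow the
# function's own usage of the options dict. Assumes only numpy.
def _demo_recombination():
    import numpy as np
    n, mu = 4, 6                              # problem dimension, population size
    options = {'mu': mu, 'CR': 0.8}
    Xp = np.random.rand(n, mu)                # parents
    Xm = np.random.rand(n, mu)                # mutated trial vectors
    Xo = recombination(Xp, Xm, options)
    print(Xo.shape)                           # -> (4, 6)
    # each column mixes parent and trial coordinates, with at least one
    # coordinate guaranteed to come from the trial vector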
def srgb_to_xyz(rgb, gamma=2.4, **kwargs):
    """
    Calculates xyz from IEC:61966 sRGB values.

    Args:
        :rgb:
            | ndarray with srgb values (uint8).
        :gamma:
            | 2.4, optional
            | compression in sRGB

    Returns:
        :xyz:
            | ndarray with relative tristimulus values.
    """
    rgb = np2d(rgb)

    # define 3x3 matrix
    M = np.array([[0.4124564, 0.3575761, 0.1804375],
                  [0.2126729, 0.7151522, 0.0721750],
                  [0.0193339, 0.1191920, 0.9503041]])

    # scale device coordinates:
    sRGB = rgb / 255

    # test for the non-linear (dark) part of the conversion:
    # (0.04045 = 12.92 * 0.0031308 is the decoding threshold in the encoded
    #  domain; the older sRGB draft used 0.03928)
    nonlin = np.where(sRGB < 0.04045)

    # apply inverse gamma function to convert to linear values:
    srgb = sRGB.copy()
    srgb = ((srgb + 0.055) / 1.055)**gamma
    srgb[nonlin] = sRGB[nonlin] / 12.92

    if len(srgb.shape) == 3:
        xyz = np.einsum('ij,klj->kli', M, srgb) * 100
    else:
        xyz = np.einsum('ij,lj->li', M, srgb) * 100
    return xyz
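# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): round trip through the sRGB
# pair defined above. Assumes numpy and the np2d helper.
def _demo_srgb_roundtrip():
    import numpy as np
    xyz = np.array([[41.24, 21.26, 1.93],     # roughly the sRGB red primary
                    [95.05, 100.0, 108.9]])   # roughly D65 white
    rgb = xyz_to_srgb(xyz)
    xyz_back = srgb_to_xyz(rgb)
    print(np.round(rgb))        # red -> ~[255, 0, 0], white -> ~[255, 255, 255]
    print(np.round(xyz_back))   # close to the input, up to clipping and rounding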
def xyz_to_luv(xyz, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert XYZ tristimulus values to CIE 1976 L*u*v* (CIELUV) coordinates.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :luv:
            | ndarray with CIE 1976 L*u*v* (CIELUV) color coordinates
    """
    xyz = np2d(xyz)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # make xyzw same shape as xyz:
    xyzw = todim(xyzw, xyz.shape)

    # Calculate u',v' of test and white:
    Y, u, v = asplit(xyz_to_Yuv(xyz))
    Yw, uw, vw = asplit(xyz_to_Yuv(xyzw))

    # uv1976 to CIELUV:
    YdivYw = Y / Yw
    L = 116.0 * YdivYw**(1.0 / 3.0) - 16.0
    p = np.where(YdivYw <= (6.0 / 29.0)**3.0)
    L[p] = ((29.0 / 3.0)**3.0) * YdivYw[p]
    u = 13.0 * L * (u - uw)
    v = 13.0 * L * (v - vw)

    return ajoin((L, u, v))
def xyz_to_luv(xyz, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert XYZ tristimulus values to CIE 1976 L*u*v* (CIELUV) coordinates.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :luv:
            | ndarray with CIE 1976 L*u*v* (CIELUV) color coordinates
    """
    xyz = np2d(xyz)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # Calculate u',v' of test and white:
    Yuv = xyz_to_Yuv(xyz)
    Yuvw = xyz_to_Yuv(todim(xyzw, xyz.shape))  # todim: make xyzw same shape as xyz

    # uv1976 to CIELUV:
    luv = np.empty(xyz.shape)
    YdivYw = Yuv[..., 0] / Yuvw[..., 0]
    luv[..., 0] = 116.0 * YdivYw**(1.0 / 3.0) - 16.0
    p = np.where(YdivYw <= (6.0 / 29.0)**3.0)
    luv[..., 0][p] = ((29.0 / 3.0)**3.0) * YdivYw[p]
    luv[..., 1] = 13.0 * luv[..., 0] * (Yuv[..., 1] - Yuvw[..., 1])
    luv[..., 2] = 13.0 * luv[..., 0] * (Yuv[..., 2] - Yuvw[..., 2])

    return luv
def luv_to_xyz(luv, xyzw=None, cieobs=_CIEOBS, **kwargs):
    """
    Convert CIE 1976 L*u*v* (CIELUV) coordinates to XYZ tristimulus values.

    Args:
        :luv:
            | ndarray with CIE 1976 L*u*v* (CIELUV) color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw.

    Returns:
        :xyz:
            | ndarray with tristimulus values
    """
    luv = np2d(luv)

    if xyzw is None:
        xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs)

    # Make xyzw same shape as luv and convert to Yuv:
    Yuvw = todim(xyz_to_Yuv(xyzw), luv.shape, equal_shape=True)

    # calculate u'v' from u*,v*:
    # (keep the last axis with [..., :1] so the division broadcasts,
    #  and test L* itself (luv[..., 0]) for zero)
    Yuv = np.empty(luv.shape)
    Yuv[..., 1:3] = (luv[..., 1:3] / (13 * luv[..., :1])) + Yuvw[..., 1:3]
    Yuv[luv[..., 0] == 0, 1:3] = 0

    Yuv[..., 0] = Yuvw[..., 0] * (((luv[..., 0] + 16.0) / 116.0)**3.0)
    p = np.where((Yuv[..., 0] / Yuvw[..., 0]) < ((6.0 / 29.0)**3.0))
    Yuv[..., 0][p] = Yuvw[..., 0][p] * (luv[..., 0][p] / ((29.0 / 3.0)**3.0))

    return Yuv_to_xyz(Yuv)
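# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): CIELUV round trip with the
# xyz_to_luv / luv_to_xyz pair above. An explicit white point avoids the
# spd_to_xyz default. Assumes numpy plus the module helpers np2d, todim,
# xyz_to_Yuv and Yuv_to_xyz.
def _demo_cieluv_roundtrip():
    import numpy as np
    xyzw = np.array([[95.047, 100.0, 108.883]])    # approximate D65 white point
    xyz = np.array([[28.0, 21.0, 5.0],
                    [60.0, 70.0, 50.0]])
    luv = xyz_to_luv(xyz, xyzw=xyzw)
    xyz_back = luv_to_xyz(luv, xyzw=xyzw)
    print(np.allclose(xyz, xyz_back))              # -> True (within precision)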
def lmsb_to_xyzb(lms, fieldsize=10, out='XYZ', allow_negative_values=False):
    """
    Convert from LMS cone fundamentals to XYZ color matching functions.

    Args:
        :lms:
            | ndarray with lms cone fundamentals, optional
        :fieldsize:
            | fieldsize in degrees, optional
            | Defaults to 10°.
        :out:
            | 'xyz' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | XYZ color matching functions should not have negative values.
            | If False: xyz[xyz<0] = 0.

    Returns:
        :returns:
            | XYZ: ndarray with population XYZ color matching functions.

    Note:
        For intermediate field sizes (2° < fieldsize < 10°) a conversion matrix
        is calculated by linear interpolation between the _INDVCMF_M_2d and
        _INDVCMF_M_10d matrices.
    """
    wl = lms[None, 0]  # store wavelengths
    M = get_lms_to_xyz_matrix(fieldsize=fieldsize)

    if lms.ndim > 2:
        xyz = np.vstack((wl, math.dot23(M, lms[1:, ...], keepdims=False)))
    else:
        xyz = np.vstack((wl, np.dot(M, lms[1:, ...])))

    if allow_negative_values == False:
        xyz[np.where(xyz < 0)] = 0
    return xyz
def DE2000(xyzt, xyzr, dtype = 'xyz', DEtype = 'jab', avg = None, avg_axis = 0, out = 'DEi', xyzwt = None, xyzwr = None, KLCH = None): """ Calculate DE2000 color difference. Args: :xyzt: | ndarray with tristimulus values of test data. :xyzr: | ndarray with tristimulus values of reference data. :dtype: | 'xyz' or 'lab', optional | Specifies data type in :xyzt: and :xyzr:. :xyzwt: | None or ndarray, optional | White point tristimulus values of test data | None defaults to the one set in lx.xyz_to_lab() :xyzwr: | None or ndarray, optional | Whitepoint tristimulus values of reference data | None defaults to the one set in lx.xyz_to_lab() :DEtype: | 'jab' or str, optional | Options: | - 'jab' : calculates full color difference over all 3 dimensions. | - 'ab' : calculates chromaticity difference. | - 'j' : calculates lightness or brightness difference | (depending on :outin:). | - 'j,ab': calculates both 'j' and 'ab' options and returns them as a tuple. :KLCH: | None, optional | Weigths for L, C, H | None: default to [1,1,1] :avg: | None, optional | None: don't calculate average DE, | otherwise use function handle in :avg:. :avg_axis: | axis to calculate average over, optional :out: | 'DEi' or str, optional | Requested output. Note: For the other input arguments, see specific color space used. Returns: :returns: | ndarray with DEi [, DEa] or other as specified by :out: References: 1. `Sharma, G., Wu, W., & Dalal, E. N. (2005). The CIEDE2000 color‐difference formula: Implementation notes, supplementary test data, and mathematical observations. Color Research & Application, 30(1), 21–30. <https://doi.org/10.1002/col.20070>`_ """ if KLCH is None: KLCH = [1,1,1] if dtype == 'xyz': labt = xyz_to_lab(xyzt, xyzw = xyzwt) labr = xyz_to_lab(xyzr, xyzw = xyzwr) else: labt = xyzt labr = xyzr Lt = labt[...,0:1] at = labt[...,1:2] bt = labt[...,2:3] Ct = np.sqrt(at**2 + bt**2) #ht = cam.hue_angle(at,bt,htype = 'rad') Lr = labr[...,0:1] ar = labr[...,1:2] br = labr[...,2:3] Cr = np.sqrt(ar**2 + br**2) #hr = cam.hue_angle(at,bt,htype = 'rad') # Step 1: Cavg = (Ct + Cr)/2 G = 0.5*(1 - np.sqrt((Cavg**7.0)/((Cavg**7.0) + (25.0**7)))) apt = (1 + G)*at apr = (1 + G)*ar Cpt = np.sqrt(apt**2 + bt**2) Cpr = np.sqrt(apr**2 + br**2) Cpprod = Cpt*Cpr hpt = cam.hue_angle(apt,bt, htype = 'deg') hpr = cam.hue_angle(apr,br, htype = 'deg') hpt[(apt==0)*(bt==0)] = 0 hpr[(apr==0)*(br==0)] = 0 # Step 2: dL = np.abs(Lr - Lt) dCp = np.abs(Cpr - Cpt) dhp_ = hpr - hpt dhp = dhp_.copy() dhp[np.where(np.abs(dhp_) > 180)] = dhp[np.where(np.abs(dhp_) > 180)] - 360 dhp[np.where(np.abs(dhp_) < -180)] = dhp[np.where(np.abs(dhp_) < -180)] + 360 dhp[np.where(Cpprod == 0)] = 0 #dH = 2*np.sqrt(Cpprod)*np.sin(dhp/2*np.pi/180) dH = deltaH(dhp, Cpprod, htype = 'deg') # Step 3: Lp = (Lr + Lt)/2 Cp = (Cpr + Cpt)/2 hps = hpt + hpr hp = (hpt + hpr)/2 hp[np.where((np.abs(dhp_) > 180) & (hps < 360))] = hp[np.where((np.abs(dhp_) > 180) & (hps < 360))] + 180 hp[np.where((np.abs(dhp_) > 180) & (hps >= 360))] = hp[np.where((np.abs(dhp_) > 180) & (hps >= 360))] - 180 hp[np.where(Cpprod == 0)] = 0 T = 1 - 0.17*np.cos((hp - 30)*np.pi/180) + 0.24*np.cos(2*hp*np.pi/180) +\ 0.32*np.cos((3*hp + 6)*np.pi/180) - 0.20*np.cos((4*hp - 63)*np.pi/180) dtheta = 30*np.exp(-((hp-275)/25)**2) RC = 2*np.sqrt((Cp**7)/((Cp**7) + (25**7))) SL = 1 + ((0.015*(Lp-50)**2)/np.sqrt(20 + (Lp - 50)**2)) SC = 1 + 0.045*Cp SH = 1 + 0.015*Cp*T RT = -np.sin(2*dtheta*np.pi/180)*RC kL, kC, kH = KLCH DEi = ((dL/(kL*SL))**2 , (dCp/(kC*SC))**2 + (dH/(kH*SH))**2 + RT*(dCp/(kC*SC))*(dH/(kH*SH))) 
return _process_DEi(DEi, DEtype = DEtype, avg = avg, avg_axis = avg_axis, out = out)
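# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): DE2000 on CIELAB input, which
# keeps the example free of white-point handling. Assumes numpy and the module
# helpers used inside DE2000 (cam.hue_angle, deltaH, _process_DEi).
def _demo_DE2000():
    import numpy as np
    labt = np.array([[50.0, 2.6772, -79.7751]])    # test sample
    labr = np.array([[50.0, 0.0, -82.7485]])       # reference sample
    DEi = DE2000(labt, labr, dtype='lab')
    # for the default DEtype = 'jab' this should come out close to 2.04
    # (first pair of the Sharma et al. (2005) test data)
    print(DEi)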
def spd_to_cqs(SPD, version='v9.0', out='Qa', wl=None): """ Calculates CQS Qa (Qai) or Qf (Qfi) or Qp (Qpi) for versions v9.0 or v7.5. Args: :SPD: | ndarray with spectral data (can be multiple SPDs, first axis are the wavelengths) :version: | 'v9.0' or 'v7.5', optional :out: | 'Qa' or str, optional | Specifies requested output (e.g. 'Qa,Qai,Qf,cct,duv') :wl: | None, optional | Wavelengths (or [start, end, spacing]) to interpolate the SPDs to. | None: default to no interpolation Returns: :returns: | float or ndarray with CQS Qa for :out: 'Qa' | Other output is also possible by changing the :out: str value. References: 1. `W. Davis and Y. Ohno, “Color quality scale,” (2010), Opt. Eng., vol. 49, no. 3, pp. 33602–33616. <http://spie.org/Publications/Journal/10.1117/1.3360335>`_ """ outlist = out.split() if isinstance(version, str): cri_type = 'cqs-' + version elif isinstance(version, dict): cri_type = version # calculate DEI, labti, labri and get cspace_pars and rg_pars: DEi, labti, labri, cct, duv, cri_type = spd_to_DEi( SPD, cri_type=cri_type, out='DEi,jabt,jabr,cct,duv,cri_type', wl=wl) # further unpack cri_type: scale_fcn = cri_type['scale']['fcn'] scale_factor = cri_type['scale']['cfactor'] avg = cri_type['avg'] cri_specific_pars = cri_type['cri_specific_pars'] rg_pars = cri_type['rg_pars'] # get maxC: to limit chroma-enhancement: maxC = cri_specific_pars['maxC'] # make 3d: test_original_shape = labti.shape if len(test_original_shape) < 3: labti = labti[:, None] labri = labri[:, None] DEi = DEi[:, None] cct = cct[:, None] # calculate Rg for each spd: Qf = np.zeros((1, labti.shape[1])) Qfi = np.zeros((labti.shape[0], labti.shape[1])) if version == 'v7.5': GA = (9.2672 * (1.0e-11)) * cct**3.0 - ( 8.3959 * (1.0e-7)) * cct**2.0 + 0.00255 * cct - 1.612 elif version == 'v9.0': GA = np.ones(cct.shape) else: raise Exception('.cri.spd_to_cqs(): Unrecognized CQS version.') if ('Qf' in outlist) | ('Qfi' in outlist): # loop of light source spds for ii in range(labti.shape[1]): Qfi[:, ii] = GA[ii] * scale_fcn(DEi[:, ii], [scale_factor[0]]) Qf[:, ii] = GA[ii] * scale_fcn(avg(DEi[:, ii, None], axis=0), [scale_factor[0]]) if ('Qa' in outlist) | ('Qai' in outlist) | ('Qp' in outlist) | ( 'Qpi' in outlist): Qa = Qf.copy() Qai = Qfi.copy() Qp = Qf.copy() Qpi = Qfi.copy() # loop of light source spds for ii in range(labti.shape[1]): # calculate deltaC: deltaC = np.sqrt( np.power(labti[:, ii, 1:3], 2).sum( axis=1, keepdims=True)) - np.sqrt( np.power(labri[:, ii, 1:3], 2).sum(axis=1, keepdims=True)) # limit chroma increase: DEi_Climited = DEi[:, ii, None].copy() deltaC_Climited = deltaC.copy() if maxC is None: maxC = 10000.0 limitC = np.where(deltaC >= maxC)[0] deltaC_Climited[limitC] = maxC p_deltaC_pos = np.where(deltaC > 0.0)[0] DEi_Climited[p_deltaC_pos] = np.sqrt( DEi_Climited[p_deltaC_pos]**2.0 - deltaC_Climited[p_deltaC_pos] **2.0) # increase in chroma is not penalized! if ('Qa' in outlist) | ('Qai' in outlist): Qai[:, ii, None] = GA[ii] * scale_fcn(DEi_Climited, [scale_factor[1]]) Qa[:, ii] = GA[ii] * scale_fcn(avg(DEi_Climited, axis=0), [scale_factor[1]]) if ('Qp' in outlist) | ('Qpi' in outlist): deltaC_pos = deltaC_Climited * (deltaC_Climited >= 0.0) deltaCmu = np.mean(deltaC_Climited * (deltaC_Climited >= 0.0)) Qpi[:, ii, None] = GA[ii] * scale_fcn( (DEi_Climited - deltaC_pos), [scale_factor[2] ]) # or ?? np.sqrt(DEi_Climited**2 - deltaC_pos**2) ?? 
Qp[:, ii] = GA[ii] * scale_fcn( (avg(DEi_Climited, axis=0) - deltaCmu), [scale_factor[2]]) if ('Qg' in outlist): Qg = Qf.copy() for ii in range(labti.shape[1]): Qg[:, ii] = 100.0 * math.polyarea( labti[:, ii, 1], labti[:, ii, 2]) / math.polyarea( labri[:, ii, 1], labri[:, ii, 2] ) # calculate Rg = gamut area ratio of test and ref if out == 'Qa': return Qa else: return eval(out)
def xyz_to_ipt(xyz, cieobs=_CIEOBS, xyzw=None, M=None, **kwargs):
    """
    Convert XYZ tristimulus values to IPT color coordinates.

    | I: Lightness axis, P: red-green axis, T: yellow-blue axis.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw for rescaling M
            | (only when not None).
        :M:
            | None, optional
            | None defaults to xyz to lms conversion matrix determined by :cieobs:

    Returns:
        :ipt:
            | ndarray with IPT color coordinates

    Note:
        :xyz: is assumed to be under D65 viewing conditions!
        If necessary perform chromatic adaptation!

    Reference:
        1. `Ebner F, and Fairchild MD (1998).
        Development and testing of a color space (IPT) with improved hue uniformity.
        In IS&T 6th Color Imaging Conference, (Scottsdale, Arizona, USA), pp. 8–13.
        <http://www.ingentaconnect.com/content/ist/cic/1998/00001998/00000001/art00003?crawler=true>`_
    """
    xyz = np2d(xyz)

    # get M to convert xyz to lms and apply normalization to matrix or input your own:
    if M is None:
        M = _IPT_M['xyz2lms'][cieobs].copy()  # matrix conversions from xyz to lms
        if xyzw is None:
            xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs, out=1)[0] / 100.0
        else:
            xyzw = xyzw / 100.0
        M = math.normalize_3x3_matrix(M, xyzw)

    # get xyz and normalize to 1:
    xyz = xyz / 100.0

    # convert xyz to lms:
    if len(xyz.shape) == 3:
        lms = np.einsum('ij,klj->kli', M, xyz)
    else:
        lms = np.einsum('ij,lj->li', M, xyz)
    #lms = np.dot(M,xyz.T).T

    # response compression: lms to lms'
    lmsp = lms**0.43
    p = np.where(lms < 0.0)
    lmsp[p] = -np.abs(lms[p])**0.43

    # convert lms' to ipt coordinates:
    if len(xyz.shape) == 3:
        ipt = np.einsum('ij,klj->kli', _IPT_M['lms2ipt'], lmsp)
    else:
        ipt = np.einsum('ij,lj->li', _IPT_M['lms2ipt'], lmsp)

    return ipt
def Ydlep_to_xyz(Ydlep, cieobs=_CIEOBS, xyzw=_COLORTF_DEFAULT_WHITE_POINT, flip_axes=False, **kwargs): """ Convert Y, dominant (complementary) wavelength and excitation purity to XYZ tristimulus values. Args: :Ydlep: | ndarray with Y, dominant (complementary) wavelength and excitation purity :xyzw: | None or narray with tristimulus values of a single (!) native white point, optional | None defaults to xyz of CIE D65 using the :cieobs: observer. :cieobs: | luxpy._CIEOBS, optional | CMF set to use when calculating spectrum locus coordinates. :flip_axes: | False, optional | If True: flip axis 0 and axis 1 in Ydelep to increase speed of loop in function. | (single xyzw with is not flipped!) Returns: :xyz: | ndarray with tristimulus values """ Ydlep3 = np3d(Ydlep).copy().astype(np.float) # flip axis so that longest dim is on first axis (save time in looping): if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True): axes12flipped = True Ydlep3 = Ydlep3.transpose((1, 0, 2)) else: axes12flipped = False # convert xyzw to Yxyw: Yxyw = xyz_to_Yxy(xyzw) Yxywo = Yxyw.copy() # get spectrum locus Y,x,y and wavelengths: SL = _CMF[cieobs]['bar'] wlsl = SL[0, None].T Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None] # center on xyzw: Yxysl = Yxysl - Yxyw Yxyw = Yxyw - Yxyw #split: Y, dom, pur = asplit(Ydlep3) Yw, xw, yw = asplit(Yxyw) Ywo, xwo, ywo = asplit(Yxywo) Ysl, xsl, ysl = asplit(Yxysl) # loop over longest dim: x = np.empty(Y.shape) y = np.empty(Y.shape) for i in range(Ydlep3.shape[1]): # find closest wl's to dom: #wlslb,wlib = meshblock(wlsl,np.abs(dom[i,:])) #abs because dom<0--> complemtary wl wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl) dwl = np.abs(wlslb - wlib) q1 = dwl.argmin(axis=0) # index of closest wl dwl[q1] = 10000.0 q2 = dwl.argmin(axis=0) # index of second closest wl # calculate x,y of dom: x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * ( np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0] ) # calculate x of dom. wl y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * ( np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0] ) # calculate y of dom. wl # calculate x,y of test: d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5 # distance from white point to dom d = pur[:, i] * d_wl hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg') x[:, i] = d * np.cos(hdom * np.pi / 180.0) y[:, i] = d * np.sin(hdom * np.pi / 180.0) # complementary: pc = np.where(dom[:, i] < 0.0) hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] - 180.0) * 180.0 # get positive hue angle # calculate intersection of line through white point and test point and purple line: xy = np.vstack((x_dom_wl, y_dom_wl)).T xyw = np.vstack((xw, yw)).T xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T da = (xy - xyw) db = (xypl2 - xypl1) dp = (xyw - xypl1) T = np.array([[0.0, -1.0], [1.0, 0.0]]) dap = np.dot(da, T) denom = np.sum(dap * db, axis=1, keepdims=True) num = np.sum(dap * dp, axis=1, keepdims=True) xy_linecross = (num / denom) * db + xypl1 d_linecross = np.atleast_2d( (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0] x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos( hdom[pc] * np.pi / 180) y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin( hdom[pc] * np.pi / 180) Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo)) if axes12flipped == True: Yxy = Yxy.transpose((1, 0, 2)) else: Yxy = Yxy.transpose((0, 1, 2)) return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
def ipt_to_xyz(ipt, cieobs=_CIEOBS, xyzw=None, M=None, **kwargs):
    """
    Convert IPT color coordinates to XYZ tristimulus values.

    | I: Lightness axis, P: red-green axis, T: yellow-blue axis.

    Args:
        :ipt:
            | ndarray with IPT color coordinates
        :xyzw:
            | None or ndarray with tristimulus values of white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating xyzw for rescaling Mxyz2lms
            | (only when not None).
        :M:
            | None, optional
            | None defaults to xyz to lms conversion matrix determined by :cieobs:

    Returns:
        :xyz:
            | ndarray with tristimulus values

    Note:
        :xyz: is assumed to be under D65 viewing conditions!
        If necessary perform chromatic adaptation!

    Reference:
        1. `Ebner F, and Fairchild MD (1998).
        Development and testing of a color space (IPT) with improved hue uniformity.
        In IS&T 6th Color Imaging Conference, (Scottsdale, Arizona, USA), pp. 8–13.
        <http://www.ingentaconnect.com/content/ist/cic/1998/00001998/00000001/art00003?crawler=true>`_
    """
    ipt = np2d(ipt)

    # get M to convert xyz to lms and apply normalization to matrix or input your own:
    if M is None:
        M = _IPT_M['xyz2lms'][cieobs].copy()  # matrix conversions from xyz to lms
        if xyzw is None:
            xyzw = spd_to_xyz(_CIE_ILLUMINANTS['D65'], cieobs=cieobs, out=1) / 100.0
        else:
            xyzw = xyzw / 100.0
        M = math.normalize_3x3_matrix(M, xyzw)

    # convert from ipt to lms':
    if len(ipt.shape) == 3:
        lmsp = np.einsum('ij,klj->kli', np.linalg.inv(_IPT_M['lms2ipt']), ipt)
    else:
        lmsp = np.einsum('ij,lj->li', np.linalg.inv(_IPT_M['lms2ipt']), ipt)

    # reverse response compression: lms' to lms
    lms = lmsp**(1.0 / 0.43)
    p = np.where(lmsp < 0.0)
    lms[p] = -np.abs(lmsp[p])**(1.0 / 0.43)

    # convert from lms to xyz:
    if np.ndim(M) == 2:
        if len(ipt.shape) == 3:
            xyz = np.einsum('ij,klj->kli', np.linalg.inv(M), lms)
        else:
            xyz = np.einsum('ij,lj->li', np.linalg.inv(M), lms)
    else:
        if len(ipt.shape) == 3:  # second dim of lms must match 1st dim of M and of xyzw
            xyz = np.concatenate([
                np.einsum('ij,klj->kli', np.linalg.inv(M[i]), lms[:, i:i + 1, :])
                for i in range(M.shape[0])
            ], axis=1)
        else:  # first dim of lms must match 1st dim of M and of xyzw
            xyz = np.concatenate([
                np.einsum('ij,lj->li', np.linalg.inv(M[i]), lms[i:i + 1, :])
                for i in range(M.shape[0])
            ], axis=0)
    #xyz = np.dot(np.linalg.inv(M),lms.T).T

    xyz = xyz * 100.0
    xyz[np.where(xyz < 0.0)] = 0.0

    return xyz
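# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): IPT round trip with the
# xyz_to_ipt / ipt_to_xyz pair above. Passing xyzw explicitly avoids the
# spd_to_xyz default; input is assumed to be D65-adapted, as the docstrings
# note. Assumes numpy plus np2d, math.normalize_3x3_matrix and _IPT_M.
def _demo_ipt_roundtrip():
    import numpy as np
    xyzw = np.array([[95.047, 100.0, 108.883]])    # approximate D65 white point
    xyz = np.array([[41.0, 35.0, 10.0],
                    [19.0, 23.0, 45.0]])
    ipt = xyz_to_ipt(xyz, xyzw=xyzw)
    xyz_back = ipt_to_xyz(ipt, xyzw=xyzw)
    print(np.allclose(xyz, xyz_back, atol=1e-6))   # -> True (within precision)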
def apply(data, catmode = '1>0>2', cattype = 'vonkries', xyzw1 = None, xyzw2 = None, xyzw0 = None,\ D = None, mcat = ['cat02'], normxyz0 = None, outtype = 'xyz', La = None, F = None, Dtype = None): """ Calculate corresponding colors by applying a von Kries chromatic adaptation transform (CAT), i.e. independent rescaling of 'sensor sensitivity' to data to adapt from current adaptation conditions (1) to the new conditions (2). Args: :data: | ndarray of tristimulus values (can be NxMx3) :catmode: | '1>0>2, optional | -'1>0>2': Two-step CAT | from illuminant 1 to baseline illuminant 0 to illuminant 2. | -'1>0': One-step CAT | from illuminant 1 to baseline illuminant 0. | -'0>2': One-step CAT | from baseline illuminant 0 to illuminant 2. :cattype: | 'vonkries' (others: 'rlab', see Farchild 1990), optional :xyzw1: | None, depending on :catmode: optional (can be Mx3) :xyzw2: | None, depending on :catmode: optional (can be Mx3) :xyzw0: | None, depending on :catmode: optional (can be Mx3) :D: | None, optional | Degrees of adaptation. Defaults to [1.0, 1.0]. :La: | None, optional | Adapting luminances. | If None: xyz values are absolute or relative. | If not None: xyz are relative. :F: | None, optional | Surround parameter(s) for CAT02/CAT16 calculations (:Dtype: == 'cat02' or 'cat16') | Defaults to [1.0, 1.0]. :Dtype: | None, optional | Type of degree of adaptation function from literature | See luxpy.cat.get_degree_of_adaptation() :mcat: | ['cat02'], optional | List[str] or List[ndarray] of sensor space matrices for each condition pair. If len(:mcat:) == 1, the same matrix is used. :normxyz0: | None, optional | Set of xyz tristimulus values to normalize the sensor space matrix to. :outtype: | 'xyz' or 'lms', optional | - 'xyz': return corresponding tristimulus values | - 'lms': return corresponding sensor space excitation values | (e.g. for further calculations) Returns: :returns: | ndarray with corresponding colors """ if (xyzw1 is None) & (xyzw2 is None): return data # do nothing else: # Make data 2d: data = np2d(data) data_original_shape = data.shape if data.ndim < 3: target_shape = np.hstack((1, data.shape)) data = data * np.ones(target_shape) else: target_shape = data.shape target_shape = data.shape # initialize xyzw0: if (xyzw0 is None): # set to iLL.E xyzw0 = np2d([100.0, 100.0, 100.0]) xyzw0 = np.ones(target_shape) * xyzw0 La0 = xyzw0[..., 1, None] # Determine cat-type (1-step or 2-step) + make input same shape as data for block calculations: expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1) if ((xyzw1 is not None) & (xyzw2 is not None)): xyzw1 = xyzw1 * np.ones(target_shape) xyzw2 = xyzw2 * np.ones(target_shape) default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]] elif (xyzw2 is None) & (xyzw1 is not None): # apply one-step CAT: 1-->0 catmode = '1>0' #override catmode input xyzw1 = xyzw1 * np.ones(target_shape) default_La12 = [xyzw1[..., 1, None], La0] elif (xyzw1 is None) & (xyzw2 is not None): raise Exception( "von_kries(): cat transformation '0>2' not supported, use '1>0' !" ) # Get or set La (La == None: xyz are absolute or relative, La != None: xyz are relative): target_shape_1 = tuple(np.hstack((target_shape[:-1], 1))) La1, La2 = parse_x1x2_parameters(La, target_shape=target_shape_1, catmode=catmode, expand_2d_to_3d=expansion_axis, default=default_La12) # Set degrees of adaptation, D10, D20: (note D20 is degree of adaptation for 2-->0!!) 
D10, D20 = parse_x1x2_parameters(D, target_shape=target_shape_1, catmode=catmode, expand_2d_to_3d=expansion_axis) # Set F surround in case of Dtype == 'cat02': F1, F2 = parse_x1x2_parameters(F, target_shape=target_shape_1, catmode=catmode, expand_2d_to_3d=expansion_axis) # Make xyz relative to go to relative xyz0: if La is None: data = 100 * data / La1 xyzw1 = 100 * xyzw1 / La1 xyzw0 = 100 * xyzw0 / La0 if (catmode == '1>0>2') | (catmode == '1>2'): xyzw2 = 100 * xyzw2 / La2 # transform data (xyz) to sensor space (lms) and perform cat: xyzc = np.ones(data.shape) * np.nan mcat = np.array(mcat) if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1): mcat = np.repeat(mcat, data.shape[1], axis=0) elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1): raise Exception( 'von_kries(): mcat.shape[0] > 1 and does not match data.shape[0]!' ) for i in range(xyzc.shape[1]): # get cat sensor matrix: if mcat[i].dtype == np.float64: mcati = mcat[i] else: mcati = _MCATS[mcat[i]] # normalize sensor matrix: if normxyz0 is not None: mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0) # convert from xyz to lms: lms = np.dot(mcati, data[:, i].T).T lmsw0 = np.dot(mcati, xyzw0[:, i].T).T if (catmode == '1>0>2') | (catmode == '1>0'): lmsw1 = np.dot(mcati, xyzw1[:, i].T).T Dpar1 = dict(D=D10[:, i], F=F1[:, i], La=La1[:, i], La0=La0[:, i], order='1>0') D10[:, i] = get_degree_of_adaptation( Dtype=Dtype, **Dpar1) #get degree of adaptation depending on Dtype lmsw2 = None # in case of '1>0' if (catmode == '1>0>2'): lmsw2 = np.dot(mcati, xyzw2[:, i].T).T Dpar2 = dict(D=D20[:, i], F=F2[:, i], La=La2[:, i], La0=La0[:, i], order='0>2') D20[:, i] = get_degree_of_adaptation( Dtype=Dtype, **Dpar2) #get degree of adaptation depending on Dtype if (catmode == '1>2'): lmsw1 = np.dot(mcati, xyzw1[:, i].T).T lmsw2 = np.dot(mcati, xyzw2[:, i].T).T Dpar12 = dict(D=D10[:, i], F=F1[:, i], La=La1[:, i], La2=La2[:, i], order='1>2') D10[:, i] = get_degree_of_adaptation( Dtype=Dtype, **Dpar12) #get degree of adaptation depending on Dtype # Determine transfer function Dt: Dt = get_transfer_function(cattype=cattype, catmode=catmode, lmsw1=lmsw1, lmsw2=lmsw2, lmsw0=lmsw0, D10=D10[:, i], D20=D20[:, i], La1=La1[:, i], La2=La2[:, i]) # Perform cat: lms = np.dot(np.diagflat(Dt[0]), lms.T).T # Make xyz, lms 'absolute' again: if (catmode == '1>0>2'): lms = (La2[:, i] / La1[:, i]) * lms elif (catmode == '1>0'): lms = (La0[:, i] / La1[:, i]) * lms elif (catmode == '1>2'): lms = (La2[:, i] / La1[:, i]) * lms # transform back from sensor space to xyz (or not): if outtype == 'xyz': xyzci = np.dot(np.linalg.inv(mcati), lms.T).T xyzci[np.where(xyzci < 0)] = _EPS xyzc[:, i] = xyzci else: xyzc[:, i] = lms # return data to original shape: if len(data_original_shape) == 2: xyzc = xyzc[0] return xyzc
def get_degree_of_adaptation(Dtype=None, **kwargs):
    """
    Calculates the degree of adaptation according to some function
    published in literature.

    Args:
        :Dtype:
            | None, optional
            | If None: kwargs should contain 'D' with value.
            | If 'manual': kwargs should contain 'D' with value.
            | If 'cat02' or 'cat16': kwargs should contain keys 'F' and 'La'.
            |     Calculate D according to CAT02 or CAT16 model:
            |     D = F*(1-(1/3.6)*numpy.exp((-La-42)/92))
            | If 'cmc': kwargs should contain 'La', 'La0' (or 'La2') and 'order'
            |     for 'order' = '1>0': 'La' is set to La1 and 'La0' to La0.
            |     for 'order' = '0>2': 'La' is set to La0 and 'La0' to La1.
            |     for 'order' = '1>2': 'La' is set to La1 and 'La2' to La0.
            |     D is calculated as follows:
            |     D = 0.08*numpy.log10(La1+La0)+0.76-0.45*(La1-La0)/(La1+La0)
            | If 'smet2017': kwargs should contain 'xyzw' and 'Dmax'
            |     (see Smet2017_D for more details).
            | If "? user defined", then D is calculated by:
            |     D = ndarray(eval(:Dtype:))

    Returns:
        :D:
            | ndarray with degree of adaptation values.

    Notes:
        1. D passes either right through or D is calculated following some
           D-function (Dtype) published in literature.
        2. D is limited to values between zero and one.
        3. If kwargs do not contain the required parameters, an exception is raised.
    """
    try:
        if Dtype is None:
            PAR = ["D"]
            D = np.array([kwargs['D']])
        elif Dtype == 'manual':
            PAR = ["D"]
            D = np.array([kwargs['D']])
        elif (Dtype == 'cat02') | (Dtype == 'cat16'):
            PAR = ["F, La"]
            F = kwargs['F']

            if isinstance(F, str):  # CIECAM02 / CAT02 surround based F values
                if (F == 'avg') | (F == 'average'):
                    F = 1
                elif (F == 'dim'):
                    F = 0.9
                elif (F == 'dark'):
                    F = 0.8
                elif (F == 'disp') | (F == 'display'):
                    F = 0.0
                else:
                    F = eval(F)

            F = np.array([F])
            La = np.array([kwargs['La']])
            D = F * (1 - (1 / 3.6) * np.exp((-La - 42) / 92))
        elif Dtype == 'cmc':
            PAR = ["La, La0, order"]
            order = np.array([kwargs['order']])
            if order == '1>0':
                La1 = np.array([kwargs['La']])
                La0 = np.array([kwargs['La0']])
            elif order == '0>2':
                La0 = np.array([kwargs['La']])
                La1 = np.array([kwargs['La0']])
            elif order == '1>2':
                La1 = np.array([kwargs['La']])
                La0 = np.array([kwargs['La2']])
            D = 0.08 * np.log10(La1 + La0) + 0.76 - 0.45 * (La1 - La0) / (La1 + La0)
        elif Dtype == 'smet2017':  # (was: elif 'smet2017':, which is always True)
            PAR = ['xyzw', 'Dmax']
            xyzw = np.array([kwargs['xyzw']])
            Dmax = np.array([kwargs['Dmax']])
            D = smet2017_D(xyzw, Dmax=Dmax)
        else:
            PAR = ["? user defined"]
            D = np.array(eval(Dtype))

        D[np.where(D < 0)] = 0
        D[np.where(D > 1)] = 1

    except:
        raise Exception(
            'degree_of_adaptation_D(): **kwargs does not contain the necessary '
            'parameters ({}) for Dtype = {}'.format(PAR, Dtype))

    return D
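# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): two ways of getting a degree
# of adaptation from get_degree_of_adaptation(). Assumes only numpy.
def _demo_degree_of_adaptation():
    import numpy as np
    # CAT02-style D for an average surround and La = 100 cd/m^2:
    D = get_degree_of_adaptation(Dtype='cat02', F='avg', La=100.0)
    print(D)    # F*(1 - (1/3.6)*exp((-100-42)/92)) ~ 0.94
    # manual pass-through of a chosen D (clipped to [0, 1]):
    print(get_degree_of_adaptation(Dtype='manual', D=0.7))   # -> [0.7]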
def xyz_to_Ydlep(xyz, cieobs=_CIEOBS, xyzw=_COLORTF_DEFAULT_WHITE_POINT, flip_axes=False, **kwargs): """ Convert XYZ tristimulus values to Y, dominant (complementary) wavelength and excitation purity. Args: :xyz: | ndarray with tristimulus values :xyzw: | None or ndarray with tristimulus values of a single (!) native white point, optional | None defaults to xyz of CIE D65 using the :cieobs: observer. :cieobs: | luxpy._CIEOBS, optional | CMF set to use when calculating spectrum locus coordinates. :flip_axes: | False, optional | If True: flip axis 0 and axis 1 in Ydelep to increase speed of loop in function. | (single xyzw with is not flipped!) Returns: :Ydlep: | ndarray with Y, dominant (complementary) wavelength and excitation purity """ xyz3 = np3d(xyz).copy().astype(np.float) # flip axis so that shortest dim is on axis0 (save time in looping): if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True): axes12flipped = True xyz3 = xyz3.transpose((1, 0, 2)) else: axes12flipped = False # convert xyz to Yxy: Yxy = xyz_to_Yxy(xyz3) Yxyw = xyz_to_Yxy(xyzw) # get spectrum locus Y,x,y and wavelengths: SL = _CMF[cieobs]['bar'] wlsl = SL[0] Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None] # center on xyzw: Yxy = Yxy - Yxyw Yxysl = Yxysl - Yxyw Yxyw = Yxyw - Yxyw #split: Y, x, y = asplit(Yxy) Yw, xw, yw = asplit(Yxyw) Ysl, xsl, ysl = asplit(Yxysl) # calculate hue: h = math.positive_arctan(x, y, htype='deg') hsl = math.positive_arctan(xsl, ysl, htype='deg') hsl_max = hsl[0] # max hue angle at min wavelength hsl_min = hsl[-1] # min hue angle at max wavelength dominantwavelength = np.empty(Y.shape) purity = np.empty(Y.shape) for i in range(xyz3.shape[1]): # find index of complementary wavelengths/hues: pc = np.where( (h[:, i] >= hsl_max) & (h[:, i] <= hsl_min + 360.0) ) # hue's requiring complementary wavelength (purple line) h[:, i][pc] = h[:, i][pc] - np.sign( h[:, i][pc] - 180.0 ) * 180.0 # add/subtract 180° to get positive complementary wavelength # find 2 closest hues in sl: #hslb,hib = meshblock(hsl,h[:,i:i+1]) hib, hslb = np.meshgrid(h[:, i:i + 1], hsl) dh = np.abs(hslb - hib) q1 = dh.argmin(axis=0) # index of closest hue dh[q1] = 1000.0 q2 = dh.argmin(axis=0) # index of second closest hue dominantwavelength[:, i] = wlsl[q1] + np.divide( np.multiply((wlsl[q2] - wlsl[q1]), (h[:, i] - hsl[q1, 0])), (hsl[q2, 0] - hsl[q1, 0]) ) # calculate wl corresponding to h: y = y1 + (y2-y1)*(x-x1)/(x2-x1) dominantwavelength[:, i][pc] = -dominantwavelength[:, i][ pc] #complementary wavelengths are specified by '-' sign # calculate excitation purity: x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[ q1, 0]) / (hsl[q2, 0] - hsl[q1, 0]) # calculate x of dom. wl y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[ q1, 0]) / (hsl[q2, 0] - hsl[q1, 0]) # calculate y of dom. 
wl d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5 # distance from white point to sl d = (x[:, i]**2.0 + y[:, i]**2.0)**0.5 # distance from white point to test point purity[:, i] = d / d_wl # correct for those test points that have a complementary wavelength # calculate intersection of line through white point and test point and purple line: xy = np.vstack((x[:, i], y[:, i])).T xyw = np.hstack((xw, yw)) xypl1 = np.hstack((xsl[0, None], ysl[0, None])) xypl2 = np.hstack((xsl[-1, None], ysl[-1, None])) da = (xy - xyw) db = (xypl2 - xypl1) dp = (xyw - xypl1) T = np.array([[0.0, -1.0], [1.0, 0.0]]) dap = np.dot(da, T) denom = np.sum(dap * db, axis=1, keepdims=True) num = np.sum(dap * dp, axis=1, keepdims=True) xy_linecross = (num / denom) * db + xypl1 d_linecross = np.atleast_2d( (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T #[0] purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0] Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity)) if axes12flipped == True: Ydlep = Ydlep.transpose((1, 0, 2)) else: Ydlep = Ydlep.transpose((0, 1, 2)) return Ydlep.reshape(xyz.shape)
def get_pixel_coordinates(jab, jab_ranges=None, jab_deltas=None, limit_grid_radius=0):
    """
    Get pixel coordinates corresponding to array of jab color coordinates.

    Args:
        :jab:
            | ndarray of color coordinates
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            | (ndarray.shape = (3,3), with first axis: J,a,b, and second axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range.
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. An ndarray of 3 deltas uses a
            | city-block sampling around each pixel center.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by axr, bxr.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:

    Returns:
        :returns:
            | gridp, idxp, jabp, samplenrs, samplesIDs
            |   - :gridp: ndarray with coordinates of all pixel centers.
            |   - :idxp: list[int] with pixel index for each non-empty pixel
            |   - :jabp: ndarray with center color coordinates of non-empty pixels
            |   - :samplenrs: list[list[int]] with sample numbers belonging to each
            |     non-empty pixel
            |   - :samplesIDs: summarizing list,
            |     with column order: 'idxp, jabp, samplenrs'
    """
    if jab_deltas is None:
        jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR])
    if jab_ranges is None:
        jab_ranges = np.vstack(
            ([0, 100, jab_deltas[0]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[1], jab_deltas[1]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[2], jab_deltas[2]]))

    # Get pixel grid:
    gridp = generate_grid(jab_ranges=jab_ranges, limit_grid_radius=limit_grid_radius)

    # determine pixel coordinates of each sample in jab:
    samplesIDs = []
    for idx in range(gridp.shape[0]):

        # get pixel coordinates:
        jp = gridp[idx, 0]
        ap = gridp[idx, 1]
        bp = gridp[idx, 2]
        #Cp = np.sqrt(ap**2+bp**2)

        if isinstance(jab_deltas, np.ndarray):
            sampleID = np.where(
                ((np.abs(jab[..., 0] - jp) <= jab_deltas[0] / 2) &
                 (np.abs(jab[..., 1] - ap) <= jab_deltas[1] / 2) &
                 (np.abs(jab[..., 2] - bp) <= jab_deltas[2] / 2)))
        else:
            sampleID = np.where(
                (np.sqrt((jab[..., 0] - jp)**2 + (jab[..., 1] - ap)**2 +
                         (jab[..., 2] - bp)**2) <= jab_deltas / 2))

        if (sampleID[0].shape[0] > 0):
            samplesIDs.append(
                np.hstack((idx, np.array([jp, ap, bp]), sampleID[0])))

    # (built-in int(): np.int is deprecated/removed in recent NumPy versions)
    idxp = [int(samplesIDs[i][0]) for i in range(len(samplesIDs))]
    jabp = np.vstack([samplesIDs[i][1:4] for i in range(len(samplesIDs))])
    samplenrs = [
        np.array(samplesIDs[i][4:], dtype=int).tolist()
        for i in range(len(samplesIDs))
    ]

    return gridp, idxp, jabp, samplenrs, samplesIDs
def cie2006cmfsEx(age = 32,fieldsize = 10, wl = None,\ var_od_lens = 0, var_od_macula = 0, \ var_od_L = 0, var_od_M = 0, var_od_S = 0,\ var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,\ out = 'LMS', allow_negative_values = False): """ Generate Individual Observer CMFs (cone fundamentals) based on CIE2006 cone fundamentals and published literature on observer variability in color matching and in physiological parameters. Args: :age: | 32 or float or int, optional | Observer age :fieldsize: | 10, optional | Field size of stimulus in degrees (between 2° and 10°). :wl: | None, optional | Interpolation/extraplation of :LMS: output to specified wavelengths. | None: output original _WL = np.array([390,780,5]) :var_od_lens: | 0, optional | Std Dev. in peak optical density [%] of lens. :var_od_macula: | 0, optional | Std Dev. in peak optical density [%] of macula. :var_od_L: | 0, optional | Std Dev. in peak optical density [%] of L-cone. :var_od_M: | 0, optional | Std Dev. in peak optical density [%] of M-cone. :var_od_S: | 0, optional | Std Dev. in peak optical density [%] of S-cone. :var_shft_L: | 0, optional | Std Dev. in peak wavelength shift [nm] of L-cone. :var_shft_L: | 0, optional | Std Dev. in peak wavelength shift [nm] of M-cone. :var_shft_S: | 0, optional | Std Dev. in peak wavelength shift [nm] of S-cone. :out: | 'LMS' or , optional | Determines output. :allow_negative_values: | False, optional | Cone fundamentals or color matching functions should not have negative values. | If False: X[X<0] = 0. Returns: :returns: | - 'LMS' : ndarray with individual observer area-normalized | cone fundamentals. Wavelength have been added. | [- 'trans_lens': ndarray with lens transmission | (no wavelengths added, no interpolation) | - 'trans_macula': ndarray with macula transmission | (no wavelengths added, no interpolation) | - 'sens_photopig' : ndarray with photopigment sens. | (no wavelengths added, no interpolation)] References: 1. `Asano Y, Fairchild MD, and Blondé L (2016). Individual Colorimetric Observer Model. PLoS One 11, 1–19. <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_ 2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). Color matching experiment for highlighting interobserver variability. Color Res. Appl. 41, 530–539. <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_ 3. `CIE, and CIE (2006). Fundamental Chromaticity Diagram with Physiological Axes - Part I (Vienna: CIE). <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 4. 
`Asano's Individual Colorimetric Observer Model <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_ """ fs = fieldsize rmd = _INDVCMF_DATA['rmd'].copy() LMSa = _INDVCMF_DATA['LMSa'].copy() docul = _INDVCMF_DATA['docul'].copy() # field size corrected macular density: pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * ( 1 + var_od_macula / 100) # varied peak optical density of macula corrected_rmd = rmd * pkOd_Macula # age corrected lens/ocular media density: if (age <= 60): correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2] else: correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2] correct_lomd = correct_lomd * (1 + var_od_lens / 100 ) # varied overall optical density of lens # Peak Wavelength Shift: wl_shifted = np.empty(LMSa.shape) wl_shifted[0] = _WL + var_shft_L wl_shifted[1] = _WL + var_shft_M wl_shifted[2] = _WL + var_shft_S LMSa_shft = np.empty(LMSa.shape) kind = 'cubic' LMSa_shft[0] = interpolate.interp1d(wl_shifted[0], LMSa[0], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL) LMSa_shft[1] = interpolate.interp1d(wl_shifted[1], LMSa[1], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL) LMSa_shft[2] = interpolate.interp1d(wl_shifted[2], LMSa[2], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL) # LMSa[2,np.where(_WL >= _WL_CRIT)] = 0 #np.nan # Not defined above 620nm # LMSa_shft[2,np.where(_WL >= _WL_CRIT)] = 0 ssw = np.hstack( (0, np.sign(np.diff(LMSa_shft[2, :])) )) #detect poor interpolation (sign switch due to instability) LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan # corrected LMS (no age correction): pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * ( 1 + var_od_L / 100) # varied peak optical density of L-cone pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * ( 1 + var_od_M / 100) # varied peak optical density of M-cone pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * ( 1 + var_od_S / 100) # varied peak optical density of S-cone alpha_lms = 0. * LMSa_shft alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0])) alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1])) alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2])) # this fix is required because the above math fails for alpha_lms[2,:]==0 alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0 # Corrected to Corneal Incidence: lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones( alpha_lms.shape) # Corrected to Energy Terms: lms_bar = lms_barq * _WL # Set NaN values to zero: lms_bar[np.isnan(lms_bar)] = 0 # normalized: LMS = 100 * lms_bar / np.nansum(lms_bar, axis=1, keepdims=True) # Output extra: trans_lens = 10**(-correct_lomd) trans_macula = 10**(-corrected_rmd) sens_photopig = alpha_lms * _WL # Add wavelengths: LMS = np.vstack((_WL, LMS)) if ('xyz' in out.lower().split(',')): LMS = lmsb_to_xyzb(LMS, fieldsize, out='xyz', allow_negative_values=allow_negative_values) out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS') if ('lms' in out.lower().split(',')): out = out.replace('lms', 'LMS') # Interpolate/extrapolate: if wl is None: interpolation = None else: interpolation = 'cubic' LMS = spd(LMS, wl=wl, interpolation=interpolation, norm_type='area') if (out == 'LMS'): return LMS elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'): return LMS, trans_lens, trans_macula, sens_photopig elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'): return LMS, trans_lens, trans_macula, sens_photopig, LMSa else: return eval(out)
def run(data, xyzw, out = 'J,aM,bM', conditions = None, forward = True): """ Run CIECAM02 color appearance model in forward or backward modes. Args: :data: | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode) :xyzw: | ndarray with relative white point tristimulus values :conditions: | None, optional | Dictionary with viewing conditions. | None results in: | {'La':100, 'Yb':20, 'D':1, 'surround':'avg'} | For more info see luxpy.cam.ciecam02()? :forward: | True, optional | If True: run in CAM in forward mode, else: inverse mode. :out: | 'J,aM,bM', optional | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode] | String with inputs in data. | Input must have data.shape[-1]==3 and last dim of data must have | the following structure: | * data[...,0] = J or Q, | * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) Returns: :camout: | ndarray with Jab coordinates or whatever correlates requested in out. Note: * This is a simplified, less flexible, but faster version than the main ciecam02(). References: 1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman, (2002), "The CIECAM02 color appearance model,” IS&T/SID Tenth Color Imaging Conference. p. 23, 2002. <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_ """ outin = out.split(',') if isinstance(out,str) else out #-------------------------------------------- # Get/ set conditions parameters: if conditions is not None: surround_parameters = {'surrounds': ['avg', 'dim', 'dark'], 'avg' : {'c':0.69, 'Nc':1.0, 'F':1.0,'FLL': 1.0}, 'dim' : {'c':0.59, 'Nc':0.9, 'F':0.9,'FLL':1.0} , 'dark' : {'c':0.525, 'Nc':0.8, 'F':0.8,'FLL':1.0}} La = conditions['La'] Yb = conditions['Yb'] D = conditions['D'] surround = conditions['surround'] if isinstance(surround, str): surround = surround_parameters[conditions['surround']] F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())] else: # set defaults: La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69 #-------------------------------------------- # Define sensor space and cat matrices: mhpe = np.array([[0.38971,0.68898,-0.07868], [-0.22981,1.1834,0.04641], [0.0,0.0,1.0]]) # Hunt-Pointer-Estevez sensors (cone fundamentals) mcat = np.array([[0.7328, 0.4296, -0.1624], [ -0.7036, 1.6975, 0.0061], [ 0.0030, 0.0136, 0.9834]]) # CAT02 sensor space #-------------------------------------------- # pre-calculate some matrices: invmcat = np.linalg.inv(mcat) mhpe_x_invmcat = np.dot(mhpe,invmcat) if not forward: mcat_x_invmhpe = np.dot(mcat,np.linalg.inv(mhpe)) #-------------------------------------------- # calculate condition dependent parameters: Yw = xyzw[...,1:2].T k = 1.0 / (5.0*La + 1.0) FL = 0.2*(k**4.0)*(5.0*La) + 0.1*((1.0 - k**4.0)**2.0)*((5.0*La)**(1.0/3.0)) # luminance adaptation factor n = Yb/Yw Nbb = 0.725*(1/n)**0.2 Ncb = Nbb z = 1.48 + FLL*n**0.5 if D is None: D = F*(1.0-(1.0/3.6)*np.exp((-La-42.0)/92.0)) #=================================================================== # WHITE POINT transformations (common to forward and inverse modes): #-------------------------------------------- # transform from xyzw to cat sensor space: rgbw = mcat @ xyzw.T #-------------------------------------------- # apply von Kries cat: rgbwc = ((D*Yw/rgbw) + (1 - D))*rgbw # factor 100 from ciecam02 is replaced with Yw[i] in cam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 ni 3ed.) 
#-------------------------------------------- # convert from cat02 sensor space to cone sensors (hpe): rgbwp = (mhpe_x_invmcat @ rgbwc).T #-------------------------------------------- # apply Naka_rushton repsonse compression to white: NK = lambda x, forward: naka_rushton(x, scaling = 400, n = 0.42, sig = 27.13**(1/0.42), noise = 0.1, forward = forward) rgbwpa = NK(FL*rgbwp/100.0, True) pw = np.where(rgbwp<0) rgbwpa[pw] = 0.1 - (NK(FL*np.abs(rgbwp[pw])/100.0, True) - 0.1) #-------------------------------------------- # Calculate achromatic signal of white: Aw = (2.0*rgbwpa[...,0] + rgbwpa[...,1] + (1.0/20.0)*rgbwpa[...,2] - 0.305)*Nbb # massage shape of data for broadcasting: if data.ndim == 2: data = data[:,None] #=================================================================== # STIMULUS transformations if forward: #-------------------------------------------- # transform from xyz to cat sensor space: rgb = math.dot23(mcat, data.T) #-------------------------------------------- # apply von Kries cat: rgbc = ((D*Yw/rgbw)[...,None] + (1 - D))*rgb # factor 100 from ciecam02 is replaced with Yw[i] in cam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 ni 3ed.) #-------------------------------------------- # convert from cat02 sensor space to cone sensors (hpe): rgbp = math.dot23(mhpe_x_invmcat,rgbc).T #-------------------------------------------- # apply Naka_rushton repsonse compression: rgbpa = NK(FL*rgbp/100.0, forward) p = np.where(rgbp<0) rgbpa[p] = 0.1 - (NK(FL*np.abs(rgbp[p])/100.0, forward) - 0.1) #-------------------------------------------- # Calculate achromatic signal: A = (2.0*rgbpa[...,0] + rgbpa[...,1] + (1.0/20.0)*rgbpa[...,2] - 0.305)*Nbb #-------------------------------------------- # calculate initial opponent channels: a = rgbpa[...,0] - 12.0*rgbpa[...,1]/11.0 + rgbpa[...,2]/11.0 b = (1.0/9.0)*(rgbpa[...,0] + rgbpa[...,1] - 2.0*rgbpa[...,2]) #-------------------------------------------- # calculate hue h and eccentricity factor, et: h = hue_angle(a,b, htype = 'deg') et = (1.0/4.0)*(np.cos(h*np.pi/180 + 2.0) + 3.8) #-------------------------------------------- # calculate Hue quadrature (if requested in 'out'): if 'H' in outin: H = hue_quadrature(h, unique_hue_data = 'ciecam02') else: H = None #-------------------------------------------- # calculate lightness, J: if ('J' in outin) | ('Q' in outin) | ('C' in outin) | ('M' in outin) | ('s' in outin) | ('aS' in outin) | ('aC' in outin) | ('aM' in outin): J = 100.0* (A / Aw)**(c*z) #-------------------------------------------- # calculate brightness, Q: if ('Q' in outin) | ('s' in outin) | ('aS' in outin): Q = (4.0/c)* ((J/100.0)**0.5) * (Aw + 4.0)*(FL**0.25) #-------------------------------------------- # calculate chroma, C: if ('C' in outin) | ('M' in outin) | ('s' in outin) | ('aS' in outin) | ('aC' in outin) | ('aM' in outin): t = ((50000.0/13.0)*Nc*Ncb*et*((a**2.0 + b**2.0)**0.5)) / (rgbpa[...,0] + rgbpa[...,1] + (21.0/20.0*rgbpa[...,2])) C = (t**0.9)*((J/100.0)**0.5) * (1.64 - 0.29**n)**0.73 #-------------------------------------------- # calculate colorfulness, M: if ('M' in outin) | ('s' in outin) | ('aM' in outin) | ('aS' in outin): M = C*FL**0.25 #-------------------------------------------- # calculate saturation, s: if ('s' in outin) | ('aS' in outin): s = 100.0* (M/Q)**0.5 #-------------------------------------------- # calculate cartesian coordinates: if ('aS' in outin): aS = s*np.cos(h*np.pi/180.0) bS = s*np.sin(h*np.pi/180.0) if ('aC' in outin): aC = C*np.cos(h*np.pi/180.0) bC = 
C*np.sin(h*np.pi/180.0) if ('aM' in outin): aM = M*np.cos(h*np.pi/180.0) bM = M*np.sin(h*np.pi/180.0) #-------------------------------------------- if outin != ['J','aM','bM']: camout = eval('ajoin(('+','.join(outin)+'))') else: camout = ajoin((J,aM,bM)) if camout.shape[1] == 1: camout = camout[:,0,:] return camout elif forward == False: #-------------------------------------------- # Get Lightness J from data: if ('J' in outin): J = data[...,0].copy() elif ('Q' in outin): Q = data[...,0].copy() J = 100.0*(Q / ((Aw + 4.0)*(FL**0.25)*(4.0/c)))**2.0 else: raise Exception('No lightness or brightness values in data. Inverse CAM-transform not possible!') #-------------------------------------------- # calculate hue h: h = hue_angle(data[...,1],data[...,2], htype = 'deg') #-------------------------------------------- # calculate Colorfulness M or Chroma C or Saturation s from a,b: MCs = (data[...,1]**2.0 + data[...,2]**2.0)**0.5 if ('aS' in outin): Q = (4.0/c)* ((J/100.0)**0.5) * (Aw + 4.0)*(FL**0.25) M = Q*(MCs/100.0)**2.0 C = M/(FL**0.25) if ('aM' in outin): # convert M to C: C = MCs/(FL**0.25) if ('aC' in outin): C = MCs #-------------------------------------------- # calculate t from J, C: t = (C / ((J/100.0)**(1.0/2.0) * (1.64 - 0.29**n)**0.73))**(1.0/0.9) #-------------------------------------------- # calculate eccentricity factor, et: et = (np.cos(h*np.pi/180.0 + 2.0) + 3.8) / 4.0 #-------------------------------------------- # calculate achromatic signal, A: A = Aw*(J/100.0)**(1.0/(c*z)) #-------------------------------------------- # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5: at = np.cos(h*np.pi/180.0) bt = np.sin(h*np.pi/180.0) p1 = (50000.0/13.0)*Nc*Ncb*et/t p2 = A/Nbb + 0.305 p3 = 21.0/20.0 p4 = p1/bt p5 = p1/at #-------------------------------------------- #q = np.where(np.abs(bt) < np.abs(at))[0] q = (np.abs(bt) < np.abs(at)) b = p2*(2.0 + p3) * (460.0/1403.0) / (p4 + (2.0 + p3) * (220.0/1403.0) * (at/bt) - (27.0/1403.0) + p3*(6300.0/1403.0)) a = b * (at/bt) a[q] = p2[q]*(2.0 + p3) * (460.0/1403.0) / (p5[q] + (2.0 + p3) * (220.0/1403.0) - ((27.0/1403.0) - p3*(6300.0/1403.0)) * (bt[q]/at[q])) b[q] = a[q] * (bt[q]/at[q]) #-------------------------------------------- # calculate post-adaptation values rpa = (460.0*p2 + 451.0*a + 288.0*b) / 1403.0 gpa = (460.0*p2 - 891.0*a - 261.0*b) / 1403.0 bpa = (460.0*p2 - 220.0*a - 6300.0*b) / 1403.0 #-------------------------------------------- # join values: rgbpa = ajoin((rpa,gpa,bpa)) #-------------------------------------------- # decompress signals: rgbp = (100.0/FL)*NK(rgbpa, forward) #-------------------------------------------- # convert from to cone sensors (hpe) cat02 sensor space: rgbc = math.dot23(mcat_x_invmhpe,rgbp.T) #-------------------------------------------- # apply inverse von Kries cat: rgb = rgbc / ((D*Yw/rgbw)[...,None] + (1.0 - D)) #-------------------------------------------- # transform from cat sensor space to xyz: xyz = math.dot23(invmcat,rgb).T return xyz
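# ---------------------------------------------------------------------------
# Editor-added usage sketch (illustrative only): forward and inverse CIECAM02
# with run(). Assumes numpy and the module helpers used inside run()
# (math.dot23, hue_angle, hue_quadrature, naka_rushton, ajoin). The squeeze()
# only normalizes the output shape for the comparison.
def _demo_ciecam02_run():
    import numpy as np
    xyzw = np.array([[95.047, 100.0, 108.883]])          # D65-like white point
    xyz = np.array([[19.01, 20.0, 21.78],
                    [57.06, 43.06, 31.96]])
    conditions = {'La': 100.0, 'Yb': 20.0, 'D': 1.0, 'surround': 'avg'}
    jab = run(xyz, xyzw, out='J,aM,bM', conditions=conditions, forward=True)
    xyz_back = run(jab, xyzw, out='J,aM,bM', conditions=conditions, forward=False)
    print(np.allclose(xyz, np.squeeze(xyz_back), atol=1e-4))  # forward/inverse should close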
def getCatObs(n_cat = 10, fieldsize = 2, out = 'LMS', wl = None,
              allow_negative_values = False):
    """
    Generate cone fundamentals for categorical observers.

    Args:
        :n_cat:
            | 10, optional
            | Number of observer CMFs to generate.
        :fieldsize:
            | 2, optional
            | Field size in degrees (between 2° and 10°).
        :out:
            | 'LMS' or str, optional
            | Determines output.
        :wl:
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions should not have
            | negative values.
            | If False: X[X<0] = 0.

    Returns:
        :returns:
            | LMS [, var_age, vAll]
            |   - LMS: ndarray with population LMS functions.
            |   - var_age: ndarray with population observer ages.
            |   - vAll: dict with population physiological factors (see .keys())

    Notes:
        1. Categorical observers are observer functions that represent
        color-normal populations. They are finite and discrete, as opposed to
        observer functions generated from the individual colorimetric observer
        model, and therefore offer a more convenient and practical approach for
        personalized color-imaging workflows and color-matching analyses.
        Categorical observers were derived in two steps. In the first step,
        10000 observer functions were generated from the individual
        colorimetric observer model using Monte Carlo simulation. In the second
        step, cluster analysis (a modified k-medoids algorithm) was applied to
        the 10000 observers, minimizing the squared Euclidean distance in
        cone-fundamental space, and the categorical observers were derived
        iteratively. Since the categorical observers are defined by their
        physiological parameters and ages, their CMFs can be derived for any
        target field size.

        2. Categorical observers are ordered by importance: the first
        categorical observer is the average observer (equivalent to CIEPO06
        with a 38-year-old observer for a given field size), followed by the
        second most important categorical observer, the third, and so on.

        3. See: https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php
    """
    # Use Iteratively Derived Cat.Obs.:
    var_age = _INDVCMF_CATOBSPFCTR['age'].copy()
    vAll = _INDVCMF_CATOBSPFCTR.copy()
    vAll.pop('age')

    # Set requested wavelength range:
    if wl is not None:
        wl = getwlr(wl3 = wl)
    else:
        wl = _WL

    LMS_All = np.nan*np.ones((3 + 1, _WL.shape[0], n_cat))
    for k in range(n_cat):
        t_LMS = cie2006cmfsEx(age = var_age[k], fieldsize = fieldsize, wl = wl,
                              var_od_lens = vAll['od_lens'][k],
                              var_od_macula = vAll['od_macula'][k],
                              var_od_L = vAll['od_L'][k],
                              var_od_M = vAll['od_M'][k],
                              var_od_S = vAll['od_S'][k],
                              var_shft_L = vAll['shft_L'][k],
                              var_shft_M = vAll['shft_M'][k],
                              var_shft_S = vAll['shft_S'][k],
                              out = 'LMS')
        LMS_All[:, :, k] = t_LMS

    LMS_All[np.where(LMS_All < 0)] = 0

    if n_cat == 1:
        LMS_All = np.squeeze(LMS_All, axis = 2)

    if ('xyz' in out.lower().split(',')):
        LMS_All = lmsb_to_xyzb(LMS_All, fieldsize, out = 'xyz',
                               allow_negative_values = allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    if (out == 'LMS'):
        return LMS_All
    elif (out == 'LMS,var_age,vAll'):
        return LMS_All, var_age, vAll
    else:
        return eval(out)
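# ----------------------------------------------------------------------------
# Illustration (not part of the original module): hedged usage sketch for
# getCatObs(). It assumes the module-level data (_INDVCMF_CATOBSPFCTR, _WL)
# and cie2006cmfsEx() are available, as used in the function above.
#
#     LMS, var_age, vAll = getCatObs(n_cat = 3, fieldsize = 2,
#                                    out = 'LMS,var_age,vAll')
#     # LMS is roughly of shape (4, number of wavelengths, 3): a wavelength
#     # row plus L, M, S rows for each of the 3 categorical observers;
#     # var_age holds the observer ages, vAll the physiological factors.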
def symmM_to_posdefM(A = None, atol = 1.0e-9, rtol = 1.0e-9, method = 'make',
                     forcesymm = True):
    """
    Convert a symmetric matrix to a positive definite one.

    Args:
        :A:
            | ndarray
        :atol:
            | float, optional
            | The absolute tolerance parameter (see Notes of numpy.allclose())
        :rtol:
            | float, optional
            | The relative tolerance parameter (see Notes of numpy.allclose())
        :method:
            | 'make' or 'nearest', optional (see notes for more info)
        :forcesymm:
            | True or False, optional
            | If A is not symmetric, force symmetry using:
            |    A = numpy.triu(A) + numpy.triu(A).T - numpy.diag(numpy.diag(A))

    Returns:
        :returns:
            | ndarray with positive-definite matrix.

    Notes on supported methods:
        1. `'make': A Python/Numpy port of Muhammad Asim Mubeen's matlab
        function Spd_Mat.m
        <https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix>`_
        2. `'nearest': A Python/Numpy port of John D'Errico's `nearestSPD`
        MATLAB code.
        <https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite>`_
    """
    if A is not None:
        A = np2d(A)

        # Make sure matrix A is symmetric up to a certain tolerance:
        sn = check_symmetric(A, atol = atol, rtol = rtol)
        if ((A.shape[0] != A.shape[1]) | (sn != True)):
            if (forcesymm == True) & (A.shape[0] == A.shape[1]):
                A = np.triu(A) + np.triu(A).T - np.diag(np.diag(A))
            else:
                raise Exception('symmM_to_posdefM(): matrix A not symmetric.')

        if check_posdef(A, atol = atol, rtol = rtol) == True:
            return A
        else:
            if method == 'make':
                # A Python/Numpy port of Muhammad Asim Mubeen's matlab function Spd_Mat.m
                #
                # See: https://nl.mathworks.com/matlabcentral/fileexchange/45873-positive-definite-matrix
                Val, Vec = np.linalg.eig(A)
                Val = np.real(Val)
                Vec = np.real(Vec)
                Val[np.where(Val == 0)] = _EPS # making zero eigenvalues non-zero
                p = np.where(Val < 0)
                Val[p] = -Val[p] # making negative eigenvalues positive
                return np.dot(Vec, np.dot(np.diag(Val), Vec.T))

            elif method == 'nearest':
                # A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1],
                # which credits [2].
                #
                # [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
                #
                # [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
                #     matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
                #
                # See: https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite
                B = (A + A.T) / 2.0
                _, s, V = np.linalg.svd(B)
                H = np.dot(V.T, np.dot(np.diag(s), V))
                A2 = (B + H) / 2.0
                A3 = (A2 + A2.T) / 2.0
                if check_posdef(A3, atol = atol, rtol = rtol) == True:
                    return A3

                spacing = np.spacing(np.linalg.norm(A))
                I = np.eye(A.shape[0])
                k = 1
                while not check_posdef(A3, atol = atol, rtol = rtol):
                    mineig = np.min(np.real(np.linalg.eigvals(A3)))
                    A3 += I * (-mineig * k**2.0 + spacing)
                    k += 1
                return A3
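# ----------------------------------------------------------------------------
# Illustration (not part of the original module): hedged usage sketch for
# symmM_to_posdefM() with an arbitrary symmetric but indefinite example matrix.
#
#     A = np.array([[1.0, 2.0],
#                   [2.0, 1.0]])    # eigenvalues 3 and -1 -> not positive definite
#     Apd = symmM_to_posdefM(A, method = 'make')
#     # np.linalg.eigvals(Apd) should now be strictly positive;
#     # method = 'nearest' instead follows the nearest-SPD approach of Higham
#     # (see the references inside the function).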
def initialize_VF_hue_angles(hx = None, Cxr = _VF_MAXR,
                             cri_type = _VF_CRI_DEFAULT,
                             modeltype = _VF_MODEL_TYPE,
                             determine_hue_angles = _DETERMINE_HUE_ANGLES):
    """
    Initialize the hue angles that will be used to 'summarize' the VF model
    fitting parameters.

    Args:
        :hx:
            | None or ndarray, optional
            | None defaults to Munsell H5 hues.
        :Cxr:
            | _VF_MAXR, optional
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Cri_type parameters for cri and VF model.
        :modeltype:
            | _VF_MODEL_TYPE or 'M5' or 'M6', optional
            | Determines the type of polynomial model.
        :determine_hue_angles:
            | _DETERMINE_HUE_ANGLES or True or False, optional
            | True: determines the 10 primary / secondary Munsell hues ('5..').
            | Note that for 'M6', an additional

    Returns:
        :pcolorshift:
            | {'href': href,
            |  'Cref' : _VF_MAXR,
            |  'sig' : _VF_SIG,
            |  'labels' : list[str]}
    """
    ###########################################
    # Get Munsell H5 hues:
    ###########################################
    rflM = _MUNSELL['R']
    hn = _MUNSELL['H'] # all Munsell hues
    rH5 = np.where([_MUNSELL['H'][:,0][x][0] == '5' for x in range(_MUNSELL['H'][:,0].shape[0])])[0] # all Munsell H5 hues
    hns5 = np.unique(_MUNSELL['H'][rH5])

    #------------------------------------------------------------------------------
    # Determine Munsell hue angles in cam02ucs:
    pool = False
    IllC = _CIE_ILLUMINANTS['C'] # for determining Munsell hue angles in cam02ucs
    outM = VF_colorshift_model(IllC, cri_type = cri_type, sampleset = rflM,
                               vfcolor = 'g', pool = pool)

    #------------------------------------------------------------------------------
    if (determine_hue_angles == True) | (hx is None):
        # find samples at major Munsell hue angles:
        all_h5_Munsell_cam02ucs = np.ones(hns5.shape)
        Jabt_IllC = outM[0]['Jab']['Jabt']
        for i, v in enumerate(hns5):
            hm = np.where(hn == v)[0]
            all_h5_Munsell_cam02ucs[i] = math.positive_arctan([Jabt_IllC[hm,0,1].mean()],
                                                              [Jabt_IllC[hm,0,2].mean()],
                                                              htype = 'rad')[0]
        hx = all_h5_Munsell_cam02ucs

    #------------------------------------------------------------------------------
    # Set color shift parameters:
    pcolorshift = {'href': hx, 'Cref': Cxr, 'sig': _VF_SIG, 'labels': hns5}

    return pcolorshift
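# ----------------------------------------------------------------------------
# Illustration (not part of the original module): hedged usage sketch for
# initialize_VF_hue_angles(). With all defaults, the reference hue angles are
# derived from the Munsell H5 samples shipped with the package (_MUNSELL).
#
#     pcolorshift = initialize_VF_hue_angles()
#     # pcolorshift['href']   -> hue angles (in rad) of the 10 '5..' Munsell hues
#     # pcolorshift['labels'] -> the corresponding Munsell hue names
#     # pcolorshift['Cref'], pcolorshift['sig'] -> _VF_MAXR and _VF_SIG defaults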
def spd_to_COI_ASNZS1680(S = None, tf = _COI_CSPACE, cieobs = _COI_CIEOBS,
                         out = 'COI,cct', extrapolate_rfl = False):
    """
    Calculate the Cyanosis Observation Index (COI) [ASNZS 1680.2.5-1995].

    Args:
        :S:
            | ndarray with light source spectrum (first column are wavelengths).
        :tf:
            | _COI_CSPACE, optional
            | Color space in which to calculate the COI.
            | Default is CIELAB.
        :cieobs:
            | _COI_CIEOBS, optional
            | CMF set to use.
            | Default is '1931_2'.
        :out:
            | 'COI,cct' or str, optional
            | Determines output.
        :extrapolate_rfl:
            | False, optional
            | If False: limit the wavelength range of the source to that of the
            | standard reflectance spectra for the 50% and 100% oxygenated blood.

    Returns:
        :COI:
            | ndarray with cyanosis indices for input sources.
        :cct:
            | ndarray with correlated color temperatures.

    Note:
        Clause 7.2 of the ASNZS 1680.2.5-1995 standard specifies the properties
        required of a light source used in areas where visual conditions
        suitable for the detection of cyanosis should be provided:

            1. The correlated color temperature (CCT) of the source should be
            from 3300 K to 5300 K.

            2. The cyanosis observation index should not exceed 3.3.
    """
    if S is None: # use default
        S = _CIE_ILLUMINANTS['F4']

    if extrapolate_rfl == False: # _COI_RFL do not cover the full 360-830 nm range.
        wl_min = _COI_RFL_BLOOD[0].min()
        wl_max = _COI_RFL_BLOOD[0].max()
        S = S[:, np.where((S[0] >= wl_min) & (S[0] <= wl_max))[0]]

    # Calculate reference spd:
    Sr = blackbody(4000, wl3 = S[0]) # same wavelength range

    # Calculate xyz of blood under test source and ref. source:
    xyzt, xyzwt = spd_to_xyz(S, rfl = _COI_RFL_BLOOD, relative = True,
                             cieobs = cieobs, out = 2)
    xyzr, xyzwr = spd_to_xyz(Sr, rfl = _COI_RFL_BLOOD, relative = True,
                             cieobs = cieobs, out = 2)

    # Calculate color difference between blood under test and ref. sources:
    DEi = deltaE.DE_cspace(xyzt, xyzr, xyzwt = xyzwt, xyzwr = xyzwr, tf = tf)

    # Calculate Cyanosis Observation Index:
    COI = np.nanmean(DEi, axis = 0)[:, None]

    # Calculate cct, if requested:
    if 'cct' in out.split(','):
        cct, duv = xyz_to_cct(xyzwt, cieobs = cieobs, out = 2)

    # manage output:
    if out == 'COI':
        return COI
    elif out == 'COI,cct':
        return COI, cct
    else:
        return eval(out)
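# ----------------------------------------------------------------------------
# Illustration (not part of the original module): hedged usage sketch for
# spd_to_COI_ASNZS1680(). With S = None, the default CIE F4 illuminant is
# evaluated against the Clause 7.2 criteria quoted in the docstring.
#
#     COI, cct = spd_to_COI_ASNZS1680()
#     # A source is considered suitable for cyanosis observation when
#     # 3300 K <= cct <= 5300 K and COI <= 3.3.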