def cart2spher(x, y, z, deg = True):
    """
    Convert cartesian to spherical coordinates.

    Args:
        :x, y, z:
            | tuple of floats, ints or ndarrays
            | Cartesian coordinates

    Returns:
        :theta:
            | Float, int or ndarray
            | Angle with positive z-axis.
        :phi:
            | Float, int or ndarray
            | Angle around positive z-axis starting from x-axis.
        :r:
            | Float, int or ndarray
            | radius
    """
    r = np.sqrt(x*x + y*y + z*z)
    phi = np.arctan2(y, x)
    phi[phi < 0.] = phi[phi < 0.] + 2*np.pi
    zdr = z/r
    zdr[zdr > 1.] = 1.
    zdr[zdr < -1.] = -1.
    theta = np.arccos(zdr)
    if deg == True:
        theta = theta*180/np.pi
        phi = phi*180/np.pi
    return theta, phi, r
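# Usage sketch (not part of the original module; assumes numpy is imported as
# np at module level). In practice ndarray inputs are needed, because
# cart2spher uses boolean indexing on phi and on z/r:
#
#   x = np.array([1.0, 0.0])
#   y = np.array([1.0, 1.0])
#   z = np.array([0.0, 1.0])
#   theta, phi, r = cart2spher(x, y, z, deg = True)   # angles in degrees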
def positive_arctan(x, y, htype = 'deg'):
    """
    Calculate positive angle (0°-360° or 0 - 2*pi rad.) from x and y.

    Args:
        :x:
            | ndarray of x-coordinates
        :y:
            | ndarray of y-coordinates
        :htype:
            | 'deg' or 'rad', optional
            |   - 'deg': hue angle between 0° and 360°
            |   - 'rad': hue angle between 0 and 2pi radians

    Returns:
        :returns:
            | ndarray of positive angles.
    """
    if htype == 'deg':
        r2d = 180.0/np.pi
        h360 = 360.0
    else:
        r2d = 1.0
        h360 = 2.0*np.pi
    h = np.atleast_1d((np.arctan2(y, x)*r2d))
    h[np.where(h < 0)] = h[np.where(h < 0)] + h360
    return h
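# Usage sketch (illustrative only, assumes numpy as np):
#
#   h = positive_arctan(np.array([-1.0]), np.array([-1.0]), htype = 'deg')
#   # np.arctan2 alone would give -135°; positive_arctan wraps it to 225°.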
def cik_to_v(cik, xyc = None, inverse = False):
    """
    Calculate v-format ellipse descriptor from 2x2 'covariance matrix'^-1 cik.

    Args:
        :cik:
            | 'Nx2x2' (covariance matrix)^-1
        :xyc:
            | None, optional
            | (Nx2) ndarray with ellipse center coordinates.
        :inverse:
            | If True: input is inverse of cik.

    Returns:
        :v:
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax, Rmin, xc, yc, theta]

    Notes:
        | cik equals the inverse covariance matrix only in the case of a
        | Gaussian or normal distribution!
    """
    if cik.ndim < 3:
        cik = cik[None,...]

    if inverse == True:
        for i in range(cik.shape[0]):
            cik[i,:,:] = np.linalg.inv(cik[i,:,:])

    g11 = cik[:,0,0]
    g22 = cik[:,1,1]
    g12 = cik[:,0,1]

    theta = 0.5*np.arctan2(2*g12, (g11 - g22)) + (np.pi/2)*(g12 < 0)
    cottheta = np.cos(theta)/np.sin(theta)  # np.cot(theta)
    cottheta[np.isinf(cottheta)] = 0

    a = 1/np.sqrt((g22 + g12*cottheta))
    b = 1/np.sqrt((g11 - g12*cottheta))

    # ensure largest ellipse axis is first (correct angle):
    c = b > a
    a[c], b[c], theta[c] = b[c], a[c], theta[c] + np.pi/2

    v = np.vstack((a, b, np.zeros(a.shape), np.zeros(a.shape), theta)).T

    # add center coordinates:
    if xyc is not None:
        v[:,2:4] = xyc

    return v
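# Usage sketch (illustrative only, assumes numpy as np): for a Gaussian with
# covariance matrix diag(4, 1), the inverse covariance matrix describes an
# ellipse with semi-axes 2 and 1:
#
#   cik = np.linalg.inv(np.array([[4.0, 0.0], [0.0, 1.0]]))
#   v = cik_to_v(cik, xyc = np.array([[0.0, 0.0]]))   # -> [[2., 1., 0., 0., theta]]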
def get_tpr(self, *args):
    """ Get spherical coordinates tpr (theta, phi, radius). """
    if len(args) > 0:
        x, y, z = args
    else:
        x, y, z = self.x, self.y, self.z
    r = np.sqrt(x*x + y*y + z*z)
    zdr = np.asarray(z/r)
    zdr[zdr > 1.0] = 1.0
    zdr[zdr < -1.0] = -1.0
    theta = np.arccos(zdr)
    phi = np.arctan2(y, x)
    phi[phi < 0.0] = phi[phi < 0.0] + 2*np.pi
    phi[r < self._TINY] = 0.0
    theta[r < self._TINY] = 0.0
    return theta, phi, r
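# Usage sketch (hypothetical; get_tpr is a method whose class is not shown
# here). Any instance exposing ndarray attributes x, y, z and a _TINY
# threshold would work:
#
#   # theta, phi, r = some_vec3d_instance.get_tpr()
#   # theta, phi, r = some_vec3d_instance.get_tpr(x, y, z)   # explicit coordinates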
def fit_ellipse(xy, center_on_mean_xy = False):
    """
    Fit an ellipse to supplied data points.

    Args:
        :xy:
            | coordinates of points to fit (Nx2 array)
        :center_on_mean_xy:
            | False, optional
            | Center ellipse on mean of xy
            | (otherwise it might be offset due to solving
            |  the constrained minimization problem: aT*S*a, see ref below.)

    Returns:
        :v:
            | vector with ellipse parameters [Rmax, Rmin, xc, yc, theta (rad.)]

    Reference:
        1. Fitzgibbon, A.W., Pilu, M., and Fischer R.B.,
        Direct least squares fitting of ellipses,
        Proc. of the 13th International Conference on Pattern Recognition,
        pp. 253-257, Vienna, 1996.
    """
    # remove centroid:
    # center = xy.mean(axis=0)
    # xy = xy - center

    # Fit ellipse:
    x, y = xy[:,0:1], xy[:,1:2]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S, C = np.dot(D.T, D), np.zeros([6, 6])
    C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1
    U, s, V = np.linalg.svd(np.dot(np.linalg.inv(S), C))
    e = U[:, 0]
    # E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))
    # n = np.argmax(np.abs(E))
    # e = V[:,n]

    # get ellipse axis lengths, center and orientation:
    b, c, d, f, g, a = e[1]/2, e[2], e[3]/2, e[4]/2, e[5], e[0]

    # get ellipse center:
    num = b*b - a*c
    if num == 0:
        xc = 0
        yc = 0
    else:
        xc = ((c*d - b*f)/num)
        yc = ((a*f - b*d)/num)

    # get ellipse orientation:
    theta = np.arctan2(np.array(2*b), np.array((a - c)))/2
    # if b == 0:
    #     theta = 0 if (a > c) else np.pi/2
    # else:
    #     theta = np.arctan2(2*b, (a-c))/2 if (a > c) else np.arctan2(2*b, (a-c))/2 + np.pi/2

    # axis lengths:
    up = 2*(a*f*f + c*d*d + g*b*b - 2*b*d*f - a*c*g)
    down1 = (b*b - a*c)*((c - a)*np.sqrt(1 + 4*b*b/((a - c)*(a - c))) - (c + a))
    down2 = (b*b - a*c)*((a - c)*np.sqrt(1 + 4*b*b/((a - c)*(a - c))) - (c + a))
    a, b = np.sqrt((up/down1)), np.sqrt((up/down2))

    # assert that a is the major axis (otherwise swap and correct angle):
    if (b > a):
        b, a = a, b

    # ensure the angle is between 0 and 2*pi:
    theta = fmod(theta, 2.0*np.pi)

    if center_on_mean_xy == True:
        xc, yc = xy.mean(axis=0)

    return np.hstack((a, b, xc, yc, theta))
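# Usage sketch (illustrative only, assumes numpy as np): fit slightly noisy
# points sampled from an axis-aligned ellipse with semi-axes 3 and 1 centered
# at (1, -2):
#
#   t = np.linspace(0, 2*np.pi, 100)[:, None]
#   xy = np.hstack((1 + 3*np.cos(t), -2 + 1*np.sin(t))) + 0.01*np.random.randn(100, 2)
#   v = fit_ellipse(xy)   # approximately [3, 1, 1, -2, theta], theta ~ 0 (mod pi)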
def _xyz_to_jab_cam02ucs(xyz, xyzw, ucs=True, conditions=None):
    """
    Calculate CAM02-UCS J'a'b' coordinates from xyz tristimulus values of sample and white point.

    Args:
        :xyz:
            | ndarray with sample tristimulus values
        :xyzw:
            | ndarray with white point tristimulus values
        :ucs:
            | True, optional
            | If True: compress lightness J and colorfulness M to the CAM02-UCS J', M' scales.
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?

    Returns:
        :jab:
            | ndarray with J'a'b' coordinates.
    """
    #--------------------------------------------
    # Get/set conditions parameters:
    if conditions is not None:
        surround_parameters = {'surrounds': ['avg', 'dim', 'dark'],
                               'avg' : {'c': 0.69, 'Nc': 1.0, 'F': 1.0, 'FLL': 1.0},
                               'dim' : {'c': 0.59, 'Nc': 0.9, 'F': 0.9, 'FLL': 1.0},
                               'dark': {'c': 0.525, 'Nc': 0.8, 'F': 0.8, 'FLL': 1.0}}
        La = conditions['La']
        Yb = conditions['Yb']
        D = conditions['D']
        surround = conditions['surround']
        if isinstance(surround, str):
            surround = surround_parameters[conditions['surround']]
        F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]
    else:
        # set defaults:
        La, Yb, D, F, FLL, Nc, c = 100, 20, 1, 1, 1, 1, 0.69

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals):
    mhpe = np.array([[0.38971, 0.68898, -0.07868],
                     [-0.22981, 1.1834, 0.04641],
                     [0.0, 0.0, 1.0]])
    # CAT02 sensor space:
    mcat = np.array([[0.7328, 0.4296, -0.1624],
                     [-0.7036, 1.6975, 0.0061],
                     [0.0030, 0.0136, 0.9834]])

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)

    #--------------------------------------------
    # calculate condition dependent parameters:
    Yw = xyzw[..., 1:2].T
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * ((5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5

    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #--------------------------------------------
    # transform from xyz, xyzw to cat02 sensor space:
    rgb = math.dot23(mcat, xyz.T)
    rgbw = mcat @ xyzw.T

    #--------------------------------------------
    # apply von Kries cat:
    # (the factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see
    #  'note' in Fairchild's "Color Appearance Models", p. 291 in 3rd ed.)
    rgbc = ((D * Yw / rgbw)[..., None] + (1 - D)) * rgb
    rgbwc = ((D * Yw / rgbw) + (1 - D)) * rgbw

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbp = math.dot23(mhpe_x_invmcat, rgbc).T
    rgbwp = (mhpe_x_invmcat @ rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression:
    naka_rushton = lambda x: 400 * x**0.42 / (x**0.42 + 27.13) + 0.1

    rgbpa = naka_rushton(FL * rgbp / 100.0)
    p = np.where(rgbp < 0)
    rgbpa[p] = 0.1 - (naka_rushton(FL * np.abs(rgbp[p]) / 100.0) - 0.1)

    rgbwpa = naka_rushton(FL * rgbwp / 100.0)
    pw = np.where(rgbwp < 0)
    rgbwpa[pw] = 0.1 - (naka_rushton(FL * np.abs(rgbwp[pw]) / 100.0) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal:
    A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] + (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] + (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    #--------------------------------------------
    # calculate initial opponent channels:
    a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
    b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

    #--------------------------------------------
    # calculate hue h and eccentricity factor, et:
    h = np.arctan2(b, a)
    et = (1.0 / 4.0) * (np.cos(h + 2.0) + 3.8)

    #--------------------------------------------
    # calculate lightness, J:
    J = 100.0 * (A / Aw)**(c * z)

    #--------------------------------------------
    # calculate chroma, C:
    t = ((50000.0 / 13.0) * Nc * Ncb * et * ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] + (21.0 / 20.0 * rgbpa[..., 2]))
    C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

    #--------------------------------------------
    # Calculate colorfulness, M:
    M = C * FL**0.25

    #--------------------------------------------
    # convert to cam02ucs J', aM', bM':
    if ucs == True:
        KL, c1, c2 = 1.0, 0.007, 0.0228
        Jp = (1.0 + 100.0 * c1) * J / (1.0 + c1 * J)
        Mp = (1.0 / c2) * np.log(1.0 + c2 * M)
    else:
        Jp = J
        Mp = M
    aMp = Mp * np.cos(h)
    bMp = Mp * np.sin(h)

    return np.dstack((Jp, aMp, bMp))
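# Usage sketch (illustrative only): _xyz_to_jab_cam02ucs relies on numpy (np)
# and on luxpy's math.dot23 helper being importable at module level. Shapes
# below follow the convention assumed here of (n_samples x n_whites x 3) for
# :xyz: and (n_whites x 3) for :xyzw::
#
#   xyz  = np.array([[[19.01, 20.0, 21.78]]])
#   xyzw = np.array([[95.05, 100.0, 108.88]])
#   jabp = _xyz_to_jab_cam02ucs(xyz, xyzw, ucs = True,
#                               conditions = {'La': 100, 'Yb': 20, 'D': 1, 'surround': 'avg'})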
def get_poly_model(jabt, jabr, modeltype=_VF_MODEL_TYPE):
    """
    Setup base color shift model (delta_a, delta_b),
    determine model parameters and accuracy.

    | Calculates a base color shift (delta) from the ref. chromaticity ar, br.

    Args:
        :jabt:
            | ndarray with jab color coordinates under the test SPD.
        :jabr:
            | ndarray with jab color coordinates under the reference SPD.
        :modeltype:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
            | (see notes below)

    Returns:
        :returns:
            | (poly_model,
            |  pmodel,
            |  dab_model,
            |  dab_res,
            |  dCHoverC_res,
            |  dab_std,
            |  dCHoverC_std)
            |
            | :poly_model: function handle to model
            | :pmodel: ndarray with model parameters
            | :dab_model: ndarray with ab model predictions from ar, br.
            | :dab_res: ndarray with residuals between 'da,db' of samples and
            |           'da,db' predicted by the model.
            | :dCHoverC_res: ndarray with residuals between 'dCoverC,dH'
            |                of samples and 'dCoverC,dH' predicted by the model.
            |     Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr
            |     (predicted from model, see notes below)
            | :dab_std: ndarray with std of :dab_res:
            | :dCHoverC_std: ndarray with std of :dCHoverC_res:

    Notes:
        1. Model types:
            | poly5_model = lambda a,b,p:        p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
            | poly6_model = lambda a,b,p: p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
        2. Calculation of dCoverC and dH:
            | dCoverC = (np.cos(hr)*da + np.sin(hr)*db)/Cr
            | dHoverC = (np.cos(hr)*db - np.sin(hr)*da)/Cr
    """
    at = jabt[..., 1]
    bt = jabt[..., 2]
    ar = jabr[..., 1]
    br = jabr[..., 2]

    # A. Calculate da, db:
    da = at - ar
    db = bt - br

    # B.1 Calculate model matrix:
    # 5-parameter model:
    M5 = np.array([[np.sum(ar*ar), np.sum(ar*br), np.sum(ar*ar**2), np.sum(ar*ar*br), np.sum(ar*br**2)],
                   [np.sum(br*ar), np.sum(br*br), np.sum(br*ar**2), np.sum(br*ar*br), np.sum(br*br**2)],
                   [np.sum((ar**2)*ar), np.sum((ar**2)*br), np.sum((ar**2)*ar**2), np.sum((ar**2)*ar*br), np.sum((ar**2)*br**2)],
                   [np.sum(ar*br*ar), np.sum(ar*br*br), np.sum(ar*br*ar**2), np.sum(ar*br*ar*br), np.sum(ar*br*br**2)],
                   [np.sum((br**2)*ar), np.sum((br**2)*br), np.sum((br**2)*ar**2), np.sum((br**2)*ar*br), np.sum((br**2)*br**2)]])
    # 6-parameter model:
    M6 = np.array([[ar.size, np.sum(1.0*ar), np.sum(1.0*br), np.sum(1.0*ar**2), np.sum(1.0*ar*br), np.sum(1.0*br**2)],
                   [np.sum(ar*1.0), np.sum(ar*ar), np.sum(ar*br), np.sum(ar*ar**2), np.sum(ar*ar*br), np.sum(ar*br**2)],
                   [np.sum(br*1.0), np.sum(br*ar), np.sum(br*br), np.sum(br*ar**2), np.sum(br*ar*br), np.sum(br*br**2)],
                   [np.sum((ar**2)*1.0), np.sum((ar**2)*ar), np.sum((ar**2)*br), np.sum((ar**2)*ar**2), np.sum((ar**2)*ar*br), np.sum((ar**2)*br**2)],
                   [np.sum(ar*br*1.0), np.sum(ar*br*ar), np.sum(ar*br*br), np.sum(ar*br*ar**2), np.sum(ar*br*ar*br), np.sum(ar*br*br**2)],
                   [np.sum((br**2)*1.0), np.sum((br**2)*ar), np.sum((br**2)*br), np.sum((br**2)*ar**2), np.sum((br**2)*ar*br), np.sum((br**2)*br**2)]])

    # B.2 Define model function:
    poly5_model = lambda a, b, p: p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
    poly6_model = lambda a, b, p: p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)

    if modeltype == 'M5':
        M = M5
        poly_model = poly5_model
    else:
        M = M6
        poly_model = poly6_model

    M = np.linalg.inv(M)

    # C.1 Data a,b analysis output:
    if modeltype == 'M5':
        da_model_parameters = np.dot(M, np.array([np.sum(da*ar), np.sum(da*br), np.sum(da*ar**2), np.sum(da*ar*br), np.sum(da*br**2)]))
        db_model_parameters = np.dot(M, np.array([np.sum(db*ar), np.sum(db*br), np.sum(db*ar**2), np.sum(db*ar*br), np.sum(db*br**2)]))
    else:
        da_model_parameters = np.dot(M, np.array([np.sum(da*1.0), np.sum(da*ar), np.sum(da*br), np.sum(da*ar**2), np.sum(da*ar*br), np.sum(da*br**2)]))
        db_model_parameters = np.dot(M, np.array([np.sum(db*1.0), np.sum(db*ar), np.sum(db*br), np.sum(db*ar**2), np.sum(db*ar*br), np.sum(db*br**2)]))
    pmodel = np.vstack((da_model_parameters, db_model_parameters))

    # D.1 Calculate model da, db:
    da_model = poly_model(ar, br, pmodel[0])
    db_model = poly_model(ar, br, pmodel[1])
    dab_model = np.hstack((da_model, db_model))

    # D.2 Calculate residuals for da & db:
    da_res = da - da_model
    db_res = db - db_model
    dab_res = np.hstack((da_res, db_res))
    dab_std = np.vstack((np.std(da_res, axis=0), np.std(db_res, axis=0)))

    # E. Calculate href, Cref:
    href = np.arctan2(br, ar)
    Cref = (ar**2 + br**2)**0.5

    # F. Calculate dC/C, dH/C for data and model and calculate residuals:
    dCoverC = (np.cos(href)*da + np.sin(href)*db)/Cref
    dHoverC = (np.cos(href)*db - np.sin(href)*da)/Cref
    dCoverC_model = (np.cos(href)*da_model + np.sin(href)*db_model)/Cref
    dHoverC_model = (np.cos(href)*db_model - np.sin(href)*da_model)/Cref
    dCoverC_res = dCoverC - dCoverC_model
    dHoverC_res = dHoverC - dHoverC_model
    dCHoverC_std = np.vstack((np.std(dCoverC_res, axis=0), np.std(dHoverC_res, axis=0)))

    dCHoverC_res = np.hstack((href, dCoverC_res, dHoverC_res))

    return poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std
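# Usage sketch (illustrative only, assumes numpy as np and jab arrays shaped
# (N x 1 x 3), the convention assumed here): fit the degree-6 base color
# shift model to a noisy copy of a reference jab set:
#
#   jabr = np.random.rand(100, 1, 3)*np.array([100.0, 40.0, 40.0]) - np.array([0.0, 20.0, 20.0])
#   jabt = jabr + 0.5*np.random.randn(100, 1, 3)
#   poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std = \
#       get_poly_model(jabt, jabr, modeltype = 'M6')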
def run(data, xyzw=_DEFAULT_WHITE_POINT, Yw=None, conditions=None,
        ucstype='ucs', forward=True,
        yellowbluepurplecorrect=False, mcat='cat02'):
    """
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or backward modes.

    Args:
        :data:
            | ndarray with sample xyz values (forward mode)
            | or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
        :ucstype:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02.
            | If 'brill-suss':
            |     for yellow-blue problem, see:
            |       - Brill [Color Res Appl, 2006; 31, 142-145] and
            |       - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426]
            | If 'jiang-luo':
            |     for yellow-blue problem + purple line problem, see:
            |       - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503]
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02'
            |      (others e.g. 'cat02-bs', 'cat02-jiang',
            |      all trying to correct gamut problems of original cat02 matrix)
            |    - str: see luxpy.cat._MCATS.keys() for options
            |      (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries

    Returns:
        :camout:
            | ndarray with J'a'b' coordinates (forward mode)
            | or
            | XYZ tristimulus values (inverse mode)

    References:
        1. `M.R. Luo, G. Cui, and C. Li,
        'Uniform colour spaces based on CIECAM02 colour appearance model,'
        Color Res. Appl., vol. 31, no. 4, pp. 320-330, 2006.
        <http://onlinelibrary.wiley.com/doi/10.1002/col.20227/abstract)>`_
    """
    # get ucs parameters:
    if isinstance(ucstype, str):
        ucs_pars = _CAM_UCS_PARAMETERS
        ucs = ucs_pars[ucstype]
    else:
        ucs = ucstype
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    # set conditions to use in CIECAM02 (overrides None-default in ciecam02() !!!)
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS

    if forward == True:
        # run ciecam02 to get JMh:
        data = ciecam02(data, xyzw, outin='J,M,h', conditions=conditions,
                        forward=True, mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[..., 0] = (1.0 + 100.0*c1)*data[..., 0] / (1.0 + c1*data[..., 0])
        Mp = ((1.0/c2)*np.log(1.0 + c2*data[..., 1])) if (c2 != 0) else data[..., 1]
        camout[..., 1] = Mp*np.cos(data[..., 2]*np.pi/180)
        camout[..., 2] = Mp*np.sin(data[..., 2]*np.pi/180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc ciecam02 hue angle:
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc cam02ucs and CIECAM02 colourfulness:
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = ((np.exp(c2*Mp) - 1.0) / c2) if (c2 != 0) else Mp

        # calculate ciecam02 aM, bM:
        aM = M*np.cos(h)
        bM = M*np.sin(h)

        # calc ciecam02 lightness:
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0])*c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)), xyzw, outin='J,aM,bM',
                        conditions=conditions, forward=False, mcat=mcat,
                        yellowbluepurplecorrect=yellowbluepurplecorrect)
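# Usage sketch (illustrative only): run() depends on ciecam02(), ajoin(),
# _CAM_UCS_PARAMETERS, _DEFAULT_CONDITIONS and _DEFAULT_WHITE_POINT from the
# surrounding module. A forward/inverse round trip could look like:
#
#   xyz  = np.array([[19.01, 20.0, 21.78]])
#   xyzw = np.array([[95.05, 100.0, 108.88]])
#   jabp = run(xyz, xyzw = xyzw, ucstype = 'ucs', forward = True)
#   xyz2 = run(jabp, xyzw = xyzw, ucstype = 'ucs', forward = False)   # ~ xyz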
def spd_to_ies_tm30_metrics(SPD, cri_type = None, \
                            hbins = 16, start_hue = 0.0,\
                            scalef = 100, \
                            vf_model_type = _VF_MODEL_TYPE, \
                            vf_pcolorshift = _VF_PCOLORSHIFT,\
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculates IES TM30 metrics from spectral data.

    Args:
        :SPD:
            | numpy.ndarray with spectral data
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Non-None values of :hbins:, :start_hue: and :scalef: overwrite
            | input in cri_type['rg_pars']
        :hbins:
            | 16 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :scalef:
            | 100, optional
            | Scale factor for reference circle.
        :vf_model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial vector field model.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified or
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues. :vf_pcolorshift: specifies
            | these hues and chroma level.
        :scale_vf_chroma_to_sample_chroma:
            | False, optional
            | Scale chroma of reference and test vf fields such that average of
            | binned reference chroma equals that of the binned sample chroma
            | before calculating hue bin metrics.

    Returns:
        :data:
            | dict with color rendering data:
            | - 'SPD'     : ndarray test SPDs
            | - 'bjabt'   : ndarray with binned jab data under test SPDs
            | - 'bjabr'   : ndarray with binned jab data under reference SPDs
            | - 'jabti'   : ndarray with individual jab data under test SPDs
            |               (scaled such that bjabr are on a circle)
            | - 'jabri'   : ndarray with individual jab data under reference SPDs
            |               (scaled such that bjabr are on a circle)
            | - 'hbinnr'  : ndarray with the hue bin number the samples belong to.
            | - 'cct'     : ndarray with CCT of test SPD
            | - 'duv'     : ndarray with distance to blackbody locus of test SPD
            | - 'Rf'      : ndarray with general color fidelity indices
            | - 'Rg'      : ndarray with gamut area indices
            | - 'Rfi'     : ndarray with specific color fidelity indices
            | - 'Rfhi'    : ndarray with local (hue binned) fidelity indices
            | - 'Rcshi'   : ndarray with local chroma shifts indices
            | - 'Rhshi'   : ndarray with local hue shifts indices
            | - 'Rt'      : ndarray with general metameric uncertainty index Rt
            | - 'Rti'     : ndarray with specific metameric uncertainty indices Rti
            | - 'Rfhi_vf' : ndarray with local (hue binned) fidelity indices
            |               obtained from VF model predictions at color space
            |               pixel coordinates
            | - 'Rfcshi_vf': ndarray with local chroma shifts indices
            |               (same as above)
            | - 'Rfhshi_vf': ndarray with local hue shifts indices
            |               (same as above)
    """
    if cri_type is None:
        cri_type = 'iesrf'

    # Calculate color rendering measures for SPDs in data:
    out = 'Rf,Rg,cct,duv,Rfi,jabt,jabr,Rfhi,Rcshi,Rhshi,cri_type'
    if isinstance(cri_type, str):  # get dict
        cri_type = copy.deepcopy(_CRI_DEFAULTS[cri_type])
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    Rf, Rg, cct, duv, Rfi, jabt, jabr, Rfhi, Rcshi, Rhshi, cri_type = spd_to_cri(SPD, cri_type=cri_type, out=out)
    rg_pars = cri_type['rg_pars']

    # Calculate metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(SPD, cri_type=cri_type, model_type=vf_model_type,
                                 cspace=cri_type['cspace'], sampleset=eval(cri_type['sampleset']),
                                 pool=False, pcolorshift=vf_pcolorshift, vfcolor=0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti'] for i in range(len(dataVF))][0])

    # Get normalized and sliced sample data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [rg_pars[x] for x in sorted(rg_pars.keys())]
    normalized_chroma_ref = scalef  # np.sqrt((jabr[...,1]**2 + jabr[...,2]**2)).mean(axis = 0).mean()

    if scale_vf_chroma_to_sample_chroma == True:
        normalize_gamut = False
        bjabt, bjabr = gamut_slicer(jabt, jabr, out='jabt,jabr', nhbins=nhbins,
                                    start_hue=start_hue, normalize_gamut=normalize_gamut,
                                    normalized_chroma_ref=normalized_chroma_ref, close_gamut=True)
        Cr_s = (np.sqrt(bjabr[:-1, ..., 1]**2 + bjabr[:-1, ..., 2]**2)).mean(axis=0)  # for rescaling vector field average reference chroma

    normalize_gamut = True  # (for plotting)
    bjabt, bjabr, binnrs, jabti, jabri = gamut_slicer(jabt, jabr, out='jabt,jabr,binnr,jabti,jabri',
                                                      nhbins=nhbins, start_hue=start_hue,
                                                      normalize_gamut=normalize_gamut,
                                                      normalized_chroma_ref=normalized_chroma_ref,
                                                      close_gamut=True)

    Rfhi_vf = np.empty(Rfhi.shape)
    Rcshi_vf = np.empty(Rcshi.shape)
    Rhshi_vf = np.empty(Rhshi.shape)
    for i in range(cct.shape[0]):

        # Get normalized and sliced VF data for hue specific metrics:
        vfjabt = np.hstack((np.ones(dataVF[i]['fielddata']['vectorfield']['axt'].shape),
                            dataVF[i]['fielddata']['vectorfield']['axt'],
                            dataVF[i]['fielddata']['vectorfield']['bxt']))
        vfjabr = np.hstack((np.ones(dataVF[i]['fielddata']['vectorfield']['axr'].shape),
                            dataVF[i]['fielddata']['vectorfield']['axr'],
                            dataVF[i]['fielddata']['vectorfield']['bxr']))
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [rg_pars[x] for x in sorted(rg_pars.keys())]
        vfbjabt, vfbjabr, vfbDEi = gamut_slicer(vfjabt, vfjabr, out='jabt,jabr,DEi',
                                                nhbins=nhbins, start_hue=start_hue,
                                                normalize_gamut=normalize_gamut,
                                                normalized_chroma_ref=normalized_chroma_ref,
                                                close_gamut=False)

        if scale_vf_chroma_to_sample_chroma == True:
            # rescale vfbjabt and vfbjabr to same chroma level as bjabr:
            Cr_vfb = np.sqrt(vfbjabr[..., 1]**2 + vfbjabr[..., 2]**2)
            Cr_vf = np.sqrt(vfjabr[..., 1]**2 + vfjabr[..., 2]**2)
            hr_vf = np.arctan2(vfjabr[..., 2], vfjabr[..., 1])
            Ct_vf = np.sqrt(vfjabt[..., 1]**2 + vfjabt[..., 2]**2)
            ht_vf = np.arctan2(vfjabt[..., 2], vfjabt[..., 1])
            fC = Cr_s.mean() / Cr_vfb.mean()
            vfjabr[..., 1] = fC * Cr_vf * np.cos(hr_vf)
            vfjabr[..., 2] = fC * Cr_vf * np.sin(hr_vf)
            vfjabt[..., 1] = fC * Ct_vf * np.cos(ht_vf)
            vfjabt[..., 2] = fC * Ct_vf * np.sin(ht_vf)
            vfbjabt, vfbjabr, vfbDEi = gamut_slicer(vfjabt, vfjabr, out='jabt,jabr,DEi',
                                                    nhbins=nhbins, start_hue=start_hue,
                                                    normalize_gamut=normalize_gamut,
                                                    normalized_chroma_ref=normalized_chroma_ref,
                                                    close_gamut=False)

        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        vfRfhi, vfRcshi, vfRhshi = jab_to_rhi(jabt=vfbjabt, jabr=vfbjabr, DEi=vfbDEi,
                                              cri_type=cri_type, scale_factor=scale_factor,
                                              scale_fcn=scale_fcn, use_bin_avg_DEi=True)
        # Note: [:-1,...] removes last row from jab as this was added to close the gamut.

        Rfhi_vf[:, i:i + 1] = vfRfhi
        Rhshi_vf[:, i:i + 1] = vfRhshi
        Rcshi_vf[:, i:i + 1] = vfRcshi

    # Create dict with CRI info:
    data = {'SPD': SPD, 'cct': cct, 'duv': duv, 'bjabt': bjabt, 'bjabr': bjabr,
            'jabti': jabti, 'jabri': jabri, 'hbinnr': binnrs,
            'Rf': Rf, 'Rg': Rg, 'Rfi': Rfi, 'Rfhi': Rfhi, 'Rcshi': Rcshi, 'Rhshi': Rhshi,
            'Rt': Rt, 'Rti': Rti, 'Rfhi_vf': Rfhi_vf, 'Rfcshi_vf': Rcshi_vf, 'Rfhshi_vf': Rhshi_vf,
            'dataVF': dataVF, 'cri_type': cri_type}
            # 'jabt_': jabt_, 'jabr_': jabr_
    return data
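# Usage sketch (illustrative only): SPD is expected as an ndarray in the usual
# luxpy spectral format (first row wavelengths, following rows spectral
# values). The illuminant lookup below is an assumption for illustration:
#
#   import luxpy as lx
#   SPD = lx._CIE_ILLUMINANTS['F4']
#   tm30 = spd_to_ies_tm30_metrics(SPD, cri_type = 'iesrf', hbins = 16,
#                                  start_hue = 0.0, scalef = 100)
#   print(tm30['Rf'], tm30['Rg'])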
def run(data, xyzw, conditions=None, ucs_type='ucs', forward=True):
    """
    Run the CAM02-UCS[,-LCD,-SCD] color appearance difference model in forward or backward modes.

    Args:
        :data:
            | ndarray with sample xyz values (forward mode)
            | or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with white point tristimulus values
        :conditions:
            | None, optional
            | Dictionary with viewing conditions.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
            | For more info see luxpy.cam.ciecam02()?
        :ucs_type:
            | 'ucs', optional
            | String with type of color difference appearance space
            | options: 'ucs', 'scd', 'lcd'
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.

    Returns:
        :camout:
            | ndarray with J'a'b' coordinates or whatever correlates requested in out.

    Note:
        * This is a simplified, less flexible, but faster version than the main cam02ucs().
    """
    # get ucs parameters:
    if isinstance(ucs_type, str):
        ucs_pars = {'ucs': {'KL': 1.0, 'c1': 0.007, 'c2': 0.0228},
                    'lcd': {'KL': 0.77, 'c1': 0.007, 'c2': 0.0053},
                    'scd': {'KL': 1.24, 'c1': 0.007, 'c2': 0.0363}}
        ucs = ucs_pars[ucs_type]
    else:
        ucs = ucs_type
    KL, c1, c2 = ucs['KL'], ucs['c1'], ucs['c2']

    if forward == True:
        # run ciecam02 to get JMh:
        data = ciecam02(data, xyzw, out='J,M,h', conditions=conditions, forward=True)

        camout = np.zeros_like(data)  # for output

        #--------------------------------------------
        # convert to cam02ucs J', aM', bM':
        camout[..., 0] = (1.0 + 100.0*c1)*data[..., 0] / (1.0 + c1*data[..., 0])
        Mp = (1.0/c2)*np.log(1.0 + c2*data[..., 1])
        camout[..., 1] = Mp*np.cos(data[..., 2]*np.pi/180)
        camout[..., 2] = Mp*np.sin(data[..., 2]*np.pi/180)

        return camout

    else:
        #--------------------------------------------
        # convert cam02ucs J', aM', bM' to xyz:

        # calc CAM02 hue angle:
        #Jp, aMp, bMp = asplit(data)
        h = np.arctan2(data[..., 2], data[..., 1])

        # calc CAM02 and CIECAM02 colourfulness:
        Mp = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        M = (np.exp(c2*Mp) - 1.0) / c2

        # calculate ciecam02 aM, bM:
        aM = M*np.cos(h)
        bM = M*np.sin(h)

        # calc CAM02 lightness:
        J = data[..., 0] / (1.0 + (100.0 - data[..., 0])*c1)

        # run ciecam02 in inverse mode to get xyz:
        return ciecam02(ajoin((J, aM, bM)), xyzw, out='J,aM,bM',
                        conditions=conditions, forward=False)
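# Usage sketch (illustrative only): this simplified run() depends on the
# ciecam02() and ajoin() helpers from the surrounding module:
#
#   xyz  = np.array([[19.01, 20.0, 21.78]])
#   xyzw = np.array([[95.05, 100.0, 108.88]])
#   conditions = {'La': 100, 'Yb': 20, 'D': 1, 'surround': 'avg'}
#   jabp = run(xyz, xyzw, conditions = conditions, ucs_type = 'ucs', forward = True)
#   xyz2 = run(jabp, xyzw, conditions = conditions, ucs_type = 'ucs', forward = False)   # ~ xyz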