def naka_rushton(data, sig=2.0, n=0.73, scaling=1.0, noise=0.0, forward=True):
    """
    Apply a Naka-Rushton response compression (n) and an adaptive shift (sig).

    | NK(x) = sign(x) * scaling * ((abs(x)**n) / ((abs(x)**n) + (sig**n))) + noise

    Args:
        :data:
            | float or ndarray
        :sig:
            | 2.0, optional
            | Semi-saturation constant. Value for which NK(:data:) is 1/2 of
            | :scaling: (for :noise: = 0).
        :n:
            | 0.73, optional
            | Compression power.
        :scaling:
            | 1.0, optional
            | Maximum value of NK-function.
        :noise:
            | 0.0, optional
            | Cone excitation noise.
        :forward:
            | True, optional
            | True: do NK(x)
            | False: do NK(x)**(-1).

    Returns:
        :returns:
            | float or ndarray with NK-(de)compressed input :x:
    """
    if forward:
        return np.sign(data) * scaling * ((np.abs(data)**n) / ((np.abs(data)**n) + (sig**n))) + noise
    else:
        # Inverse transform: strip the noise floor, solve NK() for |x|,
        # then restore the sign (inputs below the noise floor map to
        # negative de-compressed values).
        # NOTE: original code used `elif forward == False:`, which silently
        # returned None for e.g. forward=None; a plain else makes the
        # function total over its inputs.
        Ip = sig * (((np.abs(np.abs(data) - noise)) / (scaling - np.abs(np.abs(data) - noise))))**(1 / n)
        if not np.isscalar(Ip):
            p = np.where(np.abs(data) < noise)
            Ip[p] = -Ip[p]
        else:
            if np.abs(data) < noise:
                Ip = -Ip
        return Ip
def xyz_to_cct_ohno(xyzw, cieobs=_CIEOBS, out='cct', wl=None, accuracy=0.1, force_out_of_lut=True, upper_cct_max=10.0**20, approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and
    Duv (distance above (>0) or below (<0) the Planckian locus) using Ohno's method.

    Args:
        :xyzw:
            | ndarray of tristimulus values
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set used to calculated xyzw.
        :out:
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl:
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy:
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max:
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp:
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct to
            | speed up search.
        :force_out_of_lut:
            | True, optional
            | If True and cct is out of range of the LUT, then switch to
            | brute-force search method, else return numpy.nan values.

    Returns:
        :returns:
            | ndarray with:
            |   cct: out == 'cct' (or 1)
            |   duv: out == 'duv' (or -1)
            |   cct, duv: out == 'cct,duv' (or 2)
            |   [cct,duv]: out == "[cct,duv]" (or -2)

    Note:
        LUTs are stored in ./data/cctluts/

    Reference:
        1. `Ohno Y. Practical use and calculation of CCT and Duv.
        Leukos. 2014 Jan 2;10(1):47-55.
        <http://www.tandfonline.com/doi/abs/10.1080/15502724.2014.839020>`_
    """
    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_ohno(): Input xyzwa.ndim must be <= 2 !')

    # get 1960 u,v of test source:
    Yuv = xyz_to_Yuv(xyzw)  # remove possible 1-dim + convert xyzw to CIE 1976 u',v'
    axis_of_v3 = len(Yuv.shape) - 1  # axis containing color components
    u = Yuv[:, 1, None]  # get CIE 1960 u
    v = (2.0 / 3.0) * Yuv[:, 2, None]  # get CIE 1960 v
    uv = np2d(np.concatenate((u, v), axis=axis_of_v3))

    # load cct & uv from LUT (build and cache it on first use for this cieobs):
    if cieobs not in _CCT_LUT:
        _CCT_LUT[cieobs] = calculate_lut(ccts=None, cieobs=cieobs, add_to_lut=False)
    cct_LUT = _CCT_LUT[cieobs][:, 0, None]
    uv_LUT = _CCT_LUT[cieobs][:, 1:3]

    # calculate CCT of each uv:
    CCT = np.ones(uv.shape[0]) * np.nan  # initialize with NaN's
    Duv = CCT.copy()  # initialize with NaN's
    idx_m = 0
    idx_M = uv_LUT.shape[0] - 1
    for i in range(uv.shape[0]):
        out_of_lut = False
        # distance of uv to each LUT entry:
        delta_uv = (((uv_LUT - uv[i])**2.0).sum(axis=1))**0.5
        idx_min = delta_uv.argmin()  # index of minimum distance

        # find Tm, delta_uv and u,v for the 2 points surrounding idx_min;
        # hitting either LUT boundary means the solution is out of the LUT:
        if idx_min == idx_m:
            idx_min_m1 = idx_min
            out_of_lut = True
        else:
            idx_min_m1 = idx_min - 1
        if idx_min == idx_M:
            idx_min_p1 = idx_min
            out_of_lut = True
        else:
            idx_min_p1 = idx_min + 1

        if (out_of_lut == True) & (force_out_of_lut == True):
            # fall back to brute-force search:
            cct_i, Duv_i = xyz_to_cct_search(xyzw[i], cieobs=cieobs, wl=wl,
                                             accuracy=accuracy, out='cct,duv',
                                             upper_cct_max=upper_cct_max,
                                             approx_cct_temp=approx_cct_temp)
            CCT[i] = cct_i
            Duv[i] = Duv_i
            continue
        elif (out_of_lut == True) & (force_out_of_lut == False):
            CCT[i] = np.nan
            Duv[i] = np.nan
            # FIX: 'continue' was missing, so the NaN's were overwritten below
            # by a degenerate interpolation over a zero-length LUT segment:
            continue

        cct_m1 = cct_LUT[idx_min_m1]
        delta_uv_m1 = delta_uv[idx_min_m1]
        uv_m1 = uv_LUT[idx_min_m1]
        cct_p1 = cct_LUT[idx_min_p1]
        delta_uv_p1 = delta_uv[idx_min_p1]
        uv_p1 = uv_LUT[idx_min_p1]
        cct_0 = cct_LUT[idx_min]
        delta_uv_0 = delta_uv[idx_min]

        # calculate uv distance between Tm_m1 & Tm_p1:
        delta_uv_p1m1 = ((uv_p1[0] - uv_m1[0])**2.0 + (uv_p1[1] - uv_m1[1])**2.0)**0.5

        # Triangular solution:
        x = ((delta_uv_m1**2) - (delta_uv_p1**2) + (delta_uv_p1m1**2)) / (2 * delta_uv_p1m1)
        Tx = cct_m1 + ((cct_p1 - cct_m1) * (x / delta_uv_p1m1))
        vBB = uv_m1[1] + (uv_p1[1] - uv_m1[1]) * (x / delta_uv_p1m1)
        Tx_corrected_triangular = Tx * 0.99991  # Ohno's correction factor
        signDuv = np.sign(uv[i][1] - vBB)  # above/below Planckian locus
        Duv_triangular = signDuv * np.atleast_1d(((delta_uv_m1**2.0) - (x**2.0))**0.5)

        # Parabolic solution (fit delta_uv as parabola of cct over the 3 points):
        a = delta_uv_m1 / (cct_m1 - cct_0 + _EPS) / (cct_m1 - cct_p1 + _EPS)
        b = delta_uv_0 / (cct_0 - cct_m1 + _EPS) / (cct_0 - cct_p1 + _EPS)
        c = delta_uv_p1 / (cct_p1 - cct_0 + _EPS) / (cct_p1 - cct_m1 + _EPS)
        A = a + b + c
        B = -(a * (cct_p1 + cct_0) + b * (cct_p1 + cct_m1) + c * (cct_0 + cct_m1))
        C = (a * cct_p1 * cct_0) + (b * cct_p1 * cct_m1) + (c * cct_0 * cct_m1)
        Tx = -B / (2 * A + _EPS)
        Tx_corrected_parabolic = Tx * 0.99991
        Duv_parabolic = signDuv * (A * np.power(Tx_corrected_parabolic, 2) + B * Tx_corrected_parabolic + C)

        # Use triangular solution only for small |Duv| (cfr. Ohno, 2014).
        # FIX: original compared the *signed* Duv_triangular to the threshold,
        # so any strongly negative Duv wrongly selected the triangular branch:
        Threshold = 0.002
        if np.abs(Duv_triangular) < Threshold:
            CCT[i] = Tx_corrected_triangular
            Duv[i] = Duv_triangular
        else:
            CCT[i] = Tx_corrected_parabolic
            Duv[i] = Duv_parabolic

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2dT(CCT)
    elif (out == 'duv') | (out == -1):
        return np2dT(Duv)
    elif (out == 'cct,duv') | (out == 2):
        return np2dT(CCT), np2dT(Duv)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((CCT, Duv)).T
def xyz_to_cct_search(xyzw, cieobs=_CIEOBS, out='cct', wl=None, accuracy=0.1, upper_cct_max=10.0**20, approx_cct_temp=True):
    """
    Convert XYZ tristimulus values to correlated color temperature (CCT) and
    Duv (distance above (> 0) or below ( < 0) the Planckian locus) by a
    brute-force search.

    | The algorithm uses an approximate cct_temp (HA approx., see
      xyz_to_cct_HA) as starting point or uses the middle of the allowed
      cct-range (1e2 K - 1e20 K, higher causes overflow) on a log-scale, then
      constructs a 4-step section of the blackbody (Planckian) locus on which
      to find the minimum distance to the 1960 uv chromaticity of the test source.

    Args:
        :xyzw:
            | ndarray of tristimulus values
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set used to calculated xyzw.
        :out:
            | 'cct' (or 1), optional
            | Determines what to return.
            | Other options: 'duv' (or -1), 'cct,duv'(or 2), "[cct,duv]" (or -2)
        :wl:
            | None, optional
            | Wavelengths used when calculating Planckian radiators.
        :accuracy:
            | float, optional
            | Stop brute-force search when cct :accuracy: is reached.
        :upper_cct_max:
            | 10.0**20, optional
            | Limit brute-force search to this cct.
        :approx_cct_temp:
            | True, optional
            | If True: use xyz_to_cct_HA() to get a first estimate of cct to
            | speed up search.

    Returns:
        :returns:
            | ndarray with:
            |   cct: out == 'cct' (or 1)
            |   duv: out == 'duv' (or -1)
            |   cct, duv: out == 'cct,duv' (or 2)
            |   [cct,duv]: out == "[cct,duv]" (or -2)

    Notes:
        This program is more accurate, but slower than xyz_to_cct_ohno!
        Note that cct must be between 1e3 K - 1e20 K
        (very large cct take a long time!!!)
    """
    xyzw = np2d(xyzw)

    if len(xyzw.shape) > 2:
        raise Exception('xyz_to_cct_search(): Input xyzw.shape must be <= 2 !')

    # get 1960 u,v of test source:
    Yuvt = xyz_to_Yuv(np.squeeze(xyzw))  # remove possible 1-dim + convert xyzw to CIE 1976 u',v'
    #axis_of_v3t = len(Yuvt.shape)-1 # axis containing color components
    ut = Yuvt[:, 1, None]  #.take([1],axis = axis_of_v3t) # get CIE 1960 u
    vt = (2 / 3) * Yuvt[:, 2, None]  #.take([2],axis = axis_of_v3t) # get CIE 1960 v

    # Initialize arrays (NaN until a solution is found per sample):
    ccts = np.ones((xyzw.shape[0], 1)) * np.nan
    duvs = ccts.copy()

    #calculate preliminary solution(s):
    if (approx_cct_temp == True):
        # Hernandez-Andres estimate as starting point; the table maps a CCT
        # range [col 0, col 1) to a relative search-width factor (col 2):
        ccts_est = xyz_to_cct_HA(xyzw)
        procent_estimates = np.array([[3000.0, 100000.0, 0.05],
                                      [100000.0, 200000.0, 0.1],
                                      [200000.0, 300000.0, 0.25],
                                      [300000.0, 400000.0, 0.4],
                                      [400000.0, 600000.0, 0.4],
                                      [600000.0, 800000.0, 0.4],
                                      [800000.0, np.inf, 0.25]])
    else:
        # No estimator: start from the middle of the allowed range on a log10 scale.
        upper_cct = np.array(upper_cct_max)
        lower_cct = np.array(10.0**2)
        cct_scale_fun = lambda x: np.log10(x)
        cct_scale_ifun = lambda x: np.power(10.0, x)
        dT = (cct_scale_fun(upper_cct) - cct_scale_fun(lower_cct)) / 2
        ccttemp = np.array([cct_scale_ifun(cct_scale_fun(lower_cct) + dT)])
        ccts_est = np2d(ccttemp * np.ones((xyzw.shape[0], 1)))
        dT_approx_cct_False = dT.copy()

    # Loop through all ccts:
    for i in range(xyzw.shape[0]):

        #initialize CCT search parameters:
        cct = np.nan
        duv = np.nan
        ccttemp = ccts_est[i].copy()

        # Take care of (-1, NaN)'s from xyz_to_cct_HA signifying (CCT < lower, CCT > upper) bounds:
        approx_cct_temp_temp = approx_cct_temp
        if (approx_cct_temp == True):
            cct_scale_fun = lambda x: x  # linear scale when a good estimate exists
            cct_scale_ifun = lambda x: x
            if (ccttemp != -1) & (np.isnan(ccttemp) == False):  # within validity range of CCT estimator-function
                for ii in range(procent_estimates.shape[0]):
                    if (ccttemp >= (1.0 - 0.05 * (ii == 0)) * procent_estimates[ii, 0]) & (ccttemp < (1.0 + 0.05 * (ii == 0)) * procent_estimates[ii, 1]):
                        procent_estimate = procent_estimates[ii, 2]
                        break
                dT = np.multiply(ccttemp, procent_estimate)  # determines range around CCTtemp (25% around estimate) or 100 K
            elif (ccttemp == -1) & (np.isnan(ccttemp) == False):
                ccttemp = np.array([procent_estimates[0, 0] / 2])
                procent_estimate = 1  # cover 0 K to min_CCT of estimator
                dT = np.multiply(ccttemp, procent_estimate)
            elif (np.isnan(ccttemp) == True):
                # estimator failed: fall back to log-scale search over the full range
                upper_cct = np.array(upper_cct_max)
                lower_cct = np.array(10.0**2)
                cct_scale_fun = lambda x: np.log10(x)
                cct_scale_ifun = lambda x: np.power(10.0, x)
                dT = (cct_scale_fun(upper_cct) - cct_scale_fun(lower_cct)) / 2
                ccttemp = np.array([cct_scale_ifun(cct_scale_fun(lower_cct) + dT)])
                approx_cct_temp = False
        else:
            dT = dT_approx_cct_False

        nsteps = 3
        signduv = 1.0
        ccttemp = ccttemp[0]
        delta_cct = dT
        while ((delta_cct > accuracy)):  # keep converging on CCT

            #generate range of ccts:
            ccts_i = cct_scale_ifun(np.linspace(cct_scale_fun(ccttemp) - dT, cct_scale_fun(ccttemp) + dT, nsteps + 1))

            ccts_i[ccts_i < 100.0] = 100.0  # avoid nan's in calculation

            # Generate BB:
            BB = cri_ref(ccts_i, wl3=wl, ref_type=['BB'], cieobs=cieobs)

            # Calculate xyz:
            xyz = spd_to_xyz(BB, cieobs=cieobs)

            # Convert to CIE 1960 u,v:
            Yuv = xyz_to_Yuv(np.squeeze(xyz))  # remove possible 1-dim + convert xyz to CIE 1976 u',v'
            #axis_of_v3 = len(Yuv.shape)-1 # axis containing color components
            u = Yuv[:, 1, None]  # get CIE 1960 u
            v = (2.0 / 3.0) * Yuv[:, 2, None]  # get CIE 1960 v

            # Calculate distance between list of uv's and uv of test source:
            dc = ((ut[i] - u)**2 + (vt[i] - v)**2)**0.5

            if np.isnan(dc.min()) == False:
                #eps = _EPS
                q = dc.argmin()

                if np.size(q) > 1:  #to minimize calculation time: only calculate median when necessary
                    # NOTE(review): `ccts[q]` indexes the *output* array (mostly NaN
                    # at this point); `ccts_i[q]` was probably intended — confirm.
                    cct = np.median(ccts[q])
                    duv = np.median(dc[q])
                    q = np.median(q)
                    q = int(q)  #must be able to serve as index
                else:
                    cct = ccts_i[q]
                    duv = dc[q]

                if (q == 0):
                    ccttemp = cct_scale_ifun(np.array(cct_scale_fun([cct])) + 2 * dT / nsteps)
                    #dT = 2.0*dT/nsteps
                    continue  # look in higher section of planckian locus

                # NOTE(review): ccts_i has np.size(ccts_i) elements, so the max
                # index is np.size(ccts_i) - 1; this branch looks unreachable —
                # `q == np.size(ccts_i) - 1` was probably intended. Confirm.
                if (q == np.size(ccts_i)):
                    ccttemp = cct_scale_ifun(np.array(cct_scale_fun([cct])) - 2 * dT / nsteps)
                    #dT = 2.0*dT/nsteps
                    continue  # look in lower section of planckian locus

                if (q > 0) & (q < np.size(ccts_i) - 1):
                    dT = 2 * dT / nsteps
                    # get Duv sign:
                    d_p1m1 = ((u[q + 1] - u[q - 1])**2.0 + (v[q + 1] - v[q - 1])**2.0)**0.5
                    # NOTE(review): precedence — this computes (.../2.0)*d_p1m1;
                    # the analogous triangular step in xyz_to_cct_ohno divides by
                    # (2*d_p1m1). Suspected bug: should be `/ (2.0 * d_p1m1)`.
                    x = (dc[q - 1]**2.0 - dc[q + 1]**2.0 + d_p1m1**2.0) / 2.0 * d_p1m1
                    vBB = v[q - 1] + ((v[q + 1] - v[q - 1]) * (x / d_p1m1))
                    signduv = np.sign(vt[i] - vBB)

                #calculate difference with previous intermediate solution:
                delta_cct = abs(cct - ccttemp)
                ccttemp = np.array(cct)  #%set new intermediate CCT
                approx_cct_temp = approx_cct_temp_temp
            else:
                # NOTE(review): delta_cct is not updated on this path — if dc.min()
                # stays NaN the while-loop may not terminate. Verify upstream.
                ccttemp = np.nan
                cct = np.nan
                duv = np.nan

        duvs[i] = signduv * abs(duv)
        ccts[i] = cct

    # Regulate output:
    if (out == 'cct') | (out == 1):
        return np2d(ccts)
    elif (out == 'duv') | (out == -1):
        return np2d(duvs)
    elif (out == 'cct,duv') | (out == 2):
        return np2d(ccts), np2d(duvs)
    elif (out == "[cct,duv]") | (out == -2):
        return np.vstack((ccts, duvs)).T
def cie2006cmfsEx(age = 32, fieldsize = 10, wl = None,
                  var_od_lens = 0, var_od_macula = 0,
                  var_od_L = 0, var_od_M = 0, var_od_S = 0,
                  var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,
                  out = 'LMS', allow_negative_values = False):
    """
    Generate Individual Observer CMFs (cone fundamentals)
    based on CIE2006 cone fundamentals and published literature
    on observer variability in color matching and in physiological parameters.

    Args:
        :age:
            | 32 or float or int, optional
            | Observer age
        :fieldsize:
            | 10, optional
            | Field size of stimulus in degrees (between 2° and 10°).
        :wl:
            | None, optional
            | Interpolation/extraplation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :var_od_lens:
            | 0, optional
            | Std Dev. in peak optical density [%] of lens.
        :var_od_macula:
            | 0, optional
            | Std Dev. in peak optical density [%] of macula.
        :var_od_L:
            | 0, optional
            | Std Dev. in peak optical density [%] of L-cone.
        :var_od_M:
            | 0, optional
            | Std Dev. in peak optical density [%] of M-cone.
        :var_od_S:
            | 0, optional
            | Std Dev. in peak optical density [%] of S-cone.
        :var_shft_L:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of L-cone.
        :var_shft_M:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of M-cone.
        :var_shft_S:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of S-cone.
        :out:
            | 'LMS' or , optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions
              should not have negative values.
            |     If False: X[X<0] = 0.

    Returns:
        :returns:
            | - 'LMS' : ndarray with individual observer area-normalized
            |           cone fundamentals. Wavelength have been added.
            | [- 'trans_lens': ndarray with lens transmission
            |      (no wavelengths added, no interpolation)
            |  - 'trans_macula': ndarray with macula transmission
            |      (no wavelengths added, no interpolation)
            |  - 'sens_photopig' : ndarray with photopigment sens.
            |      (no wavelengths added, no interpolation)]

    References:
        1. `Asano Y, Fairchild MD, and Blondé L (2016).
        Individual Colorimetric Observer Model.
        PLoS One 11, 1–19.
        <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
        2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016).
        Color matching experiment for highlighting interobserver variability.
        Color Res. Appl. 41, 530–539.
        <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
        3. `CIE, and CIE (2006).
        Fundamental Chromaticity Diagram with Physiological Axes - Part I
        (Vienna: CIE).
        <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_
        4. `Asano's Individual Colorimetric Observer Model
        <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """
    fs = fieldsize
    # base data tables: macular density (rmd), cone fundamentals (LMSa) and
    # lens/ocular media density (docul) — copied so callers' tables stay intact:
    rmd = _INDVCMF_DATA['rmd'].copy()
    LMSa = _INDVCMF_DATA['LMSa'].copy()
    docul = _INDVCMF_DATA['docul'].copy()

    # field size corrected macular density:
    pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * (1 + var_od_macula / 100)  # varied peak optical density of macula
    corrected_rmd = rmd * pkOd_Macula

    # age corrected lens/ocular media density:
    # (two-piece age model: slope changes at age 60; docul row 0 is the
    # age-dependent part, row 1 the age-independent part)
    if (age <= 60):
        correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2]
    else:
        correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2]
    correct_lomd = correct_lomd * (1 + var_od_lens / 100)  # varied overall optical density of lens

    # Peak Wavelength Shift: resample each cone fundamental on a shifted
    # wavelength axis, then evaluate back on the nominal _WL grid:
    wl_shifted = np.empty(LMSa.shape)
    wl_shifted[0] = _WL + var_shft_L
    wl_shifted[1] = _WL + var_shft_M
    wl_shifted[2] = _WL + var_shft_S

    LMSa_shft = np.empty(LMSa.shape)
    kind = 'cubic'
    LMSa_shft[0] = interpolate.interp1d(wl_shifted[0], LMSa[0], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL)
    LMSa_shft[1] = interpolate.interp1d(wl_shifted[1], LMSa[1], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL)
    LMSa_shft[2] = interpolate.interp1d(wl_shifted[2], LMSa[2], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL)
    # LMSa[2,np.where(_WL >= _WL_CRIT)] = 0 #np.nan # Not defined above 620nm
    # LMSa_shft[2,np.where(_WL >= _WL_CRIT)] = 0
    ssw = np.hstack((0, np.sign(np.diff(LMSa_shft[2, :]))))  #detect poor interpolation (sign switch due to instability)
    LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan

    # corrected LMS (no age correction):
    pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (1 + var_od_L / 100)  # varied peak optical density of L-cone
    pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (1 + var_od_M / 100)  # varied peak optical density of M-cone
    pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * (1 + var_od_S / 100)  # varied peak optical density of S-cone

    alpha_lms = 0. * LMSa_shft
    alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0]))
    alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1]))
    alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2]))

    # this fix is required because the above math fails for alpha_lms[2,:]==0
    alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0

    # Corrected to Corneal Incidence:
    lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones(alpha_lms.shape)

    # Corrected to Energy Terms:
    lms_bar = lms_barq * _WL

    # Set NaN values to zero:
    lms_bar[np.isnan(lms_bar)] = 0

    # normalized to area:
    LMS = 100 * lms_bar / np.nansum(lms_bar, axis=1, keepdims=True)

    # Output extra:
    trans_lens = 10**(-correct_lomd)
    trans_macula = 10**(-corrected_rmd)
    sens_photopig = alpha_lms * _WL

    # Add wavelengths:
    LMS = np.vstack((_WL, LMS))

    if ('xyz' in out.lower().split(',')):
        LMS = lmsb_to_xyzb(LMS, fieldsize, out='xyz', allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    # Interpolate/extrapolate:
    if wl is None:
        interpolation = None
    else:
        interpolation = 'cubic'
    LMS = spd(LMS, wl=wl, interpolation=interpolation, norm_type='area')

    if (out == 'LMS'):
        return LMS
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'):
        return LMS, trans_lens, trans_macula, sens_photopig
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'):
        return LMS, trans_lens, trans_macula, sens_photopig, LMSa
    else:
        # NOTE(review): eval() on the :out: string executes arbitrary code;
        # only trusted :out: strings should ever reach this point.
        return eval(out)
def Ydlep_to_xyz(Ydlep, cieobs=_CIEOBS, xyzw=_COLORTF_DEFAULT_WHITE_POINT, flip_axes=False, **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity to
    XYZ tristimulus values.

    Args:
        :Ydlep:
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native
              white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydelep to increase speed of
            | loop in function. (single xyzw is not flipped!)

    Returns:
        :xyz:
            | ndarray with tristimulus values
    """
    # FIX: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    Ydlep3 = np3d(Ydlep).copy().astype(float)

    # flip axis so that longest dim is on first axis (save time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # center on xyzw (white point becomes the origin):
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wl's to dom (abs because dom < 0 --> complementary wl):
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)
        dwl = np.abs(wlslb - wlib)
        q1 = dwl.argmin(axis=0)  # index of closest wl
        dwl[q1] = 10000.0  # mask out the closest so the runner-up is found
        q2 = dwl.argmin(axis=0)  # index of second closest wl

        # linearly interpolate x,y on the spectrum locus at dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0])  # calculate y of dom. wl

        # calculate x,y of test point: scale the white->locus distance by purity:
        d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary wavelengths (dom < 0): point lies towards the purple line.
        pc = np.where(dom[:, i] < 0.0)
        # NOTE(review): sign argument uses dom (always < 0 here, so dom-180 < 0);
        # the sibling xyz_to_Ydlep uses sign(h-180). Verify intended hue flip.
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] - 180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])  # 90° rotation for 2D "perp product"
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d((xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(hdom[pc] * np.pi / 180)

    # un-center (add white point back) and restore axis order:
    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))
    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
def xyz_to_Ydlep(xyz, cieobs=_CIEOBS, xyzw=_COLORTF_DEFAULT_WHITE_POINT, flip_axes=False, **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native
              white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydelep to increase speed of
            | loop in function. (single xyzw is not flipped!)

    Returns:
        :Ydlep:
            | ndarray with Y, dominant (complementary) wavelength
              and excitation purity
    """
    # FIX: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    xyz3 = np3d(xyz).copy().astype(float)

    # flip axis so that shortest dim is on axis0 (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # center on xyzw (white point becomes the origin):
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    #split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')
    hsl = math.positive_arctan(xsl, ysl, htype='deg')
    hsl_max = hsl[0]  # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues:
        pc = np.where((h[:, i] >= hsl_max) & (h[:, i] <= hsl_min + 360.0))  # hue's requiring complementary wavelength (purple line)
        h[:, i][pc] = h[:, i][pc] - np.sign(h[:, i][pc] - 180.0) * 180.0  # add/subtract 180° to get positive complementary wavelength

        # find 2 closest hues in sl:
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000.0  # mask out the closest so the runner-up is found
        q2 = dh.argmin(axis=0)  # index of second closest hue

        # linear interpolation of wl as function of hue: y = y1 + (y2-y1)*(x-x1)/(x2-x1)
        dominantwavelength[:, i] = wlsl[q1] + np.divide(np.multiply((wlsl[q2] - wlsl[q1]), (h[:, i] - hsl[q1, 0])), (hsl[q2, 0] - hsl[q1, 0]))
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][pc]  #complementary wavelengths are specified by '-' sign

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # calculate y of dom. wl
        d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 + y[:, i]**2.0)**0.5  # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength:
        # calculate intersection of line through white point and test point and purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])  # 90° rotation for 2D "perp product"
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d((xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T  #[0]
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]

    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))
    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    return Ydlep.reshape(xyz.shape)
def plot_cri_graphics(data, cri_type = None, hbins = 16, start_hue = 0.0, scalef = 100,
                      plot_axis_labels = False, bin_labels = None, plot_edge_lines = True,
                      plot_center_lines = False, plot_bin_colors = True,
                      axtype = 'polar', ax = None, force_CVG_layout = True,
                      vf_model_type = _VF_MODEL_TYPE, vf_pcolorshift = _VF_PCOLORSHIFT, vf_color = 'k',
                      vf_bin_labels = _VF_PCOLORSHIFT['labels'], vf_plot_bin_colors = True,
                      scale_vf_chroma_to_sample_chroma = False,
                      plot_VF = True, plot_CF = False, plot_SF = False):
    """
    Plot graphical information on color rendition properties.

    Args:
        :data:
            | ndarray with spectral data or dict with pre-computed metrics.
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | :hbins:, :start_hue: and :scalef: are ignored if cri_type not
              None and values are replaced by those in cri_type['rg_pars'].
        :hbins:
            | 16 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :scalef:
            | 100, optional
            | Scale factor for graphic.
        :plot_axis_labels:
            | False, optional
            | Turns axis ticks on/off (True/False).
        :bin_labels:
            | None or list[str] or '#', optional
            | Plots labels at the bin center hues.
            | - None: don't plot.
            | - list[str]: list with str for each bin. (len = :nhbins:)
            | - '#': plots number.
        :plot_edge_lines:
            | True or False, optional
            | Plot grey bin edge lines with '--'.
        :plot_center_lines:
            | False or True, optional
            | Plot colored lines at 'center' of hue bin.
        :plot_bin_colors:
            | True, optional
            | Colorize hue bins.
        :axtype:
            | 'polar' or 'cart', optional
            | Make polar or Cartesian plot.
        :ax:
            | None or 'new' or 'same', optional
            | - None or 'new' creates new plot
            | - 'same': continue plot on same axes.
            | - axes handle: plot on specified axes.
        :force_CVG_layout:
            | False or True, optional
            | True: Force plot of basis of CVG.
        :vf_model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Type of polynomial vector field model to use for the
              calculation of base color shift and metameric uncertainty.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | Specifies hues and chroma level at which to evaluate the
              polynomial vector field models (dCoverC, dH).
        :vf_color:
            | 'k', optional
            | For plotting the vector fields.
        :vf_plot_bin_colors:
            | True, optional
            | Colorize hue bins of VF graph.
        :scale_vf_chroma_to_sample_chroma:
            | False, optional
            | Scale chroma of reference and test vf fields such that average
              of binned reference chroma equals that of the binned sample
              chroma before calculating hue bin metrics.
        :vf_bin_labels:
            | see :bin_labels:
            | Set VF model hue-bin labels.
        :plot_CF:
            | False, optional
            | Plot circle fields.
        :plot_VF:
            | True, optional
            | Plot vector fields.
        :plot_SF:
            | True, optional
            | Plot sample shifts.

    Returns:
        :returns:
            | (data,
            |  [plt.gcf(), ax_spd, ax_CVG, ax_locC, ax_locH, ax_VF],
            |  cmap)
            | :data: dict with color rendering data (keys: 'SPD', 'bjabt',
            |   'bjabr', 'cct', 'duv', 'Rf', 'Rg', 'Rfi', 'Rfhi', 'Rcshi',
            |   'Rhshi', 'Rt', 'Rti', 'Rfhi_vf', 'Rcshi_vf', 'Rhshi_vf')
            | :[...]: list with handles to current figure and 5 axes.
            | :cmap: list with rgb colors for hue bins
            |   (for use in other plotting fcns)
    """
    if not isinstance(data, dict):
        data = spd_to_ies_tm30_metrics(data, cri_type = cri_type, hbins = hbins,
                                       start_hue = start_hue, scalef = scalef,
                                       vf_model_type = vf_model_type,
                                       vf_pcolorshift = vf_pcolorshift,
                                       scale_vf_chroma_to_sample_chroma = scale_vf_chroma_to_sample_chroma)

    # unpack metrics; order follows sorted(data.keys()):
    Rcshi, Rf, Rfchhi_vf, Rfhi, Rfhi_vf, Rfhshi_vf, Rfi, Rg, Rhshi, Rt, Rti, SPD, bjabr, bjabt, cct, cri_type, dataVF, duv = [data[x] for x in sorted(data.keys())]

    hbins = cri_type['rg_pars']['nhbins']
    start_hue = cri_type['rg_pars']['start_hue']
    scalef = cri_type['rg_pars']['normalized_chroma_ref']

    # subplot grid: row 0 holds the overall grid shape, rows 1..7 hold
    # (row, col, colspan, rowspan) for each panel:
    layout = np.array([[6,7,0,0],[0,4,3,3],[3,4,3,3],[0,0,4,2],[2,0,2,2],[4,2,2,2],[4,0,2,2],[2,2,2,2]])

    def create_subplot(layout, n, polar = False, frameon = True):
        # build subplot :n: from the layout table
        ax = plt.subplot2grid(layout[0,0:2], layout[n,0:2],
                              colspan = layout[n,2], rowspan = layout[n,3],
                              polar = polar, frameon = frameon)
        return ax

    for i in range(cct.shape[0]):

        fig = plt.figure(figsize=(10, 6), dpi=144)

        # Plot CVG:
        ax_CVG = create_subplot(layout, 1, polar = True, frameon = False)
        figCVG, ax, cmap = plot_ColorVectorGraphic(bjabt[...,i,:], bjabr[...,i,:],
                                                   hbins = hbins, axtype = axtype, ax = ax_CVG,
                                                   plot_center_lines = plot_center_lines,
                                                   plot_edge_lines = plot_edge_lines,
                                                   plot_bin_colors = plot_bin_colors,
                                                   scalef = scalef,
                                                   force_CVG_layout = force_CVG_layout,
                                                   bin_labels = '#')

        # Plot VF:
        ax_VF = create_subplot(layout, 2, polar = True, frameon = False)
        if i == 0:
            hbin_cmap = None
        ax_VF, hbin_cmap = plot_VF_PX_models([dataVF[i]], dataPX = None, plot_VF = plot_VF,
                                             plot_PX = None, axtype = 'polar', ax = ax_VF,
                                             plot_circle_field = plot_CF, plot_sample_shifts = plot_SF,
                                             plot_bin_colors = vf_plot_bin_colors,
                                             plot_samples_shifts_at_pixel_center = False,
                                             jabp_sampled = None,
                                             plot_VF_colors = [vf_color], plot_PX_colors = ['r'],
                                             hbin_cmap = hbin_cmap, force_CVG_layout = True,
                                             bin_labels = vf_bin_labels)

        # Plot test SPD (peak-normalized) with summary metrics as text:
        ax_spd = create_subplot(layout, 3)
        ax_spd.plot(SPD[0], SPD[i+1]/SPD[i+1].max(), 'r-')
        ax_spd.text(730, 0.9, 'CCT = {:1.0f} K'.format(cct[i][0]), fontsize = 9, horizontalalignment='left', verticalalignment='center', rotation = 0, color = np.array([1,1,1])*0.3)
        ax_spd.text(730, 0.8, 'Duv = {:1.4f}'.format(duv[i][0]), fontsize = 9, horizontalalignment='left', verticalalignment='center', rotation = 0, color = np.array([1,1,1])*0.3)
        ax_spd.text(730, 0.7, 'IES Rf = {:1.0f}'.format(Rf[:,i][0]), fontsize = 9, horizontalalignment='left', verticalalignment='center', rotation = 0, color = np.array([1,1,1])*0.3)
        ax_spd.text(730, 0.6, 'IES Rg = {:1.0f}'.format(Rg[:,i][0]), fontsize = 9, horizontalalignment='left', verticalalignment='center', rotation = 0, color = np.array([1,1,1])*0.3)
        ax_spd.text(730, 0.5, 'Rt = {:1.0f}'.format(Rt[:,i][0]), fontsize = 9, horizontalalignment='left', verticalalignment='center', rotation = 0, color = np.array([1,1,1])*0.3)
        ax_spd.set_xlabel('Wavelength (nm)', fontsize = 9)
        ax_spd.set_ylabel('Rel. spectral intensity', fontsize = 9)
        ax_spd.set_xlim([360, 830])

        # Plot local color fidelity, Rfhi:
        ax_Rfi = create_subplot(layout, 4)
        for j in range(hbins):
            ax_Rfi.bar(range(hbins)[j], Rfhi[j,i], color = cmap[j], width = 1, edgecolor = 'k', alpha = 0.4)
            ax_Rfi.text(range(hbins)[j], Rfhi[j,i]*1.1, '{:1.0f}'.format(Rfhi[j,i]), fontsize = 9, horizontalalignment='center', verticalalignment='center', color = np.array([1,1,1])*0.3)
        ax_Rfi.set_ylim([0, 120])
        xticks = np.arange(hbins)
        xtickslabels = ['{:1.0f}'.format(ii+1) for ii in range(hbins)]
        ax_Rfi.set_xticks(xticks)
        ax_Rfi.set_xticklabels(xtickslabels, fontsize = 8)
        ax_Rfi.set_ylabel(r'Local color fidelity $R_{f,hi}$')
        ax_Rfi.set_xlabel('Hue bin #')

        # Plot local chroma shift, Rcshi:
        ax_locC = create_subplot(layout, 5)
        for j in range(hbins):
            ax_locC.bar(range(hbins)[j], Rcshi[j,i], color = cmap[j], width = 1, edgecolor = 'k', alpha = 0.4)
            ax_locC.text(range(hbins)[j], -np.sign(Rcshi[j,i])*0.1, '{:1.0f}%'.format(100*Rcshi[j,i]), fontsize = 9, horizontalalignment='center', verticalalignment='center', rotation = 90, color = np.array([1,1,1])*0.3)
        # FIX: symmetric y-limits from |min| and |max| (original used |min| twice):
        ylim = np.array([np.abs(Rcshi.min()), np.abs(Rcshi.max()), 0.2]).max()*1.5
        ax_locC.set_ylim([-ylim, ylim])
        ax_locC.set_ylabel(r'Local chroma shift, $R_{cs,hi}$')
        ax_locC.set_xticklabels([])
        # FIX: use get_ylim() (set_ylim() without args was used as a getter):
        ax_locC.set_yticklabels(['{:1.2f}'.format(ii) for ii in ax_locC.get_ylim()], color = 'white')

        # Plot local hue shift, Rhshi:
        ax_locH = create_subplot(layout, 6)
        for j in range(hbins):
            ax_locH.bar(range(hbins)[j], Rhshi[j,i], color = cmap[j], width = 1, edgecolor = 'k', alpha = 0.4)
            ax_locH.text(range(hbins)[j], -np.sign(Rhshi[j,i])*0.2, '{:1.3f}'.format(Rhshi[j,i]), fontsize = 9, horizontalalignment='center', verticalalignment='center', rotation = 90, color = np.array([1,1,1])*0.3)
        # FIX: symmetric y-limits from |min| and |max| (original used |min| twice):
        ylim = np.array([np.abs(Rhshi.min()), np.abs(Rhshi.max()), 0.2]).max()*1.5
        ax_locH.set_ylim([-ylim, ylim])
        ax_locH.set_ylabel(r'Local hue shift, $R_{hs,hi}$')
        ax_locH.set_xticklabels([])
        ax_locH.set_yticklabels(['{:1.2f}'.format(ii) for ii in ax_locH.get_ylim()], color = 'white')

        # Plot local color fidelity of VF, vfRfhi:
        ax_vfRfi = create_subplot(layout, 7)
        for j in range(hbins):
            ax_vfRfi.bar(range(hbins)[j], Rfhi_vf[j,i], color = cmap[j], width = 1, edgecolor = 'k', alpha = 0.4)
            ax_vfRfi.text(range(hbins)[j], Rfhi_vf[j,i]*1.1, '{:1.0f}'.format(Rfhi_vf[j,i]), fontsize = 9, horizontalalignment='center', verticalalignment='center', color = np.array([1,1,1])*0.3)
        ax_vfRfi.set_ylim([0, 120])
        xticks = np.arange(hbins)
        xtickslabels = ['{:1.0f}'.format(ii+1) for ii in range(hbins)]
        ax_vfRfi.set_xticks(xticks)
        ax_vfRfi.set_xticklabels(xtickslabels, fontsize = 8)
        ax_vfRfi.set_ylabel(r'Local VF color fidelity $vfR_{f,hi}$')
        ax_vfRfi.set_xlabel('Hue bin #')

        plt.tight_layout()

    return data, [plt.gcf(), ax_spd, ax_CVG, ax_locC, ax_locH, ax_VF], cmap
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping of
    the Munsell color system.

    | This function implements the JOSA A (parameters = 'JOSA') published model.

    Args:
        :data:
            | ndarray with input tristimulus values
            | or spectral data
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby:
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl)
        :dataw:
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb:
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None, optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |     - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |     - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |     - dict: user defined model parameters
            |       (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input:
            | tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam_sww_2016
            |   -'inverse': cam_sww_2016 -> xyz
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']

    Returns:
        :returns:
            | ndarray with color appearance correlates (:direction: == 'forward')
            | or
            | XYZ tristimulus values (:direction: == 'inverse')

    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') published model.
        | With:
        |    1. A correction for the parameter in Eq.4 of Fig. 11: 0.952 --> -0.952
        |
        |    2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f
        |       should be: -0.028 & 0.821
        |
        |  (cfr. Ccwb = 0.66 in:
        |   ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))

    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016).
        A simple principled approach for modeling and understanding uniform color metrics.
        Journal of the Optical Society of America A, 33(3), A319–A331.
        <https://doi.org/10.1364/JOSAA.33.00A319>`_
    """
    # get model parameters:
    # locals() snapshot is taken BEFORE any local is rebound, so it holds the
    # caller-supplied keyword arguments only.
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters,str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    # overwrite parameters with other (not-None) args input:
    parameters = put_args_in_db(parameters,args)

    # unpack model parameters (keys are unpacked in sorted order, so the
    # left-hand names below must stay alphabetically ordered w.r.t. the keys):
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta,cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0  = [parameters[x] for x in sorted(parameters.keys())]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy() # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs = cieobs,relative=False) # get abs. tristimulus values
        if relative == False: #input is expected to be absolute
            # rescale illuminant C spectrum so its luminance equals Lw:
            dataw[1:] = Lw*dataw[1:]/xyzw[:,1:2] #dataw = Lw*dataw # make absolute
        else:
            dataw = dataw # make relative (Y=100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs = cieobs, relative = relative)

    # precomputations:
    # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS:
    Mxyz2lms = np.dot(np.diag(cLMS),math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]])))
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda,calpha,cbeta]) # lms -> (lambda*, alpha, beta) opponent matrix
    invMAab = np.linalg.inv(MAab)

    # initialize data and camout:
    data = np2d(data).copy() # stimulus data (can be upto NxMx3 for xyz, or [N x (M+1) x wl] for spd))
    dataw = np2d(dataw).copy() # white point (can be upto Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis = 1) # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[0] == 1: # make dataw have same light source dimension size as data
            dataw = np.repeat(dataw,data.shape[1],axis=0)
    else:
        # spectral input: row 0 holds the wavelengths, repeat only the spd rows:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0],np.repeat(dataw[1:], data.shape[1], axis = 0)))

    # Flip light source dim to axis 0 (loop below iterates over light sources):
    data = np.transpose(data, axes = (1,0,2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3 # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):

        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
                dataw[i+1] = Lw*dataw[i+1]/xyzw_abs[0,1] # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0],dataw[i+1])), cieobs = cieobs, relative = False)
            # 683/K converts CMF-weighted integrals to (approx.) photon-rate-like units:
            lmsw = 683.0*np.dot(Mxyz2lms,xyzw.T).T/_CMF[cieobs]['K']
            lmsf = (Yb/100.0)*lmsw # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i,1:,:] = Lw*data[i,1:,:]/xyzw_abs[0,1] # make absolute
                xyzt = spd_to_xyz(data[i], cieobs = cieobs, relative = False)/_CMF[cieobs]['K']
                lmst = 683.0*np.dot(Mxyz2lms,xyzt.T).T # convert to l,m,s
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode
        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw*dataw[i]/100.0 # make absolute
            lmsw = 683.0* np.dot(Mxyz2lms, dataw[i].T).T /_CMF[cieobs]['K'] # convert to lms
            lmsf = (Yb/100.0)*lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw*data[i]/100.0 # make absolute
                lmst = 683.0* np.dot(Mxyz2lms, data[i].T).T /_CMF[cieobs]['K'] # convert to lms
            else:
                lmst = lmsf # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc*(np.log(lmst/lms0) + Cf*np.log(lmsf/lms0)))
        lmsfp = math.erf(Cc*(np.log(lmsf/lms0) + Cf*np.log(lmsf/lms0)))
        lmstp = np.vstack((lmsfp,lmstp)) # add adaptation field lms temporarily to lmsp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar,alph, bet = asplit(np.dot(MAab, lmstp.T).T)
        # asymmetric gain for negative vs positive opponent signals:
        alphp = cga1[0]*alph
        alphp[alph<0] = cga1[1]*alph[alph<0]
        betp = cgb1[0]*bet
        betp[bet<0] = cgb1[1]*bet[bet<0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0]*(alphp + betp)
        betpp = cgb2[0]*(alphp - betp)

        # stage 5: calculate conscious color perception
        # (rotation of the alphpp/betpp plane by cab_int[1] degrees):
        lstar_int = cl_int[0]*(lstar + cl_int[1])
        alph_int = cab_int[0]*(np.cos(cab_int[1]*np.pi/180.0)*alphpp - np.sin(cab_int[1]*np.pi/180.0)*betpp)
        bet_int = cab_int[0]*(np.sin(cab_int[1]*np.pi/180.0)*alphpp + np.cos(cab_int[1]*np.pi/180.0)*betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                # fixed white-balance shift from model parameters:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                # adaptive white-balance shift, clamped to degree of adaptation in [0, 1]:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                # row 0 of the *_int arrays is the gray adaptation field (prepended in stage 2):
                alph_out = alph_int - Ccwb[0]*alph_int[0] # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_out = bet_int - Ccwb[1]*bet_int[0]

            camout[i] = np.vstack((lstar_out[1:],alph_out[1:],bet_out[1:])).T # stack together and remove adaptation field from vertical stack

        elif direction == 'inverse':
            # correlates of the adaptation field (row 0), needed to undo the white balance:
            labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                # NOTE: alph_int[0]/bet_int[0] here are still the forward-computed
                # adaptation-field values (row 0), read before alph_int/bet_int are rebound:
                alph_int = alph_out + Ccwb[0]*alph_int[0] # inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation
                bet_int = bet_out + Ccwb[1]*bet_int[0]

            lstar_int = lstar_out
            # inverse rotation (angle negated) of the stage-5 plane rotation:
            alphpp = (1.0 / cab_int[0]) * (np.cos(-cab_int[1]*np.pi/180.0)*alph_int - np.sin(-cab_int[1]*np.pi/180.0)*bet_int)
            betpp = (1.0 / cab_int[0]) * (np.sin(-cab_int[1]*np.pi/180.0)*alph_int + np.cos(-cab_int[1]*np.pi/180.0)*bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int /cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0]) # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0]) # <-- betpp = (Cgb2.*(alphp-betp));

            # stage 3 inverse (undo the asymmetric gains; sign of cga1[1]/cgb1[1]
            # selects which elements used the negative-branch gain):
            alph = alphp/cga1[0]
            bet = betp/cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa*alphp)<0.0] = alphp[(sa*alphp)<0] / cga1[1]
            bet[(sb*betp)<0.0] = betp[(sb*betp)<0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab,lab.T).T
            # clamp to the open domain of erfinv before inverting:
            lmstp[lmstp<-1.0] = -1.0
            lmstp[lmstp>1.0] = 1.0
            lmstp = math.erfinv(lmstp) / Cc - Cf*np.log(lmsf/lms0)
            lmst = np.exp(lmstp) * lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms,lmst.T).T

            if relative == True:
                xyzt = (100.0/Lw) * xyzt

            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes = (1,0,2))

    # drop the singleton sample axis for 2D (single-sample-set) input:
    if camout.shape[0] == 1:
        camout = np.squeeze(camout,axis = 0)

    return camout