def naka_rushton(data, sig = 2.0, n = 0.73, scaling = 1.0, noise = 0.0, forward = True):
    """
    Apply a Naka-Rushton response compression (n) and an adaptive shift (sig).

    | NK(x) = sign(x) * scaling * ((abs(x)**n) / ((abs(x)**n) + (sig**n))) + noise

    Args:
        :data:
            | float or ndarray
        :sig:
            | 2.0, optional
            | Semi-saturation constant. Value for which NK(:data:) reaches half of :scaling: (for :noise: = 0).
        :n:
            | 0.73, optional
            | Compression power.
        :scaling:
            | 1.0, optional
            | Maximum value of NK-function.
        :noise:
            | 0.0, optional
            | Cone excitation noise.
        :forward:
            | True, optional
            | True: apply NK(x)
            | False: apply the inverse function NK^-1(x) (decompression).

    Returns:
        :returns:
            | float or ndarray with NK-(de)compressed input :data:
    """
    if forward:
        return np.sign(data) * scaling * ((np.abs(data)**n) / ((np.abs(data)**n) + (sig**n))) + noise
    else:
        Ip = sig * (((np.abs(np.abs(data) - noise)) / (scaling - np.abs(np.abs(data) - noise))))**(1 / n)
        if not np.isscalar(Ip):
            p = np.where(np.abs(data) < noise)
            Ip[p] = -Ip[p]
        else:
            if np.abs(data) < noise:
                Ip = -Ip
        return Ip
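

def _demo_naka_rushton():
    # Minimal round-trip sketch for naka_rushton() (illustrative only, not part
    # of the published model code; assumes numpy is available as np, as in the
    # rest of this module): compress a few values, then invert the compression.
    x = np.array([0.1, 1.0, 10.0])
    y = naka_rushton(x, sig = 2.0, n = 0.73)                        # forward compression
    x_back = naka_rushton(y, sig = 2.0, n = 0.73, forward = False)  # inverse (decompression)
    return x, y, x_back  # x_back should be numerically close to x
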
def xyz_to_Ydlep(xyz, cieobs = _CIEOBS, xyzw = _COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes = False, SL_max_lambda = None, **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase speed of loop in function.
            | (single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on
            | itself in the high wavelength range (~700 nm)

    Returns:
        :Ydlep:
            | ndarray with Y, dominant (complementary) wavelength
            | and excitation purity
    """
    xyz3 = np3d(xyz).copy().astype(float)

    # flip axes to loop over the shortest dimension (saves time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) > 0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself):
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(Yxysl[:, 0, 1])  # spectrum locus in that range should have increasing x
        dwl[wlsl[:-1] < 600] = 10000
        pmaxlambda = np.where(dwl <= 0)[0][0]  # take first element with zero or negative slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1)]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    # split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')
    hsl = math.positive_arctan(xsl, ysl, htype='deg')

    hsl_max = hsl[0]   # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength
    if hsl_min < hsl_max:
        hsl_min += 360

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    for i in range(xyz3.shape[1]):

        # find indices of test points that require a complementary wavelength
        # (hues between the purple-line endpoints):
        pc = np.where((h[:, i] > hsl_max) & (h[:, i] < hsl_min))
        # add/subtract 180° to get the hue of the (positive) complementary wavelength:
        h[:, i][pc] = h[:, i][pc] - np.sign(h[:, i][pc] - 180.0) * 180.0

        # find the 2 closest enclosing hues in the spectrum locus:
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = (hslb - hib)
        q1 = np.abs(dh).argmin(axis=0)  # index of closest hue
        sign_q1 = np.sign(dh[q1])[0]
        dh[np.sign(dh) == sign_q1] = 1000000  # set all dh on the same side as q1 to a very large value
        q2 = np.abs(dh).argmin(axis=0)  # index of second closest (enclosing) hue

        # interpolate wavelength corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1)
        dominantwavelength[:, i] = wlsl[q1] + np.multiply((h[:, i] - hsl[q1, 0]),
                                                          np.divide((wlsl[q2] - wlsl[q1]),
                                                                    (hsl[q2, 0] - hsl[q1, 0])))
        # complementary wavelengths are specified by a '-' sign:
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][pc]

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # y of dom. wl
        d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5  # distance from white point to spectrum locus
        d = (x[:, i]**2.0 + y[:, i]**2.0)**0.5       # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct purity for test points that have a complementary wavelength:
        # calculate intersection of the line through white point and test point with the purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d((xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]

    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
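

def _demo_xyz_to_Ydlep():
    # Illustrative sketch (not part of the original module): convert a few xyz
    # test points to Y, dominant wavelength and excitation purity using the
    # module defaults (_CIEOBS observer and _COLORTF_DEFAULT_WHITE_POINT).
    xyz = np.array([[40.0, 30.0, 20.0],
                    [20.0, 30.0, 40.0]])
    Ydlep = xyz_to_Ydlep(xyz)
    # columns: Y, dominant (or negative complementary) wavelength, excitation purity
    return Ydlep
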
def cie2006cmfsEx(age = 32, fieldsize = 10, wl = None,
                  var_od_lens = 0, var_od_macula = 0,
                  var_od_L = 0, var_od_M = 0, var_od_S = 0,
                  var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,
                  out = 'LMS', allow_negative_values = False):
    """
    Generate Individual Observer CMFs (cone fundamentals)
    based on CIE2006 cone fundamentals and published literature
    on observer variability in color matching and in physiological parameters.

    Args:
        :age:
            | 32 or float or int, optional
            | Observer age
        :fieldsize:
            | 10, optional
            | Field size of stimulus in degrees (between 2° and 10°).
        :wl:
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :var_od_lens:
            | 0, optional
            | Std Dev. in peak optical density [%] of lens.
        :var_od_macula:
            | 0, optional
            | Std Dev. in peak optical density [%] of macula.
        :var_od_L:
            | 0, optional
            | Std Dev. in peak optical density [%] of L-cone.
        :var_od_M:
            | 0, optional
            | Std Dev. in peak optical density [%] of M-cone.
        :var_od_S:
            | 0, optional
            | Std Dev. in peak optical density [%] of S-cone.
        :var_shft_L:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of L-cone.
        :var_shft_M:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of M-cone.
        :var_shft_S:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of S-cone.
        :out:
            | 'LMS' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions should not have negative values.
            | If False: X[X<0] = 0.

    Returns:
        :returns:
            | - 'LMS': ndarray with individual observer area-normalized
            |   cone fundamentals. Wavelengths have been added.
            | [- 'trans_lens': ndarray with lens transmission
            |    (no wavelengths added, no interpolation)
            |  - 'trans_macula': ndarray with macula transmission
            |    (no wavelengths added, no interpolation)
            |  - 'sens_photopig': ndarray with photopigment sens.
            |    (no wavelengths added, no interpolation)]

    References:
        1. `Asano Y, Fairchild MD, and Blondé L (2016).
        Individual Colorimetric Observer Model.
        PLoS One 11, 1-19.
        <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_

        2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016).
        Color matching experiment for highlighting interobserver variability.
        Color Res. Appl. 41, 530-539.
        <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_

        3. `CIE (2006). Fundamental Chromaticity Diagram with Physiological Axes - Part I.
        (Vienna: CIE).
        <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_

        4. `Asano's Individual Colorimetric Observer Model
        <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """
    fs = fieldsize
    rmd = _INDVCMF_DATA['rmd'].copy()
    LMSa = _INDVCMF_DATA['LMSa'].copy()
    docul = _INDVCMF_DATA['docul'].copy()

    # field size corrected macular density:
    pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * (1 + var_od_macula / 100)  # varied peak optical density of macula
    corrected_rmd = rmd * pkOd_Macula

    # age corrected lens/ocular media density:
    if (age <= 60):
        correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2]
    else:
        correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2]
    correct_lomd = correct_lomd * (1 + var_od_lens / 100)  # varied overall optical density of lens

    # Peak Wavelength Shift:
    wl_shifted = np.empty(LMSa.shape)
    wl_shifted[0] = _WL + var_shft_L
    wl_shifted[1] = _WL + var_shft_M
    wl_shifted[2] = _WL + var_shft_S

    LMSa_shft = np.empty(LMSa.shape)
    kind = 'cubic'
    LMSa_shft[0] = sp.interpolate.interp1d(wl_shifted[0], LMSa[0], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL)
    LMSa_shft[1] = sp.interpolate.interp1d(wl_shifted[1], LMSa[1], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL)
    LMSa_shft[2] = sp.interpolate.interp1d(wl_shifted[2], LMSa[2], kind=kind, bounds_error=False, fill_value="extrapolate")(_WL)
    # LMSa[2, np.where(_WL >= _WL_CRIT)] = 0  # np.nan  # Not defined above 620 nm
    # LMSa_shft[2, np.where(_WL >= _WL_CRIT)] = 0

    # detect poor interpolation (sign switch due to instability):
    ssw = np.hstack((0, np.sign(np.diff(LMSa_shft[2, :]))))
    LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan

    # corrected LMS (no age correction):
    pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (1 + var_od_L / 100)  # varied peak optical density of L-cone
    pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (1 + var_od_M / 100)  # varied peak optical density of M-cone
    pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * (1 + var_od_S / 100)  # varied peak optical density of S-cone

    alpha_lms = 0. * LMSa_shft
    alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0]))
    alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1]))
    alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2]))

    # this fix is required because the above math fails for alpha_lms[2,:] == 0
    alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0

    # Corrected to Corneal Incidence:
    lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones(alpha_lms.shape)

    # Corrected to Energy Terms:
    lms_bar = lms_barq * _WL

    # Set NaN values to zero:
    lms_bar[np.isnan(lms_bar)] = 0

    # normalized:
    LMS = 100 * lms_bar / np.nansum(lms_bar, axis=1, keepdims=True)

    # Output extra:
    trans_lens = 10**(-correct_lomd)
    trans_macula = 10**(-corrected_rmd)
    sens_photopig = alpha_lms * _WL

    # Add wavelengths:
    LMS = np.vstack((_WL, LMS))

    if ('xyz' in out.lower().split(',')):
        LMS = lmsb_to_xyzb(LMS, fieldsize, out='xyz',
                           allow_negative_values=allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    # Interpolate/extrapolate:
    if wl is None:
        interpolation = None
    else:
        interpolation = 'cubic'
    LMS = spd(LMS, wl=wl, interpolation=interpolation, norm_type='area')

    if (out == 'LMS'):
        return LMS
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'):
        return LMS, trans_lens, trans_macula, sens_photopig
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'):
        return LMS, trans_lens, trans_macula, sens_photopig, LMSa
    else:
        return eval(out)
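

def _demo_cie2006cmfsEx():
    # Illustrative sketch (not part of the original module): generate cone
    # fundamentals for a 45-year-old observer with a 2° field and a +2 nm
    # L-cone peak wavelength shift, using the default 'LMS' output.
    LMS = cie2006cmfsEx(age = 45, fieldsize = 2, var_shft_L = 2)
    # row 0 of LMS holds the wavelengths; the next rows the L, M, S fundamentals.
    return LMS
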
def xyz_to_Ydlep_(xyz, cieobs=_CIEOBS, xyzw=_COLORTF_DEFAULT_WHITE_POINT, flip_axes=False, **kwargs): """ Convert XYZ tristimulus values to Y, dominant (complementary) wavelength and excitation purity. Args: :xyz: | ndarray with tristimulus values :xyzw: | None or ndarray with tristimulus values of a single (!) native white point, optional | None defaults to xyz of CIE D65 using the :cieobs: observer. :cieobs: | luxpy._CIEOBS, optional | CMF set to use when calculating spectrum locus coordinates. :flip_axes: | False, optional | If True: flip axis 0 and axis 1 in Ydelep to increase speed of loop in function. | (single xyzw with is not flipped!) Returns: :Ydlep: | ndarray with Y, dominant (complementary) wavelength | and excitation purity """ xyz3 = np3d(xyz).copy().astype(np.float) # flip axis so that shortest dim is on axis0 (save time in looping): if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True): axes12flipped = True xyz3 = xyz3.transpose((1, 0, 2)) else: axes12flipped = False # convert xyz to Yxy: Yxy = xyz_to_Yxy(xyz3) Yxyw = xyz_to_Yxy(xyzw) # get spectrum locus Y,x,y and wavelengths: SL = _CMF[cieobs]['bar'] SL = SL[:, SL[1:].sum(axis=0) > 0] # avoid div by zero in xyz-to-Yxy conversion wlsl = SL[0] Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None] pmaxlambda = Yxysl[..., 1].argmax() maxlambda = wlsl[pmaxlambda] maxlambda = 700 print(np.where(wlsl == maxlambda)) pmaxlambda = np.where(wlsl == maxlambda)[0][0] Yxysl = Yxysl[:(pmaxlambda + 1), :] wlsl = wlsl[:(pmaxlambda + 1)] # center on xyzw: Yxy = Yxy - Yxyw Yxysl = Yxysl - Yxyw Yxyw = Yxyw - Yxyw #split: Y, x, y = asplit(Yxy) Yw, xw, yw = asplit(Yxyw) Ysl, xsl, ysl = asplit(Yxysl) # calculate hue: h = math.positive_arctan(x, y, htype='deg') print(h) print('rh', h[0, 0] - h[0, 1]) print(wlsl[0], wlsl[-1]) hsl = math.positive_arctan(xsl, ysl, htype='deg') hsl_max = hsl[0] # max hue angle at min wavelength hsl_min = hsl[-1] # min hue angle at max wavelength if hsl_min < hsl_max: hsl_min += 360 dominantwavelength = np.empty(Y.shape) purity = np.empty(Y.shape) print('xyz:', xyz) for i in range(xyz3.shape[1]): print('\ni:', i, h[:, i], hsl_max, hsl_min) print(h) # find index of complementary wavelengths/hues: pc = np.where( (h[:, i] > hsl_max) & (h[:, i] < hsl_min) ) # hue's requiring complementary wavelength (purple line) print('pc', (h[:, i] > hsl_max) & (h[:, i] < hsl_min)) h[:, i][pc] = h[:, i][pc] - np.sign( h[:, i][pc] - 180.0 ) * 180.0 # add/subtract 180° to get positive complementary wavelength # find 2 closest hues in sl: #hslb,hib = meshblock(hsl,h[:,i:i+1]) hib, hslb = np.meshgrid(h[:, i:i + 1], hsl) dh = np.abs(hslb - hib) q1 = dh.argmin(axis=0) # index of closest hue dh[q1] = 1000000.0 q2 = dh.argmin(axis=0) # index of second closest hue print('q1q2', q2, q1) print('wls:', h[:, i], wlsl[q1], wlsl[q2]) print('hsls:', hsl[q2, 0], hsl[q1, 0]) print('d', (wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]), (wlsl[q2] - wlsl[q1]) / (hsl[q2, 0] - hsl[q1, 0])) print('(h[:,i] - hsl[q1,0])', (h[:, i] - hsl[q1, 0])) print('div', np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0]))) print( 'mult(...)', np.multiply((h[:, i] - hsl[q1, 0]), np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0])))) dominantwavelength[:, i] = wlsl[q1] + np.multiply( (h[:, i] - hsl[q1, 0]), np.divide((wlsl[q2] - wlsl[q1]), (hsl[q2, 0] - hsl[q1, 0])) ) # calculate wl corresponding to h: y = y1 + (x-x1)*(y2-y1)/(x2-x1) print('dom', dominantwavelength[:, i]) dominantwavelength[(dominantwavelength[:, i] > max(wlsl[q1], wlsl[q2])), i] = max(wlsl[q1], wlsl[q2]) 
dominantwavelength[(dominantwavelength[:, i] < min(wlsl[q1], wlsl[q2])), i] = min(wlsl[q1], wlsl[q2]) dominantwavelength[:, i][pc] = -dominantwavelength[:, i][ pc] #complementary wavelengths are specified by '-' sign # calculate excitation purity: x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (h[:, i] - hsl[ q1, 0]) / (hsl[q2, 0] - hsl[q1, 0]) # calculate x of dom. wl y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (h[:, i] - hsl[ q1, 0]) / (hsl[q2, 0] - hsl[q1, 0]) # calculate y of dom. wl d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5 # distance from white point to sl d = (x[:, i]**2.0 + y[:, i]**2.0)**0.5 # distance from white point to test point purity[:, i] = d / d_wl # correct for those test points that have a complementary wavelength # calculate intersection of line through white point and test point and purple line: xy = np.vstack((x[:, i], y[:, i])).T xyw = np.hstack((xw, yw)) xypl1 = np.hstack((xsl[0, None], ysl[0, None])) xypl2 = np.hstack((xsl[-1, None], ysl[-1, None])) da = (xy - xyw) db = (xypl2 - xypl1) dp = (xyw - xypl1) T = np.array([[0.0, -1.0], [1.0, 0.0]]) dap = np.dot(da, T) denom = np.sum(dap * db, axis=1, keepdims=True) num = np.sum(dap * dp, axis=1, keepdims=True) xy_linecross = (num / denom) * db + xypl1 d_linecross = np.atleast_2d( (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T #[0] purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0] Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity)) if axes12flipped == True: Ydlep = Ydlep.transpose((1, 0, 2)) else: Ydlep = Ydlep.transpose((0, 1, 2)) return Ydlep.reshape(xyz.shape)
def Ydlep_to_xyz(Ydlep, cieobs = _CIEOBS, xyzw = _COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes = False, SL_max_lambda = None, **kwargs):
    """
    Convert Y, dominant (complementary) wavelength and excitation purity
    to XYZ tristimulus values.

    Args:
        :Ydlep:
            | ndarray with Y, dominant (complementary) wavelength and excitation purity
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase speed of loop in function.
            | (single xyzw is not flipped!)
        :SL_max_lambda:
            | None or float, optional
            | Maximum wavelength of spectrum locus before it turns back on
            | itself in the high wavelength range (~700 nm)

    Returns:
        :xyz:
            | ndarray with tristimulus values
    """
    Ydlep3 = np3d(Ydlep).copy().astype(float)

    # flip axes to loop over the shortest dimension (saves time in looping):
    if (Ydlep3.shape[0] < Ydlep3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        Ydlep3 = Ydlep3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyzw to Yxyw:
    Yxyw = xyz_to_Yxy(xyzw)
    Yxywo = Yxyw.copy()

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    SL = SL[:, SL[1:].sum(axis=0) > 0]  # avoid div by zero in xyz-to-Yxy conversion
    wlsl = SL[0, None].T
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # Get maximum wavelength of spectrum locus (before it turns back on itself):
    if SL_max_lambda is None:
        pmaxlambda = Yxysl[..., 1].argmax()  # lambda with largest x value
        dwl = np.diff(Yxysl[:, 0, 1])  # spectrum locus in that range should have increasing x
        dwl[wlsl[:-1, 0] < 600] = 10000
        pmaxlambda = np.where(dwl <= 0)[0][0]  # take first element with zero or negative slope
    else:
        pmaxlambda = np.abs(wlsl - SL_max_lambda).argmin()
    Yxysl = Yxysl[:(pmaxlambda + 1), :]
    wlsl = wlsl[:(pmaxlambda + 1), :1]

    # center on xyzw:
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    # split:
    Y, dom, pur = asplit(Ydlep3)
    Yw, xw, yw = asplit(Yxyw)
    Ywo, xwo, ywo = asplit(Yxywo)
    Ysl, xsl, ysl = asplit(Yxysl)

    # loop over longest dim:
    x = np.empty(Y.shape)
    y = np.empty(Y.shape)
    for i in range(Ydlep3.shape[1]):

        # find closest wavelengths to dom (abs because dom < 0 --> complementary wl):
        wlib, wlslb = np.meshgrid(np.abs(dom[:, i]), wlsl)
        dwl = wlslb - wlib
        q1 = np.abs(dwl).argmin(axis=0)  # index of closest wl
        sign_q1 = np.sign(dwl[q1])
        dwl[np.sign(dwl) == sign_q1] = 1000000  # set all dwl on the same side as q1 to a very large value
        q2 = np.abs(dwl).argmin(axis=0)  # index of second closest (enclosing) wl

        # calculate x,y of dom:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * (np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0])  # x of dom. wl
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * (np.abs(dom[:, i]) - wlsl[q1, 0]) / (wlsl[q2, 0] - wlsl[q1, 0])  # y of dom. wl

        # calculate x,y of test:
        d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5  # distance from white point to dom
        d = pur[:, i] * d_wl
        hdom = math.positive_arctan(x_dom_wl, y_dom_wl, htype='deg')
        x[:, i] = d * np.cos(hdom * np.pi / 180.0)
        y[:, i] = d * np.sin(hdom * np.pi / 180.0)

        # complementary:
        pc = np.where(dom[:, i] < 0.0)
        hdom[pc] = hdom[pc] - np.sign(dom[:, i][pc] - 180.0) * 180.0  # get positive hue angle

        # calculate intersection of line through white point and test point with the purple line:
        xy = np.vstack((x_dom_wl, y_dom_wl)).T
        xyw = np.vstack((xw, yw)).T
        xypl1 = np.vstack((xsl[0, None], ysl[0, None])).T
        xypl2 = np.vstack((xsl[-1, None], ysl[-1, None])).T
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d((xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T[:, 0]
        x[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.cos(hdom[pc] * np.pi / 180)
        y[:, i][pc] = pur[:, i][pc] * d_linecross[pc] * np.sin(hdom[pc] * np.pi / 180)

    Yxy = np.dstack((Ydlep3[:, :, 0], x + xwo, y + ywo))

    if axes12flipped == True:
        Yxy = Yxy.transpose((1, 0, 2))
    else:
        Yxy = Yxy.transpose((0, 1, 2))
    return Yxy_to_xyz(Yxy).reshape(Ydlep.shape)
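

def _demo_Ydlep_roundtrip():
    # Illustrative consistency check (not part of the original module):
    # xyz -> Ydlep -> xyz should approximately recover the input when the same
    # observer (:cieobs:) and white point (:xyzw:) are used in both directions.
    xyz = np.array([[40.0, 30.0, 20.0]])
    Ydlep = xyz_to_Ydlep(xyz)
    xyz_back = Ydlep_to_xyz(Ydlep)
    return xyz, xyz_back  # xyz_back should be numerically close to xyz
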
def plot_tm30_Rhshj(spd, cri_type = 'ies-tm30', axh = None,
                    xlabel = True, y_offset = 0,
                    font_size = _TM30_FONT_SIZE, **kwargs):
    """
    Plot Local Hue Shift values (Rhshj) (one for each hue-bin).

    Args:
        :spd:
            | ndarray or dict
            | If ndarray: single spectral power distribution.
            | If dict: dictionary with pre-computed parameters (using _tm30_process_spd()).
            |  required keys:
            |   'Rf','Rg','cct','duv','Sr','cri_type','xyzri','xyzrw',
            |   'hbinnrs','Rfi','Rfhi','Rcshi','Rhshi',
            |   'jabt_binned','jabr_binned',
            |   'nhbins','start_hue','normalize_gamut','normalized_chroma_ref'
            | see cri.spd_to_cri() for more info on parameters.
        :cri_type:
            | _CRI_TYPE_DEFAULT or str or dict, optional
            | - str: specifies dict with default cri model parameters
            |     (for supported types, see luxpy.cri._CRI_DEFAULTS['cri_types'])
            | - dict: user defined model parameters
            |     (see e.g. luxpy.cri._CRI_DEFAULTS['cierf'] for required structure)
            | Note that any non-None input arguments (in kwargs)
            | to the function will override default values in cri_type dict.
        :axh:
            | None, optional
            | If None: create new figure with single axes, else plot on specified axes.
        :xlabel:
            | True, optional
            | If False: don't add label and numbers to x-axis
            | (useful when plotting all 'Local Rfhi, Rcshi, Rhshi'
            | values in 3x1 subplots with shared x-axis: saves vertical space)
        :y_offset:
            | 0, optional
            | text-offset from top of bars in barplot.
        :font_size:
            | _TM30_FONT_SIZE, optional
            | Font size of text, axis labels and axis values.
        :kwargs:
            | Additional optional keyword arguments,
            | the same as in cri.spd_to_cri()

    Returns:
        :axh:
            | handle to figure axes.
        :data:
            | dictionary with required parameters for plotting functions.
    """
    data = _tm30_process_spd(spd, cri_type = cri_type, **kwargs)
    Rhshi = data['Rhshi']

    # Get color map based on sample colors:
    cmap = _get_hue_map(hbins = data['nhbins'], start_hue = data['start_hue'],
                        hbinnrs = data['hbinnrs'],
                        xyzri = data['xyzri'],
                        xyzrw = data['xyzrw'],
                        cri_type = data['cri_type'])

    # Plot local hue shift, Rhshi:
    hbins = range(data['nhbins'])
    if axh is None:
        fig, axh = plt.subplots(nrows = 1, ncols = 1)
    for j in hbins:
        axh.bar(hbins[j], Rhshi[j, 0], color = cmap[j], width = 1,
                edgecolor = 'k', alpha = 1)
        ypos = ((np.abs(Rhshi[j, 0]) + 0.05 + y_offset)) * np.sign(Rhshi[j, 0])
        axh.text(hbins[j], ypos, '{:1.2f}'.format(Rhshi[j, 0]),
                 fontsize = font_size,
                 horizontalalignment = 'center', verticalalignment = 'center',
                 color = np.array([1, 1, 1]) * 0.3, rotation = 90)
    xticks = np.array(hbins)
    axh.set_xticks(xticks)
    if xlabel == True:
        xtickslabels = ['{:1.0f}'.format(ii + 1) for ii in hbins]
        axh.set_xlabel('Hue-Angle Bin (j)', fontsize = font_size)
    else:
        xtickslabels = ['' for ii in hbins]
    axh.set_xticklabels(xtickslabels, fontsize = font_size)
    axh.set_xlim([-0.5, data['nhbins'] - 0.5])

    axh.set_ylabel(r'Local Hue Shift $(R_{hs,hj})$', fontsize = font_size)
    axh.set_ylim([min([-0.5, Rhshi.min()]), max([0.5, Rhshi.max()])])

    return axh, data
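

def _demo_plot_tm30_Rhshj(spd):
    # Illustrative sketch (not part of the original module): plot the local hue
    # shifts of a given spectral power distribution. :spd: is assumed here to be
    # a 2xN ndarray with wavelengths in the first row and spectral values in the
    # second, as expected by plot_tm30_Rhshj().
    axh, data = plot_tm30_Rhshj(spd, cri_type = 'ies-tm30', y_offset = 0.02)
    return axh, data
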
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \ parameters = None, inputtype = 'xyz', direction = 'forward', \ cieobs = '2006_10'): """ A simple principled color appearance model based on a mapping of the Munsell color system. | This function implements the JOSA A (parameters = 'JOSA') published model. Args: :data: | ndarray with input tristimulus values | or spectral data | or input color appearance correlates | Can be of shape: (N [, xM], x 3), whereby: | N refers to samples and M refers to light sources. | Note that for spectral input shape is (N x (M+1) x wl) :dataw: | None or ndarray, optional | Input tristimulus values or spectral data of white point. | None defaults to the use of CIE illuminant C. :Yb: | 20.0, optional | Luminance factor of background (perfect white diffuser, Yw = 100) :Lw: | 400.0, optional | Luminance (cd/m²) of white point. :Ccwb: | None, optional | Degree of cognitive adaptation (white point balancing) | If None: use [..,..] from parameters dict. :relative: | True or False, optional | True: xyz tristimulus values are relative (Yw = 100) :parameters: | None or str or dict, optional | Dict with model parameters. | - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA'] | - str: 'best-fit-JOSA' or 'best-fit-all-Munsell' | - dict: user defined model parameters | (dict should have same structure) :inputtype: | 'xyz' or 'spd', optional | Specifies the type of input: | tristimulus values or spectral data for the forward mode. :direction: | 'forward' or 'inverse', optional | -'forward': xyz -> cam_sww_2016 | -'inverse': cam_sww_2016 -> xyz :cieobs: | '2006_10', optional | CMF set to use to perform calculations where spectral data is involved (inputtype == 'spd'; dataw = None) | Other options: see luxpy._CMF['types'] Returns: :returns: | ndarray with color appearance correlates (:direction: == 'forward') | or | XYZ tristimulus values (:direction: == 'inverse') Notes: | This function implements the JOSA A (parameters = 'JOSA') published model. | With: | 1. A correction for the parameter | in Eq.4 of Fig. 11: 0.952 --> -0.952 | | 2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f | should be: -0.028 & 0.821 | | (cfr. Ccwb = 0.66 in: | ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)) References: 1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016). A simple principled approach for modeling and understanding uniform color metrics. Journal of the Optical Society of America A, 33(3), A319–A331. <https://doi.org/10.1364/JOSAA.33.00A319>`_ """ # get model parameters args = locals().copy() if parameters is None: parameters = _CAM_SWW16_PARAMETERS['JOSA'] if isinstance(parameters, str): parameters = _CAM_SWW16_PARAMETERS[parameters] parameters = put_args_in_db( parameters, args) #overwrite parameters with other (not-None) args input #unpack model parameters: Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [ parameters[x] for x in sorted(parameters.keys()) ] # setup default adaptation field: if (dataw is None): dataw = _CIE_ILLUMINANTS['C'].copy() # get illuminant C xyzw = spd_to_xyz(dataw, cieobs=cieobs, relative=False) # get abs. 
tristimulus values if relative == False: #input is expected to be absolute dataw[1:] = Lw * dataw[ 1:] / xyzw[:, 1:2] #dataw = Lw*dataw # make absolute else: dataw = dataw # make relative (Y=100) if inputtype == 'xyz': dataw = spd_to_xyz(dataw, cieobs=cieobs, relative=relative) # precomputations: Mxyz2lms = np.dot( np.diag(cLMS), math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]])) ) # normalize matrix for xyz-> lms conversion to ill. E weighted with cLMS invMxyz2lms = np.linalg.inv(Mxyz2lms) MAab = np.array([clambda, calpha, cbeta]) invMAab = np.linalg.inv(MAab) #initialize data and camout: data = np2d(data).copy( ) # stimulus data (can be upto NxMx3 for xyz, or [N x (M+1) x wl] for spd)) dataw = np2d(dataw).copy( ) # white point (can be upto Nx3 for xyz, or [(N+1) x wl] for spd) # make axis 1 of dataw have 'same' dimensions as data: if (data.ndim == 2): data = np.expand_dims(data, axis=1) # add light source axis 1 if inputtype == 'xyz': if dataw.shape[ 0] == 1: #make dataw have same lights source dimension size as data dataw = np.repeat(dataw, data.shape[1], axis=0) else: if dataw.shape[0] == 2: dataw = np.vstack( (dataw[0], np.repeat(dataw[1:], data.shape[1], axis=0))) # Flip light source dim to axis 0: data = np.transpose(data, axes=(1, 0, 2)) # Initialize output array: dshape = list(data.shape) dshape[-1] = 3 # requested number of correlates: l_int, a_int, b_int if (inputtype != 'xyz') & (direction == 'forward'): dshape[-2] = dshape[ -2] - 1 # wavelength row doesn't count & only with forward can the input data be spectral camout = np.zeros(dshape) camout.fill(np.nan) # apply forward/inverse model for each row in data: for i in range(data.shape[0]): # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf: if (inputtype != 'xyz'): if relative == True: xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])), cieobs=cieobs, relative=False) dataw[i + 1] = Lw * dataw[i + 1] / xyzw_abs[0, 1] # make absolute xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i + 1])), cieobs=cieobs, relative=False) lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K'] lmsf = (Yb / 100.0 ) * lmsw # calculate adaptation field and convert to l,m,s if (direction == 'forward'): if relative == True: data[i, 1:, :] = Lw * data[i, 1:, :] / xyzw_abs[ 0, 1] # make absolute xyzt = spd_to_xyz(data[i], cieobs=cieobs, relative=False) / _CMF[cieobs]['K'] lmst = 683.0 * np.dot(Mxyz2lms, xyzt.T).T # convert to l,m,s else: lmst = lmsf # put lmsf in lmst for inverse-mode elif (inputtype == 'xyz'): if relative == True: dataw[i] = Lw * dataw[i] / 100.0 # make absolute lmsw = 683.0 * np.dot( Mxyz2lms, dataw[i].T).T / _CMF[cieobs]['K'] # convert to lms lmsf = (Yb / 100.0) * lmsw if (direction == 'forward'): if relative == True: data[i] = Lw * data[i] / 100.0 # make absolute lmst = 683.0 * np.dot( Mxyz2lms, data[i].T).T / _CMF[cieobs]['K'] # convert to lms else: lmst = lmsf # put lmsf in lmst for inverse-mode # stage 2: calculate cone outputs of stimulus lmstp lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0))) lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0))) lmstp = np.vstack( (lmsfp, lmstp) ) # add adaptation field lms temporarily to lmsp for quick calculation # stage 3: calculate optic nerve signals, lam*, alphp, betp: lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T) alphp = cga1[0] * alph alphp[alph < 0] = cga1[1] * alph[alph < 0] betp = cgb1[0] * bet betp[bet < 0] = cgb1[1] * bet[bet < 0] # stage 4: calculate recoded nerve signals, alphapp, betapp: 
alphpp = cga2[0] * (alphp + betp) betpp = cgb2[0] * (alphp - betp) # stage 5: calculate conscious color perception: lstar_int = cl_int[0] * (lstar + cl_int[1]) alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp - np.sin(cab_int[1] * np.pi / 180.0) * betpp) bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp + np.cos(cab_int[1] * np.pi / 180.0) * betpp) lstar_out = lstar_int if direction == 'forward': if Ccwb is None: alph_out = alph_int - cab_out[0] bet_out = bet_int - cab_out[1] else: Ccwb = Ccwb * np.ones((2)) Ccwb[Ccwb < 0.0] = 0.0 Ccwb[Ccwb > 1.0] = 1.0 alph_out = alph_int - Ccwb[0] * alph_int[ 0] # white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation bet_out = bet_int - Ccwb[1] * bet_int[0] camout[i] = np.vstack( (lstar_out[1:], alph_out[1:], bet_out[1:]) ).T # stack together and remove adaptation field from vertical stack elif direction == 'inverse': labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0])) # get lstar_out, alph_out & bet_out for data: lstar_out, alph_out, bet_out = asplit(data[i]) # stage 5 inverse: # undo cortical white-balance: if Ccwb is None: alph_int = alph_out + cab_out[0] bet_int = bet_out + cab_out[1] else: Ccwb = Ccwb * np.ones((2)) Ccwb[Ccwb < 0.0] = 0.0 Ccwb[Ccwb > 1.0] = 1.0 alph_int = alph_out + Ccwb[0] * alph_int[ 0] # inverse white balance shift using adaptation gray background (Yb=20%), with Ccw: degree of adaptation bet_int = bet_out + Ccwb[1] * bet_int[0] lstar_int = lstar_out alphpp = (1.0 / cab_int[0]) * ( np.cos(-cab_int[1] * np.pi / 180.0) * alph_int - np.sin(-cab_int[1] * np.pi / 180.0) * bet_int) betpp = (1.0 / cab_int[0]) * ( np.sin(-cab_int[1] * np.pi / 180.0) * alph_int + np.cos(-cab_int[1] * np.pi / 180.0) * bet_int) lstar_int = lstar_out lstar = (lstar_int / cl_int[0]) - cl_int[1] # stage 4 inverse: alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0] ) # <-- alphpp = (Cga2.*(alphp+betp)); betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0] ) # <-- betpp = (Cgb2.*(alphp-betp)); # stage 3 invers: alph = alphp / cga1[0] bet = betp / cgb1[0] sa = np.sign(cga1[1]) sb = np.sign(cgb1[1]) alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1] bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1] lab = ajoin((lstar, alph, bet)) # stage 2 inverse: lmstp = np.dot(invMAab, lab.T).T lmstp[lmstp < -1.0] = -1.0 lmstp[lmstp > 1.0] = 1.0 lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0) lmst = np.exp(lmstp) * lms0 # stage 1 inverse: xyzt = np.dot(invMxyz2lms, lmst.T).T if relative == True: xyzt = (100.0 / Lw) * xyzt camout[i] = xyzt # if flipaxis0and1 == True: # loop over shortest dim. # camout = np.transpose(camout, axes = (1,0,2)) # Flip light source dim back to axis 1: camout = np.transpose(camout, axes=(1, 0, 2)) if camout.shape[0] == 1: camout = np.squeeze(camout, axis=0) return camout
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None,
              relative = True, inputtype = 'xyz', direction = 'forward',
              parameters = None, cieobs = '2006_10',
              match_to_conversionmatrix_to_cieobs = True):
    """
    A simple principled color appearance model based on a mapping
    of the Munsell color system.

    | This function implements the JOSA A (parameters = 'JOSA') published model.

    Args:
        :data:
            | ndarray with input tristimulus values
            | or spectral data
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby:
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl)
        :dataw:
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb:
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None, optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW_2016_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user defined model parameters
            |      (dict should have same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input:
            | tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            | -'forward': xyz -> cam_sww_2016
            | -'inverse': cam_sww_2016 -> xyz
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_to_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz-to-lms
            | matrix to the one corresponding to that observer.
            | If False: use the one set in parameters or _CAM_SWW16_PARAMETERS.

    Returns:
        :returns:
            | ndarray with color appearance correlates (:direction: == 'forward')
            |  or
            | XYZ tristimulus values (:direction: == 'inverse')

    Notes:
        | This function implements the JOSA A (parameters = 'JOSA')
        | published model.
        | With:
        |    1. A correction for the parameter
        |       in Eq.4 of Fig. 11: 0.952 --> -0.952
        |
        |    2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f
        |       should be: -0.028 & 0.821
        |
        |    (cfr. Ccwb = 0.66 in:
        |    ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int)

    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016).
        A simple principled approach for modeling and understanding uniform color metrics.
        Journal of the Optical Society of America A, 33(3), A319-A331.
        <https://doi.org/10.1364/JOSAA.33.00A319>`_
    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy()
    parameters = _update_parameter_dict(args,
                                        parameters = parameters,
                                        match_to_conversionmatrix_to_cieobs = match_to_conversionmatrix_to_cieobs)
    # unpack model parameters:
    (Cc, Ccwb, Cf, Mxyz2lms, cLMS,
     cab_int, cab_out, calpha, cbeta,
     cga1, cga2, cgb1, cgb2,
     cl_int, clambda, lms0) = [parameters[x] for x in sorted(parameters.keys())]

    #--------------------------------------------------------------------------
    # Setup default adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw = dataw, Lw = Lw,
                                            inputtype = inputtype,
                                            relative = relative,
                                            cieobs = cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    data, dataw, camout, originalshape = _massage_input_and_init_output(data, dataw,
                                                                        inputtype = inputtype,
                                                                        direction = direction)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    Mxyz2lms = np.dot(np.diag(cLMS), Mxyz2lms)  # weight the xyz-to-lms conversion matrix with cLMS (cfr. stage 1 calculations)
    invMxyz2lms = np.linalg.inv(Mxyz2lms)  # calculate the inverse lms-to-xyz conversion matrix
    MAab = np.array([clambda, calpha, cbeta])  # matrix with scale factors for L, M, S for quick matrix multiplications
    invMAab = np.linalg.inv(MAab)  # pre-calculate its inverse to avoid repeats in the loop

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get absolute tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data, dataw, i = i, Lw = Lw,
                                                      direction = direction,
                                                      cieobs = cieobs,
                                                      inputtype = inputtype,
                                                      relative = relative)

        #-----------------------------------------------------------------------------
        # stage 1: calculate photon rates of stimulus, white point and
        # adapting field: i.e. lmst, lmsw and lmsf
        #-----------------------------------------------------------------------------
        # Convert white point to l,m,s:
        lmsw = 683.0 * np.dot(Mxyz2lms, xyzw.T).T / _CMF[cieobs]['K']

        # Calculate adaptation field and convert to l,m,s:
        lmsf = (Yb / 100.0) * lmsw

        # Calculate lms of stimulus,
        # or put adaptation lmsf in test field lmst for later use in inverse-mode (no xyz in 'inverse' mode!!!):
        lmst = (683.0 * np.dot(Mxyz2lms, xyzt.T).T / _CMF[cieobs]['K']) if (direction == 'forward') else lmsf

        #-----------------------------------------------------------------------------
        # stage 2: calculate cone outputs of stimulus lmstp
        #-----------------------------------------------------------------------------
        lmstp = math.erf(Cc * (np.log(lmst / lms0) + Cf * np.log(lmsf / lms0)))  # stimulus test field
        lmsfp = math.erf(Cc * (np.log(lmsf / lms0) + Cf * np.log(lmsf / lms0)))  # adaptation field

        # add adaptation field lms temporarily to lmstp for quick calculation:
        lmstp = np.vstack((lmsfp, lmstp))

        #-----------------------------------------------------------------------------
        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        #-----------------------------------------------------------------------------
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)
        alphp = cga1[0] * alph
        alphp[alph < 0] = cga1[1] * alph[alph < 0]
        betp = cgb1[0] * bet
        betp[bet < 0] = cgb1[1] * bet[bet < 0]

        #-----------------------------------------------------------------------------
        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        #-----------------------------------------------------------------------------
        alphpp = cga2[0] * (alphp + betp)
        betpp = cgb2[0] * (alphp - betp)

        #-----------------------------------------------------------------------------
        # stage 5: calculate conscious color perception:
        #-----------------------------------------------------------------------------
        lstar_int = cl_int[0] * (lstar + cl_int[1])
        alph_int = cab_int[0] * (np.cos(cab_int[1] * np.pi / 180.0) * alphpp -
                                 np.sin(cab_int[1] * np.pi / 180.0) * betpp)
        bet_int = cab_int[0] * (np.sin(cab_int[1] * np.pi / 180.0) * alphpp +
                                np.cos(cab_int[1] * np.pi / 180.0) * betpp)
        lstar_out = lstar_int

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # stage 5 continued but SPLIT IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #--------------------------------------
        # FORWARD MODE TO PERCEPTUAL SIGNALS:
        #--------------------------------------
        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                # white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation:
                alph_out = alph_int - Ccwb[0] * alph_int[0]
                bet_out = bet_int - Ccwb[1] * bet_int[0]

            # stack together and remove adaptation field from vertical stack;
            # camout is an ndarray with perceptual signals:
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':

            # stack cognitive pre-adapted adaptation field signals (first on stack) together:
            #labf_int = np.hstack((lstar_int[0],alph_int[0],bet_int[0]))

            # get lstar_out, alph_out & bet_out for data
            # (contains model perceptual signals in inverse mode!!!):
            lstar_out, alph_out, bet_out = asplit(data[i])

            #------------------------------------------------------------------------
            # Inverse stage 5: undo cortical white-balance:
            #------------------------------------------------------------------------
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb * np.ones((2))
                Ccwb[Ccwb < 0.0] = 0.0
                Ccwb[Ccwb > 1.0] = 1.0
                # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation:
                alph_int = alph_out + Ccwb[0] * alph_int[0]
                bet_int = bet_out + Ccwb[1] * bet_int[0]

            alphpp = (1.0 / cab_int[0]) * (np.cos(-cab_int[1] * np.pi / 180.0) * alph_int -
                                           np.sin(-cab_int[1] * np.pi / 180.0) * bet_int)
            betpp = (1.0 / cab_int[0]) * (np.sin(-cab_int[1] * np.pi / 180.0) * alph_int +
                                          np.cos(-cab_int[1] * np.pi / 180.0) * bet_int)
            lstar_int = lstar_out
            lstar = (lstar_int / cl_int[0]) - cl_int[1]

            #---------------------------------------------------------------------------
            # Inverse stage 4: pre-adapted perceptual signals to recoded nerve signals:
            #---------------------------------------------------------------------------
            alphp = 0.5 * (alphpp / cga2[0] + betpp / cgb2[0])  # <-- alphpp = (Cga2.*(alphp+betp));
            betp = 0.5 * (alphpp / cga2[0] - betpp / cgb2[0])   # <-- betpp = (Cgb2.*(alphp-betp));

            #---------------------------------------------------------------------------
            # Inverse stage 3: recoded nerve signals to optic nerve signals:
            #---------------------------------------------------------------------------
            alph = alphp / cga1[0]
            bet = betp / cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa * alphp) < 0.0] = alphp[(sa * alphp) < 0] / cga1[1]
            bet[(sb * betp) < 0.0] = betp[(sb * betp) < 0] / cgb1[1]
            lab = ajoin((lstar, alph, bet))

            #---------------------------------------------------------------------------
            # Inverse stage 2: optic nerve signals to cone outputs:
            #---------------------------------------------------------------------------
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp < -1.0] = -1.0
            lmstp[lmstp > 1.0] = 1.0

            #---------------------------------------------------------------------------
            # Inverse stage 1: cone outputs to photon rates:
            #---------------------------------------------------------------------------
            lmstp = math.erfinv(lmstp) / Cc - Cf * np.log(lmsf / lms0)
            lmst = np.exp(lmstp) * lms0

            #---------------------------------------------------------------------------
            # Photon rates to absolute or relative tristimulus values:
            #---------------------------------------------------------------------------
            xyzt = np.dot(invMxyz2lms, lmst.T).T * (_CMF[cieobs]['K'] / 683.0)
            if relative == True:
                xyzt = (100 / Lw) * xyzt

            # store in same named variable as forward mode:
            camout[i] = xyzt

            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            # END inverse mode
            #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    return _massage_output_data_to_original_shape(camout, originalshape)
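

def _demo_cam_sww16():
    # Illustrative forward/inverse sketch (not part of the original module):
    # map a few relative xyz values to model correlates and back, using the
    # default JOSA parameters and CIE illuminant C as the adaptation field.
    xyz = np.array([[40.0, 30.0, 20.0],
                    [20.0, 30.0, 40.0]])
    lab = cam_sww16(xyz, Yb = 20.0, Lw = 400.0, relative = True,
                    inputtype = 'xyz', direction = 'forward')
    xyz_back = cam_sww16(lab, Yb = 20.0, Lw = 400.0, relative = True,
                         inputtype = 'xyz', direction = 'inverse')
    return xyz, lab, xyz_back  # xyz_back should be numerically close to xyz
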