def ctf(self, **kwargs):
    """
    Convert color space coordinates to XYZ tristimulus values.

    Args:
        :dtype:
            | 'xyz'
            | Convert to this color space.
        :**kwargs:
            | additional input arguments required for
            | color space transformation.
            | See specific luxpy function for more info
            | (e.g. ?luxpy.xyz_to_lab)

    Returns:
        :returns:
            | luxpy.XYZ with .value field that is an ndarray
            | with tristimulus values
    """
    db = put_args_in_db(self.cspace_par, locals().copy())
    return XYZ(value=colortf(self.value,
                             tf='{:s}>xyz'.format(self.dtype),
                             bwtf=db),
               relative=self.relative,
               cieobs=self.cieobs,
               dtype='xyz')
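# Usage sketch for ctf(), assuming a sibling LAB color-data class that takes
# the same constructor arguments as the XYZ class returned above (the LAB
# name and the sample values are illustrative assumptions, not part of this
# section):
def _demo_ctf():
    import numpy as np
    lab = LAB(value=np.array([[50.0, 10.0, -10.0]]),  # hypothetical Lab sample
              relative=True, cieobs='1931_2', dtype='lab')
    xyz = lab.ctf()   # 'lab' -> 'xyz' using the stored color space parameters
    return xyz.value  # ndarray with XYZ tristimulus values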
def process_cri_type_input(cri_type, args, callerfunction = ''):
    """
    Processes cri_type input in a function (helper function).

    | This function replaces the values of keys in the cri_type dict with
    | the corresponding not-None values in args.

    Args:
        :cri_type:
            | str or dict
            | Database with CRI model parameters.
        :args:
            | arguments from a caller function
        :callerfunction:
            | str with function the args originated from

    Returns:
        :cri_type:
            | dict with database of CRI model parameters.
    """
    if isinstance(cri_type, str):
        if (cri_type in _CRI_DEFAULTS['cri_types']):
            cri_type = _CRI_DEFAULTS[cri_type].copy()
        else:
            raise Exception('.{}(): Unrecognized cri_type: {}'.format(callerfunction, cri_type))
    elif not isinstance(cri_type, dict):
        raise Exception('.{}(): cri_type is not a dict!'.format(callerfunction))
    cri_type = put_args_in_db(cri_type, args)
    return cri_type
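# For reference, a minimal sketch of the put_args_in_db() helper relied on
# throughout this section, based on the contract stated in the docstring
# above (illustrative only; the actual luxpy implementation may differ in
# details):
def _put_args_in_db_sketch(db, args):
    """Overwrite db values with the corresponding not-None values in args."""
    db = db.copy()
    for key, value in args.items():
        if (key in db) and (value is not None):  # leave defaults for None args
            db[key] = value
    return db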
def init_options(options={}, F=None, CR=None, kmax=None, mu=None, display=None):
    """
    Initialize options dict. If an input arg is None, the default value is used.

    Args:
        :options:
            | {}, optional
            | Dict with options
            | {} initializes dict to default values.
        :F:
            | scale factor, optional
        :CR:
            | crossover factor, optional
        :kmax:
            | maximum number of iterations, optional
        :mu:
            | population size, optional
        :display:
            | whether or not to show the population during execution, optional

    Returns:
        :options:
            | dict with options.
    """
    args = locals().copy()
    if not options:  # empty dict: fall back to defaults
        options = {'F': 0.5,
                   'CR': 0.3,
                   'kmax': 300,
                   'mu': 100,
                   'display': False}
    return put_args_in_db(options, args)
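# Usage sketch for init_options() (the override values below are arbitrary
# examples): start from the defaults, then overwrite only selected keys.
def _demo_init_options():
    opts = init_options()  # all defaults: F=0.5, CR=0.3, kmax=300, mu=100, display=False
    opts = init_options(opts, F=0.8, CR=0.9)  # overwrite F and CR, keep the rest
    return opts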
def to_xyz(self, **kwargs):
    """
    Convert color space coordinates to XYZ tristimulus values.
    """
    db = put_args_in_db(self.cspace_par, locals().copy())
    return XYZ(value=colortf(self.value,
                             tf='{:s}>xyz'.format(self.dtype),
                             bwtf=db),
               relative=self.relative,
               cieobs=self.cieobs,
               dtype='xyz')
def cam_sww16(data, dataw = None, Yb = 20.0, Lw = 400.0, Ccwb = None, relative = True, \
              parameters = None, inputtype = 'xyz', direction = 'forward', \
              cieobs = '2006_10'):
    """
    A simple principled color appearance model based on a mapping
    of the Munsell color system.

    | This function implements the JOSA A (parameters = 'JOSA') published model.

    Args:
        :data:
            | ndarray with input tristimulus values
            | or spectral data
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby:
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl)
        :dataw:
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :Yb:
            | 20.0, optional
            | Luminance factor of background (perfect white diffuser, Yw = 100)
        :Lw:
            | 400.0, optional
            | Luminance (cd/m²) of white point.
        :Ccwb:
            | None, optional
            | Degree of cognitive adaptation (white point balancing)
            | If None: use [..,..] from parameters dict.
        :relative:
            | True or False, optional
            | True: xyz tristimulus values are relative (Yw = 100)
        :parameters:
            | None or str or dict, optional
            | Dict with model parameters.
            |    - None: defaults to luxpy.cam._CAM_SWW16_PARAMETERS['JOSA']
            |    - str: 'best-fit-JOSA' or 'best-fit-all-Munsell'
            |    - dict: user-defined model parameters
            |      (dict should have the same structure)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input:
            | tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            | -'forward': xyz -> cam_sww_2016
            | -'inverse': cam_sww_2016 -> xyz
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']

    Returns:
        :returns:
            | ndarray with color appearance correlates (:direction: == 'forward')
            | or
            | XYZ tristimulus values (:direction: == 'inverse')

    Notes:
        | This function implements the JOSA A (parameters = 'JOSA') published model.
        | With:
        |    1. A correction for the parameter
        |       in Eq.4 of Fig. 11: 0.952 --> -0.952
        |
        |    2. The delta_ac and delta_bc white-balance shifts in Eq. 5e & 5f
        |       should be: -0.028 & 0.821
        |
        |    (cfr. Ccwb = 0.66 in:
        |        ab_test_out = ab_test_int - Ccwb*ab_gray_adaptation_field_int))

    References:
        1. `Smet, K. A. G., Webster, M. A., & Whitehead, L. A. (2016).
        A simple principled approach for modeling and understanding uniform color metrics.
        Journal of the Optical Society of America A, 33(3), A319–A331.
        <https://doi.org/10.1364/JOSAA.33.00A319>`_
    """
    # get model parameters:
    args = locals().copy()
    if parameters is None:
        parameters = _CAM_SWW16_PARAMETERS['JOSA']
    if isinstance(parameters, str):
        parameters = _CAM_SWW16_PARAMETERS[parameters]
    parameters = put_args_in_db(parameters, args)  # overwrite parameters with other (not-None) args input

    # unpack model parameters:
    Cc, Ccwb, Cf, Mxyz2lms, cLMS, cab_int, cab_out, calpha, cbeta, cga1, cga2, cgb1, cgb2, cl_int, clambda, lms0 = [parameters[x] for x in sorted(parameters.keys())]

    # setup default adaptation field:
    if (dataw is None):
        dataw = _CIE_ILLUMINANTS['C'].copy()  # get illuminant C
        xyzw = spd_to_xyz(dataw, cieobs = cieobs, relative = False)  # get abs. tristimulus values
        if relative == False:  # input is expected to be absolute
            dataw[1:] = Lw*dataw[1:]/xyzw[:,1:2]  # make absolute
        else:
            dataw = dataw  # make relative (Y=100)
        if inputtype == 'xyz':
            dataw = spd_to_xyz(dataw, cieobs = cieobs, relative = relative)

    # precomputations:
    Mxyz2lms = np.dot(np.diag(cLMS), math.normalize_3x3_matrix(Mxyz2lms, np.array([[1, 1, 1]])))  # normalize matrix for xyz -> lms conversion to ill. E weighted with cLMS
    invMxyz2lms = np.linalg.inv(Mxyz2lms)
    MAab = np.array([clambda, calpha, cbeta])
    invMAab = np.linalg.inv(MAab)

    # initialize data and camout:
    data = np2d(data).copy()  # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy()  # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)

    # make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis = 1)  # add light source axis 1

    if inputtype == 'xyz':
        if dataw.shape[0] == 1:  # make dataw have same light source dimension size as data
            dataw = np.repeat(dataw, data.shape[1], axis = 0)
    else:
        if dataw.shape[0] == 2:
            dataw = np.vstack((dataw[0], np.repeat(dataw[1:], data.shape[1], axis = 0)))

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes = (1, 0, 2))

    # Initialize output array:
    dshape = list(data.shape)
    dshape[-1] = 3  # requested number of correlates: l_int, a_int, b_int
    if (inputtype != 'xyz') & (direction == 'forward'):
        dshape[-2] = dshape[-2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
    camout = np.nan*np.ones(dshape)

    # apply forward/inverse model for each row in data:
    for i in range(data.shape[0]):
        # stage 1: calculate photon rates of stimulus and adapting field, lmst & lmsf:
        if (inputtype != 'xyz'):
            if relative == True:
                xyzw_abs = spd_to_xyz(np.vstack((dataw[0], dataw[i+1])), cieobs = cieobs, relative = False)
                dataw[i+1] = Lw*dataw[i+1]/xyzw_abs[0, 1]  # make absolute
            xyzw = spd_to_xyz(np.vstack((dataw[0], dataw[i+1])), cieobs = cieobs, relative = False)
            lmsw = 683.0*np.dot(Mxyz2lms, xyzw.T).T/_CMF[cieobs]['K']
            lmsf = (Yb/100.0)*lmsw  # calculate adaptation field and convert to l,m,s
            if (direction == 'forward'):
                if relative == True:
                    data[i, 1:, :] = Lw*data[i, 1:, :]/xyzw_abs[0, 1]  # make absolute
                xyzt = spd_to_xyz(data[i], cieobs = cieobs, relative = False)/_CMF[cieobs]['K']
                lmst = 683.0*np.dot(Mxyz2lms, xyzt.T).T  # convert to l,m,s
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode
        elif (inputtype == 'xyz'):
            if relative == True:
                dataw[i] = Lw*dataw[i]/100.0  # make absolute
            lmsw = 683.0*np.dot(Mxyz2lms, dataw[i].T).T/_CMF[cieobs]['K']  # convert to lms
            lmsf = (Yb/100.0)*lmsw
            if (direction == 'forward'):
                if relative == True:
                    data[i] = Lw*data[i]/100.0  # make absolute
                lmst = 683.0*np.dot(Mxyz2lms, data[i].T).T/_CMF[cieobs]['K']  # convert to lms
            else:
                lmst = lmsf  # put lmsf in lmst for inverse-mode

        # stage 2: calculate cone outputs of stimulus lmstp
        lmstp = math.erf(Cc*(np.log(lmst/lms0) + Cf*np.log(lmsf/lms0)))
        lmsfp = math.erf(Cc*(np.log(lmsf/lms0) + Cf*np.log(lmsf/lms0)))
        lmstp = np.vstack((lmsfp, lmstp))  # add adaptation field lms temporarily to lmstp for quick calculation

        # stage 3: calculate optic nerve signals, lam*, alphp, betp:
        lstar, alph, bet = asplit(np.dot(MAab, lmstp.T).T)
        alphp = cga1[0]*alph
        alphp[alph<0] = cga1[1]*alph[alph<0]
        betp = cgb1[0]*bet
        betp[bet<0] = cgb1[1]*bet[bet<0]

        # stage 4: calculate recoded nerve signals, alphapp, betapp:
        alphpp = cga2[0]*(alphp + betp)
        betpp = cgb2[0]*(alphp - betp)

        # stage 5: calculate conscious color perception:
        lstar_int = cl_int[0]*(lstar + cl_int[1])
        alph_int = cab_int[0]*(np.cos(cab_int[1]*np.pi/180.0)*alphpp - np.sin(cab_int[1]*np.pi/180.0)*betpp)
        bet_int = cab_int[0]*(np.sin(cab_int[1]*np.pi/180.0)*alphpp + np.cos(cab_int[1]*np.pi/180.0)*betpp)
        lstar_out = lstar_int

        if direction == 'forward':
            if Ccwb is None:
                alph_out = alph_int - cab_out[0]
                bet_out = bet_int - cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_out = alph_int - Ccwb[0]*alph_int[0]  # white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_out = bet_int - Ccwb[1]*bet_int[0]
            camout[i] = np.vstack((lstar_out[1:], alph_out[1:], bet_out[1:])).T  # stack together and remove adaptation field from vertical stack

        elif direction == 'inverse':
            labf_int = np.hstack((lstar_int[0], alph_int[0], bet_int[0]))

            # get lstar_out, alph_out & bet_out for data:
            lstar_out, alph_out, bet_out = asplit(data[i])

            # stage 5 inverse:
            # undo cortical white-balance:
            if Ccwb is None:
                alph_int = alph_out + cab_out[0]
                bet_int = bet_out + cab_out[1]
            else:
                Ccwb = Ccwb*np.ones((2))
                Ccwb[Ccwb<0.0] = 0.0
                Ccwb[Ccwb>1.0] = 1.0
                alph_int = alph_out + Ccwb[0]*alph_int[0]  # inverse white balance shift using adaptation gray background (Yb=20%), with Ccwb: degree of adaptation
                bet_int = bet_out + Ccwb[1]*bet_int[0]
            lstar_int = lstar_out
            alphpp = (1.0/cab_int[0])*(np.cos(-cab_int[1]*np.pi/180.0)*alph_int - np.sin(-cab_int[1]*np.pi/180.0)*bet_int)
            betpp = (1.0/cab_int[0])*(np.sin(-cab_int[1]*np.pi/180.0)*alph_int + np.cos(-cab_int[1]*np.pi/180.0)*bet_int)
            lstar = (lstar_int/cl_int[0]) - cl_int[1]

            # stage 4 inverse:
            alphp = 0.5*(alphpp/cga2[0] + betpp/cgb2[0])  # <-- alphpp = cga2*(alphp + betp)
            betp = 0.5*(alphpp/cga2[0] - betpp/cgb2[0])   # <-- betpp = cgb2*(alphp - betp)

            # stage 3 inverse:
            alph = alphp/cga1[0]
            bet = betp/cgb1[0]
            sa = np.sign(cga1[1])
            sb = np.sign(cgb1[1])
            alph[(sa*alphp)<0.0] = alphp[(sa*alphp)<0]/cga1[1]
            bet[(sb*betp)<0.0] = betp[(sb*betp)<0]/cgb1[1]
            lab = ajoin((lstar, alph, bet))

            # stage 2 inverse:
            lmstp = np.dot(invMAab, lab.T).T
            lmstp[lmstp<-1.0] = -1.0
            lmstp[lmstp>1.0] = 1.0
            lmstp = math.erfinv(lmstp)/Cc - Cf*np.log(lmsf/lms0)
            lmst = np.exp(lmstp)*lms0

            # stage 1 inverse:
            xyzt = np.dot(invMxyz2lms, lmst.T).T
            if relative == True:
                xyzt = (100.0/Lw)*xyzt
            camout[i] = xyzt

    # Flip light source dim back to axis 1:
    camout = np.transpose(camout, axes = (1, 0, 2))
    if camout.shape[0] == 1:
        camout = np.squeeze(camout, axis = 0)
    return camout
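# Minimal forward/inverse usage sketch for cam_sww16() with relative xyz
# input (the sample tristimulus values are made up; dataw = None falls back
# to CIE illuminant C as documented above):
def _demo_cam_sww16():
    import numpy as np
    xyz = np.array([[19.01, 20.0, 21.78],
                    [41.22, 42.62, 46.42]])  # two relative test samples (Yw = 100)
    lab_int = cam_sww16(xyz, dataw = None, Yb = 20.0, Lw = 400.0,
                        inputtype = 'xyz', direction = 'forward')   # xyz -> (l_int, a_int, b_int)
    xyz_back = cam_sww16(lab_int, dataw = None, Yb = 20.0, Lw = 400.0,
                         inputtype = 'xyz', direction = 'inverse')  # (l_int, a_int, b_int) -> xyz
    return lab_int, xyz_back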