def do_xftf(self, kw=2):
    """Forward Fourier transform chi(k) for this group.

    Uses the FFT parameters stored in self.fft; ``kw`` is the
    k-weighting applied before the transform.
    """
    fft_pars = self.fft
    xftf(self.group.k,
         chi=self.group.chi,
         group=self.group,
         window=fft_pars['window'],
         kmin=fft_pars['kmin'],
         kmax=fft_pars['kmax'],
         dk=fft_pars['dk'],
         kweight=kw,
         with_phase=True,
         _larch=LARCH)
def prep(self):
    """Run pre-edge normalization, background removal, and FFT on the group.

    NOTE: the parameter back-filling below works around the Larch
    version currently at BMM, which does not correctly resolve
    pre1=pre2=None or norm1=norm2=None itself; the values chosen
    approximate Larch's own defaults.
    """
    pre = self.pre  # alias; mutations below persist on self.pre
    if pre['e0'] is None:
        find_e0(self.group.energy, mu=self.group.mu, group=self.group,
                _larch=LARCH)
        ezero = self.group.e0
    else:
        ezero = pre['e0']
    if pre['norm2'] is None:
        pre['norm2'] = self.group.energy.max() - ezero
    if pre['norm1'] is None:
        pre['norm1'] = pre['norm2'] / 5
    if pre['pre1'] is None:
        pre['pre1'] = self.group.energy.min() - ezero
    if pre['pre2'] is None:
        pre['pre2'] = pre['pre1'] / 3
    pre_edge(self.group.energy, mu=self.group.mu, group=self.group,
             e0=ezero, step=None,
             pre1=pre['pre1'], pre2=pre['pre2'],
             norm1=pre['norm1'], norm2=pre['norm2'],
             nnorm=pre['nnorm'], nvict=pre['nvict'],
             _larch=LARCH)
    autobk(self.group.energy, mu=self.group.mu, group=self.group,
           rbkg=self.bkg['rbkg'], e0=self.bkg['e0'],
           kmin=self.bkg['kmin'], kmax=self.bkg['kmax'],
           kweight=self.bkg['kweight'], _larch=LARCH)
    xftf(self.group.k, chi=self.group.chi, group=self.group,
         window=self.fft['window'], kmin=self.fft['kmin'],
         kmax=self.fft['kmax'], dk=self.fft['dk'], _larch=LARCH)
def paths_optimizations(self, number=0.01, verbose=False):
    r"""Rank scattering paths by their share of the R-space signal.

    Each exported path's |chi(R)| is measured both as a 2-norm and as a
    Simpson-rule area; paths whose area fraction is at least ``number``
    are reported and the per-path area fractions are bar-plotted.

    Inputs:
        number (float): cut-off fraction for keeping a path
            (0.01 corresponds to the typical 1% threshold)
        verbose (bool): print the per-path contribution table
    """
    norms = []
    areas = []
    for idx in range(self.num_paths):
        self.best.k = self.ind_export_paths[2 * idx, :]
        self.best.chi = self.ind_export_paths[2 * idx + 1, :]
        xftf(self.best.k, self.best.chi,
             kmin=self.params['Kmin'], kmax=self.params['Kmax'],
             dk=4, window='hanning', kweight=self.params['kweight'],
             group=self.best, _larch=self.mylarch)
        norms.append(np.linalg.norm(self.best.chir_mag))
        areas.append(simps(self.best.chir_mag, self.best.r))
    total = sum(norms)
    total_area = sum(areas)
    contrib_p = [v / total for v in norms]
    contrib_ap = [v / total_area for v in areas]
    if verbose:
        print(
            "Paths, Contrib Percentage (2-Norm), Contrib Percentage (Area)"
        )
        # NOTE(review): iterates len(self.paths), assumed equal to
        # self.num_paths -- confirm
        for idx in range(len(self.paths)):
            print(idx + 1, contrib_p[idx].round(3), contrib_ap[idx].round(3))
    new_path = (np.argwhere(np.array(contrib_ap) >= number)).flatten() + 1
    print("New Paths")
    print(new_path)
    plt.bar(np.arange(self.num_paths), height=contrib_ap)
    plt.xticks(np.arange(self.num_paths), self.flat_paths)
def calc_with_defaults(xafs_group):
    """Normalise a XAFS group and compute its Fourier transform.

    Computes mu if it is not already present, then performs pre-edge
    normalisation, background removal, and the forward FFT using mostly
    default parameters; returns the same group with derived arrays
    attached.
    """
    # NOTE(review): the i0/it/mu/iR column choices are fixed inside
    # get_mu; the caller should eventually be able to specify them.
    if not hasattr(xafs_group, 'mu'):
        xafs_group = get_mu(xafs_group)
    # pre-edge and post-edge normalisation
    pre_edge(xafs_group)
    # background removal with Larch defaults (no extra parameters)
    autobk(xafs_group)
    # forward Fourier transform
    xftf(xafs_group, kweight=0.5, kmin=3.0, kmax=12.871, dk=1,
         kwindow='Hanning')
    return xafs_group
def stacks_plot(self):
    """Stack-plot the R-space magnitude of every exported path.

    The first exported path is transformed once to learn the size of
    the R grid; every path's |chi(R)| is then collected and drawn both
    as a summed curve and as a stacked plot.
    """
    # transform the first path just to size the R grid
    self.best.k = self.ind_export_paths[0, :]
    self.best.chi = self.ind_export_paths[1, :]
    xftf(self.best.k, self.best.chi,
         kmin=self.params['Kmin'], kmax=self.params['Kmax'],
         dk=4, window='hanning', kweight=self.params['kweight'],
         group=self.best, _larch=self.mylarch)
    n_r = len(self.best.r)
    y_arr = np.zeros((self.num_paths, n_r))
    y_tot = np.zeros(n_r)
    # transform every path and collect its R-space magnitude
    for idx in range(self.num_paths):
        self.best.k = self.ind_export_paths[2 * idx, :]
        self.best.chi = self.ind_export_paths[2 * idx + 1, :]
        xftf(self.best.k, self.best.chi,
             kmin=self.params['Kmin'], kmax=self.params['Kmax'],
             dk=4, window='hanning', kweight=self.params['kweight'],
             group=self.best, _larch=self.mylarch)
        y_arr[idx, :] = self.best.chir_mag
        y_tot += self.best.chir_mag
    x = self.best.r
    plt.figure()
    plt.plot(x, y_tot)
    plt.figure()
    plt.stackplot(x, y_arr, labels=np.arange(1, self.num_paths + 1))
    plt.legend()
def draw_rspace(self):
    """Recompute the forward FFT and redraw the R-space plot.

    Background and k-space are refreshed first (without drawing) so the
    transform always reflects the current parameters; any parameter
    change therefore results in a full replot.
    """
    self.draw_background(visual=False, main=False)
    self.draw_kspace(visual=False)
    self.update_parameters()
    xftf(self.data.k, self.data.chi,
         kmin=self.kmin_val, kmax=self.kmax_val,
         dk=4, window='hanning', kweight=self.kweight_val,
         group=self.data, _larch=self.mylarch)
    self.ax.clear()
    self.ax.plot(self.data.r, self.data.chir_mag, 'b', label='R Space')
    # BUG FIX: R-space distances are in Angstroms; the previous label
    # incorrectly used the inverse-Angstrom unit that belongs to k-space
    self.ax.set_xlabel('$r$ (Å)')
    # raw string avoids the invalid '\c' escape-sequence warning
    self.ax.set_ylabel(r'$\chi(r)$')
    self.fig.tight_layout()
    self.canvas.draw()
def read(self, filename=None, match=None, do_preedge=True, do_bkg=True,
         do_fft=True, use_hashkey=False):
    """read Athena project to group of groups, one for each Athena
    dataset in the project file.  This supports both gzipped and
    unzipped files and old-style perl-like project files and new-style
    JSON project files

    Arguments:
        filename (string): name of Athena Project file
        match (string): pattern to use to limit imported groups (see Note 1)
        do_preedge (bool): whether to do pre-edge subtraction [True]
        do_bkg (bool): whether to do XAFS background subtraction [True]
        do_fft (bool): whether to do XAFS Fast Fourier transform [True]
        use_hashkey (bool): whether to use Athena's hash key as the
                            group name instead of the Athena label [False]

    Returns:
        None, fills in attributes `header`, `journal`, `filename`, `groups`

    Notes:
        1. To limit the imported groups, use the pattern in `match`,
           using '*' to match 'all', '?' to match any single character,
           or [sequence] to match any of a sequence of letters.  The
           match will always be insensitive to case.
        2. use_hashkey=True will name groups from the internal
           5-character string used by Athena, instead of the group label.
        3. do_preedge, do_bkg, and do_fft will attempt to reproduce the
           pre-edge, background subtraction, and FFT from Athena by
           using the parameters saved in the project file.

    Example:
        1. read in all groups from a project file:
           cr_data = read_athena('My Cr Project.prj')

        2. read in only the "merged" data from a Project, and don't do FFT:
           zn_data = read_athena('Zn on Stuff.prj', match='*merge*',
                                 do_fft=False)
    """
    if filename is not None:
        self.filename = filename
    if not os.path.exists(self.filename):
        raise IOError("%s '%s': cannot find file" % (ERR_MSG, self.filename))

    from larch.xafs import pre_edge, autobk, xftf

    if not os.path.exists(filename):
        raise IOError("file '%s' not found" % filename)

    text = _read_raw_athena(filename)
    if text is None:
        # raw read failed outright
        raise OSError(errval)
    if not _test_athena_text(text):
        raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))

    # decode the project: try JSON first, then the old Perl format
    data = None
    try:
        data = parse_jsonathena(text, self.filename)
    except ValueError:
        try:
            data = parse_perlathena(text, self.filename)
        except:
            pass
    if data is None:
        raise ValueError("cannot read file '%s' as Athena Project File"
                         % (self.filename))

    self.header = data.header
    self.journal = data.journal
    self.group_names = data.group_names

    for gname in data.group_names:
        oname = gname
        if match is not None:
            # NOTE(review): gname is lower-cased but `match` is not, so
            # a pattern containing upper-case letters never matches --
            # confirm against the documented case-insensitivity
            if not fnmatch(gname.lower(), match):
                continue
        this = getattr(data, gname)
        if use_hashkey:
            oname = this.athena_id
        if (do_preedge or do_bkg) and (self._larch is not None):
            pars = this.bkg_params
            pre_edge(this, e0=float(pars.e0),
                     pre1=float(pars.pre1), pre2=float(pars.pre2),
                     norm1=float(pars.nor1), norm2=float(pars.nor2),
                     nnorm=float(pars.nnorm),
                     make_flat=bool(pars.flatten), _larch=self._larch)
            if do_bkg and hasattr(pars, 'rbkg'):
                autobk(this, _larch=self._larch, e0=float(pars.e0),
                       rbkg=float(pars.rbkg), kmin=float(pars.spl1),
                       kmax=float(pars.spl2), kweight=float(pars.kw),
                       dk=float(pars.dk), clamp_lo=float(pars.clamp1),
                       clamp_hi=float(pars.clamp2))
            if do_fft:
                pars = this.fft_params
                kweight = 2
                if hasattr(pars, 'kw'):
                    kweight = float(pars.kw)
                xftf(this, _larch=self._larch, kmin=float(pars.kmin),
                     kmax=float(pars.kmax), kweight=kweight,
                     window=pars.kwindow, dk=float(pars.dk))
        self.groups[oname] = this
ax3[1].plot(dat.k, dat.chi * dat.k, label='k') ax3[2].plot(dat.k, dat.chi * dat.k**2, label=r'k^2') ax3[3].plot(dat.k, dat.chi * dat.k**3, label=r'k^3') fig3.suptitle('k-space Ferrocene', fontsize=24) for i in range(4): ax3[i].set_xlim(0, 14) ax3[i].set_ylim(-4.00 * (i + 1), 4.00 * (i + 1)) ax3[i].legend(loc=4, fancybox=True, shadow=False, prop={'size': 16}, numpoints=1, ncol=1) ####WINDOWS ''' xafs.xftf(dat.k,dat.chi,kmin=3, kmax=12, dk=3, kweight=0, window='kaiser', group=dat) fig4,ax4=plt.subplots(figsize=(10,10)) ax4.plot(dat.k, dat.chi*dat.k**2,label=r'k^2, kaiser') ax4.plot(dat.k, dat.kwin) ax4.set_title('k-space window') ax4.legend(loc=4,fancybox=True,shadow=False,prop ={'size':16},numpoints=1,ncol=1)#.draggable() ''' ####FFT xafs.xftf(dat.k, dat.chi, kmin=3, kmax=10, dk=5, dk2=5,
def read(self, filename=None, match=None, do_preedge=True, do_bkg=True,
         do_fft=True, use_hashkey=False):
    """read Athena project to group of groups, one for each Athena
    dataset in the project file.  This supports both gzipped and
    unzipped files and old-style perl-like project files and new-style
    JSON project files

    Arguments:
        filename (string): name of Athena Project file
        match (string): pattern to use to limit imported groups (see Note 1)
        do_preedge (bool): whether to do pre-edge subtraction [True]
        do_bkg (bool): whether to do XAFS background subtraction [True]
        do_fft (bool): whether to do XAFS Fast Fourier transform [True]
        use_hashkey (bool): whether to use Athena's hash key as the
                            group name instead of the Athena label [False]

    Returns:
        None, fills in attributes `header`, `journal`, `filename`, `groups`

    Notes:
        1. To limit the imported groups, use the pattern in `match`,
           using '*' to match 'all', '?' to match any single character,
           or [sequence] to match any of a sequence of letters.  The
           match will always be insensitive to case.
        2. use_hashkey=True will name groups from the internal
           5-character string used by Athena, instead of the group label.
        3. do_preedge, do_bkg, and do_fft will attempt to reproduce the
           pre-edge, background subtraction, and FFT from Athena by
           using the parameters saved in the project file.

    Example:
        1. read in all groups from a project file:
           cr_data = read_athena('My Cr Project.prj')

        2. read in only the "merged" data from a Project, and don't do FFT:
           zn_data = read_athena('Zn on Stuff.prj', match='*merge*',
                                 do_fft=False)
    """
    if filename is not None:
        self.filename = filename
    if not os.path.exists(self.filename):
        raise IOError("%s '%s': cannot find file" % (ERR_MSG, self.filename))

    from larch.xafs import pre_edge, autobk, xftf

    if not os.path.exists(filename):
        raise IOError("file '%s' not found" % filename)

    text = _read_raw_athena(filename)
    if text is None:
        # raw read failed outright
        raise OSError(errval)
    if not _test_athena_text(text):
        raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))

    # decode the project: try JSON first, then the old Perl format
    data = None
    try:
        data = parse_jsonathena(text, self.filename)
    except:
        try:
            data = parse_perlathena(text, self.filename)
        except:
            print("Not perl-athena ", sys.exc_info())
    if data is None:
        raise ValueError("cannot read file '%s' as Athena Project File"
                         % (self.filename))

    self.header = data.header
    self.journal = data.journal
    self.group_names = data.group_names

    for gname in data.group_names:
        oname = gname
        if match is not None:
            if not fnmatch(gname.lower(), match):
                continue
        this = getattr(data, gname)
        if use_hashkey:
            oname = this.athena_id

        # a dataset is treated as mu(E) only if Athena marked it as xmu
        # and not as any derived/auxiliary record type
        is_xmu = bool(int(getattr(this.athena_params, 'is_xmu', 1.0)))
        is_chi = bool(int(getattr(this.athena_params, 'is_chi', 0.0)))
        is_xmu = is_xmu and not is_chi
        for aname in ('is_xmudat', 'is_bkg', 'is_diff', 'is_proj',
                      'is_pixel', 'is_rsp'):
            flagged = bool(int(getattr(this.athena_params, aname, 0.0)))
            is_xmu = is_xmu and not flagged

        if is_xmu and (do_preedge or do_bkg) and (self._larch is not None):
            pars = clean_bkg_params(this.bkg_params)
            pre_edge(this, e0=float(pars.e0),
                     pre1=float(pars.pre1), pre2=float(pars.pre2),
                     norm1=float(pars.nor1), norm2=float(pars.nor2),
                     nnorm=float(pars.nnorm),
                     make_flat=bool(pars.flatten), _larch=self._larch)
            if do_bkg and hasattr(pars, 'rbkg'):
                autobk(this, _larch=self._larch, e0=float(pars.e0),
                       rbkg=float(pars.rbkg), kmin=float(pars.spl1),
                       kmax=float(pars.spl2), kweight=float(pars.kw),
                       dk=float(pars.dk), clamp_lo=float(pars.clamp1),
                       clamp_hi=float(pars.clamp2))
            if do_fft:
                pars = clean_fft_params(this.fft_params)
                kweight = 2
                if hasattr(pars, 'kw'):
                    kweight = float(pars.kw)
                xftf(this, _larch=self._larch, kmin=float(pars.kmin),
                     kmax=float(pars.kmax), kweight=kweight,
                     window=pars.kwindow, dk=float(pars.dk))

        if is_chi:
            # chi(k) datasets store k in 'energy' and chi in 'mu';
            # rename and drop the misleading originals
            this.k = this.energy * 1.0
            this.chi = this.mu * 1.0
            del this.energy
            del this.mu

        self.groups[oname] = this
def larch_init(CSV_sub, params):
    r"""Larch initialization for data analysis.

    Inputs:
        CSV_sub (str): location of the data files (CSV/XMU), relative
            to the module-level ``base`` directory
        params (dict): dictionary containing all analysis parameters

    Outputs:
        exp, g, params, mylarch -- the experimental chi array, the data
        group, the (augmented) parameter dict, and the larch session
    """
    global intervalK
    global best
    global KMIN
    global KMAX
    global KWEIGHT
    global g

    Kmin = params['Kmin']
    Kmax = params['Kmax']
    deltak = params['deltak']
    BIG = int(Kmax / deltak)
    SMALL = int(Kmin / deltak)
    MID = int(BIG - SMALL + 1)
    RBKG = params['rbkg']
    KWEIGHT = params['kweight']
    KMIN = Kmin
    KMAX = Kmax
    BKGKW = params['bkgkw']      # cu = 1, hfal2 = 2.0
    BKGKMAX = params['bkgkmax']  # cu = 25, hfal2 = 15
    CSV_PATH = os.path.join(base, CSV_sub)

    g = read_ascii(CSV_PATH)
    best = read_ascii(CSV_PATH)
    sumgroup = read_ascii(CSV_PATH)

    # background subtraction with autobk, only needed when the file did
    # not already provide a chi column
    try:
        g.chi
    except AttributeError:
        autobk(g, rbkg=RBKG, kweight=BKGKW, kmax=BKGKMAX, _larch=mylarch)
        autobk(best, rbkg=RBKG, _larch=mylarch)
        autobk(sumgroup, rbkg=RBKG, _larch=mylarch)

    intervalK = (np.linspace(SMALL, BIG, MID)).tolist()

    xftf(g.k, g.chi, kmin=KMIN, kmax=KMAX, dk=4, window='hanning',
         kweight=KWEIGHT, group=g, _larch=mylarch)
    xftf(best.k, best.chi, kmin=KMIN, kmax=KMAX, dk=4, window='hanning',
         kweight=KWEIGHT, group=best, _larch=mylarch)
    xftf(sumgroup.k, sumgroup.chi, kmin=KMIN, kmax=KMAX, dk=4,
         window='hanning', kweight=KWEIGHT, group=sumgroup, _larch=mylarch)

    exp = g.chi
    params['SMALL'] = SMALL
    params['BIG'] = BIG
    params['intervalK'] = intervalK
    return exp, g, params, mylarch
def fitness(exp, arr, full_paths, params, return_r=True):
    r"""Score a candidate fit against the experimental chi(k).

    Builds the summed chi(k) from the FEFF paths described by ``arr``
    (one row [s02, e0, sigma2, deltar] per path), Fourier-transforms
    the total into the global ``best`` group, and accumulates a
    k^2-weighted squared-error loss over ``intervalK``.

    Inputs:
        exp: experimental chi array on the module-global k grid
        arr (array): (n_paths, 4) parameter array [s02, e0, sigma2, deltar]
        full_paths: path indices; nested per compound when several
        params (dict): parameters ('front', 'Kmin', 'Kmax', 'kweight',
            'SMALL', 'BIG', ...)
        return_r (bool): also return per-path result rows and a header
            string

    Outputs:
        path, yTotal, best, loss[, arr_r, array_str]
    """
    base = Path(os.getcwd()).parent.parent
    compounds_list = params['front']
    num_comp = len(params['front'])
    if num_comp > 1:
        front = [os.path.join(base, i) for i in compounds_list]
    else:
        front = os.path.join(base, params['front'][0])
    end = '.dat'
    loss = 0
    yTotal = [0] * (401)
    offset = 2
    global best
    Kmin = params['Kmin']
    Kmax = params['Kmax']
    SMALL = params['SMALL']
    BIG = params['BIG']
    Kweight = params['kweight']
    arr_r = []
    array_str = "---------------------\n"
    for i in range(num_comp):
        if num_comp > 1:
            paths = full_paths[i]
        else:
            paths = full_paths
        for j in range(len(paths)):
            if num_comp > 1:
                filename = front[i] + str(paths[j]).zfill(4) + end
            else:
                filename = front + str(paths[j]).zfill(4) + end
            path = feffdat.feffpath(filename,
                                    s02=str(arr[j, 0]),
                                    e0=str(arr[j, 1]),
                                    sigma2=str(arr[j, 2]),
                                    deltar=str(arr[j, 3]),
                                    _larch=mylarch)
            # BUG FIX: keyword was `larch=mylarch`; larch's API takes
            # `_larch=` (consistent with the other path2chi calls in
            # this module)
            feffdat.path2chi(path, _larch=mylarch)
            print("Path", paths[j], path.s02, path.e0, path.sigma2,
                  path.reff + arr[j, 3])
            temp = [float(path.s02), float(path.e0), float(path.sigma2),
                    float(path.reff + arr[j, 3]), float(path.degen),
                    float(path.nleg), (path.geom)]
            arr_r.append(temp)
            y = path.chi
            for k in intervalK:
                yTotal[int(k)] += y[int(k)]
    best.chi = yTotal
    best.k = path.k
    xftf(best.k, best.chi, kmin=Kmin, kmax=Kmax, dk=4, window='hanning',
         kweight=Kweight, group=best, _larch=mylarch)
    for j in intervalK:
        loss = loss + (yTotal[int(j)] * g.k[int(j)]**2
                       - exp[int(j)] * g.k[int(j)]**2)**2
    if return_r:
        return path, yTotal, best, loss, arr_r, array_str
    else:
        return path, yTotal, best, loss
def fitness_individal(exp, arr, full_paths, params, plot=False, export=False,
                      fig_gui=None):
    r"""Fitness for an individual score.

    Rebuilds chi(k) for each FEFF path with the parameters in ``arr``,
    sums them, Fourier-transforms the total into the global ``best``
    group, and accumulates a k^2-weighted squared-error loss against
    the experimental spectrum.

    Inputs:
        exp (larch_object): experimental data
        arr (array): (n_paths, 4) best-fit parameters
            [s02, e0, sigma2, deltar]
        full_paths (list): path indices; nested per compound when several
        params (dict): dictionary of calculation parameters
        plot (bool): plot the individual paths
        export (bool): fill and return an array holding each path's k
            and k^2-weighted chi
        fig_gui: optional matplotlib Figure to draw into

    Outputs:
        path, yTotal, best, loss, export_paths
    """
    global intervalK
    global best
    loss = 0
    yTotal = [0] * (401)
    offset = 5
    num_comp = len(params['front'])
    base = Path(os.getcwd()).parent.parent
    compounds_list = params['front']
    if num_comp > 1:
        front = [os.path.join(base, i) for i in compounds_list]
    else:
        front = os.path.join(base, params['front'][0])
    end = '.dat'
    Kmax = params['Kmax']
    SMALL = params['SMALL']
    BIG = params['BIG']
    export_paths = np.zeros((2 * len(flatten_2d_list(full_paths)), 401))
    if plot:
        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(7, 6))
    if fig_gui is not None:
        ax = fig_gui.add_subplot(111)
    iterator = 0
    for i in range(num_comp):
        if num_comp > 1:
            paths = full_paths[i]
        else:
            paths = full_paths
        for j in range(len(paths)):
            if num_comp > 1:
                filename = front[i] + str(paths[j]).zfill(4) + end
            else:
                filename = front + str(paths[j]).zfill(4) + end
            path = feffdat.feffpath(filename, s02=str(arr[j, 0]),
                                    e0=str(arr[j, 1]),
                                    sigma2=str(arr[j, 2]),
                                    deltar=str(arr[j, 3]), _larch=mylarch)
            feffdat.path2chi(path, _larch=mylarch)
            if plot:
                # BUG FIX: label used paths[i][j], but `paths` is already
                # the per-compound list -- index with j, as the filename
                # construction above does
                ax.plot(path.k,
                        path.chi * path.k**2.0 + offset * (iterator + 1),
                        label='Path: ' + str(paths[j]))
                ax.set_xlabel("k ($\AA^{-1}$)")
                ax.set_ylabel("k$^{2}$ ($\chi(k)\AA^{-1}$)")
                ax.set_ylim(-10, len(paths) * offset + offset)
                ax.set_xlim(0, Kmax + 1)
            if fig_gui is not None:
                # BUG FIX: same paths[i][j] -> paths[j] correction
                ax.plot(path.k,
                        path.chi * path.k**2.0 + offset * (iterator + 1),
                        label='Path' + str(paths[j]))
                ax.set_xlabel("k ($\AA^{-1}$)")
                ax.set_ylabel("k$^{2}$ ($\chi(k)\AA^{-1}$)")
                ax.set_ylim(-10, len(paths) * offset + offset)
                ax.set_xlim(0, Kmax + 1)
            if export:
                export_paths[2 * iterator, :] = path.k
                export_paths[2 * iterator + 1, :] = (path.chi * path.k**2.0)
            y = path.chi
            for k in intervalK:
                yTotal[int(k)] += y[int(k)]
            iterator = iterator + 1
    best.chi = yTotal
    best.k = path.k
    # FFT of the summed signal; KMIN/KMAX/KWEIGHT are module globals set
    # by larch_init
    xftf(best.k, best.chi, kmin=KMIN, kmax=KMAX, dk=4, window='hanning',
         kweight=KWEIGHT, group=best)
    offset = 0
    if plot or fig_gui is not None:
        ax.plot(g.k, g.chi * g.k**2 + offset, 'r--', label='Data')
        ax.plot(path.k[SMALL:BIG],
                yTotal[SMALL:BIG] * path.k[SMALL:BIG]**2 + offset,
                'b--', label="GA")
        ax.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
        # NOTE(review): tight_layout assumes fig_gui is set; plot=True
        # with fig_gui=None would raise here -- confirm intended usage
        fig_gui.tight_layout()
    for j in intervalK:
        loss = loss + (yTotal[int(j)] * g.k[int(j)]**2
                       - exp[int(j)] * g.k[int(j)]**2)**2
    return path, yTotal, best, loss, export_paths
def plot(self, title='Test', fig_gui=None):
    r"""Plot the fit against the data in both K space and R space.

    When ``fig_gui`` is supplied the two panels are drawn into it;
    otherwise a fresh matplotlib figure is created.
    """
    lo = self.small
    hi = self.big
    self.best.k = self.path.k
    self.best.chi = self.yTotal
    xftf(self.best.k, self.best.chi,
         kmin=self.params['Kmin'], kmax=self.params['Kmax'],
         dk=4, window='hanning', kweight=self.kweight, group=self.best)
    if fig_gui is None:
        fig, axes = plt.subplots(1, 2, figsize=(15, 5))
        ax_k, ax_r = axes[0], axes[1]
    else:
        ax_k = fig_gui.add_subplot(121)
        ax_r = fig_gui.add_subplot(122)
    # K-space panel: experiment vs. GA reconstruction
    ax_k.plot(self.g.k, self.g.chi * self.g.k**self.kweight, 'b--',
              label="Experiment Data")
    ax_k.plot(self.path.k[lo:hi],
              self.yTotal[lo:hi] * self.path.k[lo:hi]**self.kweight,
              'r-', label="Genetic Algorithm")
    ax_k.legend()
    ax_k.set_title(title + " K Space")
    # R-space panel: FT magnitudes
    ax_r.plot(self.g.r, self.g.chir_mag, 'b--', label='Experiment Data')
    ax_r.plot(self.best.r, self.best.chir_mag, 'r-',
              label='Genetic Algorithm')
    ax_r.set_title(title + " R Space")
    ax_r.legend()
    if fig_gui is not None:
        fig_gui.tight_layout()