def restore(fname, top_level=True, _larch=None):
    """restore data from a json Larch save file

    Arguments
    ---------
    top_level  bool   whether to restore to _main [True]

    Returns
    -------
    None with `top_level=True` or group with `top_level=False`

    Notes
    -----
    1. With top_level=False, a new group containing the recovered
       data will be returned.
    """
    grouplist = _larch.symtable._sys.saverestore_groups
    # context manager guarantees the file handle is closed even on error
    with open(fname, 'r') as fh:
        datalines = fh.readlines()
    line1 = datalines.pop(0)
    if not line1.startswith("#Larch Save File:"):
        raise ValueError("%s is not a valid Larch save file" % fname)
    version_string = line1.split(':')[1].strip()
    version_info = version_string.split('.')
    ivar = 0
    header = {'version': version_info}
    varnames = []
    gname = fix_varname('restore_%s' % fname)
    out = Group(name=gname)
    for line in datalines:
        line = line[:-1]   # strip trailing newline
        if line.startswith('#save.'):
            # header metadata line: "#save.key: value"
            key, value = line[6:].split(':', 1)
            value = value.strip()
            if key == 'nitems':
                value = int(value)
            header[key] = value
        elif line.startswith('#=>'):
            # variable-name marker: the next data line belongs to this name
            name = fix_varname(line[4:].strip())
            ivar += 1
            if name in (None, 'None', '__unknown__') or name in varnames:
                name = 'var_%5.5i' % (ivar)
            varnames.append(name)
        else:
            # JSON data line for the most recently declared variable name
            val = decode4js(json.loads(line), grouplist)
            setattr(out, varnames[-1], val)
    setattr(out, '_restore_metadata_', header)
    if top_level:
        # copy everything (including _restore_metadata_) into _main
        _main = _larch.symtable
        for objname in dir(out):
            setattr(_main, objname, getattr(out, objname))
        return
    return out
def onRead(self, path):
    """prompt on re-read, build a unique group name for `path`, and open
    the appropriate importer sub-frame (Athena project or column data)."""
    if path in self.controller.file_groups:
        answer = popup(self, "Re-read file '%s'?" % path, 'Re-read file?')
        if answer != wx.ID_YES:
            return
    filedir, filename = os.path.split(path)
    pref = fix_varname(filename.replace('.', '_'))[:15]
    groupname = pref
    count, maxcount = 0, 999
    # append _NN until the name is free in the symbol table
    while hasattr(self.larch.symtable, groupname) and count < maxcount:
        count += 1
        groupname = '%s_%2.2i' % (pref, count)
    if self.config['chdir_on_fileopen']:
        os.chdir(filedir)
    kwargs = dict(filename=path,
                  _larch=self.larch_buffer.larchshell,
                  last_array_sel=self.last_array_sel,
                  read_ok_cb=self.onRead_OK)
    # Athena project files get a dedicated importer; everything else
    # is treated as column data
    if is_athena_project(path):
        self.show_subframe('athena_import', AthenaImporter, **kwargs)
    else:
        self.show_subframe('readfile', ColumnDataFileFrame, **kwargs)
def merge_groups(self, grouplist, master=None, yarray='mu', outgroup=None):
    """merge groups, interpolating onto a common energy array.

    Arguments
    ---------
    grouplist  list of group names (strings) to merge
    master     name of the master group [first of grouplist]
    yarray     name of the y array to merge ['mu']
    outgroup   name for the merged output group ['merged']

    Returns
    -------
    name of the output group actually created (made unique if needed)
    """
    cmd = """%s = merge_groups(%s, master=%s,
    xarray='energy', yarray='%s', kind='cubic', trim=True)"""
    glist = "[%s]" % (', '.join(grouplist))
    # BUG FIX: check for None *before* calling .lower(), which would
    # raise AttributeError for the default outgroup=None
    if outgroup is None:
        outgroup = 'merged'
    outgroup = fix_varname(outgroup.lower())
    outgroup = unique_name(outgroup, self.file_groups, max=1000)
    cmd = cmd % (outgroup, glist, master, yarray)
    self.larch.eval(cmd)
    if master is None:
        master = grouplist[0]
    this = self.get_group(outgroup)
    master = self.get_group(master)
    # carry normalization config and datatype over from the master group
    if not hasattr(this, 'xasnorm_config'):
        this.xasnorm_config = {}
    this.xasnorm_config.update(master.xasnorm_config)
    this.datatype = master.datatype
    this.xdat = 1.0*this.energy
    this.ydat = 1.0*getattr(this, yarray)
    this.yerr = getattr(this, 'd' + yarray, 1.0)
    if yarray != 'mu':
        this.mu = this.ydat
    this.plot_xlabel = 'energy'
    this.plot_ylabel = yarray
    return outgroup
def merge_groups(self, grouplist, master=None, yarray='mu', outgroup=None):
    """merge groups, interpolating onto a common energy array.

    Arguments
    ---------
    grouplist  list of group names (strings) to merge
    master     name of the master group [first of grouplist]
    yarray     name of the y array to merge ['mu']
    outgroup   name for the merged output group ['merged']

    Returns
    -------
    name of the output group actually created (made unique if needed)
    """
    cmd = """%s = merge_groups(%s, master=%s,
    xarray='energy', yarray='%s', kind='cubic', trim=True)"""
    glist = "[%s]" % (', '.join(grouplist))
    # BUG FIX: check for None *before* calling .lower(), which would
    # raise AttributeError for the default outgroup=None
    if outgroup is None:
        outgroup = 'merged'
    outgroup = fix_varname(outgroup.lower())
    outgroup = unique_name(outgroup, self.file_groups, max=1000)
    cmd = cmd % (outgroup, glist, master, yarray)
    self.larch.eval(cmd)
    if master is None:
        master = grouplist[0]
    this = self.get_group(outgroup)
    master = self.get_group(master)
    # carry normalization config and datatype over from the master group
    if not hasattr(this, 'xasnorm_config'):
        this.xasnorm_config = {}
    this.xasnorm_config.update(master.xasnorm_config)
    this.datatype = master.datatype
    this.xdat = 1.0 * this.energy
    this.ydat = 1.0 * getattr(this, yarray)
    this.plot_xlabel = 'energy'
    this.plot_ylabel = yarray
    return outgroup
def assign_gsescan_groups(group):
    """assign positioner, sum, and detector arrays of a GSE scan group
    to named attributes, rebuilding group.array_labels.

    Names are sanitized with fix_varname(); arrays come from group.pos,
    group.sums_corr, and group.det_corr respectively.
    """
    # note: the previous value of group.array_labels is intentionally
    # discarded and rebuilt from scratch (a dead read of it was removed)
    labels = []
    for i, name in enumerate(group.pos_desc):
        name = fix_varname(name.lower())
        labels.append(name)
        setattr(group, name, group.pos[i, :])

    for i, name in enumerate(group.sums_names):
        name = fix_varname(name.lower())
        labels.append(name)
        setattr(group, name, group.sums_corr[i, :])

    for i, name in enumerate(group.det_desc):
        name = fix_varname(name.lower())
        labels.append(name)
        setattr(group, name, group.det_corr[i, :])

    group.array_labels = labels
def onReadScan(self, evt=None):
    """prompt for a column data file, pick a reader from its first line,
    and open the column-edit frame for the resulting group."""
    dlg = wx.FileDialog(self, message="Load Column Data File",
                        defaultDir=os.getcwd(),
                        wildcard=FILE_WILDCARDS, style=wx.FD_OPEN)
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        path = path.replace('\\', '/')
        if path in self.file_groups:
            if wx.ID_YES != popup(self, "Re-read file '%s'?" % path,
                                  'Re-read file?'):
                return
        filedir, filename = os.path.split(path)
        # pad then truncate so the prefix is always 7 characters long
        pref = fix_varname((filename + '____')[:7]).replace('.', '_')
        count, maxcount = 1, 9999
        groupname = "%s%3.3i" % (pref, count)
        while hasattr(self.larch.symtable, groupname) and count < maxcount:
            count += 1
            groupname = '%s%3.3i' % (pref, count)
        if self.config['chdir_on_fileopen']:
            os.chdir(filedir)
        # context manager closes the file even if readline() raises
        with open(path, 'r') as fh:
            line1 = fh.readline().lower()
        reader = read_ascii
        if 'epics stepscan file' in line1:
            reader = read_gsexdi
        elif 'epics scan' in line1:
            reader = gsescan_group
        elif 'xdi' in line1:
            reader = read_xdi
        dgroup = reader(str(path), _larch=self.larch)
        if reader == gsescan_group:
            assign_gsescan_groups(dgroup)
        dgroup._path = path
        dgroup._filename = filename
        dgroup._groupname = groupname
        self.show_subframe('coledit', EditColumnFrame, group=dgroup,
                           last_array_sel=self.last_array_sel,
                           read_ok_cb=self.onReadScan_Success)
    dlg.Destroy()
def onRead(self, path):
    """prompt on re-read, then import `path` either as an Athena project
    or as a plain ASCII column file via the column-edit frame."""
    if path in self.controller.file_groups:
        if wx.ID_YES != popup(self, "Re-read file '%s'?" % path,
                              'Re-read file?'):
            return
    filedir, filename = os.path.split(path)
    pref = fix_varname(filename.replace('.', '_'))
    if len(pref) > 15:
        pref = pref[:15]
    groupname = pref
    count, maxcount = 0, 999
    # append _NN until the name is free in the symbol table
    while hasattr(self.larch.symtable, groupname) and count < maxcount:
        count += 1
        groupname = '%s_%2.2i' % (pref, count)
    if self.config['chdir_on_fileopen']:
        os.chdir(filedir)

    # check for athena projects
    if is_athena_project(path):
        self.show_subframe('athena_import', AthenaImporter,
                           filename=path, _larch=self.larch,
                           read_ok_cb=partial(self.onRead_OK,
                                              overwrite=False))
        return

    ## not athena, plain ASCII: pick a reader from the first line.
    # context manager closes the file even if readline() raises
    with open(path, 'r') as fh:
        line1 = fh.readline().lower()
    reader = read_ascii
    if 'epics stepscan file' in line1:
        reader = read_gsexdi
    elif 'epics scan' in line1:
        reader = gsescan_group
    elif 'xdi' in line1:
        reader = read_xdi
    dgroup = reader(str(path), _larch=self.larch)
    if reader == gsescan_group:
        assign_gsescan_groups(dgroup)
    dgroup.path = path
    dgroup.filename = filename
    dgroup.groupname = groupname
    self.show_subframe('coledit', EditColumnFrame, group=dgroup,
                       last_array_sel=self.last_array_sel,
                       _larch=self.larch,
                       read_ok_cb=partial(self.onRead_OK,
                                          overwrite=False))
def onReadData(self, event=None):
    """prompt for a data file and import it, dispatching Athena projects
    to the Athena importer and column data to the column-edit frame."""
    wildcard = 'Data file (*.dat)|*.dat|All files (*.*)|*.*'
    dlg = wx.FileDialog(self, message='Open Data File',
                        defaultDir=os.getcwd(),
                        wildcard=FILE_WILDCARDS,
                        style=wx.FD_OPEN|wx.FD_CHANGE_DIR)
    path = None
    if dlg.ShowModal() == wx.ID_OK:
        path = os.path.abspath(dlg.GetPath()).replace('\\', '/')
    dlg.Destroy()
    if path is None:
        return

    if is_athena_project(path):
        self.show_subframe(name='athena_import', filename=path,
                           creator=AthenaImporter,
                           read_ok_cb=self.onReadAthenaProject_OK)
    else:
        filedir, filename = os.path.split(path)
        # pad then truncate so the prefix is always 8 characters long
        pref = fix_varname((filename + '_'*8)[:8]).replace('.', '_').lower()
        count, maxcount = 1, 9999
        groupname = "%s%3.3i" % (pref, count)
        while hasattr(self.larchshell.symtable, groupname) and count < maxcount:
            count += 1
            groupname = '%s%3.3i' % (pref, count)
        # context manager closes the file even if readline() raises
        with open(path, 'r') as fh:
            line1 = fh.readline().lower()
        reader = read_ascii
        if 'epics stepscan file' in line1:
            reader = read_gsexdi
        elif 'epics scan' in line1:
            reader = gsescan_group
        elif 'xdi' in line1:
            reader = read_xdi
        dgroup = reader(str(path), _larch=self.larchshell._larch)
        dgroup._path = path
        dgroup._filename = filename
        dgroup._groupname = groupname
        self.show_subframe(name='coledit', event=None,
                           creator=ColumnDataFileFrame, filename=path,
                           last_array_sel=self.last_array_sel,
                           read_ok_cb=self.onReadScan_Success)
def onOK(self, event=None):
    """build arrays according to selection, invoke the read-ok callback,
    and close this frame."""
    # make sure the column choice has been applied at least once
    if not hasattr(self.dgroup, '_xdat'):
        self.onColumnChoice()

    if self.wid_groupname is not None:
        self.dgroup._groupname = fix_varname(self.wid_groupname.GetValue())
    if self.plotframe is not None:
        # best-effort close of the plot window; the frame may already be
        # destroyed, so failures are deliberately ignored (narrowed from
        # a bare except)
        try:
            self.plotframe.Destroy()
        except Exception:
            pass
    if self.read_ok_cb is not None:
        self.read_ok_cb(self.dgroup, self.array_sel)
    self.Destroy()
def onOK(self, event=None):
    """build arrays according to selection, set the y-uncertainty from
    the chosen option, invoke the read-ok callback, and close."""
    if self.wid_groupname is not None:
        self.outgroup.groupname = fix_varname(self.wid_groupname.GetValue())

    yerr_op = self.yerr_op.GetStringSelection().lower()
    if yerr_op.startswith('const'):
        self.outgroup.yerr = self.yerr_const.GetValue()
    elif yerr_op.startswith('array'):
        yerr = self.yerr_arr.GetStringSelection().strip()
        # BUG FIX: `rawgroup` was an undefined name here (NameError);
        # assumes the raw data group is held on self — TODO confirm
        self.outgroup.yerr = get_data(self.rawgroup, yerr)
    elif yerr_op.startswith('sqrt'):
        # BUG FIX: bare `outgroup` was an undefined name (NameError)
        self.outgroup.yerr = np.sqrt(self.outgroup.ydat)

    if self.read_ok_cb is not None:
        self.read_ok_cb(self.outgroup, array_sel=self.array_sel)
    self.Destroy()
def onReadData(self, event=None):
    """prompt for a column data file, read it with a format-appropriate
    reader, and open the column-edit frame for the resulting group."""
    wildcard = 'Data file (*.dat)|*.dat|All files (*.*)|*.*'
    dlg = wx.FileDialog(self, message='Open Data File',
                        defaultDir=os.getcwd(),
                        wildcard=FILE_WILDCARDS,
                        style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
    dgroup = None
    if dlg.ShowModal() == wx.ID_OK:
        path = os.path.abspath(dlg.GetPath()).replace('\\', '/')
        filedir, filename = os.path.split(path)
        # pad then truncate so the prefix is always 8 characters long
        pref = fix_varname((filename + '_' * 8)[:8]).replace('.', '_').lower()
        count, maxcount = 1, 9999
        groupname = "%s%3.3i" % (pref, count)
        while hasattr(self.larchshell.symtable, groupname) and count < maxcount:
            count += 1
            groupname = '%s%3.3i' % (pref, count)
        # context manager closes the file even if readline() raises
        with open(path, 'r') as fh:
            line1 = fh.readline().lower()
        reader = read_ascii
        if 'epics stepscan file' in line1:
            reader = read_gsexdi
        elif 'epics scan' in line1:
            reader = gsescan_group
        elif 'xdi' in line1:
            reader = read_xdi
        dgroup = reader(str(path), _larch=self.larchshell.larch)
        dgroup._path = path
        dgroup._filename = filename
        dgroup._groupname = groupname
    dlg.Destroy()
    if dgroup is not None:
        self.show_subframe(name='coledit', event=None,
                           creator=EditColumnFrame, group=dgroup,
                           last_array_sel=self.last_array_sel,
                           read_ok_cb=self.onReadScan_Success)
def onMergeData(self, event=None):
    """collect the checked file groups, confirm via MergeDialog, and
    merge them into a newly installed group."""
    checked = self.controller.filelist.GetCheckedStrings()
    groups = [self.controller.file_groups[str(item)] for item in checked]
    if not groups:
        return
    outgroup = unique_name('merge', self.controller.file_groups)
    dlg = MergeDialog(self, groups, outgroup=outgroup)
    res = dlg.GetResponse()
    dlg.Destroy()
    if not res.ok:
        return
    fname = res.group
    gname = fix_varname(res.group.lower())
    yname = 'norm' if res.ynorm else 'mu'
    self.controller.merge_groups(groups, master=res.master,
                                 yarray=yname, outgroup=gname)
    self.install_group(gname, fname, overwrite=False)
    self.controller.filelist.SetStringSelection(fname)
def onOK(self, event=None):
    """import each checked Athena group into a new XAS data group and
    pass it to the read-ok callback."""
    for name in self.grouplist.GetCheckedStrings():
        rawgroup = getattr(self.all, name)
        npts = len(rawgroup.energy)
        outgroup = Group(datatype='xas',
                         path="%s::%s" % (self.filename, name),
                         filename=name,
                         groupname=fix_varname(name),
                         raw=rawgroup,
                         xdat=rawgroup.energy,
                         ydat=rawgroup.mu,
                         y=rawgroup.mu,
                         yerr=1.0,
                         npts=npts,
                         _index=1.0*np.arange(npts),
                         plot_xlabel='Energy (eV)',
                         plot_ylabel='mu')
        # guard against a missing callback, consistent with the other
        # importer's onOK handler
        if self.read_ok_cb is not None:
            self.read_ok_cb(outgroup, array_sel=None, overwrite=True)
    self.Destroy()
def onOK(self, event=None):
    """import each checked Athena group into a new XAS data group and
    hand it to the read-ok callback, then close this frame."""
    for name in self.grouplist.GetCheckedStrings():
        rawgroup = getattr(self.all, name)
        npts = len(rawgroup.energy)
        attrs = dict(datatype='xas',
                     path="%s::%s" % (self.filename, name),
                     filename=name,
                     groupname=fix_varname(name),
                     raw=rawgroup,
                     xdat=rawgroup.energy,
                     ydat=rawgroup.mu,
                     y=rawgroup.mu,
                     yerr=1.0,
                     npts=npts,
                     _index=1.0 * np.arange(npts),
                     plot_xlabel='Energy (eV)',
                     plot_ylabel='mu')
        outgroup = Group(**attrs)
        callback = self.read_ok_cb
        if callback is not None:
            callback(outgroup, array_sel=None, overwrite=True)
    self.Destroy()
def onMergeData(self, event=None):
    """collect checked file groups (name -> group), confirm via
    MergeDialog, and merge them into a newly installed group."""
    checked = [str(item)
               for item in self.controller.filelist.GetCheckedStrings()]
    groups = OrderedDict((name, self.controller.file_groups[name])
                         for name in checked)
    if not groups:
        return
    outgroup = unique_name('merge', self.controller.file_groups)
    dlg = MergeDialog(self, list(groups.keys()), outgroup=outgroup)
    res = dlg.GetResponse()
    dlg.Destroy()
    if not res.ok:
        return
    fname = res.group
    gname = fix_varname(res.group.lower())
    master = self.controller.file_groups[res.master]
    yname = 'norm' if res.ynorm else 'mu'
    self.controller.merge_groups(list(groups.values()), master=master,
                                 yarray=yname, outgroup=gname)
    self.install_group(gname, fname, overwrite=False)
    self.controller.filelist.SetStringSelection(fname)
def add_data(self, group, name, data):
    """recursively store `data` under HDF5 `group` using a sanitized
    `name`, tagging containers with a 'larchtype' attribute."""
    name = fix_varname(name)
    if self.isgroup(data):
        attrs = {'larchtype': 'group',
                 'class': data.__class__.__name__}
        sub = self.add_h5group(group, name, attrs=attrs)
        for comp in dir(data):
            self.add_data(sub, comp, getattr(data, comp))
    elif isinstance(data, (list, tuple)):
        # record whether the sequence was a list or a tuple
        dtype = 'tuple' if isinstance(data, tuple) else 'list'
        sub = self.add_h5group(group, name, attrs={'larchtype': dtype})
        for ix, comp in enumerate(data):
            self.add_data(sub, 'item%i' % ix, comp)
    elif isinstance(data, dict):
        sub = self.add_h5group(group, name, attrs={'larchtype': 'dict'})
        for key, val in data.items():
            self.add_data(sub, key, val)
    elif isParameter(data):
        # parameters are serialized to JSON rather than stored raw
        sub = self.add_h5group(group, name,
                               attrs={'larchtype': 'parameter'})
        self.add_h5dataset(sub, 'json', data.asjson())
    else:
        # scalars, arrays, strings: plain dataset
        self.add_h5dataset(group, name, data)
def read_athena(filename, match=None, do_preedge=True, do_bkg=True,
                do_fft=True, use_hashkey=False, _larch=None):
    """read athena project file
    returns a Group of Groups, one for each Athena Group in the project file

    Arguments:
        filename (string): name of Athena Project file
        match (string): pattern to use to limit imported groups (see Note 1)
        do_preedge (bool): whether to do pre-edge subtraction [True]
        do_bkg (bool): whether to do XAFS background subtraction [True]
        do_fft (bool): whether to do XAFS Fast Fourier transform [True]
        use_hashkey (bool): whether to use Athena's hash key as the
                       group name instead of the Athena label [False]

    Returns:
        group of groups each named according the label used by Athena.

    Notes:
        1. To limit the imported groups, use the pattern in `match`,
           using '*' to match 'all', '?' to match any single character,
           or [sequence] to match any of a sequence of letters.  The
           match will always be insensitive to case.
        2. do_preedge, do_bkg, and do_fft will attempt to reproduce the
           pre-edge, background subtraction, and FFT from Athena by
           using the parameters saved in the project file.
        3. use_hashkey=True will name groups from the internal 5
           character string used by Athena, instead of the group label.

    Example:
        1. read in all groups from a project file:
           cr_data = read_athena('My Cr Project.prj')

        2. read in only the "merged" data from a Project, and don't do FFT:
           zn_data = read_athena('Zn on Stuff.prj', match='*merge*',
                                 do_fft=False)
    """
    from larch_plugins.xafs import pre_edge, autobk, xftf

    if not os.path.exists(filename):
        raise IOError("%s '%s': cannot find file" % (ERR_MSG, filename))

    try:
        fh = GzipFile(filename)
        lines = [bytes2str(t) for t in fh.readlines()]
        fh.close()
    except Exception:
        raise ValueError("%s '%s': invalid gzip file" % (ERR_MSG, filename))

    athenagroups = []
    dat = {"name": ""}
    vline = lines.pop(0)
    if "Athena project file -- Demeter version" not in vline:
        raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))

    major, minor, fix = "0", "0", "0"
    try:
        vs = vline.split("Athena project file -- Demeter version")[1]
        major, minor, fix = vs.split(".")
    except Exception:
        raise ValueError("%s '%s': cannot read version" % (ERR_MSG, filename))
    # compare (minor, fix) as a tuple: 0.9.21 is the oldest supported
    # format.  The previous `minor < 9 or fix < 21` test wrongly
    # rejected newer versions such as 0.10.5.
    if (int(minor), int(fix[:2])) < (9, 21):
        raise ValueError("%s '%s': file is too old to read" % (ERR_MSG, filename))

    # first pass: collect raw per-group records from the project file
    for t in lines:
        if t.startswith("#") or len(t) < 2:
            continue
        key = t.split(" ")[0].strip()
        key = key.replace("$", "").replace("@", "")
        if key == "old_group":
            dat["name"] = perl2json(t)
        elif key == "[record]":
            athenagroups.append(dat)
            dat = {"name": ""}
        elif key == "args":
            dat["args"] = perl2json(t)
        elif key in ("x", "y", "i0"):
            dat[key] = np.array([float(x) for x in perl2json(t)])

    if match is not None:
        match = match.lower()

    out = Group()
    out.__doc__ = """XAFS Data from Athena Project File %s""" % (filename)
    # second pass: build a Group per record, applying saved parameters
    for dat in athenagroups:
        label = dat["name"]
        this = Group(athena_id=label, energy=dat["x"], mu=dat["y"],
                     bkg_params=Group(), fft_params=Group(),
                     athena_params=Group())
        if "i0" in dat:
            this.i0 = dat["i0"]
        if "args" in dat:
            # args is a flat [key1, val1, key2, val2, ...] list
            for i in range(len(dat["args"]) // 2):
                key = dat["args"][2 * i]
                val = dat["args"][2 * i + 1]
                if key.startswith("bkg_"):
                    setattr(this.bkg_params, key[4:], val)
                elif key.startswith("fft_"):
                    setattr(this.fft_params, key[4:], val)
                elif key == "label":
                    this.label = val
                    if not use_hashkey:
                        label = this.label
                else:
                    setattr(this.athena_params, key, val)
        this.__doc__ = """Athena Group Name %s (key='%s')""" % (label, dat["name"])
        olabel = fix_varname(label)
        if match is not None:
            if not fnmatch(olabel.lower(), match):
                continue

        if do_preedge or do_bkg:
            pars = this.bkg_params
            pre_edge(this, _larch=_larch, e0=float(pars.e0),
                     pre1=float(pars.pre1), pre2=float(pars.pre2),
                     norm1=float(pars.nor1), norm2=float(pars.nor2),
                     nnorm=float(pars.nnorm) - 1,
                     make_flat=bool(pars.flatten))
            if do_bkg and hasattr(pars, "rbkg"):
                autobk(this, _larch=_larch, e0=float(pars.e0),
                       rbkg=float(pars.rbkg), kmin=float(pars.spl1),
                       kmax=float(pars.spl2), kweight=float(pars.kw),
                       dk=float(pars.dk), clamp_lo=float(pars.clamp1),
                       clamp_hi=float(pars.clamp2))

        if do_fft:
            pars = this.fft_params
            kweight = 2
            if hasattr(pars, "kw"):
                kweight = float(pars.kw)
            xftf(this, _larch=_larch, kmin=float(pars.kmin),
                 kmax=float(pars.kmax), kweight=kweight,
                 window=pars.kwindow, dk=float(pars.dk))

        setattr(out, olabel, this)
    return out
def read_athena(filename, match=None, do_preedge=True, do_bkg=True,
                do_fft=True, use_hashkey=False, _larch=None):
    """read athena project file
    returns a Group of Groups, one for each Athena Group in the project file

    Arguments:
        filename (string): name of Athena Project file
        match (string): pattern to use to limit imported groups (see Note 1)
        do_preedge (bool): whether to do pre-edge subtraction [True]
        do_bkg (bool): whether to do XAFS background subtraction [True]
        do_fft (bool): whether to do XAFS Fast Fourier transform [True]
        use_hashkey (bool): whether to use Athena's hash key as the
                       group name instead of the Athena label [False]

    Returns:
        group of groups each named according the label used by Athena.

    Notes:
        1. To limit the imported groups, use the pattern in `match`,
           using '*' to match 'all', '?' to match any single character,
           or [sequence] to match any of a sequence of letters.  The
           match will always be insensitive to case.
        2. do_preedge, do_bkg, and do_fft will attempt to reproduce the
           pre-edge, background subtraction, and FFT from Athena by
           using the parameters saved in the project file.
        3. use_hashkey=True will name groups from the internal 5
           character string used by Athena, instead of the group label.

    Example:
        1. read in all groups from a project file:
           cr_data = read_athena('My Cr Project.prj')

        2. read in only the "merged" data from a Project, and don't do FFT:
           zn_data = read_athena('Zn on Stuff.prj', match='*merge*',
                                 do_fft=False)
    """
    from larch_plugins.xafs import pre_edge, autobk, xftf

    if not os.path.exists(filename):
        raise IOError("%s '%s': cannot find file" % (ERR_MSG, filename))

    try:
        fh = GzipFile(filename)
        lines = [bytes2str(t) for t in fh.readlines()]
        fh.close()
    except Exception:
        raise ValueError("%s '%s': invalid gzip file" % (ERR_MSG, filename))

    athenagroups = []
    dat = {'name': ''}
    vline = lines.pop(0)
    if "Athena project file -- Demeter version" not in vline:
        raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))

    major, minor, fix = '0', '0', '0'
    try:
        vs = vline.split("Athena project file -- Demeter version")[1]
        major, minor, fix = vs.split('.')
    except Exception:
        raise ValueError("%s '%s': cannot read version" % (ERR_MSG, filename))
    # compare (minor, fix) as a tuple: 0.9.21 is the oldest supported
    # format.  The previous `minor < 9 or fix < 21` test wrongly
    # rejected newer versions such as 0.10.5.
    if (int(minor), int(fix[:2])) < (9, 21):
        raise ValueError("%s '%s': file is too old to read" % (ERR_MSG, filename))

    # first pass: collect raw per-group records from the project file
    for t in lines:
        if t.startswith('#') or len(t) < 2:
            continue
        key = t.split(' ')[0].strip()
        key = key.replace('$', '').replace('@', '')
        if key == 'old_group':
            dat['name'] = perl2json(t)
        elif key == '[record]':
            athenagroups.append(dat)
            dat = {'name': ''}
        elif key == 'args':
            dat['args'] = perl2json(t)
        elif key in ('x', 'y', 'i0'):
            dat[key] = np.array([float(x) for x in perl2json(t)])

    if match is not None:
        match = match.lower()

    out = Group()
    out.__doc__ = """XAFS Data from Athena Project File %s""" % (filename)
    # second pass: build a Group per record, applying saved parameters
    for dat in athenagroups:
        label = dat['name']
        this = Group(athena_id=label, energy=dat['x'], mu=dat['y'],
                     bkg_params=Group(), fft_params=Group(),
                     athena_params=Group())
        if 'i0' in dat:
            this.i0 = dat['i0']
        if 'args' in dat:
            # args is a flat [key1, val1, key2, val2, ...] list
            for i in range(len(dat['args']) // 2):
                key = dat['args'][2 * i]
                val = dat['args'][2 * i + 1]
                if key.startswith('bkg_'):
                    setattr(this.bkg_params, key[4:], val)
                elif key.startswith('fft_'):
                    setattr(this.fft_params, key[4:], val)
                elif key == 'label':
                    this.label = val
                    if not use_hashkey:
                        label = this.label
                else:
                    setattr(this.athena_params, key, val)
        this.__doc__ = """Athena Group Name %s (key='%s')""" % (label, dat['name'])
        olabel = fix_varname(label)
        if match is not None:
            if not fnmatch(olabel.lower(), match):
                continue

        if do_preedge or do_bkg:
            pars = this.bkg_params
            pre_edge(this, _larch=_larch, e0=float(pars.e0),
                     pre1=float(pars.pre1), pre2=float(pars.pre2),
                     norm1=float(pars.nor1), norm2=float(pars.nor2),
                     nnorm=float(pars.nnorm) - 1,
                     make_flat=bool(pars.flatten))
            if do_bkg and hasattr(pars, 'rbkg'):
                autobk(this, _larch=_larch, e0=float(pars.e0),
                       rbkg=float(pars.rbkg), kmin=float(pars.spl1),
                       kmax=float(pars.spl2), kweight=float(pars.kw),
                       dk=float(pars.dk), clamp_lo=float(pars.clamp1),
                       clamp_hi=float(pars.clamp2))

        if do_fft:
            pars = this.fft_params
            kweight = 2
            if hasattr(pars, 'kw'):
                kweight = float(pars.kw)
            xftf(this, _larch=_larch, kmin=float(pars.kmin),
                 kmax=float(pars.kmax), kweight=kweight,
                 window=pars.kwindow, dk=float(pars.dk))

        setattr(out, olabel, this)
    return out