def _read_raw_athena(filename):
    """Try to read an Athena project file as plain text, to determine validity.

    First attempts to read the file as gzip-compressed text (Athena's
    native format); on failure, falls back to reading it as plain text.

    Args:
        filename: path to the candidate Athena project file.

    Returns:
        The file contents as a string, or None if the file could not be
        read either way.
    """
    text = None
    # try gzip first
    fh = None
    try:
        fh = GzipFile(filename)
        text = bytes2str(fh.read())
    except Exception:
        text = None
    finally:
        # guard: GzipFile() itself may have raised, leaving fh unset --
        # the original unconditionally called fh.close() here, which
        # raised NameError in that case
        if fh is not None:
            fh.close()
    if text is None:
        # fall back to a plain text file
        fh = None
        try:
            fh = open(filename, 'r')
            text = bytes2str(fh.read())
        except Exception:
            text = None
        finally:
            if fh is not None:
                fh.close()
    return text
def onSelectArray(self, evt=None):
    """Show expression and per-variable details for the selected work array.

    Reads the selected array's 'expression' and 'info' attributes from
    the current file's work-array dataset and displays them in the two
    info labels.
    """
    xrmfile = self.owner.current_file
    name = self.workarray_choice.GetStringSelection()
    dset = xrmfile.get_work_array(h5str(name))
    # fixed typo in the fallback text (was '<unknonwn>')
    expr = bytes2str(dset.attrs.get('expression', '<unknown>'))
    self.info1.SetLabel("Expression: %s" % expr)

    info = json.loads(bytes2str(dset.attrs.get('info', [])))
    buff = []
    for var, dat in info:
        fname, aname, det, dtc = dat
        # skip trivial entries (second field == '1')
        if dat[1] != '1':
            buff.append("%s= %s('%s', det=%s, dtcorr=%s)" % (var, fname, aname, det, dtc))
    self.info2.SetLabel('\n'.join(buff))
def check_image(self):
    """Validate and normalize the XRD image data.

    For in-memory images, reshapes 2D/3D arrays to the canonical 4D
    (jframes, iframes, xpix, ypix) form.  For HDF5 map files, locates
    the 2D-XRD counts dataset and optional calibration file, and picks
    initial center-frame indices.
    """
    if self.h5file is None:
        shp = np.shape(self.image)
        # promote 2D or 3D arrays to 4D (jframes, iframes, xpix, ypix)
        if len(shp) == 2:
            self.image = np.reshape(self.image, (1, 1, shp[0], shp[1]))
        if len(shp) == 3:
            self.image = np.reshape(self.image, (1, shp[0], shp[1], shp[2]))
        self.jframes, self.iframes, self.xpix, self.ypix = np.shape(self.image)
    else:
        try:
            self.h5xrd = self.h5file['xrmmap/xrd2D/counts']
        except Exception:
            self.image = None
            print('No 2DXRD data in %s' % os.path.split(self.h5file.filename)[-1])
            return
        self.calfile = bytes2str(self.h5file['xrmmap/xrd1D'].attrs.get('calfile', ''))
        if not os.path.exists(self.calfile):
            self.calfile = None
        ## making an assumption that h5 map file always has multiple rows and cols
        self.jframes, self.iframes, self.xpix, self.ypix = self.h5xrd.shape
        # use integer division: these are frame indices (plain '/'
        # yielded a float under Python 3)
        self.i = 0 if self.iframes < 4 else int(self.iframes) // 2
        self.j = 0 if self.jframes < 4 else int(self.jframes) // 2
def roiSELECT(self, iroi, event=None):
    """Update the limits label for the ROI chosen in selector `iroi`.

    Looks up the selected detector/ROI in the map file and formats its
    limits (with units for version >= 2.0.0 files, or energy bounds for
    older files) into the corresponding label widget.
    """
    detname = self.det_choice[iroi].GetStringSelection()
    roiname = self.roi_choice[iroi].GetStringSelection()

    if version_ge(self.cfile.version, '2.0.0'):
        try:
            roi = self.cfile.xrmmap['roimap'][detname][roiname]
            limits = roi['limits'][:]
            units = bytes2str(roi['limits'].attrs.get('units', ''))
            if units == '1/A':
                # q-space limits: two decimal places
                roistr = '[%0.2f to %0.2f %s]' % (limits[0], limits[1], units)
            else:
                roistr = '[%0.1f to %0.1f %s]' % (limits[0], limits[1], units)
        except Exception:
            # was a bare 'except:'; narrowed so Ctrl-C is not swallowed
            roistr = ''
    else:
        try:
            roi = self.cfile.xrmmap[detname]
            en = list(roi['energy'][:])
            index = list(roi['roi_name'][:]).index(roiname)
            limits = list(roi['roi_limits'][:][index])
            roistr = '[%0.1f to %0.1f keV]' % (en[limits[0]], en[limits[1]])
        except Exception:
            roistr = ''
    self.roi_label[iroi].SetLabel(roistr)
def set_roilist(self, mca=None):
    """ Add Roi names to roilist"""
    self.wids['roilist'].Clear()
    if mca is not None:
        for roi in mca.rois:
            name = bytes2str(roi.name.strip())
            if len(name) > 0:
                # append the cleaned name; the original appended the raw
                # roi.name, leaving the computed conversion unused
                self.wids['roilist'].Append(name)
def is_athena_project(filename):
    """Test whether a file is a valid Athena Project file.

    Checks the first line of the gzip-compressed file for the
    Athena/Demeter signature string.

    Args:
        filename: path of the file to test.

    Returns:
        True if the file exists and carries the Athena signature.
    """
    result = False
    if os.path.exists(filename):
        fh = None
        try:
            fh = GzipFile(filename)
            line1 = bytes2str(fh.readline())
            result = "Athena project file -- Demeter version" in line1
        except Exception:
            # unreadable / not gzip: treat as "not an Athena project"
            pass
        finally:
            # guard: GzipFile() itself may have raised, leaving fh
            # unset -- the original's unconditional fh.close() raised
            # NameError in that case
            if fh is not None:
                fh.close()
    return result
def plotmca(self, mca, title=None, set_title=True, as_mca2=False,
            fullrange=False, init=False, **kws):
    """Plot an MCA's energy/counts spectrum.

    Args:
        mca: MCA object with `energy` and `counts` arrays.
        title: plot title; built from MCA metadata when None.
        set_title: whether to apply the title to the window.
        as_mca2: overlay this MCA as the second (background) spectrum.
        fullrange: accepted for API compatibility; not used here.
        init: force the x view range to the MCA's full energy range.
        **kws: extra keyword arguments passed to plot()/oplot().
    """
    if as_mca2:
        self.mca2 = mca
        kws['new'] = False
    else:
        self.mca = mca
    self.panel.conf.show_grid = False

    xview_range = self.panel.axes.get_xlim()
    # (0.0, 1.0) is matplotlib's pristine default -- nothing plotted yet
    if init or xview_range == (0.0, 1.0):
        self.xview_range = (min(self.mca.energy), max(self.mca.energy))
    else:
        self.xview_range = xview_range

    atitles = []
    if self.mca is not None:
        if hasattr(self.mca, 'title'):
            atitles.append(bytes2str(self.mca.title))
        if hasattr(self.mca, 'filename'):
            atitles.append(" File={:s}".format(self.mca.filename))
        if hasattr(self.mca, 'npixels'):
            atitles.append(" {:.0f} Pixels".format(self.mca.npixels))
        if hasattr(self.mca, 'real_time'):
            try:
                rtime_str = " RealTime={:.2f} sec".format(self.mca.real_time)
            except ValueError:
                # fixed: was '" RealTime= %s sec".format(...)', which is
                # a no-op on a %-style string and dropped the value
                rtime_str = " RealTime= {:s} sec".format(str(self.mca.real_time))
            atitles.append(rtime_str)
        try:
            self.plot(self.mca.energy, self.mca.counts, mca=self.mca, **kws)
        except ValueError:
            pass

    if as_mca2:
        if hasattr(self.mca2, 'title'):
            atitles.append(" BG={:s}".format(self.mca2.title))
        elif hasattr(self.mca2, 'filename'):
            atitles.append(" BG_File={:s}".format(self.mca2.filename))
        # fixed: original tested self.mca here but formatted
        # self.mca2.real_time, crashing when only mca2 had real_time
        if hasattr(self.mca2, 'real_time'):
            atitles.append(" BG_RealTime={:.2f} sec".format(self.mca2.real_time))
        self.oplot(self.mca2.energy, self.mca2.counts, mca=self.mca2, **kws)

    if title is None:
        title = ' '.join(atitles)
    if set_title:
        self.SetTitle(title)
def onROI(self, event=None, label=None):
    """Handle selection of an ROI, by list event or explicit label.

    Finds the named ROI on the current MCA and updates the ROI
    status/patch display, the three ROI message labels, and the
    zoom-center energy.

    Args:
        event: list event; supplies the label and selection index when
            `label` is not given directly.
        label: ROI name; takes precedence over the event's string.
    """
    if label is None and event is not None:
        label = event.GetString()
        self.roilist_sel = event.GetSelection()
    self.wids['roiname'].SetValue(label)
    name, left, right = None, -1, -1
    # normalize for case-insensitive matching against ROI names
    label = bytes2str(label.lower().strip())
    self.selected_roi = None
    if self.mca is not None:
        for roi in self.mca.rois:
            if bytes2str(roi.name.lower()) == label:
                left, right, name = roi.left, roi.right, roi.name
                # energy bounds of the matched channel range
                elo = self.mca.energy[left]
                ehi = self.mca.energy[right]
                self.selected_roi = roi
                break
    # no matching ROI found: nothing to display
    if name is None or right == -1:
        return
    self.ShowROIStatus(left, right, name=name)
    self.ShowROIPatch(left, right)
    roi_msg1 = '[{:} : {:}]'.format(left, right)
    roi_msg2 = '[{:6.3f} : {:6.3f}]'.format(elo, ehi)
    # center energy / width of the ROI
    roi_msg3 = ' {:6.3f} / {:6.3f} '.format((elo + ehi) / 2., (ehi - elo))
    self.energy_for_zoom = (elo + ehi) / 2.0
    self.wids['roi_msg1'].SetLabel(roi_msg1)
    self.wids['roi_msg2'].SetLabel(roi_msg2)
    self.wids['roi_msg3'].SetLabel(roi_msg3)
    self.draw()
    self.panel.Refresh()
def save(self, filename=None, use_gzip=True):
    """Save this Athena project to a file.

    Serializes all groups that carry an `args` attribute into Athena's
    Perl-flavored project format.

    Args:
        filename: output path; defaults to self.filename (which is
            updated when a filename is given).
        use_gzip: write gzip-compressed output, Athena's native form.
    """
    if filename is not None:
        self.filename = filename
    iso_now = time.strftime('%Y-%m-%dT%H:%M:%S')
    pyosversion = "Python %s on %s" % (platform.python_version(),
                                       platform.platform())

    buff = ["# Athena project file -- Demeter version 0.9.24",
            "# This file created at %s" % iso_now,
            "# Using Larch version %s, %s" % (larch_version, pyosversion)]

    for key, dat in self.groups.items():
        if not hasattr(dat, 'args'):
            continue
        buff.append("")
        # fall back to the dict key when the group has no groupname
        groupname = getattr(dat, 'groupname', key)
        buff.append("$old_group = '%s';" % groupname)
        buff.append("@args = (%s);" % format_dict(dat.args))
        buff.append("@x = (%s);" % format_array(dat.x))
        buff.append("@y = (%s);" % format_array(dat.y))
        if getattr(dat, 'i0', None) is not None:
            buff.append("@i0 = (%s);" % format_array(dat.i0))
        if getattr(dat, 'signal', None) is not None:
            buff.append("@signal = (%s);" % format_array(dat.signal))
        if getattr(dat, 'stddev', None) is not None:
            buff.append("@stddev = (%s);" % format_array(dat.stddev))
        buff.append("[record] # ")

    buff.extend(["", "@journal = {};", "", "1;", "", "",
                 "# Local Variables:", "# truncate-lines: t",
                 "# End:", ""])

    # the payload is encoded bytes, so both branches must open in
    # binary mode; plain open(..., 'w') raised TypeError on Python 3
    if use_gzip:
        fh = GzipFile(self.filename, 'w')
    else:
        fh = open(self.filename, 'wb')
    try:
        fh.write(str2bytes("\n".join([bytes2str(t) for t in buff])))
    finally:
        fh.close()
def save(self, filename=None, use_gzip=True):
    """Save this Athena project to a file.

    Serializes all groups that carry an `args` attribute into Athena's
    Perl-flavored project format.

    Args:
        filename: output path; defaults to self.filename (which is
            updated when a filename is given).
        use_gzip: write gzip-compressed output, Athena's native form.
    """
    if filename is not None:
        self.filename = filename
    iso_now = time.strftime('%Y-%m-%dT%H:%M:%S')
    pyosversion = "Python %s on %s" % (platform.python_version(),
                                       platform.platform())

    buff = ["# Athena project file -- Demeter version 0.9.24",
            "# This file created at %s" % iso_now,
            "# Using Larch version %s, %s" % (larch_version, pyosversion)]

    for key, dat in self.groups.items():
        if not hasattr(dat, 'args'):
            continue
        buff.append("")
        # fall back to the dict key when the group has no groupname
        # (consistent with the sibling save() implementation)
        buff.append("$old_group = '%s';" % getattr(dat, 'groupname', key))
        buff.append("@args = (%s);" % format_dict(dat.args))
        buff.append("@x = (%s);" % format_array(dat.x))
        buff.append("@y = (%s);" % format_array(dat.y))
        if getattr(dat, 'i0', None) is not None:
            buff.append("@i0 = (%s);" % format_array(dat.i0))
        if getattr(dat, 'signal', None) is not None:
            buff.append("@signal = (%s);" % format_array(dat.signal))
        if getattr(dat, 'stddev', None) is not None:
            buff.append("@stddev = (%s);" % format_array(dat.stddev))
        buff.append("[record] # ")

    buff.extend(["", "@journal = {};", "", "1;", "", "",
                 "# Local Variables:", "# truncate-lines: t",
                 "# End:", ""])

    # the payload is encoded bytes, so both branches must open in
    # binary mode; plain open(..., 'w') raised TypeError on Python 3
    if use_gzip:
        fh = GzipFile(self.filename, 'w')
    else:
        fh = open(self.filename, 'wb')
    try:
        fh.write(str2bytes("\n".join([bytes2str(t) for t in buff])))
    finally:
        fh.close()
def read_athena(filename, match=None, do_preedge=True, do_bkg=True,
                do_fft=True, use_hashkey=False, _larch=None):
    """read athena project file
    returns a Group of Groups, one for each Athena Group in the project file

    Arguments:
        filename (string): name of Athena Project file
        match (string): pattern to use to limit imported groups (see Note 1)
        do_preedge (bool): whether to do pre-edge subtraction [True]
        do_bkg (bool): whether to do XAFS background subtraction [True]
        do_fft (bool): whether to do XAFS Fast Fourier transform [True]
        use_hashkey (bool): whether to use Athena's hash key as the
                       group name instead of the Athena label [False]

    Returns:
        group of groups each named according to the label used by Athena.

    Notes:
        1. To limit the imported groups, use the pattern in `match`,
           using '*' to match 'all', '?' to match any single character,
           or [sequence] to match any of a sequence of letters.  The
           match will always be insensitive to case.
        2. do_preedge, do_bkg, and do_fft will attempt to reproduce the
           pre-edge, background subtraction, and FFT from Athena by
           using the parameters saved in the project file.
        3. use_hashkey=True will name groups from the internal
           5-character string used by Athena, instead of the group label.

    Example:
        1. read in all groups from a project file:
           cr_data = read_athena('My Cr Project.prj')

        2. read in only the "merged" data from a Project, and don't do FFT:
           zn_data = read_athena('Zn on Stuff.prj', match='*merge*',
                                 do_fft=False)
    """
    from larch_plugins.xafs import pre_edge, autobk, xftf
    if not os.path.exists(filename):
        raise IOError("%s '%s': cannot find file" % (ERR_MSG, filename))

    try:
        fh = GzipFile(filename)
        lines = [bytes2str(t) for t in fh.readlines()]
        fh.close()
    except Exception:
        raise ValueError("%s '%s': invalid gzip file" % (ERR_MSG, filename))

    athenagroups = []
    dat = {'name': ''}
    Athena_version = None
    vline = lines.pop(0)
    if "Athena project file -- Demeter version" not in vline:
        raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))

    major, minor, fix = '0', '0', '0'
    try:
        vs = vline.split("Athena project file -- Demeter version")[1]
        major, minor, fix = vs.split('.')
    except Exception:
        raise ValueError("%s '%s': cannot read version" % (ERR_MSG, filename))

    # require version >= 0.9.21.  The old test
    # (int(minor) < 9 or int(fix[:2]) < 21) wrongly rejected any
    # release whose fix field was < 21 regardless of the minor
    # version (e.g. a hypothetical 0.10.5); a tuple comparison
    # orders (minor, fix) correctly.
    if (int(minor), int(fix[:2])) < (9, 21):
        raise ValueError("%s '%s': file is too old to read" % (ERR_MSG, filename))

    for t in lines:
        # skip comments, blank-ish lines, and Perl 'undef' entries
        if t.startswith('#') or len(t) < 2 or 'undef' in t:
            continue
        key = t.split(' ')[0].strip()
        key = key.replace('$', '').replace('@', '')
        if key == 'old_group':
            dat['name'] = perl2json(t)
        elif key == '[record]':
            # '[record]' terminates one Athena group
            athenagroups.append(dat)
            dat = {'name': ''}
        elif key == 'args':
            dat['args'] = perl2json(t)
        elif key in ('x', 'y', 'i0', 'signal'):
            dat[key] = np.array([float(x) for x in perl2json(t)])

    if match is not None:
        match = match.lower()

    out = Group()
    out.__doc__ = """XAFS Data from Athena Project File %s""" % (filename)
    for dat in athenagroups:
        label = dat.get('name', 'unknown')
        this = Group(athena_id=label, energy=dat['x'], mu=dat['y'],
                     bkg_params=Group(), fft_params=Group(),
                     athena_params=Group())
        if 'i0' in dat:
            this.i0 = dat['i0']
        if 'args' in dat:
            # args is a flat list of alternating key/value pairs
            for i in range(len(dat['args']) // 2):
                key = dat['args'][2 * i]
                val = dat['args'][2 * i + 1]
                if key.startswith('bkg_'):
                    setattr(this.bkg_params, key[4:], val)
                elif key.startswith('fft_'):
                    setattr(this.fft_params, key[4:], val)
                elif key == 'label':
                    this.label = val
                    if not use_hashkey:
                        label = this.label
                else:
                    setattr(this.athena_params, key, val)
        this.__doc__ = """Athena Group Name %s (key='%s')""" % (label, dat['name'])
        olabel = fix_varname(label)
        if match is not None:
            if not fnmatch(olabel.lower(), match):
                continue

        if do_preedge or do_bkg:
            pars = this.bkg_params
            pre_edge(this, _larch=_larch, e0=float(pars.e0),
                     pre1=float(pars.pre1), pre2=float(pars.pre2),
                     norm1=float(pars.nor1), norm2=float(pars.nor2),
                     nnorm=float(pars.nnorm) - 1,
                     make_flat=bool(pars.flatten))
            if do_bkg and hasattr(pars, 'rbkg'):
                autobk(this, _larch=_larch, e0=float(pars.e0),
                       rbkg=float(pars.rbkg), kmin=float(pars.spl1),
                       kmax=float(pars.spl2), kweight=float(pars.kw),
                       dk=float(pars.dk), clamp_lo=float(pars.clamp1),
                       clamp_hi=float(pars.clamp2))
        if do_fft:
            pars = this.fft_params
            kweight = 2
            if hasattr(pars, 'kw'):
                kweight = float(pars.kw)
            xftf(this, _larch=_larch, kmin=float(pars.kmin),
                 kmax=float(pars.kmax), kweight=kweight,
                 window=pars.kwindow, dk=float(pars.dk))
        setattr(out, olabel, this)
    return out
def read_athena(filename, match=None, do_preedge=True, do_bkg=True,
                do_fft=True, use_hashkey=False, _larch=None):
    """read athena project file
    returns a Group of Groups, one for each Athena Group in the project file

    Arguments:
        filename (string): name of Athena Project file
        match (string): pattern to use to limit imported groups (see Note 1)
        do_preedge (bool): whether to do pre-edge subtraction [True]
        do_bkg (bool): whether to do XAFS background subtraction [True]
        do_fft (bool): whether to do XAFS Fast Fourier transform [True]
        use_hashkey (bool): whether to use Athena's hash key as the
                       group name instead of the Athena label [False]

    Returns:
        group of groups each named according to the label used by Athena.

    Notes:
        1. To limit the imported groups, use the pattern in `match`,
           using '*' to match 'all', '?' to match any single character,
           or [sequence] to match any of a sequence of letters.  The
           match will always be insensitive to case.
        2. do_preedge, do_bkg, and do_fft will attempt to reproduce the
           pre-edge, background subtraction, and FFT from Athena by
           using the parameters saved in the project file.
        3. use_hashkey=True will name groups from the internal
           5-character string used by Athena, instead of the group label.

    Example:
        1. read in all groups from a project file:
           cr_data = read_athena('My Cr Project.prj')

        2. read in only the "merged" data from a Project, and don't do FFT:
           zn_data = read_athena('Zn on Stuff.prj', match='*merge*',
                                 do_fft=False)
    """
    from larch_plugins.xafs import pre_edge, autobk, xftf
    if not os.path.exists(filename):
        raise IOError("%s '%s': cannot find file" % (ERR_MSG, filename))

    try:
        fh = GzipFile(filename)
        lines = [bytes2str(t) for t in fh.readlines()]
        fh.close()
    except Exception:
        raise ValueError("%s '%s': invalid gzip file" % (ERR_MSG, filename))

    athenagroups = []
    dat = {"name": ""}
    Athena_version = None
    vline = lines.pop(0)
    if "Athena project file -- Demeter version" not in vline:
        raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))

    major, minor, fix = "0", "0", "0"
    try:
        vs = vline.split("Athena project file -- Demeter version")[1]
        major, minor, fix = vs.split(".")
    except Exception:
        raise ValueError("%s '%s': cannot read version" % (ERR_MSG, filename))

    # require version >= 0.9.21.  The old test
    # (int(minor) < 9 or int(fix[:2]) < 21) wrongly rejected any
    # release whose fix field was < 21 regardless of the minor
    # version; a tuple comparison orders (minor, fix) correctly.
    if (int(minor), int(fix[:2])) < (9, 21):
        raise ValueError("%s '%s': file is too old to read" % (ERR_MSG, filename))

    for t in lines:
        # skip comments, blank-ish lines, and Perl 'undef' entries
        # (the 'undef' guard and 'signal' arrays match the sibling
        # implementation of this reader)
        if t.startswith("#") or len(t) < 2 or "undef" in t:
            continue
        key = t.split(" ")[0].strip()
        key = key.replace("$", "").replace("@", "")
        if key == "old_group":
            dat["name"] = perl2json(t)
        elif key == "[record]":
            # '[record]' terminates one Athena group
            athenagroups.append(dat)
            dat = {"name": ""}
        elif key == "args":
            dat["args"] = perl2json(t)
        elif key in ("x", "y", "i0", "signal"):
            dat[key] = np.array([float(x) for x in perl2json(t)])

    if match is not None:
        match = match.lower()

    out = Group()
    out.__doc__ = """XAFS Data from Athena Project File %s""" % (filename)
    for dat in athenagroups:
        label = dat.get("name", "unknown")
        this = Group(
            athena_id=label, energy=dat["x"], mu=dat["y"],
            bkg_params=Group(), fft_params=Group(),
            athena_params=Group()
        )
        if "i0" in dat:
            this.i0 = dat["i0"]
        if "args" in dat:
            # args is a flat list of alternating key/value pairs
            for i in range(len(dat["args"]) // 2):
                key = dat["args"][2 * i]
                val = dat["args"][2 * i + 1]
                if key.startswith("bkg_"):
                    setattr(this.bkg_params, key[4:], val)
                elif key.startswith("fft_"):
                    setattr(this.fft_params, key[4:], val)
                elif key == "label":
                    this.label = val
                    if not use_hashkey:
                        label = this.label
                else:
                    setattr(this.athena_params, key, val)
        this.__doc__ = """Athena Group Name %s (key='%s')""" % (label, dat["name"])
        olabel = fix_varname(label)
        if match is not None:
            if not fnmatch(olabel.lower(), match):
                continue

        if do_preedge or do_bkg:
            pars = this.bkg_params
            pre_edge(
                this, _larch=_larch, e0=float(pars.e0),
                pre1=float(pars.pre1), pre2=float(pars.pre2),
                norm1=float(pars.nor1), norm2=float(pars.nor2),
                nnorm=float(pars.nnorm) - 1,
                make_flat=bool(pars.flatten),
            )
            if do_bkg and hasattr(pars, "rbkg"):
                autobk(
                    this, _larch=_larch, e0=float(pars.e0),
                    rbkg=float(pars.rbkg), kmin=float(pars.spl1),
                    kmax=float(pars.spl2), kweight=float(pars.kw),
                    dk=float(pars.dk), clamp_lo=float(pars.clamp1),
                    clamp_hi=float(pars.clamp2),
                )
        if do_fft:
            pars = this.fft_params
            kweight = 2
            if hasattr(pars, "kw"):
                kweight = float(pars.kw)
            xftf(
                this, _larch=_larch, kmin=float(pars.kmin),
                kmax=float(pars.kmax), kweight=kweight,
                window=pars.kwindow, dk=float(pars.dk),
            )
        setattr(out, olabel, this)
    return out
def gsexdi_deadtime_correct(fname, channelname, subdir='DT_Corrected', bad=None, _larch=None):
    """convert GSE XDI fluorescence XAFS scans to dead time corrected files

    Reads the GSE XDI scan, extracts the requested fluorescence channel,
    computes mufluor (and mutrans when a transmission chamber exists),
    then writes an XDI-format text file to `subdir`.

    Args:
        fname: path of the GSE XDI scan file to convert.
        channelname: fluorescence channel to extract (matched
            case-insensitively against array labels, spaces -> '_').
        subdir: output directory for the corrected file (created if
            missing).
        bad: passed through to read_gsexdi (presumably bad-pixel/channel
            info -- confirm against read_gsexdi).
        _larch: larch interpreter instance passed to readers.

    Returns:
        A Group with the extracted/corrected arrays, or None when the
        file is not a GSE XDI scan, cannot be read, or the channel is
        not found.
    """
    if not is_GSEXDI(fname):
        print("'%s' is not a GSE XDI scan file\n" % fname)
        return
    out = Group()
    out.orig_filename = fname
    try:
        xdi = read_gsexdi(fname, bad=bad, _larch=_larch)
    except:
        print('Could not read XDI file ', fname)
        return

    # copy scalar/vector data straight through when present
    for attr in ('energy', 'i0', 'i1', 'i2', 'tscaler',
                 'counttime', 'scan_start_time', 'scan_end_time'):
        if hasattr(xdi, attr):
            setattr(out, attr, getattr(xdi, attr))

    # some scans may not record separate counttime, but TSCALER
    # is clock ticks for a 50MHz clock
    if not hasattr(out, 'counttime'):
        out.counttime = xdi.tscaler * 2.e-8

    # prefer the monochromator readback energy when recorded
    if hasattr(xdi, 'energy_readback'):
        out.energy = xdi.energy_readback

    # find the first array label matching the requested channel
    arrname = None
    channelname = channelname.lower().replace(' ', '_')
    for arr in xdi.array_labels:
        if arr.lower().startswith(channelname):
            arrname = arr
            break
    if arrname is None:
        print('Cannot find Channel %s in file %s ' % (channelname, fname))
        return

    out.ifluor = getattr(xdi, arrname)
    out.ifluor_raw = getattr(xdi, arrname)
    # '<name>_nodtc' is the uncorrected counterpart, when recorded
    arrname_raw = arrname + '_nodtc'
    if arrname_raw in xdi.array_labels:
        out.ifluor_raw = getattr(xdi, arrname_raw)

    out.mufluor = out.ifluor / out.i0
    TINY = 2.e-20
    if hasattr(out, 'i1') or hasattr(out, 'itrans'):
        i1 = getattr(out, 'i1', None)
        if i1 is None:
            i1 = getattr(out, 'itrans', None)
        if i1 is not None:
            # clamp NaNs and non-positive ratios so log() is defined
            i1[np.isnan(i1)] = TINY
            i1 = i1 / out.i0
            i1[np.where(i1<TINY)] = TINY
            out.mutrans = -np.log(i1)

    npts = len(out.i0)
    # first column: raw data unless it is energy (then use the possibly
    # readback-corrected energy, labeled in eV)
    col0_name = xdi.array_labels[0].lower()
    col0_units = None
    col0_data = xdi.data[0, :]
    if col0_name == 'energy':
        col0_data = out.energy
        col0_units = 'eV'

    buff = ['# XDI/1.0 GSE/1.0']

    # collect metadata families from the XDI header, skipping a few keys
    header = OrderedDict()
    hgroups = ['beamline', 'facility', 'mono', 'undulator', 'detectors',
               'scaler', 'detectorstage', 'samplestage', 'scan', 'scanparameters']
    hskip = ['scanparameters.end', 'scanparameters.start']
    for agroup in hgroups:
        attrs = xdi._xdi.attrs.get(agroup, {})
        if agroup == 'mono':
            agroup = 'monochromator'
        header[agroup] = OrderedDict()
        for sname in sorted(attrs.keys()):
            if "%s.%s" %( agroup, sname) not in hskip:
                header[agroup][sname] = attrs[sname]

    # fixed facility/beamline/detector descriptions for GSECARS 13-ID-E
    header['facility']['name'] = 'APS'
    header['facility']['xray_source'] = '3.6 cm undulator'
    header['beamline']['name'] = '13-ID-E, GSECARS'

    header['detectors']['i0'] = '20cm ion chamber, He'
    header['detectors']['ifluor'] = 'Si SDD Vortex ME-4, 4 elements'
    header['detectors']['ifluor_electronics'] = 'Quantum Xspress3 3.1.10'

    # infer the mono crystal cut from its d-spacing
    mono_cut = 'Si(111)'
    if xdi.mono_dspacing < 2:
        mono_cut = 'Si(311)'
    header['monochromator']['name'] = "%s, LN2 cooled" % mono_cut

    # output columns, in order: name -> (source attribute, units/extra note)
    out_arrays = OrderedDict()
    out_arrays[col0_name] = (col0_name, col0_units)
    out_arrays['mufluor'] = ('mufluor', None)
    if hasattr(out, 'i1'):
        out_arrays['mutrans'] = ('mutrans', None)
    out_arrays['ifluor'] = ('ifluor', '# deadtime-corrected')
    out_arrays['ifluor_raw'] = ('ifluor_raw', '# not deadtime-corrected')
    out_arrays['i0'] = ('i0', None)
    if hasattr(out, 'i1'):
        out_arrays['itrans'] = ('i1', None)
    if hasattr(out, 'i2'):
        out_arrays['irefer'] = ('i2', None)
    if hasattr(out, 'counttime'):
        out_arrays['counttime'] = ('counttime', 'sec')

    # build the '# Column.N' header lines and the aligned label row
    arrlabel = []
    for iarr, aname in enumerate(out_arrays):
        lab = "%12s " % aname
        if iarr == 0:
            # first column is one char narrower: the row starts with '#'
            lab = "%11s " % aname
        arrlabel.append(lab)
        extra = out_arrays[aname][1]
        if extra is None:
            extra = ''
        buff.append("# Column.%i: %s %s" % (iarr+1, aname, extra))
    arrlabel = '#%s' % (' '.join(arrlabel))
    ncol = len(out_arrays)

    # metadata lines: '# Family.key: value'
    for family, fval in header.items():
        for attr, val in fval.items():
            buff.append("# %s.%s: %s" % (family.title(), attr, val))

    buff.append("# ///")
    for comment in bytes2str(xdi._xdi.comments).split('\n'):
        c = comment.strip()
        if len(c) > 0:
            buff.append('# %s' % c)
    buff.extend(["# summed %s fluorescence data from %s" % (channelname, fname),
                 "# Dead-time correction applied",
                 "#"+ "-"*78, arrlabel])

    # data rows; column set must match out_arrays above
    efmt = "%11.4f"
    ffmt = "%13.7f"
    gfmt = "%13.7g"
    for i in range(npts):
        dline = ["", efmt % col0_data[i], ffmt % out.mufluor[i]]
        if hasattr(out, 'i1'):
            dline.append(ffmt % out.mutrans[i])
        dline.extend([gfmt % out.ifluor[i],
                      gfmt % out.ifluor_raw[i],
                      gfmt % out.i0[i]])
        if hasattr(out, 'i1'):
            dline.append(gfmt % out.i1[i])
        if hasattr(out, 'i2'):
            dline.append(gfmt % out.i2[i])
        if hasattr(out, 'counttime'):
            dline.append(gfmt % out.counttime[i])
        buff.append(" ".join(dline))

    # build output name: strip a leading '../', turn dots into
    # underscores, append '.dat', place in subdir
    ofile = fname[:]
    if ofile.startswith('..'):
        ofile = ofile[3:]
    ofile = ofile.replace('.', '_') + '.dat'
    ofile = os.path.join(subdir, ofile)
    if not os.path.exists(subdir):
        os.mkdir(subdir)
    try:
        fout = open(ofile, 'w')
        fout.write("\n".join(buff))
        fout.close()
        print("wrote %s, npts=%i, channel='%s'" % (ofile, npts, channelname))
    except:
        print("could not open / write to output file %s" % ofile)
    return out
def read(self, filename=None):
    """read validate and parse an XDI datafile into python structures

    Calls the C XDI library (via ctypes) to parse the file, then copies
    all struct fields, array labels/units, metadata, and data arrays
    into attributes on this object, cleaning up the C-side allocation
    when done.

    Args:
        filename: file to read; defaults to self.filename.

    Raises:
        ValueError: when the C library reports a read/validation error.
    """
    if filename is None and self.filename is not None:
        filename = self.filename
    pxdi = pointer(XDIFileStruct())
    self.status = self.xdilib.XDI_readfile(six.b(filename), pxdi)

    if self.status < 0:
        # negative status: fetch the library's message, free the C
        # struct, and raise
        msg = bytes2str(self.xdilib.XDI_errorstring(self.status))
        self.xdilib.XDI_cleanup(pxdi, self.status)
        msg = 'Error reading XDIFile %s\n%s' % (filename, msg)
        raise ValueError(msg)

    xdi = pxdi.contents
    # mirror every C struct field as an attribute on self
    for attr in dict(xdi._fields_):
        setattr(self, attr, getattr(xdi, attr))

    self.array_labels = tostrlist(xdi.array_labels, self.narrays)
    # user-supplied labels override the leading file labels
    if self.user_labels is not None:
        ulab = self.user_labels.replace(',', ' ')
        ulabs = [l.strip() for l in ulab.split()]
        self.array_labels[:len(ulabs)] = ulabs

    # units may carry a '||'-separated PV address after the unit string
    arr_units = tostrlist(xdi.array_units, self.narrays)
    self.array_units = []
    self.array_addrs = []
    for unit in arr_units:
        addr = ''
        if '||' in unit:
            unit, addr = [x.strip() for x in unit.split('||', 1)]
        self.array_units.append(unit)
        self.array_addrs.append(addr)

    # metadata arrives as three parallel lists: family, keyword, value
    mfams = tostrlist(xdi.meta_families, self.nmetadata)
    mkeys = tostrlist(xdi.meta_keywords, self.nmetadata)
    mvals = tostrlist(xdi.meta_values, self.nmetadata)
    self.attrs = {}
    for fam, key, val in zip(mfams, mkeys, mvals):
        fam = fam.lower()
        key = key.lower()
        if fam not in self.attrs:
            self.attrs[fam] = {}
        self.attrs[fam][key] = val

    # copy the C data arrays: xdi.array is an array of pointers, each
    # pointing at npts doubles
    parrays = (xdi.narrays*c_void_p).from_address(xdi.array)[:]
    self.data = [(xdi.npts*c_double).from_address(p)[:] for p in parrays]

    # outer-scan arrays are only populated for multi-dimensional scans
    nout = xdi.nouter
    outer, breaks = [], []
    if nout > 1:
        outer = (nout*c_double).from_address(xdi.outer_array)[:]
        breaks = (nout*c_long).from_address(xdi.outer_breakpts)[:]
    # replace the raw ctypes fields with numpy arrays
    for attr in ('outer_array', 'outer_breakpts', 'nouter'):
        delattr(self, attr)
    self.outer_array = array(outer)
    self.outer_breakpts = array(breaks)

    self.data = array(self.data)
    self.data.shape = (self.narrays, self.npts)
    self._assign_arrays()
    # drop raw C fields that are now represented in python structures
    for attr in ('nmetadata', 'narray_labels', 'meta_families',
                 'meta_keywords', 'meta_values', 'array'):
        delattr(self, attr)
    # free the C-side allocation (0 = success)
    self.xdilib.XDI_cleanup(pxdi, 0)