def load(self, warn=True):
    '''
    Load the file via the Java loader while suppressing anything the
    loader writes to the Java error stream.

    warn -- if True (default), passed through to DataHolder

    Returns a DataHolder object

    Raises io_exception if the number of dataset names does not match
    the number of datasets.
    '''
    # capture all error messages
    oldErr = _system.err
    _system.setErr(_pstream(_NoOutputStream()))
    try:
        jdh = self._loadFile()
    finally:
        # always restore the original error stream, even on failure
        _system.setErr(oldErr)
    data = asDatasetList(jdh.getList())
    names = jdh.getNames()
    basenames = []
    from os import path as _path
    for n in names:  # remove bits of path so sanitising works
        if _path.exists(n):
            basenames.append(_path.basename(n))
        else:
            basenames.append(n)
    if len(data) != len(basenames):
        # fixed: was Python-2-only "raise exc, msg" statement syntax;
        # the call form works on both Python 2 and 3 and matches the
        # other loaders in this file
        raise io_exception("Number of names does not match number of datasets")
    metadata = None
    if self.load_metadata:
        meta = jdh.getMetadata()
        if meta:
            mnames = meta.metaNames
            if mnames:
                metadata = [(k, meta.getMetaValue(k)) for k in mnames]
    # list() keeps behaviour identical under Python 3 where zip is lazy
    # (matches the Py4J loader's usage elsewhere in this file)
    return DataHolder(list(zip(basenames, data)), metadata, warn)
def createSimpleScanFileHolderAndScannables():
    '''Build four mock scannables and a ten-point test DataHolder.

    Returns the tuple (w, x, y, z, testData).
    '''
    w = MockScannable('w', ['wi1', 'wi2'], ['we'])
    x = MockScannable('x', ['x'], [])
    y = MockScannable('y', ['yi'], ['ypath', 'ye'])
    y.outputformats = ['%.2f', '%s', '%.2f']
    z = MockScannable('z', [], ['ze1', 'ze2'])
    # Using DataHolder to replace deprecated ScanFileHolder
    testData = DataHolder()
    columns = [
        ('x', [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]),
        ('yi', [0, .1, .2, .3, .4, .5, .3, .2, .1, 0.]),
        ('ye', [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.3, 1.2, 1.1, 1.0]),
        ('ze1', [0., 10., 20., 30., 40., 50., 60., 70., 80., 90.]),
        ('ze2', [1., 11., 21., 31., 41., 51., 61., 71., 81., 91.]),
        ('wi1', [100., 101., 102., 103., 104., 105., 106., 107., 108., 109.]),
        # NOTE(review): 'wi2' holds 11 values while every other column holds
        # 10 — confirm this asymmetry is intended by the tests using it
        ('wi2', [100., 101., 102., 103., 104., 105., 104., 103., 102., 101., 100.]),
        ('we', [100., 100.1, 100.2, 100.3, 100.4, 100.5, 100.3, 100.2, 100.1, 100.0]),
    ]
    for key, values in columns:
        testData[key] = values
    return w, x, y, z, testData
def load(self, warn=True):
    '''
    Load the file through the Java LoaderFactory reached over a Py4J
    gateway, then convert the result to Python objects.

    warn -- if True (default), passed through to DataHolder

    Returns a DataHolder object

    Raises io_exception if no Py4J gateway is available or the number
    of names does not match the number of datasets.
    '''
    # py4j gymnastics to get access to Java LoaderFactory
    from . import py4jutils as utils  #@UnresolvedImport
    try:
        java = utils.get_gateway().jvm
    except Exception:
        # fixed: was a bare "except:" which also swallowed
        # KeyboardInterrupt/SystemExit; only real errors should be
        # translated into io_exception
        raise io_exception("No Py4J gateway so cannot use Java loaders")
    loader_factory = java.uk.ac.diamond.scisoft.analysis.io.LoaderFactory
    jdh = loader_factory.getData(self.name, self.load_metadata, None)
    # convert to Python
    data = utils.convert_datasets(jdh.getList())
    names = jdh.getNames()
    basenames = []
    from os import path as _path
    for n in names:  # remove bits of path so sanitising works
        if _path.exists(n):
            basenames.append(_path.basename(n))
        else:
            basenames.append(n)
    if len(data) != len(basenames):
        raise io_exception("Number of names does not match number of datasets")
    metadata = None
    if self.load_metadata:
        meta = jdh.getMetadata()
        if meta:
            mnames = meta.getMetaNames()
            if mnames:
                mnames = java.java.util.ArrayList(mnames)  # make it iterable
                metadata = [(k, meta.getMetaValue(k)) for k in mnames if k is not None]
    return DataHolder(list(zip(basenames, data)), metadata, warn)
def load(self, warn=True):
    '''
    Load a multi-page TIFF via the tifffile module, one dataset per page.

    warn -- if True (default), passed through to DataHolder

    Returns a DataHolder with datasets named "image-01", "image-02", ...

    Raises NotImplementedError when the tifffile module is unavailable.
    '''
    if _tf is None:
        raise NotImplementedError
    data = []
    if self.load_metadata:
        metadata = dict()
    else:
        metadata = None
    t = _tf.TiffFile(self.name)
    try:
        for i, p in enumerate(t.pages):
            d = p.asarray()
            if p.photometric == p.photometric.RGB:
                # convert to an rgb dataset
                d = _core.asarray(d, dtype=_core.int16).view(_RGB)
                if not self.ascolour:
                    d = d.get_grey()
            data.append(("image-%02d" % (i+1,), d))
            if self.load_metadata:
                # NOTE(review): same-named tags on later pages overwrite
                # earlier pages' values — confirm this is intended
                for k, v in p.tags.items():
                    metadata[k] = v.value
    finally:
        # fixed: the TiffFile handle was never closed (file handle leak)
        t.close()
    # fixed: removed dead "if len(data) < 1: pass" branch that did nothing
    return DataHolder(data, metadata, warn)
def load(self, warn=True):
    '''
    Load a NumPy binary file as a single dataset.

    warn -- if True (default), passed through to DataHolder

    Returns a DataHolder holding one (name, array) pair, where the name
    is the file's basename when the path exists on disk.
    '''
    import numpy as np  #@UnresolvedImport
    import os.path as _path
    arr = np.load(self.name)
    # use just the basename so sanitising works on a plain name
    label = _path.basename(self.name) if _path.exists(self.name) else self.name
    return DataHolder([(label, arr)], warn=warn)
def load(self, warn=True):
    '''
    warn -- if True (default), print warnings about key names

    Returns a DataHolder object
    '''
    f = open(self.name)
    try:
        # Phase 1: read until the first numeric row, collecting "#" header
        # lines (when metadata loading is enabled) along the way
        hdrtext = []
        while True:
            l = f.readline()
            if not l:
                raise io_exception("End of file reached unexpectedly")
            ls = l.strip()
            if ls:
                if _begin_number.match(ls):
                    # first data row found; falls through to Phase 2
                    break
                if ls.startswith("#"):
                    if self.load_metadata:
                        # keep the comment's content without the "#" marker
                        ls = ls[1:].strip()
                        if ls:
                            hdrtext.append(ls)
        else:
            # NOTE(review): while/else on a "while True" loop — this clause
            # only runs if the loop ends without break, which cannot happen
            # here, so this raise is unreachable
            raise io_exception("No end tag found")
        # the last header line (if any) is taken as the column headings
        colstext = hdrtext.pop().strip() if hdrtext else None
        # "ls" still holds the first numeric row from Phase 1
        datatext = [ls]
        # Phase 2: read the rest of the file, sorting lines into further
        # header text or numeric data rows; other lines are skipped
        while True:
            l = f.readline()
            if l:
                ls = l.strip()
                if ls.startswith("#"):
                    if self.load_metadata:
                        ls = ls[1:].strip()
                        if ls:
                            hdrtext.append(ls)
                elif _begin_number.match(ls):
                    datatext.append(ls)
            else:
                # EOF
                break
        data = self._parse_data(colstext, datatext, warn)
        metadata = self._parse_head(hdrtext, warn)
        return DataHolder(data, metadata, warn)
    finally:
        f.close()
def createSimpleScanFileHolderWithOneValueAndScannables():
    '''Build four mock scannables and a single-point test DataHolder.

    Returns the tuple (w, x, y, z, testData).
    '''
    w = MockScannable('w', ['wi1', 'wi2'], ['we'])
    x = MockScannable('x', ['x'], [])
    y = MockScannable('y', ['yi'], ['ye'])
    z = MockScannable('z', [], ['ze1', 'ze2'])
    # Using DataHolder to replace deprecated ScanFileHolder
    testData = DataHolder()
    # one value per column, counting up in field order
    single_values = (
        ('x', 0.), ('yi', 1.), ('ye', 2.), ('ze1', 3.),
        ('ze2', 4.), ('wi1', 5.), ('wi2', 6.), ('we', 7.),
    )
    for key, value in single_values:
        testData[key] = [value]
    return w, x, y, z, testData
def load(self, warn=True):
    '''
    Load an image file via PIL as a single dataset.

    warn -- if True (default), passed through to DataHolder

    Returns a DataHolder holding one (name, dataset) pair; RGB images are
    kept as colour only when self.ascolour is set, otherwise converted to
    greyscale.

    Raises NotImplementedError when PIL is unavailable.
    '''
    if _im is None:
        raise NotImplementedError
    image = _im.open(self.name)
    if image.mode != 'RGB':
        dataset = _core.asarray(image)
    elif self.ascolour:
        # convert to an rgb dataset
        dataset = _core.asarray(image, dtype=_core.int16).view(_RGB)
    else:
        # flatten colour to greyscale ('L') before conversion
        dataset = _core.asarray(image.convert('L'))
    import os.path as _path
    label = self.name
    if _path.exists(label):
        label = _path.basename(label)
    return DataHolder([(label, dataset)], warn=warn)
def load(self, warn=True):
    '''
    Walk every datablock/category/row/column of a CBF file via the _cbf
    handle, sorting array values into data and everything else into
    metadata.

    warn -- if True (default), passed through to DataHolder

    Returns a DataHolder object
    '''
    data = []
    metadata = []
    try:
        h = _cbf.cbf_handle_struct()
        h.read_widefile(self.name, _cbf.MSG_DIGEST)
        h.rewind_datablock()
        for nd in range(h.count_datablocks()):
            h.select_datablock(nd)
            # db_name = h.datablock_name()
            # print "DBl: %d, %s" % (nd, db_name)
            h.rewind_category()
            for nc in range(h.count_categories()):
                h.select_category(nc)
                # ct_name = h.category_name()
                # print " Cat: %d, %s" % (nc, ct_name)
                # first pass over the columns just records their names
                h.rewind_column()
                colnames = []
                for nv in range(h.count_columns()):
                    h.select_column(nv)
                    cl_name = h.column_name()
                    colnames.append(cl_name)
                    # print "  Col: %d, %s" % (nv, cl_name)
                # second pass reads every cell, row by row
                h.rewind_row()
                for nh in range(h.count_rows()):
                    h.select_row(nh)
                    h.rewind_column()
                    for nv in range(h.count_columns()):
                        h.select_column(nv)
                        v = self.getvalue(h)
                        # print "   %d %d: " % (nh, nv), v
                        # entries are named "<column>-<row>"
                        item = "%s-%d" % (colnames[nv], nh), v
                        if isinstance(v, numpy.ndarray):
                            # array-valued cells become datasets
                            data.append(item)
                        else:
                            # scalar/string cells become metadata
                            metadata.append(item)
    finally:
        # NOTE(review): if cbf_handle_struct() itself raises, "h" is unbound
        # here and this del raises NameError, masking the original error —
        # confirm against _cbf usage
        del(h)
    return DataHolder(data, metadata, warn)
def load(self, warn=True):
    '''
    warn -- if True (default), print warnings about key names

    Returns a DataHolder object
    '''
    f = open(self.name)
    try:
        # Phase 1: look for an "&SRS"/"&DLS" header marker or, failing
        # that, the first numeric row (a headerless file)
        header = False
        while True:
            l = f.readline()
            if not l:
                raise io_exception("End of file reached unexpectedly")
            ls = l.lstrip()
            if ls.startswith("&SRS") or ls.startswith("&DLS"):
                header = True
                break
            if _begin_number.match(ls):
                break
        else:
            # NOTE(review): while/else on a "while True" loop — this clause
            # only runs if the loop ends without break, which cannot happen
            # here, so this raise is unreachable
            raise io_exception("Not an SRS file")
        srstext = []
        if header:
            # Phase 2: collect metadata lines up to the "&END" tag
            while True:
                l = f.readline()
                if not l:
                    raise io_exception("End of file reached unexpectedly")
                ls = l.strip()
                if ls:
                    if ls.startswith("&END"):
                        break
                    if self.load_metadata:
                        srstext.append(ls)
            else:
                # NOTE(review): unreachable for the same while/else reason
                raise io_exception("No end tag found")
            # the line straight after &END holds the column headings
            l = f.readline()
            if not l:
                raise io_exception("End of file reached unexpectedly")
            colstext = l.strip()
            datatext = []
        else:
            # headerless file: the first numeric row was already read
            # into "ls" by Phase 1
            colstext = None
            datatext = [ls]
        # Phase 3: read numeric rows until EOF or a non-numeric line
        while True:
            l = f.readline()
            if not l:
                break
            ls = l.lstrip()
            if _begin_number.match(ls):
                datatext.append(ls)
            else:
                break
        data = SRSLoader._parse_data(colstext, datatext, warn)
        metadata = SRSLoader._parse_head(srstext, warn)
        return DataHolder(data, metadata, warn)
    finally:
        f.close()