def _load(self, filename, *args, **kargs): """Leeds MOKE file loader routine. Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ if filename is None or not filename: self.get_filename("r") else: self.filename = filename with io.open(self.filename, mode="rb") as f: line = bytes2str(f.readline()).strip() if line != "#Leeds CM Physics MOKE": raise Core.StonerLoadError( "Not a Core.DataFile from the Leeds MOKE") while line.startswith("#") or line == "": parts = line.split(":") if len(parts) > 1: key = parts[0][1:] data = ":".join(parts[1:]).strip() self[key] = data line = bytes2str(f.readline()).strip() column_headers = [x.strip() for x in line.split(",")] self.data = np.genfromtxt(f, delimiter=",") self.setas = "xy.de" self.column_headers = column_headers return self
def _load(self, filename=None, *args, **kargs): """Sheffield Focussed MOKE file loader routine. Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ if filename is None or not filename: self.get_filename("r") else: self.filename = filename with io.open(self.filename, mode="rb") as f: try: value = [float(x.strip()) for x in bytes2str(f.readline()).split("\t")] except Exception: f.close() raise Core.StonerLoadError("Not an FMOKE file?") label = [x.strip() for x in bytes2str(f.readline()).split("\t")] if label[0] != "Header:": f.close() raise Core.StonerLoadError("Not a Focussed MOKE file !") del label[0] for k, v in zip(label, value): self.metadata[k] = v # Create metatdata from first 2 lines column_headers = [x.strip() for x in bytes2str(f.readline()).split("\t")] self.data = np.genfromtxt(f, dtype="float", delimiter="\t", invalid_raise=False) self.column_headers = column_headers return self
def _load(self, filename=None, *args, **kargs): """Leeds MOKE file loader routine. Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ if filename is None or not filename: self.get_filename("r") else: self.filename = filename with io.open(self.filename, mode="rb") as f: line = bytes2str(f.readline()).strip() if line != "#Leeds CM Physics MOKE": raise Core.StonerLoadError("Not a Core.DataFile from the Leeds MOKE") while line.startswith("#") or line == "": parts = line.split(":") if len(parts) > 1: key = parts[0][1:] data = ":".join(parts[1:]).strip() self[key] = data line = bytes2str(f.readline()).strip() column_headers = [x.strip() for x in line.split(",")] self.data = np.genfromtxt(f, delimiter=",") self.setas = "xy.de" self.column_headers = column_headers return self
def _load(self, filename, *args, **kargs): """Sheffield Focussed MOKE file loader routine. Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ if filename is None or not filename: self.get_filename("r") else: self.filename = filename with io.open(self.filename, mode="rb") as f: try: value = [float(x.strip()) for x in bytes2str(f.readline()).split("\t")] except Exception: f.close() raise Core.StonerLoadError("Not an FMOKE file?") label = [x.strip() for x in bytes2str(f.readline()).split("\t")] if label[0] != "Header:": f.close() raise Core.StonerLoadError("Not a Focussed MOKE file !") del label[0] for k, v in zip(label, value): self.metadata[k] = v # Create metatdata from first 2 lines column_headers = [x.strip() for x in bytes2str(f.readline()).split("\t")] self.data = np.genfromtxt(f, dtype="float", delimiter="\t", invalid_raise=False) self.column_headers = column_headers return self
def _load(self, filename, *args, **kargs): """Loads data from a hdf5 file. Args: h5file (string or h5py.Group): Either a string or an h5py Group object to load data from Returns: itself after having loaded the data """ if filename is None or not filename: self.get_filename("r") filename = self.filename else: self.filename = filename if isinstance(filename, string_types ): # We got a string, so we'll treat it like a file... f = _open_filename(filename) elif isinstance(filename, h5py.File) or isinstance( filename, h5py.Group): f = filename else: _raise_error( f, message= f"Couldn't interpret {filename} as a valid HDF5 file or group or filename" ) if "type" not in f.attrs: _raise_error( f, message= f"HDF5 Group does not specify the type attribute used to check we can load it." ) typ = bytes2str(f.attrs["type"]) if typ != self.__class__.__name__ and "module" not in f.attrs: _raise_error( f, message= f"HDF5 Group is not a {self.__class__.__name__} and does not specify a module to use to load.", ) loader = None if typ == self.__class__.__name__: loader = getattr(self.__class__, "read_HDF") else: mod = importlib.import_module(bytes2str(f.attrs["module"])) cls = getattr(mod, typ) loader = getattr(cls, "read_JDF") if loader is None: _raise_error( f, message= "Could not et loader for {bytes2str(f.attrs['module'])}.{typ}") return loader(f, *args, **kargs)
def _load(self, filename=None, *args, **kargs): """Data loader function for 340 files.""" if filename is None or not filename: self.get_filename("r") else: self.filename = filename with io.open(self.filename, "rb") as data: keys = [] vals = [] for line in data: line = bytes2str(line) if line.strip() == "": break parts = [p.strip() for p in line.split(":")] if len(parts) != 2: raise Core.StonerLoadError( "Header doesn't contain two parts at {}".format( line.strip())) else: keys.append(parts[0]) vals.append(parts[1]) else: raise Core.StonerLoadError("Overan the end of the file") if keys != [ "Sensor Model", "Serial Number", "Data Format", "SetPoint Limit", "Temperature coefficient", "Number of Breakpoints", ]: raise Core.StonerLoadError( "Header did not contain recognised keys.") for (k, v) in zip(keys, vals): v = v.split()[0] self.metadata[k] = string_to_type(v) headers = bytes2str(next(data)).strip().split() column_headers = headers[1:] dat = np.genfromtxt(data) self.data = dat[:, 1:] self.column_headers = column_headers return self
def _load(self, filename=None, *args, **kargs): """Data loader function for 340 files.""" if filename is None or not filename: self.get_filename("r") else: self.filename = filename with io.open(self.filename, "rb") as data: keys = [] vals = [] for line in data: line = bytes2str(line) if line.strip() == "": break parts = [p.strip() for p in line.split(":")] if len(parts) != 2: raise Core.StonerLoadError("Header doesn't contain two parts at {}".format(line.strip())) else: keys.append(parts[0]) vals.append(parts[1]) else: raise Core.StonerLoadError("Overan the end of the file") if keys != [ "Sensor Model", "Serial Number", "Data Format", "SetPoint Limit", "Temperature coefficient", "Number of Breakpoints", ]: raise Core.StonerLoadError("Header did not contain recognised keys.") for (k, v) in zip(keys, vals): v = v.split()[0] self.metadata[k] = self.metadata.string_to_type(v) headers = bytes2str(next(data)).strip().split() column_headers = headers[1:] dat = np.genfromtxt(data) self.data = dat[:, 1:] self.column_headers = column_headers return self
def _extract(self, archive, member):
    """Responsible for actually reading the zip file archive.

    Args:
        archive (zipfile.ZipFile): An open zip archive.
        member (string): The name of one member of the zip file.

    Returns:
        A DataFile-like instance.
    """
    tmp = DataFile()
    info = archive.getinfo(member)
    data = bytes2str(archive.read(info))  # In Python 3 archive.read() returns bytes
    self.__init__(tmp << data)  # The << operator parses the raw text into the temporary DataFile
    self.filename = path.join(archive.filename, member)
    return self
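# _extract() above leans on the enclosing DataFile class (the "<<" operator and the
# __init__ re-use).  The underlying zipfile calls it wraps are just the following --
# the archive name is hypothetical:
import zipfile
from os import path

with zipfile.ZipFile("results_bundle.zip", "r") as archive:
    member = archive.namelist()[0]                       # pick the first member
    info = archive.getinfo(member)
    text = archive.read(info).decode("utf-8", "ignore")  # bytes -> str, as bytes2str does
    pseudo_path = path.join(archive.filename, member)    # "archive.zip/member" style filename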
def _load(self, filename=None, *args, **kargs): """Reads an Rigaku ras file including handling the metadata nicely Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ from ast import literal_eval if filename is None or not filename: self.get_filename("rb") else: self.filename = filename sh = re.compile(r"^\*([^\s]+)\s+(.*)$") # Regexp to grab the keys ka = re.compile(r"(.*)\-(\d+)$") header = dict() i = 0 with io.open(self.filename, "rb") as f: for i, line in enumerate(f): line = bytes2str(line).strip() if i == 0 and line != "*RAS_DATA_START": raise Core.StonerLoadError("Not a Rigaku file!") if line == "*RAS_HEADER_START": break i2 = None for i2, line in enumerate(f): line = bytes2str(line).strip() m = sh.match(line) if m: key = m.groups()[0].lower().replace("_", ".") try: value = m.groups()[1].decode("utf-8", "ignore") except AttributeError: value = m.groups()[1] header[key] = value if "*RAS_INT_START" in line: break keys = list(header.keys()) keys.sort() for key in keys: m = ka.match(key) value = header[key].strip() try: newvalue = literal_eval(value.strip('"')) except Exception: newvalue = literal_eval(value) if m: key = m.groups()[0] if key in self.metadata and not (isinstance(self[key], (np.ndarray, list))): if isinstance(self[key], str): self[key] = list([self[key]]) else: self[key] = np.array(self[key]) if key not in self.metadata: if isinstance(newvalue, str): self[key] = list([newvalue]) else: self[key] = np.array([newvalue]) else: if isinstance(self[key][0], str) and isinstance(self[key], list): self[key].append(newvalue) else: self[key] = np.append(self[key], newvalue) else: self.metadata[key] = newvalue with io.open(self.filename, "rb") as data: self.data = np.genfromtxt( data, dtype="float", delimiter=" ", invalid_raise=False, comments="*", skip_header=i + i2 + 1 ) column_headers = ["Column" + str(i) for i in range(self.data.shape[1])] column_headers[0:2] = [self.metadata["meas.scan.unit.x"], self.metadata["meas.scan.unit.y"]] for key in self.metadata: if isinstance(self[key], list): self[key] = np.array(self[key]) self.setas = "xy" self.column_headers = column_headers return self
def _load(self, filename=None, *args, **kargs): """Reads an Rigaku ras file including handling the metadata nicely Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ from ast import literal_eval pos = 0 reopen = False filetype = io.IOBase if filename is None or not filename: self.get_filename("rb") elif isinstance(filename, filetype): self.filename = filename.name pos = filename.tell() reopen = True else: self.filename = filename sh = re.compile(r"^\*([^\s]+)\s+(.*)$") # Regexp to grab the keys ka = re.compile(r"(.*)\-(\d+)$") header = dict() i = 0 with io.open(self.filename, "rb") as f: f.seek(pos) for i, line in enumerate(f): line = bytes2str(line).strip() if pos == 0 and (i == 0 and line != "*RAS_DATA_START"): raise StonerLoadError("Not a Rigaku file!") if pos != 0 or line == "*RAS_HEADER_START": break for line in f: line = bytes2str(line).strip() m = sh.match(line) if m: key = m.groups()[0].lower().replace("_", ".") try: value = m.groups()[1].decode("utf-8", "ignore") except AttributeError: value = m.groups()[1] header[key] = value if "*RAS_INT_START" in line: break keys = list(header.keys()) keys.sort() for key in keys: m = ka.match(key) value = header[key].strip() try: newvalue = literal_eval(value.strip('"')) except Exception: newvalue = literal_eval(value) if newvalue == "-": newvalue = np.nan # trap for missing float value if m: key = m.groups()[0] idx = int(m.groups()[1]) if key in self.metadata and not (isinstance( self[key], (np.ndarray, list))): if isinstance(self[key], str): self[key] = list([self[key]]) if idx > 1: self[key].extend([""] * idx - 1) else: self[key] = np.array(self[key]) if idx > 1: self[key] = np.append( self[key], np.ones(idx - 1) * np.nan) if key not in self.metadata: if isinstance(newvalue, str): listval = [""] * (idx + 1) listval[idx] = newvalue self[key] = listval else: arrayval = np.ones(idx + 1) * np.nan arrayval = arrayval.astype(type(newvalue)) arrayval[idx] = newvalue self[key] = arrayval else: if isinstance(self[key][0], str) and isinstance( self[key], list): if len(self[key]) < idx + 1: self[key].extend([""] * (idx + 1 - len(self[key]))) self[key][idx] = newvalue else: if idx + 1 > self[key].size: self[key] = np.append( self[key], (np.ones(idx + 1 - self[key].size) * np.nan).astype(self[key].dtype)) try: self[key][idx] = newvalue except ValueError: pass else: self.metadata[key] = newvalue pos = f.tell() max_rows = 0 for max_rows, line in enumerate(f): line = bytes2str(line).strip() if "RAS_INT_END" in line: break endpos = f.tell() f.seek(pos) if max_rows > 0: self.data = np.genfromtxt(f, dtype="float", delimiter=" ", invalid_raise=False, comments="*", max_rows=max_rows) column_headers = [ "Column" + str(i) for i in range(self.data.shape[1]) ] column_headers[0:2] = [ self.metadata["meas.scan.unit.x"], self.metadata["meas.scan.unit.y"] ] for key in self.metadata: if isinstance(self[key], list): self[key] = np.array(self[key]) self.setas = "xy" self.column_headers = column_headers if reopen: filename.seek(endpos) self["_endpos"] = endpos return self
def _load(self, filename=None, *args, **kargs): """Reads an Rigaku ras file including handling the metadata nicely Args: filename (string or bool): File to load. If None then the existing filename is used, if False, then a file dialog will be used. Returns: A copy of the itself after loading the data. """ from ast import literal_eval if filename is None or not filename: self.get_filename("rb") else: self.filename = filename sh = re.compile(r"^\*([^\s]+)\s+(.*)$") # Regexp to grab the keys ka = re.compile(r"(.*)\-(\d+)$") header = dict() i = 0 with io.open(self.filename, "rb") as f: for i, line in enumerate(f): line = bytes2str(line).strip() if i == 0 and line != "*RAS_DATA_START": raise Core.StonerLoadError("Not a Rigaku file!") if line == "*RAS_HEADER_START": break i2 = None for i2, line in enumerate(f): line = bytes2str(line).strip() m = sh.match(line) if m: key = m.groups()[0].lower().replace("_", ".") try: value = m.groups()[1].decode("utf-8", "ignore") except AttributeError: value = m.groups()[1] header[key] = value if "*RAS_INT_START" in line: break keys = list(header.keys()) keys.sort() for key in keys: m = ka.match(key) value = header[key].strip() try: newvalue = literal_eval(value.strip('"')) except Exception: newvalue = literal_eval(value) if m: key = m.groups()[0] if key in self.metadata and not (isinstance( self[key], (np.ndarray, list))): if isinstance(self[key], str): self[key] = list([self[key]]) else: self[key] = np.array(self[key]) if key not in self.metadata: if isinstance(newvalue, str): self[key] = list([newvalue]) else: self[key] = np.array([newvalue]) else: if isinstance(self[key][0], str) and isinstance( self[key], list): self[key].append(newvalue) else: self[key] = np.append(self[key], newvalue) else: self.metadata[key] = newvalue with io.open(self.filename, "rb") as data: self.data = np.genfromtxt(data, dtype="float", delimiter=" ", invalid_raise=False, comments="*", skip_header=i + i2 + 1) column_headers = ["Column" + str(i) for i in range(self.data.shape[1])] column_headers[0:2] = [ self.metadata["meas.scan.unit.x"], self.metadata["meas.scan.unit.y"] ] for key in self.metadata: if isinstance(self[key], list): self[key] = np.array(self[key]) self.setas = "xy" self.column_headers = column_headers return self