Example #1
0
    def _load(self, filename, *args, **kargs):
        """Sheffield Focussed MOKE file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.

        Raises:
            Core.StonerLoadError: If the file does not parse as a Focussed MOKE file.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        with io.open(self.filename, mode="rb") as f:
            try:
                # First line is a tab separated list of numeric metadata values
                value = [float(x.strip()) for x in bytes2str(f.readline()).split("\t")]
            except (ValueError, TypeError):
                # No manual f.close() needed - the context manager closes the file on raise
                raise Core.StonerLoadError("Not an FMOKE file?")
            label = [x.strip() for x in bytes2str(f.readline()).split("\t")]
            if label[0] != "Header:":
                raise Core.StonerLoadError("Not a Focussed MOKE file !")
            del label[0]
            for k, v in zip(label, value):
                self.metadata[k] = v  # Create metadata from first 2 lines
            column_headers = [x.strip() for x in bytes2str(f.readline()).split("\t")]
            self.data = np.genfromtxt(f, dtype="float", delimiter="\t", invalid_raise=False)
            self.column_headers = column_headers
        return self
Example #2
0
        def _load(self, filename=None, *args, **kargs):
            """HyperSpy Loader file loader routine.

            Args:
                filename (string or bool): File to load. If None then the existing filename is used,
                    if False, then a file dialog will be used.

            Returns:
                A copy of itself after loading the data.

            Raises:
                Core.StonerLoadError: If HyperSpy cannot read the file or it is not a 2D signal.
            """
            if filename is None or not filename:
                self.get_filename("r")
            else:
                self.filename = filename
            # Let HyperSpy interpret the file - any failure means it isn't loadable by us
            try:
                signal = hs.load(self.filename)
            except Exception as e:  # Pretty generic error catcher
                raise Core.StonerLoadError(
                    "Not readable by HyperSpy error was {}".format(e))
            # Check the signal type *outside* the try block so this specific error is not
            # swallowed and re-wrapped by the generic handler above.
            if not isinstance(signal, hs.signals.Signal2D):
                raise Core.StonerLoadError(
                    "Not a 2D signal object - aborting!")
            self.data = signal.data
            self._unpack_meta("", signal.metadata.as_dictionary())
            self._unpack_axes(signal.axes_manager)

            return self
Example #3
0
    def _load(self, filename, *args, **kargs):
        """File loader for BirgeIV files.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.

        Raises:
            Core.StonerLoadError: If the file is not recognised as a BirgeIVFile.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        ix = 0
        with io.open(self.filename, "r", errors="ignore",
                     encoding="utf-8") as f:  # Read filename linewise
            # First line must carry a date stamp of the form d/m/yyyy
            if not re.compile(r"\d{1,2}/\d{1,2}/\d{4}").match(f.readline()):
                raise Core.StonerLoadError(
                    "Not a BirgeIVFile as no date on first line")
            data = f.readlines()
            # The last four lines of the file are fixed footer metadata entries
            expected = ["Vo(-))", "Vo(+))", "Ic(+)", "Ic(-)"]
            for l, m in zip(data[-4:], expected):
                if not l.startswith(m):
                    raise Core.StonerLoadError(
                        "Not a BirgeIVFile as wrong footer line")
                key = l[:len(m)]
                val = l[len(m):]
                if "STDEV" in val:  # Footer line also carries a STDEV metadata entry
                    ix2 = val.index("STDEV")
                    key2 = val[ix2:ix2 + 4 + len(key)]
                    val2 = val[ix2 + 4 + len(key):]
                    self.metadata[key2] = string_to_type(val2.strip())
                    val = val[:ix2]
                self.metadata[key] = string_to_type(val.strip())
            for ix, line in enumerate(
                    data):  # Scan through the lines to get metadata
                if ":" in line:
                    parts = line.split(":")
                    self.metadata[parts[0].strip()] = string_to_type(
                        parts[1].strip())
                elif "," in line:
                    for part in line.split(","):
                        parts = part.split(" ")
                        self.metadata[parts[0].strip()] = string_to_type(
                            parts[1].strip())
                elif line.startswith("H "):
                    self.metadata["H"] = string_to_type(
                        line.split(" ")[1].strip())
                else:  # First non-metadata line holds the column headers
                    headers = [x.strip() for x in line.split(" ")]
                    break
            else:
                raise Core.StonerLoadError("Oops ran off the end of the file!")
        # Must use self.filename here: the local *filename* argument may be None/False
        # when the file was selected via the dialog in get_filename() above.
        self.data = np.genfromtxt(self.filename, skip_header=ix + 2, skip_footer=4)
        self.column_headers = headers

        self.setas = "xy"
        return self
    def __parse_VSM(self, header_line=3, data_line=3, header_delim=","):
        """An internal function for parsing delimited data without a leading column of metadata.

        Keyword Arguments:
            header_line (int): The line in the file that contains the column headers.
                If None, then column headers are automatically generated.
            data_line (int): The line on which the data starts.
            header_delim (string): The delimiter used for separating header values.

        Returns:
            Nothing, but modifies the current object.

        Note:
            The default values are configured for reading VSM data files.
        """
        try:
            with io.open(self.filename, errors="ignore",
                         encoding="utf-8") as f:
                for i, line in enumerate(f):
                    if i == 0:
                        # Line 0 is a timestamp - parsing it doubles as a file-type check
                        # (strptime raises ValueError on mismatch, caught below)
                        self["Timestamp"] = line.strip()
                        check = datetime.strptime(self["Timestamp"],
                                                  "%a %b %d %H:%M:%S %Y")
                        if check is None:
                            raise Core.StonerLoadError("Not a VSM file ?")
                    elif i == 1:
                        # Second line must be blank in a valid VSM file
                        assertion(line.strip() == "")
                    elif i == 2:
                        # Third line holds the column names; units follow on header_line
                        header_string = line.strip()
                    elif i == header_line:
                        # Pair each header name with its unit as "name (unit)"
                        unit_string = line.strip()
                        column_headers = [
                            "{} ({})".format(h.strip(), u.strip())
                            for h, u in zip(header_string.split(header_delim),
                                            unit_string.split(header_delim))
                        ]
                    elif i > 3:
                        break  # Header block finished - stop scanning lines
        except (StonerAssertionError, ValueError, AssertionError,
                TypeError) as e:
            # Any parse failure above means this is not a VSM file
            raise Core.StonerLoadError("Not a VSM File" + str(e.args))
        self.data = np.genfromtxt(
            self.filename,
            dtype="float",
            usemask=True,
            skip_header=data_line - 1,
            missing_values=["6:0", "---"],
            invalid_raise=False,
        )

        # Mask whole rows that contain missing values, then drop them entirely
        self.data = np.ma.mask_rows(self.data)
        cols = self.data.shape[1]
        self.data = np.reshape(self.data.compressed(), (-1, cols))
        self.column_headers = column_headers
        self.setas(x="H_vsm (T)", y="m (emu)")  # pylint: disable=not-callable
 def _load(self, filename=None, *args, **kargs):
     """Load function. File format has space delimited columns from row 3 onwards.

     Args:
         filename (string or bool): File to load. If None then the existing filename is used,
             if False, then a file dialog will be used.

     Returns:
         A copy of itself after loading the data.

     Raises:
         _SC_.StonerLoadError: If the file is not recognised as a GenX export.
     """
     if filename is None or not filename:
         self.get_filename("r")
     else:
         self.filename = filename
     # Two possible banner formats: a dataset-export line, or the Reflectivity plugin header
     pattern = re.compile(
         r'# Dataset "([^\"]*)" exported from GenX on (.*)$')
     pattern2 = re.compile(
         r"#\sFile\sexported\sfrom\sGenX\'s\sReflectivity\splugin")
     i = 0   # per-format offset used in the skip-line computation below
     ix = 0  # index at which the column-header marker line is found
     with io.open(self.filename, "r", errors="ignore",
                  encoding="utf-8") as datafile:
         line = datafile.readline()
         match = pattern.match(line)
         match2 = pattern2.match(line)
         if match is not None:
             dataset = match.groups()[0]
             date = match.groups()[1]
             self["date"] = date
             i = 2
         elif match2 is not None:
             # Reflectivity plugin format: date and dataset are on the next two lines
             line = datafile.readline()
             self["date"] = line.split(":")[1].strip()
             dataset = datafile.readline()[1:].strip()
             i = 3
         else:
             raise _SC_.StonerLoadError("Not a GenXFile")
         for ix, line in enumerate(datafile):
             line = line.strip()
             if line in ["# Headers:", "# Column lables:"]:
                 # The line after the marker holds the actual column labels
                 line = next(datafile)[1:].strip()
                 break
         else:
             raise _SC_.StonerLoadError("Cannot find headers")
     # Lines to skip = scanned lines (ix) + banner offset (i) + marker/label pair (2)
     skip = ix + i + 2
     column_headers = [f.strip() for f in line.strip().split("\t")]
     # GenX may write complex values; keep only the real part in the data array
     self.data = _np_.real(
         _np_.genfromtxt(self.filename, skip_header=skip, dtype=complex))
     self["dataset"] = dataset
     # Classify the dataset type from keywords in its name
     if "sld" in dataset.lower():
         self["type"] = "SLD"
     elif "asymmetry" in dataset.lower():
         self["type"] = "Asymmetry"
     elif "dd" in dataset.lower():
         self["type"] = "Down"
     elif "uu" in dataset.lower():
         self["type"] = "Up"
     self.column_headers = column_headers
     return self
Example #6
0
 def _read_xdata(self, f):
     """Read the x data from the spc file.

     Either unpacks explicitly stored x values from the file stream, or
     synthesises a linear ramp from the first/last values in the header.
     """
     header = self._header
     self._pts = header["fnpts"]
     n_pts = self._pts
     if header["ftflgs"] & 128:  # Flag bit set => x values are stored in the file
         nbytes = 4 * n_pts  # each stored x value occupies 4 bytes
         if nbytes > self._filesize - f.tell():
             raise Core.StonerLoadError("Trying to read too much data!")
         raw = f.read(nbytes)
         xdata = np.array(struct.unpack(str2bytes(str(n_pts) + "f"), raw))
     else:  # No stored x data - generate it from the header limits
         if n_pts > 1e6:  # Sanity limit on the number of generated points
             raise Core.StonerLoadError("More than 1 million points requested. Bugging out now!")
         xdata = np.linspace(header["ffirst"], header["flast"], n_pts)
     return xdata
Example #7
0
    def _load(self, filename=None, *args, **kargs):
        """File loader for PinkLib.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        with io.open(self.filename, "r", errors="ignore", encoding="utf-8") as stream:  # Read linewise
            if "PINKlibrary" not in stream.readline():
                raise Core.StonerLoadError("Not a PINK file")
            lines = stream.readlines()
            seen_data = False
            for idx, text in enumerate(lines):
                if text[0] != "#" and not seen_data:
                    # -2 because there's a commented-out data line just above the data
                    header_line = idx - 2
                    seen_data = True
                    continue  # keep scanning - metadata also appears at the bottom of the file
                elif any(tag in text for tag in ("Start time", "End time", "Title")):
                    chunks = text.strip("#").split(":")
                    self.metadata[chunks[0].strip()] = ":".join(chunks[1:]).strip()
            column_headers = lines[header_line].strip("#\t ").split("\t")
        raw = np.genfromtxt(self.filename, dtype="float", delimiter="\t", invalid_raise=False, comments="#")
        self.data = raw[:, 0:-2]  # Deal with an errant tab at the end of each line
        self.column_headers = column_headers
        if np.all([h in column_headers for h in ("T (C)", "R (Ohm)")]):
            self.setas(x="T (C)", y="R (Ohm)")  # pylint: disable=not-callable
        return self
Example #8
0
 def __find_lines(self):
     """Returns an array of ints [header_line,data_line,scan_line,date_line,motor_line]."""
     # Map of two-character line markers onto their slot in line_numbers
     marker_slots = {"#L": 0, "#S": 2, "#D": 3, "#P": 4}
     with io.open(self.filename, "r", errors="ignore",
                  encoding="utf-8") as fp:
         self.line_numbers = [0, 0, 0, 0, 0]
         counter = 0
         for line in fp:
             counter += 1
             if counter == 1 and line[0] != "#":
                 raise Core.StonerLoadError("Not a BNL File ?")
             if len(line) < 2:
                 continue  # nothing written on this line - move on to the next
             tag = line[0:2]
             if tag in marker_slots:
                 self.line_numbers[marker_slots[tag]] = counter
             elif line[0] in "0123456789":
                 # First line starting with a digit marks the beginning of the data
                 self.line_numbers[1] = counter
                 break
    def _read_loginfo(self, f):
        """Read the log info section of the spc file.

        Args:
            f (file): Open binary file handle positioned at the start of the log block.

        Raises:
            Core.StonerLoadError: If the binary/disk log data would run past the end of the file.
        """
        logstc = struct.unpack(b"IIIII44s", f.read(64))
        logstc_keys = ("logsizd", "logsizm", "logtxto", "logbins", "logdsks",
                       "logrsvr")
        logheader = dict(zip(logstc_keys, logstc))
        self._header = dict(self._header, **logheader)

        # Can't handle either binary log information or ion disk log information (wtf is this anyway !)
        if self._header["logbins"] + self._header[
                "logdsks"] > self._filesize - f.tell():
            raise Core.StonerLoadError("Too much logfile data to read")
        f.read(self._header["logbins"] + self._header["logdsks"])

        # The renishaw seems to put a 16 character timestamp next - it's not in the spec but never mind that.
        self._header["Date-Time"] = f.read(16)
        # Now read the rest of the file as log text
        logtext = f.read()
        # We expect things to be single lines terminated with a CR-LF of the format key=value
        for line in re.split(b"[\r\n]+", logtext):
            if b"=" in line:
                # Split on the *first* '=' only so values containing '=' are not truncated
                key, _, value = line.partition(b"=")
                self._header[key.decode()] = value.decode()
Example #10
0
    def _load(self, filename=None, *args, **kargs):
        """Load an OpenGDA file.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        i = 0
        with io.open(self.filename, "r", errors="ignore", encoding="utf-8") as f:
            for i, raw in enumerate(f):
                stripped = raw.strip()
                if i == 0 and stripped != "&SRS":
                    raise Core.StonerLoadError("Not a GDA File from Rasor ?" + str(stripped))
                if "&END" in stripped:
                    break  # End of the metadata header block
                pieces = stripped.split("=")
                if len(pieces) != 2:
                    continue  # Not a simple key=value line, so skip it
                self.metadata[pieces[0]] = string_to_type(pieces[1].strip())
            # The line following &END carries the tab-separated column headers
            column_headers = f.readline().strip().split("\t")
            self.data = np.genfromtxt([str2bytes(l) for l in f], dtype="float", invalid_raise=False)
        self.column_headers = column_headers
        return self
Example #11
0
    def _check_signature(self, filename):
        """Check that this is a PNG file and raise a Core.StonerLoadError if not.

        Args:
            filename (str): Path of the file to test.

        Returns:
            True if the 8 byte PNG signature matches.

        Raises:
            Core.StonerLoadError: If the file cannot be read or is not a PNG file.
        """
        # The fixed 8-byte signature that begins every valid PNG file
        png_signature = [137, 80, 78, 71, 13, 10, 26, 10]
        try:
            with io.open(filename, "rb") as test:
                sig = test.read(8)
        except Exception:
            from traceback import format_exc

            raise Core.StonerLoadError("Not a PNG file!>\n{}".format(
                format_exc()))
        sig = list(sig)
        if self.debug:
            print(sig)
        # Compare outside the try block so this specific error is not caught
        # and re-wrapped by the generic I/O handler above.
        if sig != png_signature:
            raise Core.StonerLoadError("Signature mismatch")
        return True
Example #12
0
    def _load(self, filename, *args, **kargs):
        """Leeds  MOKE file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        with io.open(self.filename, mode="rb") as f:
            line = bytes2str(f.readline()).strip()
            if line != "#Leeds CM Physics MOKE":
                raise Core.StonerLoadError(
                    "Not a Core.DataFile from the Leeds MOKE")
            # Walk through the '#'-prefixed (or blank) header lines collecting key:value metadata
            while line.startswith("#") or line == "":
                pieces = line.split(":")
                if len(pieces) > 1:
                    self[pieces[0][1:]] = ":".join(pieces[1:]).strip()
                line = bytes2str(f.readline()).strip()
            # The first non-comment line carries the comma-separated column headers
            column_headers = [entry.strip() for entry in line.split(",")]
            self.data = np.genfromtxt(f, delimiter=",")
        self.setas = "xy.de"
        self.column_headers = column_headers
        return self
Example #13
0
    def _load(self, filename=None, *args, **kargs):
        """Reads an XRD Core.DataFile as produced by the Brucker diffractometer.

        Args:
            filename (string or bool):
                File to load. If None then the existing filename is used, if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.

        Notes:
            Format is ini file like but not enough to do standard inifile processing - in particular
            one can have multiple sections with the same name (!)
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        sh = re.compile(r"\[(.+)\]")  # Regexp to grab section name
        with io.open(self.filename, errors="ignore", encoding="utf-8") as f:  # Read filename linewise
            if f.readline().strip() != ";RAW4.00":  # Check we have the correct fileformat
                raise Core.StonerLoadError("File Format Not Recognized !")
            drive = 0
            for line in f:  # for each line
                m = sh.search(line)
                if m:  # This is a new section
                    section = m.group(1)
                    if section == "Drive":  # If this is a Drive section we need to know which Drive Section it is
                        section = section + str(drive)
                        drive = drive + 1
                    elif section == "Data":  # Data section contains the business but has a redundant first line
                        f.readline()
                    # NB: this inner loop advances the *same* file iterator as the outer loop,
                    # so each section's lines are consumed exactly once
                    for line in f:  # Now start reading lines in this section...
                        if line.strip() == "":
                            # A blank line marks the end of the section, so go back to the outer loop which will
                            # handle a new section
                            break
                        if section == "Data":  # In the Data section read lines of data value,vale
                            parts = line.split(",")
                            angle = parts[0].strip()
                            counts = parts[1].strip()
                            dataline = np.array([float(angle), float(counts)])
                            self.data = np.append(self.data, dataline)
                        else:  # Other sections contain metadata
                            parts = line.split("=")
                            key = parts[0].strip()
                            data = parts[1].strip()
                            # Keynames in main metadata are section:key - use theCore.DataFile magic to do type
                            # determination
                            self[section + ":" + key] = string_to_type(data)
            column_headers = ["Angle", "Counts"]  # Assume the columns were Angles and Counts

        # Data was accumulated as a flat array above - reshape into (N, 2) rows of (angle, counts)
        self.data = np.reshape(self.data, (-1, 2))
        self.setas = "xy"
        # NOTE(review): assumes Monochromator == 1 means a four-bounce optic - confirm semantics
        self.four_bounce = self["HardwareConfiguration:Monochromator"] == 1
        self.column_headers = column_headers
        if kargs.pop("Q", False):
            self.to_Q()  # Optional conversion of the angle scale to Q
        return self
Example #14
0
    def _load(self, filename=None, *args, **kargs):
        """Load function. File format has space delimited columns from row 3 onwards.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.

        Raises:
            Core.StonerLoadError: If the file does not carry the expected QuickNXS header.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename

        with io.open(self.filename, "r", errors="ignore", encoding="utf-8") as data:  # Slightly ugly text handling
            line = data.readline()
            if not line.strip().startswith(
                "# Datafile created by QuickNXS 0.9.39"
            ):  # bug out oif we don't like the header
                raise Core.StonerLoadError("Not a file from the SNS BL4A line")
            for line in data:
                if line.startswith("# "):  # We're in the header
                    line = line[2:].strip()  # strip the header and whitespace

                if line.startswith("["):  # Look for a section header
                    section = line.strip().strip("[]")
                    if section == "Data":  # The Data section has one line of colum headers and then data
                        header = next(data)[2:].split("\t")
                        if not python_v3:
                            # Python 2 path: force headers down to plain ascii byte strings
                            column_headers = [h.strip().encode("ascii", errors="replace") for h in header]
                        else:
                            column_headers = [h.strip() for h in header]
                        self.data = np.genfromtxt(data)  # we end by reading the raw data
                    elif section == "Global Options":  # This section can go into metadata
                        for line in data:
                            line = line[2:].strip()
                            if line.strip() == "":
                                break  # blank line terminates the section
                            else:
                                # NOTE(review): fixed column slices 2:10 / 11: assume an aligned
                                # key/value layout in this section - confirm against a sample file
                                self[line[2:10].strip()] = line[11:].strip()
                    elif (
                        section == "Direct Beam Runs" or section == "Data Runs"
                    ):  # These are constructed into lists ofg dictionaries for each file
                        sec = list()
                        header = next(data)
                        header = header[2:].strip()
                        # Column names are separated by runs of two or more spaces
                        keys = [s.strip() for s in header.split("  ") if s.strip()]
                        for line in data:
                            line = line[2:].strip()
                            if line == "":
                                break  # blank line terminates the run table
                            else:
                                values = [s.strip() for s in line.split("  ") if s.strip()]
                                sec.append(dict(zip(keys, values)))
                        self[section] = sec
                else:  # We must still be in the opening un-labelled section of meta data
                    if ":" in line:
                        i = line.index(":")
                        key = line[:i].strip()
                        value = line[i + 1 :].strip()
                        self[key.strip()] = value.strip()
        self.column_headers = column_headers
        return self
    def _load(self, filename=None, *args, **kargs):
        """Data loader function for 340 files."""
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename

        with io.open(self.filename, "rb") as data:
            keys = []
            vals = []
            for line in data:
                text = bytes2str(line)
                if text.strip() == "":
                    break  # blank line terminates the header block
                fields = [entry.strip() for entry in text.split(":")]
                if len(fields) != 2:
                    raise Core.StonerLoadError(
                        "Header doesn't contain two parts at {}".format(
                            text.strip()))
                keys.append(fields[0])
                vals.append(fields[1])
            else:
                raise Core.StonerLoadError("Overan the end of the file")
            expected = [
                "Sensor Model",
                "Serial Number",
                "Data Format",
                "SetPoint Limit",
                "Temperature coefficient",
                "Number of Breakpoints",
            ]
            if keys != expected:
                raise Core.StonerLoadError(
                    "Header did not contain recognised keys.")
            for key, val in zip(keys, vals):
                # Header values carry trailing text - keep only the leading token
                self.metadata[key] = string_to_type(val.split()[0])
            headers = bytes2str(next(data)).strip().split()
            column_headers = headers[1:]  # drop the leading index-column header
            dat = np.genfromtxt(data)
            self.data = dat[:, 1:]  # drop the index column from the data as well
        self.column_headers = column_headers
        return self
Example #16
0
    def _load(self, filename=None, header_line=0, data_line=1, data_delim=",", header_delim=",", **kargs):
        """Generic deliminated file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Keyword Arguments:
            header_line (int): The line in the file that contains the column headers.
                If None, then column headers are automatically generated.
            data_line (int): The line on which the data starts
            data_delim (string): The delimiter used for separating data values
            header_delim (string): The delimiter used for separating header values

        Returns:
            A copy of the current object after loading the data.

        Raises:
            Core.StonerLoadError: If no delimiters can be found in the header or data lines.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        if header_line is not None:
            try:
                header_string = linecache.getline(self.filename, header_line + 1)
                header_string = re.sub(r'["\n]', "", header_string)
                header_string.index(header_delim)  # raises ValueError if the delimiter is absent
            except (ValueError, SyntaxError):
                linecache.clearcache()
                raise Core.StonerLoadError("No Delimiters in header line")
            column_headers = [x.strip() for x in header_string.split(header_delim)]
        else:
            column_headers = None  # generated after the data is loaded (see below)
            # Sample the data line into a *separate* variable: the previous code clobbered
            # the integer data_line with the line's text and then passed it as skip_header.
            sample = linecache.getline(self.filename, data_line)
            try:
                sample.index(data_delim)
            except ValueError:
                linecache.clearcache()
                raise Core.StonerLoadError("No delimiters in data lines")

        self.data = np.genfromtxt(self.filename, dtype="float", delimiter=data_delim, skip_header=data_line)
        if column_headers is None:
            # Auto-generate headers from the shape of the freshly loaded data, not the stale self.data
            column_headers = ["Column" + str(x) for x in range(np.shape(self.data)[1])]
        self.column_headers = column_headers
        linecache.clearcache()
        return self
def split_up_down(data, col=None, folder=None):
    """Split the DataFile data into several files where the column *col* is either rising or falling.

    Args:
        data (:py:class:`Stoner.Core.DataFile`): object containing the data to be sorted
        col (index): is something that :py:meth:`Stoner.Core.DataFile.find_col` can use
        folder (:py:class:`Stoner.Folders.DataFolder` or None): if this is an instance of :py:class:`Stoner.Folders.DataFolder` then add
            rising and falling files to groups of this DataFolder, otherwise create a new one

    Returns:
        A :py:class:`Stoner.Folders.DataFolder` object with two groups, rising and falling
    """
    a = _SC_.Data(data)
    if col is None:
        _ = a._col_args()
        col = _.xcol
    # Integer division so the smoothing window is an int - len(a)/10 gives a float on
    # Python 3, which broke the odd-window adjustment below and the peaks() call.
    width = len(a) // 10
    if width % 2 == 0:  # Ensure the window for Savitzky-Golay filter is odd
        width += 1
    setas = a.setas.clone
    a.setas = ""
    peaks = list(a.peaks(col, width, xcol=None, peaks=True, troughs=False, full_data=False))
    troughs = list(a.peaks(col, width, xcol=None, peaks=False, troughs=True, full_data=False))
    a.setas = setas
    if len(peaks) > 0 and len(troughs) > 0:  # Ok more than up down here
        order = peaks[0] < troughs[0]
    elif len(peaks) > 0:  # Rise then fall
        order = True
    elif len(troughs) > 0:  # Fall then rise
        order = False
    else:  # No peaks or troughs so just return a single rising
        ret = _SF_(readlist=False)
        ret += data
        return ret
    # Split points are the extrema plus the two ends of the data
    splits = [0, len(a)]
    splits.extend(peaks)
    splits.extend(troughs)
    splits.sort()
    splits = [int(s) for s in splits]
    if not isinstance(folder, _SF_):  # Create a new DataFolder object
        output = _SF_(readlist=False)
    else:
        output = folder
    output.add_group("rising")
    output.add_group("falling")

    # Alternate between the two groups, starting with whichever direction comes first
    if order:
        risefall = ["rising", "falling"]
    else:
        risefall = ["falling", "rising"]
    for i in range(len(splits) - 1):
        working = data.clone
        working.data = data.data[splits[i]:splits[i + 1], :]
        output.groups[risefall[i % 2]].append(working)
    return output
def collate(grp, trail, **kargs):
    """Collect the 'Asym' column from every file in *grp* into one DataFile alongside the 'Energy' column.

    Optionally records the group key (``group_key`` kwarg) and saves the result
    when ``save=True`` and a ``filename`` kwarg are supplied.
    """
    grp.sort()
    combined = SC.DataFile()
    combined.add_column(grp[0].column('Energy'), 'Energy')
    for member in grp:
        combined.add_column(member.column('Asym'), member.title)
    if "group_key" in kargs:
        combined[kargs["group_key"]] = grp.key
    combined["path"] = trail
    if "save" in kargs and kargs["save"]:
        combined.save(kargs["filename"])
    return combined
Example #19
0
    def _load(self, filename, *args, **kargs):
        """Private loader method for EasyPlot save files.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of itself after loading the data.

        Raises:
            Core.StonerLoadError: If the file is not an EasyPlot save file.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename

        datastart = -1
        dataend = -1
        # Default delimiter for genfromtxt; previously this was unbound (NameError)
        # if the file contained no numeric data line at all.
        delimiter = None

        i = 0
        with io.open(self.filename, "r", errors="ignore", encoding="utf-8") as data:
            if "******** EasyPlot save file ********" not in data.read(1024):
                raise Core.StonerLoadError("Not an EasyPlot Save file?")
            data.seek(0)
            for i, line in enumerate(data):
                line = line.strip()
                if line == "":
                    continue
                if line[0] not in "-0123456789" and datastart > 0 and dataend < 0:
                    dataend = i  # first non-numeric line after the data block
                if line.startswith('"') and ":" in line:
                    # Quoted "key : value" metadata line
                    parts = [x.strip() for x in line.strip('"').split(":")]
                    self[parts[0]] = string_to_type(":".join(parts[1:]))
                elif line.startswith("/"):  # command
                    parts = [x.strip('"') for x in next(csv.reader([line], delimiter=" ")) if x != ""]
                    cmd = parts[0].strip("/")
                    if len(cmd) > 1:
                        cmdname = "_{}_cmd".format(cmd)
                        if cmdname in dir(self):  # If this command is implemented as a function run it
                            cmd = getattr(self, "_{}_cmd".format(cmd))
                            cmd(parts[1:])
                        else:
                            # Otherwise store the command and its arguments as metadata
                            if len(parts[1:]) > 1:
                                cmd = cmd + "." + parts[1]
                                value = ",".join(parts[2:])
                            elif len(parts[1:]) == 1:
                                value = parts[1]
                            else:
                                value = True
                            self[cmd] = value
                elif line[0] in "-0123456789" and datastart < 0:  # start of data
                    datastart = i
                    if "," in line:
                        delimiter = ","
                    else:
                        delimiter = None
        if dataend < 0:
            dataend = i  # no trailing non-numeric line - data runs to the end of file
        self.data = np.genfromtxt(self.filename, skip_header=datastart, skip_footer=i - dataend, delimiter=delimiter)
        if self.data.shape[1] == 2:
            self.setas = "xy"
        return self
def split(data, col=None, folder=None, spliton=0, rising=True, falling=False, skip=0):
    """Split the DataFile data into several files where the column *col* is either rising or falling.

    Args:
        data (:py:class:`Stoner.Core.DataFile`): object containing the data to be sorted
        col (index): is something that :py:meth:`Stoner.Core.DataFile.find_col` can use
        folder (:py:class:`Stoner.Folders.DataFolder` or None): if this is an instance of
            :py:class:`Stoner.Folders.DataFolder` then add rising and falling files to groups
            of this DataFolder, otherwise create a new one
        spliton (str or float): Define where to split the data, 'peaks' to split on peaks, 'troughs'
            to split on troughs, 'both' to split on peaks and troughs or a number to split at that number
        rising (bool): whether to split on threshold crossing when data is rising
        falling (bool): whether to split on threshold crossing when data is falling
        skip (int): skip this number of splitons each time. eg skip=1 picks out odd crossings

    Returns:
        A :py:class:`Stoner.Folders.DataFolder` object containing the individual sections as files.

    Raises:
        ValueError: If *spliton* is neither numeric nor one of 'peaks', 'troughs' or 'both'.
    """
    if col is None:
        col = data.setas["x"]
    d = _SC_.Data(data)
    if not isinstance(folder, _SF_):  # Create a new DataFolder object
        output = _SF_()
    else:
        output = folder

    if isinstance(spliton, int_types + (float,)):
        # Numeric spliton: find every threshold crossing in the requested direction(s)
        spl = d.threshold(threshold=float(spliton), col=col, rising=rising, falling=falling, all_vals=True)

    elif spliton in ['peaks', 'troughs', 'both']:
        # Bug fix: use integer division - Python 3 "/" gives a float, which made the
        # window width a non-integer value for the peak finder.
        width = len(d) // 10
        if width % 2 == 0:  # Ensure the window for Savitzky-Golay filter is odd
            width += 1
        if spliton == 'peaks':
            spl = list(d.peaks(col, width, xcol=False, peaks=True, troughs=False))
        elif spliton == 'troughs':
            spl = list(d.peaks(col, width, xcol=False, peaks=False, troughs=True))
        else:
            spl = list(d.peaks(col, width, xcol=False, peaks=True, troughs=True))

    else:
        raise ValueError('Did not recognise spliton')

    # Keep every (skip+1)-th split point, then bracket with the start and end of the data
    spl = [spl[i] for i in range(len(spl)) if i % (skip + 1) == 0]
    spl.extend([0, len(d)])
    spl.sort()
    for i in range(len(spl) - 1):
        tmp = d.clone
        tmp.data = tmp[spl[i]:spl[i + 1]]
        output.files.append(tmp)
    return output
    def _read_ydata(self, f, data, column_headers):
        """Read the y data and column headers from spc file.

        Args:
            f (file): Open binary file handle, positioned just past the x-data.
            data (ndarray): Pre-allocated array whose column 0 already holds the x-data.
            column_headers (list): List of column names, appended to in place.

        Returns:
            ndarray: *data* with columns 1..n filled with the y-values of each sub-scan.

        Raises:
            StonerLoadError: If the header claims more data than remains in the file.
        """
        n = self._header["fnsub"]  # number of sub-scans stored in the file
        # Field names of the 32-byte sub-header that precedes each sub-scan's y-data
        subhdr_keys = (
            "subflgs",
            "subexp",
            "subindx",
            "subtime",
            "subnext",
            "subnois",
            "subnpts",
            "subscan",
            "subwlevel",
            "subresv",
        )
        # Bit 0 of ftflgs selects 16-bit vs 32-bit integer y-data
        if self._header["ftflgs"] & 1:
            y_width = 2  # bytes per data point
            y_fmt = "h"  # struct format: signed 16-bit
            divisor = 2**16
        else:
            y_width = 4
            y_fmt = "i"  # struct format: signed 32-bit
            divisor = 2**32
        # Sanity check: refuse to read more bytes than actually remain in the file
        if n * (y_width * self._pts + 32) > self._filesize - f.tell():
            raise Core.StonerLoadError("No good, going to read too much data!")
        for j in range(n):  # We have n sub-scans
            # Read the subheader and import into the main metadata dictionary as scan#:<subheader item>
            subhdr = struct.unpack(b"BBHfffIIf4s", f.read(32))
            subheader = dict(
                zip(["scan" + str(j) + ":" + x for x in subhdr_keys], subhdr))

            # Now read the y-data
            exponent = subheader["scan" + str(j) + ":subexp"]
            if int(exponent) & -128:  # Data is unscaled direct floats
                ydata = np.array(
                    struct.unpack(str2bytes(str(self._pts) + "f"),
                                  f.read(self._pts * y_width)))
            else:  # Data is scaled by exponent
                yvals = struct.unpack(str2bytes(str(self._pts) + y_fmt),
                                      f.read(self._pts * y_width))
                ydata = np.array(yvals, dtype="float64") * (2**
                                                            exponent) / divisor
            data[:, j + 1] = ydata  # column 0 is the x-data, so sub-scan j goes in column j+1
            # Merge this sub-scan's header fields into the main header dict
            self._header = dict(self._header, **subheader)
            column_headers.append("Scan" + str(j) + ":" +
                                  self._yvars[self._header["fytype"]])

        return data
Example #22
0
        def _load(self, filename=None, *args, **kargs):
            """TDMS file loader routine.

            Args:
                filename (string or bool): File to load. If None then the existing filename is used,
                    if False, then a file dialog will be used.

            Returns:
                A copy of the itself after loading the data.

            Raises:
                StonerLoadError: If the file cannot be parsed as a TDMS file.
            """
            if filename is None or not filename:
                self.get_filename("r")
            else:
                self.filename = filename
            # Open the file and read the main file header and unpack into a dict
            try:
                f = TdmsFile(self.filename)

                column_headers = []
                data = np.array([])

                for grp in f.objects.keys():
                    if grp == "/":
                        pass  # skip the root group
                    elif grp == "/'TDI Format 1.5'":
                        # This group carries the file metadata as TDMS properties
                        metadata = f.object("TDI Format 1.5")
                        for k, v in metadata.properties.items():
                            self.metadata[k] = string_to_type(str(v))
                    else:
                        if f.objects[grp].has_data:
                            chnl = grp.split("/")[-1]
                            # Bug fix: str.strip returns a new string, so the result
                            # must be assigned back (original discarded it).
                            chnl = chnl.strip().strip("'")
                            column_headers.append(chnl)
                            if data.size == 0:
                                data = f.objects[grp].data
                            else:
                                data = np.column_stack(
                                    [data, f.objects[grp].data])
                self.data = data
                self.column_headers = column_headers
            except Exception:
                from traceback import format_exc

                raise Core.StonerLoadError("Not a TDMS File \n{}".format(
                    format_exc()))

            return self
 def _read_uvwdata(self, filename, fmt, lineno):
     """Read the numerical data taking account of the format."""
     if fmt == "Text":
         uvwdata = _np_.genfromtxt(self.filename, skip_header=lineno + 2)
     elif fmt == "Binary 4":
         if self["version"] == 1:
             dt = _np_.dtype(">f4")
         else:
             dt = _np_.dtype("<f4")
         with io.open(filename, "rb") as bindata:
             bindata.seek(self._ptr)
             uvwdata = _np_.fromfile(bindata,
                                     dtype=dt,
                                     count=1 +
                                     self["xnodes"] * self["ynodes"] *
                                     self["znodes"] * self["valuedim"])
             assertion(
                 uvwdata[0] == 1234567.0,
                 "Binary 4 format check value incorrect ! Actual Value was {}"
                 .format(uvwdata[0]),
             )
         uvwdata = uvwdata[1:]
         uvwdata = _np_.reshape(uvwdata, (-1, self["valuedim"]))
     elif fmt == "Binary 8":
         if self["version"] == 1:
             dt = _np_.dtype(">f8")
         else:
             dt = _np_.dtype("<f8")
         with io.open(filename, "rb") as bindata:
             bindata.seek(self._ptr)
             uvwdata = _np_.fromfile(bindata,
                                     dtype=dt,
                                     count=1 +
                                     self["xnodes"] * self["ynodes"] *
                                     self["znodes"] * self["valuedim"])
             assertion(
                 (uvwdata[0] == 123456789012345.0),
                 "Binary 4 format check value incorrect ! Actual Value was {}"
                 .format(uvwdata[0]),
             )
         uvwdata = _np_.reshape(uvwdata, (-1, self["valuedim"]))
     else:
         raise _SC_.StonerLoadError("Unknow OVF Format {}".format(fmt))
     return uvwdata
Example #24
0
    def _load(self, filename, *args, **kargs):
        """Delegate to the parent loader with the Big Blue format parameters set.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of the itself after loading the data.

        Raises:
            StonerLoadError: If every value loaded was NaN (i.e. not a Big Blue file).
        """
        if not filename:  # covers both None and False
            self.get_filename("r")
        else:
            self.filename = filename

        # Big Blue files: comma separated headers on line 3, space separated data from line 7
        super(BigBlueFile, self)._load(
            self.filename, *args, header_line=3, data_line=7, data_delim=" ", header_delim=","
        )
        # If nothing parsed, the delimiters did not match - reject the file
        if np.isnan(self.data).all():
            raise Core.StonerLoadError("All data was NaN in Big Blue format")
        return self
Example #25
0
 def _load(self, filename=None, *args, **kargs):
     """Load function. File format has space delimited columns from row 3 onwards.

     Handles two GenX export header styles: a direct dataset export and an
     export from GenX's Reflectivity plugin.

     Args:
         filename (string or bool): File to load. If None then the existing filename is used,
             if False, then a file dialog will be used.

     Returns:
         A copy of the itself after loading the data.

     Raises:
         StonerLoadError: If the first line matches neither known GenX header format.
     """
     if filename is None or not filename:
         self.get_filename("r")
     else:
         self.filename = filename
     # Header style 1: direct GenX dataset export with name and date on line 1
     pattern = re.compile(
         r'# Dataset "([^\"]*)" exported from GenX on (.*)$')
     # Header style 2: export from GenX's Reflectivity plugin
     pattern2 = re.compile(
         r"#\sFile\sexported\sfrom\sGenX\'s\sReflectivity\splugin")
     with io.open(self.filename, "r", errors="ignore",
                  encoding="utf-8") as datafile:
         line = datafile.readline()
         match = pattern.match(line)
         match2 = pattern2.match(line)
         if match is not None:
             # Dataset name and export date come from the first line
             dataset = match.groups()[0]
             date = match.groups()[1]
             line = datafile.readline()
             line = datafile.readline()
             line = line[1:]  # drop the leading comment character from the header line
             self["date"] = date
         elif match2 is not None:
             # Plugin export: the date follows a colon on the second line
             line = datafile.readline()
             self["date"] = line.split(":")[1].strip()
             datafile.readline()
             line = datafile.readline()
             line = line[1:]  # drop the leading comment character from the header line
             dataset = "asymmetry"
         else:
             raise _SC_.StonerLoadError("Not a GenXFile")
     # line now holds the tab separated column names
     column_headers = [f.strip() for f in line.strip().split("\t")]
     self.data = _np_.genfromtxt(self.filename, skip_header=4)
     self["dataset"] = dataset
     self.setas = "xye"  # x, y and y-error columns
     self.column_headers = column_headers
     return self
Example #26
0
    def _load(self, filename=None, *args, **kargs):
        """PNG file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of the itself after loading the data.

        Raises:
            StonerLoadError: If the file cannot be read as a PNG image.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        # Bug fix: check the resolved self.filename, not the raw argument - the
        # argument is None/False when the filename came from the dialog above.
        self._check_signature(self.filename)
        try:
            with PIL.Image.open(self.filename, "r") as img:
                # Copy the image's info entries (e.g. PNG text chunks) into the metadata
                for k in img.info:
                    self.metadata[k] = img.info[k]
                self.data = np.asarray(img)
        except IOError:
            raise Core.StonerLoadError("Unable to read as a PNG file.")

        return self
Example #27
0
 def _load(self, filename=None, *args, **kargs):
     """Load function. File format has space delimited columns from row 3 onwards.

     Parses the multi-section output of the mda2ascii tool: main header,
     "Extra PV" block, scan header, column descriptions, then numeric data.

     Args:
         filename (string or bool): File to load. If None then the existing filename is used,
             if False, then a file dialog will be used.

     Returns:
         A copy of the itself after loading the data.

     Raises:
         StonerLoadError: If the header is not recognised or any section overruns the file.
     """
     if filename is None or not filename:
         self.get_filename("r")
     else:
         self.filename = filename
     # i accumulates the line counts of the four header sections; their sum is
     # the number of lines genfromtxt must skip to reach the numeric data.
     i = [0, 0, 0, 0]
     with io.open(self.filename, "r", errors="ignore",
                  encoding="utf-8") as data:  # Slightly ugly text handling
         # Section 1: main header metadata, up to the "Extra PV" marker
         for i[0], line in enumerate(data):
             if (i[0] == 0
                     and line.strip() != "## mda2ascii 1.2 generated output"
                 ):  # bug out if we don't like the header
                 raise Core.StonerLoadError("Not a file mda2ascii")
             # NOTE(review): result of strip() is discarded here - presumably
             # meant line = line.strip(); confirm before changing.
             line.strip()
             if "=" in line:
                 parts = line[2:].split("=")
                 self[parts[0].strip()] = string_to_type("".join(
                     parts[1:]).strip())
             elif line.startswith("#  Extra PV:"):
                 # Onto the next metadata bit
                 break
         pvpat = re.compile(r"^#\s+Extra\s+PV\s\d+\:(.*)")
         # Section 2: "Extra PV" entries, stored as key/value metadata
         for i[1], line in enumerate(data):
             if line.strip() == "":
                 continue
             elif line.startswith("# Extra PV"):
                 res = pvpat.match(line)
                 bits = [
                     b.strip().strip(r'"') for b in res.group(1).split(",")
                 ]
                 # Prefer the descriptive name (bits[1]) over the PV name (bits[0])
                 if bits[1] == "":
                     key = bits[0]
                 else:
                     key = bits[1]
                 if len(bits) > 3:
                     key = key + " ({})".format(bits[3])  # append the units
                 self[key] = string_to_type(bits[2])
             else:
                 break  # End of Extra PV stuff
         else:
             # for/else: ran off the end of the file inside this section
             raise Core.StonerLoadError("Overran Extra PV Block")
         # Section 3: scan header metadata, up to the column descriptions
         for i[2], line in enumerate(data):
             # NOTE(review): no-op strip again - result discarded; confirm.
             line.strip()
             if line.strip() == "":
                 continue
             elif line.startswith("# Column Descriptions:"):
                 break  # Start of column headers now
             elif "=" in line:
                 parts = line[2:].split("=")
                 self[parts[0].strip()] = string_to_type("".join(
                     parts[1:]).strip())
         else:
             raise Core.StonerLoadError(
                 "Overran end of scan header before column descriptions")
         colpat = re.compile(r"#\s+\d+\s+\[([^\]]*)\](.*)")
         column_headers = []
         # Section 4: column descriptions, up to the start of the data proper
         for i[3], line in enumerate(data):
             res = colpat.match(line)
             line.strip()
             if line.strip() == "":
                 continue
             elif line.startswith("# 1-D Scan Values"):
                 break  # Start of data
             elif res is not None:
                 if "," in res.group(2):
                     bits = [b.strip() for b in res.group(2).split(",")]
                     if bits[-2] == "":
                         colname = bits[0]
                     else:
                         colname = bits[-2]
                     if bits[-1] != "":
                         colname += " ({})".format(bits[-1])
                     # Disambiguate duplicate column names with the first field
                     if colname in column_headers:
                         colname = "{}:{}".format(bits[0], colname)
                 else:
                     colname = res.group(1).strip()
                 column_headers.append(colname)
         else:
             raise Core.StonerLoadError(
                 "Overand the end of file without reading data")
     self.data = np.genfromtxt(self.filename,
                               skip_header=sum(i))  # so that's ok then !
     self.column_headers = column_headers
     return self
Example #28
0
    def _load(self, filename, *args, **kargs):
        """Generic deliminated file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Keyword Arguments:
            header_line (int): The line in the file that contains the column headers.
                If None, then column headers are automatically generated.
            data_line (int): The line on which the data starts
            data_delim (string): The delimiter used for separating data values
            header_delim (string): The delimiter used for separating header values

        Returns:
            A copy of the current object after loading the data.

        Raises:
            StonerLoadError: If no delimiter can be found in the header or data lines.
        """
        # Merge the per-class defaults with any caller-supplied keyword arguments
        defaults = copy(self._defaults)
        defaults.update(kargs)
        # Retain only keyword args that are neither loader defaults nor framework flags
        keep = set(kargs.keys()) - (set(self._defaults.keys())
                                    | set(["auto_load", "filetype"]))
        kargs = {k: kargs[k] for k in keep}
        header_line = defaults["header_line"]
        data_line = defaults["data_line"]
        data_delim = defaults["data_delim"]
        header_delim = defaults["header_delim"]
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        if header_line is not None:
            try:
                # linecache.getline is 1-based, hence the +1
                header_string = linecache.getline(self.filename,
                                                  header_line + 1)
                header_string = re.sub(r'["\n]', "", header_string)
                # .index raises ValueError when the delimiter is absent
                header_string.index(header_delim)
            except (ValueError, SyntaxError):
                linecache.clearcache()
                raise Core.StonerLoadError("No Delimiters in header line")
            column_headers = [
                x.strip() for x in header_string.split(header_delim)
            ]
        else:
            # NOTE(review): the column count here comes from the shape of the
            # *existing* self.data, before the new file is loaded - confirm intended.
            column_headers = [
                "Column" + str(x) for x in range(np.shape(self.data)[1])
            ]
            data_test = linecache.getline(self.filename, data_line + 1)
            if data_delim is None:
                # Guess the delimiter by testing candidates against the first data line
                for data_delim in ["\t", ",", ";", " "]:
                    if data_delim in data_test:
                        break
                else:
                    raise Core.StonerLoadError("No delimiters in data lines")
            elif data_delim not in data_test:
                linecache.clearcache()
                raise Core.StonerLoadError("No delimiters in data lines")

        self.data = np.genfromtxt(self.filename,
                                  dtype="float",
                                  delimiter=data_delim,
                                  skip_header=data_line)
        self.column_headers = column_headers
        linecache.clearcache()  # linecache caches the whole file - release it
        self._kargs = kargs
        return self
    def _load(self, filename=None, *args, **kargs):
        """Reads a .scf file produced by the Renishaw Raman system (amongst others).

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of the itself after loading the data.

        Raises:
            StonerLoadError: If the file uses an unimplemented variant of the format.

        Todo:
            Implement the second form of the file that stores multiple x-y curves in the one file.

        Notes:
            Metadata keys are pretty much as specified in the spc.h file that defines the file format.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        # Open the file and read the main file header and unpack into a dict
        self._filesize = os.stat(self.filename).st_size
        # Bug fix: open the resolved self.filename, not the raw argument, which is
        # None/False when the filename came from the dialog above (os.stat on the
        # previous line already uses the resolved name).
        with io.open(self.filename, "rb") as f:
            # 512-byte fixed main header
            spchdr = struct.unpack(
                b"BBBciddiBBBBi9s9sH8f30s130siiBBHf48sfifB187s", f.read(512))
            keys = (
                "ftflgs",
                "fversn",
                "fexper",
                "fexp",
                "fnpts",
                "ffirst",
                "flast",
                "fnsub",
                "fxtype",
                "fytype",
                "fztype",
                "fpost",
                "fres",
                "fsource",
                "fpeakpt",
                "fspare1",
                "fspare2",
                "fspare3",
                "fspare4",
                "fspare5",
                "fspare6",
                "fspare7",
                "fspare8",
                "fcm",
                "nt",
                "fcatx",
                "flogoff",
                "fmods",
                "fprocs",
                "flevel",
                "fsampin",
                "ffactor",
                "fmethod",
                "fzinc",
                "fwplanes",
                "fwinc",
                "fwtype",
                "fwtype",  # NOTE(review): duplicated key - second entry shadows the first in the dict; verify against spc.h
                "fresv",
            )
            # Human readable names for the fxtype x-axis unit codes
            self._xvars = [
                "Arbitrary",
                "Wavenumber (cm-1)",
                "Micrometers (um)",
                "Nanometers (nm)",
                "Seconds",
                "Minutes",
                "Hertz (Hz)",
                "Kilohertz (KHz)",
                "Megahertz (MHz)",
                "Mass (M/z)",
                "Parts per million (PPM)",
                "Days",
                "Years",
                "Raman Shift (cm-1)",
                "Raman Shift (cm-1)",
                "eV",
                "XYZ text labels in fcatxt (old 0x4D version only)",
                "Diode Number",
                "Channel",
                "Degrees",
                "Temperature (F)",
                "Temperature (C)",
                "Temperature (K)",
                "Data Points",
                "Milliseconds (mSec)",
                "Microseconds (uSec)",
                "Nanoseconds (nSec)",
                "Gigahertz (GHz)",
                "Centimeters (cm)",
                "Meters (m)",
                "Millimeters (mm)",
                "Hours",
                "Hours",
            ]
            # Human readable names for the fytype y-axis unit codes
            self._yvars = [
                "Arbitrary Intensity",
                "Interferogram",
                "Absorbance",
                "Kubelka-Monk",
                "Counts",
                "Volts",
                "Degrees",
                "Milliamps",
                "Millimeters",
                "Millivolts",
                "Log(1/R)",
                "Percent",
                "Percent",
                "Intensity",
                "Relative Intensity",
                "Energy",
                "Decibel",
                "Temperature (F)",
                "Temperature (C)",
                "Temperature (K)",
                "Index of Refraction [N]",
                "Extinction Coeff. [K]",
                "Real",
                "Imaginary",
                "Complex",
                "Complex",
                "Transmission (ALL HIGHER MUST HAVE VALLEYS!)",
                "Reflectance",
                "Arbitrary or Single Beam with Valley Peaks",
                "Emission",
                "Emission",
            ]

            self._header = dict(zip(keys, spchdr))
            n = self._header["fnsub"]  # number of sub-scans in the file

            # Bit 6 of ftflgs marks the multiple-XY-curves variant, which is not
            # implemented; also reject unexpected format version codes.
            if self._header["ftflgs"] & 64 == 64 or not (
                    75 <= self._header["fversn"] <=
                    77):  # This is the multiple XY curves in file flag.
                raise Core.StonerLoadError(
                    "Filetype not implemented yet ! ftflgs={ftflgs}, fversn={fversn}"
                    .format(**self._header))
            else:  # A single XY curve in the file.
                # Read the xdata and add it to the file.
                xdata = self._read_xdata(f)
                data = np.zeros(
                    (self._pts, (n + 1)))  # initialise the data space
                data[:, 0] = xdata  # Put in the X-Data
                column_headers = [self._xvars[self._header["fxtype"]]
                                  ]  # And label the X column correctly

                # Now we're going to read the Y-data
                data = self._read_ydata(f, data, column_headers)
                if self._header[
                        "flogoff"] != 0:  # Ok, we've got a log, so read the log header and merge into metadata
                    self._read_loginfo(f)
            # Ok now build the Stoner.Core.DataFile instance to return
            self.data = data
            # The next bit generates the metadata. We don't just copy the metadata because we need to figure out the typehints first - hence the loop
            # here to call Core.DataFile.__setitem()
            for x in self._header:
                self[x] = self._header[x]
            self.column_headers = column_headers
            if len(self.column_headers) == 2:
                self.setas = "xy"  # single x-y curve
            return self
    def _load(self, filename=None, *args, **kargs):
        """QD system file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used,
                if False, then a file dialog will be used.

        Returns:
            A copy of the itself after loading the data.

        Raises:
            StonerLoadError: If the file is not a Quantum Design data file or holds no data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        # setas collects STARTUPAXIS assignments: axis letter -> list of 1-based column indices
        setas = {}
        i = 0
        with io.open(self.filename, "r", encoding="utf-8",
                     errors="ignore") as f:  # Read filename linewise
            for i, line in enumerate(f):
                line = line.strip()
                if i == 0 and line != "[Header]":
                    raise Core.StonerLoadError("Not a Quantum Design File !")
                elif line == "[Header]" or line.startswith(";") or line == "":
                    continue  # skip the section marker, comment lines and blank lines
                elif "[Data]" in line:
                    break  # end of the header - data section follows
                elif "," not in line:
                    raise Core.StonerLoadError("No data in file!")
                parts = [x.strip() for x in line.split(",")]
                # Dispatch on the record type to build a metadata key/value pair
                if parts[1].split(":")[0] == "SEQUENCE FILE":
                    key = parts[1].split(":")[0].title()
                    value = parts[1].split(":")[1]
                elif parts[0] == "INFO":
                    if parts[1] == "APPNAME":
                        # APPNAME records carry the value before the name - swap them round
                        parts[1], parts[2] = parts[2], parts[1]
                    if len(parts) > 2:
                        key = "{}.{}".format(parts[0], parts[2])
                    else:
                        raise Core.StonerLoadError("No data in file!")
                    key = key.title()
                    value = parts[1]
                elif parts[0] in ["BYAPP", "FILEOPENTIME"]:
                    key = parts[0].title()
                    value = " ".join(parts[1:])
                elif parts[0] == "FIELDGROUP":
                    key = "{}.{}".format(parts[0], parts[1]).title()
                    value = "[{}]".format(",".join(parts[2:]))
                elif parts[0] == "STARTUPAXIS":
                    axis = parts[1][0].lower()
                    setas[axis] = setas.get(axis, []) + [int(parts[2])]
                    key = "Startupaxis-{}".format(parts[1].strip())
                    value = parts[2].strip()
                else:
                    key = parts[0] + "," + parts[1]
                    key = key.title()
                    value = " ".join(parts[2:])
                self.metadata[key] = string_to_type(value)
            else:
                # for/else: the loop never hit [Data], so there is no data section
                raise Core.StonerLoadError("No data in file!")
            if "Byapp" not in self:
                raise Core.StonerLoadError("Not a Quantum Design File !")

            # The first line after [Data] holds the comma separated column names
            column_headers = f.readline().strip().split(",")
            data = np.genfromtxt([str2bytes(l) for l in f],
                                 dtype="float",
                                 delimiter=",",
                                 invalid_raise=False)
            if data.shape[0] == 0:
                raise Core.StonerLoadError("No data in file!")
            if data.shape[1] < len(
                    column_headers
            ):  # Trap for buggy QD software not giving enough columns of data
                # Pad the missing columns with NaN
                data = np.append(
                    data,
                    np.ones(
                        (data.shape[0], len(column_headers) - data.shape[1])) *
                    np.NaN,
                    axis=1)
            elif data.shape[1] > len(column_headers):  # too much data
                # The stop index is negative here, trimming surplus columns from the right
                data = data[:, :len(column_headers) - data.shape[1]]
            self.data = data
        self.column_headers = column_headers
        # Apply any STARTUPAXIS assignments collected from the header
        s = self.setas
        for k in setas:
            for ix in setas[k]:
                s[ix - 1] = k  # convert from 1-based header index to 0-based
        self.setas = s
        return self