def _load(self, filename, *args, **kargs):
        """File loader for PinkLib.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used;
                if False, a file dialog will be used.

        Returns:
            A copy of itself after loading the data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        ix = 0
        with io.open(self.filename, "r", errors="ignore",
                     encoding="utf-8") as f:  # Read filename linewise
            if not re.compile(r"\d{1,2}/\d{1,2}/\d{4}").match(f.readline()):
                raise Core.StonerLoadError(
                    "Not a BirgeIVFile as no date on first line")
            data = f.readlines()
            expected = ["Vo(-))", "Vo(+))", "Ic(+)", "Ic(-)"]
            for l, m in zip(data[-4:], expected):
                if not l.startswith(m):
                    raise Core.StonerLoadError(
                        "Not a BirgeIVFile as wrong footer line")
                key = l[:len(m)]
                val = l[len(m):]
                if "STDEV" in val:
                    ix2 = val.index("STDEV")
                    key2 = val[ix2:ix2 + 4 + len(key)]
                    val2 = val[ix2 + 4 + len(key):]
                    self.metadata[key2] = string_to_type(val2.strip())
                    val = val[:ix2]
                self.metadata[key] = string_to_type(val.strip())
            for ix, line in enumerate(data):  # Scan the remaining lines to get metadata
                if ":" in line:
                    parts = line.split(":")
                    self.metadata[parts[0].strip()] = string_to_type(
                        parts[1].strip())
                elif "," in line:
                    for part in line.split(","):
                        parts = part.split(" ")
                        self.metadata[parts[0].strip()] = string_to_type(
                            parts[1].strip())
                elif line.startswith("H "):
                    self.metadata["H"] = string_to_type(
                        line.split(" ")[1].strip())
                else:
                    headers = [x.strip() for x in line.split(" ")]
                    break
            else:
                raise Core.StonerLoadError("Oops ran off the end of the file!")
        self.data = np.genfromtxt(self.filename, skip_header=ix + 2, skip_footer=4)
        self.column_headers = headers

        self.setas = "xy"
        return self
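
The loader above pulls the header and footer metadata apart by hand and then hands the numeric block to np.genfromtxt via skip_header/skip_footer. A minimal standalone sketch of that genfromtxt technique, on invented toy lines rather than a real BirgeIV file:

import numpy as np

# Invented toy lines standing in for a BirgeIV-style file: a date line, one
# metadata line, the numeric block, and one footer metadata line.
lines = [
    "01/02/2003",
    "H 0.5",
    "1.0 2.0",
    "3.0 4.0",
    "Ic(+) 1.2e-3",
]
# Skip the two header lines and the single footer line; keep only the numbers.
data = np.genfromtxt(lines, skip_header=2, skip_footer=1)
print(data.shape)  # (2, 2)
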
Example #2
 def on_load_process(self, tmp):
     """Carry out processing on a newly loaded file to set means and extra metadata."""
     for p in self.pattern:
         if isinstance(p, _pattern_type):
             m = p.search(tmp.filename)
             if m is not None:
                 for k in m.groupdict():
                     tmp.metadata[k] = string_to_type(m.group(k))
     if self.read_means:  # Add mean and standard deviations to the metadata
         if len(tmp) == 0:
             pass
         elif len(tmp) == 1:
             for h in tmp.column_headers:
                 tmp[h] = tmp.column(h)[0]
                 tmp["{}_stdev".format(h)] = None
         else:
             for h in tmp.column_headers:
                 try:
                     tmp[h] = mean(masked_invalid(tmp.column(h)))
                     tmp["{}_stdev".format(h)] = std(
                         masked_invalid(tmp.column(h)))
                 except ValueError:
                     continue
     tmp["Loaded from"] = tmp.filename
     return tmp
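
on_load_process uses numpy.ma.masked_invalid so that NaN or infinite entries do not poison the column means and standard deviations. A small self-contained sketch of that idea, independent of the Stoner metadata machinery:

import numpy as np
from numpy import mean, std
from numpy.ma import masked_invalid

col = np.array([1.0, 2.0, np.nan, 4.0])
# masked_invalid hides the NaN, so the statistics cover the finite entries only.
print(mean(masked_invalid(col)))  # 2.333...
print(std(masked_invalid(col)))   # standard deviation of the three valid values
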
Example #3
    def _load(self, filename=None, *args, **kargs):
        """Load an OpenGDA file.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used;
                if False, a file dialog will be used.

        Returns:
            A copy of itself after loading the data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        i = 0
        with io.open(self.filename, "r", errors="ignore", encoding="utf-8") as f:
            for i, line in enumerate(f):
                line = line.strip()
                if i == 0 and line != "&SRS":
                    raise Core.StonerLoadError("Not a GDA File from Rasor ?" + str(line))
                if "&END" in line:
                    break
                parts = line.split("=")
                if len(parts) != 2:
                    continue
                key = parts[0]
                value = parts[1].strip()
                self.metadata[key] = string_to_type(value)
            column_headers = f.readline().strip().split("\t")
            self.data = np.genfromtxt([str2bytes(l) for l in f], dtype="float", invalid_raise=False)
        self.column_headers = column_headers
        return self
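
The GDA loader consumes the &SRS ... &END header itself and then passes the rest of the open file straight to np.genfromtxt. A cut-down sketch of the same pattern on an invented file body (str2bytes above is a library compatibility helper; plain str lines are used here instead):

import io
import numpy as np

f = io.StringIO("&SRS\nkey=value\n&END\ncol_a\tcol_b\n1\t2\n3\t4\n")
metadata = {}
for line in f:  # consume the header lines ourselves
    line = line.strip()
    if "&END" in line:
        break
    if "=" in line:
        key, _, value = line.partition("=")
        metadata[key] = value
column_headers = f.readline().strip().split("\t")
# genfromtxt picks up the iterator exactly where the header parsing stopped.
data = np.genfromtxt(f, invalid_raise=False)
print(metadata, column_headers, data.shape)  # {'key': 'value'} ['col_a', 'col_b'] (2, 2)
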
Example #4
    def _load(self, filename=None, *args, **kargs):
        """Reads an XRD Core.DataFile as produced by a Bruker diffractometer.

        Args:
            filename (string or bool):
                File to load. If None then the existing filename is used; if False, a file dialog will be used.

        Returns:
            A copy of itself after loading the data.

        Notes:
            The format is INI-file like, but not close enough for standard INI-file processing - in particular
            there can be multiple sections with the same name (!)
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        sh = re.compile(r"\[(.+)\]")  # Regexp to grab section name
        with io.open(self.filename, errors="ignore", encoding="utf-8") as f:  # Read filename linewise
            if f.readline().strip() != ";RAW4.00":  # Check we have the correct file format
                raise Core.StonerLoadError("File Format Not Recognized !")
            drive = 0
            for line in f:  # for each line
                m = sh.search(line)
                if m:  # This is a new section
                    section = m.group(1)
                    if section == "Drive":  # If this is a Drive section we need to know which Drive Section it is
                        section = section + str(drive)
                        drive = drive + 1
                    elif section == "Data":  # Data section contains the business but has a redundant first line
                        f.readline()
                    for line in f:  # Now start reading lines in this section...
                        if line.strip() == "":
                            # A blank line marks the end of the section, so go back to the outer loop which will
                            # handle a new section
                            break
                        if section == "Data":  # In the Data section read angle,counts pairs
                            parts = line.split(",")
                            angle = parts[0].strip()
                            counts = parts[1].strip()
                            dataline = np.array([float(angle), float(counts)])
                            self.data = np.append(self.data, dataline)
                        else:  # Other sections contain metadata
                            parts = line.split("=")
                            key = parts[0].strip()
                            data = parts[1].strip()
                            # Keynames in main metadata are section:key - use the Core.DataFile magic to do type
                            # determination
                            self[section + ":" + key] = string_to_type(data)
            column_headers = ["Angle", "Counts"]  # Assume the columns were Angles and Counts

        self.data = np.reshape(self.data, (-1, 2))
        self.setas = "xy"
        self.four_bounce = self["HardwareConfiguration:Monochromator"] == 1
        self.column_headers = column_headers
        if kargs.pop("Q", False):
            self.to_Q()
        return self
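
Because the RAW header can repeat a section name (several [Drive] blocks), the loader appends a running counter to keep the metadata keys unique. A small sketch of that renaming step on invented section contents:

import re

sh = re.compile(r"\[(.+)\]")  # grab the section name
lines = ["[Drive]", "speed=1", "", "[Drive]", "speed=2", ""]  # invented sections
drive = 0
section = None
metadata = {}
for line in lines:
    m = sh.search(line)
    if m:  # a new section starts here
        section = m.group(1)
        if section == "Drive":  # disambiguate repeated Drive sections
            section = section + str(drive)
            drive += 1
    elif "=" in line and section:
        key, _, value = line.partition("=")
        metadata["{}:{}".format(section, key)] = value
print(metadata)  # {'Drive0:speed': '1', 'Drive1:speed': '2'}
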
Example #5
    def _load(self, filename, *args, **kargs):
        """Private loader method."""
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename

        datastart = -1
        dataend = -1
        delimiter = None  # default to whitespace-delimited unless a comma is seen in the data

        i = 0
        with io.open(self.filename, "r", errors="ignore", encoding="utf-8") as data:
            if "******** EasyPlot save file ********" not in data.read(1024):
                raise Core.StonerLoadError("Not an EasyPlot Save file?")
            data.seek(0)
            for i, line in enumerate(data):
                line = line.strip()
                if line == "":
                    continue
                if line[0] not in "-0123456789" and datastart > 0 and dataend < 0:
                    dataend = i
                if line.startswith('"') and ":" in line:
                    parts = [x.strip() for x in line.strip('"').split(":")]
                    self[parts[0]] = string_to_type(":".join(parts[1:]))
                elif line.startswith("/"):  # command
                    parts = [x.strip('"') for x in next(csv.reader([line], delimiter=" ")) if x != ""]
                    cmd = parts[0].strip("/")
                    if len(cmd) > 1:
                        cmdname = "_{}_cmd".format(cmd)
                        if cmdname in dir(self):  # If this command is implemented as a function run it
                            cmd = getattr(self, "_{}_cmd".format(cmd))
                            cmd(parts[1:])
                        else:
                            if len(parts[1:]) > 1:
                                cmd = cmd + "." + parts[1]
                                value = ",".join(parts[2:])
                            elif len(parts[1:]) == 1:
                                value = parts[1]
                            else:
                                value = True
                            self[cmd] = value
                elif line[0] in "-0123456789" and datastart < 0:  # start of data
                    datastart = i
                    if "," in line:
                        delimiter = ","
                    else:
                        delimiter = None
        if dataend < 0:
            dataend = i
        self.data = np.genfromtxt(self.filename, skip_header=datastart, skip_footer=i - dataend, delimiter=delimiter)
        if self.data.shape[1] == 2:
            self.setas = "xy"
        return self
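
The /command lines are tokenised with csv.reader so that quoted arguments containing spaces survive as single tokens. A minimal sketch of that parsing step on an invented command line:

import csv

line = '/et g "My graph title" 1'  # invented EasyPlot-style command line
parts = [x.strip('"') for x in next(csv.reader([line], delimiter=" ")) if x != ""]
print(parts)  # ['/et', 'g', 'My graph title', '1']
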
Example #6
        def _load(self, filename=None, *args, **kargs):
            """TDMS file loader routine.

            Args:
                filename (string or bool): File to load. If None then the existing filename is used;
                    if False, a file dialog will be used.

            Returns:
                A copy of itself after loading the data.
            """
            if filename is None or not filename:
                self.get_filename("r")
            else:
                self.filename = filename
            # Open the file and read the main file header and unpack into a dict
            try:
                f = TdmsFile(self.filename)

                column_headers = []
                data = np.array([])

                for grp in f.objects.keys():
                    if grp == "/":
                        pass  # skip the root group
                    elif grp == "/'TDI Format 1.5'":
                        metadata = f.object("TDI Format 1.5")
                        for k, v in metadata.properties.items():
                            self.metadata[k] = string_to_type(str(v))
                    else:
                        if f.objects[grp].has_data:
                            chnl = grp.split("/")[-1]
                            chnl = chnl.strip().strip("'")
                            column_headers.append(chnl)
                            if data.size == 0:
                                data = f.objects[grp].data
                            else:
                                data = np.column_stack(
                                    [data, f.objects[grp].data])
                self.data = data
                self.column_headers = column_headers
            except Exception:
                from traceback import format_exc

                raise Core.StonerLoadError("Not a TDMS File \n{}".format(
                    format_exc()))

            return self
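
This snippet appears to target the pre-1.0 npTDMS API (TdmsFile(...).objects and .object()); newer npTDMS releases expose groups and channels instead. Independently of that, the incremental np.column_stack pattern used to grow the data block looks like this sketch on invented channel data:

import numpy as np

# Invented channel data standing in for the TDMS groups.
channels = {"Current": np.array([1.0, 2.0, 3.0]),
            "Voltage": np.array([0.1, 0.2, 0.3])}
data = np.array([])
column_headers = []
for name, values in channels.items():
    column_headers.append(name)
    if data.size == 0:  # the first channel becomes the first column
        data = values
    else:  # later channels are stacked on as extra columns
        data = np.column_stack([data, values])
print(column_headers, data.shape)  # ['Current', 'Voltage'] (3, 2)
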
    def _load(self, filename=None, *args, **kargs):
        """Data loader function for 340 files."""
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename

        with io.open(self.filename, "rb") as data:
            keys = []
            vals = []
            for line in data:
                line = bytes2str(line)
                if line.strip() == "":
                    break
                parts = [p.strip() for p in line.split(":")]
                if len(parts) != 2:
                    raise Core.StonerLoadError(
                        "Header doesn't contain two parts at {}".format(
                            line.strip()))
                else:
                    keys.append(parts[0])
                    vals.append(parts[1])
            else:
                raise Core.StonerLoadError("Overran the end of the file")
            if keys != [
                    "Sensor Model",
                    "Serial Number",
                    "Data Format",
                    "SetPoint Limit",
                    "Temperature coefficient",
                    "Number of Breakpoints",
            ]:
                raise Core.StonerLoadError(
                    "Header did not contain recognised keys.")
            for (k, v) in zip(keys, vals):
                v = v.split()[0]
                self.metadata[k] = string_to_type(v)
            headers = bytes2str(next(data)).strip().split()
            column_headers = headers[1:]
            dat = np.genfromtxt(data)
            self.data = dat[:, 1:]
        self.column_headers = column_headers
        return self
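
The header parse leans on Python's for/else: the else branch only runs when the loop never breaks, which is how the loader notices that it has overrun the file. A minimal sketch of that control flow:

def first_blank(lines):
    """Return the index of the first blank line, or raise if there is none."""
    for i, line in enumerate(lines):
        if line.strip() == "":
            break  # end of the header block
    else:  # the loop never broke, so we overran the input
        raise ValueError("Overran the end of the input")
    return i

print(first_blank(["Sensor Model: X", "Serial Number: 1", "", "1 2 3"]))  # 2
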
Example #8
 def _load(self, filename=None, *args, **kargs):
     """Load function. File format has space delimited columns from row 3 onwards."""
     if filename is None or not filename:
         self.get_filename("r")
     else:
         self.filename = filename
     i = [0, 0, 0, 0]
     with io.open(self.filename, "r", errors="ignore",
                  encoding="utf-8") as data:  # Slightly ugly text handling
         for i[0], line in enumerate(data):
             if i[0] == 0 and line.strip() != "## mda2ascii 1.2 generated output":
                 # Bail out if we don't like the header
                 raise Core.StonerLoadError("Not an mda2ascii file")
             line = line.strip()
             if "=" in line:
                 parts = line[2:].split("=")
                 self[parts[0].strip()] = string_to_type("".join(
                     parts[1:]).strip())
             elif line.startswith("#  Extra PV:"):
                 # Onto the next metadata bit
                 break
         pvpat = re.compile(r"^#\s+Extra\s+PV\s\d+\:(.*)")
         for i[1], line in enumerate(data):
             if line.strip() == "":
                 continue
             elif line.startswith("# Extra PV"):
                 res = pvpat.match(line)
                 bits = [
                     b.strip().strip(r'"') for b in res.group(1).split(",")
                 ]
                 if bits[1] == "":
                     key = bits[0]
                 else:
                     key = bits[1]
                 if len(bits) > 3:
                     key = key + " ({})".format(bits[3])
                 self[key] = string_to_type(bits[2])
             else:
                 break  # End of Extra PV stuff
         else:
             raise Core.StonerLoadError("Overran Extra PV Block")
         for i[2], line in enumerate(data):
             line = line.strip()
             if line.strip() == "":
                 continue
             elif line.startswith("# Column Descriptions:"):
                 break  # Start of column headers now
             elif "=" in line:
                 parts = line[2:].split("=")
                 self[parts[0].strip()] = string_to_type("".join(
                     parts[1:]).strip())
         else:
             raise Core.StonerLoadError(
                 "Overran end of scan header before column descriptions")
         colpat = re.compile(r"#\s+\d+\s+\[([^\]]*)\](.*)")
         column_headers = []
         for i[3], line in enumerate(data):
             res = colpat.match(line)
             line = line.strip()
             if line.strip() == "":
                 continue
             elif line.startswith("# 1-D Scan Values"):
                 break  # Start of data
             elif res is not None:
                 if "," in res.group(2):
                     bits = [b.strip() for b in res.group(2).split(",")]
                     if bits[-2] == "":
                         colname = bits[0]
                     else:
                         colname = bits[-2]
                     if bits[-1] != "":
                         colname += " ({})".format(bits[-1])
                     if colname in column_headers:
                         colname = "{}:{}".format(bits[0], colname)
                 else:
                     colname = res.group(1).strip()
                 column_headers.append(colname)
         else:
             raise Core.StonerLoadError(
                 "Overran the end of the file without reading data")
     self.data = np.genfromtxt(self.filename,
                               skip_header=sum(i))  # so that's ok then !
     self.column_headers = column_headers
     return self
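
The column headers are recovered with the colpat regular expression, which peels the bracketed descriptor off the '# Column Descriptions:' lines. A small sketch of that match on an invented description line:

import re

colpat = re.compile(r"#\s+\d+\s+\[([^\]]*)\](.*)")
line = "#    2  [1-D Detector   1]  mca1.VAL, counts,"  # invented description line
res = colpat.match(line)
bits = [b.strip() for b in res.group(2).split(",")]
colname = bits[-2] if bits[-2] != "" else bits[0]
if bits[-1] != "":
    colname += " ({})".format(bits[-1])
print(res.group(1), "->", colname)  # 1-D Detector   1 -> counts
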
    def _load(self, filename=None, *args, **kargs):
        """QD system file loader routine.

        Args:
            filename (string or bool): File to load. If None then the existing filename is used;
                if False, a file dialog will be used.

        Returns:
            A copy of itself after loading the data.
        """
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename
        setas = {}
        i = 0
        with io.open(self.filename, "r", encoding="utf-8",
                     errors="ignore") as f:  # Read filename linewise
            for i, line in enumerate(f):
                line = line.strip()
                if i == 0 and line != "[Header]":
                    raise Core.StonerLoadError("Not a Quantum Design File !")
                elif line == "[Header]" or line.startswith(";") or line == "":
                    continue
                elif "[Data]" in line:
                    break
                elif "," not in line:
                    raise Core.StonerLoadError("No data in file!")
                parts = [x.strip() for x in line.split(",")]
                if parts[1].split(":")[0] == "SEQUENCE FILE":
                    key = parts[1].split(":")[0].title()
                    value = parts[1].split(":")[1]
                elif parts[0] == "INFO":
                    if parts[1] == "APPNAME":
                        parts[1], parts[2] = parts[2], parts[1]
                    if len(parts) > 2:
                        key = "{}.{}".format(parts[0], parts[2])
                    else:
                        raise Core.StonerLoadError("No data in file!")
                    key = key.title()
                    value = parts[1]
                elif parts[0] in ["BYAPP", "FILEOPENTIME"]:
                    key = parts[0].title()
                    value = " ".join(parts[1:])
                elif parts[0] == "FIELDGROUP":
                    key = "{}.{}".format(parts[0], parts[1]).title()
                    value = "[{}]".format(",".join(parts[2:]))
                elif parts[0] == "STARTUPAXIS":
                    axis = parts[1][0].lower()
                    setas[axis] = setas.get(axis, []) + [int(parts[2])]
                    key = "Startupaxis-{}".format(parts[1].strip())
                    value = parts[2].strip()
                else:
                    key = parts[0] + "," + parts[1]
                    key = key.title()
                    value = " ".join(parts[2:])
                self.metadata[key] = string_to_type(value)
            else:
                raise Core.StonerLoadError("No data in file!")
            if "Byapp" not in self:
                raise Core.StonerLoadError("Not a Quantum Design File !")

            column_headers = f.readline().strip().split(",")
            data = np.genfromtxt([str2bytes(l) for l in f],
                                 dtype="float",
                                 delimiter=",",
                                 invalid_raise=False)
            if data.shape[0] == 0:
                raise Core.StonerLoadError("No data in file!")
            if data.shape[1] < len(column_headers):  # Trap for buggy QD software not giving enough columns of data
                data = np.append(
                    data,
                    np.ones((data.shape[0], len(column_headers) - data.shape[1])) * np.nan,
                    axis=1)
            elif data.shape[1] > len(column_headers):  # too much data
                data = data[:, :len(column_headers) - data.shape[1]]
            self.data = data
        self.column_headers = column_headers
        s = self.setas
        for k in setas:
            for ix in setas[k]:
                s[ix - 1] = k
        self.setas = s
        return self
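
The STARTUPAXIS entries are collected into a dict of axis letter to 1-based column numbers and only applied once the headers are known. A sketch of that final mapping step on invented values, with a plain list standing in for the Stoner-specific self.setas:

# Invented STARTUPAXIS entries: axis letter -> list of 1-based column numbers.
setas = {"x": [1], "y": [3]}
column_headers = ["Temperature", "Field", "Moment"]
s = ["."] * len(column_headers)  # '.' meaning "unassigned", standing in for self.setas
for k in setas:
    for ix in setas[k]:
        s[ix - 1] = k  # STARTUPAXIS column numbers are 1-based
print(dict(zip(column_headers, s)))  # {'Temperature': 'x', 'Field': '.', 'Moment': 'y'}
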
Example #10
    def _load(self, filename=None, *args, **kargs):
        """Load function for OOMMF OVF files (rectangular mesh, versions 1.0 and 2.0)."""
        if filename is None or not filename:
            self.get_filename("r")
        else:
            self.filename = filename

        self._ptr = 0
        with io.open(self.filename, "r", errors="ignore",
                     encoding="utf-8") as data:  # Slightly ugly text handling
            line = next(data)
            self._ptr += len(line)
            line = line.strip()
            if "OOMMF: rectangular mesh" in line:
                if "v1.0" in line:
                    self["version"] = 1
                elif "v2.0" in line:
                    self["version"] = 2
                else:
                    raise StonerLoadError(
                        "Cannot determine version of OOMMF file")
            else:  # bail out if we don't like the header
                raise StonerLoadError(
                    "Not an OOMMF OVF file: opening line was {}".format(line))
            pattern = re.compile(r"#\s*([^\:]+)\:\s+(.*)$")
            i = None
            for i, line in enumerate(data):
                self._ptr += len(line)
                line = line.strip()
                if line.startswith("# Begin: Data"):  # marks the start of the data block
                    break
                elif line.startswith("# Begin:") or line.startswith("# End:"):
                    continue
                else:
                    res = pattern.match(line)
                    if res is not None:
                        key = res.group(1)
                        val = res.group(2)
                        self[key] = string_to_type(val)
                    else:
                        raise StonerLoadError("Failed to understand metadata")
            fmt = re.match(r".*Data\s+(.*)", line).group(1).strip()
            assertion(
                (self["meshtype"] == "rectangular"),
                "Sorry, only OVF files with rectangular meshes are currently supported.",
            )
            if self["version"] == 1:
                if self["meshtype"] == "rectangular":
                    self["valuedim"] = 3
                else:
                    self["valuedim"] = 6
            uvwdata = self._read_uvwdata(self.filename, fmt, i)

        x = (np.linspace(self["xmin"], self["xmax"], self["xnode"] + 1)[:-1] +
             self["xbase"]) * 1e9
        y = (np.linspace(self["ymin"], self["ymax"], self["ynode"] + 1)[:-1] +
             self["ybase"]) * 1e9
        z = (np.linspace(self["zmin"], self["zmax"], self["znode"] + 1)[:-1] +
             self["zbase"]) * 1e9
        (y, z, x) = (np.ravel(i) for i in np.meshgrid(y, z, x))
        self.data = np.column_stack((x, y, z, uvwdata))
        column_headers = ["X (nm)", "Y (nm)", "Z (nm)", "U", "V", "W"]
        self.setas = "xyzuvw"
        self.column_headers = column_headers
        return self
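
The OVF loader rebuilds the sample grid from the header bounds: one linspace per axis, a meshgrid in (y, z, x) order, then a column_stack against the vector data. A reduced sketch of that grid construction with invented bounds and a 2x2x2 mesh:

import numpy as np

# Invented header values: axis bounds in metres, node counts and base offsets.
hdr = {"xmin": 0.0, "xmax": 2e-9, "xnode": 2, "xbase": 0.5e-9,
       "ymin": 0.0, "ymax": 2e-9, "ynode": 2, "ybase": 0.5e-9,
       "zmin": 0.0, "zmax": 2e-9, "znode": 2, "zbase": 0.5e-9}
x = (np.linspace(hdr["xmin"], hdr["xmax"], hdr["xnode"] + 1)[:-1] + hdr["xbase"]) * 1e9
y = (np.linspace(hdr["ymin"], hdr["ymax"], hdr["ynode"] + 1)[:-1] + hdr["ybase"]) * 1e9
z = (np.linspace(hdr["zmin"], hdr["zmax"], hdr["znode"] + 1)[:-1] + hdr["zbase"]) * 1e9
(y, z, x) = (np.ravel(i) for i in np.meshgrid(y, z, x))  # one entry per mesh point
uvw = np.zeros((x.size, 3))  # placeholder vector data in place of the OVF payload
table = np.column_stack((x, y, z, uvw))
print(table.shape)  # (8, 6)
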