Beispiel #1
0
 def _parse_header(cls, filename, fp, data_objects=None, close=False, **fmt_params):
     """Parse the header line of *fp* and prepare one data object per data row.

     Merges *fmt_params* over ``cls.default_fmt_params``, reads the first
     line as the column header (prefixing the well-known parameter columns
     with ``par_``), counts the data lines, and fills each adapted data
     object with the base filename and the header list.  The file cursor
     is left at the start of the data section.  When *close* is true the
     file is closed even if an error occurs.
     """
     try:
         fmt_params = dict(cls.default_fmt_params, **fmt_params)

         # Rewind to the very beginning of the file:
         fp.seek(0)

         # Base name of the parsed file (without directories):
         basename = u(os.path.basename(filename))

         # The first line holds the column names; rename the known
         # parameter columns (a1..a5, b1..b5, c) to "par_<name>":
         raw_header = cls.parse_raw_line(fp.readline().strip(), str)
         parameter_columns = set(
             ["a%d" % n for n in range(1, 6)]
             + ["b%d" % n for n in range(1, 6)]
             + ["c"]
         )
         header = []
         for column in raw_header:
             if column in parameter_columns:
                 header.append("par_%s" % column)
             else:
                 header.append(column)

         # Count the remaining (data) lines, then restore the cursor to
         # the start of the data section:
         data_start_pos = fp.tell()
         line_count, _ = cls.get_last_line(fp)
         fp.seek(data_start_pos)

         # Make sure there is one data object per data line:
         data_objects = cls._adapt_data_object_list(
             data_objects, num_samples=line_count)

         # Attach the shared header information to every data object:
         for index in range(line_count):
             data_objects[index].update(filename=basename, header=header)
     finally:
         if close:
             fp.close()
     return data_objects
Beispiel #2
0
    def _parse_header(cls,
                      filename,
                      fp,
                      data_objects=None,
                      close=False,
                      **fmt_params):
        """Parse the ASCII header line and 2-theta range of a data file.

        Reads the first line as a header carrying sample names, inspects
        the first and last data lines to derive the 2-theta limits, and
        fills one data object per sample column.  *fmt_params* override
        ``cls.default_fmt_params``.  When *close* is true the file is
        closed before returning.
        """
        fmt_params = dict(cls.default_fmt_params, **fmt_params)
        f = fp

        # Goto start of file
        f.seek(0)

        # Get base filename; a bad filename must not abort the parse,
        # but do not swallow KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None

        # Read in the first and last data line and put the file cursor back
        # at its original position
        header = f.readline().strip()
        data_start_pos = f.tell()
        first_line = f.readline().strip()
        twotheta_count, last_line = cls.get_last_line(f)
        last_line = last_line.strip()
        f.seek(data_start_pos)

        # Extract the data from the first & last data lines:
        first_line_vals = cls.parse_raw_line(first_line, float, **fmt_params)
        last_line_vals = cls.parse_raw_line(last_line, float, **fmt_params)
        num_samples = len(first_line_vals) - 1  # first column is 2-theta
        twotheta_min = first_line_vals[0]
        twotheta_max = last_line_vals[0]
        # NOTE(review): int() truncates the step to a whole number —
        # confirm a fractional 2-theta step is really not expected here.
        twotheta_step = int((twotheta_max - twotheta_min) / twotheta_count)

        # Parse the header line; pad/truncate so there is exactly one
        # sample name per sample column:
        sample_names = cls.parse_raw_line(header, lambda s: s,
                                          **fmt_params)[1:]
        if len(sample_names) < num_samples:
            sample_names.extend([""] * (num_samples - len(sample_names)))
        if len(sample_names) > num_samples:
            sample_names = sample_names[:num_samples]

        # Adapt DataObject list
        data_objects = cls._adapt_data_object_list(data_objects,
                                                   num_samples=num_samples)

        # Fill in header info:
        for i, sample_name in enumerate(sample_names):
            data_objects[i].update(filename=basename,
                                   name=sample_name,
                                   twotheta_min=twotheta_min,
                                   twotheta_max=twotheta_max,
                                   twotheta_step=twotheta_step,
                                   twotheta_count=twotheta_count)

        if close: f.close()
        return data_objects
Beispiel #3
0
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """Parse the comma-separated header of a UDF file up to "RawScan".

        Collects header key/value pairs into keyword arguments for the
        single data object, extracting sample name, 2-theta range and
        step size explicitly.  Raises IOError on a malformed header line.
        """
        f = fp
        # A missing/undecodable filename must not abort the parse, but do
        # not swallow KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None
        # Adapt XRDFile list
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Move to the start of the file
        f.seek(0)

        # Go over the header:
        header_dict = {}

        for lineno, line in enumerate(f):
            # Start of data after this line:
            if line.strip() == "RawScan":
                data_start = f.tell()
                break
            else:
                # Break header line into separate parts, and strip trailing whitespace:
                parts = list(map(str.strip, line.split(',')))

                # If length is shorter then three, somethings wrong
                # (note: lineno in this message is 0-based)
                if len(parts) < 3:
                    raise IOError("Header of UDF file is malformed at line %d" % lineno)

                # Handle some of the header's arguments manually, the rest is
                # just passed to the data object as keyword arguments...
                if parts[0] == "SampleIdent":
                    name = parts[1]
                elif parts[0] == "DataAngleRange":
                    twotheta_min = float(parts[1])
                    twotheta_max = float(parts[2])
                elif parts[0] == "ScanStepSize":
                    twotheta_step = float(parts[1])

                # TODO extract other keys and replace with default names
                header_dict[parts[0]] = ','.join(parts[1:-1])

        twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)

        data_objects[0].update(
            filename=basename,
            name=name,
            twotheta_min=twotheta_min,
            twotheta_max=twotheta_max,
            twotheta_step=twotheta_step,
            twotheta_count=twotheta_count,
            data_start=data_start,
            **header_dict
        )

        if close: f.close()
        return data_objects
Beispiel #4
0
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """Parse a UDF file header (comma-separated lines up to "RawScan").

        Fills a single data object with the sample name, 2-theta limits,
        step size, derived point count, the data-section offset and all
        remaining header entries as keyword arguments.

        Raises:
            IOError: if a header line has fewer than three fields.
        """
        f = fp
        # Tolerate a missing/undecodable filename without swallowing
        # KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None
        # Adapt XRDFile list
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Move to the start of the file
        f.seek(0)

        # Go over the header:
        header_dict = {}

        for lineno, line in enumerate(f):
            # Start of data after this line:
            if line.strip() == "RawScan":
                data_start = f.tell()
                break
            else:
                # Break header line into separate parts, and strip trailing whitespace:
                parts = list(map(str.strip, line.split(',')))

                # If length is shorter then three, somethings wrong
                # (note: lineno in this message is 0-based)
                if len(parts) < 3:
                    raise IOError(
                        "Header of UDF file is malformed at line %d" % lineno)

                # Handle some of the header's arguments manually, the rest is
                # just passed to the data object as keyword arguments...
                if parts[0] == "SampleIdent":
                    name = parts[1]
                elif parts[0] == "DataAngleRange":
                    twotheta_min = float(parts[1])
                    twotheta_max = float(parts[2])
                elif parts[0] == "ScanStepSize":
                    twotheta_step = float(parts[1])

                # TODO extract other keys and replace with default names
                header_dict[parts[0]] = ','.join(parts[1:-1])

        twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)

        data_objects[0].update(filename=basename,
                               name=name,
                               twotheta_min=twotheta_min,
                               twotheta_max=twotheta_max,
                               twotheta_step=twotheta_step,
                               twotheta_count=twotheta_count,
                               data_start=data_start,
                               **header_dict)

        if close: f.close()
        return data_objects
Beispiel #5
0
    def _parse_header(cls, filename, fp, data_objects=None, close=False, split_columns=True, has_header=True, file_start=0, **fmt_params):
        """Parse a multi-column ASCII file header starting at *file_start*.

        When *has_header* is false the first line doubles as both header
        and first data line.  Derives the 2-theta range from the first and
        last data lines and fills one data object per sample column.
        *split_columns* is accepted for interface compatibility but is not
        used in this method.  When *close* is true the file is closed
        before returning.
        """
        fmt_params = dict(cls.default_fmt_params, **fmt_params)
        f = fp

        # Goto start of file
        f.seek(file_start)

        # Get base filename; a bad filename must not abort the parse, but
        # do not swallow KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None

        # Read in the first and last data line and put the file cursor back
        # at its original position
        header = f.readline().strip()
        if not has_header:
            f.seek(file_start) # go back to the start, we still use the first line as header, but also as data
        data_start_pos = f.tell()
        first_line = f.readline().strip()
        twotheta_count, last_line = cls.get_last_line(f)
        last_line = last_line.strip()
        f.seek(data_start_pos)

        # Extract the data from the first & last data lines:
        first_line_vals = cls.parse_raw_line(first_line, float, **fmt_params)
        last_line_vals = cls.parse_raw_line(last_line, float, **fmt_params)
        num_samples = len(first_line_vals) - 1 # first column is 2-theta
        twotheta_min = first_line_vals[0]
        twotheta_max = last_line_vals[0]
        # NOTE(review): int() truncates the step to a whole number —
        # confirm a fractional 2-theta step is really not expected here.
        twotheta_step = int((twotheta_max - twotheta_min) / twotheta_count)

        # Parse the header line; pad/truncate so there is exactly one
        # sample name per sample column:
        sample_names = cls.parse_raw_line(header, lambda s: s, **fmt_params)[1:]
        if len(sample_names) < num_samples:
            sample_names.extend([""] * (num_samples - len(sample_names)))
        if len(sample_names) > num_samples:
            sample_names = sample_names[:num_samples]

        # Adapt DataObject list
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)

        # Fill in header info:
        for i, sample_name in enumerate(sample_names):
            data_objects[i].update(
                filename=basename,
                name=sample_name,
                twotheta_min=twotheta_min,
                twotheta_max=twotheta_max,
                twotheta_step=twotheta_step,
                twotheta_count=twotheta_count
            )

        if close: f.close()
        return data_objects
Beispiel #6
0
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """Parse the fixed-layout text header up to the "SCANDATA" marker.

        Reads the 2-theta limits and step, the target element and the
        alpha1 wavelength from the leading lines (decimal commas are
        converted to dots), then scans forward to "SCANDATA" keeping the
        last non-marker line as the sample name.
        """
        f = fp

        # Tolerate a missing/undecodable filename without swallowing
        # KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None

        # Adapt XRDFile list
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Move to the start of the file
        f.seek(0)
        # Skip a line: file type header
        f.readline()
        # Read data limits
        twotheta_min = float(f.readline().replace(",", ".").strip())
        twotheta_max = float(f.readline().replace(",", ".").strip())
        twotheta_step = float(f.readline().replace(",", ".").strip())
        twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)
        # Read target element name
        # NOTE(review): the trailing newline is kept on target_type (no
        # strip) — confirm downstream code expects this.
        target_type = f.readline()
        # Read wavelength
        alpha1 = float(f.readline().replace(",", ".").strip())
        # Read up to SCANDATA and keep track of the line before,
        # it contains the sample description
        name = ""
        while True:
            line = f.readline().strip()
            if line == "SCANDATA" or line == "":
                data_start = f.tell()
                break
            else:
                name = line

        data_objects[0].update(
            filename=basename,
            name=name,
            target_type=target_type,
            alpha1=alpha1,
            twotheta_min=twotheta_min,
            twotheta_max=twotheta_max,
            twotheta_step=twotheta_step,
            twotheta_count=twotheta_count,
            data_start=data_start,
        )

        if close: f.close()
        return data_objects
Beispiel #7
0
    def parse_header(cls, filename, f=None, data_objects=None, close=False):
        """Parse the fixed-layout text header up to the "SCANDATA" marker.

        Resolves the file via ``cls._get_file``, reads the 2-theta limits
        and step, the target element and the alpha1 wavelength from the
        leading lines (decimal commas converted to dots), then scans
        forward to "SCANDATA" keeping the last non-marker line as the
        sample name.
        """
        filename, f, close = cls._get_file(filename, f=f, close=close)

        # Adapt XRDFile list
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Move to the start of the file
        f.seek(0)
        # Skip a line: file type header
        f.readline()
        # Read data limits
        twotheta_min = float(f.readline().replace(",", ".").strip())
        twotheta_max = float(f.readline().replace(",", ".").strip())
        twotheta_step = float(f.readline().replace(",", ".").strip())
        twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)
        # Read target element name (trailing newline kept, as before)
        target_type = f.readline()
        # Read wavelength
        alpha1 = float(f.readline().replace(",", ".").strip())
        # Read up to SCANDATA and keep track of the line before,
        # it contains the sample description
        name = ""
        while True:
            line = f.readline().strip()
            if line == "SCANDATA" or line == "":
                data_start = f.tell()
                break
            else:
                name = line

        data_objects[0].update(
            filename=u(os.path.basename(filename)),
            name=name,
            target_type=target_type,
            alpha1=alpha1,
            twotheta_min=twotheta_min,
            twotheta_max=twotheta_max,
            twotheta_step=twotheta_step,
            twotheta_count=twotheta_count,
            data_start=data_start,
        )

        if close: f.close()
        return data_objects
Beispiel #8
0
    def _parse_header(cls,
                      filename,
                      fp,
                      data_objects=None,
                      close=False,
                      **fmt_params):
        """Parse the column header and count the data lines of the file.

        Reads the first line as the column header, renaming the known
        parameter columns (a1..a5, b1..b5, c) to "par_<name>", counts the
        remaining lines and fills one data object per data line with the
        base filename and the header list.  The cursor is left at the
        start of the data section; when *close* is true the file is
        closed even on error.
        """
        try:
            fmt_params = dict(cls.default_fmt_params, **fmt_params)

            # Goto start of file
            fp.seek(0)

            # Get base filename:
            basename = u(os.path.basename(filename))

            # Read in the first and last data line and put the file cursor back
            # at its original position
            header = cls.parse_raw_line(fp.readline().strip(), str)
            replace = ["a%d" % i for i in range(1, 6)
                       ] + ["b%d" % i for i in range(1, 6)] + [
                           "c",
                       ]
            header = [
                "par_%s" % val if val in replace else val for val in header
            ]
            data_start_pos = fp.tell()
            line_count, _ = cls.get_last_line(fp)
            fp.seek(data_start_pos)

            # Adapt DataObject list
            data_objects = cls._adapt_data_object_list(data_objects,
                                                       num_samples=line_count)

            # Fill in header info:
            # (range instead of the Python-2-only xrange; identical here)
            for i in range(line_count):
                data_objects[i].update(filename=basename, header=header)
        finally:
            if close: fp.close()
        return data_objects
Beispiel #9
0
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """Parse the binary header of a Philips *.RD file (V3 or V5).

        Reads instrument, target/focus type, wavelengths, sample name and
        the 2-theta range from fixed byte offsets and fills a single data
        object.  The data-section offset depends on the file version.

        Raises:
            IOError: if the file is not a V3 or V5 *.RD file.
        """
        f = fp

        # Tolerate a missing/undecodable filename without swallowing
        # KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None

        # Adapt XRDFile list
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Go to the start of the file
        f.seek(0, SEEK_SET)

        # Read file format version:
        version = f.read(2).decode()

        if version in ("V3", "V5"):

            # Read diffractometer, target and focus type (each one signed byte);
            # cap() clamps out-of-range codes to the fallback entry:
            f.seek(84, SEEK_SET)
            diffractomer_type, target_type, focus_type = struct.unpack("bbb", f.read(3))
            diffractomer_type = {
                0: b"PW1800",
                1: b"PW1710 based system",
                2: b"PW1840",
                3: b"PW3710 based system",
                4: b"Undefined",
                5: b"X'Pert MPD"
            }[cap(0, diffractomer_type, 5, 4)]
            target_type = {
                0: b"Cu",
                1: b"Mo",
                2: b"Fe",
                3: b"Cr",
                4: b"Other"
            }[cap(0, target_type, 3, 4)]
            focus_type = {
                0: b"BF",
                1: b"NF",
                2: b"FF",
                3: b"LFF",
                # NOTE(review): "Unkown" is a typo for "Unknown", but the
                # value may be compared elsewhere — left unchanged.
                4: b"Unkown",
            }[cap(0, focus_type, 3, 4)]

            # Read wavelength information (three doubles):
            f.seek(94, SEEK_SET)
            alpha1, alpha2, alpha_factor = struct.unpack("ddd", f.read(24))
            # Read sample name (16 bytes, NUL-padded):
            f.seek(146, SEEK_SET)
            sample_name = u(f.read(16).replace(b"\0", b""))

            # Read data limits:
            f.seek(214)
            twotheta_step, twotheta_min, twotheta_max = struct.unpack("ddd", f.read(24))
            twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)

            # Set data start (fixed offset per file version):
            data_start = {
                "V3": 250,
                "V5": 810
            }[version]

            data_objects[0].update(
                filename=basename,
                name=sample_name,
                twotheta_min=twotheta_min,
                twotheta_max=twotheta_max,
                twotheta_step=twotheta_step,
                twotheta_count=twotheta_count,
                target_type=target_type,
                alpha1=alpha1,
                alpha2=alpha2,
                alpha_factor=alpha_factor,
                data_start=data_start,
                version=version
            )

        else:
            raise IOError("Only V3 and V5 *.RD files are supported!")

        if close: f.close()
        return data_objects
Beispiel #10
0
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """Parse the binary header of a Bruker *.RAW file (RAW1/RAW2/RAW3).

        Detects the format version from the file magic, then reads the
        per-sample metadata (sample name, 2-theta range/step/count,
        wavelengths, data offsets) and fills/extends the data object list
        accordingly.

        Raises:
            IOError: if the file is not a version 1, 2 or 3 *.RAW file.
        """
        f = fp

        # Tolerate a missing/undecodable filename without swallowing
        # KeyboardInterrupt/SystemExit (was a bare except):
        try:
            basename = u(os.path.basename(filename))
        except Exception:
            basename = None

        # Go to the start of the file
        f.seek(0, SEEK_SET)

        # Read file format version:
        # NOTE(review): on Python 3, str(f.read(4)) on a binary stream
        # yields "b'RAW '" and never matches these literals — this
        # detection only works as written on Python 2.  Confirm target
        # interpreter before changing.
        version = str(f.read(4))
        if version == "RAW ":                             version = "RAW1"
        elif version == "RAW2":                           version = "RAW2"
        elif version == "RAW1" and str(f.read(3)) == ".01": version = "RAW3"

        if version == "RAW1":

            # This format does not allow getting the exact number of samples,
            # so start with one and append where needed:
            isfollowed = 1
            num_samples = 0
            while isfollowed > 0:

                twotheta_count = int(struct.unpack("I", f.read(4))[0])
                # Check if this is an early "RAW " formatted file where the
                # "RAW " is repeated for each sample:
                # NOTE(review): unpacking from the str "RAW " requires
                # Python 2; Python 3 would need b"RAW ".
                if num_samples > 0 and twotheta_count == int(struct.unpack("I", "RAW ")[0]):
                    twotheta_count = int(struct.unpack("I", f.read(4))[0])

                # Step counting time, 2-theta step size and scanning mode:
                time_step, twotheta_step, scan_mode = struct.unpack("fff", f.read(12)) #@UnusedVariable
                # Skip 4 bytes, and read 2-theta starting position:
                f.seek(4, SEEK_CUR)
                twotheta_min, = struct.unpack("f", f.read(4))
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)
                # Skip 12 bytes
                # (contain theta, khi and phi start point for eularian craddles)
                f.seek(12, SEEK_CUR)
                # Read sample name and wavelengths:
                sample_name = cls._clean_bin_str(f.read(32))
                alpha1, alpha2 = struct.unpack("ff", f.read(8))
                # Skip 72 bytes:
                f.seek(72, SEEK_CUR)
                isfollowed, = struct.unpack("I", f.read(4))

                # Get data position and skip for now:
                data_start = f.tell()
                f.seek(twotheta_count * 4, SEEK_CUR)

                # Adapt XRDFile list
                data_objects = cls._adapt_data_object_list(
                    data_objects,
                    num_samples=(num_samples + 1),
                    only_extend=True
                )

                data_objects[num_samples].update(
                    filename=basename,
                    version=version,
                    name=sample_name,
                    time_step=time_step,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    data_start=data_start
                )

                num_samples += 1

        elif version == "RAW2":

            # Read number of sample ranges:
            num_samples, = struct.unpack("H", f.read(2))

            # Adapt XRDFile list
            data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)

            # Read sample name:
            f.seek(8, SEEK_SET)
            sample_name = cls._clean_bin_str(f.read(32))
            # Meta-data description, skip for now:
            # description = u(str(f.read(128)).replace("\0", "").strip())
            # date = u(str(f.read(10)).replace("\0", "").strip())
            # time = u(str(f.read(5)).replace("\0", "").strip())

            # Read wavelength information:
            f.seek(148, SEEK_CUR)
            target_type = u(str(f.read(2)).replace("\0", "").strip()) #@UnusedVariable
            alpha1, alpha2, alpha_factor = struct.unpack("fff", f.read(12))

            # Total runtime in seconds: (not used fttb)
            f.seek(8, SEEK_CUR)
            time_total, = struct.unpack("f", f.read(4)) #@UnusedVariable

            # Move to first sample header start:
            f.seek(256, SEEK_SET)

            # Read in per-sample meta data
            for i in range(num_samples):
                header_start = f.tell()
                header_length, twotheta_count = struct.unpack("HH", f.read(4))
                data_start = header_start + header_length

                # Read step size and start angle:
                f.seek(header_start + 12) # = 256 + 4 + 8 skipped bytes
                twotheta_step, twotheta_min = struct.unpack("ff", f.read(8))
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)

                # Read up to end of data:
                f.seek(data_start + twotheta_count * 4, SEEK_SET)

                # Update XRDFile object:
                data_objects[i].update(
                    filename=basename,
                    version=version,
                    name=sample_name,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    alpha_factor=alpha_factor,
                    data_start=data_start
                )

        elif version == "RAW3":

            # Read file status:
            f.seek(8, SEEK_SET)
            file_status = { #@UnusedVariable
                1: "done",
                2: "active",
                3: "aborted",
                4: "interrupted"
            }[int(struct.unpack("I", f.read(4))[0])]

            # Read number of samples inside this file:
            f.seek(12, SEEK_SET)
            num_samples, = struct.unpack("I", f.read(4))

            # Read in sample name:
            f.seek(326, SEEK_SET)
            sample_name = cls._clean_bin_str(f.read(60))

            # Goniometer radius:
            f.seek(564, SEEK_SET)
            radius = float(struct.unpack("f", f.read(4))[0])

            # Fixed divergence:
            f.seek(568, SEEK_SET)
            divergence = float(struct.unpack("f", f.read(4))[0])

            # Primary soller
            f.seek(576, SEEK_SET)
            soller1 = float(struct.unpack("f", f.read(4))[0])

            # Secondary soller
            f.seek(592, SEEK_SET)
            soller2 = float(struct.unpack("f", f.read(4))[0])

            # Get anode type:
            f.seek(608, SEEK_SET)
            target_type = str(f.read(4)) #@UnusedVariable

            # Get wavelength info:
            f.seek(616, SEEK_SET)
            alpha_average, alpha1, alpha2, beta, alpha_factor = (#@UnusedVariable
                struct.unpack("ddddd", f.read(8 * 5)))

            # Get total recording time:
            f.seek(664, SEEK_SET)
            time_total, = struct.unpack("f", f.read(4)) #@UnusedVariable

            # Adapt XRDFile list & skip to first block:
            data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)
            f.seek(712, SEEK_SET)

            # Read in per-sample meta data
            for i in range(num_samples):
                # Store the start of the header:
                header_start = f.tell()

                # Get header length
                f.seek(header_start + 0, SEEK_SET)
                header_length, = struct.unpack("I", f.read(4))
                assert header_length == 304, "Invalid format!"

                # Get data count and
                f.seek(header_start + 4, SEEK_SET)
                twotheta_count, = struct.unpack("I", f.read(4))

                # Get theta start positions
                f.seek(header_start + 8, SEEK_SET)
                theta_min, twotheta_min = struct.unpack("dd", f.read(8 * 2))#@UnusedVariable

                # Read step size
                f.seek(header_start + 176, SEEK_SET)
                twotheta_step, = struct.unpack("d", f.read(8))

                # Read counting time
                f.seek(header_start + 192, SEEK_SET)
                time_step, = struct.unpack("d", f.read(8))

                # Read the used wavelength
                f.seek(header_start + 240, SEEK_SET)
                alpha_used, = struct.unpack("d", f.read(8))#@UnusedVariable

                # Supplementary header size:
                f.seek(header_start + 256, SEEK_SET)
                supp_headers_size, = struct.unpack("I", f.read(4))
                data_start = header_start + header_length + supp_headers_size

                # Move to the end of the data:
                f.seek(data_start + twotheta_count * 4)

                # Calculate last data point
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count - 0.5)

                data_objects[i].update(
                    filename=basename,
                    version=version,
                    name=sample_name,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    alpha_factor=alpha_factor,
                    data_start=data_start,
                    radius=radius,
                    soller1=soller1,
                    soller2=soller2,
                    divergence=divergence
                )

        else:
            # Python-3-compatible raise (was the Python-2-only
            # "raise IOError, msg" statement form):
            raise IOError("Only verson 1, 2 and 3 *.RAW files are supported!")

        if close: f.close()
        return data_objects
Beispiel #11
0
 def _clean_bin_str(cls, val):
     """Return *val* as a unicode string with NUL padding removed.

     Operates on the raw bytes: the previous ``str(val)`` call would, on
     Python 3, render a ``bytes`` value as its ``b'...'`` repr and embed
     the quotes/prefix in the result.  Removing the NUL bytes and
     stripping whitespace before converting avoids that.
     """
     # Drop NUL padding bytes, trim surrounding whitespace, then decode.
     return u(val.replace(b"\0", b"").strip())
Beispiel #12
0
 def _clean_bin_str(cls, val):
     """Return *val* as a unicode string with NUL padding removed."""
     # Remove NUL bytes and surrounding whitespace before decoding.
     nul, empty = "\0".encode(), "".encode()
     cleaned = val.replace(nul, empty).strip()
     return u(cleaned)
Beispiel #13
0
    def parse_header(cls, filename, f=None, data_objects=None, close=False):
        """Parse the binary header of a Bruker *.RAW file (RAW1/RAW2/RAW3).

        Detects the format version from the file magic, then reads the
        per-sample metadata (sample name, 2-theta range/step/count,
        wavelengths, data offsets) and fills/extends the data object list.

        Raises:
            IOError: if the file is not a version 1, 2 or 3 *.RAW file.
        """
        filename, f, close = cls._get_file(filename, f=f, close=close)

        # Go to the start of the file
        f.seek(0, SEEK_SET)

        # Read file format version:
        # NOTE(review): on Python 3, str(f.read(4)) on a binary stream
        # yields "b'RAW '" and never matches these literals — this
        # detection only works as written on Python 2.
        version = str(f.read(4))
        if version == "RAW ":                             version = "RAW1"
        elif version == "RAW2":                           version = "RAW2"
        elif version == "RAW1" and str(f.read(3)) == ".01": version = "RAW3"

        if version == "RAW1":

            # This format does not allow getting the exact number of samples,
            # so start with one and append where needed:
            isfollowed = 1
            num_samples = 0
            while isfollowed > 0:

                twotheta_count = int(struct.unpack("I", f.read(4))[0])
                # Check if this is an early "RAW " formatted file where the
                # "RAW " is repeated for each sample:
                # NOTE(review): unpacking from the str "RAW " requires
                # Python 2; Python 3 would need b"RAW ".
                if num_samples > 0 and twotheta_count == int(struct.unpack("I", "RAW ")[0]):
                    twotheta_count = int(struct.unpack("I", f.read(4))[0])

                # Step counting time, 2-theta step size and scanning mode:
                time_step, twotheta_step, scan_mode = struct.unpack("fff", f.read(12)) #@UnusedVariable
                # Skip 4 bytes, and read 2-theta starting position:
                f.seek(4, SEEK_CUR)
                twotheta_min, = struct.unpack("f", f.read(4))
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)
                # Skip 12 bytes
                # (contain theta, khi and phi start point for eularian craddles)
                f.seek(12, SEEK_CUR)
                # Read sample name and wavelengths:
                sample_name = u(str(f.read(32)).replace("\0", "").strip())
                alpha1, alpha2 = struct.unpack("ff", f.read(8))
                # Skip 72 bytes:
                f.seek(72, SEEK_CUR)
                isfollowed, = struct.unpack("I", f.read(4))

                # Get data position and skip for now:
                data_start = f.tell()
                f.seek(twotheta_count * 4, SEEK_CUR)

                # Adapt XRDFile list
                data_objects = cls._adapt_data_object_list(
                    data_objects,
                    num_samples=(num_samples + 1),
                    only_extend=True
                )

                data_objects[num_samples].update(
                    filename=u(os.path.basename(filename)),
                    version=version,
                    name=sample_name,
                    time_step=time_step,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    data_start=data_start
                )

                num_samples += 1

        elif version == "RAW2":

            # Read number of sample ranges:
            num_samples, = struct.unpack("H", f.read(2))

            # Adapt XRDFile list
            data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)

            # Read sample name:
            f.seek(8, SEEK_SET)
            sample_name = u(str(f.read(32)).replace("\0", "").strip())
            # Meta-data description, skip for now:
            # description = u(str(f.read(128)).replace("\0", "").strip())
            # date = u(str(f.read(10)).replace("\0", "").strip())
            # time = u(str(f.read(5)).replace("\0", "").strip())

            # Read wavelength information:
            f.seek(148, SEEK_CUR)
            target_type = u(str(f.read(2)).replace("\0", "").strip()) #@UnusedVariable
            alpha1, alpha2, alpha_factor = struct.unpack("fff", f.read(12))

            # Total runtime in seconds: (not used fttb)
            f.seek(8, SEEK_CUR)
            time_total, = struct.unpack("f", f.read(4)) #@UnusedVariable

            # Move to first sample header start:
            f.seek(256, SEEK_SET)

            # Read in per-sample meta data
            for i in range(num_samples):
                header_start = f.tell()
                header_length, twotheta_count = struct.unpack("HH", f.read(4))
                data_start = header_start + header_length

                # Read step size and start angle:
                f.seek(header_start + 12) # = 256 + 4 + 8 skipped bytes
                twotheta_step, twotheta_min = struct.unpack("ff", f.read(8))
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)

                # Read up to end of data:
                f.seek(data_start + twotheta_count * 4, SEEK_SET)

                # Update XRDFile object:
                data_objects[i].update(
                    filename=u(os.path.basename(filename)),
                    version=version,
                    name=sample_name,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    alpha_factor=alpha_factor,
                    data_start=data_start
                )

        elif version == "RAW3":

            # Read file status:
            # ("I" instead of "H": the format code must match the 4 bytes
            # read, otherwise struct.unpack raises struct.error)
            file_status = { #@UnusedVariable
                1: "done",
                2: "active",
                3: "aborted",
                4: "interrupted"
            }[int(struct.unpack("I", f.read(4))[0])]

            # Read number of samples inside this file:
            # ("I" instead of "H" for the same reason as above)
            num_samples, = struct.unpack("I", f.read(4))

            # Adapt XRDFile list
            data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)

            # Read in sample name:
            f.seek(326, SEEK_SET)
            sample_name = str(f.read(60))

            # Get anode type:
            f.seek(608, SEEK_SET)
            target_type = str(f.read(4)) #@UnusedVariable

            # Get wavelength info:
            f.seek(616, SEEK_SET)
            alpha_average, alpha1, alpha2, beta, alpha_factor = (#@UnusedVariable
                struct.unpack("ddddd", f.read(8 * 5)))


            # Get total recording time:
            time_total, = struct.unpack("f", f.read(4)) #@UnusedVariable

            # Read in per-sample meta data
            for i in range(num_samples):
                # Store the start of the header:
                header_start = f.tell()

                # Get header length, data count and theta start position
                header_length, twotheta_count = struct.unpack("HH", f.read(4))
                theta_min, twotheta_min = struct.unpack("dd", f.read(8 * 2))#@UnusedVariable
                data_start = header_start + header_length

                # Read step size
                f.seek(header_start + 176, SEEK_SET)
                twotheta_step, = struct.unpack("d", f.read(8))

                # Read counting time
                f.seek(header_start + 192, SEEK_SET)
                time_step, = struct.unpack("d", f.read(8))

                # Read the used wavelength
                f.seek(header_start + 240, SEEK_SET)
                alpha_used, = struct.unpack("d", f.read(8))#@UnusedVariable

                # Move to the end of the data:
                f.seek(data_start + twotheta_count * 4)

                # Calculate the last data point; the original code never
                # assigned twotheta_max in this branch, which raised a
                # NameError in the update() call below.  Computed here the
                # same way as in the RAW1/RAW2 branches.
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)

                data_objects[i].update(
                    filename=u(os.path.basename(filename)),
                    version=version,
                    name=sample_name,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    alpha_factor=alpha_factor,
                    data_start=data_start
                )

        else:
            # Python-3-compatible raise (was the Python-2-only
            # "raise IOError, msg" statement form):
            raise IOError("Only verson 1, 2 and 3 *.RAW files are supported!")

        if close: f.close()
        return data_objects
Beispiel #14
0
 def _clean_bin_str(cls, val):
     """Decode a NUL-padded binary field into a clean unicode string."""
     # Strip out the NUL padding first, then trim whitespace and decode.
     without_nuls = val.replace("\0".encode(), "".encode())
     return u(without_nuls.strip())