class WLDParser(GenericXYCSVParser):
    """ ASCII *.WLD format parser """
    # Human-readable description shown in file-chooser dialogs.
    description = "Wavelength distribution file"
    # Case-insensitive glob pattern(s) used to match files for this parser.
    extensions = get_case_insensitive_glob("*.WLD")
    pass # end of class
class CSVParser(GenericXYCSVParser):
    """ ASCII *.DAT, *.CSV and *.TAB format parser """
    # Human-readable description shown in file-chooser dialogs.
    description = "ASCII XRD data"
    # Case-insensitive glob pattern(s) used to match files for this parser.
    extensions = get_case_insensitive_glob("*.DAT", "*.CSV", "*.TAB")
    pass # end of class
class EXCParser(GenericXYCSVParser):
    """ ASCII *.EXC exclusion range file parser """
    # NOTE: docstring corrected — it was a copy-paste of the *.DAT/*.CSV
    # parser's docstring, but this class handles *.EXC exclusion range files.
    description = "Exclusion range file"
    # Case-insensitive glob pattern(s) used to match files for this parser.
    extensions = get_case_insensitive_glob("*.EXC")
    pass # end of class
def get_file_chooser_kwags():
    """
    Build the keyword arguments for constructing a SAVE-type
    Gtk file-chooser dialog (used as a test/helper fixture).
    """
    chooser_kwargs = {
        "action": Gtk.FileChooserAction.SAVE,
        "title": "The dialog title",
        "parent": Gtk.Window(),
        "current_name": "suggested_file_name",
        "current_folder": os.path.expanduser("~"),
        "extra_widget": Gtk.Label(label="Test Label"),
        "filters": [("Text File", get_case_insensitive_glob("*.txt"))],
        "multiple": False,
        "confirm_overwrite": True,
    }
    return chooser_kwargs
def get_file_chooser_kwags():
    """
    Return a dict of keyword arguments describing a SAVE-type Gtk
    file-chooser dialog (title, parent window, suggested file name,
    starting folder, an extra label widget, a *.txt filter, single
    selection and overwrite confirmation).
    """
    return dict(
        action=Gtk.FileChooserAction.SAVE,
        title="The dialog title",
        parent=Gtk.Window(),
        current_name="suggested_file_name",
        current_folder=os.path.expanduser("~"),
        extra_widget=Gtk.Label(label="Test Label"),
        filters=[
            ("Text File", get_case_insensitive_glob("*.txt"))
        ],
        multiple=False,
        confirm_overwrite=True
    )
class ComponentsController(ChildObjectListStoreController):
    """ Controller for the components ObjectListStore """
    # Name of the model property holding the tree-model backing store.
    treemodel_property_name = "components"
    treemodel_class_type = Component
    columns = [ ("Component name", "c_name") ]
    delete_msg = "Deleting a component is irreversible!\nAre You sure you want to continue?"
    # File filters offered by the import/export dialogs.
    file_filters = [("Component file", get_case_insensitive_glob("*.CMP")), ]
    obj_type_map = [
        (Component, EditComponentView, EditComponentController),
    ]

    def load_components(self, filename):
        """
        Import components from `filename` and substitute them for the
        currently selected components, one-for-one. The number of imported
        components must match the number of selected components; otherwise
        an information dialog is shown and nothing is replaced.
        """
        old_comps = self.get_selected_objects()
        if old_comps:
            num_oc = len(old_comps)
            new_comps = list()
            for comp in Component.load_components(filename, parent=self.model):
                # Resolve inter-component JSON references before use:
                comp.resolve_json_references()
                new_comps.append(comp)
            num_nc = len(new_comps)
            if num_oc != num_nc:
                DialogFactory.get_information_dialog(
                    "The number of components to import must equal the number of selected components!"
                ).run()
                return
            else:
                # Clear the selection before mutating the list in place:
                self.select_object(None)
                logger.info("Importing components...")
                # replace component(s):
                for old_comp, new_comp in zip(old_comps, new_comps):
                    i = self.model.components.index(old_comp)
                    self.model.components[i] = new_comp
        else:
            DialogFactory.get_information_dialog(
                "No components selected to replace!"
            ).run()

    # ------------------------------------------------------------
    #      GTK Signal handlers
    # ------------------------------------------------------------
    def on_save_object_clicked(self, event):
        """ Export the selected components via a save dialog. """
        def on_accept(dialog):
            logger.info("Exporting components...")
            Component.save_components(self.get_selected_objects(), filename=dialog.filename)
        DialogFactory.get_save_dialog(
            "Export components", parent=self.view.get_toplevel(),
            filters=self.file_filters
        ).run(on_accept)
        return True

    def on_load_object_clicked(self, event):
        """ Import components (replacing the selection) via a load dialog. """
        def on_accept(dialog):
            self.load_components(dialog.filename)
        DialogFactory.get_load_dialog(
            "Import components", parent=self.view.get_toplevel(),
            filters=self.file_filters
        ).run(on_accept)
        return True

    pass #end of class
class RDParser(XRDParserMixin, BaseParser):
    """ Philips Binary V3 & V5 *.RD format parser """

    description = "Phillips Binary V3/V5 *.RD"
    extensions = get_case_insensitive_glob("*.RD")
    mimetypes = ["application/octet-stream", ]

    # *.RD files are binary, so open them in binary mode:
    __file_mode__ = "rb"

    @classmethod
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """
        Parse the fixed-offset binary header of a V3/V5 *.RD file and fill
        the (single) data object with the scan metadata.

        :param filename: path of the file (used only for the display name)
        :param fp: open binary file object positioned anywhere
        :param data_objects: optional list of data objects to (re)use
        :param close: when True, close `fp` before returning
        :raises IOError: for any version other than V3 or V5
        :return: the (adapted) list of data objects
        """
        f = fp
        try:
            basename = u(os.path.basename(filename))
        except Exception: # was a bare except: only guard the name decoding
            basename = None

        # Adapt XRDFile list; RD files always contain a single sample:
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Go to the start of the file
        f.seek(0, SEEK_SET)

        # Read file format version:
        version = f.read(2).decode()
        if version in ("V3", "V5"):
            # Read diffractometer, target and focus type (single bytes at offset 84);
            # cap() clamps out-of-range codes to the fallback index:
            f.seek(84, SEEK_SET)
            diffractomer_type, target_type, focus_type = struct.unpack("bbb", f.read(3))
            diffractomer_type = {
                0: b"PW1800",
                1: b"PW1710 based system",
                2: b"PW1840",
                3: b"PW3710 based system",
                4: b"Undefined",
                5: b"X'Pert MPD"
            }[cap(0, diffractomer_type, 5, 4)]
            target_type = {
                0: b"Cu",
                1: b"Mo",
                2: b"Fe",
                3: b"Cr",
                4: b"Other"
            }[cap(0, target_type, 3, 4)]
            focus_type = {
                0: b"BF",
                1: b"NF",
                2: b"FF",
                3: b"LFF",
                4: b"Unkown", # (sic) value kept byte-identical for compatibility
            }[cap(0, focus_type, 3, 4)]

            # Read wavelength information (three doubles at offset 94):
            f.seek(94, SEEK_SET)
            alpha1, alpha2, alpha_factor = struct.unpack("ddd", f.read(24))

            # Read sample name (16 NUL-padded bytes at offset 146):
            f.seek(146, SEEK_SET)
            sample_name = u(f.read(16).replace(b"\0", b""))

            # Read data limits (three doubles at offset 214):
            f.seek(214)
            twotheta_step, twotheta_min, twotheta_max = struct.unpack("ddd", f.read(24))
            twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)

            # Set data start; the intensity block starts at a fixed,
            # version-dependent offset:
            data_start = {
                "V3": 250,
                "V5": 810
            }[version]

            data_objects[0].update(
                filename=basename,
                name=sample_name,
                twotheta_min=twotheta_min,
                twotheta_max=twotheta_max,
                twotheta_step=twotheta_step,
                twotheta_count=twotheta_count,
                target_type=target_type,
                alpha1=alpha1,
                alpha2=alpha2,
                alpha_factor=alpha_factor,
                data_start=data_start,
                version=version
            )
        else:
            raise IOError("Only V3 and V5 *.RD files are supported!")

        if close: f.close()
        return data_objects

    @classmethod
    def _parse_data(cls, filename, fp, data_objects=None, close=False):
        """
        Read the intensity values (unsigned shorts) and build the
        [2theta, intensity] data array for the single data object.

        :raises IOError: for any version other than V3 or V5
        """
        f = fp
        # RD files are singletons, so no need to iterate over the list,
        # there is only one XRDFile instance:
        if data_objects[0].data is None: # was `== None`
            data_objects[0].data = []

        # Parse data:
        if f is not None:
            if data_objects[0].version in ("V3", "V5"):
                # Move to start of data:
                f.seek(data_objects[0].data_start)
                n = 0
                while n < data_objects[0].twotheta_count:
                    y, = struct.unpack("H", f.read(2))
                    # The + 0.5 centers each point inside its 2theta step:
                    data_objects[0].data.append([
                        data_objects[0].twotheta_min + data_objects[0].twotheta_step * float(n + 0.5),
                        float(y)
                    ])
                    n += 1
            else:
                raise IOError("Only V3 and V5 *.RD files are supported!")

        data_objects[0].data = np.array(data_objects[0].data)

        if close: f.close()
        return data_objects

    pass # end of class
class CPIParser(XRDParserMixin, BaseParser):
    """ ASCII Sietronics *.CPI format parser """

    description = "Sietronics *.CPI"
    extensions = get_case_insensitive_glob("*.CPI", "*.CPD", "*.CPS")

    @classmethod
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """
        Parse the line-oriented CPI header (limits, target, wavelength and
        sample name) and record where the SCANDATA block begins.

        :param fp: open text file object positioned anywhere
        :param close: when True, close `fp` before returning
        :return: the (adapted) list of data objects
        """
        f = fp
        try:
            basename = u(os.path.basename(filename))
        except Exception: # was a bare except: only guard the name decoding
            basename = None

        # Adapt XRDFile list; CPI files always contain a single sample:
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Move to the start of the file
        f.seek(0)
        # Skip a line: file type header
        f.readline()
        # Read data limits (decimal commas are normalized to points):
        twotheta_min = float(f.readline().replace(",", ".").strip())
        twotheta_max = float(f.readline().replace(",", ".").strip())
        twotheta_step = float(f.readline().replace(",", ".").strip())
        twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)
        # Read target element name
        target_type = f.readline()
        # Read wavelength
        alpha1 = float(f.readline().replace(",", ".").strip())
        # Read up to SCANDATA and keep track of the line before,
        # it contains the sample description
        name = ""
        while True:
            line = f.readline().strip()
            if line == "SCANDATA" or line == "":
                data_start = f.tell()
                break
            else:
                name = line

        data_objects[0].update(
            filename=basename,
            name=name,
            target_type=target_type,
            alpha1=alpha1,
            twotheta_min=twotheta_min,
            twotheta_max=twotheta_max,
            twotheta_step=twotheta_step,
            twotheta_count=twotheta_count,
            data_start=data_start,
        )

        if close: f.close()
        return data_objects

    @classmethod
    def _parse_data(cls, filename, fp, data_objects=None, close=False):
        """
        Read one intensity value per line from the SCANDATA block and build
        the [2theta, intensity] data array for the single data object.
        """
        f = fp
        # CPI files are singletons, so no need to iterate over the list,
        # there is only one data object instance:
        if data_objects[0].data is None: # was `== None`
            data_objects[0].data = []

        if f is not None:
            f.seek(data_objects[0].data_start)
            n = 0
            while n <= data_objects[0].twotheta_count:
                line = f.readline().strip("\n").replace(",", ".")
                if line != "":
                    data_objects[0].data.append([
                        float(data_objects[0].twotheta_min + data_objects[0].twotheta_step * n),
                        float(line)
                    ])
                n += 1

        data_objects[0].data = np.array(data_objects[0].data)

        if close: f.close()
        return data_objects

    @classmethod
    def write(cls, filename, x, ys, radiation="Cu", wavelength=1.54060,
              tps=48.0, sample="", **kwargs):
        """
        Writes a SIETRONICS cpi text file.
        x and ys should be numpy arrays.

        :param filename: output path
        :param x: 1-D array of 2theta positions (assumed evenly spaced)
        :param ys: 2-D array; only the first row is written
        """
        start_angle = x[0]
        end_angle = x[-1]
        step_size = (end_angle - start_angle) / (x.size - 1)
        # BUGFIX: the original used date.today(), whose %H:%M:%S always
        # rendered as 00:00:00 (a date has no time); use a real timestamp.
        from datetime import datetime
        with open(filename, 'w') as f:
            f.write("SIETRONICS XRD SCAN\n")
            f.write("%.4f\n" % start_angle)
            f.write("%.4f\n" % end_angle)
            f.write("%.4f\n" % step_size)
            f.write("%s\n" % radiation)
            f.write("%.5f\n" % wavelength)
            f.write("%s\n" % datetime.now().strftime('%d/%m/%y %H:%M:%S'))
            f.write("%.1f\n" % tps)
            f.write("%s\n" % sample)
            f.write("SCANDATA\n")
            for y in ys[0, :]:
                f.write("%.7f\n" % y)

    pass # end of class
class BrkRAWParser(XRDParserMixin, BaseParser):
    """ Bruker *.RAW format parser """

    description = "Bruker/Siemens Binary V1/V2/V3 *.RAW"
    extensions = get_case_insensitive_glob("*.RAW")
    mimetypes = ["application/octet-stream", ]

    # *.RAW files are binary, so open them in binary mode:
    __file_mode__ = "rb"

    @classmethod
    def _clean_bin_str(cls, val):
        """ Strip NUL padding and surrounding whitespace from a binary string field. """
        return u(str(val).replace("\0", "").strip())

    @classmethod
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """
        Parse the binary header of a Bruker/Siemens RAW1/RAW2/RAW3 file and
        fill one data object per sample range found in the file.

        :param fp: open binary file object positioned anywhere
        :param close: when True, close `fp` before returning
        :raises IOError: for unrecognized format versions
        :return: the (adapted) list of data objects

        BUGFIX: the two `raise IOError, "..."` statements used Python-2-only
        syntax ("verson" typo included); replaced with the call form.
        """
        f = fp
        try:
            basename = u(os.path.basename(filename))
        except Exception: # was a bare except: only guard the name decoding
            basename = None

        # Go to the start of the file
        f.seek(0, SEEK_SET)

        # Read file format version:
        version = str(f.read(4))
        if version == "RAW ": version = "RAW1"
        elif version == "RAW2": version = "RAW2"
        elif version == "RAW1" and str(f.read(3)) == ".01": version = "RAW3"

        if version == "RAW1":
            # This format does not allow getting the exact number of samples,
            # so start with one and append where needed:
            isfollowed = 1
            num_samples = 0
            while isfollowed > 0:
                twotheta_count = int(struct.unpack("I", f.read(4))[0])
                # Check if this is an early "RAW " formatted file where the
                # "RAW " is repeated for each sample:
                if num_samples > 0 and twotheta_count == int(struct.unpack("I", b"RAW ")[0]):
                    twotheta_count = int(struct.unpack("I", f.read(4))[0])
                # Step counting time, 2-theta step size and scanning mode:
                time_step, twotheta_step, scan_mode = struct.unpack("fff", f.read(12)) #@UnusedVariable
                # Skip 4 bytes, and read 2-theta starting position:
                f.seek(4, SEEK_CUR)
                twotheta_min, = struct.unpack("f", f.read(4))
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)
                # Skip 12 bytes
                # (contain theta, khi and phi start point for eularian craddles)
                f.seek(12, SEEK_CUR)
                # Read sample name and wavelengths:
                sample_name = cls._clean_bin_str(f.read(32))
                alpha1, alpha2 = struct.unpack("ff", f.read(8))
                # Skip 72 bytes:
                f.seek(72, SEEK_CUR)
                isfollowed, = struct.unpack("I", f.read(4))
                # Get data position and skip for now:
                data_start = f.tell()
                f.seek(twotheta_count * 4, SEEK_CUR)
                # Adapt XRDFile list
                data_objects = cls._adapt_data_object_list(
                    data_objects,
                    num_samples=(num_samples + 1),
                    only_extend=True
                )
                data_objects[num_samples].update(
                    filename=basename,
                    version=version,
                    name=sample_name,
                    time_step=time_step,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    data_start=data_start
                )
                num_samples += 1

        elif version == "RAW2":
            # Read number of sample ranges:
            num_samples, = struct.unpack("H", f.read(2))
            # Adapt XRDFile list
            data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)
            # Read sample name:
            f.seek(8, SEEK_SET)
            sample_name = cls._clean_bin_str(f.read(32))
            # Meta-data description, skip for now:
            # description = u(str(f.read(128)).replace("\0", "").strip())
            # date = u(str(f.read(10)).replace("\0", "").strip())
            # time = u(str(f.read(5)).replace("\0", "").strip())
            # Read wavelength information:
            f.seek(148, SEEK_CUR)
            target_type = u(str(f.read(2)).replace("\0", "").strip()) #@UnusedVariable
            alpha1, alpha2, alpha_factor = struct.unpack("fff", f.read(12))
            # Total runtime in seconds: (not used fttb)
            f.seek(8, SEEK_CUR)
            time_total, = struct.unpack("f", f.read(4)) #@UnusedVariable
            # Move to first sample header start:
            f.seek(256, SEEK_SET)
            # Read in per-sample meta data
            for i in range(num_samples):
                header_start = f.tell()
                header_length, twotheta_count = struct.unpack("HH", f.read(4))
                data_start = header_start + header_length
                # Read step size and start angle:
                f.seek(header_start + 12) # = 256 + 4 + 8 skipped bytes
                twotheta_step, twotheta_min = struct.unpack("ff", f.read(8))
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count)
                # Read up to end of data:
                f.seek(data_start + twotheta_count * 4, SEEK_SET)
                # Update XRDFile object:
                data_objects[i].update(
                    filename=basename,
                    version=version,
                    name=sample_name,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    alpha_factor=alpha_factor,
                    data_start=data_start
                )

        elif version == "RAW3":
            # Read file status:
            f.seek(8, SEEK_SET)
            file_status = { #@UnusedVariable
                1: "done",
                2: "active",
                3: "aborted",
                4: "interrupted"
            }[int(struct.unpack("I", f.read(4))[0])]
            # Read number of samples inside this file:
            f.seek(12, SEEK_SET)
            num_samples, = struct.unpack("I", f.read(4))
            # Read in sample name:
            f.seek(326, SEEK_SET)
            sample_name = cls._clean_bin_str(f.read(60))
            # Goniometer radius:
            f.seek(564, SEEK_SET)
            radius = float(struct.unpack("f", f.read(4))[0])
            # Fixed divergence:
            f.seek(568, SEEK_SET)
            divergence = float(struct.unpack("f", f.read(4))[0])
            # Primary soller
            f.seek(576, SEEK_SET)
            soller1 = float(struct.unpack("f", f.read(4))[0])
            # Secondary soller
            f.seek(592, SEEK_SET)
            soller2 = float(struct.unpack("f", f.read(4))[0])
            # Get anode type:
            f.seek(608, SEEK_SET)
            target_type = str(f.read(4)) #@UnusedVariable
            # Get wavelength info:
            f.seek(616, SEEK_SET)
            alpha_average, alpha1, alpha2, beta, alpha_factor = (#@UnusedVariable
                struct.unpack("ddddd", f.read(8 * 5)))
            # Get total recording time:
            f.seek(664, SEEK_SET)
            time_total, = struct.unpack("f", f.read(4)) #@UnusedVariable
            # Adapt XRDFile list & skip to first block:
            data_objects = cls._adapt_data_object_list(data_objects, num_samples=num_samples)
            f.seek(712, SEEK_SET)
            # Read in per-sample meta data
            for i in range(num_samples):
                # Store the start of the header:
                header_start = f.tell()
                # Get header length
                f.seek(header_start + 0, SEEK_SET)
                header_length, = struct.unpack("I", f.read(4))
                assert header_length == 304, "Invalid format!"
                # Get data count
                f.seek(header_start + 4, SEEK_SET)
                twotheta_count, = struct.unpack("I", f.read(4))
                # Get theta start positions
                f.seek(header_start + 8, SEEK_SET)
                theta_min, twotheta_min = struct.unpack("dd", f.read(8 * 2)) #@UnusedVariable
                # Read step size
                f.seek(header_start + 176, SEEK_SET)
                twotheta_step, = struct.unpack("d", f.read(8))
                # Read counting time
                f.seek(header_start + 192, SEEK_SET)
                time_step, = struct.unpack("d", f.read(8))
                # Read the used wavelength
                f.seek(header_start + 240, SEEK_SET)
                alpha_used, = struct.unpack("d", f.read(8)) #@UnusedVariable
                # Supplementary header size:
                f.seek(header_start + 256, SEEK_SET)
                supp_headers_size, = struct.unpack("I", f.read(4))
                data_start = header_start + header_length + supp_headers_size
                # Move to the end of the data:
                f.seek(data_start + twotheta_count * 4)
                # Calculate last data point
                twotheta_max = twotheta_min + twotheta_step * float(twotheta_count - 0.5)
                data_objects[i].update(
                    filename=basename,
                    version=version,
                    name=sample_name,
                    twotheta_min=twotheta_min,
                    twotheta_max=twotheta_max,
                    twotheta_step=twotheta_step,
                    twotheta_count=twotheta_count,
                    alpha1=alpha1,
                    alpha2=alpha2,
                    alpha_factor=alpha_factor,
                    data_start=data_start,
                    radius=radius,
                    soller1=soller1,
                    soller2=soller2,
                    divergence=divergence
                )

        else:
            raise IOError("Only version 1, 2 and 3 *.RAW files are supported!")

        if close: f.close()
        return data_objects

    @classmethod
    def _parse_data(cls, filename, fp, data_objects=None, close=False):
        """
        Read the intensity values (floats) for every sample range and build
        each data object's [2theta, intensity] array.

        :raises IOError: for unrecognized format versions
        """
        for data_object in data_objects:
            if data_object.data is None: # was `== None`
                data_object.data = []

            # Parse data:
            if fp is not None:
                if data_object.version in ("RAW1", "RAW2", "RAW3"):
                    fp.seek(data_object.data_start)
                    n = 0
                    while n < data_object.twotheta_count:
                        y, = struct.unpack("f", fp.read(4))
                        # The + 0.5 centers each point inside its 2theta step:
                        x = data_object.twotheta_min + data_object.twotheta_step * float(n + 0.5)
                        data_object.data.append([x, y])
                        n += 1
                else:
                    raise IOError("Only version 1, 2 and 3 *.RAW files are supported!")

            data_object.data = np.array(data_object.data)

        if close: fp.close()
        return data_objects

    pass # end of class
class BrkBRMLParser(XRDParserMixin, XMLParserMixin, BaseParser):
    """ Bruker *.BRML format parser """

    description = "Bruker BRML files *.BRML"
    extensions = get_case_insensitive_glob("*.BRML")
    mimetypes = ["application/zip", ]

    # BRML files are zip archives of XML documents:
    __file_mode__ = "r"

    @classmethod
    def _get_file(cls, fp, close=None):
        """
        Returns a three-tuple:
        filename, zipfile-object, close
        """
        if isinstance(fp, bytes):
            # A path was passed in; we opened the archive, so we own it:
            return fp, ZipFile(fp, cls.__file_mode__), True if close is None else close
        else:
            # An already-open file object; caller owns it unless told otherwise:
            return getattr(fp, 'name', None), ZipFile(fp, cls.__file_mode__), False if close is None else close

    @classmethod
    def _get_raw_data_files(cls, f, folder):
        """
        Processes DataContainer.xml and returns a list of xml raw data
        filepaths and the sample name
        """
        contf = f.open(r"%s/DataContainer.xml" % folder, cls.__file_mode__)
        data = contf.read()
        contf.close()
        _, root = cls.get_xml_for_string(data)
        sample_name = root.find("./MeasurementInfo").get("SampleName")
        raw_data_files = []
        for child in root.find("./RawDataReferenceList"):
            raw_data_files.append(child.text)
        return raw_data_files, sample_name

    @classmethod
    def _get_header_dict(cls, f, folder):
        """
        Read MeasurementContainer.xml and extract instrument geometry and
        wavelength information as a dict of data-object keyword arguments.
        Lengths reported in Angstrom*10 / mm are scaled on the way in.
        """
        header_d = {}
        contf = f.open(r"%s/MeasurementContainer.xml" % folder, cls.__file_mode__)
        data = contf.read()
        contf.close()
        _, root = cls.get_xml_for_string(data)

        radius_path = "./HardwareLogicExt/Instrument/BeamPathContainers" + \
            "/BeamPathContainerAbc[@VisibleName='PrimaryTrack']/%s"
        tube_path = "./HardwareLogicExt/Instrument/BeamPathContainers" + \
            "/BeamPathContainerAbc[@VisibleName='PrimaryTrack']/BankPositions/" + \
            "BankPosition/MountedComponent/MountedTube/%s"
        soller1_path = "./HardwareLogicExt/Instrument/BeamPathContainers" + \
            "/BeamPathContainerAbc[@VisibleName='PrimaryTrack']/BankPositions/" + \
            "BankPosition/MountedComponent[@VisibleName='SollerMount']/%s"
        soller2_path = "./HardwareLogicExt/Instrument/BeamPathContainers" + \
            "/BeamPathContainerAbc[@VisibleName='SecondaryTrack']/BankPositions/" + \
            "BankPosition/MountedComponent[@VisibleName='SollerMount']/%s"
        divergence_path = "./HardwareLogicExt/Instrument/BeamPathContainers" + \
            "/BeamPathContainerAbc[@VisibleName='PrimaryTrack']/BankPositions/" + \
            "BankPosition/MountedComponent/Restrictions[@FieldName='OpeningDegree']/%s"

        header_d.update(
            alpha1=float(cls.get_val(root, tube_path % "WaveLengthAlpha1", "Value")) / 10,
            alpha2=float(cls.get_val(root, tube_path % "WaveLengthAlpha2", "Value")) / 10,
            alpha_average=float(cls.get_val(root, tube_path % "WaveLengthAverage", "Value")) / 10,
            beta=float(cls.get_val(root, tube_path % "WaveLengthBeta", "Value")) / 10,
            alpha_factor=cls.get_val(root, tube_path % "WaveLengthRatio", "Value"),
            target_type=cls.get_val(root, tube_path % "TubeMaterial", "Value"),
            soller1=cls.get_val(root, soller1_path % "Deflection", "Value"),
            soller2=cls.get_val(root, soller2_path % "Deflection", "Value"),
            radius=float(cls.get_val(root, radius_path % "Radius", "Value", 0)) / 10.0, #convert to cm
            divergence=cls.get_val(root, divergence_path % "Data", "Value")
        )
        return header_d

    @classmethod
    def parse(cls, fp, data_objects=None, close=False):
        """
        Parse every experiment folder inside the BRML archive: read its
        header metadata, then every raw-data XML file, converting intensities
        to counts-per-second and collecting [2theta, intensity] pairs.

        :param fp: path or open file object of the BRML zip archive
        :param close: when True, close the archive before returning
        :return: the (adapted) list of data objects
        """
        filename, fp, close = cls._get_file(fp, close=close)

        try:
            basename = os.path.basename(filename)
        except AttributeError:
            basename = None

        num_samples = 0
        zipinfos = fp.infolist()
        processed_folders = []

        data_objects = not_none(data_objects, [])

        for zipinfo in zipinfos:
            if zipinfo.filename.count('/') == 1 and "DataContainer.xml" in zipinfo.filename:
                folder = os.path.dirname(zipinfo.filename)
                if folder not in processed_folders: # was `not folder in`
                    processed_folders.append(folder)

                    header_d = cls._get_header_dict(fp, folder)
                    raw_data_files, sample_name = cls._get_raw_data_files(fp, folder)
                    for raw_data_filename in raw_data_files:
                        contf = fp.open(raw_data_filename)
                        _, root = cls.get_xml_for_file(contf)

                        for route in root.findall("./DataRoutes/DataRoute"):
                            # Adapt XRDFile list & get last addition:
                            data_objects = cls._adapt_data_object_list(
                                data_objects,
                                num_samples=(num_samples + 1),
                                only_extend=True
                            )
                            data_object = data_objects[num_samples]

                            # Get the Datum tags:
                            datums = route.findall("Datum")
                            data = []

                            # Parse the RawDataView tags to find out what index in
                            # the datum is used for what type of data:
                            enabled_datum_index = None
                            twotheta_datum_index = None
                            intensity_datum_index = None
                            steptime_datum_index = None
                            for dataview in route.findall("./DataViews/RawDataView"):
                                index = int(dataview.get("Start", 0))
                                name = dataview.get("LogicName", default="Undefined")
                                xsi_type = dataview.get("{http://www.w3.org/2001/XMLSchema-instance}type", default="Undefined")
                                if name == "MeasuredTime":
                                    steptime_datum_index = index
                                elif name == "AbsorptionFactor":
                                    enabled_datum_index = index
                                elif name == "Undefined" and xsi_type == "VaryingRawDataView":
                                    for i, definition in enumerate(dataview.findall("./Varying/FieldDefinitions")):
                                        if definition.get("TwoTheta"):
                                            index += i
                                            break
                                    twotheta_datum_index = index
                                elif name == "Undefined" and xsi_type == "RecordedRawDataView":
                                    intensity_datum_index = index

                            # Parse the SubScanInfo list (usually only one), and
                            # then parse the datums accordingly
                            twotheta_min = None
                            twotheta_max = None
                            twotheta_count = 0
                            for subscan in route.findall("./SubScans/SubScanInfo"):
                                # Get the steps, where to start and the planned
                                # time per step (measuredTimePerStep deviates
                                # if the recording was interrupted):
                                steps = int(subscan.get("MeasuredSteps"))
                                start = int(subscan.get("StartStepNo"))
                                steptime = float(subscan.get("PlannedTimePerStep"))

                                for datum in datums[start:start + steps]:
                                    values = datum.text.split(",")
                                    if values[enabled_datum_index] == "1":
                                        # Fetch values from the list:
                                        datum_steptime = float(values[steptime_datum_index])
                                        intensity = float(values[intensity_datum_index])
                                        intensity /= float(steptime * datum_steptime)
                                        twotheta = float(values[twotheta_datum_index])

                                        # Keep track of min 2theta:
                                        if twotheta_min is None:
                                            twotheta_min = twotheta
                                        else:
                                            twotheta_min = min(twotheta_min, twotheta)

                                        # Keep track of max 2theta:
                                        # BUGFIX: this used min(), which made
                                        # twotheta_max track the minimum instead.
                                        if twotheta_max is None:
                                            twotheta_max = twotheta
                                        else:
                                            twotheta_max = max(twotheta_max, twotheta)

                                        # Append point and increase count:
                                        data.append([twotheta, intensity])
                                        twotheta_count += 1

                            #Update header:
                            data_object.update(
                                filename=basename,
                                name=sample_name,
                                time_step=1, # we converted to CPS
                                twotheta_min=twotheta_min,
                                twotheta_max=twotheta_max,
                                twotheta_count=twotheta_count,
                                **header_d
                            )

                            data_object.data = data
                            num_samples += 1

                        #end for
                        contf.close()
                    #end for
                #end if
            #end if
        #end for

        if close: fp.close()
        return data_objects

    pass # end of class
class MarkersController(ObjectListStoreController):
    """ Controller for the markers list """
    # Trailing comma makes this a tuple containing a single (label, globs) pair.
    file_filters = ("Marker file", get_case_insensitive_glob("*.MRK")),
    treemodel_property_name = "markers"
    treemodel_class_type = Marker
    columns = [(" ", "c_visible"), ("Marker label", "c_label")]
    delete_msg = "Deleting a marker is irreversible!\nAre You sure you want to continue?"
    obj_type_map = [
        (Marker, EditMarkerView, EditMarkerController),
    ]
    title = "Edit Markers"

    def get_markers_tree_model(self, *args):
        """ Return the tree model backing the markers list. """
        return self.treemodel

    def setup_treeview_col_c_visible(self, treeview, name, col_descr, col_index, tv_col_nr):
        """
        Install the toggle-button column that shows/edits marker visibility.
        Returns True to signal that the column was handled here.
        """
        def toggle_renderer(column, cell, model, itr, data=None):
            # Sync the cell's checked state with the model value:
            try:
                col = column.get_col_attr("active")
                value = model.get_value(itr, col)
                cell.set_property('active', not_none(value, False))
            except TypeError:
                if settings.DEBUG: raise
                pass
        col = new_toggle_column(
            " ",
            toggled_callback=(self.on_marker_visible_toggled, (treeview.get_model(), col_index)),
            data_func=toggle_renderer,
            resizable=False,
            expand=False,
            activatable=True,
            active_col=col_index)
        setattr(col, "colnr", col_index)
        treeview.append_column(col)
        return True

    def select_markers(self, markers):
        # NOTE(review): the `markers` argument is ignored —
        # set_selected_objects() is called without it. Looks like it should
        # be `self.set_selected_objects(markers)`; confirm against the
        # ObjectListStoreController API before changing.
        self.set_selected_objects()

    @contextmanager
    def _multi_operation_context(self):
        """ Hold visuals_changed notifications while performing batched edits. """
        with self.model.visuals_changed.hold():
            yield

    # ------------------------------------------------------------
    #      GTK Signal handlers
    # ------------------------------------------------------------
    def on_load_object_clicked(self, event):
        """ Import markers from a CSV file chosen by the user. """
        def on_accept(dialog):
            with self._multi_operation_context():
                for marker in Marker.get_from_csv(dialog.filename, self.model):
                    self.model.markers.append(marker)
        DialogFactory.get_load_dialog(
            "Import markers", parent=self.view.get_top_widget(),
            filters=self.file_filters
        ).run(on_accept)

    def on_save_object_clicked(self, event):
        """ Export the selected markers to a CSV file chosen by the user. """
        def on_accept(dialog):
            Marker.save_as_csv(dialog.filename, self.get_selected_objects())
        DialogFactory.get_save_dialog(
            "Export markers", parent=self.view.get_top_widget(),
            filters=self.file_filters
        ).run(on_accept)

    def create_new_object_proxy(self):
        """ Create the Marker instance added when the user clicks 'new'. """
        return Marker(label="New Marker", parent=self.model)

    def on_marker_visible_toggled(self, cell, path, model, colnr):
        """ Flip the visibility flag stored in the model for the toggled row. """
        if model is not None:
            itr = model.get_iter(path)
            model.set_value(itr, colnr, not cell.get_active())
            return True
        return False

    @BaseController.status_message("Finding peaks...", "find_peaks")
    def on_find_peaks_clicked(self, widget):
        """
        Run automatic peak detection: optionally clear existing markers
        first, then show the threshold-selection dialog whose result is
        passed to auto_add_peaks().
        """
        def after_cb(threshold):
            self.model.auto_add_peaks(threshold)

        sel_model = ThresholdSelector(parent=self.model)
        sel_view = DetectPeaksView(parent=self.view)
        sel_ctrl = ThresholdController(model=sel_model, view=sel_view, parent=self, callback=after_cb) #@UnusedVariable

        show_threshold_plot = DialogFactory.get_progress_dialog(
            action=sel_model.update_threshold_plot_data,
            complete_callback=lambda *a, **k: sel_view.present(),
            gui_message="Finding peaks {progress:.0f}%...",
            toplevel=self.view.get_top_widget())

        if len(self.model.markers) > 0:
            def on_accept(dialog):
                self.model.clear_markers()
                show_threshold_plot()
            def on_reject(dialog):
                show_threshold_plot()
            DialogFactory.get_confirmation_dialog(
                "Do you want to clear the current markers for this pattern?",
                parent=self.view.get_top_widget()).run(on_accept, on_reject)
        else:
            show_threshold_plot()

    def on_match_minerals_clicked(self, widget):
        """
        Open the mineral-matching dialog for the selected markers; on apply,
        append the matched mineral abbreviation to each matching marker label.
        """
        def apply_cb(matches):
            with self._multi_operation_context():
                for name, abbreviation, peaks, matches, score in matches: #@UnusedVariable
                    for marker in self.get_selected_objects():
                        for mpos, epos in matches: #@UnusedVariable
                            if marker.get_nm_position() * 10. == epos:
                                marker.label += ", %s" % abbreviation

        def close_cb():
            self.model.visuals_changed.emit()
            self.view.show()

        marker_peaks = [] # position, intensity
        for marker in self.get_selected_objects():
            intensity = self.model.experimental_pattern.get_y_at_x(
                marker.position)
            marker_peaks.append((marker.get_nm_position() * 10., intensity))

        scorer_model = MineralScorer(marker_peaks=marker_peaks, parent=self.model)
        scorer_view = MatchMineralsView(parent=self.view)
        scorer_ctrl = MatchMineralController( #@UnusedVariable
            model=scorer_model, view=scorer_view, parent=self,
            apply_callback=apply_cb, close_callback=close_cb)

        self.view.hide()
        scorer_view.present()

    pass # end of class
class UDFParser(XRDParserMixin, BaseParser):
    """ ASCII Philips *.UDF format """

    description = "Philips *.UDF"
    extensions = get_case_insensitive_glob("*.UDF")

    @classmethod
    def _parse_header(cls, filename, fp, data_objects=None, close=False):
        """
        Parse the comma-separated UDF header lines up to the "RawScan"
        marker, extracting sample name, angle range and step size; all other
        header keys are forwarded to the data object as keyword arguments.

        :param fp: open text file object positioned anywhere
        :param close: when True, close `fp` before returning
        :raises IOError: when a header line has fewer than three fields
        :return: the (adapted) list of data objects
        """
        f = fp
        try:
            basename = u(os.path.basename(filename))
        except Exception: # was a bare except: only guard the name decoding
            basename = None

        # Adapt XRDFile list; UDF files always contain a single sample:
        data_objects = cls._adapt_data_object_list(data_objects, num_samples=1)

        # Move to the start of the file
        f.seek(0)

        # Go over the header:
        header_dict = {}
        for lineno, line in enumerate(f):
            # Start of data after this line:
            if line.strip() == "RawScan":
                data_start = f.tell()
                break
            else:
                # Break header line into separate parts, and strip trailing whitespace:
                parts = list(map(str.strip, line.split(',')))

                # If length is shorter then three, somethings wrong
                if len(parts) < 3:
                    raise IOError(
                        "Header of UDF file is malformed at line %d" % lineno)

                # Handle some of the header's arguments manually, the rest is
                # just passed to the data object as keyword arguments...
                if parts[0] == "SampleIdent":
                    name = parts[1]
                elif parts[0] == "DataAngleRange":
                    twotheta_min = float(parts[1])
                    twotheta_max = float(parts[2])
                elif parts[0] == "ScanStepSize":
                    twotheta_step = float(parts[1])

                # TODO extract other keys and replace with default names
                # parts[-1] is the line terminator ("/") and is dropped:
                header_dict[parts[0]] = ','.join(parts[1:-1])

        twotheta_count = int((twotheta_max - twotheta_min) / twotheta_step)

        data_objects[0].update(
            filename=basename,
            name=name,
            twotheta_min=twotheta_min,
            twotheta_max=twotheta_max,
            twotheta_step=twotheta_step,
            twotheta_count=twotheta_count,
            data_start=data_start,
            **header_dict)

        if close: f.close()
        return data_objects

    @classmethod
    def _parse_data(cls, filename, fp, data_objects=None, close=False):
        """
        Read the comma-separated intensity values of the RawScan block; the
        last value is terminated by a slash. Builds the single data object's
        [2theta, intensity] array.
        """
        f = fp
        # UDF files are singletons, so no need to iterate over the list,
        # there is only one data object instance:
        if data_objects[0].data is None: # was `== None`
            data_objects[0].data = []

        if f is not None:
            f.seek(data_objects[0].data_start)
            n = 0
            last_value_reached = False
            while n <= data_objects[0].twotheta_count and not last_value_reached:
                parts = list(map(str.strip, f.readline().split(',')))
                for part in parts:
                    # Last value ends with a slash:
                    if part.endswith('/'):
                        part = part[:-1] # remove the ending "/"
                        last_value_reached = True
                    # NOTE(review): n is incremented *before* computing the
                    # 2theta position, so the first point sits at
                    # twotheta_min + step rather than twotheta_min — possibly
                    # intentional (exclusive start); confirm against sample files.
                    n += 1
                    data_objects[0].data.append([
                        float(data_objects[0].twotheta_min + data_objects[0].twotheta_step * n),
                        float(part)
                    ])

        data_objects[0].data = np.array(data_objects[0].data)

        if close: f.close()
        return data_objects

    pass # end of class