def update_data(self, dataset_control, min_control, max_control, scale_control):
    """
    Update a data set
    @param dataset_control: combo box with the file path or workspace name
    @param min_control: text widget containing the minimum Q of the overlap region
    @param max_control: text widget containing the maximum Q of the overlap region
    @param scale_control: text widget containing the scale (can be input or output)
    """
    line_edit = dataset_control.lineEdit()
    file_in = str(line_edit.text())
    is_blank = len(file_in.strip()) == 0
    # Only probe disk / ADS when something was actually typed in
    is_known = (not is_blank) and (os.path.isfile(file_in) or AnalysisDataService.doesExist(file_in))

    data_object = None
    if is_known:
        data_object = DataSet(file_in)
        try:
            data_object.load(True)
        except (AttributeError, ImportError, NameError, TypeError, ValueError, Warning):
            # Load failed: flag the widget, warn the user, and bail out early
            # (note: self._plotted is deliberately left untouched on this path)
            util.set_valid(line_edit, False)
            QtGui.QMessageBox.warning(
                self, "Error loading file",
                "Could not load %s.\nMake sure you pick the XML output from the reduction." % file_in)
            return None

        # Fill in the overlap range only when both widgets exist and at
        # least one of them is still empty
        have_range_widgets = min_control is not None and max_control is not None
        if have_range_widgets and (len(min_control.text()) == 0 or len(max_control.text()) == 0):
            q_low, q_high = data_object.get_range()
            min_control.setText("%-6.3g" % q_low)
            max_control.setText("%-6.3g" % q_high)

        # Set the reference scale, unless we just loaded the data
        if len(scale_control.text()) == 0:
            scale_control.setText("1.0")
        else:
            data_object.set_scale(util._check_and_get_float_line_edit(scale_control))

        util.set_valid(line_edit, True)
    elif not is_blank:
        # Non-empty entry that is neither a file nor a workspace: mark invalid
        util.set_valid(line_edit, False)

    self._plotted = False
    return data_object
def stitch_data(self, input_file, output_dir, q_min, q_step):
    """
    Stitch the timestamped reduced workspaces found in the analysis data
    service, apply the computed scaling factors, and save the individual
    and averaged results as ASCII files.
    @param input_file: reduction XML file name; used to derive the output file name
    @param output_dir: directory in which the per-workspace ASCII files are written
    @param q_min: minimum Q of the rebinned output
    @param q_step: Q step of the rebinned output
    """
    from LargeScaleStructures.data_stitching import DataSet, Stitcher

    # Identify the data sets to stitch and order them by timestamp
    workspace_list = []
    _list_name = []
    _list_ts = []
    ws_list = AnalysisDataService.getObjectNames()
    for item in ws_list:
        if item.endswith('ts'):
            (_name, _ts) = item.split('_#')
            _list_name.append(item)
            _list_ts.append(_ts)

    # zip() returns an iterator in Python 3, so the former
    # "zip(...).sort()" idiom would raise AttributeError; sorted() works
    # on any iterable and sorts by timestamp first.
    _name_ts = sorted(zip(_list_ts, _list_name))
    _ts_sorted, workspace_list = zip(*_name_ts)

    # Stitch the data
    s = Stitcher()
    q_max = 0
    for item in workspace_list:
        data = DataSet(item)
        data.load(True, True)
        _x_min, x_max = data.get_range()
        if x_max > q_max:
            q_max = x_max
        s.append(data)

    s.set_reference(0)
    s.compute()

    # Apply the scaling factors and save each scaled data set
    for data in s._data_sets:
        Scale(InputWorkspace=str(data), OutputWorkspace=data._ws_scaled,
              Operation="Multiply", Factor=data.get_scale())
        SaveAscii(InputWorkspace=str(data),
                  Filename=os.path.join(output_dir, '%s.txt' % str(data)))

    output_file = input_file.replace('.xml', '_reprocessed.txt')
    Logger("REFLReprocess").notice("Saving to %s" % output_file)

    output_ws = _average_y_of_same_x_(q_min, q_step, q_max)
    SaveAscii(InputWorkspace=output_ws, Filename=output_file)
def update_data(self, dataset_control, min_control, max_control, scale_control):
    """
    Update a data set
    @param dataset_control: combo box with the file path or workspace name
    @param min_control: text widget containing the minimum Q of the overlap region
    @param max_control: text widget containing the maximum Q of the overlap region
    @param scale_control: text widget containing the scale (can be input or output)
    @return: the loaded data set, or None if the entry was empty or invalid
    """
    data_object = None
    file_in = str(dataset_control.lineEdit().text())
    if len(file_in.strip()) == 0:
        data_object = None
    elif os.path.isfile(file_in) or AnalysisDataService.doesExist(file_in):
        data_object = DataSet(file_in)
        try:
            data_object.load(True)
        # StandardError was removed in Python 3, so the previous
        # "except (StandardError, Warning)" raised NameError when a load
        # failure occurred; catch the same concrete exception tuple used by
        # the sibling update_data implementation instead.
        except (AttributeError, ImportError, NameError, TypeError, ValueError, Warning):
            data_object = None
            util.set_valid(dataset_control.lineEdit(), False)
            QtGui.QMessageBox.warning(
                self, "Error loading file",
                "Could not load %s.\nMake sure you pick the XML output from the reduction." % file_in)
            return
        if min_control is not None and max_control is not None \
                and (len(min_control.text()) == 0 or len(max_control.text()) == 0):
            minx, maxx = data_object.get_range()
            min_control.setText("%-6.3g" % minx)
            max_control.setText("%-6.3g" % maxx)

        # Set the reference scale, unless we just loaded the data
        if len(scale_control.text()) == 0:
            scale_control.setText("1.0")
        else:
            scale = util._check_and_get_float_line_edit(scale_control)
            data_object.set_scale(scale)
        # (removed an unused "_npts = data_object.get_number_of_points()" local)
        util.set_valid(dataset_control.lineEdit(), True)
    else:
        data_object = None
        util.set_valid(dataset_control.lineEdit(), False)

    self._plotted = False
    return data_object
def stitch_data(self, input_file, output_dir, q_min, q_step):
    """
    Stitch the timestamped reduced workspaces found in the analysis data
    service, apply the computed scaling factors, and save the individual
    and averaged results as ASCII files.
    @param input_file: reduction XML file name; used to derive the output file name
    @param output_dir: directory in which the per-workspace ASCII files are written
    @param q_min: minimum Q of the rebinned output
    @param q_step: Q step of the rebinned output
    """
    # RangeSelector was imported but never used; drop it
    from LargeScaleStructures.data_stitching import DataSet, Stitcher

    # Identify the data sets to stitch and order them by timestamp
    workspace_list = []
    _list_name = []
    _list_ts = []
    ws_list = AnalysisDataService.getObjectNames()
    for item in ws_list:
        if item.endswith('ts'):
            (_name, _ts) = item.split('_#')
            _list_name.append(item)
            _list_ts.append(_ts)

    # zip() returns an iterator in Python 3, so the former
    # "zip(...).sort()" idiom would raise AttributeError; sorted() works
    # on any iterable and sorts by timestamp first.
    _name_ts = sorted(zip(_list_ts, _list_name))
    _ts_sorted, workspace_list = zip(*_name_ts)

    # Stitch the data
    s = Stitcher()
    q_max = 0
    for item in workspace_list:
        data = DataSet(item)
        data.load(True, True)
        _x_min, x_max = data.get_range()
        if x_max > q_max:
            q_max = x_max
        s.append(data)

    s.set_reference(0)
    s.compute()

    # Apply the scaling factors and save each scaled data set
    for data in s._data_sets:
        Scale(InputWorkspace=str(data), OutputWorkspace=data._ws_scaled,
              Operation="Multiply", Factor=data.get_scale())
        SaveAscii(InputWorkspace=str(data),
                  Filename=os.path.join(output_dir, '%s.txt' % str(data)))

    output_file = input_file.replace('.xml', '_reprocessed.txt')
    Logger("REFLReprocess").notice("Saving to %s" % output_file)

    output_ws = _average_y_of_same_x_(q_min, q_step, q_max)
    SaveAscii(InputWorkspace=output_ws, Filename=output_file)