def fetch_correction_workspaces(vanadium_path, instrument, rb_num=""):
    """
    Fetch workspaces from the file system or create new ones.
    :param vanadium_path: The path to the requested vanadium run raw data.
    :param instrument: The instrument the data came from.
    :param rb_num: A user identifier, usually an experiment number.
    :return: The resultant integration and curves workspaces.
    """
    vanadium_number = path_handling.get_run_number_from_path(vanadium_path, instrument)
    integ_path, curves_path = _generate_saved_workspace_file_paths(vanadium_number)
    # user setting that forces a fresh calculation even when cached files exist
    force_recalc = get_setting(path_handling.INTERFACES_SETTINGS_GROUP, path_handling.ENGINEERING_PREFIX,
                               "recalc_vanadium", return_type=bool)
    if path.exists(curves_path) and path.exists(integ_path) and not force_recalc:  # Check if the cached files exist.
        try:
            integ_workspace = Load(Filename=integ_path, OutputWorkspace=INTEGRATED_WORKSPACE_NAME)
            curves_workspace = Load(Filename=curves_path, OutputWorkspace=CURVES_WORKSPACE_NAME)
            if rb_num:
                # also save a copy under the user's (rb_num) directory if not already present
                user_integ, user_curves = _generate_saved_workspace_file_paths(vanadium_number, rb_num=rb_num)
                if not path.exists(user_integ) and not path.exists(user_curves):
                    _save_correction_files(integ_workspace, user_integ, curves_workspace, user_curves)
            return integ_workspace, curves_workspace
        except RuntimeError as e:
            # fall through to a fresh calculation below rather than aborting
            logger.error(
                "Problem loading existing vanadium calculations. Creating new files. Description: " + str(e))
    # no usable cache (or recalc forced): compute the corrections and persist them
    integ_workspace, curves_workspace = _calculate_vanadium_correction(vanadium_path)
    _save_correction_files(integ_workspace, integ_path, curves_workspace, curves_path)
    if rb_num:
        user_integ, user_curves = _generate_saved_workspace_file_paths(vanadium_number, rb_num=rb_num)
        _save_correction_files(integ_workspace, user_integ, curves_workspace, user_curves)
    return integ_workspace, curves_workspace
def load_files(self, filenames_string):
    """
    Load a comma-separated list of files into the ADS and register them with the model.
    Files already registered are skipped; workspaces with more than one spectrum are rejected.
    :param filenames_string: Comma-separated string of file paths to load.
    """
    self._last_added = []
    filenames = [name.strip() for name in filenames_string.split(",")]
    for filename in filenames:
        ws_name = self._generate_workspace_name(filename)
        if ws_name in self._loaded_workspaces:
            logger.warning(f"File {ws_name} has already been loaded")
            continue
        try:
            if not ADS.doesExist(ws_name):
                ws = Load(filename, OutputWorkspace=ws_name)
            else:
                # already in the ADS (e.g. loaded by another tab) - just retrieve it
                ws = ADS.retrieve(ws_name)
            if ws.getNumberHistograms() == 1:
                self._loaded_workspaces[ws_name] = ws
                # setdefault preserves any existing background state for this workspace name
                self._bg_sub_workspaces.setdefault(ws_name, None)
                self._bg_params.setdefault(ws_name, [])
                self._last_added.append(ws_name)
            else:
                logger.warning(
                    f"Invalid number of spectra in workspace {ws_name}. Skipping loading of file.")
        except RuntimeError as e:
            # BUG FIX: the failing filename was previously logged as the literal "(unknown)"
            logger.error(
                f"Failed to load file: {filename}. Error: {e}. \n Continuing loading of other files.")
    self.update_log_workspace_group()
def load_files(self, filenames_string, xunit):
    """
    Load a comma-separated list of files, converting each to the requested x-axis unit.
    :param filenames_string: Comma-separated string of file paths to load.
    :param xunit: Target x-axis unit; data loaded in TOF is converted unless xunit == "TOF".
    """
    self._last_added = []
    filenames = [name.strip() for name in filenames_string.split(",")]
    for filename in filenames:
        ws_name = self._generate_workspace_name(filename, xunit)
        if ws_name not in self._loaded_workspaces:
            try:
                if not ADS.doesExist(ws_name):
                    ws = Load(filename, OutputWorkspace=ws_name)
                    if xunit != "TOF":
                        # BUG FIX: keep the converted workspace handle - previously the result of
                        # ConvertUnits was discarded and the pre-conversion handle was stored below
                        ws = ConvertUnits(InputWorkspace=ws, OutputWorkspace=ws_name, Target=xunit)
                else:
                    ws = ADS.retrieve(ws_name)
                if ws.getNumberHistograms() == 1:
                    self._loaded_workspaces[ws_name] = ws
                    if ws_name not in self._background_workspaces:
                        self._background_workspaces[ws_name] = None
                    self._last_added.append(ws_name)
                    self.add_log_to_table(ws_name, ws)
                else:
                    logger.warning(
                        f"Invalid number of spectra in workspace {ws_name}. Skipping loading of file.")
            except RuntimeError as e:
                # BUG FIX: the failing filename was previously logged as the literal "(unknown)"
                logger.error(
                    f"Failed to load file: {filename}. Error: {e}. \n Continuing loading of other files.")
        else:
            logger.warning(f"File {ws_name} has already been loaded")
def add_log_to_table(self, ws_name, ws):
    """
    Append a run-info row and averaged sample-log values for a workspace to the log tables.
    :param ws_name: Name the workspace is tracked under (table/cache key).
    :param ws: Workspace object to read run information and logs from.
    """
    # both ws and name needed in event a ws is renamed and ws.name() is no longer correct
    if not self._log_workspaces:
        self.create_log_table()
    # make dict for run if doesn't exist
    if ws_name not in self._log_values:
        self._log_values[ws_name] = dict()
    # add run info: instrument, run number, bank id, proton charge and title
    run = ws.getRun()
    self._log_workspaces[0].addRow(
        [ws.getInstrument().getFullName(), ws.getRunNumber(), run.getProperty('bankid').value,
         run.getProtonCharge(), ws.getTitle()])
    # add log data - loop over existing log workspaces not logs in settings as these might have changed
    for ilog in range(1, len(self._log_workspaces)):
        log_name = self._log_workspaces[ilog].name()
        if log_name in self._log_values[ws_name]:
            # average/stdev already computed on a previous call - reuse the cached values
            avg, stdev = self._log_values[ws_name][log_name]
        else:
            try:
                avg, stdev = AverageLogData(ws_name, LogName=log_name, FixZero=False)
                self._log_values[ws_name][log_name] = [avg, stdev]
            except RuntimeError:
                # log missing from this run - record NaNs so rows stay aligned across tables
                avg, stdev = full(2, nan)
                logger.error(
                    f"File {ws.name()} does not contain log {self._log_workspaces[ilog].name()}")
        self._log_workspaces[ilog].addRow([avg, stdev])
    self.update_log_group_name()
def _run_focus(input_workspace, output_workspace, vanadium_integration_ws, vanadium_curves_ws, bank,
               full_calib_ws=None, spectrum_numbers=None):
    """
    Run the EnggFocus algorithm over a bank or an explicit list of spectra.
    :param input_workspace: Workspace containing the data to focus.
    :param output_workspace: Name of the focused output workspace.
    :param vanadium_integration_ws: Vanadium integration workspace for the correction.
    :param vanadium_curves_ws: Vanadium curves workspace for the correction.
    :param bank: Bank to focus; if None, spectrum_numbers is used instead.
    :param full_calib_ws: Optional full-instrument calibration (DetectorPositions).
    :param spectrum_numbers: Spectra to focus when no bank is given.
    :return: Result of the EnggFocus algorithm.
    :raises RuntimeError: If EnggFocus fails; the message includes the underlying error.
    """
    kwargs = {
        "InputWorkspace": input_workspace,
        "OutputWorkspace": output_workspace,
        "VanIntegrationWorkspace": vanadium_integration_ws,
        "VanCurvesWorkspace": vanadium_curves_ws
    }
    if full_calib_ws is not None:
        kwargs["DetectorPositions"] = full_calib_ws
    # a bank of None means the caller supplied explicit spectrum numbers instead
    if bank is None:
        kwargs["SpectrumNumbers"] = spectrum_numbers
    else:
        kwargs["Bank"] = bank
    try:
        return EnggFocus(**kwargs)
    except RuntimeError as e:
        failure_msg = ("Error in focusing, Could not run the EnggFocus algorithm successfully for bank "
                       + str(bank) + ". Error Description: " + str(e))
        logger.error(failure_msg)
        # BUG FIX: previously re-raised a bare RuntimeError(), discarding both the
        # message and the causal chain; keep the type so existing callers still catch it
        raise RuntimeError(failure_msg) from e
def PyExec(self):
    """
    Refine lattice parameters across multiple peak workspaces: index peaks with an
    initial UB, then least-squares optimise (a, b, c, alpha, beta, gamma) and set
    the refined errors on each workspace's oriented lattice.
    """
    # setup progress bar
    prog_reporter = Progress(self, start=0.0, end=1.0, nreports=3)
    # Get input
    ws_list = self.getProperty("PeakWorkspaces").value
    a = self.getProperty('a').value
    b = self.getProperty('b').value
    c = self.getProperty('c').value
    alpha = self.getProperty('alpha').value
    beta = self.getProperty('beta').value
    gamma = self.getProperty('gamma').value
    self.tol = self.getProperty('Tolerance').value
    # Find initial UB and use to index peaks in all runs
    prog_reporter.report(1, "Find initial UB for peak indexing")
    self.find_initial_indexing(
        a, b, c, alpha, beta, gamma, ws_list)  # removes runs from ws_list if can't index
    # optimize the lattice parameters across runs (i.e. B matrix)
    prog_reporter.report(2, "Optimize B")

    def fobj(x):
        # residuals for the current lattice-parameter vector over all workspaces
        return self.calcResiduals(x, ws_list)

    alatt0 = [a, b, c, alpha, beta, gamma]
    try:
        alatt, cov, info, msg, ier = leastsq(fobj, x0=alatt0, full_output=True)
        # eval the fobj at optimal solution to set UB (leastsq iteration stops at a next sub-optimal solution)
        fobj(alatt)
    except ValueError:
        logger.error(
            "CalculateUMatrix failed - check initial lattice parameters and tolerance provided."
        )
        return
    # leastsq reports success via ier codes 1-4
    success = ier in [
        1, 2, 3, 4
    ] and cov is not None  # cov is None when matrix is singular
    if success:
        # calculate errors: scale covariance diagonal by reduced chi-squared
        dof = sum(
            [self.child_IndexPeaks(ws, RoundHKLs=True) for ws in ws_list]) - len(alatt0)
        err = np.sqrt(abs(np.diag(cov)) * (info['fvec']**2).sum() / dof)
        for wsname in ws_list:
            ws = AnalysisDataService.retrieve(wsname)
            ws.sample().getOrientedLattice().setError(*err)
        logger.notice(
            f"Lattice parameters successfully refined for workspaces: {ws_list}\n"
            f"Lattice Parameters: {np.array2string(alatt, precision=6)}\n"
            f"Parameter Errors : {np.array2string(err, precision=6)}")
    else:
        logger.warning(
            f"Error in optimization of lattice parameters: {msg}")
    # complete progress
    prog_reporter.report(3, "Done")
def estimate_background(self, ws_name, niter, xwindow, doSGfilter):
    """
    Estimate a focused workspace's background, falling back to a zero background
    when the supplied arguments are rejected by the estimation algorithm.
    :param ws_name: Name of the workspace to estimate the background for.
    :param niter: Number of iterations for the background estimation.
    :param xwindow: Width of the x-window used by the algorithm.
    :param doSGfilter: Whether to apply the Savitzky-Golay filter.
    :return: Workspace holding the estimated (or zero) background.
    """
    try:
        background_ws = EnggEstimateFocussedBackground(InputWorkspace=ws_name,
                                                       OutputWorkspace=ws_name + "_bg",
                                                       NIterations=niter, XWindow=xwindow,
                                                       ApplyFilterSG=doSGfilter)
    except (ValueError, RuntimeError) as exc:
        # ValueError when Niter not positive integer, RuntimeError when Window too small
        logger.error("Error on arguments supplied to EnggEstimateFocusedBackground: " + str(exc))
        # fall back: copy data with zeroed errors, then subtract it from itself to
        # get a workspace of zeros with the same number of spectra
        background_ws = SetUncertainties(InputWorkspace=ws_name)
        background_ws = Minus(LHSWorkspace=background_ws, RHSWorkspace=background_ws)
    return background_ws
def decode(obj_dic, _=None):
    """
    Rebuild the Engineering Diffraction GUI from an encoded state dictionary.
    :param obj_dic: Dictionary produced by the matching encoder (tabs, settings,
                    workspace names, fit results/properties).
    :param _: Unused (kept for the decoder-interface signature).
    :return: The reconstructed EngineeringDiffractionGui instance.
    """
    if obj_dic["encoder_version"] != IO_VERSION:
        logger.error(
            "Engineering Diffraction Interface encoder used different version, restoration may fail"
        )
    ws_names = obj_dic.get(
        "data_workspaces", None)  # workspaces are in ADS, need restoring into interface
    # only attach to a parent window when running inside workbench
    if 'workbench' in sys.modules:
        from workbench.config import get_window_config
        parent, flags = get_window_config()
    else:
        parent, flags = None, None
    gui = EngineeringDiffractionGui(parent=parent, window_flags=flags)
    presenter = gui.presenter
    gui.tabs.setCurrentIndex(obj_dic["current_tab"])
    presenter.settings_presenter.model.set_settings_dict(
        obj_dic["settings_dict"])
    presenter.settings_presenter.settings = obj_dic["settings_dict"]
    if ws_names is not None:
        # restore the fitting tab's data table and plotted selection
        fit_data_widget = presenter.fitting_presenter.data_widget
        fit_data_widget.model.restore_files(ws_names)
        fit_data_widget.presenter.plotted = set(
            obj_dic["plotted_workspaces"])
        fit_data_widget.presenter.restore_table()
        fit_results = obj_dic.get("fit_results", None)
        if fit_results is not None:
            fit_data_widget.model._fit_results = fit_results
            fit_data_widget.model.create_fit_tables()
    fit_properties = obj_dic.get("fit_properties", None)
    if fit_properties is not None:
        # restore the fit browser state and re-plot the fit output workspace
        fit_browser = presenter.fitting_presenter.plot_widget.view.fit_browser
        fit_browser.show()  # show the fit browser, default is off
        presenter.fitting_presenter.plot_widget.view.fit_toggle(
        )  # show the fit browser, default is off
        fit_props = fit_properties["properties"]
        fit_function = fit_props["Function"]
        output_name = fit_props["Output"]
        is_plot_diff = obj_dic["plot_diff"]
        fit_browser.setWorkspaceName(output_name)
        fit_browser.setStartX(fit_props["StartX"])
        fit_browser.setEndX(fit_props["EndX"])
        fit_browser.loadFunction(fit_function)
        fit_browser.setOutputName(output_name)
        ws_name = output_name + '_Workspace'
        fit_browser.do_plot(ADS.retrieve(ws_name), is_plot_diff)
    return gui
def _save_correction_files(integration_workspace, integration_path, curves_workspace, curves_path):
    """
    Try to persist both vanadium correction workspaces as Nexus files.
    A failure to save is logged but deliberately not propagated, so the calling
    algorithm can carry on with the in-memory workspaces.
    :param integration_workspace: The workspace for the vanadium integration.
    :param integration_path: Destination file for the integration workspace.
    :param curves_workspace: The workspace for the vanadium curves.
    :param curves_path: Destination file for the curves workspace.
    """
    try:
        for workspace, destination in ((integration_workspace, integration_path),
                                       (curves_workspace, curves_path)):
            SaveNexus(InputWorkspace=workspace, Filename=destination)
    except RuntimeError as e:
        # If the files cannot be saved, continue with the execution of the algorithm anyway.
        logger.error(
            "Vanadium Correction files could not be saved to the filesystem. Description: " + str(e))
def load_full_instrument_calibration():
    """
    Return the full instrument calibration workspace, loading it into the ADS on
    first use from the path configured in the interface settings.
    :return: The calibration workspace, or None if it could not be loaded.
    """
    # fast path: calibration already resident in the ADS
    if ADS.doesExist("full_inst_calib"):
        return ADS.retrieve("full_inst_calib")
    full_calib_path = get_setting(output_settings.INTERFACES_SETTINGS_GROUP,
                                  output_settings.ENGINEERING_PREFIX, "full_calibration")
    try:
        return Load(full_calib_path, OutputWorkspace="full_inst_calib")
    except ValueError:
        logger.error(
            "Error loading Full instrument calibration - this is set in the interface settings."
        )
        return
def load_files(self, filenames_string):
    """
    Load each file from a comma-separated list, keeping only single-spectrum workspaces.
    Failures are logged and loading continues with the remaining files.
    :param filenames_string: Comma-separated string of file paths to load.
    """
    self._last_added = []
    for filename in (part.strip() for part in filenames_string.split(",")):
        ws_name = self._generate_workspace_name(filename)
        try:
            workspace = Load(filename, OutputWorkspace=ws_name)
            if workspace.getNumberHistograms() == 1:
                self._loaded_workspaces[ws_name] = workspace
                self._last_added.append(ws_name)
            else:
                logger.warning(
                    "Invalid number of spectra in workspace {}. Skipping loading of file."
                    .format(ws_name))
        except RuntimeError as error:
            logger.error(
                "Failed to load file: {}. Error: {}. \n Continuing loading of other files."
                .format(filename, error))
def _fullpath(self, scan_number, frame_number):
    """
    Returns a full path for a datafile for HFIR.
    :param scan_number: Scan number used in the datafile name.
    :param frame_number: Frame number used in the datafile name.
    :return: Absolute path of the existing datafile.
    :raises KeyError: If the configured beamline is not a known instrument.
    :raises IOError: If the datafile does not exist or is not accessible.
    """
    if self._beamline not in self._instruments:
        # BUG FIX: previously only logged, then crashed on the dict lookup below with
        # an uninformative KeyError; raise explicitly (same exception type) instead
        msg = "Beamline not valid."
        logger.error(msg)
        raise KeyError(msg)
    instrument = self._instruments[self._beamline]
    filename = "{}_exp{}_scan{:04}_{:04}.xml".format(
        instrument, self._exp_number, scan_number, frame_number)
    filename_path = os.path.join(self._folder_datafiles, filename)
    # Check if file exists:
    if not os.path.exists(filename_path):
        msg = "File does not exist / no permissions to access it: {}".format(
            filename_path)
        logger.error(msg)
        raise IOError(msg)
    return filename_path
def restore_files(self, ws_names):
    """
    Restore workspaces (already present in the ADS) into the model's bookkeeping dicts,
    re-attaching any background-subtracted workspaces recorded in the params.
    :param ws_names: Iterable of workspace names to restore.
    """
    for ws_name in ws_names:
        try:
            ws = ADS.retrieve(ws_name)
            if ws.getNumberHistograms() == 1:
                self._loaded_workspaces[ws_name] = ws
                # BUG FIX: create the params entry BEFORE reading it - previously
                # self._bg_params[ws_name] was accessed first, raising an unhandled
                # KeyError (not a RuntimeError) for workspaces with no recorded params
                if ws_name not in self._bg_params:
                    self._bg_params[ws_name] = []
                if self._bg_params[ws_name]:
                    self._bg_sub_workspaces[ws_name] = ADS.retrieve(ws_name + "_bgsub")
                else:
                    self._bg_sub_workspaces[ws_name] = None
                self._last_added.append(ws_name)
            else:
                logger.warning(
                    f"Invalid number of spectra in workspace {ws_name}. Skipping restoration of workspace.")
        except RuntimeError as e:
            logger.error(
                f"Failed to restore workspace: {ws_name}. Error: {e}. \n Continuing loading of other files.")
    self.update_log_workspace_group()
def _calculate_vanadium_correction(vanadium_path):
    """
    Runs the vanadium correction algorithm.
    :param vanadium_path: The path to the vanadium data.
    :return: The integrated workspace and the curves generated by the algorithm.
    :raises RuntimeError: If the vanadium data cannot be loaded.
    """
    try:
        Load(Filename=vanadium_path, OutputWorkspace=VANADIUM_INPUT_WORKSPACE_NAME)
    except Exception as e:
        error_msg = ("Error when loading vanadium sample data. "
                     "Could not run Load algorithm with vanadium run number: "
                     + str(vanadium_path) + ". Error description: " + str(e))
        logger.error(error_msg)
        # BUG FIX: raise an instantiated, chained exception instead of the bare class,
        # so callers see the message and the original cause
        raise RuntimeError(error_msg) from e
    EnggVanadiumCorrections(VanadiumWorkspace=VANADIUM_INPUT_WORKSPACE_NAME,
                            OutIntegrationWorkspace=INTEGRATED_WORKSPACE_NAME,
                            OutCurvesWorkspace=CURVES_WORKSPACE_NAME)
    # the raw vanadium input is no longer needed once the corrections are generated
    Ads.remove(VANADIUM_INPUT_WORKSPACE_NAME)
    integrated_workspace = Ads.Instance().retrieve(INTEGRATED_WORKSPACE_NAME)
    curves_workspace = Ads.Instance().retrieve(CURVES_WORKSPACE_NAME)
    return integrated_workspace, curves_workspace
def _make_paths(self):
    """
    Build and cache the experiment folder paths:
    folder_base, folder_shared and folder_datafiles.
    Each path is validated and an error is logged if it does not exist.
    """
    self._folder_base = os.path.abspath(
        os.path.join(
            os.sep,
            'HFIR',
            self._beamline,
            'IPTS-{:04}'.format(self._ipts_number),
            'exp{}'.format(self._exp_number),
        ))
    self._folder_shared = os.path.join(self._folder_base, "Shared")
    self._folder_datafiles = os.path.join(self._folder_base, "Datafiles")
    # validate each folder in turn (base first, then shared, then datafiles)
    for folder in (self._folder_base, self._folder_shared, self._folder_datafiles):
        if not os.path.exists(folder):
            logger.error("The Folder is not valid: %s", folder)
def restore_files(self, ws_names):
    """
    Re-attach workspaces already present in the ADS to the model's data-workspace
    records, including any recorded background-subtracted workspaces.
    :param ws_names: Dict of workspace names (and their state) to restore.
    """
    self._data_workspaces.add_from_names_dict(ws_names)
    for name in ws_names:
        try:
            workspace = ADS.retrieve(name)
            if workspace.getNumberHistograms() != 1:
                logger.warning(
                    f"Invalid number of spectra in workspace {name}. Skipping restoration of workspace."
                )
                continue
            entry = self._data_workspaces[name]
            # only re-attach a background-subtracted workspace if params were recorded
            background_ws = ADS.retrieve(entry.bgsub_ws_name) if entry.bg_params else None
            self._last_added.append(name)
            entry.loaded_ws = workspace
            entry.bgsub_ws = background_ws
        except RuntimeError as error:
            logger.error(
                f"Failed to restore workspace: {name}. Error: {error}. \n Continuing loading of other files."
            )
    self.update_log_workspace_group()
def create_vanadium_corrections(vanadium_path: str, instrument: str):  # -> Workspace, Workspace
    """
    Runs the vanadium correction algorithm.
    :param vanadium_path: The path to the vanadium data.
    :param instrument: Instrument name used to parse the run number from the path.
    :return: The integrated workspace and the processed instrument workspace generated.
    :raises RuntimeError: If the vanadium data cannot be loaded.
    """
    try:
        run_no = path_handling.get_run_number_from_path(
            vanadium_path, instrument)
        van_ws = Load(Filename=vanadium_path,
                      OutputWorkspace=str(run_no) + '_' + VANADIUM_INPUT_WORKSPACE_NAME)
    except Exception as e:
        error_msg = ("Error when loading vanadium sample data. "
                     "Could not run Load algorithm with vanadium run number: "
                     + str(vanadium_path) + ". Error description: " + str(e))
        logger.error(error_msg)
        # BUG FIX: raise an instantiated, chained exception instead of the bare class
        raise RuntimeError(error_msg) from e
    # get full instrument calibration for instrument processing calculation
    if Ads.doesExist("full_inst_calib"):
        full_calib_ws = Ads.retrieve("full_inst_calib")
    else:
        full_calib_path = get_setting(
            output_settings.INTERFACES_SETTINGS_GROUP,
            output_settings.ENGINEERING_PREFIX, "full_calibration")
        try:
            full_calib_ws = Load(full_calib_path, OutputWorkspace="full_inst_calib")
        except ValueError:
            logger.error(
                "Error loading Full instrument calibration - this is set in the interface settings."
            )
            # NOTE(review): returns None here while the docstring promises two values;
            # callers that unpack the result will fail - confirm intended
            return
    integral_ws = _calculate_vanadium_integral(van_ws, run_no)
    processed_ws = _calculate_vanadium_processed_instrument(
        van_ws, full_calib_ws, integral_ws, run_no)
    return integral_ws, processed_ws
def __init__(self, vanadium_run: str, focus_runs: Sequence[str], save_dir: str, full_inst_calib_path: str,
             prm_path: Optional[str] = None, ceria_run: Optional[str] = None, group: Optional[GROUP] = None,
             calfile_path: Optional[str] = None, spectrum_num: Optional[str] = None) -> None:
    """
    Store the run configuration and prepare the CalibrationInfo, either from an
    existing .prm file or from a ceria run plus grouping.
    :param vanadium_run: Vanadium run number/identifier.
    :param focus_runs: Runs to focus.
    :param save_dir: Directory for output files.
    :param full_inst_calib_path: Path to the full instrument calibration file.
    :param prm_path: Optional existing calibration .prm file to load.
    :param ceria_run: Optional ceria run for creating a new calibration.
    :param group: Optional detector grouping for a new calibration.
    :param calfile_path: Custom .cal file (used with GROUP.CUSTOM).
    :param spectrum_num: Spectrum list (used with GROUP.CROPPED).
    """
    self.van_run = vanadium_run
    self.focus_runs = focus_runs
    self.save_dir = save_dir
    self.calibration = CalibrationInfo()
    # the full instrument calibration must be resident in the ADS for later steps
    try:
        self.full_calib_ws = Load(full_inst_calib_path, OutputWorkspace="full_inst_calib")
    except ValueError as e:
        logger.error("Unable to load calibration file " + full_inst_calib_path + ". Error: " + str(e))
    # configure the CalibrationInfo: an existing .prm wins over making a new calibration
    calib = self.calibration
    if prm_path:
        calib.set_calibration_from_prm_fname(prm_path)
    elif ceria_run and group:
        calib.set_group(group)
        calib.set_calibration_paths("ENGINX", ceria_run)
        if group == GROUP.CUSTOM and calfile_path:
            calib.set_cal_file(calfile_path)
        elif group == GROUP.CROPPED and spectrum_num:
            calib.set_spectra_list(spectrum_num)
def _on_error(self, error_info):
    """Log a worker error and re-enable the action button."""
    message = str(error_info)
    logger.error(message)
    self.emit_enable_button_signal()
def _on_worker_error(self, _):
    """Worker failure callback: log a generic message and re-enable the load button."""
    # the error details passed by the worker are deliberately ignored here
    logger.error("Error occurred when loading files.")
    self._emit_enable_load_button_signal()
def focus_run(self, sample_paths: list, vanadium_path: str, plot_output: bool, instrument: str, rb_num: str,
              regions_dict: dict) -> None:
    """
    Focus some data using the current calibration.
    :param sample_paths: The paths to the data to be focused.
    :param vanadium_path: Path to the vanadium file from the current calibration
    :param plot_output: True if the output should be plotted.
    :param instrument: The instrument that the data came from.
    :param rb_num: Number to signify the user who is running this focus
    :param regions_dict: dict region name -> grp_ws_name, defining region(s) of interest to focus over
    """
    full_calib_path = get_setting(
        output_settings.INTERFACES_SETTINGS_GROUP,
        output_settings.ENGINEERING_PREFIX, "full_calibration")
    # ensure the full instrument calibration is in the ADS (load it on first use)
    if not Ads.doesExist("full_inst_calib"):
        try:
            full_calib_workspace = Load(full_calib_path, OutputWorkspace="full_inst_calib")
        except RuntimeError:
            logger.error(
                "Error loading Full instrument calibration - this is set in the interface settings."
            )
            return
    else:
        full_calib_workspace = Ads.retrieve("full_inst_calib")
    van_integration_ws, van_processed_inst_ws = vanadium_corrections.fetch_correction_workspaces(
        vanadium_path, instrument)
    # check correct region calibration(s) and grouping workspace(s) exists
    inst_ws = path_handling.load_workspace(sample_paths[0])
    for region in regions_dict:
        calib_exists = self._check_region_calib_ws_exists(region)
        grouping_exists = self._check_region_grouping_ws_exists(
            regions_dict[region], inst_ws)
        if not (calib_exists and grouping_exists):
            # abort the whole focus if any region is missing its prerequisites
            return
    # loop over samples provided, focus each over region(s) specified in regions_dict
    output_workspaces = []  # List of collated workspaces to plot.
    self._last_focused_files = []
    van_run_no = path_handling.get_run_number_from_path(
        vanadium_path, instrument)
    for sample_path in sample_paths:
        sample_workspace = path_handling.load_workspace(sample_path)
        run_no = path_handling.get_run_number_from_path(
            sample_path, instrument)
        # perform prefocus operations on whole instrument workspace
        prefocus_success = self._whole_inst_prefocus(
            sample_workspace, van_integration_ws, full_calib_workspace)
        if not prefocus_success:
            # skip this sample but continue with the rest
            continue
        sample_plots = [
        ]  # if both banks focused, pass list with both so plotted on same figure
        for region, grouping_kwarg in regions_dict.items():
            tof_output_name = str(
                run_no) + "_" + FOCUSED_OUTPUT_WORKSPACE_NAME + region
            dspacing_output_name = tof_output_name + "_dSpacing"
            region_calib_ws = self._get_region_calib_ws(region)
            curves = self._get_van_curves_for_roi(region, van_processed_inst_ws, grouping_kwarg)
            # perform focus over chosen region of interest
            self._run_focus(sample_workspace, tof_output_name, curves, grouping_kwarg,
                            region_calib_ws)
            sample_plots.append(tof_output_name)
            # save both the TOF and dSpacing outputs for this region
            self._save_output(instrument, run_no, van_run_no, region, tof_output_name, rb_num)
            self._save_output(instrument, run_no, van_run_no, region, dspacing_output_name, rb_num,
                              unit="dSpacing")
            self._output_sample_logs(instrument, run_no, van_run_no, sample_workspace, rb_num)
        output_workspaces.append(sample_plots)
        DeleteWorkspace(sample_workspace)
    # remove created grouping workspace if present
    if Ads.doesExist("grp_ws"):
        DeleteWorkspace("grp_ws")
    # Plot the output
    if plot_output:
        for ws_names in output_workspaces:
            self._plot_focused_workspaces(ws_names)