def _do_show_data(self, names):
    """Show a matrix or table display for each workspace in *names*."""
    # Imported locally so this module can be imported without pulling in pyplot.
    import matplotlib.pyplot

    parent, flags = get_window_config()
    for workspace in self._ads.retrieveWorkspaces(names, unrollGroups=True):
        try:
            MatrixWorkspaceDisplay.supports(workspace)
            # The plot function is injected here so that the plotting
            # library remains mockable in testing.
            display = MatrixWorkspaceDisplay(workspace, plot=plot, parent=parent, window_flags=flags)
            display.show_view()
        except ValueError:
            try:
                TableWorkspaceDisplay.supports(workspace)
                display = TableWorkspaceDisplay(workspace, plot=matplotlib.pyplot,
                                                parent=parent, window_flags=flags, batch=True)
                display.show_view()
            except ValueError:
                logger.error("Could not open workspace: {0} with neither "
                             "MatrixWorkspaceDisplay nor TableWorkspaceDisplay."
                             "".format(workspace.name()))
def run_algorithm(self, name: str, start_progress: float, end_progress: float, soft_crash=True, **kwargs):
    r"""Create and execute a child algorithm.

    @param name : name of the algorithm to execute
    @param start_progress : progress-bar fraction at which the algorithm starts
    @param end_progress : progress-bar fraction at which the algorithm ends
    @param soft_crash : log an error but do not raise an exception
    @param kwargs : forwarded to the algorithm as property name/value pairs
    """
    algorithm_align = self.createChildAlgorithm(
        name=name, startProgress=start_progress, endProgress=end_progress, enableLogging=True)
    # A plain loop, not a comprehension: setProperty is called for its side
    # effect only, and the old comprehension's loop variable shadowed `name`.
    for prop_name, value in kwargs.items():
        algorithm_align.setProperty(prop_name, value)
    try:
        algorithm_align.execute()
    except Exception as err:
        if soft_crash:
            logger.error('Execution continues')
        else:
            # Preserve the original exception type and chain the cause so the
            # underlying traceback is not lost.
            raise err.__class__(err) from err
    logger.notice(f'{name} has executed')
def should_show_on_startup():
    """
    Determines if the first time dialog should be shown
    :return: True if the dialog should be shown
    """
    # first check the facility and instrument
    facility = ConfigService.getString(AboutPresenter.FACILITY)
    instrument = ConfigService.getString(AboutPresenter.INSTRUMENT)
    if not facility:
        return True
    else:
        # check we can get the facility and instrument
        try:
            facilityInfo = ConfigService.getFacility(facility)
            instrumentInfo = ConfigService.getInstrument(instrument)
            logger.information("Default facility '{0}', instrument '{1}'\n".format(facilityInfo.name(),
                                                                                   instrumentInfo.name()))
        except RuntimeError:
            # failed to find the facility or instrument
            # format() must apply to the whole message: previously it bound only
            # to the second concatenated literal (which has no placeholders), so
            # the facility/instrument names were never substituted.
            logger.error("Could not find your default facility '{0}' or instrument '{1}' in facilities.xml, "
                         "showing please select again.\n".format(facility, instrument))
            return True

    settings = QSettings()
    settings.beginGroup(AboutPresenter.DO_NOT_SHOW_GROUP)
    doNotShowUntilNextRelease = int(settings.value(AboutPresenter.DO_NOT_SHOW, '0'))
    lastVersion = settings.value(AboutPresenter.LAST_VERSION, "")
    settings.endGroup()

    if not doNotShowUntilNextRelease:
        return True

    # Now check if the version has changed since last time
    version = release_notes_url()
    return version != lastVersion
def exception_logger(main_window, exc_type, exc_value, exc_traceback):
    """
    Captures ALL EXCEPTIONS. Prevents the Workbench from crashing silently,
    instead it logs the error on ERROR level.

    :param main_window: A reference to the main window, that will be used to close it
                        in case of the user choosing to terminate the execution.
    :param exc_type: The type of the exception
    :param exc_value: Value of the exception, typically contains the error message.
    :param exc_traceback: Stack trace of the exception.
    """
    formatted = traceback.format_exception(exc_type, exc_value, exc_traceback)
    logger.error("".join(formatted))
    if not UsageService.isEnabled():
        # show the exception message without the traceback
        message = "".join(traceback.format_exception_only(exc_type, exc_value))
        WorkbenchErrorMessageBox(main_window, message).exec_()
        return
    page = CrashReportPage(show_continue_terminate=True)
    presenter = ErrorReporterPresenter(page, '', 'workbench', formatted)
    presenter.show_view_blocking()
    if not page.continue_working:
        main_window.close()
def load_workspace(file_path):
    """Load the data file at *file_path* into the calibration sample workspace.

    :param file_path: path of the file to load
    :raises RuntimeError: if the Load algorithm fails
    """
    try:
        return Load(Filename=file_path, OutputWorkspace="engggui_calibration_sample_ws")
    except Exception as exc:
        message = ("Error while loading workspace. "
                   "Could not run the algorithm Load successfully for the data file "
                   "(path: " + str(file_path) + "). Error description: " + str(exc)
                   + " Please check also the previous log messages for details.")
        logger.error(message)
        raise RuntimeError
def _load_project_interfaces(self, directory):
    """
    Load the passed project interfaces in the given directory, using the project mantidqt package
    :param directory: String; Path to the directory in which the saved file is present
    """
    loader = ProjectLoader(self.pr.recovery_file_ext)
    base_name = os.path.basename(directory)
    file_name = os.path.join(directory, base_name + self.pr.recovery_file_ext)
    # Interfaces/plots are only restored when every expected workspace loaded.
    recovered = loader.load_project(file_name=file_name, load_workspaces=False)
    if not recovered:
        logger.error("Project Recovery: Not all workspaces were recovered successfully, any interfaces requiring "
                     "lost workspaces are not opened")
def onProcessError(self, msg):
    """Slot triggered when the sample processing ends with an error.

    Args:
        msg (str): error message
    """
    sample_number = self._index + 1
    logger.error("Error while processing sample {0}: {1}".format(sample_number, msg))
    self._status = self.STATUS_ERROR
    self.statusChanged.emit()
def _do_plot(self, selected_columns, selected_x, plot_type):
    """Plot the selected table columns against the selected X column.

    :param selected_columns: indices of the columns to plot as Y values
    :param selected_x: index of the column used as the X axis
    :param plot_type: plot type; error plots also pull in associated Y-error columns
    """
    if self._is_error_plot(plot_type):
        yerr = self.presenter.model.marked_columns.find_yerr(selected_columns)
        # remove the Y error columns if they are in the selection for plotting
        # this prevents them from being treated as Y columns
        for err_col in yerr.values():
            try:
                selected_columns.remove(err_col)
            except ValueError:
                # the column is not contained within the selected one
                pass
        # each remaining Y column must have an associated error column
        if len(yerr) != len(selected_columns):
            column_headers = self.presenter.model.original_column_headers()
            self.presenter.view.show_warning(
                self.NO_ASSOCIATED_YERR_FOR_EACH_Y_MESSAGE.format(",".join(
                    [column_headers[col] for col in selected_columns])))
            return
    x = self.presenter.model.get_column(selected_x)

    fig, ax = self.plot.subplots(subplot_kw={"projection": "mantid"})
    # the canvas manager can be None for figures without a GUI window
    if fig.canvas.manager is not None:
        fig.canvas.manager.set_window_title(self.presenter.model.get_name())
    ax.set_xlabel(self.presenter.model.get_column_header(selected_x))
    ax.wsName = self.presenter.model.get_name()

    plot_func = self._get_plot_function_from_type(ax, plot_type)
    kwargs = {}
    for column in selected_columns:
        # if the errors are being plotted, retrieve the data for the column
        if self._is_error_plot(plot_type):
            yerr_column = yerr[column]
            yerr_column_data = self.presenter.model.get_column(yerr_column)
            kwargs["yerr"] = yerr_column_data

        y = self.presenter.model.get_column(column)
        column_label = self.presenter.model.get_column_header(column)
        try:
            plot_func(x, y, label=self.COLUMN_DISPLAY_LABEL.format(column_label), **kwargs)
        except ValueError as e:
            # matplotlib signals non-plottable data with ValueError; warn and abort
            error_message = self.PLOT_FUNCTION_ERROR_MESSAGE.format(e)
            logger.error(error_message)
            self.presenter.view.show_warning(error_message, self.INVALID_DATA_WINDOW_TITLE)
            return

        ax.set_ylabel(column_label)
    legend_set_draggable(ax.legend(), True)
    fig.show()
def load_ceria(ceria_run_no):
    """Load the ceria calibration sample run into the calibration workspace.

    :param ceria_run_no: run number of the calibration sample
    :raises RuntimeError: if the Load algorithm fails
    """
    try:
        return Load(Filename=ceria_run_no, OutputWorkspace="engggui_calibration_sample_ws")
    except Exception as exc:
        message = ("Error while loading calibration sample data. "
                   "Could not run the algorithm Load successfully for the calibration sample "
                   "(run number: " + str(ceria_run_no) + "). Error description: " + str(exc)
                   + " Please check also the previous log messages for details.")
        logger.error(message)
        raise RuntimeError
def load_existing_gsas_parameters(self, file_path):
    """Load calibration parameters from an existing GSAS file.

    :param file_path: path to the GSAS calibration file
    :return: (instrument, vanadium run number, sample run number), or None on failure
    """
    if not path.exists(file_path):
        # mantid's Logger takes a single message argument; the old
        # two-argument call dropped the file path from the log output.
        logger.warning("Could not open GSAS calibration file: " + file_path)
        return
    try:
        instrument, van_no, sample_no, params_table = self.get_info_from_file(file_path)
        self.update_calibration_params_table(params_table)
    except RuntimeError:
        # same single-argument fix as the warning above
        logger.error("Invalid file selected: " + file_path)
        return
    vanadium_corrections.fetch_correction_workspaces(instrument + van_no, instrument)
    return instrument, van_no, sample_no
def generate_plot_script_file(self):
    """Save a script that reproduces the current figure to a user-chosen .py file."""
    script = generate_script(self.canvas.figure)
    filepath = open_a_file_dialog(parent=self.canvas,
                                  default_suffix=".py",
                                  file_filter="Python Files (*.py)",
                                  accept_mode=QFileDialog.AcceptSave,
                                  file_mode=QFileDialog.AnyFile)
    if not filepath:
        # dialog was cancelled
        return
    try:
        with open(filepath, 'w') as script_file:
            script_file.write(script)
    except IOError as io_error:
        logger.error("Could not write file: {}\n{}"
                     "".format(filepath, io_error))
def _calculate_wavelength_band(self):
    """
    Select the wavelength band examining the logs of the first sample
    """
    runs = self.getProperty('RunNumbers').value
    first_run = self._run_list(runs)[0]
    _t_w = self._load_single_run(first_run, '_t_w')
    wavelength = np.mean(_t_w.getRun().getProperty('LambdaRequest').value)
    # Diagnostic output only: this was previously logged at ERROR level with a
    # leftover 'DEBUG' prefix, polluting the error log on every run.
    logger.debug('wavelength = ' + str(wavelength))
    for reflection, band in self._wavelength_bands.items():
        if band[0] <= wavelength <= band[1]:
            self._wavelength_band = np.array(band)
            break
def __init__(self, input_filename=None, group_name=None, setting='', autoconvolution=False):
    """Handle an ab-initio output file and its cached HDF5 representation.

    :param input_filename: full path to the ab-initio output file (str)
    :param group_name: name of the HDF5 group to store the data under (str)
    :param setting: instrument setting string stored alongside the data
    :param autoconvolution: whether autoconvolution was requested
    :raises ValueError: if input_filename/group_name are not valid strings
    """
    self._setting = setting
    self._autoconvolution = autoconvolution
    if isinstance(input_filename, str):
        self._input_filename = input_filename
        try:
            self._hash_input_filename = self.calculate_ab_initio_file_hash()
        except IOError as err:
            logger.error(str(err))
        except ValueError as err:
            logger.error(str(err))

        # extract name of file from the full path in the platform independent way
        filename = os.path.basename(self._input_filename)
        if filename.strip() == "":
            raise ValueError("Name of the file cannot be an empty string.")
    else:
        raise ValueError("Invalid name of input file. String was expected.")

    if isinstance(group_name, str):
        self._group_name = group_name
    else:
        raise ValueError("Invalid name of the group. String was expected.")

    # only strip a recognised ab-initio extension; otherwise keep the name whole
    if filename.split('.')[-1] in AB_INITIO_FILE_EXTENSIONS:
        core_name = filename[0:filename.rfind(".")]  # e.g. NaCl.phonon -> NaCl (core_name) -> NaCl.hdf5
    else:
        core_name = filename  # e.g. OUTCAR -> OUTCAR (core_name) -> OUTCAR.hdf5

    save_dir_path = ConfigService.getString("defaultsave.directory")
    self._hdf_filename = os.path.join(save_dir_path, core_name + ".hdf5")  # name of hdf file

    self._attributes = {}  # attributes for group

    # data for group; they are expected to be numpy arrays or
    # complex data sets which have the form of Python dictionaries or list of Python
    # dictionaries
    self._data = {}
def _do_plot_1d_md(self, names, errors, overplot):
    """
    Plot 1D IMDHistoWorkspaces
    :param names: list of workspace names
    :param errors: boolean. if true, the error bar will be plotted
    :param overplot: boolean. If true, then add these plots to the current figure
                     if one exists and it is a compatible figure
    :return:
    """
    try:
        plot_md_ws_from_names(names, errors, overplot)
    except RuntimeError as run_err:
        logger.error(str(run_err))
def load_existing_calibration_files(calibration):
    """Reload diff constants and related files for an existing calibration.

    :param calibration: calibration object holding the .prm file path
    """
    prm_filepath = calibration.prm_filepath
    # load prm
    if not path.exists(prm_filepath):
        logger.warning(f"Could not open GSAS calibration file: {prm_filepath}")
        return
    try:
        # read diff constants from prm
        write_diff_consts_to_table_from_prm(prm_filepath)
    except RuntimeError:
        logger.error(f"Invalid file selected: {prm_filepath}")
        return
    calibration.load_relevant_calibration_files()
def __init__(self, replace_db=True):
    """Open (or create) the local data-set catalog database.

    :param replace_db: if True, replace an existing catalog database
    """
    ## List of data sets
    self.catalog = []

    # Connect/create to DB
    db_path = os.path.join(os.path.expanduser("~"), ".mantid_data_sets")
    self.db_exists = False
    self.db = None
    try:
        self._create_db(db_path, replace_db)
    except Exception as error:
        logger.error("DataCatalog: Could not access local data catalog\n%s" % sys.exc_info()[1])
        logger.exception(error)
def _do_plot_spectrum(self, names, errors, overplot, advanced=False):
    """
    Plot spectra from the selected workspaces

    :param names: A list of workspace names
    :param errors: If true then error bars will be plotted on the points
    :param overplot: If true then add to the current figure if one exists
                     and it is a compatible figure
    :param advanced: If true then the advanced options will be shown in
                     the spectra selector dialog.
    """
    try:
        plot_from_names(names, errors, overplot, advanced=advanced)
    except RuntimeError as run_err:
        logger.error(str(run_err))
def _load_project_interfaces(self, directory):
    """Load saved interfaces/plots for a recovered project.

    :param directory: String; path to the directory containing the saved recovery file
    """
    loader = ProjectLoader(self.pr.recovery_file_ext)
    recovery_file = os.path.join(directory,
                                 os.path.basename(directory) + self.pr.recovery_file_ext)
    # Interfaces/plots are only loaded if every expected workspace was recovered.
    if not loader.load_project(file_name=recovery_file, load_workspaces=False):
        logger.error(
            "Project Recovery: Not all workspaces were recovered successfully, any interfaces requiring "
            "lost workspaces are not opened")
def _do_plot_3D(self, workspaces, plot_type):
    """
    Make a 3D plot from the selected workspace.

    :param workspaces: A list of workspace names.
    :param plot_type: The type of 3D plot, either 'surface', 'wireframe', or 'contour'.
    """
    plot_function = getattr(functions, f'plot_{plot_type}', None)
    if plot_function is None:
        # unknown plot type: nothing to do
        return
    try:
        plot_function(workspaces)
    except RuntimeError as run_err:
        logger.error(str(run_err))
def _do_show_data(self, names):
    """Show a matrix or table display for each workspace in *names*.

    :param names: list of workspace names to display
    """
    for ws in self._ads.retrieveWorkspaces(names, unrollGroups=True):
        try:
            MatrixWorkspaceDisplay.supports(ws)
            # the plot function is being injected in the presenter
            # this is done so that the plotting library is mockable in testing
            presenter = MatrixWorkspaceDisplay(ws, plot=plot, parent=self)
            presenter.view.show()
        except ValueError:
            try:
                TableWorkspaceDisplay.supports(ws)
                presenter = TableWorkspaceDisplay(ws, plot=matplotlib.pyplot, parent=self)
                presenter.view.show()
            except ValueError:
                # The old message contained an unfilled '{0}' placeholder
                # (format() was never called) and read "either ... nor".
                logger.error(
                    "Could not open workspace: {0} with neither "
                    "MatrixWorkspaceDisplay nor TableWorkspaceDisplay."
                    "".format(ws.name()))
def list_data_sets(self, data_dir=None, call_back=None, process_files=True):
    """
    Process a data directory

    :param data_dir: directory to scan for data files
    :param call_back: optional callable invoked with each data set's attribute list
    :param process_files: forwarded to the data-set finder
    """
    self.catalog = []
    if self.db is None:
        # Python 3: print is a function (the old statement form was a SyntaxError
        # in this otherwise Python 3 file)
        print("DataCatalog: Could not access local data catalog")
        return
    c = self.db.cursor()
    if not os.path.isdir(data_dir):
        return
    try:
        for f in os.listdir(data_dir):
            if f.endswith(self.extension):
                file_path = os.path.join(data_dir, f)
                # prefer the API-based finder when the data-set class provides one
                if hasattr(self.data_set_cls, "find_with_api"):
                    d = self.data_set_cls.find_with_api(file_path, c, process_files=process_files)
                else:
                    d = self.data_set_cls.find(file_path, c, process_files=process_files)
                if d is not None:
                    if call_back is not None:
                        attr_list = d.as_string_list()
                        type_id = self.data_set_cls.data_type_cls.get_likely_type(d.id, c)
                        attr_list += (type_id,)
                        call_back(attr_list)
                    self.catalog.append(d)
        self.db.commit()
        c.close()
    # Python 3 except-as syntax (was the Python 2 comma form)
    except Exception as msg:
        logger.error("DataCatalog: Error working with the local data catalog\n%s" % str(traceback.format_exc()))
        logger.exception(msg)
def _onTaskError(self, wsName, filename, msg):
    """
    Triggered when the export failed.

    Args:
        wsName (str): name of the exported workspace
        filename (str): name of the file
        msg (str): error msg
    """
    logger.error("Error while exporting workspace {}.".format(wsName))
    logger.error(msg)
    if wsName not in self._exports:
        return
    self._exports[wsName].discard(filename)
    if not self._exports[wsName]:
        # all pending exports for this workspace have completed
        del self._exports[wsName]
        self._logSuccessExport(wsName)
def _do_show_data(self, names):
    """Open a matrix or table display for each named workspace."""
    for workspace in self._ads.retrieveWorkspaces(names, unrollGroups=True):
        try:
            MatrixWorkspaceDisplay.supports(workspace)
            # the plot function is injected so that the plotting library is
            # mockable in testing
            presenter = MatrixWorkspaceDisplay(workspace, plot=plot, parent=self)
            presenter.show_view()
        except ValueError:
            try:
                TableWorkspaceDisplay.supports(workspace)
                presenter = TableWorkspaceDisplay(workspace, plot=matplotlib.pyplot, parent=self)
                presenter.show_view()
            except ValueError:
                logger.error("Could not open workspace: {0} with neither "
                             "MatrixWorkspaceDisplay nor TableWorkspaceDisplay."
                             "".format(workspace.name()))
def test_capture_logs(self):
    # With no arguments, capture_logs records messages at the current log level.
    with capture_logs() as logs:
        logger.error('Error message')
        self.assertTrue('Error message' in logs.getvalue())

    with temporary_config():
        config = ConfigService.Instance()
        config['logging.loggers.root.level'] = 'information'
        # Passing level='error' temporarily raises the root logger level inside
        # the context, filtering out lower-priority messages...
        with capture_logs(level='error') as logs:
            self.assertTrue(config['logging.loggers.root.level'] == 'error')
            logger.error('Error-message')
            logger.debug('Debug-message')
            self.assertTrue('Error-message' in logs.getvalue())
            self.assertFalse('Debug-message' in logs.getvalue())

        # ...and restores the previous level on exit.
        self.assertTrue(config['logging.loggers.root.level'] == 'information')
def _onTaskError(self, name, msg):
    """
    Triggered when the export failed.

    Args:
        name (str): the task name, in the form "<workspace>:<filename>"
        msg (str): error msg
    """
    # Split only on the first ':' so that filenames which themselves contain
    # a colon (e.g. Windows drive paths) are kept intact.
    parts = name.split(':', 1)
    wsName = parts[0]
    filename = parts[1]
    logger.error("Error while exporting workspace {}.".format(wsName))
    logger.error(msg)
    if wsName in self._exports:
        self._exports[wsName].discard(filename)
        if not self._exports[wsName]:
            # all pending exports for this workspace have completed
            del self._exports[wsName]
            self._logSuccessExport(wsName)
def load_relevant_calibration_files(self, output_prefix="engggui"):
    """
    Load calibration table ws output from second step of calibration
    (PDCalibration of ROI focused spectra)
    :param output_prefix: prefix for workspace
    """
    # change extension to .nxs
    filepath = path.splitext(self.prm_filepath)[0] + '.nxs'
    self.calibration_table = output_prefix + "_calibration_" + self.get_group_suffix()
    try:
        Load(Filename=filepath, OutputWorkspace=self.calibration_table)
    except Exception as exc:
        logger.error("Unable to load calibration file " + filepath + ". Error: " + str(exc))

    # load in custom grouping - checks if applicable inside method
    if self.group.banks:
        self.get_group_ws()  # creates group workspace
    else:
        self.load_custom_grouping_workspace()
def load_existing_calibration_files(self, file_path):
    """Load calibration info, grouping workspace and bank files for *file_path*.

    :param file_path: path to the GSAS calibration file
    :return: (instrument, ceria run number, grouping ws name, roi text, bank)
    :raises RuntimeError: if the file is missing or invalid
    """
    if not path.exists(file_path):
        msg = "Could not open GSAS calibration file: " + file_path
        logger.warning(msg)
        # A bare `raise` here had no active exception and itself failed with
        # "RuntimeError: No active exception to re-raise"; raise an explicit
        # RuntimeError carrying the message instead.
        raise RuntimeError(msg)
    try:
        instrument, ceria_no, params_table = self.get_info_from_file(file_path)
        self.update_calibration_params_table(params_table)
    except RuntimeError:
        logger.error("Invalid file selected: " + file_path)
        raise
    try:
        bank = EnggUtils.load_relevant_calibration_files(file_path)
    except Exception as e:
        logger.error("Unable to loading calibration files corresponding to " + file_path + ". Error: " + str(e))
        raise
    try:
        grp_ws_name, roi_text = EnggUtils.load_custom_grouping_workspace(file_path)
    except Exception as e:
        logger.error("Unable to load grouping workspace corresponding to " + file_path + ". Error: " + str(e))
        raise
    return instrument, ceria_no, grp_ws_name, roi_text, bank
def exception_logger(main_window, exc_type, exc_value, exc_traceback):
    """
    Captures ALL EXCEPTIONS. Prevents the Workbench from crashing silently,
    instead it logs the error on ERROR level.

    :param main_window: A reference to the main window, that will be used to close it
                        in case of the user choosing to terminate the execution.
    :param exc_type: The type of the exception
    :param exc_value: Value of the exception, typically contains the error message.
    :param exc_traceback: Stack trace of the exception.
    """
    logger.error("".join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    if not UsageService.isEnabled():
        # show the exception message without the traceback
        WorkbenchErrorMessageBox(main_window,
                                 "".join(traceback.format_exception_only(exc_type, exc_value))).exec_()
        return
    page = CrashReportPage(show_continue_terminate=True)
    presenter = ErrorReporterPresenter(page, '', 'workbench')
    presenter.show_view_blocking()
    if not page.continue_working:
        main_window.close()
def list_data_sets(self, data_dir=None, call_back=None, process_files=True):
    """
    Process a data directory

    :param data_dir: directory to scan for data files
    :param call_back: optional callable invoked with each data set's attribute list
    :param process_files: forwarded to the data-set finder
    """
    self.catalog = []
    if self.db is None:
        print ("DataCatalog: Could not access local data catalog")
        return
    c = self.db.cursor()
    if not os.path.isdir(data_dir):
        return
    try:
        for f in os.listdir(data_dir):
            # NOTE(review): self.extension is iterated as a collection of
            # suffixes with no break — a file matching more than one suffix
            # would be processed once per match; confirm the suffixes are
            # mutually exclusive.
            for extension in self.extension:
                if f.endswith(extension):
                    file_path = os.path.join(data_dir, f)
                    # prefer the API-based finder when the data-set class provides one
                    if hasattr(self.data_set_cls, "find_with_api"):
                        d = self.data_set_cls.find_with_api(file_path, c, process_files=process_files)
                    else:
                        d = self.data_set_cls.find(file_path, c, process_files=process_files)
                    if d is not None:
                        if call_back is not None:
                            attr_list = d.as_string_list()
                            type_id = self.data_set_cls.data_type_cls.get_likely_type(d.id, c)
                            attr_list += (type_id,)
                            call_back(attr_list)
                        self.catalog.append(d)
        self.db.commit()
        c.close()
    except Exception as msg:
        logger.error("DataCatalog: Error working with the local data catalog\n%s" % str(traceback.format_exc()))
        logger.exception(msg)
def PyExec(self):
    """Interpolate dynamic structure factors at the target parameter values.

    Validates that Workspaces/ParameterValues (and OutputWorkspaces/
    TargetParameters) match up, builds a dsfinterp ChannelGroup from the
    input workspaces, and writes each interpolated result into a clone of
    the first input workspace.
    """
    # Check congruence of workspaces
    workspaces = self.getProperty('Workspaces').value
    fvalues = self.getProperty('ParameterValues').value
    if len(workspaces) != len(fvalues):
        mesg = 'Number of Workspaces and ParameterValues should be the same'
        #logger.error(mesg)
        raise IndexError(mesg)
    for workspace in workspaces[1:]:
        if not self.areWorkspacesCompatible(mantid.mtd[workspaces[0]], mantid.mtd[workspace]):
            mesg = 'Workspace {0} incompatible with {1}'.format(workspace, workspaces[0])
            logger.error(mesg)
            raise ValueError(mesg)
    # Load the workspaces into a group of dynamic structure factors
    from dsfinterp.dsf import Dsf
    from dsfinterp.dsfgroup import DsfGroup
    from dsfinterp.channelgroup import ChannelGroup
    dsfgroup = DsfGroup()
    for idsf in range(len(workspaces)):
        dsf = Dsf()
        dsf.Load(mantid.mtd[workspaces[idsf]])
        if not self.getProperty('LoadErrors').value:
            dsf.errors = None  # do not incorporate error data
        dsf.SetFvalue(fvalues[idsf])
        dsfgroup.InsertDsf(dsf)
    # Create the intepolator if not instantiated before
    if not self.channelgroup:
        self.channelgroup = ChannelGroup()
        self.channelgroup.InitFromDsfGroup(dsfgroup)
        localregression = self.getProperty('LocalRegression').value
        if localregression:
            regressiontype = self.getProperty('RegressionType').value
            windowlength = self.getProperty('RegressionWindow').value
            self.channelgroup.InitializeInterpolator(running_regr_type=regressiontype, windowlength=windowlength)
        else:
            # windowlength=0 disables local regression
            self.channelgroup.InitializeInterpolator(windowlength=0)
    # Invoke the interpolator and generate the output workspaces
    targetfvalues = self.getProperty('TargetParameters').value
    for targetfvalue in targetfvalues:
        if targetfvalue < min(fvalues) or targetfvalue > max(fvalues):
            mesg = 'Target parameters should lie in [{0}, {1}]'.format(min(fvalues), max(fvalues))
            logger.error(mesg)
            raise ValueError(mesg)
    outworkspaces = self.getProperty('OutputWorkspaces').value
    if len(targetfvalues) != len(outworkspaces):
        # string rejoined here; it was split across lines by formatting damage
        mesg = 'Number of OutputWorkspaces and TargetParameters should be the same'
        logger.error(mesg)
        raise IndexError(mesg)
    for i in range(len(targetfvalues)):
        dsf = self.channelgroup(targetfvalues[i])
        outws = mantid.simpleapi.CloneWorkspace(mantid.mtd[workspaces[0]], OutputWorkspace=outworkspaces[i])
        dsf.Save(outws)  # overwrite dataY and dataE
def test_log_to_python(self):
    # Route Mantid's log channel into the standard Python logging framework.
    py_logger = logging.getLogger('Mantid')
    py_logger.setLevel(logging.INFO)
    handler = CaptureHandler()
    # remove any pre-existing handlers so only the capturing handler sees records
    for hdlr in py_logger.handlers:
        py_logger.removeHandler(hdlr)
    py_logger.addHandler(handler)

    with temporary_config():
        log_to_python()
        logger.information('[[info]]')
        logger.warning('[[warning]]')
        logger.error('[[error]]')
        logger.fatal('[[fatal]]')

    # messages and levels should map 1:1 onto Python logging records;
    # Mantid 'fatal' maps to Python CRITICAL
    self.assertListEqual([record.msg for record in handler.records],
                         ['[[info]]', '[[warning]]', '[[error]]', '[[fatal]]'])
    self.assertListEqual([record.levelname for record in handler.records],
                         ['INFO', 'WARNING', 'ERROR', 'CRITICAL'])
    py_logger.removeHandler(handler)
def __init__(self, input_filename=None, group_name=None):
    """Handle an ab-initio output file and its cached HDF5 representation.

    :param input_filename: full path to the ab-initio output file (str)
    :param group_name: name of the HDF5 group to store the data under (str)
    :raises ValueError: if either argument is not a valid, non-empty string
    """
    if isinstance(input_filename, str):
        self._input_filename = input_filename
        try:
            self._hash_input_filename = self.calculate_ab_initio_file_hash()
        except IOError as err:
            logger.error(str(err))
        except ValueError as err:
            logger.error(str(err))

        # extract name of file from the full path in the platform independent way
        filename = os.path.basename(self._input_filename)
        if filename.strip() == "":
            raise ValueError("Name of the file cannot be an empty string.")
    else:
        raise ValueError("Invalid name of input file. String was expected.")

    if isinstance(group_name, str):
        self._group_name = group_name
    else:
        raise ValueError("Invalid name of the group. String was expected.")

    # strip the extension: e.g. NaCl.phonon -> NaCl -> NaCl.hdf5
    core_name = filename[0:filename.rfind(".")]
    save_dir_path = ConfigService.getString("defaultsave.directory")
    self._hdf_filename = os.path.join(save_dir_path, core_name + ".hdf5")  # name of hdf file

    self._attributes = {}  # attributes for group

    # data for group; they are expected to be numpy arrays or
    # complex data sets which have the form of Python dictionaries or list of Python
    # dictionaries
    self._data = {}
def _do_plot(self, selected_columns, selected_x, plot_type):
    """Plot the selected columns of the table against the selected X column.

    :param selected_columns: indices of the Y columns to plot
    :param selected_x: index of the column used as the X axis
    :param plot_type: PlotType; LINEAR_WITH_ERR also plots associated Y errors
    """
    if plot_type == PlotType.LINEAR_WITH_ERR:
        yerr = self.model.marked_columns.find_yerr(selected_columns)
        # every selected Y column needs an associated error column
        if len(yerr) != len(selected_columns):
            self.view.show_warning(self.NO_ASSOCIATED_YERR_FOR_EACH_Y_MESSAGE)
            return
    x = self.model.get_column(selected_x)
    fig, ax = self.plot.subplots(subplot_kw={'projection': 'mantid'})
    # FigureCanvasBase.set_window_title was deprecated in Matplotlib 3.4 and
    # later removed; set the title via the canvas manager (which can be None
    # for figures without a GUI window), matching the newer _do_plot here.
    if fig.canvas.manager is not None:
        fig.canvas.manager.set_window_title(self.model.get_name())
    ax.set_xlabel(self.model.get_column_header(selected_x))
    plot_func = self._get_plot_function_from_type(ax, plot_type)
    kwargs = {}
    for column in selected_columns:
        if plot_type == PlotType.LINEAR_WITH_ERR:
            yerr_column = yerr[column]
            yerr_column_data = self.model.get_column(yerr_column)
            kwargs["yerr"] = yerr_column_data

        y = self.model.get_column(column)
        column_label = self.model.get_column_header(column)
        try:
            plot_func(x, y, label=self.COLUMN_DISPLAY_LABEL.format(column_label), **kwargs)
        except ValueError as e:
            # matplotlib raises ValueError for data it cannot plot
            error_message = self.PLOT_FUNCTION_ERROR_MESSAGE.format(e)
            logger.error(error_message)
            self.view.show_warning(error_message, self.INVALID_DATA_WINDOW_TITLE)
            return

        ax.set_ylabel(column_label)
    ax.legend()
    fig.show()
def _get_scripts_from_settings():
    """Return the recently-opened script paths stored in settings, sorted by key."""
    try:
        scripts = CONF.get(RECENT_SCRIPTS_KEY)
    except KeyError:
        # Happens quite often and should fail silently.
        scripts = []
    except TypeError:
        # Happens when garbage data is found in the QSettings .ini file
        logger.error(
            "Recently Opened Scripts were lost during save, and workbench has recovered from an error."
        )
        CONF.set(RECENT_SCRIPTS_KEY, [])
        scripts = []

    scripts.sort(key=lambda entry: entry[0])
    # strip each entry of its extra data, keeping only the path
    return [entry[1] for entry in scripts]
def PyExec(self):
    """Interpolate structure factors at the requested target parameters.

    Checks Workspaces/ParameterValues (and OutputWorkspaces/TargetParameters)
    congruence, builds a dsfinterp ChannelGroup, then clones the first input
    workspace per target value and overwrites its data with the interpolation.
    """
    # Check congruence of workspaces
    workspaces = self.getProperty('Workspaces').value
    fvalues = self.getProperty('ParameterValues').value
    if len(workspaces) != len(fvalues):
        mesg = 'Number of Workspaces and ParameterValues should be the same'
        #logger.error(mesg)
        raise IndexError(mesg)
    for workspace in workspaces[1:]:
        if not self.areWorkspacesCompatible(mtd[workspaces[0]], mtd[workspace]):
            mesg = 'Workspace {0} incompatible with {1}'.format(workspace, workspaces[0])
            logger.error(mesg)
            raise ValueError(mesg)
    # Load the workspaces into a group of dynamic structure factors
    from dsfinterp.dsf import Dsf
    from dsfinterp.dsfgroup import DsfGroup
    from dsfinterp.channelgroup import ChannelGroup
    dsfgroup = DsfGroup()
    for idsf in range(len(workspaces)):
        dsf = Dsf()
        dsf.Load( mtd[workspaces[idsf]] )
        if not self.getProperty('LoadErrors').value:
            dsf.errors = None  # do not incorporate error data
        dsf.SetFvalue( fvalues[idsf] )
        dsfgroup.InsertDsf(dsf)
    # Create the intepolator if not instantiated before
    if not self.channelgroup:
        self.channelgroup = ChannelGroup()
        self.channelgroup.InitFromDsfGroup(dsfgroup)
        localregression = self.getProperty('LocalRegression').value
        if localregression:
            regressiontype = self.getProperty('RegressionType').value
            windowlength = self.getProperty('RegressionWindow').value
            self.channelgroup.InitializeInterpolator(running_regr_type=regressiontype, windowlength=windowlength)
        else:
            # windowlength=0 disables local regression
            self.channelgroup.InitializeInterpolator(windowlength=0)
    # Invoke the interpolator and generate the output workspaces
    targetfvalues = self.getProperty('TargetParameters').value
    for targetfvalue in targetfvalues:
        if targetfvalue < min(fvalues) or targetfvalue > max(fvalues):
            mesg = 'Target parameters should lie in [{0}, {1}]'.format(min(fvalues),max(fvalues))
            logger.error(mesg)
            raise ValueError(mesg)
    outworkspaces = self.getProperty('OutputWorkspaces').value
    if len(targetfvalues) != len(outworkspaces):
        # string rejoined here; it was split across lines by formatting damage
        mesg = 'Number of OutputWorkspaces and TargetParameters should be the same'
        logger.error(mesg)
        raise IndexError(mesg)
    for i in range(len(targetfvalues)):
        # NOTE(review): 'outworkspace' is assigned but never used afterwards
        outworkspace = outworkspaces[i]
        dsf = self.channelgroup( targetfvalues[i] )
        outws = CloneWorkspace( mtd[workspaces[0]], OutputWorkspace=outworkspaces[i])
        dsf.Save(outws)  # overwrite dataY and dataE
def __init__(self, input_filename=None, group_name=None):
    """Handle an ab-initio output file, advanced parameters and the cached HDF5 file.

    :param input_filename: full path to the ab-initio output file (str)
    :param group_name: name of the HDF5 group to store the data under (str)
    :raises ValueError: if either argument is not a valid, non-empty string
    """
    if isinstance(input_filename, str):
        self._input_filename = input_filename
        try:
            self._hash_input_filename = self.calculate_ab_initio_file_hash()
        except IOError as err:
            logger.error(str(err))
        except ValueError as err:
            logger.error(str(err))

        # extract name of file from the full path in the platform independent way
        filename = os.path.basename(self._input_filename)
        if filename.strip() == "":
            raise ValueError("Name of the file cannot be an empty string.")
    else:
        raise ValueError("Invalid name of input file. String was expected.")

    if isinstance(group_name, str):
        self._group_name = group_name
    else:
        raise ValueError("Invalid name of the group. String was expected.")

    # strip the extension: e.g. NaCl.phonon -> NaCl -> NaCl.hdf5
    core_name = filename[0:filename.rfind(".")]
    save_dir_path = ConfigService.getString("defaultsave.directory")
    self._hdf_filename = os.path.join(save_dir_path, core_name + ".hdf5")  # name of hdf file

    # advanced parameters are optional; failures are logged, not raised
    try:
        self._advanced_parameters = self._get_advanced_parameters()
    except IOError as err:
        logger.error(str(err))
    except ValueError as err:
        logger.error(str(err))

    self._attributes = {}  # attributes for group

    # data for group; they are expected to be numpy arrays or
    # complex data sets which have the form of Python dictionaries or list of Python
    # dictionaries
    self._data = {}
def emit_abort_script(self):
    """Connect and fire the signal that aborts the project-recovery script."""
    abort_target = self.presenter.project_recovery.loader.multi_file_interpreter.abort_all
    self.abort_project_recovery_script.connect(abort_target)
    logger.error("Project Recovery: Cancelling recovery")
    self.abort_project_recovery_script.emit()