class PluginLoader(object):
    """Load a single Python plugin file as a module.

    The path is validated at construction time; :meth:`run` performs the
    actual import and returns the resulting module object.
    """
    # Required filename extension for plugin sources
    extension = ".py"

    def __init__(self, filepath):
        """
        :param filepath: path to an existing ``.py`` file
        :raises ValueError: if the path is not an existing file or does not
            end with ``.py``
        """
        if not _os.path.isfile(filepath):
            raise ValueError(
                "PluginLoader expects a single filename. '%s' does not point to an existing file" % filepath)
        if not filepath.endswith(self.extension):
            raise ValueError(
                "PluginLoader expects a filename ending with .py. '%s' does not have a .py extension" % filepath)
        self._filepath = filepath
        self._logger = Logger("PluginLoader")

    def run(self):
        """
        Try and load the module we are pointing at and return the module
        object.

        Any ImportErrors raised are not caught and are passed on to the caller.
        """
        pathname = self._filepath
        # Module name is the filename without directory or extension
        name = _os.path.basename(pathname)  # Including extension
        name = _os.path.splitext(name)[0]
        self._logger.debug("Loading python plugin %s" % pathname)
        # NOTE(review): imp.load_source is removed in Python 3.12; consider
        # migrating to importlib.util when the supported runtime allows it.
        return _imp.load_source(name, pathname)
def __init__(self, parent_presenter, beam_centre_model=None):
    """Presenter wiring for the beam-centre tab.

    :param parent_presenter: the owning presenter
    :param beam_centre_model: optional pre-built model; a fresh
        BeamCentreModel is constructed when omitted (or falsy)
    """
    self._view = None
    self._parent_presenter = parent_presenter
    self._logger = Logger("SANS")
    if beam_centre_model:
        self._beam_centre_model = beam_centre_model
    else:
        self._beam_centre_model = BeamCentreModel()
    self._worker = BeamCentreAsync(parent_presenter=self)
def __init__(self, project_file_name: Union[str, Path], mode: HidraProjectFileMode = HidraProjectFileMode.READONLY): """ Initialization :param project_file_name: project file name :param mode: I/O mode """ # configure logging for this class self._log = Logger(__name__) # convert the mode to the enum self._io_mode = HidraProjectFileMode.getMode(mode) # check the file if not project_file_name: raise RuntimeError('Must supply a filename') # force the file to be a Path self._file_name = to_filepath( project_file_name, check_exists=bool(self._io_mode != HidraProjectFileMode.OVERWRITE)) self._checkFileAccess() # open the file using h5py self._project_h5 = h5py.File(self._file_name, mode=str(self._io_mode)) if self._io_mode == HidraProjectFileMode.OVERWRITE: self._init_project()
class PluginLoader(object):
    """Load a single Python plugin file as a module.

    Construction validates the path; :meth:`run` imports the file and
    returns the module object.
    """
    # Required filename extension for plugin sources
    extension = ".py"

    def __init__(self, filepath):
        """
        :param filepath: path to an existing ``.py`` file
        :raises ValueError: if the path is missing or lacks a .py extension
        """
        if not _os.path.isfile(filepath):
            raise ValueError("PluginLoader expects a single filename. '%s' does not point to an existing file" % filepath)
        if not filepath.endswith(self.extension):
            raise ValueError("PluginLoader expects a filename ending with .py. '%s' does not have a .py extension" % filepath)
        self._filepath = filepath
        self._logger = Logger("PluginLoader")

    def run(self):
        """
        Try and load the module we are pointing at and return the module
        object.

        Any ImportErrors raised are not caught and are passed on to the caller.
        """
        pathname = self._filepath
        name = _os.path.basename(pathname)  # Including extension
        name = _os.path.splitext(name)[0]
        self._logger.debug("Loading python plugin %s" % pathname)
        # NOTE(review): imp.load_source is removed in Python 3.12; consider
        # importlib.util when the supported runtime allows it.
        return _imp.load_source(name, pathname)
def __init__(self, facility, view=None):
    """
    :param facility: the facility this run-tab presenter is configured for
    :param view: optional view instance, attached via set_view
    """
    super(RunTabPresenter, self).__init__()
    self._facility = facility
    # Logger
    self.sans_logger = Logger("SANS")
    # Presenter needs to have a handle on the view since it delegates it
    self._view = None
    self.set_view(view)
    # Models that are being used by the presenter
    self._state_model = None
    self._table_model = None
    # Due to the nature of the DataProcessorWidget we need to provide an algorithm with at least one input
    # workspace and at least one output workspace. Our SANS state approach is not compatible with this. Hence
    # we provide a dummy workspace which is not used. We keep it invisible on the ADS and delete it when the
    # main_presenter is deleted.
    # This is not a nice solution but in line with the SANS dummy algorithm approach that we have provided
    # for the data processor.
    self._create_dummy_input_workspace()
    # File information for the first input
    self._file_information = None
    # Settings diagnostic tab presenter
    self._settings_diagnostic_tab_presenter = SettingsDiagnosticPresenter(self)
    # Masking table presenter
    self._masking_table_presenter = MaskingTablePresenter(self)
    # Beam centre presenter
    self._beam_centre_presenter = BeamCentrePresenter(self)
def __init__(self, runObj): self._log = Logger(__name__) # verify the scan index exists try: if runObj['scan_index'].size() == 0: raise RuntimeError('"scan_index" is empty') except KeyError as e: raise RuntimeError('"scan_index" does not exist') from e # Get the time and value from the run object scan_index_times = runObj['scan_index'].times # absolute times scan_index_value = runObj['scan_index'].value # TODO add final time from pcharge logs + 1s with scan_index=0 if np.unique(scan_index_value).size == 1: raise RuntimeError('WARNING: only one scan_index value' ) # TODO should be something else self.times = None self.subruns = None self.propertyFilters = list() self.__generate_sub_run_splitter(scan_index_times, scan_index_value) self.__correct_starting_scan_index_time(runObj) self._createPropertyFilters()
def convert(self):
    """Read the value/units from the UI, run the TOF conversion and show
    the result; expected errors are reported via a message box, anything
    else is logged."""
    # Always reset these values before conversion.
    self.Theta = None
    self.flightpath = None
    try:
        if self.ui.InputVal.text() == "":
            raise RuntimeError("Input value is required for conversion")
        if float(self.ui.InputVal.text()) <= 0:
            raise RuntimeError(
                "Input value must be greater than 0 for conversion")
        inOption = self.ui.inputUnits.currentText()
        outOption = self.ui.outputUnits.currentText()
        if self.ui.totalFlightPathInput.text():
            self.flightpath = float(self.ui.totalFlightPathInput.text())
        else:
            # sentinel meaning "not supplied"
            self.flightpath = -1.0
        if self.ui.scatteringAngleInput.text():
            # degrees * pi / 360 — half the scattering angle, in radians
            self.Theta = float(
                self.ui.scatteringAngleInput.text()) * math.pi / 360.0
        else:
            self.Theta = -1.0
        self.output = TofConverter.convertUnits.doConversion(
            self.ui.InputVal.text(), inOption, outOption, self.Theta,
            self.flightpath)

        self.ui.convertedVal.clear()
        self.ui.convertedVal.insert(str(self.output))
    except (UnboundLocalError, ArithmeticError, ValueError, RuntimeError) as err:
        QMessageBox.warning(self, "TofConverter", str(err))
        return
    except Exception as exc:
        # Bug fix: Logger.error is an instance method taking a string; the
        # original Logger.error(exc) raised inside the handler itself.
        Logger("TofConverter").error(str(exc))
        return
def __init__(self, view, exit_code, application='mantidplot', traceback=''):
    """
    :param view: the error-report view managed by this presenter
    :param exit_code: exit code of the failing application
    :param application: name of the failing application
    :param traceback: optional traceback text; when empty, a previously
        dumped stacktrace file (if any) is read instead
    """
    self.error_log = Logger("error")
    self._view = view
    self._exit_code = exit_code
    self._application = application
    self._traceback = traceback
    self._view.set_report_callback(self.error_handler)

    if not traceback:
        # fall back to the stacktrace dumped on disk by the crashed app
        traceback_file_path = os.path.join(
            ConfigService.getAppDataDirectory(),
            '{}_stacktrace.txt'.format(application))
        try:
            if os.path.isfile(traceback_file_path):
                with open(traceback_file_path, 'r') as file:
                    self._traceback = file.readlines()
                # rename so the same stacktrace is not picked up again
                new_workspace_name = os.path.join(
                    ConfigService.getAppDataDirectory(),
                    '{}_stacktrace_sent.txt'.format(application))
                os.rename(traceback_file_path, new_workspace_name)
        except OSError:
            # best effort: failure to read/rename must not break reporting
            pass
def PyExec(self):
    """Algorithm entry point: clone the input workspaces, read the search
    parameters, run the centre search and publish the result."""
    self.logger = Logger("CentreFinder")
    self.logger.notice("Starting centre finder routine...")
    # Work on clones so the workspaces on the ADS stay untouched
    self.sample_scatter = self._get_cloned_workspace("SampleScatterWorkspace")
    self.sample_scatter_monitor = self._get_cloned_workspace("SampleScatterMonitorWorkspace")
    self.sample_transmission = self._get_cloned_workspace("SampleTransmissionWorkspace")
    self.sample_direct = self._get_cloned_workspace("SampleDirectWorkspace")

    self.can_scatter = self._get_cloned_workspace("CanScatterWorkspace")
    self.can_scatter_monitor = self._get_cloned_workspace("CanScatterMonitorWorkspace")
    self.can_transmission = self._get_cloned_workspace("CanTransmissionWorkspace")
    self.can_direct = self._get_cloned_workspace("CanDirectWorkspace")

    self.component = self.getProperty("Component").value
    self.r_min = self.getProperty("RMin").value
    self.r_max = self.getProperty("RMax").value

    self.state = self._get_state()

    instrument = self.sample_scatter.getInstrument()
    # Scale factors applied when logging the centre; LARMOR reports the
    # first coordinate unscaled — presumably the others are reported in mm
    # (TODO confirm units)
    self.scale_1 = 1.0 if instrument.getName() == 'LARMOR' else 1000
    self.scale_2 = 1000

    centre_1_hold, centre_2_hold = self._find_centres()

    self.setProperty("Centre1", centre_1_hold)
    self.setProperty("Centre2", centre_2_hold)

    self.logger.notice("Centre coordinates updated: [{}, {}]".format(centre_1_hold * self.scale_1,
                                                                     centre_2_hold * self.scale_2))
def _remove_job(self, trans_id, job_id=None, is_running=False):
    """
    Abort job and/or stop transaction
    @param trans_id: remote transaction ID
    @param job_id: remote job ID
    @param is_running: True if the job is currently running
    """
    if is_running:
        try:
            # At this point we are authenticated so just purge
            alg = AlgorithmManager.create("AbortRemoteJob", 1)
            alg.initialize()
            alg.setProperty("ComputeResource",
                            str(self._settings.compute_resource))
            alg.setProperty("JobID", job_id)
            alg.execute()
        except Exception as exc:
            # Bug fix: sys.exc_value is Python 2 only — report the caught
            # exception directly (and avoid a bare except).
            Logger("cluster_status").error("Problem aborting job: %s" % exc)

    try:
        alg = AlgorithmManager.create("StopRemoteTransaction", 1)
        alg.initialize()
        alg.setProperty("ComputeResource",
                        str(self._settings.compute_resource))
        alg.setProperty("TransactionID", trans_id)
        alg.execute()
    except Exception as exc:
        # "Project" was a typo for "Problem" in the original message
        Logger("cluster_status").error(
            "Problem stopping remote transaction: %s" % exc)

    self._update_content()
def __init__(self, facility, view=None):
    """
    :param facility: the facility this run-tab presenter is configured for
    :param view: optional view instance, attached via set_view
    """
    super(RunTabPresenter, self).__init__()
    self._facility = facility
    # Logger
    self.sans_logger = Logger("SANS")
    # Name of graph to output to
    self.output_graph = 'SANS-Latest'
    self.progress = 0

    # Models that are being used by the presenter
    self._state_model = None
    self._table_model = TableModel()

    # Presenter needs to have a handle on the view since it delegates it
    self._view = None
    self.set_view(view)
    self._processing = False
    self.work_handler = WorkHandler()
    self.batch_process_runner = BatchProcessRunner(self.notify_progress,
                                                   self.on_processing_finished,
                                                   self.on_processing_error)

    # File information for the first input
    self._file_information = None
    self._clipboard = []

    # Settings diagnostic tab presenter
    self._settings_diagnostic_tab_presenter = SettingsDiagnosticPresenter(self)
    # Masking table presenter
    self._masking_table_presenter = MaskingTablePresenter(self)
    # Beam centre presenter
    self._beam_centre_presenter = BeamCentrePresenter(self, WorkHandler, BeamCentreModel, SANSCentreFinder)
    # Workspace Diagnostic page presenter
    self._workspace_diagnostic_presenter = DiagnosticsPagePresenter(self, WorkHandler, run_integral, create_state,
                                                                   self._facility)
def __init__(self, view, exit_code: str, application: str, traceback: Optional[str] = None):
    """
    :param view: A reference to the view managed by this presenter
    :param exit_code: A string containing the exit_code of the failing application
    :param application: A string containing the failing application name
    :param traceback: An optional string containing a traceback dumped as JSON-encoded string
    """
    self.error_log = Logger("errorreports")
    self._view = view
    self._exit_code = exit_code
    self._application = application
    self._traceback = traceback if traceback else ''
    self._view.set_report_callback(self.error_handler)
    self._view.moreDetailsButton.clicked.connect(self.show_more_details)

    if not traceback:
        # fall back to the stacktrace dumped on disk by the crashed app
        traceback_file_path = os.path.join(
            ConfigService.getAppDataDirectory(),
            '{}_stacktrace.txt'.format(application))
        try:
            if os.path.isfile(traceback_file_path):
                with open(traceback_file_path, 'r') as file:
                    self._traceback = file.readlines()
                # rename so the same stacktrace is not picked up again
                new_workspace_name = os.path.join(
                    ConfigService.getAppDataDirectory(),
                    '{}_stacktrace_sent.txt'.format(application))
                os.rename(traceback_file_path, new_workspace_name)
        except OSError:
            # best effort: failure to read/rename must not break reporting
            pass
def __init__(self, parent_presenter):
    """Create the presenter with a default-constructed model and worker.

    :param parent_presenter: the presenter that owns this one
    """
    super(BeamCentrePresenter, self).__init__()
    self._parent_presenter = parent_presenter
    self._view = None
    self._logger = Logger("SANS")
    self._work_handler = WorkHandler()
    self._beam_centre_model = BeamCentreModel()
def __init__(self, filepath):
    """Validate *filepath* and store it along with a class logger.

    :param filepath: path to an existing ``.py`` plugin file
    :raises ValueError: when the path is missing or not a .py file
    """
    if not _os.path.isfile(filepath):
        message = "PluginLoader expects a single filename. '%s' does not point to an existing file" % filepath
        raise ValueError(message)
    if not filepath.endswith(self.extension):
        message = "PluginLoader expects a filename ending with .py. '%s' does not have a .py extension" % filepath
        raise ValueError(message)
    self._filepath = filepath
    self._logger = Logger("PluginLoader")
def pre_process(self):
    """
    Reduction steps that are meant to be executed only once per set
    of data files. After this is executed, all files will go through
    the list of reduction steps.
    """
    Logger("Reducer").information("Setting up reduction options")
    if self.setup_algorithm is not None:
        alg = AlgorithmManager.create(self.setup_algorithm)
        alg.initialize()
        # Names of the properties the setup algorithm actually accepts
        props = [p.name for p in alg.getProperties()]
        for key in self.reduction_properties.keys():
            if key in props:
                try:
                    alg.setProperty(key, self.reduction_properties[key])
                except Exception as exc:
                    # Bug fix: sys.exc_value is Python 2 only — use the
                    # caught exception (and avoid a bare except).
                    msg = "Error setting %s=%s" % (
                        key, str(self.reduction_properties[key]))
                    msg += "\n %s" % exc
                    Logger("Reducer").error(msg)
            else:
                Logger("Reducer").warning(
                    "Setup algorithm has no %s property" % key)

        if "ReductionProperties" in props:
            alg.setPropertyValue("ReductionProperties",
                                 self.get_reduction_table_name())
        alg.execute()
def __init__(self, parent_presenter, WorkHandler, BeamCentreModel, SANSCentreFinder):
    """Wire up the presenter with injected collaborator factories.

    :param parent_presenter: the owning presenter
    :param WorkHandler: factory for the background work handler
    :param BeamCentreModel: factory for the beam-centre model
    :param SANSCentreFinder: centre finder handed to the model
    """
    self._parent_presenter = parent_presenter
    self._view = None
    self._logger = Logger("SANS")
    self._work_handler = WorkHandler()
    self._beam_centre_model = BeamCentreModel(SANSCentreFinder)
class ErrorReporterPresenter(object):
    """Presenter that receives the user's choice from the error-report
    dialog and submits a crash report accordingly."""

    def __init__(self, view, exit_code):
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._view.action.connect(self.error_handler)

    def error_handler(self, continue_working, share, name, email):
        # share == 0 shares full details, share == 1 shares without them;
        # any other value sends nothing.
        if share in (0, 1):
            errorReporter = ErrorReporter("mantidplot",
                                          UsageService.getUpTime(),
                                          self._exit_code,
                                          share == 0,
                                          str(name),
                                          str(email))
            errorReporter.sendErrorReport()

        if continue_working:
            self.error_log.error("Continue working.")
        else:
            self.error_log.error("Terminated by user.")
            self._view.quit()

    def show_view(self):
        self._view.show()
def beam_center_gravitational_drop(beam_center_file, sdd=1.13):
    '''
    This method is used for correcting for gravitational drop
    @param beam_center_file :: file where the beam center was found
    @param sdd :: sample detector distance to apply the beam center
    @return (beam_center_x, new_beam_center_y) in pixels, or None when the
        beam-centre file cannot be read
    '''
    def calculate_neutron_drop(path_length, wavelength):
        '''
        Calculate the gravitational drop of the neutrons
        path_length in meters
        wavelength in Angstrom
        '''
        wavelength *= 1e-10  # Angstrom -> meters
        neutron_mass = 1.674927211e-27  # kg
        gravity = 9.80665  # m/s^2
        h_planck = 6.62606896e-34  # J*s
        l_2 = (gravity * neutron_mass**2 / (2.0 * h_planck**2)) * path_length**2
        return wavelength**2 * l_2

    # Get beam center used in the previous reduction
    pm = mantid.PropertyManagerDataService[ReductionSingleton().property_manager]
    beam_center_x = pm['LatestBeamCenterX'].value
    beam_center_y = pm['LatestBeamCenterY'].value
    Logger("CommandInterface").information("Beam Center before: [%.2f, %.2f] pixels" % (beam_center_x, beam_center_y))

    try:
        # check if the workspace still exists
        wsname = "__beam_finder_" + os.path.splitext(beam_center_file)[0]
        ws = mantid.mtd[wsname]
        Logger("CommandInterface").debug("Using Workspace: %s." % (wsname))
    except KeyError:
        # Let's try loading the file. For some reason the beamcenter ws is not there...
        try:
            ws = Load(beam_center_file)
            Logger("CommandInterface").debug("Using filename %s." % (beam_center_file))
        except IOError:
            Logger("CommandInterface").error("Cannot read input file %s." % beam_center_file)
            # NOTE(review): returns None here while the success path returns
            # a tuple — callers must handle this
            return

    i = ws.getInstrument()
    y_pixel_size_mm = i.getNumberParameter('y-pixel-size')[0]
    Logger("CommandInterface").debug("Y Pixel size = %.2f mm" % y_pixel_size_mm)
    y_pixel_size = y_pixel_size_mm * 1e-3  # In meters
    distance_detector1 = i.getComponentByName("detector1").getPos()[2]
    path_length = distance_detector1 - sdd
    Logger("CommandInterface").debug("SDD detector1 = %.3f meters. SDD for wing = %.3f meters." % (distance_detector1, sdd))
    Logger("CommandInterface").debug("Path length for gravitational drop = %.3f meters." % (path_length))
    r = ws.run()
    wavelength = r.getProperty("wavelength").value
    Logger("CommandInterface").debug("Wavelength = %.2f A." % (wavelength))

    drop = calculate_neutron_drop(path_length, wavelength)
    Logger("CommandInterface").debug("Gravitational drop = %.6f meters." % (drop))
    # 1 pixel -> y_pixel_size
    # x pixel -> drop
    drop_in_pixels = drop / y_pixel_size
    new_beam_center_y = beam_center_y + drop_in_pixels
    Logger("CommandInterface").information("Beam Center after: [%.2f, %.2f] pixels" % (beam_center_x, new_beam_center_y))

    return beam_center_x, new_beam_center_y
def cluster_submit(self, output_dir, user, pwd, resource=None,
                   nodes=4, cores_per_node=4, job_name=None):
    """
    Submit the reduction job to a cluster
    @param output_dir: directory where the output data will be written
    @param user: name of the user on the cluster
    @param pwd: password of the user on the cluster
    @param resource: name of the compute resource to submit to
    @param nodes: number of nodes to request
    @param cores_per_node: number of cores per node to request
    @param job_name: optional base name for the submitted job(s)
    """
    Logger("scripter").notice("Preparing remote reduction job submission")

    if HAS_MANTID:
        # Generate reduction script and write it to file
        scripts = self.to_batch()
        for i in range(len(scripts)):
            script = scripts[i]
            script_name = "job_submission_%s.py" % i

            lower_case_instr = self.instrument_name.lower()
            # Bug fix: the original called job_name.lower() before the
            # None check, raising AttributeError for the default argument.
            if job_name is None or len(job_name) == 0:
                _job_name = lower_case_instr
            elif job_name.lower().find(lower_case_instr) >= 0:
                _job_name = job_name.strip()
            else:
                _job_name = "%s_%s" % (lower_case_instr, job_name.strip())

            # Make sure we have unique job names
            if len(scripts) > 1:
                _job_name += "_%s" % i

            # Submit the job
            # Note: keeping version 1 for now. See comment about
            # versions in cluster_status.py
            submit_cmd = "Authenticate(Version=1, ComputeResource='%s', " % resource
            submit_cmd += "UserName='******', Password='******')\n" % (user, pwd)
            # Note: keeping version 1 for now. See comment about
            # versions in cluster_status.py
            submit_cmd += "id=StartRemoteTransaction(Version=1, ComputeResource='%s')\n" % resource
            # Note: keeping version 1 for now. See comment about
            # versions in cluster_status.py
            submit_cmd += "SubmitRemoteJob(Version=1, ComputeResource='%s', " % resource
            submit_cmd += "TaskName='%s'," % _job_name
            submit_cmd += "NumNodes=%s, CoresPerNode=%s, " % (
                nodes, cores_per_node)
            submit_cmd += "TransactionID=id, "
            submit_cmd += "PythonScript=\"\"\"%s\"\"\", " % script
            submit_cmd += "ScriptName='%s')" % script_name
            mantidplot.runPythonScript(submit_cmd, True)
    else:
        Logger("scripter").error(
            "Mantid is unavailable to submit a reduction job")
def find_beam_centre(self, state):
    """
    This is called from the GUI and runs the find beam centre algorithm given a
    state model and the settings held on this model (direction flags, radii,
    q limits, starting position).
    :param state: A SANS state object
    :returns: The centre position found, as a dict with keys "pos1" and "pos2".
    """
    centre_finder = self.SANSCentreFinder()
    find_direction = None
    if self.up_down and self.left_right:
        find_direction = FindDirectionEnum.All
    elif self.up_down:
        find_direction = FindDirectionEnum.Up_Down
    elif self.left_right:
        find_direction = FindDirectionEnum.Left_Right
    else:
        # No direction chosen: nothing to search, return the current position
        logger = Logger("CentreFinder")
        logger.notice("Have chosen no find direction exiting early")
        return {"pos1": self.lab_pos_1, "pos2": self.lab_pos_2}

    # Optional q-range overrides on the reduction state
    if self.q_min:
        state.convert_to_q.q_min = self.q_min
    if self.q_max:
        state.convert_to_q.q_max = self.q_max

    if self.COM:
        # First pass with reduction_method=False (centre-of-mass estimate),
        # then refine from that estimate with the reduction-based search.
        centre = centre_finder(state,
                               r_min=self.r_min,
                               r_max=self.r_max,
                               max_iter=self.max_iterations,
                               x_start=self.lab_pos_1,
                               y_start=self.lab_pos_2,
                               tolerance=self.tolerance,
                               find_direction=find_direction,
                               reduction_method=False)
        centre = centre_finder(state,
                               r_min=self.r_min,
                               r_max=self.r_max,
                               max_iter=self.max_iterations,
                               x_start=centre['pos1'],
                               y_start=centre['pos2'],
                               tolerance=self.tolerance,
                               find_direction=find_direction,
                               reduction_method=True,
                               verbose=self.verbose)
    else:
        centre = centre_finder(state,
                               r_min=self.r_min,
                               r_max=self.r_max,
                               max_iter=self.max_iterations,
                               x_start=self.lab_pos_1,
                               y_start=self.lab_pos_2,
                               tolerance=self.tolerance,
                               find_direction=find_direction,
                               reduction_method=True,
                               verbose=self.verbose)
    return centre
def __init__(self, notify_progress, notify_done, notify_error):
    """Hook the batch-processing signals up to the supplied callbacks.

    :param notify_progress: slot invoked as each row is processed
    :param notify_done: callable invoked when the whole batch completes
    :param notify_error: slot invoked when a row fails
    """
    super(BatchProcessRunner, self).__init__()
    self.notify_done = notify_done
    self.row_processed_signal.connect(notify_progress)
    self.row_failed_signal.connect(notify_error)
    self._logger = Logger("SANS")
    self._worker = None
    self.batch_processor = SANSBatchReduction()
def set_startTime(self):
    """ Set the starting time and left slide bar
    """
    inps = str(self.ui.lineEdit_3.text())
    info_msg = "Starting time = %s" % (inps)
    Logger("Filter_Events").information(info_msg)

    xlim = self.ui.mainplot.get_xlim()
    if inps == "":
        # Empty. Use default
        newtime0 = xlim[0]
    else:
        newtime0 = float(inps)

    # Convert to integer slide value (percentage of the x-range, 0-100)
    ileftvalue = int((newtime0 - xlim[0]) / (xlim[1] - xlim[0]) * 100)
    debug_msg = "iLeftSlide = %s" % str(ileftvalue)
    Logger("Filter_Events").debug(debug_msg)

    # Skip if same as original
    if ileftvalue == self._leftSlideValue:
        return

    # Set the value if out of range
    resetT = True
    if ileftvalue < 0:
        # Minimum value as 0
        ileftvalue = 0
    elif ileftvalue > self._rightSlideValue:
        # Maximum value as right slide value
        ileftvalue = self._rightSlideValue
    else:
        resetT = False

    if resetT is True:
        # slider was clamped — recompute the time from the clamped position
        newtime0 = xlim[0] + ileftvalue * (xlim[1] - xlim[0]) * 0.01
    info_msg = "Corrected iLeftSlide = %s (vs. right = %s)" % (
        str(ileftvalue), str(self._rightSlideValue))
    Logger("Filter_Events").information(info_msg)

    # Move the slide bar (left)
    self._leftSlideValue = ileftvalue

    # Move the vertical line
    leftx = [newtime0, newtime0]
    lefty = self.ui.mainplot.get_ylim()
    setp(self.leftslideline, xdata=leftx, ydata=lefty)

    self.ui.graphicsView.draw()

    # Set the value to left slider
    self.ui.horizontalSlider.setValue(self._leftSlideValue)
    # Reset the value of line edit
    if resetT is True:
        self.ui.lineEdit_3.setText(str(newtime0))

    return
def __init__(self, guess_centre):
    """
    Takes a loaded reducer (sample information etc.) and the initial guess
    of the centre position that are required for all later iterations
    @param guess_centre: the starting position that the trial x and y are relative to
    """
    self.detector = None
    self._last_pos = guess_centre
    self.logger = Logger("CentreFinder")
def SaveIqAscii(reducer=None, process=''):
    """
    Old command for backward compatibility: warns the user and records the
    output directory and process info on the reduction singleton.
    """
    output_dir = os.path.expanduser('~')
    msg = ("SaveIqAscii is not longer used:\n "
           "Please use 'SaveIq' instead\n "
           "Your output files are currently in %s" % output_dir)
    Logger.get("CommandInterface").warning(msg)
    ReductionSingleton().reduction_properties["OutputDirectory"] = output_dir
    ReductionSingleton().reduction_properties["ProcessInfo"] = str(process)
def find_data(file, instrument='', allow_multiple=False):
    """
    Finds a file path for the specified data set, which can either be:
        - a run number
        - an absolute path
        - a file name
    @param file: file name or part of a file name
    @param instrument: if supplied, FindNeXus will be tried as a last resort
    @param allow_multiple: if True, a comma/semicolon-separated list may be
        given and a list of paths may be returned
    @raises RuntimeError: when no file can be found
    """
    # First, assume a file name
    file = str(file).strip()

    # If we allow multiple files, users may use ; as a separator,
    # which is incompatible with the FileFinder
    n_files = 1
    if allow_multiple:
        file = file.replace(';', ',')
        toks = file.split(',')
        n_files = len(toks)

    instrument = str(instrument)
    file_path = FileFinder.getFullPath(file)
    if os.path.isfile(file_path):
        return file_path

    # Second, assume a run number and pass the instrument name as a hint
    try:
        # FileFinder doesn't like dashes...
        instrument = instrument.replace('-', '')
        f = FileFinder.findRuns(instrument + file)
        if os.path.isfile(f[0]):
            if allow_multiple:
                # Mantid returns its own list object type, so make a real list out of it
                if len(f) == n_files:
                    return [i for i in f]
            else:
                return f[0]
    except Exception:
        # FileFinder couldn't make sense of the supplied information
        pass

    # Third, assume a run number, without instrument name to take care of list of full paths
    try:
        f = FileFinder.findRuns(file)
        if os.path.isfile(f[0]):
            if allow_multiple:
                # Mantid returns its own list object type, so make a real list out of it
                if len(f) == n_files:
                    return [i for i in f]
            else:
                return f[0]
    except Exception:
        # FileFinder couldn't make sense of the supplied information
        pass

    # If we didn't find anything, raise an exception
    Logger.get('find_data').error(
        "\n\nCould not find a file for %s: check your reduction parameters\n\n" % str(file))
    # Bug fix: the original used the Python 2 "raise E, msg" syntax, which
    # is a SyntaxError on Python 3.
    raise RuntimeError("Could not find a file for %s" % str(file))
def _loadFile(self, filename):
    """ Load file or run
    File will be loaded to a workspace shown in MantidPlot

    :param filename: a run number string, a file name, or an
        "instrument_runnumber" short form
    :return: the loaded workspace, None on an unsupported name, or the
        error message string if Load raised a RuntimeError
    """
    config = ConfigService

    # Check input file name and output workspace name
    if filename.isdigit() is True:
        # Construct a file name from run number
        runnumber = int(filename)
        if runnumber <= 0:
            error_msg = "Run number cannot be less or equal to zero. User gives %s. " % (
                filename)
            Logger("Filter_Events").error(error_msg)
            return None
        else:
            # prefix with the instrument short name to build a loadable name
            ishort = config.getInstrument(self._instrument).shortName()
            filename = "%s_%s" % (ishort, filename)
            wsname = filename + "_event"

    elif filename.count(".") > 0:
        # A proper file name
        wsname = os.path.splitext(os.path.split(filename)[1])[0]

    elif filename.count("_") == 1:
        # A short one as instrument_runnumber
        iname = filename.split("_")[0]
        str_runnumber = filename.split("_")[1]
        if str_runnumber.isdigit() is True and int(str_runnumber) > 0:
            # Accepted format
            ishort = config.getInstrument(iname).shortName()
            wsname = "%s_%s_event" % (ishort, str_runnumber)
        else:
            # Non-supported
            error_msg = "File name / run number in such format %s is not supported. " % (
                filename)
            Logger("Filter_Events").error(error_msg)
            return None

    else:
        # Unsupported format
        error_msg = "File name / run number in such format %s is not supported. " % (
            filename)
        Logger("Filter_Events").error(error_msg)
        return None

    # Load
    try:
        ws = api.Load(Filename=filename, OutputWorkspace=wsname)
    except RuntimeError as e:
        # NOTE(review): returns the error message string here, while other
        # failure paths return None — callers must handle both
        ws = None
        return str(e)

    return ws
def set_stopTime(self):
    """ Set the stopping time and right slide bar
    """
    inps = str(self.ui.lineEdit_4.text())
    info_msg = "Stopping time = %s" % (inps)
    Logger("Filter_Events").information(info_msg)

    xlim = self.ui.mainplot.get_xlim()
    if inps == "":
        # Empty. Use default
        newtimef = xlim[1]
    else:
        # Parse
        newtimef = float(inps)

    # Convert to integer slide value (percentage of the x-range, 0-100)
    irightvalue = int((newtimef - xlim[0]) / (xlim[1] - xlim[0]) * 100)
    info_msg = "iRightSlide = %s" % str(irightvalue)
    Logger("Filter_Events").information(info_msg)

    # Return if no change
    if irightvalue == self._rightSlideValue:
        return

    # Correct value
    resetT = True
    if irightvalue > 100:
        irightvalue = 100
    elif irightvalue < self._leftSlideValue:
        irightvalue = self._leftSlideValue
    else:
        resetT = False

    if resetT is True:
        # slider was clamped — recompute the time from the clamped position
        newtimef = xlim[0] + irightvalue * (xlim[1] - xlim[0]) * 0.01

    # Move the slide bar (right)
    self._rightSlideValue = irightvalue

    # Move the vertical line
    rightx = [newtimef, newtimef]
    righty = self.ui.mainplot.get_ylim()
    setp(self.rightslideline, xdata=rightx, ydata=righty)

    self.ui.graphicsView.draw()

    # Set the value to right slider
    self.ui.horizontalSlider_2.setValue(self._rightSlideValue)

    # Reset to line edit
    if resetT:
        self.ui.lineEdit_4.setText(str(newtimef))

    return
def __init__(self, ws, parent=None, window_flags=Qt.Window, model=None, view=None, conf=None):
    """
    Create a presenter for controlling the slice display for a workspace
    :param ws: Workspace containing data to display and slice
    :param parent: An optional parent widget
    :param window_flags: An optional set of window flags
    :param model: A model to define slicing operations. If None uses SliceViewerModel
    :param view: A view to display the operations. If None uses SliceViewerView
    :param conf: An optional configuration/settings object
    """
    model: SliceViewerModel = model if model else SliceViewerModel(ws)
    self.view = view if view else SliceViewerView(self,
                                                  Dimensions.get_dimensions_info(ws),
                                                  model.can_normalize_workspace(), parent,
                                                  window_flags, conf)
    super().__init__(ws, self.view.data_view, model)
    self._logger = Logger("SliceViewer")
    self._peaks_presenter: PeaksViewerCollectionPresenter = None
    self._cutviewer_presenter = None
    self.conf = conf

    # Acts as a 'time capsule' to the properties of the model at this
    # point in the execution. By the time the ADS observer calls self.replace_workspace,
    # the workspace associated with self.model has already been changed.
    self.initial_model_properties = model.get_properties()
    self._new_plot_method, self.update_plot_data = self._decide_plot_update_methods()

    self.view.setWindowTitle(self.model.get_title())
    self.view.data_view.create_axes_orthogonal(
        redraw_on_zoom=not WorkspaceInfo.can_support_dynamic_rebinning(self.model.ws))

    if self.model.can_normalize_workspace():
        self.view.data_view.set_normalization(ws)
        self.view.data_view.norm_opts.currentTextChanged.connect(self.normalization_changed)
    if not self.model.can_support_peaks_overlays():
        self.view.data_view.disable_tool_button(ToolItemText.OVERLAY_PEAKS)
    # check whether to enable non-orthog view
    # don't know whether can always assume init with display indices (0,1) - so get sliceinfo
    sliceinfo = self.get_sliceinfo()
    if not sliceinfo.can_support_nonorthogonal_axes():
        self.view.data_view.disable_tool_button(ToolItemText.NONORTHOGONAL_AXES)
    if not self.model.can_support_non_axis_cuts():
        self.view.data_view.disable_tool_button(ToolItemText.NONAXISALIGNEDCUTS)

    self.view.data_view.help_button.clicked.connect(self.action_open_help_window)

    self.refresh_view()

    # Start the GUI with zoom selected.
    self.view.data_view.activate_tool(ToolItemText.ZOOM)

    # Keep the presenter in sync with workspace changes on the ADS
    self.ads_observer = SliceViewerADSObserver(self.replace_workspace, self.rename_workspace,
                                               self.ADS_cleared, self.delete_workspace)
def __init__(self, parent_presenter, SANSCentreFinder, work_handler=None, beam_centre_model=None):
    """Presenter for the beam-centre tab.

    :param parent_presenter: the owning presenter
    :param SANSCentreFinder: centre finder used when a default model is built
    :param work_handler: optional pre-built work handler; created when omitted
    :param beam_centre_model: optional pre-built model; created when omitted
    """
    self._view = None
    self._parent_presenter = parent_presenter
    if work_handler:
        self._work_handler = work_handler
    else:
        self._work_handler = WorkHandler()
    self._logger = Logger("SANS")
    if beam_centre_model:
        self._beam_centre_model = beam_centre_model
    else:
        self._beam_centre_model = BeamCentreModel(SANSCentreFinder)
def set_maxLogValue(self):
    """ Set maximum log value from line-edit
    """
    inps = str(self.ui.lineEdit_6.text())
    debug_msg = "Maximum Log Value = %s" % (inps)
    Logger("Filter_Events").debug(debug_msg)

    ylim = self.ui.mainplot.get_ylim()
    if inps == "":
        # Empty. Default to maxY
        newmaxY = ylim[1]
    else:
        # Parse
        newmaxY = float(inps)

    # Convert to integer slide value (percentage of the y-range, 0-100)
    imaxlogval = int((newmaxY - ylim[0]) / (ylim[1] - ylim[0]) * 100)
    debug_msg = "iUpperSlide = %s" % str(imaxlogval)
    Logger("Filter_Events").debug(debug_msg)

    # Return if no change
    if imaxlogval == self._upperSlideValue:
        return

    # Set to default if out of range
    resetL = True
    # if imaxlogval >= 100:
    #     imaxlogval = 100
    if imaxlogval < self._lowerSlideValue:
        # upper slide must stay above the lower slide
        imaxlogval = self._lowerSlideValue + 1
    else:
        resetL = False

    # Set newmaxY if necessary
    if resetL is True:
        # slider was clamped — recompute the value from the clamped position
        newmaxY = ylim[0] + imaxlogval * (ylim[1] - ylim[0]) * 0.01

    # Move the vertical line
    upperx = self.ui.mainplot.get_xlim()
    uppery = [newmaxY, newmaxY]
    setp(self.upperslideline, xdata=upperx, ydata=uppery)
    self.ui.graphicsView.draw()

    # Set the value to upper slider
    self._upperSlideValue = imaxlogval
    self.ui.verticalSlider.setValue(self._upperSlideValue)

    # Set the value to editor if necessary
    if resetL is True:
        self.ui.lineEdit_6.setText(str(newmaxY))

    return
def set_minLogValue(self):
    """ Set the minimum log value and the lower slide bar
    """
    debug_msg = "Minimum Log Value = %s" % (str(self.ui.lineEdit_5.text()))
    Logger("Filter_Events").debug(debug_msg)

    ylim = self.ui.mainplot.get_ylim()
    if str(self.ui.lineEdit_5.text()) == "":
        # Empty. Default to minY
        newminY = ylim[0]
    else:
        # Non empty. Parse
        newminY = float(self.ui.lineEdit_5.text())

    # Convert to integer slide value (percentage of the y-range, 0-100)
    iminlogval = int((newminY - ylim[0]) / (ylim[1] - ylim[0]) * 100)
    debug_msg = "ilowerSlide = %s" % str(iminlogval)
    Logger("Filter_Events").debug(debug_msg)

    # Return if no change
    if iminlogval == self._lowerSlideValue:
        return

    # Set value if out of range
    resetL = True
    if iminlogval >= self._upperSlideValue:
        # lower slide must stay below the upper slide
        iminlogval = self._upperSlideValue - 1
    else:
        resetL = False

    if resetL is True:
        # slider was clamped — recompute the value from the clamped position
        newminY = ylim[0] + iminlogval * (ylim[1] - ylim[0]) * 0.01

    # Move the vertical line
    lowerx = self.ui.mainplot.get_xlim()
    lowery = [newminY, newminY]
    setp(self.lowerslideline, xdata=lowerx, ydata=lowery)
    self.ui.graphicsView.draw()

    # Move the slide bar (lower)
    self._lowerSlideValue = iminlogval
    debug_msg = "LineEdit5 set slide to %s" % str(self._lowerSlideValue)
    Logger("Filter_Events").debug(debug_msg)
    self.ui.verticalSlider_2.setValue(self._lowerSlideValue)

    # Reset line Edit if using default
    if resetL is True:
        self.ui.lineEdit_5.setText(str(newminY))

    return
def __init__(self, name="", facility=""):
    """Create a scripter for the given instrument/facility.

    :param name: instrument name
    :param facility: facility name

    The output directory defaults to the user's home directory; when
    Mantid is available it is replaced by the directory of the user
    file from the Mantid configuration, if that directory exists.
    """
    self.instrument_name = name
    self.facility_name = facility
    self._observers = []
    self._output_directory = os.path.expanduser('~')
    if HAS_MANTID:
        config = ConfigService.Instance()
        try:
            head, _ = os.path.split(config.getUserFilename())
            if os.path.isdir(head):
                self._output_directory = head
        except Exception:
            # Best effort only: a bare 'except:' would also swallow
            # SystemExit/KeyboardInterrupt; keep the home-dir default.
            Logger.get("scripter").debug("Could not get user filename")
def _check_data_list(data_list, scale):
    """Validate the inputs for data stitching.

    ``data_list`` must be a list of at least two data sets; when
    ``scale`` is given as a list it must match ``data_list`` in length.
    The first violation found is logged and raised as a RuntimeError.
    """
    def _fail(error_msg):
        # Log the problem before surfacing it to the caller
        Logger("data_stitching").error(error_msg)
        raise RuntimeError(error_msg)

    if not isinstance(data_list, list):
        _fail("The data_list parameter should be a list")
    if len(data_list) < 2:
        _fail("The data_list parameter should contain at least two data sets")
    if isinstance(scale, list) and len(scale) != len(data_list):
        _fail("If the scale parameter is provided as a list, it should have the same length as data_list")
def __init__(self, filepath):
    """Validate *filepath* and prepare the plugin loader.

    :param filepath: path to an existing ``.py`` file
    :raises ValueError: if the file does not exist or does not end
        with the expected ``.py`` extension
    """
    if not _os.path.isfile(filepath):
        raise ValueError("PluginLoader expects a single filename. '%s' does not point to an existing file" % filepath)
    if not filepath.endswith(self.extension):
        raise ValueError("PluginLoader expects a filename ending with .py. '%s' does not have a .py extension" % filepath)
    self._filepath = filepath
    # Construct the logger directly: Logger("...") is the spelling used
    # by the rest of this class; Logger.get("...") is the deprecated form.
    self._logger = Logger("PluginLoader")
def test_logger_creation_does_not_raise_an_error(self):
    """Smoke test: a Logger can be created and exposes the full set of
    logging methods (fatal/error/warning/notice/information/debug)."""
    logger = Logger.get("LoggerTest")
    self.assertTrue(isinstance(logger, Logger))
    required = ('fatal', 'error', 'warning', 'notice', 'information', 'debug')
    for att in required:
        if hasattr(logger, att):
            continue
        self.fail("Logger object does not have the required attribute '%s'" % att)
def __init__(self, parent_presenter, WorkHandler, BeamCentreModel, SANSCentreFinder):
    """Wire the presenter up with its collaborators.

    The WorkHandler, BeamCentreModel and SANSCentreFinder class objects
    are injected as parameters so tests can substitute doubles; the view
    is attached later via set_view.
    """
    super(BeamCentrePresenter, self).__init__()
    self._parent_presenter = parent_presenter
    self._view = None
    self._logger = Logger("SANS")
    self._work_handler = WorkHandler()
    self._beam_centre_model = BeamCentreModel(SANSCentreFinder)
def load_File(self): """ Load the file by file name or run number """ # Get file name from line editor filename = str(self.ui.lineEdit.text()) # Find out it is relative path or absolute path #if os.path.abspath(filename) == filename: # isabspath = True #else: # isabspath = False dataws = self._loadFile(str(filename)) if dataws is None: error_msg = "Unable to locate run %s in default directory %s." % ( filename, self._defaultdir) Logger("Filter_Events").error(error_msg) self._setErrorMsg(error_msg) else: self._importDataWorkspace(dataws) self._defaultdir = os.path.dirname(str(filename)) # Reset GUI self._resetGUI(resetfilerun=False) return
def find_beam_centre(self, state):
    """
    This is called from the GUI and runs the find beam centre algorithm given a
    state model and this beam_centre_model object.

    :param state: A SANS state object
    :returns: The centre position found, as a dict with keys 'pos1' and 'pos2'.
    """
    centre_finder = self.SANSCentreFinder()

    # Map the two direction check-boxes onto a single search-direction enum
    find_direction = None
    if self.up_down and self.left_right:
        find_direction = FindDirectionEnum.All
    elif self.up_down:
        find_direction = FindDirectionEnum.Up_Down
    elif self.left_right:
        find_direction = FindDirectionEnum.Left_Right
    else:
        # No direction selected: nothing to search, return the current position
        logger = Logger("CentreFinder")
        logger.notice("Have chosen no find direction exiting early")
        return {"pos1": self.lab_pos_1, "pos2": self.lab_pos_2}

    if self.COM:
        # First pass without the reduction method (reduction_method=False) to
        # get an initial estimate ...
        centre = centre_finder(state, r_min=self.r_min, r_max=self.r_max,
                               max_iter=self.max_iterations,
                               x_start=self.lab_pos_1, y_start=self.lab_pos_2,
                               tolerance=self.tolerance,
                               find_direction=find_direction,
                               reduction_method=False,
                               component=self.component)
        # ... then refine starting from that estimate with the full
        # reduction-based search
        centre = centre_finder(state, r_min=self.r_min, r_max=self.r_max,
                               max_iter=self.max_iterations,
                               x_start=centre['pos1'], y_start=centre['pos2'],
                               tolerance=self.tolerance,
                               find_direction=find_direction,
                               reduction_method=True,
                               verbose=self.verbose,
                               component=self.component)
    else:
        # Single reduction-based search starting from the user-supplied position
        centre = centre_finder(state, r_min=self.r_min, r_max=self.r_max,
                               max_iter=self.max_iterations,
                               x_start=self.lab_pos_1, y_start=self.lab_pos_2,
                               tolerance=self.tolerance,
                               find_direction=find_direction,
                               reduction_method=True,
                               verbose=self.verbose,
                               component=self.component)
    return centre
def __init__(self, data_file, workspace_name=None):
    """Load an HFIR data file and extract basic meta data from it.

    :param data_file: path to the data file to load
    :param workspace_name: optional output workspace name; a hidden
        default ("__raw_data_file") is used when not supplied

    Any loading problem is recorded in ``self.errors`` rather than raised.
    """
    self.errors = []
    if HAS_MANTID:
        try:
            if workspace_name is None:
                self.data_ws = "__raw_data_file"
            else:
                self.data_ws = str(workspace_name)
            api.HFIRLoad(Filename=str(data_file), OutputWorkspace=self.data_ws)
            ws = AnalysisDataService.retrieve(self.data_ws)
            # Wavelength is the centre of the single x bin, spread is its width
            x = ws.dataX(0)
            self.wavelength = (x[0] + x[1]) / 2.0
            self.wavelength_spread = x[1] - x[0]
            self.sample_detector_distance = ws.getRun().getProperty("sample_detector_distance").value
            self.sample_thickness = ws.getRun().getProperty("sample-thickness").value
            self.beam_diameter = ws.getRun().getProperty("beam-diameter").value
            Logger.get("hfir_data_proxy").information("Loaded data file: %s" % data_file)
        except Exception:
            # sys.exc_info() works on Python 2 and 3; the original used the
            # Python-2-only sys.exc_value, which raises AttributeError on 3.
            err_msg = "Error loading data file:\n%s" % str(sys.exc_info()[1])
            Logger.get("hfir_data_proxy").error(err_msg)
            self.errors.append(err_msg)
def pre_process(self):
    """
    Reduction steps that are meant to be executed only once per set
    of data files. After this is executed, all files will go through
    the list of reduction steps.

    Runs the configured setup algorithm (if any), forwarding every
    matching entry of ``self.reduction_properties`` to it.
    """
    Logger.get("Reducer").information("Setting up reduction options")
    if self.setup_algorithm is not None:
        alg = AlgorithmManager.create(self.setup_algorithm)
        alg.initialize()
        props = [p.name for p in alg.getProperties()]

        for key in self.reduction_properties.keys():
            if key in props:
                try:
                    alg.setProperty(key, self.reduction_properties[key])
                except Exception:
                    # sys.exc_info() works on Python 2 and 3; the original
                    # used the Python-2-only sys.exc_value. Keep going so a
                    # single bad property does not abort the whole setup.
                    msg = "Error setting %s=%s" % (key, str(self.reduction_properties[key]))
                    msg += "\n %s" % str(sys.exc_info()[1])
                    Logger.get("Reducer").error(msg)
            else:
                Logger.get("Reducer").warning("Setup algorithm has no %s property" % key)

        # Hand the shared property-manager table to the algorithm if supported
        if "ReductionProperties" in props:
            alg.setPropertyValue("ReductionProperties", self.get_reduction_table_name())
        alg.execute()
def __init__(self, reducer, coord1_scale_factor, coord2_scale_factor):
    """Set up logging for the beam-centre search, switching the first
    coordinate to angle mode when the workspace requires it."""
    super(BeamCenterLogger, self).__init__()
    self.logger = Logger("CentreFinder")
    self.using_angle = is_workspace_which_requires_angle(reducer)
    if self.using_angle:
        self.coord1_scale_factor = 1.
        # Find the bench rotation. Only supply the bench rotation if it is
        # really needed. If we supply an offset through a bench rotation we
        # need to take into account that the directionality of the angles is
        # not the same as in Mantid. We need to reverse the sign of the bench
        # rotation to get the correct rotation.
        self.offset_coord1 = -get_bench_rotation(reducer)
    else:
        self.coord1_scale_factor = coord1_scale_factor
        self.offset_coord1 = 0.0
    self.coord2_scale_factor = coord2_scale_factor
    self.offset_coord2 = 0.0
def __init__(self, guess_centre, sign_policy, find_direction=FindDirectionEnum.ALL):
    """
    Takes a loaded reducer (sample information etc.) and the initial guess of the
    centre position that are required for all later iterations.

    @param guess_centre: the starting position that the trial x and y are relative to
    @param sign_policy: sets the sign for the move operation.
    @param find_direction: Find beam centre for directions, ie if all or only up/down
                           or only left right
    """
    self.logger = Logger("CentreFinder")
    self._last_pos = guess_centre
    self.detector = None
    self.coord1_scale_factor = 1.0
    self.coord2_scale_factor = 1.0
    self.find_direction = find_direction
    # Default move signs apply unless a two-element sign policy overrides them
    if sign_policy is not None and len(sign_policy) == 2:
        self.sign_coord1 = sign_policy[0]
        self.sign_coord2 = sign_policy[1]
    else:
        self.sign_coord1 = -1.
        self.sign_coord2 = -1.
# whether to use default wavelength range for transmissions # Allows over-riding of following from user (mask) file: # radial integration limits # wavelength limits, bin size and binning # q limits, q bin size and binning # qxy limits, qxy bin size and binning # The save directory must currently be specified in the Mantid.user.properties file #Make the reduction module available from ISISCommandInterface import * from mantid.simpleapi import * from mantid.api import WorkspaceGroup from mantid.kernel import Logger sanslog = Logger("SANS") import copy import sys import re from reduction_settings import REDUCTION_SETTINGS_OBJ_NAME ################################################################################ # Avoid a bug with deepcopy in python 2.6, details and workaround here: # http://bugs.python.org/issue1515 if sys.version_info[0] == 2 and sys.version_info[1] == 6: import types def _deepcopy_method(x, memo): return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class) copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method ################################################################################
class BeamCentrePresenter(object):
    """Presenter for the beam-centre tab: mediates between the view, the
    BeamCentreModel and the asynchronous centre-finder work handler."""

    class ConcreteBeamCentreListener(BeamCentre.BeamCentreListener):
        # Forwards view events to the presenter
        def __init__(self, presenter):
            self._presenter = presenter

        def on_run_clicked(self):
            self._presenter.on_run_clicked()

    class CentreFinderListener(WorkHandler.WorkListener):
        # Forwards worker completion/error callbacks to the presenter
        def __init__(self, presenter):
            super(BeamCentrePresenter.CentreFinderListener, self).__init__()
            self._presenter = presenter

        def on_processing_finished(self, result):
            self._presenter.on_processing_finished_centre_finder(result)

        def on_processing_error(self, error):
            self._presenter.on_processing_error_centre_finder(error)

    def __init__(self, parent_presenter, WorkHandler, BeamCentreModel, SANSCentreFinder):
        # The collaborator classes are injected so tests can substitute doubles
        super(BeamCentrePresenter, self).__init__()
        self._view = None
        self._parent_presenter = parent_presenter
        self._work_handler = WorkHandler()
        self._logger = Logger("SANS")
        self._beam_centre_model = BeamCentreModel(SANSCentreFinder)

    def set_view(self, view):
        """Attach the view, register the run listener and push the model defaults."""
        if view:
            self._view = view

            # Set up run listener
            listener = BeamCentrePresenter.ConcreteBeamCentreListener(self)
            self._view.add_listener(listener)

            # Set the default gui
            self._view.set_options(self._beam_centre_model)

    def on_update_instrument(self, instrument):
        # Keep the model's position scaling and the view in step with the instrument
        self._beam_centre_model.set_scaling(instrument)
        self._view.on_update_instrument(instrument)

    def on_update_rows(self):
        # Reset model defaults from the file information of the first table row, if any
        file_information = self._parent_presenter._table_model.get_file_information_for_row(0)
        if file_information:
            self._beam_centre_model.reset_to_defaults_for_instrument(file_information=file_information)
        self._view.set_options(self._beam_centre_model)

    def on_processing_finished_centre_finder(self, result):
        # Enable button
        self._view.set_run_button_to_normal()
        # Update Centre Positions in model and GUI; view values are scaled
        # by the model's display scale factors
        if self._beam_centre_model.update_lab:
            self._beam_centre_model.lab_pos_1 = result['pos1']
            self._beam_centre_model.lab_pos_2 = result['pos2']
            self._view.lab_pos_1 = self._beam_centre_model.lab_pos_1 * self._beam_centre_model.scale_1
            self._view.lab_pos_2 = self._beam_centre_model.lab_pos_2 * self._beam_centre_model.scale_2
        if self._beam_centre_model.update_hab:
            self._beam_centre_model.hab_pos_1 = result['pos1']
            self._beam_centre_model.hab_pos_2 = result['pos2']
            self._view.hab_pos_1 = self._beam_centre_model.hab_pos_1 * self._beam_centre_model.scale_1
            self._view.hab_pos_2 = self._beam_centre_model.hab_pos_2 * self._beam_centre_model.scale_2

    def on_processing_error_centre_finder(self, error):
        self._logger.warning("There has been an error. See more: {}".format(error))
        self._view.set_run_button_to_normal()

    def on_processing_error(self, error):
        self._view.set_run_button_to_normal()

    def on_run_clicked(self):
        """Kick off an asynchronous beam-centre search for row 0."""
        # Get the state information for the first row.
        state = self._parent_presenter.get_state_for_row(0)

        if not state:
            # NOTE(review): the adjacent string literals concatenate without a
            # space ("...there" + "valid..."), so the logged message reads
            # "therevalid" — likely a missing space; confirm before changing.
            self._logger.information("You can only calculate the beam centre if a user file has been loaded and there"
                                     "valid sample scatter entry has been provided in the selected row.")
            return

        # Disable the button
        self._view.set_run_button_to_processing()

        # Update model
        self._update_beam_model_from_view()

        # Run the task
        listener = BeamCentrePresenter.CentreFinderListener(self)
        state_copy = copy.copy(state)
        self._work_handler.process(listener, self._beam_centre_model.find_beam_centre, 0, state_copy)

    def _update_beam_model_from_view(self):
        # Copy the user-entered options into the model; positions are divided
        # by the display scale factors to convert back into model units.
        self._beam_centre_model.r_min = self._view.r_min
        self._beam_centre_model.r_max = self._view.r_max
        self._beam_centre_model.max_iterations = self._view.max_iterations
        self._beam_centre_model.tolerance = self._view.tolerance
        self._beam_centre_model.left_right = self._view.left_right
        self._beam_centre_model.verbose = self._view.verbose
        self._beam_centre_model.COM = self._view.COM
        self._beam_centre_model.up_down = self._view.up_down
        self._beam_centre_model.lab_pos_1 = self._view.lab_pos_1 / self._beam_centre_model.scale_1
        self._beam_centre_model.lab_pos_2 = self._view.lab_pos_2 / self._beam_centre_model.scale_2
        self._beam_centre_model.hab_pos_1 = self._view.hab_pos_1 / self._beam_centre_model.scale_1
        self._beam_centre_model.hab_pos_2 = self._view.hab_pos_2 / self._beam_centre_model.scale_2
        self._beam_centre_model.q_min = self._view.q_min
        self._beam_centre_model.q_max = self._view.q_max
        self._beam_centre_model.component = self._view.component
        self._beam_centre_model.update_hab = self._view.update_hab
        self._beam_centre_model.update_lab = self._view.update_lab

    def update_centre_positions(self, state_model):
        """Push centre positions from a state model to the view; HAB positions
        fall back to the LAB values when unset (falsy)."""
        lab_pos_1 = getattr(state_model, 'lab_pos_1')
        lab_pos_2 = getattr(state_model, 'lab_pos_2')

        hab_pos_1 = getattr(state_model, 'hab_pos_1') if getattr(state_model, 'hab_pos_1') else lab_pos_1
        hab_pos_2 = getattr(state_model, 'hab_pos_2') if getattr(state_model, 'hab_pos_2') else lab_pos_2

        self._view.lab_pos_1 = lab_pos_1
        self._view.lab_pos_2 = lab_pos_2
        self._view.hab_pos_1 = hab_pos_1
        self._view.hab_pos_2 = hab_pos_2

    def set_on_state_model(self, attribute_name, state_model):
        # Only copy non-empty values, but always allow booleans through
        attribute = getattr(self._view, attribute_name)
        if attribute or isinstance(attribute, bool):
            setattr(state_model, attribute_name, attribute)

    def set_on_view(self, attribute_name, state_model):
        attribute = getattr(state_model, attribute_name)
        if attribute or isinstance(attribute, bool):  # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
            setattr(self._view, attribute_name, attribute)
class SettingsDiagnosticPresenter(object):
    """Presenter for the settings diagnostic tab: shows the SANS state for a
    selected table row as a tree and can serialise it to a JSON file."""

    class ConcreteSettingsDiagnosticTabListener(SettingsDiagnosticTab.SettingsDiagnosticTabListener):
        # Forwards view events to the presenter
        def __init__(self, presenter):
            super(SettingsDiagnosticPresenter.ConcreteSettingsDiagnosticTabListener, self).__init__()
            self._presenter = presenter

        def on_row_changed(self):
            self._presenter.on_row_changed()

        def on_update_rows(self):
            self._presenter.on_update_rows()

        def on_collapse(self):
            self._presenter.on_collapse()

        def on_expand(self):
            self._presenter.on_expand()

        def on_save_state_to_file(self):
            self._presenter.on_save_state()

    def __init__(self, parent_presenter):
        super(SettingsDiagnosticPresenter, self).__init__()
        self._view = None
        self._parent_presenter = parent_presenter
        # Logger
        self.gui_logger = Logger("SANS GUI LOGGER")

    def on_collapse(self):
        self._view.collapse()

    def on_expand(self):
        self._view.expand()

    def on_row_changed(self):
        # Refresh the state tree for the newly selected row; report lookup
        # failures both in the log and in a GUI warning box.
        try:
            row_index = self._view.get_current_row()
            state = self.get_state(row_index)
            if state:
                self.display_state_diagnostic_tree(state)
        except RuntimeError as e:
            self.gui_logger.error(str(e))
            self._parent_presenter.display_warning_box('Warning', 'Unable to find files.', str(e))

    def on_update_rows(self):
        """
        Update the row selection in the combobox
        """
        current_row_index = self._view.get_current_row()
        valid_row_indices = self._parent_presenter.get_row_indices()

        new_row_index = -1
        if current_row_index in valid_row_indices:
            # Keep the current selection when it is still valid
            new_row_index = current_row_index
        elif len(valid_row_indices) > 0:
            # Otherwise fall back to the first valid row
            new_row_index = valid_row_indices[0]

        self._view.update_rows(valid_row_indices)

        if new_row_index != -1:
            self.set_row(new_row_index)
            self.on_row_changed()

    def set_row(self, index):
        self._view.set_row(index)

    def set_view(self, view):
        """Attach the view, register the row-selection listener and apply defaults."""
        if view:
            self._view = view

            # Set up row selection listener
            listener = SettingsDiagnosticPresenter.ConcreteSettingsDiagnosticTabListener(self)
            self._view.add_listener(listener)

            # Set the default gui
            self._set_default_gui()

    def _set_default_gui(self):
        self._view.update_rows([])
        self.display_state_diagnostic_tree(state=None)

    def get_state(self, index):
        return self._parent_presenter.get_state_for_row(index)

    def display_state_diagnostic_tree(self, state):
        # Convert to dict before passing the state to the view
        if state is not None:
            state = state.property_manager
        self._view.set_tree(state)

    def on_save_state(self):
        """Serialise the currently selected row's state to a JSON file at the
        location chosen in the view (extension forced to JSON_SUFFIX)."""
        # Get the save location
        save_location = self._view.get_save_location()
        # Check if it exists
        path_dir = os.path.dirname(save_location)
        if not path_dir:
            self.gui_logger.warning("The provided save location for the SANS state does not seem to exist. "
                                    "Please provide a validate path")
            return

        file_name, _ = os.path.splitext(save_location)
        full_file_path = file_name + JSON_SUFFIX

        row_index = self._view.get_current_row()
        state = self.get_state(row_index)
        serialized_state = state.property_manager
        with open(full_file_path, 'w') as f:
            json.dump(serialized_state, f, sort_keys=True, indent=4)

        self.gui_logger.information("The state for row {} has been saved to: {} ".format(row_index, full_file_path))

        # Update the file name in the UI
        self._view.set_save_location(full_file_path)
def __init__(self, parent_presenter):
    """Store the parent presenter and create the GUI logger; the view is
    attached later via set_view."""
    super(SettingsDiagnosticPresenter, self).__init__()
    self._parent_presenter = parent_presenter
    self._view = None
    # Logger for GUI-facing messages
    self.gui_logger = Logger("SANS GUI LOGGER")
def __init__(self, parent_presenter):
    """Store the parent presenter and create the GUI logger; the view is
    attached later."""
    self._parent_presenter = parent_presenter
    self._view = None
    # Logger for GUI-facing messages
    self.gui_logger = Logger("SANS GUI LOGGER")
def reduce(self):
    """
    Go through the list of reduction steps.

    Runs the common pre-processing once, then executes the configured
    reduction algorithm for every registered data file/workspace,
    accumulating an execution log which is appended to
    ``<instrument>_reduction.log`` and returned.

    :returns: the accumulated log text, or None if no reduction
        algorithm was configured
    """
    t_0 = time.time()
    self.output_workspaces = []

    # Log text
    self.log_text = "%s reduction - %s\n" % (self.instrument_name, time.ctime())
    self.log_text += "Mantid Python API v2\n"

    # Go through the list of steps that are common to all data files
    self.pre_process()

    if self.reduction_algorithm is None:
        Logger.get("Reducer").error("A reduction algorithm wasn't set: stopping")
        return

    for ws in self._data_files.keys():
        alg = AlgorithmManager.create(self.reduction_algorithm)
        alg.initialize()
        props = [p.name for p in alg.getProperties()]

        # Check whether the data is already available or needs to be loaded
        if self._data_files[ws] is not None:
            datafile = self._data_files[ws]
            # isinstance() is the idiomatic type check (was: type(...) == list)
            if isinstance(datafile, list):
                datafile = ','.join(datafile)
            if "Filename" in props:
                alg.setPropertyValue("Filename", datafile)
            else:
                msg = "Can't set the Filename property on %s" % self.reduction_algorithm
                Logger.get("Reducer").error(msg)
        else:
            if "InputWorkspace" in props:
                alg.setPropertyValue("InputWorkspace", ws)
            else:
                msg = "Can't set the InputWorkspace property on %s" % self.reduction_algorithm
                Logger.get("Reducer").error(msg)

        if "ReductionProperties" in props:
            alg.setPropertyValue("ReductionProperties", self.get_reduction_table_name())

        if "OutputWorkspace" in props:
            alg.setPropertyValue("OutputWorkspace", ws)

        alg.execute()
        if "OutputMessage" in props:
            self.log_text += alg.getProperty("OutputMessage").value + '\n'

    # any clean up, possibly removing workspaces
    self.post_process()

    # Determine which directory to use
    output_dir = self._data_path
    if self._output_path is not None:
        if os.path.isdir(self._output_path):
            output_dir = self._output_path
        else:
            output_dir = os.path.expanduser('~')

    self.log_text += "Reduction completed in %g sec\n" % (time.time() - t_0)
    log_path = os.path.join(output_dir, "%s_reduction.log" % self.instrument_name)
    self.log_text += "Log saved to %s" % log_path

    # Write the log to file; 'with' guarantees the handle is closed even if
    # the write raises (the original open()/close() pair could leak it)
    with open(log_path, 'a') as f:
        f.write("\n-------------------------------------------\n")
        f.write(self.log_text)

    return self.log_text
class CentreFinder(object):
    """
    Aids estimating the effective centre of the particle beam by calculating Q in four
    quadrants and using the asymmetry to calculate the direction to the beam centre. A
    better estimate for the beam centre position can hence be calculated iteratively
    """
    QUADS = ['Left', 'Right', 'Up', 'Down']

    def __init__(self, guess_centre, sign_policy, find_direction=FindDirectionEnum.ALL):
        """
        Takes a loaded reducer (sample information etc.) and the initial guess of the
        centre position that are required for all later iterations
        @param guess_centre: the starting position that the trial x and y are relative to
        @param sign_policy: sets the sign for the move operation.
        @param find_direction: Find beam centre for directions, ie if all or only up/down
                               or only left right
        """
        self.logger = Logger("CentreFinder")
        self._last_pos = guess_centre
        self.detector = None
        self.coord1_scale_factor = 1.0
        self.coord2_scale_factor = 1.0
        self.find_direction = find_direction
        # Default move signs; a two-element sign_policy overrides them
        self.sign_coord1 = -1.
        self.sign_coord2 = -1.
        if sign_policy is not None and len(sign_policy) == 2:
            self.sign_coord1 = sign_policy[0]
            self.sign_coord2 = sign_policy[1]

    def SeekCentre(self, setup, trial):
        """
        Does four calculations of Q to estimate a better centre location than the one
        passed to it
        @param setup: the reduction chain object that contains information about the reduction
        @param trial: the coordinates of the location to test as a list in the form [x, y]
        @return: the asymmetry in the calculated Q in the x and y directions
        """
        self.detector = setup.instrument.cur_detector().name()

        # populate the x and y scale factor values at this point for the text box
        self.coord1_scale_factor = setup.instrument.beam_centre_scale_factor1
        self.coord2_scale_factor = setup.instrument.beam_centre_scale_factor2

        # We are looking only at the difference between the old position and the trial.
        self.move(setup, trial[0] - self._last_pos[0], trial[1] - self._last_pos[1])

        # phi masking will remove areas of the detector that we need
        setup.mask.mask_phi = False

        setup.pre_process()
        setup.output_wksp = 'centre'
        # Run the reduction chain up to, but excluding, the conversion to Q
        steps = setup._conv_Q
        steps = steps[0:len(steps) - 1]
        setup._reduce(init=False, post=False, steps=steps)

        self._group_into_quadrants(setup, 'centre', suffix='_tmp')

        if setup.get_can():
            # reduce the can here
            setup.reduce_can('centre_can', run_Q=False)

            self._group_into_quadrants(setup, 'centre_can', suffix='_can')
            # Subtract the can contribution quadrant by quadrant
            Minus(LHSWorkspace='Left_tmp', RHSWorkspace='Left_can', OutputWorkspace='Left_tmp')
            Minus(LHSWorkspace='Right_tmp', RHSWorkspace='Right_can', OutputWorkspace='Right_tmp')
            Minus(LHSWorkspace='Up_tmp', RHSWorkspace='Up_can', OutputWorkspace='Up_tmp')
            Minus(LHSWorkspace='Down_tmp', RHSWorkspace='Down_can', OutputWorkspace='Down_tmp')
            DeleteWorkspace(Workspace='Left_can')
            DeleteWorkspace(Workspace='Right_can')
            DeleteWorkspace(Workspace='Up_can')
            DeleteWorkspace(Workspace='Down_can')
            DeleteWorkspace(Workspace='centre_can')

        DeleteWorkspace(Workspace='centre')
        self._last_pos = trial

        # prepare the workspaces for "publication", after they have their
        # standard names calculations will be done on them and they will be plotted
        for out_wksp in self.QUADS:
            in_wksp = out_wksp + '_tmp'
            ReplaceSpecialValues(InputWorkspace=in_wksp, OutputWorkspace=in_wksp, NaNValue=0, InfinityValue=0)
            rem_nans = StripEndNans()
            rem_nans.execute(setup, in_wksp)

            RenameWorkspace(InputWorkspace=in_wksp, OutputWorkspace=out_wksp)

        return self._calculate_residue()

    def move(self, setup, x, y):
        """
        Move the selected detector in both the can and sample workspaces, remembering the
        that ISIS SANS team see the detector from the other side
        @param setup: the reduction chain object that contains information about the reduction
        @param x: the distance to move in the x (-x) direction in metres
        @param y: the distance to move in the y (-y) direction in metres
        """
        # Displacing the beam by +5 is equivalent to displacing the instrument by -5.
        # Hence we change the sign here. LARMOR does this correction in the instrument
        # itself, while for the others we don't
        x = self.sign_coord1 * x
        y = self.sign_coord2 * y

        setup.instrument.elementary_displacement_of_single_component(workspace=setup.get_sample().wksp_name,
                                                                     component_name=self.detector,
                                                                     coord1=x,
                                                                     coord2=y,
                                                                     coord1_scale_factor=1.,
                                                                     coord2_scale_factor=1.,
                                                                     relative_displacement=True)
        if setup.get_can():
            setup.instrument.elementary_displacement_of_single_component(workspace=setup.get_can().wksp_name,
                                                                         component_name=self.detector,
                                                                         coord1=x,
                                                                         coord2=y,
                                                                         coord1_scale_factor=1.,
                                                                         coord2_scale_factor=1.,
                                                                         relative_displacement=True)

    # Create a workspace with a quadrant value in it
    def _create_quadrant(self, setup, reduced_ws, quadrant, r_min, r_max, suffix):
        out_ws = quadrant + suffix
        # Need to create a copy because we're going to mask 3/4 out and that's a one-way trip
        CloneWorkspace(InputWorkspace=reduced_ws, OutputWorkspace=out_ws)

        objxml = SANSUtility.QuadrantXML([0, 0, 0.0], r_min, r_max, quadrant)
        # Mask out everything outside the quadrant of interest
        MaskDetectorsInShape(Workspace=out_ws, ShapeXML=objxml)

        setup.to_Q.execute(setup, out_ws)
        # Q1D(output,rawcount_ws,output,q_bins,AccountForGravity=GRAVITY)

    # Create 4 quadrants for the centre finding algorithm and return their names
    def _group_into_quadrants(self, setup, input_value, suffix=''):
        r_min = setup.CENT_FIND_RMIN
        r_max = setup.CENT_FIND_RMAX

        for q in self.QUADS:
            self._create_quadrant(setup, input_value, q, r_min, r_max, suffix)

    def _calculate_residue(self):
        """
        Calculate the sum squared difference between pairs of workspaces named Left,
        Right, Up and Down. This assumes that a workspace with one spectrum for each
        of the quadrants
        @return: difference left to right, difference up down
        """
        residueX = 0
        if self.find_direction == FindDirectionEnum.ALL or self.find_direction == FindDirectionEnum.LEFT_RIGHT:
            yvalsAX = mtd['Left'].readY(0)
            yvalsBX = mtd['Right'].readY(0)
            qvalsAX = mtd['Left'].readX(0)
            qvalsBX = mtd['Right'].readX(0)
            qrangeX = [len(yvalsAX), len(yvalsBX)]
            nvalsX = min(qrangeX)
            id1X = "LR1"
            id2X = "LR2"
            residueX = self._residual_calculation_for_single_direction(yvalsA=yvalsAX,
                                                                       yvalsB=yvalsBX,
                                                                       qvalsA=qvalsAX,
                                                                       qvalsB=qvalsBX,
                                                                       qrange=qrangeX,
                                                                       nvals=nvalsX,
                                                                       id1=id1X,
                                                                       id2=id2X)
        residueY = 0
        if self.find_direction == FindDirectionEnum.ALL or self.find_direction == FindDirectionEnum.UP_DOWN:
            yvalsAY = mtd['Up'].readY(0)
            yvalsBY = mtd['Down'].readY(0)
            qvalsAY = mtd['Up'].readX(0)
            qvalsBY = mtd['Down'].readX(0)
            qrangeY = [len(yvalsAY), len(yvalsBY)]
            nvalsY = min(qrangeY)
            id1Y = "UD1"
            id2Y = "UD2"
            residueY = self._residual_calculation_for_single_direction(yvalsA=yvalsAY,
                                                                       yvalsB=yvalsBY,
                                                                       qvalsA=qvalsAY,
                                                                       qvalsB=qvalsBY,
                                                                       qrange=qrangeY,
                                                                       nvals=nvalsY,
                                                                       id1=id1Y,
                                                                       id2=id2Y)
        return residueX, residueY

    def _residual_calculation_for_single_direction(self, yvalsA, yvalsB, qvalsA, qvalsB, qrange, nvals, id1, id2):
        # Walk both Q axes in step, skipping unmatched bins, and accumulate
        # the sum of squared differences of the matched y values.
        dummy_1 = qrange
        residue = 0
        indexB = 0
        for indexA in range(0, nvals):
            if qvalsA[indexA] < qvalsB[indexB]:
                self.logger.notice(id1 + " " + str(indexA) + " " + str(indexB))
                continue
            elif qvalsA[indexA] > qvalsB[indexB]:
                while qvalsA[indexA] > qvalsB[indexB]:
                    # BUG FIX: the original called self.logger(...) directly,
                    # which raises TypeError (Logger objects are not callable);
                    # log via notice() as in the branch above.
                    self.logger.notice(id2 + " " + str(indexA) + " " + str(indexB))
                    indexB += 1
                if indexA > nvals - 1 or indexB > nvals - 1:
                    break
            residue += pow(yvalsA[indexA] - yvalsB[indexB], 2)
            indexB += 1
        return residue

    def _get_cylinder_direction(self, workspace):
        '''
        Get the direction that the masking cylinder needs to point at. This should be
        the normal of the tilted detector bench. The original normal is along the beam
        axis as defined in the instrument definition file.
        @param workspace: the workspace with the tilted detector bench
        @returns the required direction of the cylinder axis
        '''
        ws = mtd[workspace]
        instrument = ws.getInstrument()
        quat = instrument.getComponentByName(self.detector).getRotation()
        cylinder_direction = instrument.getReferenceFrame().vecPointingAlongBeam()
        quat.rotate(cylinder_direction)
        return cylinder_direction.X(), cylinder_direction.Y(), cylinder_direction.Z()
def __init__(self, parent_presenter):
    """Create the masking-table presenter; the view is attached later via
    set_view."""
    super(MaskingTablePresenter, self).__init__()
    self._parent_presenter = parent_presenter
    self._view = None
    self._logger = Logger("SANS")
    self._work_handler = WorkHandler()
class MaskingTablePresenter(object):
    """
    Presenter for the masking table of the SANS GUI.

    It listens to the masking-table view, pulls the reduction state of the
    currently selected row from the parent presenter and translates the mask
    information of that state into table entries. It can also run an
    asynchronous task which loads and masks a workspace so that the applied
    mask can be inspected.
    """
    # Hidden workspace name used when displaying the mask
    DISPLAY_WORKSPACE_NAME = "__sans_mask_display_dummy_workspace"

    class ConcreteMaskingTableListener(MaskingTable.MaskingTableListener):
        """Forwards view signals to the presenter."""

        def __init__(self, presenter):
            super(MaskingTablePresenter.ConcreteMaskingTableListener, self).__init__()
            self._presenter = presenter

        def on_row_changed(self):
            self._presenter.on_row_changed()

        def on_update_rows(self):
            self._presenter.on_update_rows()

        def on_display(self):
            self._presenter.on_display()

    class DisplayMaskListener(WorkHandler.WorkListener):
        """Forwards the result (or failure) of the mask-display task to the presenter."""

        def __init__(self, presenter):
            super(MaskingTablePresenter.DisplayMaskListener, self).__init__()
            self._presenter = presenter

        def on_processing_finished(self, result):
            self._presenter.on_processing_finished_masking_display(result)

        def on_processing_error(self, error):
            self._presenter.on_processing_error_masking_display(error)

    def __init__(self, parent_presenter):
        """
        :param parent_presenter: the presenter that owns this one and supplies row state
        """
        super(MaskingTablePresenter, self).__init__()
        self._view = None
        self._parent_presenter = parent_presenter
        self._work_handler = WorkHandler()
        self._logger = Logger("SANS")

    def on_row_changed(self):
        """Refresh the displayed mask information when the selected row changes."""
        row_index = self._view.get_current_row()
        state = self.get_state(row_index, file_lookup=False)
        if state:
            self.display_masking_information(state)

    def on_display(self):
        """Load and mask the workspace of the selected row asynchronously for display."""
        # Get the state information for the selected row.
        # Disable the button
        self._view.set_display_mask_button_to_processing()

        try:
            row_index = self._view.get_current_row()
            state = self.get_state(row_index)
        except Exception as e:
            self.on_processing_error_masking_display(e)
            raise Exception(str(e))  # propagate errors for run_tab_presenter to deal with
        else:
            if not state:
                # BUG FIX: the implicitly concatenated literals were missing a
                # separating space and did not form a grammatical sentence
                # ("...loaded and therevalid sample scatter entry...").
                self._logger.information("You can only show a masked workspace if a user file has been loaded and a "
                                         "valid sample scatter entry has been provided in the selected row.")
                return

            # Run the task
            listener = MaskingTablePresenter.DisplayMaskListener(self)
            state_copy = copy.copy(state)
            self._work_handler.process(listener, load_and_mask_workspace, 0, state_copy,
                                       self.DISPLAY_WORKSPACE_NAME)

    def on_processing_finished_masking_display(self, result):
        """Re-enable the button and show the masked workspace."""
        # Enable button
        self._view.set_display_mask_button_to_normal()
        # Display masked workspace
        self._display(result)

    def on_processing_error_masking_display(self, error):
        """Log the failure of the display task and re-enable the button."""
        self._logger.warning("There has been an error. See more: {}".format(error))
        # Enable button
        self._view.set_display_mask_button_to_normal()

    def on_processing_error(self, error):
        pass

    def on_update_rows(self):
        """
        Update the row selection in the combobox
        """
        current_row_index = self._view.get_current_row()
        valid_row_indices = self._parent_presenter.get_row_indices()

        new_row_index = -1
        if current_row_index in valid_row_indices:
            # Keep the selection if it is still valid
            new_row_index = current_row_index
        elif len(valid_row_indices) > 0:
            # Fall back to the first valid row
            new_row_index = valid_row_indices[0]

        self._view.update_rows(valid_row_indices)

        if new_row_index != -1:
            self.set_row(new_row_index)
            self.on_row_changed()

    def set_row(self, index):
        self._view.set_row(index)

    def set_view(self, view):
        """Attach the view, register this presenter as its listener and reset the GUI."""
        if view:
            self._view = view

            # Set up row selection listener
            listener = MaskingTablePresenter.ConcreteMaskingTableListener(self)
            self._view.add_listener(listener)

            # Set the default gui
            self._set_default_gui()

    def _set_default_gui(self):
        self._view.update_rows([])
        self.display_masking_information(state=None)

    def get_state(self, index, file_lookup=True):
        return self._parent_presenter.get_state_for_row(index, file_lookup=file_lookup)

    @staticmethod
    def _append_single_spectrum_mask(spectrum_mask, container, detector_name, prefix):
        """Append one entry per single masked spectrum/strip (e.g. V12, H3, S100)."""
        if spectrum_mask:
            for item in spectrum_mask:
                detail = prefix + str(item)
                container.append(masking_information(first="Spectrum", second=detector_name, third=detail))

    @staticmethod
    def _append_strip_spectrum_mask(strip_mask_start, strip_mask_stop, container, detector_name, prefix):
        """Append one entry per start/stop strip range (e.g. V1>V5)."""
        if strip_mask_start and strip_mask_stop:
            for start, stop in zip(strip_mask_start, strip_mask_stop):
                detail = prefix + str(start) + ">" + prefix + str(stop)
                container.append(masking_information(first="Strip", second=detector_name, third=detail))

    @staticmethod
    def _append_block_spectrum_mask(horizontal_mask_start, horizontal_mask_stop, vertical_mask_start,
                                    vertical_mask_stop, container, detector_name):
        """Append one entry per rectangular block (H/V start-stop combination)."""
        if horizontal_mask_start and horizontal_mask_stop and vertical_mask_start and vertical_mask_stop:
            for h_start, h_stop, v_start, v_stop in zip(horizontal_mask_start, horizontal_mask_stop,
                                                        vertical_mask_start, vertical_mask_stop):
                detail = "H{}>H{}+V{}>V{}".format(h_start, h_stop, v_start, v_stop)
                container.append(masking_information(first="Strip", second=detector_name, third=detail))

    @staticmethod
    def _append_spectrum_block_cross_mask(horizontal_mask, vertical_mask, container, detector_name):
        """Append one entry per H/V cross mask."""
        if horizontal_mask and vertical_mask:
            for h, v in zip(horizontal_mask, vertical_mask):
                detail = "H{}+V{}".format(h, v)
                container.append(masking_information(first="Strip", second=detector_name, third=detail))

    @staticmethod
    def _get_spectrum_masks(mask_detector_info):
        """Collect all spectrum-based masks of a detector as table entries."""
        detector_name = mask_detector_info.detector_name
        spectrum_masks = []

        # -------------------------------
        # Get the vertical spectrum masks
        # -------------------------------
        single_vertical_strip_mask = mask_detector_info.single_vertical_strip_mask
        MaskingTablePresenter._append_single_spectrum_mask(single_vertical_strip_mask, spectrum_masks,
                                                           detector_name, "V")

        range_vertical_strip_start = mask_detector_info.range_vertical_strip_start
        range_vertical_strip_stop = mask_detector_info.range_vertical_strip_stop
        MaskingTablePresenter._append_strip_spectrum_mask(range_vertical_strip_start, range_vertical_strip_stop,
                                                          spectrum_masks, detector_name, "V")

        # ---------------------------------
        # Get the horizontal spectrum masks
        # ---------------------------------
        single_horizontal_strip_mask = mask_detector_info.single_horizontal_strip_mask
        MaskingTablePresenter._append_single_spectrum_mask(single_horizontal_strip_mask, spectrum_masks,
                                                           detector_name, "H")

        range_horizontal_strip_start = mask_detector_info.range_horizontal_strip_start
        range_horizontal_strip_stop = mask_detector_info.range_horizontal_strip_stop
        MaskingTablePresenter._append_strip_spectrum_mask(range_horizontal_strip_start, range_horizontal_strip_stop,
                                                          spectrum_masks, detector_name, "H")

        # ---------------------------------
        # Get the block masks
        # ---------------------------------
        block_horizontal_start = mask_detector_info.block_horizontal_start
        block_horizontal_stop = mask_detector_info.block_horizontal_stop
        block_vertical_start = mask_detector_info.block_vertical_start
        block_vertical_stop = mask_detector_info.block_vertical_stop
        MaskingTablePresenter._append_block_spectrum_mask(block_horizontal_start, block_horizontal_stop,
                                                          block_vertical_start, block_vertical_stop,
                                                          spectrum_masks, detector_name)

        block_cross_horizontal = mask_detector_info.block_cross_horizontal
        block_cross_vertical = mask_detector_info.block_cross_vertical
        MaskingTablePresenter._append_spectrum_block_cross_mask(block_cross_horizontal, block_cross_vertical,
                                                                spectrum_masks, detector_name)

        # ---------------------------------
        # Get spectrum masks
        # ---------------------------------
        single_spectra = mask_detector_info.single_spectra
        MaskingTablePresenter._append_single_spectrum_mask(single_spectra, spectrum_masks,
                                                           detector_name, "S")

        spectrum_range_start = mask_detector_info.spectrum_range_start
        spectrum_range_stop = mask_detector_info.spectrum_range_stop
        MaskingTablePresenter._append_strip_spectrum_mask(spectrum_range_start, spectrum_range_stop,
                                                          spectrum_masks, detector_name, "S")
        return spectrum_masks

    @staticmethod
    def _get_time_masks_general(mask_info):
        """Detector-independent time (bin) masks as table entries."""
        container = []
        bin_mask_general_start = mask_info.bin_mask_general_start
        bin_mask_general_stop = mask_info.bin_mask_general_stop
        if bin_mask_general_start and bin_mask_general_stop:
            for start, stop in zip(bin_mask_general_start, bin_mask_general_stop):
                detail = "{}-{}".format(start, stop)
                container.append(masking_information(first="Time", second="", third=detail))
        return container

    @staticmethod
    def _get_time_masks(mask_info):
        """Detector-specific time (bin) masks as table entries."""
        container = []
        bin_mask_start = mask_info.bin_mask_start
        bin_mask_stop = mask_info.bin_mask_stop
        detector_name = mask_info.detector_name
        if bin_mask_start and bin_mask_stop:
            for start, stop in zip(bin_mask_start, bin_mask_stop):
                detail = "{}-{}".format(start, stop)
                container.append(masking_information(first="Time", second=detector_name, third=detail))
        return container

    @staticmethod
    def _get_arm_mask(mask_info):
        """Beam-stop arm mask (LINE width, angle, pos1, pos2) as a table entry."""
        container = []
        beam_stop_arm_width = mask_info.beam_stop_arm_width
        beam_stop_arm_angle = mask_info.beam_stop_arm_angle
        beam_stop_arm_pos1 = mask_info.beam_stop_arm_pos1 if mask_info.beam_stop_arm_pos1 else 0.
        beam_stop_arm_pos2 = mask_info.beam_stop_arm_pos2 if mask_info.beam_stop_arm_pos2 else 0.
        if beam_stop_arm_width and beam_stop_arm_angle:
            detail = "LINE {}, {}, {}, {}".format(beam_stop_arm_width, beam_stop_arm_angle,
                                                  beam_stop_arm_pos1, beam_stop_arm_pos2)
            container.append(masking_information(first="Arm", second="", third=detail))
        return container

    @staticmethod
    def _get_phi_mask(mask_info):
        """Phi (angle) mask as a table entry."""
        container = []
        phi_min = mask_info.phi_min
        phi_max = mask_info.phi_max
        use_mask_phi_mirror = mask_info.use_mask_phi_mirror
        if phi_min and phi_max:
            if use_mask_phi_mirror:
                detail = "L/PHI {} {}".format(phi_min, phi_max)
            else:
                # BUG FIX: separating space was missing after NOMIRROR, producing
                # e.g. "L/PHI/NOMIRROR30 75" instead of "L/PHI/NOMIRROR 30 75".
                detail = "L/PHI/NOMIRROR {} {}".format(phi_min, phi_max)
            container.append(masking_information(first="Phi", second="", third=detail))
        return container

    @staticmethod
    def _get_mask_files(mask_info):
        """One table entry per user-specified mask file."""
        container = []
        mask_files = mask_info.mask_files
        if mask_files:
            for mask_file in mask_files:
                container.append(masking_information(first="Mask file", second="", third=mask_file))
        return container

    @staticmethod
    def _get_radius(mask_info):
        """Inner (beam stop) and outer (corners) radius masks as table entries."""
        container = []
        radius_min = mask_info.radius_min
        radius_max = mask_info.radius_max

        if radius_min:
            detail = "infinite-cylinder, r = {}".format(radius_min)
            container.append(masking_information(first="Beam stop", second="", third=detail))
        if radius_max:
            detail = "infinite-cylinder, r = {}".format(radius_max)
            container.append(masking_information(first="Corners", second="", third=detail))
        return container

    def _generate_masking_information(self, state):
        """Build the full list of table entries from the mask section of a state."""
        if state is None:
            return []
        mask_info = state.mask
        masks = []

        mask_info_lab = mask_info.detectors[DetectorType.to_string(DetectorType.LAB)]
        mask_info_hab = mask_info.detectors[DetectorType.to_string(DetectorType.HAB)] if DetectorType.to_string(DetectorType.HAB) in mask_info.detectors else None  # noqa

        # Add the radius mask
        radius_mask = self._get_radius(mask_info)
        masks.extend(radius_mask)

        # Add the spectrum masks for LAB
        spectrum_masks_lab = self._get_spectrum_masks(mask_info_lab)
        masks.extend(spectrum_masks_lab)

        # Add the spectrum masks for HAB
        if mask_info_hab:
            spectrum_masks_hab = self._get_spectrum_masks(mask_info_hab)
            masks.extend(spectrum_masks_hab)

        # Add the general time mask
        time_masks_general = self._get_time_masks_general(mask_info)
        masks.extend(time_masks_general)

        # Add the time masks for LAB
        time_masks_lab = self._get_time_masks(mask_info_lab)
        masks.extend(time_masks_lab)

        # Add the time masks for HAB
        if mask_info_hab:
            time_masks_hab = self._get_time_masks(mask_info_hab)
            masks.extend(time_masks_hab)

        # Add arm mask
        arm_mask = self._get_arm_mask(mask_info)
        masks.extend(arm_mask)

        # Add phi mask
        phi_mask = self._get_phi_mask(mask_info)
        masks.extend(phi_mask)

        # Add mask files
        mask_files = self._get_mask_files(mask_info)
        masks.extend(mask_files)
        return masks

    def get_masking_information(self, state):
        table_entries = []
        if state is not None:
            table_entries = self._generate_masking_information(state)
        return table_entries

    def display_masking_information(self, state):
        table_entries = self.get_masking_information(state)
        self._view.set_table(table_entries)

    @staticmethod
    def _display(masked_workspace):
        """Open the instrument view for the masked workspace if it exists in the ADS."""
        if masked_workspace and AnalysisDataService.doesExist(masked_workspace.name()):
            instrument_win = mantidplot.getInstrumentView(masked_workspace.name())
            instrument_win.show()
def __init__(self, view, exit_code):
    """
    Wire the presenter to its view.
    :param view: the error-report dialog
    :param exit_code: the exit code of the terminated application
    """
    self._view = view
    self._exit_code = exit_code
    self.error_log = Logger("error")
    # Route the dialog's report action back into this presenter.
    self._view.set_report_callback(self.error_handler)
class ErrorReporterPresenter(object):
    """
    Presenter of the error-report dialog.

    Depending on the user's choice it sends nothing, non-identifiable
    information, or the full report (optionally with a zipped project-recovery
    archive) to the error-report server, and then either quits or lets the
    user continue working.
    """

    def __init__(self, view, exit_code):
        """
        :param view: the error-report dialog
        :param exit_code: the exit code of the terminated application
        """
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._view.set_report_callback(self.error_handler)

    def do_not_share(self, continue_working=True):
        """Share nothing with the server; always returns -1 as the status."""
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        return -1

    def share_non_identifiable_information(self, continue_working):
        """Send only non-identifiable information; returns the HTTP status."""
        uptime = UsageService.getUpTime()
        status = self._send_report_to_server(share_identifiable=False, uptime=uptime)
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        return status

    def share_all_information(self, continue_working, name, email, text_box):
        """
        Send the full report including contact details and, if it can be
        created, a zipped project-recovery archive; returns the HTTP status.
        """
        uptime = UsageService.getUpTime()
        try:
            recovery_archive, file_hash = zip_recovery_directory()
        except Exception as exc:
            # BUG FIX: the exception was never interpolated into the message
            # (the literal "{}" was logged verbatim).
            self.error_log.information(
                "Error creating recovery archive: {}. No recovery information will be sent".format(exc))
            recovery_archive, file_hash = None, ""
        status = self._send_report_to_server(share_identifiable=True, uptime=uptime, name=name, email=email,
                                             file_hash=file_hash, text_box=text_box)
        self.error_log.notice("Sent full information")
        if status == 201 and recovery_archive:
            self._upload_recovery_file(recovery_archive=recovery_archive)
            try:
                os.remove(recovery_archive)
            except OSError as exc:
                self.error_log.information("Unable to remove zipped recovery information: {}".format(str(exc)))

        self._handle_exit(continue_working)
        return status

    def error_handler(self, continue_working, share, name, email, text_box):
        """Dispatch the user's sharing choice; returns the resulting status."""
        if share == 0:
            status = self.share_all_information(continue_working, name, email, text_box)
        elif share == 1:
            status = self.share_non_identifiable_information(continue_working)
        elif share == 2:
            status = self.do_not_share(continue_working)
        else:
            self.error_log.error("Unrecognised signal in errorreporter exiting")
            self._handle_exit(continue_working)
            status = -2

        return status

    def _handle_exit(self, continue_working):
        """Quit the view unless the user chose to continue working."""
        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def _upload_recovery_file(self, recovery_archive):
        """POST the recovery archive to the error-report server."""
        url = ConfigService['errorreports.rooturl']
        url = '{}/api/recovery'.format(url)
        # BUG FIX: the file handle was previously left open; use a context
        # manager so it is closed after the upload.
        with open('{}'.format(recovery_archive), 'rb') as file_handle:
            files = {'file': file_handle}
            response = requests.post(url, files=files)
        if response.status_code == 201:
            self.error_log.notice("Uploaded recovery file to server. HTTP response {}".format(response.status_code))
        else:
            self.error_log.error("Failed to send recovery data HTTP response {}".format(response.status_code))

    def _send_report_to_server(self, share_identifiable=False, name='', email='', file_hash='', uptime='',
                               text_box=''):
        """Build and send the error report; shows a message box on failure."""
        errorReporter = ErrorReporter(
            "mantidplot", uptime, self._exit_code, share_identifiable, str(name), str(email), str(text_box),
            str(file_hash))
        status = errorReporter.sendErrorReport()

        if status != 201:
            # BUG FIX: the concatenated literals were missing a separating space
            # ("...report.Please contact...").
            self._view.display_message_box('Error contacting server',
                                           'There was an error when sending the report. '
                                           'Please contact [email protected] directly',
                                           'http request returned with status {}'.format(status))
            self.error_log.error("Failed to send error report http request returned status {}".format(status))

        return status

    def show_view(self):
        self._view.show()
def test_unicode_logger(self):
    """The Logger exposes all six log levels and each accepts a message."""
    logger = Logger("LoggerTest")
    self.assertTrue(isinstance(logger, Logger))
    levels = ['fatal', 'error', 'warning', 'notice', 'information', 'debug']
    # First make sure every level method exists...
    for level in levels:
        if not hasattr(logger, level):
            self.fail("Logger object does not have the required attribute '%s'" % level)
    # ...then exercise each one with a simple message.
    for level in levels:
        getattr(logger, level)('This is a test')
# Mantid framework, numpy and the Bilby-specific reduction helper functions.
from mantid import *
import numpy as np
import os, csv, math
from mantid.kernel import Logger
import BilbyCustomFunctions_Reduction
# NOTE(review): bare reload() is a Python 2 builtin; on Python 3 this would need
# importlib.reload — confirm which interpreter runs this script.
reload (BilbyCustomFunctions_Reduction)

ansto_logger = Logger("AnstoDataReduction")

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# INPUT - mandatory from a USER - START
###########################################################################################
# csv file with the reduction settings, resolved via Mantid's data search directories.
red_settings = FileFinder.getFullPath('settings_csv_5779.csv')  #mantid_reduction_input_latex_shp.csv #settings_csv_peptides

# INPUT - index of a line with reduction parameters
index_reduction_settings = ["2"]  # INDEX OF THE LINE WITH REDUCTION SETTINGS

if len(index_reduction_settings) > 1:  # must be single choice
    raise ValueError('Please check your choice of reduction settigns; only single value is allowed')

# ID to evaluate - INPUT, in any combination of 'a-b' or ',c', or empty line; empty line means evaluate all files listed in csv
index_files_to_reduce = "98"  # as per csv_files_to_reduce_list file - LINES' INDEXES FOR FILES TO BE REDUCED

# Data file with the tube-shift correction numbers.
path_tube_shift_correction = FileFinder.getFullPath('shift_assembled.csv')
###########################################################################################
class BilbySANSDataProcessor(DataProcessorAlgorithm): def __init__(self): DataProcessorAlgorithm.__init__(self) self.sanslog = Logger("ANSTO SANS Data reduction") def category(self): return "Workflow\\SANS" def seeAlso(self): return ["Q1D", "TOFSANSResolutionByPixel", "SANSWideAngleCorrection"] def name(self): return "BilbySANSDataProcessor" def summary(self): return "BILBY SANS data reduction. Converts a workspace in wavelength into a 1D or 2D workspace of" \ " momentum transfer, assuming elastic scattering." def PyInit(self): # input self.declareProperty(MatrixWorkspaceProperty('InputWorkspace', '', direction=Direction.Input, optional=PropertyMode.Mandatory), doc='Particle counts as a function of wavelength') self.declareProperty(MatrixWorkspaceProperty('InputMaskingWorkspace', '', direction=Direction.Input, optional=PropertyMode.Optional), doc='Mask for the scattering data') # blocked beam, beam shape and detector corrections self.declareProperty(MatrixWorkspaceProperty('BlockedBeamWorkspace', '', direction=Direction.Input, optional=PropertyMode.Optional), doc='Blocked beam scattering') self.declareProperty(MatrixWorkspaceProperty('EmptyBeamSpectrumShapeWorkspace', '', direction=Direction.Input, optional=PropertyMode.Mandatory, validator=WorkspaceUnitValidator("Wavelength")), doc='Empty beam transmission, where only a given wavelength slice is considered') self.declareProperty(MatrixWorkspaceProperty('SensitivityCorrectionMatrix', '', direction=Direction.Input, optional=PropertyMode.Optional), doc='Detector sensitivity calibration data set') self.declareProperty(MatrixWorkspaceProperty('TransmissionWorkspace', '', direction=Direction.Input, optional=PropertyMode.Mandatory), doc='Sample transmission workspace') self.declareProperty(MatrixWorkspaceProperty('TransmissionEmptyBeamWorkspace', '', direction=Direction.Input, optional=PropertyMode.Mandatory), doc='Empty beam transmission workspace') 
self.declareProperty(MatrixWorkspaceProperty('TransmissionMaskingWorkspace', '', direction=Direction.Input, optional=PropertyMode.Mandatory), doc='Mask for the transmission data') self.declareProperty(name='FitMethod', defaultValue='log', doc='Function to use to fit transmission; can be Linear,' ' Log, Polynomial (first letter shall be capital)') self.declareProperty(name='PolynomialOrder', defaultValue='3', doc='Used only for Polynomial function, but needed as an input parameter anyway') self.declareProperty(name='ScalingFactor', defaultValue=1.0, validator=FloatBoundedValidator(lower=0.0), doc='Attenuating factor') self.declareProperty(name='SampleThickness', defaultValue=1.0, validator=FloatBoundedValidator(lower=0.0), doc='Thickness of sample') self.declareProperty(FloatArrayProperty('BinningWavelength', direction=Direction.Input, validator=FloatArrayMandatoryValidator()), doc='Wavelength boundaries for reduction: a comma separated list of first bin boundary,' ' width, last bin boundary') self.declareProperty(FloatArrayProperty('BinningWavelengthTransm', direction=Direction.Input, validator=FloatArrayMandatoryValidator()), doc='Wavelengths boundaries for transmission binning: a comma separated list of first bin' ' boundary, width, last bin') self.declareProperty(FloatArrayProperty('BinningQ', direction=Direction.Input, validator=FloatArrayMandatoryValidator()), doc='Output Q-boundaries: a comma separated list of first bin boundary,' ' width, last bin boundary') self.declareProperty(name='Timemode', defaultValue=True, doc='If data collected in ToF or monochromatic mode') self.declareProperty(name='AccountForGravity', defaultValue=True, doc='Whether to correct for the effects of gravity') self.declareProperty(name='SolidAngleWeighting', defaultValue=True, doc='If True, pixels will be weighted by their solid angle') self.declareProperty(name='RadiusCut', defaultValue=1.0, validator=FloatBoundedValidator(lower=0.0), doc='To increase resolution some wavelengths are 
excluded within this distance from the' ' beam center (mm). Note that RadiusCut and WaveCut both need to be larger than 0 to' ' affect the effective cutoff. See the algorithm description for a detailed' ' explanation of the cutoff.') self.declareProperty(name='WaveCut', defaultValue=1.0, validator=FloatBoundedValidator(lower=0.0), doc='To increase resolution by starting to remove some wavelengths below this threshold' ' (angstrom). Note that WaveCut and RadiusCut both need to be larger than 0 to affect' ' on the effective cutoff. See the algorithm description for a detailed explanation' ' of the cutoff.') self.declareProperty(name='WideAngleCorrection', defaultValue=True, doc='If true, the wide angle correction for transmissions will be applied') self.declareProperty(name='Reduce2D', defaultValue=False, doc='If true, 2D data reduction will be performed') self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '', direction=Direction.Output), doc='Name of the workspace that contains the result of the calculation. ' 'Created automatically.') self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceTransmissionFit', '', direction=Direction.Output), # This works only when transmission is True. Problems starts when it is not... 
doc='Counts vs wavelength, fit for the sample transmission') def validateInputs(self): inputs = dict() ws_sam = self.getProperty("InputWorkspace").value ws_samMsk = self.getProperty("InputMaskingWorkspace").value ws_blk = self.getProperty("BlockedBeamWorkspace").value ws_emp = self.getProperty("EmptyBeamSpectrumShapeWorkspace").value ws_sen = self.getProperty("SensitivityCorrectionMatrix").value ws_tranSam = self.getProperty("TransmissionWorkspace").value ws_tranEmp = self.getProperty("TransmissionEmptyBeamWorkspace").value ws_tranMsk = self.getProperty("TransmissionMaskingWorkspace").value # -- Validation -- sam_histograms = ws_sam.getNumberHistograms() if sam_histograms <= 0: inputs["InputWorkspace"] = "has to contain at least one spectrum" elif not ws_sam.isHistogramData(): inputs["InputWorkspace"] = "has to be a histogram" if ws_samMsk: isinstance(ws_samMsk, IMaskWorkspace) if ws_blk: if not ws_blk.isHistogramData(): inputs["BlockedBeamWorkspace"] = "has to be a histogram" elif ws_blk.blocksize() != 1: inputs["BlockedBeamWorkspace"] = "each spectrum must contain only one y value" if ws_emp.getNumberHistograms() != 1: inputs["EmptyBeamSpectrumShapeWorkspace"] = "has to contain only one spectrum" elif not ws_emp.isHistogramData(): inputs["EmptyBeamSpectrumShapeWorkspace"] = "has to be a histogram" if ws_sen: if ws_sen.getNumberHistograms() != sam_histograms: inputs["SensitivityCorrectionMatrix"] = "must have same number of spectra as the InputWorkspace" elif not ws_sen.isHistogramData(): inputs["SensitivityCorrectionMatrix"] = "has to be a histogram" elif ws_sen.getAxis(0).getUnit().symbol(): inputs["SensitivityCorrectionMatrix"] = "has to be unitless" tran_histograms = ws_tranSam.getNumberHistograms() if tran_histograms <= 0: inputs["TransmissionWorkspace"] = "has to contain at least one spectrum" elif not ws_tranSam.isHistogramData(): inputs["TransmissionWorkspace"] = "has to be a histogram" if ws_tranEmp.getNumberHistograms() != tran_histograms: 
inputs["TransmissionEmptyBeamWorkspace"] = "must have same number of spectra as the TransmissionWorkspace" elif not ws_tranEmp.isHistogramData(): inputs["TransmissionEmptyBeamWorkspace"] = "has to be a histogram" if ws_tranMsk: isinstance(ws_tranMsk, IMaskWorkspace) inputs = self.check_geometry_and_cuts(inputs) return inputs def check_geometry_and_cuts(self, inputs): scale = self.getProperty("ScalingFactor").value thickness = self.getProperty("SampleThickness").value radiuscut = self.getProperty("RadiusCut").value wavecut = self.getProperty("WaveCut").value if scale <= 0.0: inputs["ScalingFactor"] = "has to be greater than zero" if thickness <= 0.0: inputs["SampleThickness"] = "has to be greater than zero" if radiuscut < 0.0: inputs["radiuscut"] = "has to be equal or greater than zero" if wavecut < 0.0: inputs["wavecut"] = "has to be equal or greater than zero" return inputs def PyExec(self): self.sanslog.warning( "SANSDataProcessing is in the beta phase of development. Properties may change without notice.") self.sanslog.warning("Log on the changes is recorded in the body of SANSDataProcessor.py file") # -- Get Arguments -- ws_sam = self.getProperty("InputWorkspace").value ws_samMsk = self.getProperty("InputMaskingWorkspace").value ws_blk = self.getProperty("BlockedBeamWorkspace").value ws_emp = self.getProperty("EmptyBeamSpectrumShapeWorkspace").value ws_sen = self.getProperty("SensitivityCorrectionMatrix").value ws_tranSam = self.getProperty("TransmissionWorkspace").value ws_tranEmp = self.getProperty("TransmissionEmptyBeamWorkspace").value ws_tranMsk = self.getProperty("TransmissionMaskingWorkspace").value scale = self.getProperty("ScalingFactor").value thickness = self.getProperty("SampleThickness").value binning_wavelength = self.getProperty("BinningWavelength").value binning_q = self.getProperty("BinningQ").value wavecut = self.getProperty("WaveCut").value radiuscut = self.getProperty("RadiusCut").value binning_wavelength_transm = 
self.getProperty("BinningWavelengthTransm").value fitmethod = self.getProperty("FitMethod").value polynomialorder = self.getProperty("PolynomialOrder").value time_mode = self.getProperty( "TimeMode").value # True if External time frame (i.e. choppers), False if Internal time frames (Neutron Velocity Selector) account_for_gravity = self.getProperty("AccountForGravity").value solid_angle_weighting = self.getProperty("SolidAngleWeighting").value wide_angle_correction = self.getProperty("WideAngleCorrection").value reduce_2d = self.getProperty("Reduce2D").value # -- Masking -- if ws_samMsk: self._apply_mask(ws_sam, ws_samMsk) if ws_tranMsk: self._apply_mask(ws_tranSam, ws_tranMsk) self._apply_mask(ws_tranEmp, ws_tranMsk) # -- Convert to Wavelength -- Only for the External time mode - choppers if time_mode: ws_sam = self._convert_units(ws_sam, "Wavelength") ws_tranSam = self._convert_units(ws_tranSam, "Wavelength") ws_tranEmp = self._convert_units(ws_tranEmp, "Wavelength") # -- Transmission -- # Intuitively one would think rebin for NVS data is not needed, but it is required; # not perfect match in binning leads to error like "not matching intervals for calculate_transmission" ws_sam = self._rebin(ws_sam, binning_wavelength, preserveevents=False) ws_tranSam = self._rebin(ws_tranSam, binning_wavelength_transm, preserveevents=False) ws_tranEmp = self._rebin(ws_tranEmp, binning_wavelength_transm, preserveevents=False) ws_tranroi = self._mask_to_roi(ws_tranMsk) self.sanslog.information("FitMethod " + fitmethod) self.sanslog.information("PolynomialOrder " + polynomialorder) ws_tran = self._calculate_transmission(ws_tranSam, ws_tranEmp, ws_tranroi, fitmethod, polynomialorder, binning_wavelength_transm) ws_tranemp_scale = self._get_frame_count(ws_tranEmp) ws_transam_scale = self._get_frame_count(ws_tranSam) f = self._single_valued_ws(ws_tranemp_scale / ws_transam_scale) ws_tran = self._multiply(ws_tran, f) transmission_fit = ws_tran 
self.setProperty("OutputWorkspaceTransmissionFit", transmission_fit) # -- Blocked Beam Subtraction -- only if blk workspace has been provided (obviously) if ws_blk: ws_sam_time = self._get_frame_count(ws_sam) ws_blk_time = self._get_frame_count(ws_blk) ws_blk_scaling = self._single_valued_ws(ws_sam_time / ws_blk_time) # remove estimated blk counts from sample workspace self._apply_mask(ws_blk, ws_samMsk) # masking blocked beam the same way as sample data if time_mode: ws_blk = self._convert_units(ws_blk, "Wavelength") ws_blk = self._rebin(ws_blk, binning_wavelength, preserveevents=False) # estimated blk counts for given measurement time and bin width ws_blk_est = self._multiply(ws_blk, ws_blk_scaling) ws_sam = self._subtract(ws_sam, ws_blk_est) # sensitivity pixeladj = ws_sen ws_tran = self._emp_shape_adjustment(ws_tran, ws_emp) # swap arrays; ws_emp will always be shorter or equal to ws_tran wavelengthadj = self._multiply(ws_emp, ws_tran) # calculate the wide angle correction for sample transmission if wide_angle_correction: wavepixeladj = self._wide_angle_correction(ws_sam, ws_tran) else: wavepixeladj = None # distance to maximum of parabolic motion of neutrons real_l1 = self._get_l1(ws_sam) # distance from the end of the last guide to the sample extralength = 0.5 * real_l1 # neutrons following parabolic trajectory with maximum at the middle of the L1 # normalize vector to counting time of sample & long empty beam run ws_emp_time = self._get_frame_count(ws_emp) ws_sam_time = self._get_frame_count(ws_sam) scale_full = scale * (ws_emp_time / ws_sam_time) # extra multiplier is needed because measured transmission is ~5% lower; # we need to divide result by lower number, hence need to lowering the final result, i.e. 
divide by 1.05 f = self._single_valued_ws(scale_full / (thickness * 1.05)) if reduce_2d: q_max = binning_q[2] q_delta = binning_q[1] qxy = self._qxy(ws_sam, q_max, q_delta, pixeladj, wavelengthadj, account_for_gravity, solid_angle_weighting, extralength) qxy = self._multiply(qxy, f) self.setProperty("OutputWorkspace", qxy) else: if (ws_sam.run().getProperty("source_aperture").value): sourceapertureradius = float(ws_sam.run().getProperty("source_aperture").value) / 2.0 if sourceapertureradius > SOURCE_APERTURE_RADIUS_MAX: sourceapertureradius = SOURCE_APERTURE_RADIUS print("sourceapertureradius value cannot be retrieved; generic value of 20mm taken") else: sourceapertureradius = SOURCE_APERTURE_RADIUS # radius in mm print("sourceapertureradius value cannot be retrieved; generic value of 20mm taken") if (ws_sam.run().getProperty("sample_aperture").value): sampleapertureradius = float(ws_sam.run().getProperty("source_aperture").value) / 2.0 if sampleapertureradius > SAMPLE_APERTURE_RADIUS_MAX: sampleapertureradius = SAMPLE_APERTURE_RADIUS print("sampleapertureradius value cannot be retrieved; generic value of 6.25mm taken") else: sampleapertureradius = SAMPLE_APERTURE_RADIUS # radius in mm print("sampleapertureradius value cannot be retrieved; generic value of 6.25mm taken") # creating empty array for SigmaModerator # SigmaModerator is a mandatory parameter for ISIS, but not needed for the reactor facility number_of_bins = NUMBER_OF_BINS number_of_spectra = NUMBER_OF_SPECTRA delta_wavelength = DELTA_WAVELENGTH data_x = np.zeros(number_of_bins + 1) data_y = np.zeros(number_of_bins) x_value = 0.5 y_value = 0.0 for index in range(number_of_bins): data_x[index] = x_value data_y[index] = y_value x_value += delta_wavelength data_x[number_of_bins] = x_value units = "Wavelength" sigmamoderator = self._create_empty_ws(data_x, data_y, number_of_spectra, units) # Call TOFSANSResolutionByPixel ws_sam = self._multiply(ws_sam, f) qresolution = self._tofsansresolutionbypixel(ws_sam, 
sampleapertureradius, sourceapertureradius, sigmamoderator, real_l1, account_for_gravity, extralength) # Call Q1D, now with resolution q1d = self._q1d(ws_sam, binning_q, pixeladj, wavelengthadj, wavepixeladj, account_for_gravity, solid_angle_weighting, radiuscut, wavecut, extralength, qresolution) self.setProperty("OutputWorkspace", q1d) # set output, file 1D pattern def _get_time_span(self, ws): run = ws.getRun() duration = run.endTime() - run.starme() return float(duration.total_microseconds()) def _get_bm_counts(self, ws): return float(ws.run().getProperty("bm_counts").value) def _get_frame_count(self, ws): return float(ws.run().getProperty("frame_count").value) def _get_period(self, ws): return float(ws.run().getProperty("period").value) def _get_l1(self, ws): return float(ws.run().getProperty("L1").value) def _apply_mask(self, ws, mask): alg = self.createChildAlgorithm("MaskDetectors") alg.setProperty("Workspace", ws) alg.setProperty("MaskedWorkspace", mask) alg.execute() def _convert_units(self, ws, unit): alg = self.createChildAlgorithm("ConvertUnits") alg.setProperty("InputWorkspace", ws) alg.setProperty("Target", unit) alg.execute() return alg.getProperty("OutputWorkspace").value def _rebin(self, ws, binning, preserveevents): alg = self.createChildAlgorithm("Rebin") alg.setProperty("InputWorkspace", ws) alg.setProperty("Params", binning) alg.setProperty("PreserveEvents", preserveevents) alg.execute() return alg.getProperty("OutputWorkspace").value def _multiply(self, a, b): alg = self.createChildAlgorithm("Multiply") alg.setProperty("LHSWorkspace", a) alg.setProperty("RHSWorkspace", b) alg.execute() return alg.getProperty("OutputWorkspace").value def _scale_mult(self, ws_input, factor, operation): alg = self.createChildAlgorithm("Scale") alg.setProperty("InputWorkspace", ws_input) alg.setProperty("Factor", factor) alg.setProperty("Operation", operation) alg.execute() return alg.getProperty("OutputWorkspace").value def _subtract(self, a, b): alg = 
self.createChildAlgorithm("Minus") alg.setProperty("LHSWorkspace", a) alg.setProperty("RHSWorkspace", b) alg.execute() return alg.getProperty("OutputWorkspace").value def _single_valued_ws(self, value): alg = self.createChildAlgorithm("CreateSingleValuedWorkspace") alg.setProperty("DataValue", value) alg.execute() return alg.getProperty("OutputWorkspace").value def _mask_to_roi(self, ws_mask): alg = self.createChildAlgorithm("InvertMask") alg.setProperty("InputWorkspace", ws_mask) alg.execute() ws_tranmskinv = alg.getProperty("OutputWorkspace").value alg = self.createChildAlgorithm("ExtractMask") alg.setProperty("InputWorkspace", ws_tranmskinv) alg.execute() return alg.getProperty("DetectorList").value def _calculate_transmission(self, ws_tranSam, ws_tranEmp, ws_tranroi, fitmethod, polynomialorder, binning): alg = self.createChildAlgorithm("CalculateTransmission") alg.setProperty("SampleRunWorkspace", ws_tranSam) alg.setProperty("DirectRunWorkspace", ws_tranEmp) alg.setProperty("TransmissionROI", ws_tranroi) alg.setProperty("RebinParams", binning) alg.setProperty("FitMethod", fitmethod) # new alg.setProperty("PolynomialOrder", polynomialorder) # new # FitMethod = 'Polynomial', PolynomialOrder = '4' alg.execute() return alg.getProperty("OutputWorkspace").value def _wide_angle_correction(self, ws_sam, ws_tranSam): alg = self.createChildAlgorithm("SANSWideAngleCorrection") alg.setProperty("SampleData", ws_sam) alg.setProperty("TransmissionData", ws_tranSam) alg.execute() return alg.getProperty("OutputWorkspace").value def _emp_shape_adjustment(self, ws_emp, ws_tran): if ws_emp.getNumberHistograms() != 1: raise ValueError if ws_tran.getNumberHistograms() != 1: raise ValueError ws_emp_bins = ws_emp.readX(0) ws_tran_bins = ws_tran.readX(0) if np.array_equal(ws_emp_bins, ws_tran_bins): # check that bins match return ws_emp # if they match keep them as they are self.sanslog.warning( "EmptyBeamSpectrumShapeWorkspace did not have expected wavelength binning and has to be 
rebinned") alg = self.createChildAlgorithm("RebinToWorkspace") alg.setProperty("WorkspaceToRebin", ws_emp) alg.setProperty("WorkspaceToMatch", ws_tran) alg.setProperty("PreserveEvents", False) alg.execute() return alg.getProperty("OutputWorkspace").value def _tofsansresolutionbypixel(self, ws_sam, sampleapertureradius, sourceapertureradius, sigmamoderator, collimationlength, accountforgravity, extralength, deltar=5.0): alg = self.createChildAlgorithm("TOFSANSResolutionByPixel") alg.setProperty("InputWorkspace", ws_sam) alg.setProperty("DeltaR", deltar) alg.setProperty("SampleApertureRadius", sampleapertureradius) alg.setProperty("SourceApertureRadius", sourceapertureradius) alg.setProperty("SigmaModerator", sigmamoderator) alg.setProperty("CollimationLength", collimationlength) alg.setProperty("AccountForGravity", accountforgravity) alg.setProperty("ExtraLength", extralength) alg.execute() return alg.getProperty("OutputWorkspace").value def _q1d(self, ws_sam, binning_q, pixeladj, wavelengthadj, wavepixeladj, accountforgravity, solidangleweighting, radiuscut, wavecut, extralength, qresolution): alg = self.createChildAlgorithm("Q1D") alg.setProperty("DetBankWorkspace", ws_sam) alg.setProperty("OutputBinning", binning_q) alg.setProperty("AccountForGravity", accountforgravity) alg.setProperty("SolidAngleWeighting", solidangleweighting) alg.setProperty("RadiusCut", radiuscut) alg.setProperty("WaveCut", wavecut) alg.setProperty("ExtraLength", extralength) alg.setProperty("QResolution", qresolution) # transmission and beam shape correction if wavelengthadj: alg.setProperty("WavelengthAdj", wavelengthadj) # wide angle correction if wavepixeladj: alg.setProperty("wavePixelAdj", wavepixeladj) # pixel sensitivity correction if pixeladj: alg.setProperty("PixelAdj", pixeladj) alg.execute() return alg.getProperty("OutputWorkspace").value def _qxy(self, ws_sam, q_max, q_delta, pixeladj, wavelengthadj, accountforgravity, solidangleweighting, extralength): alg = 
self.createChildAlgorithm("Qxy") alg.setProperty("InputWorkspace", ws_sam) alg.setProperty("MaxQxy", q_max) alg.setProperty("DeltaQ", q_delta) alg.setProperty("AccountForGravity", accountforgravity) alg.setProperty("SolidAngleWeighting", solidangleweighting) alg.setProperty("ExtraLength", extralength) # pixel sensitivity correction if pixeladj: alg.setProperty("PixelAdj", pixeladj) # transmission and beam shape correction if wavelengthadj: alg.setProperty("WavelengthAdj", wavelengthadj) alg.execute() return alg.getProperty("OutputWorkspace").value def _create_empty_ws(self, data_x, data_y, number_of_spectra, unitx): # empty output workspace in case 2D reduction is not happening alg = self.createChildAlgorithm("CreateWorkspace") alg.setProperty('DataX', data_x) alg.setProperty('DataY', data_y) alg.setProperty('NSpec', number_of_spectra) alg.setProperty('UnitX', unitx) alg.execute() return alg.getProperty("OutputWorkspace").value
class BeamCenterLogger(object):
    """
    Reports progress of the beam-centre finding operation.

    The reported values depend on the type of the first coordinate,
    i.e. [m, m] or [degree, m], and are corrected for potential offsets
    such as bench rotations.
    """

    def __init__(self, reducer, coord1_scale_factor, coord2_scale_factor):
        super(BeamCenterLogger, self).__init__()
        self.logger = Logger("CentreFinder")
        if is_workspace_which_requires_angle(reducer):
            # Angle-based first coordinate: no scaling applied, but correct
            # for the bench rotation. Only supply the bench rotation if it is
            # really needed; its sign is reversed because the directionality
            # of the angles is opposite to Mantid's convention.
            self.using_angle = True
            self.coord1_scale_factor = 1.
            self.offset_coord1 = -1*get_bench_rotation(reducer)
        else:
            self.using_angle = False
            self.coord1_scale_factor = coord1_scale_factor
            self.offset_coord1 = 0.0
        self.coord2_scale_factor = coord2_scale_factor
        self.offset_coord2 = 0.0

    def _scaled(self, coord1, coord2):
        # Remove the data offset (it should not be displayed), then scale.
        value1 = (coord1 - self.offset_coord1) * self.coord1_scale_factor
        value2 = (coord2 - self.offset_coord2) * self.coord2_scale_factor
        return value1, value2

    def report_init(self, coord1, coord2):
        '''
        Report the initial setup
        @param coord1: the first coordinate
        @param coord2: the second coordinate
        '''
        first_label = "beta_start" if self.using_angle else "x_start"
        value1, value2 = self._scaled(coord1, coord2)
        self.logger.notice(first_label + ",ystart= %s %s" % (str(value1), str(value2)))
        self.logger.notice("Starting centre finding routine ...")

    def report_status(self, iteration, coord1, coord2, resid1, resid2):  # pylint: disable=too-many-arguments
        '''
        Report the status of a beam finder iteration
        @param iteration: the number of the iteration
        @param coord1: the first coordinate
        @param coord2: the second coordinate
        @param resid1: the residual of the first coordinate
        @param resid2: the residual of the second coordinate
        '''
        self.logger.notice(self.get_status_message(iteration, coord1, coord2, resid1, resid2))

    def get_status_message(self, iteration, coord1, coord2, resid1, resid2):  # pylint: disable=too-many-arguments
        '''
        Build the status message for a beam finder iteration
        @param iteration: the number of the iteration
        @param coord1: the first coordinate
        @param coord2: the second coordinate
        @param resid1: the residual of the first coordinate
        @param resid2: the residual of the second coordinate
        '''
        value1, value2 = self._scaled(coord1, coord2)
        # Fixed-width fields: pad then truncate each value.
        fields = (iteration,
                  str(value1).ljust(10)[0:9],
                  str(value2).ljust(10)[0:9],
                  str(resid1).ljust(7)[0:6],
                  str(resid2).ljust(7)[0:6])
        return "Itr %i: (%s, %s) SX=%s SY=%s" % fields

    def report(self, msg):
        '''
        Report a general message
        @param msg: the message to report
        '''
        self.logger.notice(msg)

    def report_final(self, coord1, coord2):
        '''
        Report the final coordinates which are set in the reducer
        @param coord1: the first coordinate
        @param coord2: the second coordinate
        '''
        # No offset correction is needed here: the input already has the
        # offset removed, and the first coordinate was corrected before being
        # passed in. For reporting purposes both entries are reverted with the
        # same (1000) scaling.
        general_scale = self.coord2_scale_factor
        self.logger.notice("Centre coordinates updated: [ %f, %f ]" % (coord1*general_scale, coord2*general_scale))
    def __init__(self):
        """Initialise the base DataProcessorAlgorithm and create the log channel used by the reduction."""
        DataProcessorAlgorithm.__init__(self)
        # Named Mantid log channel for this algorithm's reduction messages.
        self.sanslog = Logger("ANSTO SANS Data reduction")
def PyExec(self): state = self._get_state() state_serialized = state.property_manager logger = Logger("CentreFinder") logger.notice("Starting centre finder routine...") progress = self._get_progress() self.scale_1 = 1000 self.scale_2 = 1000 verbose = self.getProperty('Verbose').value x_start = self.getProperty("Position1Start").value y_start = self.getProperty("Position2Start").value sample_scatter = self._get_cloned_workspace("SampleScatterWorkspace") sample_scatter_monitor = self._get_cloned_workspace("SampleScatterMonitorWorkspace") sample_transmission = self._get_cloned_workspace("SampleTransmissionWorkspace") sample_direct = self._get_cloned_workspace("SampleDirectWorkspace") instrument = sample_scatter.getInstrument() if instrument.getName() == 'LARMOR': self.scale_1 = 1.0 can_scatter = self._get_cloned_workspace("CanScatterWorkspace") can_scatter_monitor = self._get_cloned_workspace("CanScatterMonitorWorkspace") can_transmission = self._get_cloned_workspace("CanTransmissionWorkspace") can_direct = self._get_cloned_workspace("CanDirectWorkspace") component = self.getProperty("Component").value tolerance = self.getProperty("Tolerance").value max_iterations = self.getProperty("Iterations").value r_min = self.getProperty("RMin").value r_max = self.getProperty("RMax").value instrument_file = get_instrument_paths_for_sans_file(state.data.sample_scatter) position_1_step = get_named_elements_from_ipf_file( instrument_file[1], ["centre-finder-step-size"], float)['centre-finder-step-size'] try: position_2_step = get_named_elements_from_ipf_file( instrument_file[1], ["centre-finder-step-size2"], float)['centre-finder-step-size2'] except: position_2_step = position_1_step find_direction = self.getProperty("Direction").value if find_direction == FindDirectionEnum.to_string(FindDirectionEnum.Left_Right): position_2_step = 0.0 elif find_direction == FindDirectionEnum.to_string(FindDirectionEnum.Up_Down): position_1_step = 0.0 centre1 = x_start centre2 = y_start residueLR = 
[] residueTB = [] centre_1_hold = x_start centre_2_hold = y_start for j in range(0, max_iterations + 1): if(j != 0): centre1 += position_1_step centre2 += position_2_step progress.report("Reducing ... Pos1 " + str(centre1) + " Pos2 " + str(centre2)) sample_quartiles = self._run_quartile_reduction(sample_scatter, sample_transmission, sample_direct, "Sample", sample_scatter_monitor, component, state_serialized, centre1, centre2, r_min, r_max) if can_scatter: can_quartiles = self._run_quartile_reduction(can_scatter, can_transmission, can_direct, "Can", can_scatter_monitor, component, state_serialized, centre1, centre2, r_min, r_max) for key in sample_quartiles: sample_quartiles[key] = perform_can_subtraction(sample_quartiles[key], can_quartiles[key], self) if mantidplot: output_workspaces = self._publish_to_ADS(sample_quartiles) if verbose: self._rename_and_group_workspaces(j, output_workspaces) residueLR.append(self._calculate_residuals(sample_quartiles[MaskingQuadrant.Left], sample_quartiles[MaskingQuadrant.Right])) residueTB.append(self._calculate_residuals(sample_quartiles[MaskingQuadrant.Top], sample_quartiles[MaskingQuadrant.Bottom])) if(j == 0): logger.notice("Itr {0}: ( {1}, {2} ) SX={3:.5g} SY={4:.5g}". format(j, self.scale_1 * centre1, self.scale_2 * centre2, residueLR[j], residueTB[j])) if mantidplot: self._plot_quartiles(output_workspaces, state.data.sample_scatter) else: # have we stepped across the y-axis that goes through the beam center? if residueLR[j] > residueLR[j-1]: # yes with stepped across the middle, reverse direction and half the step size position_1_step = - position_1_step / 2 if residueTB[j] > residueTB[j-1]: position_2_step = - position_2_step / 2 logger.notice("Itr {0}: ( {1}, {2} ) SX={3:.5g} SY={4:.5g}". 
format(j, self.scale_1 * centre1, self.scale_2 * centre2, residueLR[j], residueTB[j])) if (residueLR[j]+residueTB[j]) < (residueLR[j-1]+residueTB[j-1]) or state.compatibility.use_compatibility_mode: centre_1_hold = centre1 centre_2_hold = centre2 if abs(position_1_step) < tolerance and abs(position_2_step) < tolerance: # this is the success criteria, we've close enough to the center logger.notice("Converged - check if stuck in local minimum! ") break if j == max_iterations: logger.notice("Out of iterations, new coordinates may not be the best") self.setProperty("Centre1", centre_1_hold) self.setProperty("Centre2", centre_2_hold) logger.notice("Centre coordinates updated: [{}, {}]".format(centre_1_hold*self.scale_1, centre_2_hold*self.scale_2))