def execute(self, reducer, workspace):
    # Perform standard sensitivity correction
    # If the sensitivity correction workspace exists, just apply it. Otherwise create it.
    output_str = " Using data set: %s" % extract_workspace_name(self._flood_data)
    if self._efficiency_ws is None:
        self._compute_efficiency(reducer, workspace)

    # Modify for wavelength dependency of the efficiency of the detector tubes
    EQSANSSensitivityCorrection(InputWorkspace=workspace,
                                EfficiencyWorkspace=self._efficiency_ws,
                                Factor=0.95661, Error=0.005,
                                OutputWorkspace=workspace,
                                OutputEfficiencyWorkspace="__wl_efficiency")

    # Copy over the efficiency's masked pixels to the reduced workspace
    masked_detectors = GetMaskedDetectors(self._efficiency_ws)
    MaskDetectors(workspace, None, masked_detectors.getPropertyValue("DetectorList"))

    return "Wavelength-dependent sensitivity correction applied\n%s\n" % output_str
def _mask_plot_clicked(self):
    self.mask_ws = "__mask_%s" % extract_workspace_name(str(self._summary.mask_edit.text()))
    self.show_instrument(self._summary.mask_edit.text,
                         workspace=self.mask_ws, tab=2,
                         reload=self.mask_reload,
                         mask=self._masked_detectors)
    self._masked_detectors = []
    self.mask_reload = False
def _compute_scaling_factor(self, reducer):
    """
        Compute the absolute scaling factor from the direct beam data set
    """
    # Sanity check
    if self._data_file is None:
        raise RuntimeError("AbsoluteScale called with no defined direct beam file")

    # Load data file
    filepath = find_data(self._data_file, instrument=reducer.instrument.name())
    data_file_ws = "__abs_scale_" + extract_workspace_name(filepath)

    loader = reducer._data_loader.clone(data_file=filepath)
    loader.set_beam_center(reducer.get_beam_center())
    loader.execute(reducer, data_file_ws)

    # Get the monitor (or timer) count used for normalization
    if reducer._normalizer is None:
        # Note: this option shouldn't really be allowed
        monitor_id = reducer.NORMALIZATION_MONITOR
    else:
        monitor_id = -1
        if hasattr(reducer._normalizer, "get_normalization_spectrum"):
            monitor_id = reducer._normalizer.get_normalization_spectrum()
    monitor = mtd[data_file_ws].dataY(monitor_id)[0]

    # HFIR-specific: when normalizing to the monitor, the Normalization step
    # scales by 1e8, so divide the monitor count here to stay consistent with it
    if monitor_id == reducer.NORMALIZATION_MONITOR:
        monitor /= 1.0e8

    if mtd[data_file_ws].getRun().hasProperty("sample_detector_distance"):
        sdd = mtd[data_file_ws].getRun().getProperty("sample_detector_distance").value
    else:
        raise RuntimeError("AbsoluteScale could not read the sample-detector distance")

    if self._beamstop_diameter is not None:
        beam_diameter = self._beamstop_diameter
    else:
        if mtd[data_file_ws].getRun().hasProperty("beam-diameter"):
            beam_diameter = mtd[data_file_ws].getRun().getProperty("beam-diameter").value
        else:
            raise RuntimeError("AbsoluteScale could not read the beam diameter and none was provided")

    # Apply sensitivity correction
    if self._apply_sensitivity and reducer.get_sensitivity_correcter() is not None:
        reducer.get_sensitivity_correcter().execute(data_file_ws)

    # Sum the counts of the detectors within the beam-stop diameter
    det_count = 1
    cylXML = ('<infinite-cylinder id="absolute_scale">'
              + '<centre x="0.0" y="0.0" z="0.0" />'
              + '<axis x="0.0" y="0.0" z="1.0" />'
              + '<radius val="%12.10f" />' % (beam_diameter / 2000.0)
              + '</infinite-cylinder>\n')
    det_finder = FindDetectorsInShape(Workspace=data_file_ws, ShapeXML=cylXML)
    det_list = det_finder.getPropertyValue("DetectorList")
    det_count_ws = "__absolute_scale"
    GroupDetectors(InputWorkspace=data_file_ws,
                   OutputWorkspace=det_count_ws,
                   DetectorList=det_list,
                   KeepUngroupedSpectra="0")
    det_count = mtd[det_count_ws].dataY(0)[0]

    # Pixel size, in mm
    pixel_size_param = mtd[data_file_ws].getInstrument().getNumberParameter("x-pixel-size")
    if pixel_size_param is not None:
        pixel_size = pixel_size_param[0]
    else:
        raise RuntimeError("AbsoluteScale could not read the pixel size")

    # (detector count rate)/(attenuator transmission)/(monitor rate)*(pixel size/SDD)**2
    self._scaling_factor = 1.0 / (det_count / self._attenuator_trans / monitor
                                  * (pixel_size / sdd) * (pixel_size / sdd))

    mtd.deleteWorkspace(data_file_ws)
    mtd.deleteWorkspace(det_count_ws)
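# Illustrative sketch only (not part of the reduction chain): the scaling factor
# computed above is the reciprocal of
#   (beam-spot counts) / (attenuator transmission) / (monitor counts) * (pixel size / SDD)**2.
# The helper below simply restates that arithmetic in isolation; its name and the
# example values are hypothetical, and pixel_size and sdd must share the same units.
def _example_absolute_scale(det_count, attenuator_trans, monitor, pixel_size, sdd):
    solid_angle_ratio = (pixel_size / sdd) * (pixel_size / sdd)
    return 1.0 / (det_count / attenuator_trans / monitor * solid_angle_ratio)

# Example (made-up numbers): a direct beam with 1e5 counts in the beam spot,
# 1% attenuator transmission, unit monitor rate, 5.15 mm pixels at 6 m:
#   _example_absolute_scale(1.0e5, 0.01, 1.0, 5.15, 6000.0)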