Beispiel #1
0
 def _load_data_file(file_name, wks_name):
     """Bring a single run into wks_name and report the loader's message.

     If file_name already names an in-memory workspace whose X axis is
     in time-of-flight, it is handed to EQSANSLoad as an input workspace;
     otherwise the file is located on disk and loaded from there.
     """
     filepath = None
     input_ws = None
     if mtd.workspaceExists(file_name) \
         and mtd[file_name].getAxis(0).getUnit().name() == "TOF":
         # Already-loaded event data in TOF: process it in place.
         input_ws = file_name
     else:
         # Not in memory (or not in TOF): resolve the file path for loading.
         filepath = find_data(file_name, instrument=reducer.instrument.name())

     loader = EQSANSLoad(Filename=filepath,
                         InputWorkspace=input_ws,
                         OutputWorkspace=wks_name,
                         UseConfigBeam=use_config_beam,
                         BeamCenterX=None,
                         BeamCenterY=None,
                         UseConfigTOFCuts=self._use_config_cutoff,
                         LowTOFCut=self._low_TOF_cut,
                         HighTOFCut=self._high_TOF_cut,
                         SkipTOFCorrection=self._skip_tof_correction,
                         WavelengthStep=self._wavelength_step,
                         UseConfigMask=self._use_config_mask,
                         UseConfig=self._use_config,
                         CorrectForFlightPath=self._correct_for_flight_path,
                         SampleDetectorDistance=self._sample_det_dist,
                         SampleDetectorDistanceOffset=self._sample_det_offset,
                         PreserveEvents=self._keep_events,
                         LoadMonitors=self._load_monitors,
                         ReductionProperties=reducer.get_reduction_table_name())
     return loader.getPropertyValue("OutputMessage")
Beispiel #2
0
 def _load_data_file(file_name, wks_name):
     """Load one HFIR run into wks_name and return the loader's message."""
     filepath = find_data(file_name, instrument=reducer.instrument.name())
     # Prefer an explicitly configured beam center; otherwise fall back
     # to the center currently held by the reducer.
     if self._beam_center is not None:
         beam_x, beam_y = self._beam_center
     else:
         beam_x, beam_y = reducer.get_beam_center()

     loader = HFIRLoad(filepath, wks_name,
                       BeamCenterX=beam_x,
                       BeamCenterY=beam_y,
                       SampleDetectorDistance=self._sample_det_dist,
                       SampleDetectorDistanceOffset=self._sample_det_offset,
                       Wavelength=self._wavelength,
                       WavelengthSpread=self._wavelength_spread,
                       ReductionProperties=reducer.get_reduction_table_name())
     return loader.getPropertyValue("OutputMessage")
Beispiel #3
0
 def _load_data_file(file_name, wks_name):
     """Load one EQSANS run into wks_name and return the loader's message."""
     filepath = find_data(file_name, instrument=reducer.instrument.name())
     loader = EQSANSLoad(
         Filename=filepath,
         OutputWorkspace=wks_name,
         UseConfigBeam=use_config_beam,
         BeamCenterX=pixel_ctr_x,
         BeamCenterY=pixel_ctr_y,
         UseConfigTOFCuts=self._use_config_cutoff,
         LowTOFCut=self._low_TOF_cut,
         HighTOFCut=self._high_TOF_cut,
         UseConfigMask=self._use_config_mask,
         UseConfig=self._use_config,
         CorrectForFlightPath=self._correct_for_flight_path,
         SampleDetectorDistance=self._sample_det_dist,
         SampleDetectorDistanceOffset=self._sample_det_offset,
         PreserveEvents=self._keep_events,
         ReductionTableWorkspace=reducer.get_reduction_table_name(),
     )
     return loader.getPropertyValue("OutputMessage")
Beispiel #4
0
    def execute(self, reducer, inputworkspace, outputworkspace=None):
        """
            Loads a data file.
            Note: Files are ALWAYS reloaded when this method is called.
            We do this because speed is not an issue and we ensure that the data
            is always pristine. We could only load files that are not already loaded
            by using the 'dirty' flag and checking for the existence of the workspace.

            @param reducer: Reducer object driving this step
            @param inputworkspace: workspace handle used to look up the data file
            @param outputworkspace: optional name of the workspace the data is
                loaded into; defaults to inputworkspace
            @return: log string describing what was loaded
        """
        output_str = ""
        if outputworkspace is not None:
            workspace = outputworkspace 
        else:
            workspace = inputworkspace
        # If we don't have a data file, look up the workspace handle
        if self._data_file is None:
            if workspace in reducer._data_files:
                data_file = reducer._data_files[workspace]
            elif workspace in reducer._extra_files:
                data_file = reducer._extra_files[workspace]
            else:
                raise RuntimeError, "SANSReductionSteps.LoadRun doesn't recognize workspace handle %s" % workspace
        else:
            data_file = self._data_file
        
        def _load_data_file(file_name, wks_name):
            # Load a single HFIR run into wks_name and return the loader's
            # output message.
            filepath = find_data(file_name, instrument=reducer.instrument.name())
            beam_x = None
            beam_y = None
            # Explicitly configured beam center wins over the reducer's.
            if self._beam_center is not None:            
                [beam_x, beam_y] = self._beam_center
            else:
                [beam_x, beam_y] = reducer.get_beam_center()
                
            l = HFIRLoad(filepath, wks_name,
                         BeamCenterX = beam_x,
                         BeamCenterY = beam_y,
                         SampleDetectorDistance = self._sample_det_dist,
                         SampleDetectorDistanceOffset = self._sample_det_offset,
                         Wavelength = self._wavelength,
                         WavelengthSpread = self._wavelength_spread,
                         ReductionProperties=reducer.get_reduction_table_name())
            return l.getPropertyValue("OutputMessage")

        # Check whether we have a list of files that need merging
        #   Make sure we process a list of files written as a string
        if type(data_file)==str:
            data_file = find_data(data_file, instrument=reducer.instrument.name(), allow_multiple=True)
        if type(data_file)==list:
            monitor = 0.0
            timer = 0.0 
            # Load the first file straight into the target workspace, then
            # Plus every subsequent file onto it, summing monitor/timer counts.
            for i in range(len(data_file)):
                output_str += "Loaded %s\n" % data_file[i]
                if i==0:
                    output_str += _load_data_file(data_file[i], workspace)
                else:
                    output_str += _load_data_file(data_file[i], '__tmp_wksp')
                    Plus(LHSWorkspace=workspace,
                         RHSWorkspace='__tmp_wksp',
                         OutputWorkspace=workspace)
                    # Get the monitor and timer values
                    monitor += mtd['__tmp_wksp'].getRun().getProperty("monitor").value
                    timer += mtd['__tmp_wksp'].getRun().getProperty("timer").value
            
            # Get the monitor and timer of the first file, which haven't yet
            # been added to the total
            monitor += mtd[workspace].getRun().getProperty("monitor").value
            timer += mtd[workspace].getRun().getProperty("timer").value
                    
            # Update the timer and monitor
            mantid[workspace].getRun().addProperty_dbl("monitor", monitor, True)
            mantid[workspace].getRun().addProperty_dbl("timer", timer, True)
            
            if mtd.workspaceExists('__tmp_wksp'):
                mtd.deleteWorkspace('__tmp_wksp')
        else:
            output_str += "Loaded %s\n" % data_file
            output_str += _load_data_file(data_file, workspace)
                
        # If the loader stored a beam center in the run logs and no real beam
        # finder is configured, fall back to that stored center as the default.
        if mtd[workspace].getRun().hasProperty("beam_center_x") and \
            mtd[workspace].getRun().hasProperty("beam_center_y"):
            beam_center_x = mtd[workspace].getRun().getProperty("beam_center_x").value
            beam_center_y = mtd[workspace].getRun().getProperty("beam_center_y").value
            if type(reducer._beam_finder) is BaseBeamFinder:
                reducer.set_beam_finder(BaseBeamFinder(beam_center_x, beam_center_y))
                mantid.sendLogMessage("No beam finding method: setting to default [%-6.1f, %-6.1f]" % (beam_center_x, beam_center_y))
        
        # NOTE(review): n_files is computed but never used below — presumably
        # leftover from an earlier version; confirm before removing.
        n_files = 1
        if type(data_file)==list:
            n_files = len(data_file)
            
        # Mask the back side or front side as needed
        # Front detector IDs
        if self._mask_side is not None and self._mask_side in [0, 1]:
            nx = int(mtd[workspace].getInstrument().getNumberParameter("number-of-x-pixels")[0])
            ny = int(mtd[workspace].getInstrument().getNumberParameter("number-of-y-pixels")[0])
            id_side = []
            
            # Collect every other pixel column (offset by the chosen side,
            # 0 or 1) for each detector row.
            for iy in range(ny):
                for ix in range(self._mask_side, nx+self._mask_side, 2):
                    id_side.append([iy,ix])

            det_side = reducer.instrument.get_detector_from_pixel(id_side, workspace)
            MaskDetectors(Workspace=workspace, DetectorList=det_side) 
        
        return output_str
Beispiel #5
0
    def execute(self, reducer, workspace):
        """
            Loads an EQSANS data file (or a list of files to be summed)
            into the given workspace.

            @param reducer: Reducer object driving this step
            @param workspace: name of the workspace to load the data into
            @return: log string describing what was loaded
        """
        output_str = ""      
        # If we don't have a data file, look up the workspace handle
        # Only files that are used for computing data corrections have
        # a path that is passed directly. Data files that are reduced
        # are simply found in reducer._data_files 
        if self._data_file is None:
            if workspace in reducer._data_files:
                data_file = reducer._data_files[workspace]
            elif workspace in reducer._extra_files:
                data_file = reducer._extra_files[workspace]
            else:
                raise RuntimeError, "SNSReductionSteps.LoadRun doesn't recognize workspace handle %s" % workspace
        else:
            data_file = self._data_file

        # Load data
        # A (0, 0) beam center means none was set, so let EQSANSLoad take
        # the beam position from the instrument configuration instead.
        use_config_beam = False
        [pixel_ctr_x, pixel_ctr_y] = reducer.get_beam_center()
        if pixel_ctr_x == 0.0 and pixel_ctr_y == 0.0:
            use_config_beam = True            
            
        def _load_data_file(file_name, wks_name):
            # Load one run into wks_name and return the loader's output message.
            # Check whether we are processing an event workspace or whether
            # we need to load a file
            if mtd.workspaceExists(file_name) \
                and mtd[file_name].getAxis(0).getUnit().name()=="TOF":
                input_ws = file_name
                filepath = None
            else:
                filepath = find_data(file_name, instrument=reducer.instrument.name())
                input_ws = None
                
            l = EQSANSLoad(Filename=filepath,
                           InputWorkspace=input_ws,
                           OutputWorkspace=wks_name,
                           UseConfigBeam=use_config_beam,
                           BeamCenterX=None,
                           BeamCenterY=None,
                           UseConfigTOFCuts=self._use_config_cutoff,
                           LowTOFCut=self._low_TOF_cut,
                           HighTOFCut=self._high_TOF_cut,
                           SkipTOFCorrection=self._skip_tof_correction,
                           WavelengthStep=self._wavelength_step,
                           UseConfigMask=self._use_config_mask,
                           UseConfig=self._use_config,
                           CorrectForFlightPath=self._correct_for_flight_path,
                           SampleDetectorDistance=self._sample_det_dist,
                           SampleDetectorDistanceOffset=self._sample_det_offset,
                           PreserveEvents=self._keep_events,
                           LoadMonitors=self._load_monitors,
                           ReductionProperties=reducer.get_reduction_table_name()
                           )            
            return l.getPropertyValue("OutputMessage")
        
        # Check whether we have a list of files that need merging
        #   Make sure we process a list of files written as a string
        if type(data_file)==str:
            data_file = find_data(data_file, instrument=reducer.instrument.name(), allow_multiple=True)
        if type(data_file)==list:
            # Load the first file into the target workspace, then Plus each
            # subsequent file onto it via a temporary workspace.
            for i in range(len(data_file)):
                output_str += "Loaded %s\n" % data_file[i]
                if i==0:
                    output_str += _load_data_file(data_file[i], workspace)
                else:
                    output_str += _load_data_file(data_file[i], '__tmp_wksp')
                    Plus(LHSWorkspace=workspace,
                         RHSWorkspace='__tmp_wksp',
                         OutputWorkspace=workspace)
            if mtd.workspaceExists('__tmp_wksp'):
                mtd.deleteWorkspace('__tmp_wksp')
        else:
            output_str += "Loaded %s\n" % data_file
            output_str += _load_data_file(data_file, workspace)
        
        # Record in the run logs which workspace holds the event data.
        mantid[workspace].getRun().addProperty_str("event_ws", workspace, True)
        
        # If the loader stored a beam center in the run logs and no real beam
        # finder is configured, use that stored center as the default.
        if mtd[workspace].getRun().hasProperty("beam_center_x") and \
            mtd[workspace].getRun().hasProperty("beam_center_y"):
            beam_center_x = mtd[workspace].getRun().getProperty("beam_center_x").value
            beam_center_y = mtd[workspace].getRun().getProperty("beam_center_y").value
            if type(reducer._beam_finder) is BaseBeamFinder:
                reducer.set_beam_finder(BaseBeamFinder(beam_center_x, beam_center_y))
                mantid.sendLogMessage("Setting beam center to [%-6.1f, %-6.1f]" % (beam_center_x, beam_center_y))
        
        # Remove the dirty flag if it existed
        reducer.clean(workspace)
        # Tag the workspace so later steps can tell it was loaded here.
        mtd[workspace].getRun().addProperty_int("loaded_by_eqsans_reduction", 1, True)
        
        return output_str
Beispiel #6
0
    def show_instrument(self, file_name=None, workspace=None, tab=-1, reload=False, mask=None):
        """
            Show instrument for the given data file.
            If both file_name and workspace are given, the file will be loaded in 
            a workspace with the given name.
            
            @param file_name: Data file path
            @param workspace: Workspace to create
            @param tab: Tab to open the instrument window in
            @param reload: If True, close any existing view and reload the data
            @param mask: Optional list of detector IDs to mask before display
        """
        file_name = str(file_name)
        
        def _show_ws_instrument(ws):
            # Open the instrument view for workspace ws; return True on success.
            # Do nothing if the instrument view is already displayed
            #FIXME: this doesn't seem to work 100% yet
            if False and self._instrument_view is not None and \
                self._data_set_viewed == file_name \
                and self._instrument_view.isVisible():
                
                # If we want a reload, close the instrument window currently shown
                if reload:
                    self._instrument_view.close()
                else:
                    return True
            
            self._instrument_view = qti.app.mantidUI.getInstrumentView(ws, tab)
            if self._instrument_view is not None:
                self._instrument_view.show()
                self._data_set_viewed = file_name
                return True
            
            return False
        
        # Sanity check
        if not IS_IN_MANTIDPLOT:
            return

        # Set up workspace name
        if workspace is None:
            workspace = '_'+os.path.split(file_name)[1]

        # See if the file is already loaded
        if not reload and _show_ws_instrument(workspace):
            return
        
        # Check that the file exists.
        # Catch Exception rather than using a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        try:
            filepath = find_data(file_name, instrument=self._settings.instrument_name)
        except Exception:
            QtGui.QMessageBox.warning(self, "File Not Found", "The supplied mask file can't be found on the file system")
            return
        if self._data_proxy is not None:
            proxy = self._data_proxy(filepath, workspace)
            if proxy.data_ws is not None:
                if mask is not None:
                    MaskDetectors(proxy.data_ws, DetectorList=mask)
                _show_ws_instrument(proxy.data_ws)
            else:
                QtGui.QMessageBox.warning(self, "Mask Error", "Mantid doesn't know how to load this file")
        else:
            QtGui.QMessageBox.warning(self, "Mask Error", "Mantid doesn't know how to load this file")
0
    def _compute_scaling_factor(self, reducer):
        """
            Compute the scaling factor
            used to put reduced data on an absolute scale, from a direct
            beam measurement. The result is stored in self._scaling_factor.

            @param reducer: Reducer object driving this step
            @raise RuntimeError: if no direct beam file was set or required
                run properties cannot be read
        """
        # Sanity check
        if self._data_file is None:
            raise RuntimeError, "AbsoluteScale called with no defined direct beam file"

        # Load data file
        filepath = find_data(self._data_file, instrument=reducer.instrument.name())
        data_file_ws = "__abs_scale_" + extract_workspace_name(filepath)

        # Load the direct beam run with the same loader settings as the data.
        loader = reducer._data_loader.clone(data_file=filepath)
        loader.set_beam_center(reducer.get_beam_center())
        loader.execute(reducer, data_file_ws)

        # Get counting time
        if reducer._normalizer is None:
            # Note: this option shouldn't really be allowed
            monitor_id = reducer.NORMALIZATION_MONITOR
        else:
            monitor_id = -1
            if hasattr(reducer._normalizer, "get_normalization_spectrum"):
                monitor_id = reducer._normalizer.get_normalization_spectrum()
        monitor = mtd[data_file_ws].dataY(monitor_id)[0]
        # HFIR-specific: If we count for monitor we need to multiply by 1e8
        # Need to be consistent with the Normalization step
        if monitor_id == reducer.NORMALIZATION_MONITOR:
            monitor /= 1.0e8

        if mtd[data_file_ws].getRun().hasProperty("sample_detector_distance"):
            sdd = mtd[data_file_ws].getRun().getProperty("sample_detector_distance").value
        else:
            raise RuntimeError, "AbsoluteScale could not read the sample-detector-distance"

        # Prefer an explicitly configured beamstop diameter over the value
        # stored in the run logs.
        if self._beamstop_diameter is not None:
            beam_diameter = self._beamstop_diameter
        else:
            if mtd[data_file_ws].getRun().hasProperty("beam-diameter"):
                beam_diameter = mtd[data_file_ws].getRun().getProperty("beam-diameter").value
            else:
                raise RuntimeError, "AbsoluteScale could not read the beam radius and none was provided"

        # Apply sensitivity correction
        if self._apply_sensitivity and reducer.get_sensitivity_correcter() is not None:
            reducer.get_sensitivity_correcter().execute(data_file_ws)

        det_count = 1
        # Infinite cylinder around the beam axis, radius = beam_diameter/2
        # (diameter is in mm; radius is converted to meters here).
        cylXML = (
            '<infinite-cylinder id="asbsolute_scale">'
            + '<centre x="0.0" y="0.0" z="0.0" />'
            + '<axis x="0.0" y="0.0" z="1.0" />'
            + '<radius val="%12.10f" />' % (beam_diameter / 2000.0)
            + "</infinite-cylinder>\n"
        )

        # Sum all detector counts inside the direct beam footprint.
        det_finder = FindDetectorsInShape(Workspace=data_file_ws, ShapeXML=cylXML)
        det_list = det_finder.getPropertyValue("DetectorList")
        det_count_ws = "__absolute_scale"
        GroupDetectors(
            InputWorkspace=data_file_ws, OutputWorkspace=det_count_ws, DetectorList=det_list, KeepUngroupedSpectra="0"
        )
        det_count = mtd[det_count_ws].dataY(0)[0]

        # Pixel size, in mm
        pixel_size_param = mtd[data_file_ws].getInstrument().getNumberParameter("x-pixel-size")
        if pixel_size_param is not None:
            pixel_size = pixel_size_param[0]
        else:
            raise RuntimeError, "AbsoluteScale could not read the pixel size"

        # (detector count rate)/(attenuator transmission)/(monitor rate)*(pixel size/SDD)**2
        self._scaling_factor = 1.0 / (
            det_count / self._attenuator_trans / (monitor) * (pixel_size / sdd) * (pixel_size / sdd)
        )

        # Clean up the temporary workspaces used for the computation.
        mtd.deleteWorkspace(data_file_ws)
        mtd.deleteWorkspace(det_count_ws)