def _wave_range(self):

        if self._emode != 'Elastic':
            self._fixed = math.sqrt(81.787 / self._efixed)

        if self._emode == 'Efixed':
            self._waves = [self._fixed]
            logger.information('Efixed mode, setting lambda_fixed to {0}'.format(self._fixed))
        else:
            wave_range = '__wave_range'
            ExtractSingleSpectrum(InputWorkspace=self._sample_ws_name, OutputWorkspace=wave_range, WorkspaceIndex=0)

            Xin = mtd[wave_range].readX(0)
            wave_min = Xin[0]
            wave_max = Xin[-1]
            number_waves = self._number_wavelengths
            wave_bin = (wave_max - wave_min) / (number_waves-1)

            self._waves = list()
            wave_prog = Progress(self, start=0.07, end=0.10, nreports=number_waves)
            for idx in range(number_waves):
                wave_prog.report('Appending wave data: %i' % idx)
                self._waves.append(wave_min + idx * wave_bin)
            DeleteWorkspace(wave_range, EnableLogging=False)

            if self._emode == 'Elastic':
                self._elastic = self._waves[int(len(self._waves) / 2)]
                logger.information('Elastic lambda : %f' % self._elastic)

            logger.information('Lambda : %i values from %f to %f' % (len(self._waves), self._waves[0], self._waves[-1]))
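
# A minimal standalone sketch of the wavelength-grid logic above, assuming the
# usual neutron relation lambda [Angstrom] = sqrt(81.787 / E [meV]). The names
# below (efixed, number_waves, ...) are illustrative only, not part of the
# algorithm's API.
import math
import numpy as np

efixed = 2.08                                  # fixed analyser energy in meV (example value)
lambda_fixed = math.sqrt(81.787 / efixed)      # ~6.27 Angstrom, the analogue of self._fixed
wave_min, wave_max, number_waves = 6.0, 7.0, 10
# the explicit loop above is equivalent to an evenly spaced grid with both endpoints
waves = np.linspace(wave_min, wave_max, number_waves).tolist()
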
Example #2
    def PyExec(self):
        error = self.getProperty("Error").value
        if error:
            raise RuntimeError('Error in algorithm')
        progress = Progress(self, 0.0, 1.0, 2)
        progress.report('Half way')
        progress.report()
Example #3
    def PyExec(self):
        # Read the state
        state_property_manager = self.getProperty("SANSState").value
        state = create_deserialized_sans_state_from_property_manager(state_property_manager)

        # Run the appropriate SANSLoader and get the workspaces and the workspace monitors
        # Note that cache optimization is only applied to the calibration workspace since it is not available as a
        # return property and it is also something which is most likely not to change between different reductions.
        use_cached = self.getProperty("UseCached").value
        publish_to_ads = self.getProperty("PublishToCache").value

        data = state.data
        progress = self._get_progress_for_file_loading(data)

        # Get the correct SANSLoader from the SANSLoaderFactory
        load_factory = SANSLoadDataFactory()
        loader = load_factory.create_loader(state)

        workspaces, workspace_monitors = loader.execute(data_info=data, use_cached=use_cached,
                                                        publish_to_ads=publish_to_ads, progress=progress,
                                                        parent_alg=self)
        progress.report("Loaded the data.")

        progress_move = Progress(self, start=0.8, end=1.0, nreports=2)
        progress_move.report("Starting to move the workspaces.")
        self._perform_initial_move(workspaces, state)
        progress_move.report("Finished moving the workspaces.")

        # Set output workspaces
        for workspace_type, workspace in workspaces.items():
            self.set_output_for_workspaces(workspace_type, workspace)

        # Set the output monitor workspaces
        for workspace_type, workspace in workspace_monitors.items():
            self.set_output_for_monitor_workspaces(workspace_type, workspace)
    def _sample(self):
        sample_prog = Progress(self, start=0.01, end=0.03, nreports=2)
        sample_prog.report('Setting Sample Material for Sample')
        SetSampleMaterial(InputWorkspace=self._sample_ws_name, ChemicalFormula=self._sample_chemical_formula,
                          SampleNumberDensity=self._sample_number_density)
        sample = mtd[self._sample_ws_name].sample()
        sam_material = sample.getMaterial()
        # total scattering x-section
        self._sig_s = np.zeros(self._number_can)
        self._sig_s[0] = sam_material.totalScatterXSection()
        # absorption x-section
        self._sig_a = np.zeros(self._number_can)
        self._sig_a[0] = sam_material.absorbXSection()
        # density
        self._density = np.zeros(self._number_can)
        self._density[0] = self._sample_number_density

        if self._use_can:
            sample_prog.report('Setting Sample Material for Container')
            SetSampleMaterial(InputWorkspace=self._can_ws_name, ChemicalFormula=self._can_chemical_formula,
                              SampleNumberDensity=self._can_number_density)
            can_sample = mtd[self._can_ws_name].sample()
            can_material = can_sample.getMaterial()
            self._sig_s[1] = can_material.totalScatterXSection()
            self._sig_a[1] = can_material.absorbXSection()
            self._density[1] = self._can_number_density
    def _setup(self):
        setup_prog = Progress(self, start=0.00, end=0.01, nreports=2)
        setup_prog.report('Obtaining input properties')
        self._sample_ws_name = self.getPropertyValue('SampleWorkspace')
        self._sample_density_type = self.getPropertyValue('SampleDensityType')
        self._sample_density = self.getProperty('SampleDensity').value
        self._sample_inner_radius = self.getProperty('SampleInnerRadius').value
        self._sample_outer_radius = self.getProperty('SampleOuterRadius').value
        self._number_can = 1

        self._can_ws_name = self.getPropertyValue('CanWorkspace')
        self._use_can = self._can_ws_name != ''
        self._can_density_type = self.getPropertyValue('CanDensityType')
        self._can_density = self.getProperty('CanDensity').value
        self._can_outer_radius = self.getProperty('CanOuterRadius').value
        if self._use_can:
            self._number_can = 2

        self._step_size = self.getProperty('StepSize').value
        self._radii = np.zeros(self._number_can + 1)
        self._radii[0] = self._sample_inner_radius
        self._radii[1] = self._sample_outer_radius
        if (self._radii[1] - self._radii[0]) < 1e-4:
            raise ValueError('Sample outer radius not > inner radius')
        else:
            logger.information('Sample : inner radius = %f ; outer radius = %f' % (self._radii[0], self._radii[1]))
            self._ms = int((self._radii[1] - self._radii[0] + 0.0001) / self._step_size)
            if self._ms < 20:
                raise ValueError('Number of steps ( %i ) should be >= 20' % self._ms)
            logger.information('Sample : ms = %i ' % self._ms)
        if self._use_can:
            self._radii[2] = self._can_outer_radius
            if (self._radii[2] - self._radii[1]) < 1e-4:
                raise ValueError('Can outer radius not > sample outer radius')
            else:
                logger.information('Can : inner radius = %f ; outer radius = %f' % (self._radii[1], self._radii[2]))
        setup_prog.report('Obtaining beam values')
        beam_width = self.getProperty('BeamWidth').value
        beam_height = self.getProperty('BeamHeight').value
        self._beam = [beam_height,
                      0.5 * beam_width,
                      -0.5 * beam_width,
                      0.5 * beam_width,
                      -0.5 * beam_width,
                      0.0,
                      beam_height,
                      0.0,
                      beam_height]

        self._interpolate = self.getProperty('Interpolate').value
        self._number_wavelengths = self.getProperty('NumberWavelengths').value

        self._emode = self.getPropertyValue('Emode')
        self._efixed = self.getProperty('Efixed').value

        self._output_ws_name = self.getPropertyValue('OutputWorkspace')
    def PyExec(self):
        """Executes the data reduction workflow."""
        progress = Progress(self, 0.0, 1.0, 4)
        subalgLogging = self.getProperty(common.PROP_SUBALG_LOGGING).value == common.SUBALG_LOGGING_ON
        wsNamePrefix = self.getProperty(common.PROP_OUTPUT_WS).valueAsStr
        cleanupMode = self.getProperty(common.PROP_CLEANUP_MODE).value
        wsNames = common.NameSource(wsNamePrefix, cleanupMode)
        wsCleanup = common.IntermediateWSCleanup(cleanupMode, subalgLogging)

        progress.report('Loading inputs')
        mainWS = self._inputWS(wsCleanup)

        progress.report('Applying self shielding corrections')
        mainWS, applied = self._applyCorrections(mainWS, wsNames, wsCleanup, subalgLogging)

        progress.report('Subtracting EC')
        mainWS, subtracted = self._subtractEC(mainWS, wsNames, wsCleanup, subalgLogging)

        if not applied and not subtracted:
            mainWS = self._cloneOnly(mainWS, wsNames, wsCleanup, subalgLogging)

        self._finalize(mainWS, wsCleanup)
        progress.report('Done')
    def PyExec(self):

        setup_prog = Progress(self, start=0.05, end=0.95, nreports=3)

        self._tmp_fit_name = "__fit_ws"
        self._crop_ws(self._sample_ws, self._tmp_fit_name, self._e_min, self._e_max)

        convert_to_hist_alg = self.createChildAlgorithm("ConvertToHistogram", enableLogging=False)
        convert_to_hist_alg.setProperty("InputWorkspace", self._tmp_fit_name)
        convert_to_hist_alg.setProperty("OutputWorkspace", self._tmp_fit_name)
        convert_to_hist_alg.execute()
        mtd.addOrReplace(self._tmp_fit_name, convert_to_hist_alg.getProperty("OutputWorkspace").value)

        self._convert_to_elasticQ(self._tmp_fit_name)

        num_hist = self._sample_ws.getNumberHistograms()
        if self._hist_max is None:
            self._hist_max = num_hist - 1

        setup_prog.report('Fitting 1 peak')
        self._fit(1)
        setup_prog.report('Fitting 2 peaks')
        self._fit(2)
        self._delete_ws(self._tmp_fit_name)

        chi_group = self._output_name + '_ChiSq'
        chi_ws1 = self._output_name + '_1L_ChiSq'
        chi_ws2 = self._output_name + '_2L_ChiSq'
        self._clone_ws(chi_ws1, chi_group)
        self._append(chi_group, chi_ws2, chi_group)
        ws = mtd[chi_group]
        ax = TextAxis.create(2)
        for i, x in enumerate(['1 peak', '2 peaks']):
            ax.setLabel(i, x)
        ws.replaceAxis(1, ax)
        self._delete_ws(chi_ws1)
        self._delete_ws(chi_ws2)

        res_group = self._output_name + '_Result'
        res_ws1 = self._output_name + '_1L_Result'
        res_ws2 = self._output_name + '_2L_Result'
        self._extract(res_ws1, res_group, 1)
        self._extract(res_ws2, '__spectrum', 1)
        self._append(res_group, '__spectrum', res_group)
        self._extract(res_ws2, '__spectrum', 3)
        self._append(res_group, '__spectrum', res_group)
        ws = mtd[res_group]
        ax = TextAxis.create(3)
        for i, x in enumerate(['fwhm.1', 'fwhm.2.1', 'fwhm.2.2']):
            ax.setLabel(i, x)
        ws.replaceAxis(1, ax)
        self._delete_ws(res_ws1)
        self._delete_ws(res_ws2)
        self._delete_ws(self._output_name + '_1L_Parameters')
        self._delete_ws(self._output_name + '_2L_Parameters')
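
# The ChiSq and Result blocks above rely on a recurring Mantid idiom: create a
# TextAxis with one entry per spectrum and swap it in as the vertical axis. A
# minimal hedged sketch of that idiom; 'two_spec_ws' is a hypothetical name for
# a two-spectrum workspace already in the ADS.
from mantid.api import TextAxis
from mantid.simpleapi import mtd

ws = mtd['two_spec_ws']                  # hypothetical workspace
ax = TextAxis.create(2)                  # length must match the number of spectra
for i, label in enumerate(['1 peak', '2 peaks']):
    ax.setLabel(i, label)
ws.replaceAxis(1, ax)                    # axis 1 is the vertical (spectrum) axis
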
Example #8
    def PyExec(self):
        """ Main execution body
        """

        self.vanaws = self.getProperty("VanadiumWorkspace").value       # returns workspace instance
        outws_name = self.getPropertyValue("OutputWorkspace")           # returns workspace name (string)
        eppws = self.getProperty("EPPTable").value
        nhist = self.vanaws.getNumberHistograms()
        prog_reporter = Progress(self, start=0.0, end=1.0, nreports=nhist+1)

        # calculate array of Debye-Waller factors
        dwf = self.calculate_dwf()

        # for each detector: fit gaussian to get peak_centre and fwhm
        # sum data in the range [peak_centre - 3*fwhm, peak_centre + 3*fwhm]
        dataX = self.vanaws.readX(0)
        coefY = np.zeros(nhist)
        coefE = np.zeros(nhist)
        instrument = self.vanaws.getInstrument()
        detID_offset = self.get_detID_offset()
        peak_centre = eppws.column('PeakCentre')
        sigma = eppws.column('Sigma')

        for idx in range(nhist):
            prog_reporter.report("Setting %dth spectrum" % idx)
            dataY = self.vanaws.readY(idx)
            det = instrument.getDetector(idx + detID_offset)
            if np.max(dataY) == 0 or det.isMasked():
                coefY[idx] = 0.
                coefE[idx] = 0.
            else:
                dataE = self.vanaws.readE(idx)
                fwhm = sigma[idx]*2.*np.sqrt(2.*np.log(2.))
                idxmin = (np.fabs(dataX-peak_centre[idx]+3.*fwhm)).argmin()
                idxmax = (np.fabs(dataX-peak_centre[idx]-3.*fwhm)).argmin()
                coefY[idx] = dwf[idx]*sum(dataY[idxmin:idxmax+1])
                coefE[idx] = dwf[idx]*sum(dataE[idxmin:idxmax+1])

        # create X array, X data are the same for all detectors, so
        coefX = np.zeros(nhist)
        coefX.fill(dataX[0])

        create = self.createChildAlgorithm("CreateWorkspace")
        create.setPropertyValue('OutputWorkspace', outws_name)
        create.setProperty('ParentWorkspace', self.vanaws)
        create.setProperty('DataX', coefX)
        create.setProperty('DataY', coefY)
        create.setProperty('DataE', coefE)
        create.setProperty('NSpec', nhist)
        create.setProperty('UnitX', 'TOF')
        create.execute()
        outws = create.getProperty('OutputWorkspace').value

        self.setProperty("OutputWorkspace", outws)
    def _get_angles(self):
        num_hist = mtd[self._sample_ws_name].getNumberHistograms()
        angle_prog = Progress(self, start=0.03, end=0.07, nreports=num_hist)
        source_pos = mtd[self._sample_ws_name].getInstrument().getSource().getPos()
        sample_pos = mtd[self._sample_ws_name].getInstrument().getSample().getPos()
        beam_pos = sample_pos - source_pos
        self._angles = list()
        for index in range(num_hist):
            angle_prog.report('Obtaining data for detector angle %i' % index)
            detector = mtd[self._sample_ws_name].getDetector(index)
            two_theta = detector.getTwoTheta(sample_pos, beam_pos) * 180.0 / math.pi
            self._angles.append(two_theta)
        logger.information('Detector angles : %i from %f to %f ' % (len(self._angles), self._angles[0], self._angles[-1]))
Example #10
    def PyExec(self):
        use_zero_error_free = self.getProperty("UseZeroErrorFree").value
        file_formats = self._get_file_formats()
        file_name = self.getProperty("Filename").value
        workspace = self.getProperty("InputWorkspace").value

        if use_zero_error_free:
            workspace = get_zero_error_free_workspace(workspace)
        progress = Progress(self, start=0.0, end=1.0, nreports=len(file_formats) + 1)
        for file_format in file_formats:
            progress_message = "Saving to {0}.".format(SaveType.to_string(file_format.file_format))
            progress.report(progress_message)
            save_to_file(workspace, file_format, file_name)
        progress.report("Finished saving workspace to files.")
    def PyExec(self):

        self.setUp()

        # total number of (unsummed) runs
        total = self._sample_files.count(',')+self._background_files.count(',')+self._calibration_files.count(',')

        self._progress = Progress(self, start=0.0, end=1.0, nreports=total)

        self._reduce_multiple_runs(self._sample_files, self._SAMPLE)

        if self._background_files:

            self._reduce_multiple_runs(self._background_files, self._BACKGROUND)

            back_ws = self._red_ws + '_' + self._BACKGROUND

            Scale(InputWorkspace=back_ws, Factor=self._back_scaling, OutputWorkspace=back_ws)

            if self._back_option == 'Sum':
                self._integrate(self._BACKGROUND, self._SAMPLE)
            else:
                self._interpolate(self._BACKGROUND, self._SAMPLE)

            self._subtract_background(self._BACKGROUND, self._SAMPLE)

            DeleteWorkspace(back_ws)

        if self._calibration_files:

            self._reduce_multiple_runs(self._calibration_files, self._CALIBRATION)

            if self._background_calib_files:
                self._reduce_multiple_runs(self._background_calib_files, self._BACKCALIB)

                back_calib_ws = self._red_ws + '_' + self._BACKCALIB

                Scale(InputWorkspace=back_calib_ws, Factor=self._back_calib_scaling, OutputWorkspace=back_calib_ws)

                if self._back_calib_option == 'Sum':
                    self._integrate(self._BACKCALIB, self._CALIBRATION)
                else:
                    self._interpolate(self._BACKCALIB, self._CALIBRATION)

                self._subtract_background(self._BACKCALIB, self._CALIBRATION)

                DeleteWorkspace(back_calib_ws)

            if self._calib_option == 'Sum':
                self._integrate(self._CALIBRATION, self._SAMPLE)
            else:
                self._interpolate(self._CALIBRATION, self._SAMPLE)

            self._calibrate()

            DeleteWorkspace(self._red_ws + '_' + self._CALIBRATION)

        self.log().debug('Run files map is : ' + str(self._all_runs))

        self.setProperty('OutputWorkspace', self._red_ws)
    def PyExec(self):

        self.setUp()

        self._filter_all_input_files()

        if self._background_file:
            background = '__background_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._background_file, OutputWorkspace=background, **self._common_args)
            Scale(InputWorkspace=background, Factor=self._back_scaling, OutputWorkspace=background)

        if self._calibration_file:
            calibration = '__calibration_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._calibration_file, OutputWorkspace=calibration, **self._common_args)

            if self._background_calib_files:
                back_calibration = '__calibration_back_' + self._red_ws
                IndirectILLEnergyTransfer(Run=self._background_calib_files, OutputWorkspace=back_calibration, **self._common_args)
                Scale(InputWorkspace=back_calibration, Factor=self._back_calib_scaling, OutputWorkspace=back_calibration)
                Minus(LHSWorkspace=calibration, RHSWorkspace=back_calibration, OutputWorkspace=calibration)

            # MatchPeaks does not play nicely with the ws groups
            for ws in mtd[calibration]:
                MatchPeaks(InputWorkspace=ws.getName(), OutputWorkspace=ws.getName(), MaskBins=True, BinRangeTable='')

            Integration(InputWorkspace=calibration, RangeLower=self._peak_range[0], RangeUpper=self._peak_range[1],
                        OutputWorkspace=calibration)
            self._warn_negative_integral(calibration, 'in calibration run.')

        if self._unmirror_option == 5 or self._unmirror_option == 7:
            alignment = '__alignment_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._alignment_file, OutputWorkspace=alignment, **self._common_args)

        runs = self._sample_file.split(',')

        self._progress = Progress(self, start=0.0, end=1.0, nreports=len(runs))

        for run in runs:
            self._reduce_run(run)

        if self._background_file:
            DeleteWorkspace(background)

        if self._calibration_file:
            DeleteWorkspace(calibration)

        if self._background_calib_files:
            DeleteWorkspace(back_calibration)

        if self._unmirror_option == 5 or self._unmirror_option == 7:
            DeleteWorkspace(alignment)

        GroupWorkspaces(InputWorkspaces=self._ws_list, OutputWorkspace=self._red_ws)

        # unhide the final workspaces, i.e. remove the __ prefix
        for ws in mtd[self._red_ws]:
            RenameWorkspace(InputWorkspace=ws, OutputWorkspace=ws.getName()[2:])

        self.setProperty('OutputWorkspace', self._red_ws)
    def _rebin_result(self):                    #apply rebinning
        rebin_prog = Progress(self, start=0.0, end=0.8, nreports=3)
        rebin_prog.report('Rebin result ')

        logger.information('Rebin option : ' + self._rebin_option)
        qrange = ''
        if mtd.doesExist(self._sofq):            # check if the S(Q) workspace exists
            logger.information('Sofq data from Workspace : %s' % self._sofq)
        else:                                    # read from an nxs file
            sofq_path = FileFinder.getFullPath(self._sofq + '.nxs')
            LoadNexusProcessed(Filename=sofq_path,
                               OutputWorkspace=self._sofq,
                               EnableLogging=False)
            logger.information('Sq data from File : %s' % sofq_path)
        rebin_logs = [('rebin_option', self._rebin_option)]
        if self._rebin_option != 'None':          #rebin to be applied
            rebin_logs.append(('rebin_qrange', self._rebin_qrange))
            logger.information('Rebin qrange : %s' % self._rebin_qrange)
            if self._rebin_qrange == 'New':          #new Q range
                mtd[self._final_q].setDistribution(True)
                xs = mtd[self._final_q].readX(0)
                new_dq = float(self._rebin_qinc)       #increment in Q
                xmax = (int(xs[-1] / new_dq) + 1) * new_dq    #find number of points & Q max
                qrange = '0.0, %f, %f' % (new_dq, xmax)   #create Q range
                self._rebin(self._final_q, self._final_q, qrange)
                x = mtd[self._final_q].readX(0)
                xshift = 0.5 * (x[0] - x[1])
                self._scale_x(self._final_q, self._final_q, xshift)
                logger.information('Output S(Q) rebinned for range : %s' % qrange)
            if self._rebin_qrange == 'Snap':         #use input Q range
                gR = mtd[self._sofq].getRun()      #input S(Q) WS
                stype = gR.getLogData('input_type').value
                logger.information('Rebin option : %s' % self._rebin_option)
                if stype != 'Q':             #check input was in Q
                    raise ValueError('Input type must be Q for Snap option')
                if self._rebin_option == 'Interpolate':
                    self._rebin_ws(self._final_q, self._sofq, self._final_q)
                    logger.information('Output S(Q) interpolated to input S(Q) : %s' % self._sofq)
                if self._rebin_option == 'Spline':
                    self._spline_interp(self._sofq, self._final_q, self._final_q, '', 2)
                    logger.information('Output S(Q) spline interpolated to input S(Q) :%s ' % self._sofq)
                    rebin_logs.append(('rebin_Q_file', self._sofq))
        log_names = [item[0] for item in rebin_logs]
        log_values = [item[1] for item in rebin_logs]
#        self._add_sample_log_mult(self._final_q, log_names, log_values)
        logger.information('Corrected WS created : %s' % self._final_q)
Example #14
    def PyExec(self):
        from IndirectCommon import getWSprefix
        if self._create_output:
            self._out_ws_table = self.getPropertyValue('OutputWorkspaceTable')

        # Process vanadium workspace
        van_ws = ConvertSpectrumAxis(InputWorkspace=self._van_ws,
                                     OutputWorkspace='__ResNorm_vanadium',
                                     Target='ElasticQ',
                                     EMode='Indirect')

        num_hist = van_ws.getNumberHistograms()

        v_values = van_ws.getAxis(1).extractValues()
        v_unit = van_ws.getAxis(1).getUnit().unitID()

        # Process resolution workspace
        padded_res_ws = self._process_res_ws(num_hist)
        prog_namer = Progress(self, start=0.0, end=0.02, nreports=num_hist)
        input_str = ''
        for idx in range(num_hist):
            input_str += '%s,i%d;' % (padded_res_ws, idx)
            prog_namer.report('Generating PlotPeak input string')

        out_name = getWSprefix(self._res_ws) + 'ResNorm_Fit'
        function = 'name=TabulatedFunction,Workspace=%s,Scaling=1,Shift=0,XScaling=1,ties=(Shift=0)' % self._van_ws

        plot_peaks = self.createChildAlgorithm(name='PlotPeakByLogValue', startProgress=0.02, endProgress=0.94, enableLogging=True)
        plot_peaks.setProperty('Input', input_str)
        plot_peaks.setProperty('OutputWorkspace', out_name)
        plot_peaks.setProperty('Function', function)
        plot_peaks.setProperty('FitType', 'Individual')
        plot_peaks.setProperty('PassWSIndexToFunction', True)
        plot_peaks.setProperty('CreateOutput', self._create_output)
        plot_peaks.setProperty('StartX', self._e_min)
        plot_peaks.setProperty('EndX', self._e_max)
        plot_peaks.execute()
        fit_params = plot_peaks.getProperty('OutputWorkspace').value

        params = {'XScaling': 'Stretch', 'Scaling': 'Intensity'}
        result_workspaces = []
        prog_process = Progress(self, start=0.94, end=1.0, nreports=3)
        for param_name, output_name in params.items():
            result_workspaces.append(self._process_fit_params(fit_params, param_name, v_values, v_unit, output_name))
            prog_process.report('Processing Fit data')

        GroupWorkspaces(InputWorkspaces=result_workspaces,
                        OutputWorkspace=self._out_ws)
        self.setProperty('OutputWorkspace', self._out_ws)

        DeleteWorkspace(van_ws)
        DeleteWorkspace(padded_res_ws)
        prog_process.report('Deleting workspaces')

        if self._create_output:
            self.setProperty('OutputWorkspaceTable', fit_params)
Example #15
    def PyExec(self):
        """ Main execution body
        """

        # returns workspace instance
        self.vanaws = self.getProperty("VanadiumWorkspace").value
        # returns table workspace instance
        eppws = self.getProperty("EPPTable").value
        nhist = self.vanaws.getNumberHistograms()
        prog_reporter = Progress(self, start=0.8, end=1.0, nreports=3)
        integrate = self.createChildAlgorithm("IntegrateEPP", startProgress=0.0, endProgress=0.8, enableLogging=False)
        integrate.setProperty("InputWorkspace", self.vanaws)
        integrate.setProperty("OutputWorkspace", "__unused_for_child")
        integrate.setProperty("EPPWorkspace", eppws)
        width = 3. * 2. * np.sqrt(2. * np.log(2.))
        integrate.setProperty("HalfWidthInSigmas", width)
        integrate.execute()
        prog_reporter.report("Computing DWFs")
        outws = integrate.getProperty("OutputWorkspace").value
        # calculate array of Debye-Waller factors
        prog_reporter.report("Applying DWFs")
        dwf = self.calculate_dwf()
        for idx in range(nhist):
            ys = outws.dataY(idx)
            ys /= dwf[idx]
            es = outws.dataE(idx)
            es /= dwf[idx]
        prog_reporter.report("Done")
        self.setProperty("OutputWorkspace", outws)
    def PyExec(self):
        workspace = get_input_workspace_as_copy_if_not_same_as_output_workspace(self)

        progress = Progress(self, start=0.0, end=1.0, nreports=3)

        # Convert the units into wavelength
        progress.report("Converting workspace to wavelength units.")
        workspace = self._convert_units_to_wavelength(workspace)

        # Get the rebin option
        rebin_type = RebinType.from_string(self.getProperty("RebinMode").value)
        rebin_string = self._get_rebin_string(workspace)
        if rebin_type is RebinType.Rebin:
            rebin_options = {"InputWorkspace": workspace,
                             "PreserveEvents": True,
                             "Params": rebin_string}
        else:
            rebin_options = {"InputWorkspace": workspace,
                             "Params": rebin_string}

        # Perform the rebin
        progress.report("Performing rebin.")
        workspace = self._perform_rebin(rebin_type, rebin_options, workspace)

        append_to_sans_file_tag(workspace, "_toWavelength")
        self.setProperty("OutputWorkspace", workspace)
        progress.report("Finished converting to wavelength.")
Example #17
    def PyExec(self):
        # Read the state
        state_property_manager = self.getProperty("SANSState").value
        state = create_deserialized_sans_state_from_property_manager(state_property_manager)

        progress = Progress(self, start=0.0, end=1.0, nreports=3)
        input_workspace = self.getProperty("InputWorkspace").value

        data_type_as_string = self.getProperty("DataType").value
        data_type = DataType.from_string(data_type_as_string)

        slicer = SliceEventFactory.create_slicer(state, input_workspace, data_type)
        slice_info = state.slice

        # Perform the slicing
        progress.report("Starting to slice the workspace.")
        sliced_workspace, slice_factor = slicer.create_slice(input_workspace, slice_info)

        # Scale the monitor accordingly
        progress.report("Scaling the monitors.")
        self.scale_monitors(slice_factor)

        # Set the outputs
        append_to_sans_file_tag(sliced_workspace, "_sliced")
        self.setProperty("OutputWorkspace", sliced_workspace)
        self.setProperty("SliceEventFactor", slice_factor)
        progress.report("Finished slicing.")
Example #18
    def PyExec(self):
        self._setup()

        self._calculate_parameters()

        if not self._dry_run:
            self._transform()

            self._add_logs()

        else:
            skip_prog = Progress(self, start=0.3, end=1.0, nreports=2)
            skip_prog.report('skipping transform')
            skip_prog.report('skipping add logs')
            logger.information('Dry run, will not run TransformToIqt')

        self.setProperty('ParameterWorkspace', self._parameter_table)
        self.setProperty('OutputWorkspace', self._output_workspace)
    def _subtract_corr(self):    #subtract corrections from input to give _data_used & _result
        calc_prog = Progress(self, start=0.0, end=0.8, nreports=3)
        calc_prog.report('Subtract corrections ')

        logger.information('Subtracting corrections')
        self._data_used = self._data + '_used'
        if self._smooth:                                # select which histogram to use
            index = 1
        else:
            index = 0
        self._extract(self._data, self._data_used, index)

        self._extract(self._corr, '__wcr', 0)
        wsc1 = 'S-1C'    #1 term subtracted
        self._plus(self._data_used, '__wcr', wsc1)
        wsc2 = 'S-2C'    #2 terms subtracted
        self._extract(self._corr, '__wcr', 1)
        self._plus(wsc1, '__wcr', wsc2)
        wsc3 = 'S-3C'    #3 terms subtracted
        self._extract(self._corr, '__wcr', 2)
        self._plus(wsc2, '__wcr', wsc3)
        wsc4 = 'S-4C'    #4 terms subtracted
        self._extract(self._corr, '__wcr', 3)
        self._plus(wsc3, '__wcr', wsc4)

        self._result = self._data + '_result'             #results WS
        self._clone_ws(wsc1, self._result)
        self._append(self._result, wsc2, self._result)
        self._append(self._result, wsc3, self._result)
        self._append(self._result, wsc4, self._result)

        ax = TextAxis.create(4)
        for i, x in enumerate(['S-1C', 'S-2C', 'S-3C', 'S-4C']):
            ax.setLabel(i, x)
        mtd[self._result].replaceAxis(1, ax)

        subtract_logs = [('smooth', self._smooth)]
        log_names = [item[0] for item in subtract_logs]
        log_values = [item[1] for item in subtract_logs]
        self._add_sample_log_mult(self._result, log_names, log_values)
        workspaces = ['__wcr', wsc1, wsc2, wsc3, wsc4]
        for ws in workspaces:
            self._delete_ws(ws)
        logger.information('Results in WS %s' % self._result)
    def _corr_terms(self):   #calculates the correction terms = coef*deriv as _corr
        calc_prog = Progress(self, start=0.0, end=0.8, nreports=3)
        calc_prog.report('Correction terms ')

        logger.information('Calculating Correction terms')
        self._corr = self._data + '_corr'              #corrections WS
        self._extract(self._deriv, '__temp', 0)
        self._spline_interp('__temp', self._coeff, self._coeff, '', 2)
        self._multiply(self._coeff, self._deriv, self._corr)

        ax = TextAxis.create(4)
        for i, x in enumerate(['Corr.1', 'Corr.2', 'Corr.3', 'Corr.4']):
            ax.setLabel(i, x)
        mtd[self._corr].replaceAxis(1, ax)

        self._copy_log(self._mome, self._corr, 'MergeKeepExisting')
        self._delete_ws('__temp')
        logger.information('Correction terms WS created : %s' % self._corr)
        calc_prog.report('Correction terms completed')
    def _sample(self):
        sample_prog = Progress(self, start=0.01, end=0.03, nreports=2)
        sample_prog.report('Setting Sample Material for Sample')

        sample_ws, self._sample_density = self._set_material(self._sample_ws_name,
                                                             self._set_sample_method,
                                                             self._sample_chemical_formula,
                                                             self._sample_coherent_cross_section,
                                                             self._sample_incoherent_cross_section,
                                                             self._sample_attenuation_cross_section,
                                                             self._sample_density_type,
                                                             self._sample_density,
                                                             self._sample_number_density_unit)

        sample_material = sample_ws.sample().getMaterial()
        # total scattering x-section
        self._sig_s = np.zeros(self._number_can)
        self._sig_s[0] = sample_material.totalScatterXSection()
        # absorption x-section
        self._sig_a = np.zeros(self._number_can)
        self._sig_a[0] = sample_material.absorbXSection()
        # density
        self._density = np.zeros(self._number_can)
        self._density[0] = self._sample_density

        if self._use_can:
            sample_prog.report('Setting Sample Material for Container')

            can_ws, self._can_density = self._set_material(self._can_ws_name,
                                                           self._set_can_method,
                                                           self._can_chemical_formula,
                                                           self._can_coherent_cross_section,
                                                           self._can_incoherent_cross_section,
                                                           self._can_attenuation_cross_section,
                                                           self._can_density_type,
                                                           self._can_density,
                                                           self._can_number_density_unit)

            can_material = can_ws.sample().getMaterial()
            self._sig_s[1] = can_material.totalScatterXSection()
            self._sig_a[1] = can_material.absorbXSection()
            self._density[1] = self._can_density
Example #22
    def PyExec(self):
        # Read the state
        state_property_manager = self.getProperty("SANSState").value
        state = create_deserialized_sans_state_from_property_manager(state_property_manager)

        component = self._get_component()

        # Get the correct SANS masking strategy from create_masker
        workspace = self.getProperty("Workspace").value
        masker = create_masker(state, component)

        # Perform the masking
        number_of_masking_options = 7
        progress = Progress(self, start=0.0, end=1.0, nreports=number_of_masking_options)
        mask_info = state.mask
        workspace = masker.mask_workspace(mask_info, workspace, component, progress)

        append_to_sans_file_tag(workspace, "_masked")
        self.setProperty("Workspace", workspace)
        progress.report("Completed masking the workspace")
    def _cut_result(self):                  # cut off high-angle data; output _final_theta as *_theta_corrected
        calc_prog = Progress(self, start=0.0, end=0.8, nreports=3)
        calc_prog.report('Cut result ')

        logger.information('Cutting result')
        logger.information('Number of terms used : %i' % (self._nterms))
        temp_ws = '__final'
        self._extract(self._result, temp_ws, self._nterms - 1)
        self._final_theta = self._data + '_corrected'   #theta corrected WS
        final_list = ['S-1C', 'S-2C', 'S-3C', 'S-4C']   #names for hist
        icut = 0
        cut_pt = 0
        cut_logs = [('correct_terms', self._nterms), ('cutoff', self._cutoff)]
        if self._cutoff:
            xs = mtd[self._data_used].readX(0)             #S(theta) data
            ys = mtd[self._data_used].readY(0)
            es = mtd[self._data_used].readE(0)
            xf = np.array(mtd[temp_ws].readX(0))
            yf = np.array(mtd[temp_ws].readY(0))
            ef = np.array(mtd[temp_ws].readE(0))
            icut = np.where(self._cutoff_pt > xf)[0][-1]
            cut_pt = xf[icut]                      #x-value for cutoff
            logger.information('Corrected data cutoff at : %f' % (cut_pt))
            xnew = np.array(xf[:icut])                #start off new array
            xnew = np.append(xnew,np.array(xs[icut:]))  #append input data
            ynew = np.array(yf[:icut])                #start off new array
            ynew = np.append(ynew,np.array(ys[icut:]))  #append input data
            enew = np.array(ef[:icut])
            enew = np.append(enew,np.array(es[icut:]))
            self._create_ws(self._final_theta, xnew, ynew, enew, 1, final_list[self._nterms - 1])
            cut_logs.append(('cutoff_point', cut_pt))
            self._delete_ws(temp_ws)
        else:
            self._rename_ws(temp_ws, self._final_theta)
            logger.information('Corrected data NOT cutoff')
        self._copy_log(self._result, self._final_theta, 'MergeReplaceExisting')
        log_names = [item[0] for item in cut_logs]
        log_values = [item[1] for item in cut_logs]
        self._add_sample_log_mult(self._final_theta, log_names, log_values)
        logger.information('Corrected WS created : ' + self._final_theta)
    def _convert_result(self):                  # convert the corrected theta data, output as *_Q_corrected
        convert_prog = Progress(self, start=0.0, end=0.8, nreports=3)
        convert_prog.report('Converting result to Q')

        #convert *_theta_corrected to *_Q_corrected
        k0 = 4.0 * math.pi / self._lambda

        # Create a copy of the theta workspace and convert to Q
        self._clone_ws(self._corr, self._final_q)
        x_q = mtd[self._final_q].dataX(0)
        x_q = k0 * np.sin(0.5 * np.radians(x_q - self._azero))    #convert to Q after applying zero angle correction
        mtd[self._final_q].setX(0, x_q)
        mtd[self._final_q].getAxis(0).setUnit("MomentumTransfer")

        self._copy_log(self._corr, self._final_q, 'MergeReplaceExisting')
        convert_logs = [('lambda_out', self._lambda), ('zero_out', self._azero)]
        logger.information('Converting : %s ; from theta to Q as : %s' % (self._corr, self._final_q))
        logger.information('lambda = %f ; zero = %f' % (self._lambda, self._azero))
        log_names = [item[0] for item in convert_logs]
        log_values = [item[1] for item in convert_logs]
        self._add_sample_log_mult(self._final_q, log_names, log_values)
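
# A minimal standalone sketch of the theta-to-Q mapping used in
# _convert_result above, assuming elastic scattering:
# Q = (4*pi / lambda) * sin((2theta - zero_offset) / 2). Values are illustrative.
import numpy as np

wavelength = 1.0                                  # Angstrom
zero_offset = 0.0                                 # zero-angle correction, degrees
two_theta = np.array([10., 45., 90.])             # scattering angles, degrees
k0 = 4.0 * np.pi / wavelength
q = k0 * np.sin(0.5 * np.radians(two_theta - zero_offset))   # inverse Angstrom
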
    def PyExec(self):
        """Executes the data reduction workflow."""
        progress = Progress(self, 0.0, 1.0, 4)
        subalgLogging = self.getProperty(common.PROP_SUBALG_LOGGING).value == common.SUBALG_LOGGING_ON
        cleanupMode = self.getProperty(common.PROP_CLEANUP_MODE).value
        wsCleanup = common.IntermediateWSCleanup(cleanupMode, subalgLogging)

        progress.report('Loading inputs')
        mainWS = self._inputWS(wsCleanup)

        progress.report('Integrating')
        mainWS = self._integrate(mainWS, wsCleanup, subalgLogging)

        progress.report('Masking zeros')
        mainWS = self._maskZeros(mainWS, subalgLogging)

        self._finalize(mainWS, wsCleanup)
        progress.report('Done')
Example #26
File: SANSCrop.py, Project: DanNixon/mantid
    def PyExec(self):
        # Get the correct SANS move strategy from the SANSMaskFactory
        workspace = self.getProperty("InputWorkspace").value

        # Component to crop
        component = self._get_component(workspace)

        progress = Progress(self, start=0.0, end=1.0, nreports=2)
        progress.report("Starting to crop component {0}".format(component))

        # Crop to the component
        crop_name = "CropToComponent"
        crop_options = {"InputWorkspace": workspace,
                        "OutputWorkspace": EMPTY_NAME,
                        "ComponentNames": component}
        crop_alg = create_unmanaged_algorithm(crop_name, **crop_options)
        crop_alg.execute()
        output_workspace = crop_alg.getProperty("OutputWorkspace").value

        # Change the file tag and set the output
        append_to_sans_file_tag(output_workspace, "_cropped")
        self.setProperty("OutputWorkspace", output_workspace)
        progress.report("Finished cropping")
    def _read_sofq(self):  #reads the structure data
        read_prog = Progress(self, start=0.0, end=0.1, nreports=3)
        read_prog.report('Reading data ')

        asc = []
        with open(self._input_path, 'r') as handle:
            for line in handle:                # read lines into the list 'asc'
                asc.append(line.rstrip())
        len_asc = len(asc)
        len_head, head = self._read_header(asc)        #find header block
        self._sofq_header = [('input_type', self._type), ('sofq_lines', len_head)]
        for m in range(0, len_head):          #number of header lines
            self._sofq_header.append(('sofq_%i' % (m), head[m]))

        #get data from list
        x, y, e = self._read_sofq_data(asc, len_head, len_asc)
        dataX = np.array(x)
        dataY = np.array(y)
        dataE = np.array(e)
        self._temp = '__temp'
        self._create_ws(self._temp, dataX, dataY, dataE)
        log_names = [item[0] for item in self._sofq_header]
        log_values = [item[1] for item in self._sofq_header]
        self._add_sample_log_mult(self._temp, log_names, log_values)
        if self._type == 'angle':       #_input_ws = _theta_temp
            self._clone_ws(self._temp, self._theta_ws)
            unitx = mtd[self._theta_ws].getAxis(0).setUnit("Label")
            unitx.setLabel('2theta', 'deg')

        if self._type == 'Q':
            self._clone_ws(self._temp, self._q_ws)
            self._q_to_theta()       #converts _input_ws from Q to theta in _theta_tmp

        read_prog.report('Reading data completed')
    def _sample(self):
        sample_prog = Progress(self, start=0.01, end=0.03, nreports=2)
        sample_prog.report('Setting Sample Material for Sample')

        sample_chemical_formula = self.getPropertyValue('SampleChemicalFormula')

        sample_ws, self._sample_density = self._set_material(self._sample_ws_name,
                                                             sample_chemical_formula,
                                                             self._sample_density_type,
                                                             self._sample_density)

        sample_material = sample_ws.sample().getMaterial()
        # total scattering x-section
        self._sig_s = np.zeros(self._number_can)
        self._sig_s[0] = sample_material.totalScatterXSection()
        # absorption x-section
        self._sig_a = np.zeros(self._number_can)
        self._sig_a[0] = sample_material.absorbXSection()
        # density
        self._density = np.zeros(self._number_can)
        self._density[0] = self._sample_density

        if self._use_can:
            sample_prog.report('Setting Sample Material for Container')

            can_chemical_formula = self.getPropertyValue('CanChemicalFormula')

            can_ws, self._can_density = self._set_material(self._can_ws_name,
                                                           can_chemical_formula,
                                                           self._can_density_type,
                                                           self._can_density)

            can_material = can_ws.sample().getMaterial()
            self._sig_s[1] = can_material.totalScatterXSection()
            self._sig_a[1] = can_material.absorbXSection()
            self._density[1] = self._can_density
Example #29
    def PyExec(self):
        # Read the state
        state_property_manager = self.getProperty("SANSState").value
        state = create_deserialized_sans_state_from_property_manager(state_property_manager)

        # Get the correct SANS move strategy from the SANSMoveFactory
        workspace = self.getProperty("Workspace").value
        move_factory = SANSMoveFactory()
        mover = move_factory.create_mover(workspace)

        # Get the selected component and the beam coordinates
        move_info = state.move
        full_component_name = self._get_full_component_name(move_info)
        coordinates = self._get_coordinates(move_info, full_component_name)

        # Get which move operation the user wants to perform on the workspace. This can be:
        # 1. Initial move: Suitable when a workspace has been freshly loaded.
        # 2. Elementary displacement: Takes the degrees of freedom of the detector into account. This is normally used
        #    for beam center finding
        # 3. Set to zero: Set the component to its zero position
        progress = Progress(self, start=0.0, end=1.0, nreports=2)
        selected_move_type = self._get_move_type()

        if selected_move_type is MoveType.ElementaryDisplacement:
            progress.report("Starting elementary displacement")
            mover.move_with_elementary_displacement(move_info, workspace, coordinates, full_component_name)
        elif selected_move_type is MoveType.InitialMove:
            is_transmission_workspace = self.getProperty("IsTransmissionWorkspace").value
            progress.report("Starting initial move.")
            mover.move_initial(move_info, workspace, coordinates, full_component_name, is_transmission_workspace)
        elif selected_move_type is MoveType.SetToZero:
            progress.report("Starting set to zero.")
            mover.set_to_zero(move_info, workspace, full_component_name)
        else:
            raise ValueError("SANSMove: The selection {0} for the  move type "
                             "is unknown".format(str(selected_move_type)))
        progress.report("Completed move.")
Example #30
    def PyExec(self):
        data_type = 'Raw'
        if self.getProperty('UseCalibratedData').value:
            data_type = 'Calibrated'
        align_tubes = self.getProperty('AlignTubes').value

        self._progress = Progress(self, start=0.0, end=1.0, nreports=6)
        self._progress.report('Loading data')
        # Do not merge the runs yet, since it will break the calibration
        # Load and calibrate separately, then SumOverlappingTubes will merge correctly
        # Besides + here does not make sense, and it will also slow down D2B a lot
        input_workspace = LoadAndMerge(Filename=self.getPropertyValue('Run').replace('+', ','),
                                       LoaderName='LoadILLDiffraction',
                                       LoaderOptions={'DataType': data_type, 'AlignTubes': align_tubes})
        # We might already have a group, but group just in case
        input_group = GroupWorkspaces(InputWorkspaces=input_workspace)

        instrument = input_group[0].getInstrument()
        self._check_instrument(instrument)

        input_group = self._normalise_input(input_group, instrument)
        self._apply_calibration(input_group)
        self._do_masking(input_group)
        self._get_height_range()

        output_workspaces = []
        self._out_ws_name = self.getPropertyValue('OutputWorkspace')
        self._mirror = False
        self._crop_negative = self.getProperty('CropNegativeScatteringAngles').value
        if instrument.hasParameter("mirror_scattering_angles"):
            self._mirror = instrument.getBoolParameter("mirror_scattering_angles")[0]
        self._final_mask = self.getProperty('FinalMask').value
        self._component_cropping(input_group)

        self._do_reduction(input_group, output_workspaces)

        self._progress.report('Finishing up...')
        DeleteWorkspace('input_group')
        GroupWorkspaces(InputWorkspaces=output_workspaces, OutputWorkspace=self._out_ws_name)
        self.setProperty('OutputWorkspace', self._out_ws_name)
    def PyExec(self):
        """ Main execution body
        """
        input_ws = self.getProperty("InputWorkspace").value
        outws_name = self.getPropertyValue("OutputWorkspace")
        choice_tof = self.getProperty("ChoiceElasticTof").value

        run = input_ws.getRun()
        nb_hist = input_ws.getNumberHistograms()

        prog_reporter = Progress(self, start=0.0, end=1.0,
                                 nreports=nb_hist + 1)  # extra call below when summing

        # find elastic time channel
        tof_elastic = np.zeros(nb_hist)
        channel_width = float(run.getLogData('channel_width').value)
        # t_el = epp_channel_number*channel_width + xmin
        t_el_default = float(
            run.getLogData('EPP').value) * channel_width + input_ws.readX(0)[0]

        if choice_tof == 'Geometry':
            # tof_elastic from header of raw datafile start guess on peak position
            tof_elastic.fill(t_el_default)

        if choice_tof == 'FitSample':
            prog_reporter.report("Fit function")
            for idx in range(nb_hist):
                tof_elastic[idx] = api.FitGaussian(input_ws, idx)[0]

        if choice_tof == 'FitVanadium':
            vanaws = self.getProperty("VanadiumWorkspace").value
            prog_reporter.report("Fit function")
            for idx in range(nb_hist):
                tof_elastic[idx] = api.FitGaussian(vanaws, idx)[0]

        self.log().debug("Tel = " + str(tof_elastic))

        outws = api.CloneWorkspace(input_ws, OutputWorkspace=outws_name)

        # mask detectors with EPP=0
        zeros = np.where(tof_elastic == 0)[0]
        if len(zeros) > 0:
            self.log().warning("Detectors " + str(zeros) +
                               " have EPP=0 and will be masked.")
            api.MaskDetectors(outws, DetectorList=zeros)
            # it makes sense to convert units even for masked detectors; take the EPP guess for that
            for idx in zeros:
                tof_elastic[idx] = t_el_default

        instrument = outws.getInstrument()
        sample = instrument.getSample()
        factor = sp.constants.m_n * 1e+15 / sp.constants.eV

        # calculate new values for dataX and dataY
        for idx in range(nb_hist):
            prog_reporter.report("Processing spectrum %d" % idx)
            det = instrument.getDetector(
                outws.getSpectrum(idx).getDetectorIDs()[0])
            xbins = input_ws.readX(idx)  # take bin boundaries
            tof = xbins[:-1] + 0.5 * channel_width  # take middle of each bin
            sdd = det.getDistance(sample)
            # calculate new I = t^3*I(t)/(factor*sdd^2*dt)
            dataY = input_ws.readY(idx) * tof**3 / (factor * channel_width *
                                                    sdd * sdd)
            dataE = input_ws.readE(idx) * tof**3 / (factor * channel_width *
                                                    sdd * sdd)
            outws.setY(idx, dataY)
            outws.setE(idx, dataE)
            # calculate dE = factor*0.5*sdd^2*(1/t_el^2 - 1/t^2)
            dataX = 0.5 * factor * sdd * sdd * (1 / tof_elastic[idx]**2 -
                                                1 / xbins**2)
            outws.setX(idx, dataX)

        outws.getAxis(0).setUnit('DeltaE')
        outws.setDistribution(True)

        self.setProperty("OutputWorkspace", outws)
Example #32
    def PyExec(self):

        self.setUp()

        self._filter_all_input_files()

        if self._background_file:
            background = '__background_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._background_file,
                                      OutputWorkspace=background,
                                      **self._common_args)
            Scale(InputWorkspace=background,
                  Factor=self._back_scaling,
                  OutputWorkspace=background)

        if self._calibration_file:
            calibration = '__calibration_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._calibration_file,
                                      OutputWorkspace=calibration,
                                      **self._common_args)

            if self._background_calib_files:
                back_calibration = '__calibration_back_' + self._red_ws
                IndirectILLEnergyTransfer(Run=self._background_calib_files,
                                          OutputWorkspace=back_calibration,
                                          **self._common_args)
                Scale(InputWorkspace=back_calibration,
                      Factor=self._back_calib_scaling,
                      OutputWorkspace=back_calibration)
                Minus(LHSWorkspace=calibration,
                      RHSWorkspace=back_calibration,
                      OutputWorkspace=calibration)

            # MatchPeaks does not play nicely with the ws groups
            for ws in mtd[calibration]:
                MatchPeaks(InputWorkspace=ws.getName(),
                           OutputWorkspace=ws.getName(),
                           MaskBins=True,
                           BinRangeTable='')

            Integration(InputWorkspace=calibration,
                        RangeLower=self._peak_range[0],
                        RangeUpper=self._peak_range[1],
                        OutputWorkspace=calibration)
            self._warn_negative_integral(calibration, 'in calibration run.')

        if self._unmirror_option == 5 or self._unmirror_option == 7:
            alignment = '__alignment_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._alignment_file,
                                      OutputWorkspace=alignment,
                                      **self._common_args)

        runs = self._sample_file.split(',')

        self._progress = Progress(self, start=0.0, end=1.0, nreports=len(runs))

        for run in runs:
            self._reduce_run(run)

        if self._background_file:
            DeleteWorkspace(background)

        if self._calibration_file:
            DeleteWorkspace(calibration)

        if self._background_calib_files:
            DeleteWorkspace(back_calibration)

        if self._unmirror_option == 5 or self._unmirror_option == 7:
            DeleteWorkspace(alignment)

        GroupWorkspaces(InputWorkspaces=self._ws_list,
                        OutputWorkspace=self._red_ws)

        # unhide the final workspaces, i.e. remove __ prefix
        for ws in mtd[self._red_ws]:
            RenameWorkspace(InputWorkspace=ws,
                            OutputWorkspace=ws.getName()[2:])

        self.setProperty('OutputWorkspace', self._red_ws)
Example #33
class IndirectILLReductionFWS(PythonAlgorithm):

    _SAMPLE = 'sample'
    _BACKGROUND = 'background'
    _CALIBRATION = 'calibration'
    _BACKCALIB = 'calibrationBackground'

    _sample_files = None
    _background_files = None
    _calibration_files = None
    _background_calib_files = None
    _observable = None
    _sortX = None
    _red_ws = None
    _back_scaling = None
    _back_calib_scaling = None
    _criteria = None
    _progress = None
    _back_option = None
    _calib_option = None
    _back_calib_option = None
    _common_args = {}
    _all_runs = None
    _discard_sds = None

    def category(self):
        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"

    def summary(self):
        return 'Performs fixed-window scan (FWS) multiple file reduction (both elastic and inelastic) ' \
               'for ILL indirect geometry data, instrument IN16B.'

    def seeAlso(self):
        return ["IndirectILLReductionQENS", "IndirectILLEnergyTransfer"]

    def name(self):
        return "IndirectILLReductionFWS"

    def PyInit(self):

        self.declareProperty(MultipleFileProperty('Run', extensions=['nxs']),
                             doc='Run number(s) of sample run(s).')

        self.declareProperty(
            MultipleFileProperty('BackgroundRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc='Run number(s) of background (empty can) run(s).')

        self.declareProperty(
            MultipleFileProperty('CalibrationRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc='Run number(s) of vanadium calibration run(s).')

        self.declareProperty(
            MultipleFileProperty('CalibrationBackgroundRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc='Run number(s) of background (empty can) run(s) for vanadium run.')

        self.declareProperty(name='Observable',
                             defaultValue='sample.temperature',
                             doc='Scanning observable, a Sample Log entry.')

        self.declareProperty(name='SortXAxis',
                             defaultValue=False,
                             doc='Whether or not to sort the x-axis.')

        self.declareProperty(name='BackgroundScalingFactor',
                             defaultValue=1.,
                             validator=FloatBoundedValidator(lower=0),
                             doc='Scaling factor for background subtraction')

        self.declareProperty(name='CalibrationBackgroundScalingFactor',
                             defaultValue=1.,
                             validator=FloatBoundedValidator(lower=0),
                             doc='Scaling factor for background subtraction for vanadium calibration')

        self.declareProperty(
            name='BackgroundOption',
            defaultValue='Sum',
            validator=StringListValidator(['Sum', 'Interpolate']),
            doc='Whether to sum or interpolate the background runs.')

        self.declareProperty(
            name='CalibrationOption',
            defaultValue='Sum',
            validator=StringListValidator(['Sum', 'Interpolate']),
            doc='Whether to sum or interpolate the calibration runs.')

        self.declareProperty(name='CalibrationBackgroundOption',
                             defaultValue='Sum',
                             validator=StringListValidator(['Sum', 'Interpolate']),
                             doc='Whether to sum or interpolate the background run for calibration runs.')

        self.declareProperty(
            FileProperty('MapFile',
                         '',
                         action=FileAction.OptionalLoad,
                         extensions=['map', 'xml']),
            doc='Filename of the detector grouping map file to use. \n'
            'By default all the pixels will be summed per each tube. \n'
            'Use .map or .xml file (see GroupDetectors documentation) '
            'only if different range is needed for each tube.')

        self.declareProperty(
            name='ManualPSDIntegrationRange',
            defaultValue=[1, 128],
            doc='Integration range of vertical pixels in each PSD tube. \n'
            'By default all the pixels will be summed per each tube. \n'
            'Use this option if the same range (other than default) '
            'is needed for all the tubes.')

        self.declareProperty(name='Analyser',
                             defaultValue='silicon',
                             validator=StringListValidator(['silicon']),
                             doc='Analyser crystal.')

        self.declareProperty(name='Reflection',
                             defaultValue='111',
                             validator=StringListValidator(['111', '311']),
                             doc='Analyser reflection.')

        self.declareProperty(WorkspaceGroupProperty(
            'OutputWorkspace', '', direction=Direction.Output),
                             doc='Output workspace group')

        self.declareProperty(name='SpectrumAxis',
                             defaultValue='SpectrumNumber',
                             validator=StringListValidator(
                                 ['SpectrumNumber', '2Theta', 'Q', 'Q2']),
                             doc='The spectrum axis conversion target.')

        self.declareProperty(
            name='DiscardSingleDetectors',
            defaultValue=False,
            doc='Whether to discard the spectra of single detectors.')

        self.declareProperty(name='ManualInelasticPeakChannels',
                             defaultValue=[-1, -1],
                             doc='The channel indices for the inelastic peak positions at the beginning '
                                 'and at the end of the spectra; by default the maxima of the monitor '
                                 'spectrum will be used for this. The intensities will be integrated '
                                 'symmetrically around each peak.')

    def validateInputs(self):

        issues = dict()

        if self.getPropertyValue('CalibrationBackgroundRun') \
                and not self.getPropertyValue('CalibrationRun'):
            issues['CalibrationRun'] = 'Calibration runs are required, ' \
                                       'if background for calibration is given.'

        if not self.getProperty('ManualInelasticPeakChannels').isDefault:
            peaks = self.getProperty('ManualInelasticPeakChannels').value
            if len(peaks) != 2:
                issues['ManualInelasticPeakChannels'] = 'Invalid value for peak channels, ' \
                                                        'provide two comma-separated positive integers.'
            elif peaks[0] >= peaks[1]:
                issues['ManualInelasticPeakChannels'] = 'First peak channel must be less than the second.'
            elif peaks[0] <= 0:
                issues['ManualInelasticPeakChannels'] = 'Positive integers are required.'

        return issues

    def setUp(self):

        self._sample_files = self.getPropertyValue('Run')
        self._background_files = self.getPropertyValue('BackgroundRun')
        self._calibration_files = self.getPropertyValue('CalibrationRun')
        self._background_calib_files = self.getPropertyValue(
            'CalibrationBackgroundRun')
        self._observable = self.getPropertyValue('Observable')
        self._sortX = self.getProperty('SortXAxis').value
        self._back_scaling = self.getProperty('BackgroundScalingFactor').value
        self._back_calib_scaling = self.getProperty(
            'CalibrationBackgroundScalingFactor').value
        self._back_option = self.getPropertyValue('BackgroundOption')
        self._calib_option = self.getPropertyValue('CalibrationOption')
        self._back_calib_option = self.getPropertyValue(
            'CalibrationBackgroundOption')
        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
        self._discard_sds = self.getProperty('DiscardSingleDetectors').value

        # arguments to pass to IndirectILLEnergyTransfer
        self._common_args['MapFile'] = self.getPropertyValue('MapFile')
        self._common_args['Analyser'] = self.getPropertyValue('Analyser')
        self._common_args['Reflection'] = self.getPropertyValue('Reflection')
        self._common_args['ManualPSDIntegrationRange'] = self.getProperty(
            'ManualPSDIntegrationRange').value
        self._common_args['SpectrumAxis'] = self._spectrum_axis
        self._common_args['DiscardSingleDetectors'] = self._discard_sds

        self._red_ws = self.getPropertyValue('OutputWorkspace')

        suffix = ''
        if self._spectrum_axis == 'SpectrumNumber':
            suffix = '_red'
        elif self._spectrum_axis == '2Theta':
            suffix = '_2theta'
        elif self._spectrum_axis == 'Q':
            suffix = '_q'
        elif self._spectrum_axis == 'Q2':
            suffix = '_q2'

        self._red_ws += suffix

        # Nexus metadata criteria for FWS type of data (both EFWS and IFWS)
        self._criteria = '($/entry0/instrument/Doppler/maximum_delta_energy$ == 0. or ' \
                         '$/entry0/instrument/Doppler/velocity_profile$ == 1)'

        # force sort x-axis, if interpolation is requested
        if ((self._back_option == 'Interpolate' and self._background_files)
            or (self._calib_option == 'Interpolate' and self._calibration_files)
            or (self._back_calib_option == 'Interpolate' and self._background_calib_files)) \
                and not self._sortX:
            self.log().warning(
                'Interpolation option requested, X-axis will be sorted.')
            self._sortX = True

        # empty dictionary to keep track of all runs (ws names)
        self._all_runs = dict()

    def _filter_files(self, files, label):
        '''
        Filters the given list of files according to nexus criteria
        @param  files :: list of input files (a ',' and '+' separated string)
        @param  label :: label for the error message if nothing is left after filtering
        @throws RuntimeError :: when nothing is left after filtering
        @return :: the list of input files that passed the criteria
        '''

        files = SelectNexusFilesByMetadata(files, self._criteria)

        if not files:
            raise RuntimeError(
                'None of the {0} runs satisfied the FWS and Observable criteria.'
                .format(label))
        else:
            self.log().information('Filtered {0} runs are: {1}'.format(
                label, files.replace(',', '\n')))

        return files

    def _ifws_peak_bins(self, ws):
        '''
        Gives the bin indices of the first and last inelastic peaks.
        By default they are taken from the maxima of the monitor spectrum,
        or they can be specified manually as input parameters.
        @param ws :: input workspace
        @return   :: (imin, imax)
        '''
        if not self.getProperty('ManualInelasticPeakChannels').isDefault:
            peak_channels = self.getProperty(
                'ManualInelasticPeakChannels').value
            blocksize = mtd[ws].blocksize()
            if peak_channels[1] >= blocksize:
                raise RuntimeError(
                    'Manual peak channel {0} is out of range {1}'.format(
                        peak_channels[1], blocksize))
            else:
                AddSampleLogMultiple(Workspace=ws,
                                     LogNames=[
                                         'ManualInelasticLeftPeak',
                                         'ManualInelasticRightPeak'
                                     ],
                                     LogValues=str(peak_channels[0]) + ',' +
                                     str(peak_channels[1]))
                return peak_channels
        run = mtd[ws].getRun()
        if not run.hasProperty('MonitorLeftPeak') or not run.hasProperty(
                'MonitorRightPeak'):
            raise RuntimeError(
                'Unable to retrieve the monitor peak information from the sample logs.'
            )
        else:
            imin = run.getLogData('MonitorLeftPeak').value
            imax = run.getLogData('MonitorRightPeak').value
        return imin, imax

    def _ifws_integrate(self, wsgroup):
        '''
        Integrates IFWS over the two peaks at the beginning and the end of the spectrum
        @param wsgroup :: input workspace group
        '''

        for item in mtd[wsgroup]:
            ws = item.name()
            size = item.blocksize()
            imin, imax = self._ifws_peak_bins(ws)
            x_values = item.readX(0)
            int1 = '__int1_' + ws
            int2 = '__int2_' + ws
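            # integrate symmetrically around the two inelastic peaks: a window
            # of 2*imin bins at the start of the spectrum and 2*(size-imax)
            # bins at the end, so each peak sits at the centre of its window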
            Integration(InputWorkspace=ws,
                        OutputWorkspace=int1,
                        RangeLower=x_values[0],
                        RangeUpper=x_values[2 * imin])
            Integration(InputWorkspace=ws,
                        OutputWorkspace=int2,
                        RangeLower=x_values[-2 * (size - imax)],
                        RangeUpper=x_values[-1])
            Plus(LHSWorkspace=int1, RHSWorkspace=int2, OutputWorkspace=ws)
            DeleteWorkspace(int1)
            DeleteWorkspace(int2)

    def _perform_unmirror(self, groupws):
        '''
        Sums the monitor-weighted integrals of the left and right wings for two-wing data,
        or returns the integral of the single wing otherwise
        @param groupws :: group workspace containing one workspace for one-wing data, two for two-wing data
        '''
        if mtd[groupws].getNumberOfEntries() == 2:  # two wings, sum
            left = mtd[groupws].getItem(0).name()
            right = mtd[groupws].getItem(1).name()
            left_right_sum = '__sum_' + groupws

            left_monitor = mtd[left].getRun().getLogData(
                'MonitorIntegral').value
            right_monitor = mtd[right].getRun().getLogData(
                'MonitorIntegral').value

            if left_monitor != 0. and right_monitor != 0.:
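                # weight each wing by its monitor integral so that the Plus
                # below forms a monitor-weighted average of the two wings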
                sum_monitor = left_monitor + right_monitor
                left_factor = left_monitor / sum_monitor
                right_factor = right_monitor / sum_monitor
                Scale(InputWorkspace=left,
                      OutputWorkspace=left,
                      Factor=left_factor)
                Scale(InputWorkspace=right,
                      OutputWorkspace=right,
                      Factor=right_factor)
            else:
                self.log().notice(
                    'Zero monitor integral has been found in one (or both) wings;'
                    ' left: {0}, right: {1}'.format(left_monitor,
                                                    right_monitor))

            Plus(LHSWorkspace=left,
                 RHSWorkspace=right,
                 OutputWorkspace=left_right_sum)

            DeleteWorkspace(left)
            DeleteWorkspace(right)

            RenameWorkspace(InputWorkspace=left_right_sum,
                            OutputWorkspace=groupws)

        else:
            RenameWorkspace(InputWorkspace=mtd[groupws].getItem(0),
                            OutputWorkspace=groupws)

    def PyExec(self):

        self.setUp()

        # total number of (unsummed) runs; one progress report per reduced run
        total = sum(files.count(',') + 1
                    for files in [self._sample_files, self._background_files,
                                  self._calibration_files, self._background_calib_files]
                    if files)

        self._progress = Progress(self, start=0.0, end=1.0, nreports=total)

        self._reduce_multiple_runs(self._sample_files, self._SAMPLE)

        if self._background_files:

            self._reduce_multiple_runs(self._background_files,
                                       self._BACKGROUND)

            back_ws = self._red_ws + '_' + self._BACKGROUND

            Scale(InputWorkspace=back_ws,
                  Factor=self._back_scaling,
                  OutputWorkspace=back_ws)

            if self._back_option == 'Sum':
                self._integrate(self._BACKGROUND, self._SAMPLE)
            else:
                self._interpolate(self._BACKGROUND, self._SAMPLE)

            self._subtract_background(self._BACKGROUND, self._SAMPLE)

            DeleteWorkspace(back_ws)

        if self._calibration_files:

            self._reduce_multiple_runs(self._calibration_files,
                                       self._CALIBRATION)

            if self._background_calib_files:
                self._reduce_multiple_runs(self._background_calib_files,
                                           self._BACKCALIB)

                back_calib_ws = self._red_ws + '_' + self._BACKCALIB

                Scale(InputWorkspace=back_calib_ws,
                      Factor=self._back_calib_scaling,
                      OutputWorkspace=back_calib_ws)

                if self._back_calib_option == 'Sum':
                    self._integrate(self._BACKCALIB, self._CALIBRATION)
                else:
                    self._interpolate(self._BACKCALIB, self._CALIBRATION)

                self._subtract_background(self._BACKCALIB, self._CALIBRATION)

                DeleteWorkspace(back_calib_ws)

            if self._calib_option == 'Sum':
                self._integrate(self._CALIBRATION, self._SAMPLE)
            else:
                self._interpolate(self._CALIBRATION, self._SAMPLE)

            self._calibrate()

            DeleteWorkspace(self._red_ws + '_' + self._CALIBRATION)

        self.log().debug('Run files map is :' + str(self._all_runs))

        self.setProperty('OutputWorkspace', self._red_ws)

    def _reduce_multiple_runs(self, files, label):
        '''
        Filters and reduces multiple files
        @param files :: list of run paths
        @param label :: output ws name
        '''

        files = self._filter_files(files, label)

        for run in files.split(','):
            self._reduce_run(run, label)

        self._create_matrices(label)

    def _reduce_run(self, run, label):
        '''
        Reduces the given (single or summed multiple) run
        @param run :: run path
        @param  label :: sample, background or calibration
        '''

        runs_list = run.split('+')

        runnumber = os.path.basename(runs_list[0]).split('.')[0]

        ws = '__' + runnumber

        if len(runs_list) > 1:
            ws += '_multiple'

        ws += '_' + label

        self._progress.report("Reducing run #" + runnumber)

        IndirectILLEnergyTransfer(Run=run,
                                  OutputWorkspace=ws,
                                  **self._common_args)

        energy = round(
            mtd[ws].getItem(0).getRun().getLogData(
                'Doppler.maximum_delta_energy').value, 2)

        if energy == 0.:
            # Elastic, integrate over full energy range
            Integration(InputWorkspace=ws, OutputWorkspace=ws)
        else:
            # Inelastic, do something more complex
            self._ifws_integrate(ws)

        ConvertToPointData(InputWorkspace=ws, OutputWorkspace=ws)

        self._perform_unmirror(ws)

        self._subscribe_run(ws, energy, label)

    def _subscribe_run(self, ws, energy, label):
        '''
        Subscribes the given ws name to the map for given energy and label
        @param ws     :: workspace name
        @param energy :: energy value
        @param label  :: sample, calibration or background
        '''

        if label in self._all_runs:
            if energy in self._all_runs[label]:
                self._all_runs[label][energy].append(ws)
            else:
                self._all_runs[label][energy] = [ws]
        else:
            self._all_runs[label] = dict()
            self._all_runs[label][energy] = [ws]

    def _integrate(self, label, reference):
        '''
        Averages the background or calibration intensities over all observable points at a given energy
        @param label :: calibration or background
        @param reference :: sample or calibration
        '''

        for energy in self._all_runs[reference]:
            if energy in self._all_runs[label]:
                ws = self._insert_energy_value(self._red_ws + '_' + label,
                                               energy, label)
                if mtd[ws].blocksize() > 1:
                    SortXAxis(InputWorkspace=ws, OutputWorkspace=ws)
                    axis = mtd[ws].readX(0)
                    start = axis[0]
                    end = axis[-1]
                    integration_range = end - start
                    params = [start, integration_range, end]
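                    # rebin to a single bin spanning the whole observable
                    # range; as the matrices are distributions this averages
                    # the intensities over all observable points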
                    Rebin(InputWorkspace=ws, OutputWorkspace=ws, Params=params)

    def _interpolate(self, label, reference):
        '''
        Interpolates the background or calibration intensities to
        all observable points existing in the sample at a given energy
        @param label  :: calibration or background
        @param reference :: to interpolate to, can be sample or calibration
        '''

        for energy in self._all_runs[reference]:
            if energy in self._all_runs[label]:

                ws = self._insert_energy_value(self._red_ws + '_' + label,
                                               energy, label)

                if reference == self._SAMPLE:
                    ref = self._insert_energy_value(self._red_ws, energy,
                                                    reference)
                else:
                    ref = self._insert_energy_value(
                        self._red_ws + '_' + reference, energy, reference)

                if mtd[ws].blocksize() > 1:
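                    # interpolate onto the observable grid of the reference;
                    # Linear2Points falls back to linear interpolation when
                    # only two points are available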
                    SplineInterpolation(WorkspaceToInterpolate=ws,
                                        WorkspaceToMatch=ref,
                                        Linear2Points=True,
                                        OutputWorkspace=ws)

    def _subtract_background(self, background, reference):
        '''
        Subtracts the background for each energy if a background run is available
        @param background :: background to subtract
        @param reference :: to subtract from
        '''

        for energy in self._all_runs[reference]:
            if energy in self._all_runs[background]:

                if reference == self._SAMPLE:
                    lhs = self._insert_energy_value(self._red_ws, energy,
                                                    reference)
                else:
                    lhs = self._insert_energy_value(
                        self._red_ws + '_' + reference, energy, reference)

                rhs = self._insert_energy_value(
                    self._red_ws + '_' + background, energy, background)
                Minus(LHSWorkspace=lhs, RHSWorkspace=rhs, OutputWorkspace=lhs)
            else:
                self.log().warning(
                    'No background subtraction can be performed for Doppler energy of {0} microEV, '
                    'since no background run was provided for the same energy value.'
                    .format(energy))

    def _calibrate(self):
        '''
        Performs calibration for each energy if a calibration run is available
        '''

        for energy in self._all_runs[self._SAMPLE]:
            if energy in self._all_runs[self._CALIBRATION]:
                sample_ws = self._insert_energy_value(self._red_ws, energy,
                                                      self._SAMPLE)
                calib_ws = sample_ws + '_' + self._CALIBRATION
                Divide(LHSWorkspace=sample_ws,
                       RHSWorkspace=calib_ws,
                       OutputWorkspace=sample_ws)
                self._scale_calibration(sample_ws, calib_ws)
            else:
                self.log().warning(
                    'No calibration can be performed for Doppler energy of {0} microEV, '
                    'since no calibration run was provided for the same energy value.'
                    .format(energy))

    def _scale_calibration(self, sample, calib):
        '''
        Scales the calibrated sample workspace up by the maximum integral intensity
        of the calibration run for each observable point
        @param sample  :: sample workspace after calibration
        @param calib   :: calibration workspace
        '''

        if mtd[calib].blocksize() == 1:
            scale = np.max(mtd[calib].extractY()[:, 0])
            Scale(InputWorkspace=sample,
                  Factor=scale,
                  OutputWorkspace=sample,
                  Operation='Multiply')
        else:
            # here calib and sample have the same size already
            for column in range(mtd[sample].blocksize()):
                scale = np.max(mtd[calib].extractY()[:, column])
                for spectrum in range(mtd[sample].getNumberHistograms()):
                    mtd[sample].dataY(spectrum)[column] *= scale
                    mtd[sample].dataE(spectrum)[column] *= scale

    def _get_observable_values(self, ws_list):
        '''
        Retrieves the needed sample log values for the given list of workspaces
        @param ws_list :: list of workspaces
        @returns :: array of observable values
        @throws  :: ValueError if the log entry is neither a number nor a time-stamp
        '''

        result = []

        zero_time = 0

        pattern = '%Y-%m-%dT%H:%M:%S'
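        # numeric logs are used as-is; time-stamp logs are parsed with the
        # pattern above and converted to seconds elapsed since the first run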

        for i, ws in enumerate(ws_list):

            log = mtd[ws].getRun().getLogData(self._observable)
            value = log.value

            if log.type == 'number':
                value = float(value)
            else:
                try:
                    value = time.mktime(time.strptime(value, pattern))
                except ValueError:
                    raise ValueError(
                        "Invalid observable. "
                        "Provide a numeric (sample.*, run_number, etc.) or time-stamp "
                        "like string (e.g. start_time) log.")
                if i == 0:
                    zero_time = value

                value = value - zero_time

            result.append(value)

        return result

    def _create_matrices(self, label):
        '''
        For each reduction type, concatenates the workspaces using the given sample log values as the x-axis.
        Creates a group workspace for the given label, containing one 2D workspace per distinct energy value.
        @param label :: sample, background or calibration
        '''

        togroup = []

        groupname = self._red_ws

        if label != self._SAMPLE:
            groupname += '_' + label

        for energy in sorted(self._all_runs[label]):

            ws_list = self._all_runs[label][energy]

            wsname = self._insert_energy_value(groupname, energy, label)

            togroup.append(wsname)
            nspectra = mtd[ws_list[0]].getNumberHistograms()
            observable_array = self._get_observable_values(
                self._all_runs[label][energy])

            ConjoinXRuns(InputWorkspaces=ws_list, OutputWorkspace=wsname)

            mtd[wsname].setDistribution(True)
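            # ConjoinXRuns concatenated the single-point workspaces along x,
            # one point per run; the x values are overwritten below with the
            # observable values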

            run_list = ''  # to be stored in the sample logs

            for ws in ws_list:
                run = mtd[ws].getRun()

                if run.hasProperty('run_number_list'):
                    run_list += run.getLogData(
                        'run_number_list').value.replace(', ', '+') + ','
                else:
                    run_list += str(run.getLogData('run_number').value) + ','

            AddSampleLog(Workspace=wsname,
                         LogName='ReducedRunsList',
                         LogText=run_list.rstrip(','))

            for spectrum in range(nspectra):

                mtd[wsname].setX(spectrum, np.array(observable_array))

            if self._sortX:
                SortXAxis(InputWorkspace=wsname, OutputWorkspace=wsname)

            self._set_x_label(wsname)

        for energy, ws_list in self._all_runs[label].items():
            for ws in ws_list:
                DeleteWorkspace(ws)

        GroupWorkspaces(InputWorkspaces=togroup, OutputWorkspace=groupname)

    def _set_x_label(self, ws):
        '''
        Sets the x-axis label
        @param ws :: input workspace
        '''

        axis = mtd[ws].getAxis(0)
        if self._observable == 'sample.temperature':
            axis.setUnit("Label").setLabel('Temperature', 'K')
        elif self._observable == 'sample.pressure':
            axis.setUnit("Label").setLabel('Pressure', 'P')
        elif 'time' in self._observable:
            axis.setUnit("Label").setLabel('Time', 'seconds')
        else:
            axis.setUnit("Label").setLabel(self._observable, '')

    def _insert_energy_value(self, ws_name, energy, label):
        '''
        Inserts the Doppler energy value into the workspace name,
        between the user input and the automatic suffix
        @param ws_name : workspace name
        @param energy : energy value
        @param label : sample, background, or calibration
        @return : new name with the energy value inside
        Example:
        user_input_2theta -> user_input_1.5_2theta
        user_input_red_background -> user_input_1.5_red_background
        '''
        suffix_pos = ws_name.rfind('_')

        if label != self._SAMPLE:
            # find second to last underscore
            suffix_pos = ws_name.rfind('_', 0, suffix_pos)

        return ws_name[:suffix_pos] + '_' + str(energy) + ws_name[suffix_pos:]
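A minimal usage sketch for the class above (hypothetical run numbers; the property names match the PyInit declarations):

    from mantid.simpleapi import IndirectILLReductionFWS

    # reduce a temperature scan; the output group holds one 2D workspace per
    # distinct Doppler energy, with the observable on the x-axis
    fws = IndirectILLReductionFWS(Run='170257:170261',
                                  Observable='sample.temperature',
                                  SortXAxis=True,
                                  OutputWorkspace='fws')
    for ws in fws:
        print(ws.getName())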
Example #34
    def PyExec(self):
        runs = self.getProperty('Filename').value
        runs_as_str = self.getPropertyValue('Filename')
        number_runs = runs_as_str.count(',') + runs_as_str.count('+') + 1
        self._progress = Progress(self, start=0.0, end=1.0, nreports=number_runs)
        self._loader = self.getPropertyValue('LoaderName')
        self._version = self.getProperty('LoaderVersion').value
        self._loader_options = self.getProperty('LoaderOptions').value
        merge_options = self.getProperty('MergeRunsOptions').value
        output = self.getPropertyValue('OutputWorkspace')
        if output.startswith('__'):
            self._prefix = '__'

        # get the first run
        to_group = []
        first_run = runs[0]
        if isinstance(first_run, list):
            first_run = first_run[0]

        if self._loader == 'Load':
            # figure out the winning loader
            winning_loader = FileLoaderRegistry.Instance().chooseLoader(first_run)
            self._loader = winning_loader.name()
            self._version = winning_loader.version()
            self.setPropertyValue('LoaderName', self._loader)
            self.setProperty('LoaderVersion', self._version)

        for runs_to_sum in runs:
            if not isinstance(runs_to_sum, list):
                run = runs_to_sum
                runnumber = self._prefix + os.path.basename(run).split('.')[0]
                self._load(run, runnumber)
                to_group.append(runnumber)
            else:
                runnumbers = self._prefix
                merged = ''
                for i, run in enumerate(runs_to_sum):
                    runnumber = os.path.basename(run).split('.')[0]
                    runnumbers += '_' + runnumber
                    runnumber = self._prefix + runnumber
                    self._load(run, runnumber)
                    if i == 0:
                        merged = runnumber
                    else:
                        # we need to merge to a temp name and rename later,
                        # since if the merged workspace is a group,
                        # its items would be orphaned
                        tmp_merged = '__tmp_' + merged
                        MergeRuns(InputWorkspaces=[merged, runnumber],
                                  OutputWorkspace=tmp_merged, **merge_options)
                        DeleteWorkspace(Workspace=runnumber)
                        DeleteWorkspace(Workspace=merged)
                        RenameWorkspace(InputWorkspace=tmp_merged, OutputWorkspace=merged)

                runnumbers = runnumbers[1:]
                RenameWorkspace(InputWorkspace=merged, OutputWorkspace=runnumbers)
                to_group.append(runnumbers)

        if len(to_group) != 1:
            GroupWorkspaces(InputWorkspaces=to_group, OutputWorkspace=output)
        else:
            RenameWorkspace(InputWorkspace=to_group[0], OutputWorkspace=output)

        self.setProperty('OutputWorkspace', mtd[output])
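A usage sketch for the wrapper above, assuming it is registered as Mantid's LoadAndMerge (the run tokens are illustrative placeholders): in the Filename string, '+' merges runs via MergeRuns and ',' separates entries that are loaded individually, so the call below yields a group of two workspaces.

    from mantid.simpleapi import LoadAndMerge

    # the winning loader is chosen from the first file and reused for the rest
    out = LoadAndMerge(Filename='0001+0002,0003',
                       OutputWorkspace='out')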
Example #35
    def PyExec(self):

        self.check_platform_support()

        from IndirectBayes import (CalcErange, GetXYE)
        setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
        self.log().information('BayesQuasi input')

        erange = [self._e_min, self._e_max]
        nbins = [self._sam_bins, self._res_bins]
        setup_prog.report('Converting to binary for Fortran')
        # convert true/false to 1/0 for fortran
        o_el = int(self._elastic)
        o_w1 = int(self._width)
        o_res = int(self._res_norm)

        # fortran code uses background choices defined using the following numbers
        setup_prog.report('Encoding input options')
        o_bgd = ['Zero', 'Flat', 'Sloping'].index(self._background)
        fitOp = [o_el, o_bgd, o_w1, o_res]

        setup_prog.report('Establishing save path')
        workdir = config['defaultsave.directory']
        if not os.path.isdir(workdir):
            workdir = os.getcwd()
            logger.information('Default save directory is not set. Defaulting to current working directory: ' + workdir)

        array_len = 4096  # length of array in Fortran
        setup_prog.report('Checking X Range')
        CheckXrange(erange, 'Energy')

        nbin, nrbin = nbins[0], nbins[1]

        logger.information('Sample is ' + self._samWS)
        logger.information('Resolution is ' + self._resWS)

        # Check for trailing and leading zeros in data
        setup_prog.report('Checking for leading and trailing zeros in the data')
        first_data_point, last_data_point = IndentifyDataBoundaries(self._samWS)
        self.check_energy_range_for_zeroes(first_data_point, last_data_point)

        # update erange with new values
        erange = [self._e_min, self._e_max]

        setup_prog.report('Checking Analysers')
        CheckAnalysers(self._samWS, self._resWS)
        setup_prog.report('Obtaining EFixed, theta and Q')
        efix = getEfixed(self._samWS)
        theta, Q = GetThetaQ(self._samWS)

        nsam, ntc = CheckHistZero(self._samWS)

        totalNoSam = nsam

        # check if we're performing a sequential fit
        if not self._loop:
            nsam = 1

        nres = CheckHistZero(self._resWS)[0]

        setup_prog.report('Checking Histograms')
        if self._program == 'QL':
            if nres == 1:
                prog = 'QLr'  # res file
            else:
                prog = 'QLd'  # data file
                CheckHistSame(self._samWS, 'Sample', self._resWS, 'Resolution')
        elif self._program == 'QSe':
            if nres == 1:
                prog = 'QSe'  # res file
            else:
                raise ValueError('Stretched Exp ONLY works with RES file')

        logger.information('Version is {0}'.format(prog))
        logger.information(' Number of spectra = {0} '.format(nsam))
        logger.information(' Erange : {0}  to {1} '.format(erange[0], erange[1]))

        setup_prog.report('Reading files')
        Wy, We = self._read_width_file(self._width, self._wfile, totalNoSam)
        dtn, xsc = self._read_norm_file(self._res_norm, self._resnormWS, totalNoSam)

        setup_prog.report('Establishing output workspace name')
        fname = self._samWS[:-4] + '_' + prog
        probWS = fname + '_Prob'
        fitWS = fname + '_Fit'
        wrks = os.path.join(workdir, self._samWS[:-4])
        logger.information(' lptfile : ' + wrks + '_' + prog + '.lpt')
        lwrk = len(wrks)
        wrks = wrks.ljust(140, ' ')
        wrkr = self._resWS
        wrkr = wrkr.ljust(140, ' ')

        setup_prog.report('Initialising probability list')
        # initialise probability list
        if self._program == 'QL':
            prob0, prob1, prob2, prob3 = [], [], [], []
        xQ = np.array(Q[:nsam])
        xProb = np.tile(xQ, 4)
        eProb = np.zeros(4 * nsam)

        group = ''
        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam * 3)
        for spectrum in range(0, nsam):
            logger.information('Group {0} at angle {1} '.format(spectrum, theta[spectrum]))
            nsp = spectrum + 1

            nout, bnorm, Xdat, Xv, Yv, Ev = CalcErange(self._samWS, spectrum, erange, nbin)
            Ndat = nout[0]
            Imin = nout[1]
            Imax = nout[2]
            if prog == 'QLd':
                mm = spectrum
            else:
                mm = 0
            Nb, Xb, Yb, Eb = GetXYE(self._resWS, mm, array_len)  # get resolution data
            numb = [nsam, nsp, ntc, Ndat, nbin, Imin, Imax, Nb, nrbin]
            rscl = 1.0
            reals = [efix, theta[spectrum], rscl, bnorm]

            if prog == 'QLr':
                workflow_prog.report('Processing Sample number {0} as Lorentzian'.format(spectrum))
                nd, xout, yout, eout, yfit, yprob = QLr.qlres(numb, Xv, Yv, Ev, reals, fitOp,
                                                              Xdat, Xb, Yb, Wy, We, dtn, xsc,
                                                              wrks, wrkr, lwrk)
                logger.information(' Log(prob) : {0} {1} {2} {3}'.format(yprob[0], yprob[1], yprob[2], yprob[3]))
            elif prog == 'QLd':
                workflow_prog.report('Processing Sample number {0}'.format(spectrum))
                nd, xout, yout, eout, yfit, yprob = QLd.qldata(numb, Xv, Yv, Ev, reals, fitOp,
                                                               Xdat, Xb, Yb, Eb, Wy, We,
                                                               wrks, wrkr, lwrk)
                logger.information(' Log(prob) : {0} {1} {2} {3}'.format(yprob[0], yprob[1], yprob[2], yprob[3]))
            elif prog == 'QSe':
                workflow_prog.report('Processing Sample number {0} as Stretched Exp'.format(spectrum))
                nd, xout, yout, eout, yfit, yprob = Qse.qlstexp(numb, Xv, Yv, Ev, reals, fitOp,
                                                                Xdat, Xb, Yb, Wy, We, dtn, xsc,
                                                                wrks, wrkr, lwrk)

            dataX = xout[:nd]
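            # append one extra x value, extrapolating the last bin width, so
            # that the output can be stored as a histogram with nd + 1 bin edges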
            dataX = np.append(dataX, 2 * xout[nd - 1] - xout[nd - 2])
            yfit_list = np.split(yfit[:4 * nd], 4)
            dataF1 = yfit_list[1]
            workflow_prog.report('Processing data')
            dataG = np.zeros(nd)
            datX = dataX
            datY = yout[:nd]
            datE = eout[:nd]
            datX = np.append(datX, dataX)
            datY = np.append(datY, dataF1[:nd])
            datE = np.append(datE, dataG)
            res1 = dataF1[:nd] - yout[:nd]
            datX = np.append(datX, dataX)
            datY = np.append(datY, res1)
            datE = np.append(datE, dataG)
            nsp = 3
            names = 'data,fit.1,diff.1'
            res_plot = [0, 1, 2]
            if self._program == 'QL':
                workflow_prog.report('Processing Lorentzian result data')
                dataF2 = yfit_list[2]
                datX = np.append(datX, dataX)
                datY = np.append(datY, dataF2[:nd])
                datE = np.append(datE, dataG)
                res2 = dataF2[:nd] - yout[:nd]
                datX = np.append(datX, dataX)
                datY = np.append(datY, res2)
                datE = np.append(datE, dataG)
                nsp += 2
                names += ',fit.2,diff.2'

                dataF3 = yfit_list[3]
                datX = np.append(datX, dataX)
                datY = np.append(datY, dataF3[:nd])
                datE = np.append(datE, dataG)
                res3 = dataF3[:nd] - yout[:nd]
                datX = np.append(datX, dataX)
                datY = np.append(datY, res3)
                datE = np.append(datE, dataG)
                nsp += 2
                names += ',fit.3,diff.3'

                res_plot.append(4)
                prob0.append(yprob[0])
                prob1.append(yprob[1])
                prob2.append(yprob[2])
                prob3.append(yprob[3])

            # create result workspace
            fitWS = fname + '_Workspaces'
            fout = fname + '_Workspace_' + str(spectrum)

            workflow_prog.report('Creating OutputWorkspace')
            s_api.CreateWorkspace(OutputWorkspace=fout, DataX=datX, DataY=datY, DataE=datE,
                                  Nspec=nsp, UnitX='DeltaE', VerticalAxisUnit='Text', VerticalAxisValues=names)

            # append workspace to list of results
            group += fout + ','

        comp_prog = Progress(self, start=0.7, end=0.8, nreports=2)
        comp_prog.report('Creating Group Workspace')
        s_api.GroupWorkspaces(InputWorkspaces=group, OutputWorkspace=fitWS)

        if self._program == 'QL':
            comp_prog.report('Processing Lorentzian probability data')
            yProb = np.concatenate((prob0, prob1, prob2, prob3))

            prob_axis_names = '0 Peak, 1 Peak, 2 Peak, 3 Peak'
            s_api.CreateWorkspace(OutputWorkspace=probWS, DataX=xProb, DataY=yProb, DataE=eProb,
                                  Nspec=4, UnitX='MomentumTransfer', VerticalAxisUnit='Text',
                                  VerticalAxisValues=prob_axis_names)
            outWS = self.C2Fw(fname)
        elif self._program == 'QSe':
            comp_prog.report('Running C2Se')
            outWS = self.C2Se(fname)

        # Sort x axis
        s_api.SortXAxis(InputWorkspace=outWS, OutputWorkspace=outWS, EnableLogging=False)

        log_prog = Progress(self, start=0.8, end=1.0, nreports=8)
        # Add some sample logs to the output workspaces
        log_prog.report('Copying Logs to outputWorkspace')
        s_api.CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=outWS)
        log_prog.report('Adding Sample logs to Output workspace')
        self._add_sample_logs(outWS, prog, erange, nbins)
        log_prog.report('Copying logs to fit Workspace')
        s_api.CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=fitWS)
        log_prog.report('Adding sample logs to Fit workspace')
        self._add_sample_logs(fitWS, prog, erange, nbins)
        log_prog.report('Finalising log copying')

        self.setProperty('OutputWorkspaceFit', fitWS)
        self.setProperty('OutputWorkspaceResult', outWS)
        log_prog.report('Setting workspace properties')

        if self._program == 'QL':
            s_api.SortXAxis(InputWorkspace=probWS, OutputWorkspace=probWS, EnableLogging=False)
            self.setProperty('OutputWorkspaceProb', probWS)
Example #36
    def PyExec(self):

        self._first_run = None

        # Get the list of data files from the runs
        sample_runs = self.getPropertyValue('SampleRuns')
        output_folder = self.getPropertyValue('OutputFolder')
        scratch_folder = self.getPropertyValue('ScratchFolder')
        keep_reduced_ws = self.getProperty('KeepReducedWorkspace').value
        cycle_runs = expand_as_cycle_runs(sample_runs)

        # set up progress bar
        steps = len(cycle_runs) + 1
        self._progress = Progress(self, start=0.0, end=1.0, nreports=steps)

        saveFolder = scratch_folder if scratch_folder else config['defaultsave.directory']
        for cycle, run in cycle_runs:

            if cycle:
                srun = str(cycle) + ':: ' + str(run)
            else:
                srun = str(run)
            self._progress.report("Processing run {}, ".format(run))

            PelicanReduction(
                SampleRuns=srun,
                EmptyRuns=self.getPropertyValue('EmptyRuns'),
                EnergyTransfer=self.getPropertyValue('EnergyTransfer'),
                MomentumTransfer=self.getPropertyValue('MomentumTransfer'),
                Processing='NXSPE',
                LambdaOnTwoMode=self.getProperty('LambdaOnTwoMode').value,
                FrameOverlap=self.getProperty('FrameOverlap').value,
                ScratchFolder=scratch_folder,
                OutputWorkspace='nxspe',
                ConfigurationFile=self.getPropertyValue('ConfigurationFile'))

            # the nxspe file named 'nxspe_spe_2D.nxspe' is moved to the output folder and renamed
            dfile = 'run_{:d}.nxspe'.format(run)
            dpath = os.path.join(output_folder, dfile)
            if os.path.isfile(dpath):
                os.remove(dpath)
            spath = os.path.join(saveFolder, 'nxspe_spe_2D.nxspe')
            os.rename(spath, dpath)

            if not self._first_run:
                self._first_run = dpath
            elif self.getProperty('FixedDetector').value:
                copy_datset_nodes(self._first_run, dpath, [
                    '/nxspe_spe_2Ddet/data/azimuthal',
                    '/nxspe_spe_2Ddet/data/azimuthal_width',
                    '/nxspe_spe_2Ddet/data/polar',
                    '/nxspe_spe_2Ddet/data/polar_width'
                ])

            # the Pelican reduction saves temporary files 'PLN00nnnn_sample.nxs' and 'PLN00nnnn.nxs' to
            # speed up processing; remove these files to save space
            if scratch_folder:
                for sfile in ['PLN{:07d}_sample.nxs', 'PLN{:07d}.nxs']:
                    tfile = sfile.format(run)
                    tpath = os.path.join(scratch_folder, tfile)
                    os.remove(tpath)

        # delete the workspace, as it is only temporary
        if not keep_reduced_ws:
            self._progress.report("Cleaning up file,")
            DeleteWorkspace(Workspace='nxspe_spe_2D')
Example #37
    def PyExec(self):
        workflow_prog = Progress(self, start=0.0, end=0.3, nreports=4)
        workflow_prog.report('Setting up algorithm')
        self._setup()

        input_ws = mtd[self._sample]

        min_spectrum_index = input_ws.getIndexFromSpectrumNumber(
            int(self._spectra_range[0]))
        max_spectrum_index = input_ws.getIndexFromSpectrumNumber(
            int(self._spectra_range[1]))

        # Crop to the required spectra range
        workflow_prog.report('Cropping Workspace')
        cropped_input = ms.CropWorkspace(
            InputWorkspace=input_ws,
            OutputWorkspace='__symm',
            StartWorkspaceIndex=min_spectrum_index,
            EndWorkspaceIndex=max_spectrum_index)
        # Find the smallest data array in the first spectrum
        len_x = len(cropped_input.readX(0))
        len_y = len(cropped_input.readY(0))
        len_e = len(cropped_input.readE(0))
        sample_array_len = min(len_x, len_y, len_e)

        sample_x = cropped_input.readX(0)

        # Get slice bounds of array
        try:
            workflow_prog.report('Calculating array points')
            self._calculate_array_points(sample_x, sample_array_len)
        except Exception as exc:
            raise RuntimeError(
                'Failed to calculate array slice boundaries: %s' % str(exc))

        max_sample_index = sample_array_len - 1
        centre_range_len = self._positive_min_index + self._negative_min_index
        positive_diff_range_len = max_sample_index - self._positive_max_index

        output_cut_index = max_sample_index - self._positive_min_index - positive_diff_range_len - 1
        new_array_len = 2 * max_sample_index - centre_range_len - 2 * positive_diff_range_len - 1
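        # data below -x_min are replaced by a mirror image of the positive
        # range (x_min, x_max), while the original data from -x_min up to
        # x_max are kept; this is what the length arithmetic above accounts for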

        logger.information('Sample array length = %d' % sample_array_len)

        logger.information(
            'Positive X min at i=%d, x=%f' %
            (self._positive_min_index, sample_x[self._positive_min_index]))
        logger.information(
            'Negative X min at i=%d, x=%f' %
            (self._negative_min_index, sample_x[self._negative_min_index]))

        logger.information(
            'Positive X max at i=%d, x=%f' %
            (self._positive_max_index, sample_x[self._positive_max_index]))

        logger.information('New array length = %d' % new_array_len)
        logger.information('Output array LR split index = %d' %
                           output_cut_index)

        # Create an empty workspace with enough storage for the new data
        workflow_prog.report('Creating OutputWorkspace')
        out_ws = WorkspaceFactory.Instance().create(
            cropped_input, cropped_input.getNumberHistograms(),
            int(new_array_len), int(new_array_len))

        # For each spectrum copy positive values to the negative
        pop_prog = Progress(self,
                            start=0.3,
                            end=0.95,
                            nreports=out_ws.getNumberHistograms())
        for idx in range(out_ws.getNumberHistograms()):
            pop_prog.report('Populating data in workspace %i' % idx)
            # Strip any additional array cells
            x_in = cropped_input.readX(idx)[:sample_array_len]
            y_in = cropped_input.readY(idx)[:sample_array_len]
            e_in = cropped_input.readE(idx)[:sample_array_len]

            # Get some zeroed data to overwrite with copies from sample
            x_out = np.zeros(new_array_len)
            y_out = np.zeros(new_array_len)
            e_out = np.zeros(new_array_len)

            # Left hand side (reflected)
            x_out[:output_cut_index] = -x_in[self._positive_max_index - 1:self._positive_min_index:-1]
            y_out[:output_cut_index] = y_in[self._positive_max_index - 1:self._positive_min_index:-1]
            e_out[:output_cut_index] = e_in[self._positive_max_index - 1:self._positive_min_index:-1]

            # Right hand side (copied)
            x_out[output_cut_index:] = x_in[self._negative_min_index:self._positive_max_index]
            y_out[output_cut_index:] = y_in[self._negative_min_index:self._positive_max_index]
            e_out[output_cut_index:] = e_in[self._negative_min_index:self._positive_max_index]

            # Set output spectrum data
            out_ws.setX(idx, x_out)
            out_ws.setY(idx, y_out)
            out_ws.setE(idx, e_out)

            logger.information('Symmetrise spectrum %d' % idx)

        end_prog = Progress(self, start=0.95, end=1.0, nreports=3)
        end_prog.report('Deleting temp workspaces')
        ms.DeleteWorkspace(cropped_input)

        if self._props_output_workspace != '':
            end_prog.report('Generating property table')
            self._generate_props_table()

        self.setProperty('OutputWorkspace', out_ws)
        end_prog.report('Algorithm Complete')
Example #38
    def PyExec(self):
        prog = Progress(self, start=0, end=1, nreports=5)

        prog.report('Importing GSAS-II ')
        self._run_threadsafe(self._import_gsas2, self.getProperty(self.PROP_PATH_TO_GSASII).value)

        prog.report('Initializing GSAS-II ')
        gs2 = self._run_threadsafe(self._init_gs2)

        prog.report('Loading and preparing input data')
        focused_wks = self._get_focused_wks(self.PROP_INPUT_WORKSPACE, self.PROP_WORKSPACE_INDEX)

        inst_file = self.getProperty(self.PROP_INSTR_FILE).value
        try:
            (gs2_rd, limits, peaks_init, background_def) =\
                self._run_threadsafe(self._load_prepare_data_for_fit, gs2, focused_wks, inst_file)
        except RuntimeError as rexc:
            raise RuntimeError("Error in execution of GSAS-II data loading routines: "
                               "{0}.".format(str(rexc)))

        # No obvious way to provide proper progress report from inside the refinement/fitting routines
        prog.report('Running refinement. This may take some time')
        try:
            (gof_estimates, lattice_params, parm_dict) = \
                self._run_threadsafe(self._run_refinement,
                                     gs2, gs2_rd, (limits, peaks_init, background_def))
        except RuntimeError as rexc:
            raise RuntimeError("Error in execution of GSAS-II refinement routines: "
                               "{0}".format(str(rexc)))

        prog.report('Producing outputs')
        self._save_project_read_lattice(gs2, gs2_rd)
        self._produce_outputs(gof_estimates, lattice_params, parm_dict)

        import time
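        # short pause before returning, kept from the original; presumably a
        # workaround for GSAS-II thread teardown (an assumption, not documented)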
        time.sleep(0.1)
Example #39
    def PyExec(self):
        self._setup()
        self._wave_range()

        setup_prog = Progress(self, start=0.0, end=0.2, nreports=2)
        # Set sample material form chemical formula
        setup_prog.report('Set sample material')
        self._sample_density = self._set_material(self._sample_ws_name,
                                                  self._set_sample_method,
                                                  self._sample_chemical_formula,
                                                  self._sample_coherent_cross_section,
                                                  self._sample_incoherent_cross_section,
                                                  self._sample_attenuation_cross_section,
                                                  self._sample_density_type,
                                                  self._sample_density,
                                                  self._sample_number_density_unit)

        # If using a can, set sample material using chemical formula
        if self._use_can:
            setup_prog.report('Set container sample material')
            self._can_density = self._set_material(self._can_ws_name,
                                                   self._set_can_method,
                                                   self._can_chemical_formula,
                                                   self._can_coherent_cross_section,
                                                   self._can_incoherent_cross_section,
                                                   self._can_attenuation_cross_section,
                                                   self._can_density_type,
                                                   self._can_density,
                                                   self._can_number_density_unit)

        # Holders for the corrected data
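        # (Paalman-Pings notation: ass = A_s,s sample self-attenuation,
        # assc = A_s,sc sample attenuated by sample and can, acsc = A_c,sc can
        # attenuated by sample and can, acc = A_c,c can self-attenuation)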
        data_ass = []
        data_assc = []
        data_acsc = []
        data_acc = []

        self._get_angles()
        num_angles = len(self._angles)
        workflow_prog = Progress(self, start=0.2, end=0.8, nreports=num_angles * 2)

        # Check sample input
        sam_material = mtd[self._sample_ws_name].sample().getMaterial()
        self._has_sample_in = \
            bool(self._sample_density and self._sample_thickness and (sam_material.totalScatterXSection() + sam_material.absorbXSection()))
        if not self._has_sample_in:
            logger.warning("The sample has not been given, or the information is incomplete. Continuing but no absorption for sample will "
                           "be computed.")

        # Check can input
        if self._use_can:
            can_material = mtd[self._can_ws_name].sample().getMaterial()
            if self._can_density and (can_material.totalScatterXSection() + can_material.absorbXSection()):
                self._has_can_front_in = bool(self._can_front_thickness)
                self._has_can_back_in = bool(self._can_back_thickness)
            else:
                logger.warning(
                    "A can workspace was given but the can information is incomplete. Continuing but no absorption for the can will "
                    "be computed.")

            if not self._has_can_front_in:
                logger.warning(
                    "A can workspace was given but the can front thickness was not given. Continuing but no absorption for can front"
                    " will be computed.")
            if not self._has_can_back_in:
                logger.warning(
                    "A can workspace was given but the can back thickness was not given. Continuing but no absorption for can back"
                    " will be computed.")

        for angle_idx in range(num_angles):
            workflow_prog.report('Running flat correction for angle %s' % angle_idx)
            angle = self._angles[angle_idx]
            (ass, assc, acsc, acc) = self._flat_abs(angle)

            logger.information('Angle %d: %f successful' % (angle_idx + 1, self._angles[angle_idx]))
            workflow_prog.report('Appending data for angle %s' % angle_idx)
            data_ass = np.append(data_ass, ass)
            data_assc = np.append(data_assc, assc)
            data_acsc = np.append(data_acsc, acsc)
            data_acc = np.append(data_acc, acc)

        log_prog = Progress(self, start=0.8, end=1.0, nreports=8)

        sample_logs = {'sample_shape': 'flatplate', 'sample_filename': self._sample_ws_name,
                       'sample_thickness': self._sample_thickness, 'sample_angle': self._sample_angle,
                       'emode': self._emode, 'efixed': self._efixed}
        dataX = self._wavelengths * num_angles

        # Create the output workspaces
        ass_ws = self._output_ws_name + '_ass'
        log_prog.report('Creating ass output workspace')
        CreateWorkspace(OutputWorkspace=ass_ws,
                        DataX=dataX,
                        DataY=data_ass,
                        NSpec=num_angles,
                        UnitX='Wavelength',
                        VerticalAxisUnit='SpectraNumber',
                        ParentWorkspace=self._sample_ws_name,
                        EnableLogging=False)
        log_prog.report('Adding sample logs')
        self._add_sample_logs(ass_ws, sample_logs)

        workspaces = [ass_ws]

        if self._use_can:
            log_prog.report('Adding can sample logs')
            AddSampleLog(Workspace=ass_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False)

            assc_ws = self._output_ws_name + '_assc'
            workspaces.append(assc_ws)
            log_prog.report('Creating assc output workspace')
            CreateWorkspace(OutputWorkspace=assc_ws,
                            DataX=dataX,
                            DataY=data_assc,
                            NSpec=num_angles,
                            UnitX='Wavelength',
                            VerticalAxisUnit='SpectraNumber',
                            ParentWorkspace=self._sample_ws_name,
                            EnableLogging=False)
            log_prog.report('Adding assc sample logs')
            self._add_sample_logs(assc_ws, sample_logs)
            AddSampleLog(Workspace=assc_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False)

            acsc_ws = self._output_ws_name + '_acsc'
            workspaces.append(acsc_ws)
            log_prog.report('Creating acsc output workspace')
            CreateWorkspace(OutputWorkspace=acsc_ws,
                            DataX=dataX,
                            DataY=data_acsc,
                            NSpec=num_angles,
                            UnitX='Wavelength',
                            VerticalAxisUnit='SpectraNumber',
                            ParentWorkspace=self._sample_ws_name,
                            EnableLogging=False)
            log_prog.report('Adding acsc sample logs')
            self._add_sample_logs(acsc_ws, sample_logs)
            AddSampleLog(Workspace=acsc_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False)

            acc_ws = self._output_ws_name + '_acc'
            workspaces.append(acc_ws)
            log_prog.report('Creating acc output workspace')
            CreateWorkspace(OutputWorkspace=acc_ws,
                            DataX=dataX,
                            DataY=data_acc,
                            NSpec=num_angles,
                            UnitX='Wavelength',
                            VerticalAxisUnit='SpectraNumber',
                            ParentWorkspace=self._sample_ws_name,
                            EnableLogging=False)
            log_prog.report('Adding acc sample logs')
            self._add_sample_logs(acc_ws, sample_logs)
            AddSampleLog(Workspace=acc_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False)

        if self._interpolate:
            self._interpolate_corrections(workspaces)
        log_prog.report('Grouping Output Workspaces')
        GroupWorkspaces(InputWorkspaces=','.join(workspaces), OutputWorkspace=self._output_ws_name, EnableLogging=False)
        self.setPropertyValue('OutputWorkspace', self._output_ws_name)
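A note on the data layout used above: CreateWorkspace splits the flat DataY array into NSpec equal rows, so appending each angle's factors end-to-end and repeating the wavelength grid once per angle yields one spectrum per angle. A self-contained numpy sketch of that reshaping (all names and values here are illustrative, not taken from the algorithm):

import numpy as np

num_angles, num_waves = 3, 5
wavelengths = list(np.linspace(1.0, 5.0, num_waves))
data_ass = np.array([])                       # flat holder, as in the loop above
for angle_idx in range(num_angles):
    ass = np.full(num_waves, 0.9 - 0.1 * angle_idx)   # stand-in attenuation factors
    data_ass = np.append(data_ass, ass)
data_x = wavelengths * num_angles             # list repetition, as in dataX above
per_spectrum = data_ass.reshape(num_angles, num_waves)  # what NSpec=num_angles implies
print(per_spectrum.shape)                     # (3, 5): one spectrum per angle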
Example #40
    def PyExec(self):

        self.setUp()

        # rough count of the (unsummed) runs, used to size the progress bar;
        # counting separators means each list contributes one fewer than its
        # number of runs
        total = (self._sample_files.count(',') + self._background_files.count(',')
                 + self._calibration_files.count(','))

        self._progress = Progress(self, start=0.0, end=1.0, nreports=total)

        self._reduce_multiple_runs(self._sample_files, self._SAMPLE)

        if self._background_files:

            self._reduce_multiple_runs(self._background_files,
                                       self._BACKGROUND)

            back_ws = self._red_ws + '_' + self._BACKGROUND

            Scale(InputWorkspace=back_ws,
                  Factor=self._back_scaling,
                  OutputWorkspace=back_ws)

            if self._back_option == 'Sum':
                self._integrate(self._BACKGROUND, self._SAMPLE)
            else:
                self._interpolate(self._BACKGROUND, self._SAMPLE)

            self._subtract_background(self._BACKGROUND, self._SAMPLE)

            DeleteWorkspace(back_ws)

        if self._calibration_files:

            self._reduce_multiple_runs(self._calibration_files,
                                       self._CALIBRATION)

            if self._background_calib_files:
                self._reduce_multiple_runs(self._background_calib_files,
                                           self._BACKCALIB)

                back_calib_ws = self._red_ws + '_' + self._BACKCALIB

                Scale(InputWorkspace=back_calib_ws,
                      Factor=self._back_calib_scaling,
                      OutputWorkspace=back_calib_ws)

                if self._back_calib_option == 'Sum':
                    self._integrate(self._BACKCALIB, self._CALIBRATION)
                else:
                    self._interpolate(self._BACKCALIB, self._CALIBRATION)

                self._subtract_background(self._BACKCALIB, self._CALIBRATION)

                DeleteWorkspace(back_calib_ws)

            if self._calib_option == 'Sum':
                self._integrate(self._CALIBRATION, self._SAMPLE)
            else:
                self._interpolate(self._CALIBRATION, self._SAMPLE)

            self._calibrate()

            DeleteWorkspace(self._red_ws + '_' + self._CALIBRATION)

        self.log().debug('Run files map is :' + str(self._all_runs))

        self.setProperty('OutputWorkspace', self._red_ws)
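Stripped of workspace bookkeeping, the background path above scales the reduced background and subtracts it from the sample. A minimal numpy sketch of the assumed arithmetic (the _subtract_background helper itself is outside this excerpt, and all numbers are made up):

import numpy as np

sample = np.array([55.0, 60.0, 58.0])         # reduced sample
background = np.array([10.0, 12.0, 11.0])     # reduced background
back_scaling = 0.5                            # analogous to self._back_scaling
background = back_scaling * background        # Scale(..., Factor=self._back_scaling)
sample -= background                          # the eventual subtraction step
print(sample)                                 # [50.  54.  52.5]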
Example #41
class PowderILLParameterScan(PythonAlgorithm):

    _calibration_file = None
    _roc_file = None
    _normalise_option = None
    _observable = None
    _sort_x_axis = None
    _unit = None
    _out_name = None
    _progress = None
    _crop_negative = None
    _zero_counting_option = None
    _rebin_width = None
    _region_of_interest = []
    _zero_cells = []

    def _hide(self, name):
        return '__' + self._out_name + '_' + name

    def _hide_run(self, runnumber):
        return '__' + runnumber

    def category(self):
        return "ILL\\Diffraction;Diffraction\\Reduction"

    def summary(self):
        return 'Performs powder diffraction data reduction for ILL instrument D20.'

    def seeAlso(self):
        return ["PowderILLDetectorScan", "PowderILLEfficiency"]

    def name(self):
        return "PowderILLParameterScan"

    def validateInputs(self):
        issues = dict()
        rebin = self.getProperty('ScanAxisBinWidth').value
        sort = self.getProperty('SortObservableAxis').value
        if rebin != 0 and not sort:
            issues['SortObservableAxis'] = 'Axis must be sorted if rebin is requested.'
        return issues

    def PyInit(self):

        self.declareProperty(MultipleFileProperty('Run', extensions=['nxs']),
                             doc='File path of run(s).')

        self.declareProperty(FileProperty('CalibrationFile',
                                          '',
                                          action=FileAction.OptionalLoad,
                                          extensions=['nxs']),
                             doc='File containing the detector efficiencies.')

        self.declareProperty(
            FileProperty('ROCCorrectionFile',
                         '',
                         action=FileAction.OptionalLoad,
                         extensions=['nxs']),
            doc='File containing the radial oscillating collimator (ROC) corrections.')

        self.declareProperty(name='NormaliseTo',
                             defaultValue='None',
                             validator=StringListValidator(
                                 ['None', 'Time', 'Monitor', 'ROI']),
                             doc='Normalise to time, monitor or ROI counts.')

        thetaRangeValidator = FloatArrayOrderedPairsValidator()

        self.declareProperty(
            FloatArrayProperty(name='ROI',
                               values=[0, 153.6],
                               validator=thetaRangeValidator),
            doc='Regions of interest for normalisation [in scattering angle in degrees].')

        normaliseToROI = VisibleWhenProperty('NormaliseTo',
                                             PropertyCriterion.IsEqualTo,
                                             'ROI')
        self.setPropertySettings('ROI', normaliseToROI)

        self.declareProperty(name='Observable',
                             defaultValue='sample.temperature',
                             doc='Scanning observable, a Sample Log entry.')

        self.declareProperty(
            name='SortObservableAxis',
            defaultValue=False,
            doc='Whether or not to sort the scanning observable axis.')

        self.declareProperty(
            name='ScanAxisBinWidth',
            defaultValue=0.,
            validator=FloatBoundedValidator(lower=0.),
            doc='Rebin the observable axis to this width. Default is to not rebin.')

        self.declareProperty(
            name='CropNegative2Theta',
            defaultValue=True,
            doc='Whether or not to crop out the bins corresponding to negative scattering angle.')

        self.declareProperty(
            name='ZeroCountingCells',
            defaultValue='Interpolate',
            validator=StringListValidator(['Crop', 'Interpolate', 'Leave']),
            doc='Crop out the zero counting cells or interpolate the counts from the neighbours.')

        self.declareProperty(name='Unit',
                             defaultValue='ScatteringAngle',
                             validator=StringListValidator([
                                 'ScatteringAngle', 'MomentumTransfer',
                                 'dSpacing'
                             ]),
                             doc='The unit of the reduced diffractogram.')

        self.declareProperty(
            MatrixWorkspaceProperty('OutputWorkspace',
                                    '',
                                    direction=Direction.Output),
            doc='Output workspace containing the reduced data.')

    def PyExec(self):

        self._progress = Progress(self, start=0.0, end=1.0, nreports=4)

        self._configure()
        temp_ws = self._hide('temp')
        joined_ws = self._hide('joined')
        mon_ws = self._hide('mon')

        self._progress.report('Loading the data')
        LoadAndMerge(Filename=self.getPropertyValue('Run'),
                     LoaderName='LoadILLDiffraction',
                     OutputWorkspace=temp_ws)

        self._progress.report('Normalising and merging')
        if self._normalise_option == 'Time':
            if isinstance(mtd[temp_ws], WorkspaceGroup):
                for ws in mtd[temp_ws]:
                    # normalise to time here, before joining, since the duration is in sample logs
                    duration = ws.getRun().getLogData('duration').value
                    Scale(InputWorkspace=ws,
                          OutputWorkspace=ws,
                          Factor=1. / duration)
            else:
                duration = mtd[temp_ws].getRun().getLogData('duration').value
                Scale(InputWorkspace=temp_ws,
                      OutputWorkspace=temp_ws,
                      Factor=1. / duration)
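        # Scale multiplies by Factor, so Factor=1/duration converts the counts
        # of each run into count rates before ConjoinXRuns merges them.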

        try:
            ConjoinXRuns(InputWorkspaces=temp_ws,
                         SampleLogAsXAxis=self._observable,
                         OutputWorkspace=joined_ws)
        except RuntimeError:
            raise ValueError('Invalid scanning observable')

        DeleteWorkspace(temp_ws)

        ExtractMonitors(InputWorkspace=joined_ws,
                        DetectorWorkspace=joined_ws,
                        MonitorWorkspace=mon_ws)

        if self._normalise_option == 'Monitor':
            Divide(LHSWorkspace=joined_ws,
                   RHSWorkspace=mon_ws,
                   OutputWorkspace=joined_ws)
        elif self._normalise_option == 'ROI':
            self._normalise_to_roi(joined_ws)

        DeleteWorkspace(mon_ws)

        self._progress.report('Applying calibration or ROC if needed')
        if self._calibration_file:
            calib_ws = self._hide('calib')
            LoadNexusProcessed(Filename=self._calibration_file,
                               OutputWorkspace=calib_ws)
            Multiply(LHSWorkspace=joined_ws,
                     RHSWorkspace=calib_ws,
                     OutputWorkspace=joined_ws)
            DeleteWorkspace(calib_ws)

        if self._roc_file:
            roc_ws = self._hide('roc')
            LoadNexusProcessed(Filename=self._roc_file, OutputWorkspace=roc_ws)
            Multiply(LHSWorkspace=joined_ws,
                     RHSWorkspace=roc_ws,
                     OutputWorkspace=joined_ws)
            DeleteWorkspace(roc_ws)

        if self._sort_x_axis:
            SortXAxis(InputWorkspace=joined_ws, OutputWorkspace=joined_ws)

        theta_ws = self._hide('theta')
        ConvertSpectrumAxis(InputWorkspace=joined_ws,
                            OutputWorkspace=theta_ws,
                            Target='SignedTheta',
                            OrderAxis=False)
        theta_axis = mtd[theta_ws].getAxis(1).extractValues()
        DeleteWorkspace(theta_ws)
        first_positive_theta = int(np.where(theta_axis > 0)[0][0])

        if self._crop_negative:
            self.log().information(
                'First positive 2theta at workspace index: ' +
                str(first_positive_theta))
            CropWorkspace(InputWorkspace=joined_ws,
                          OutputWorkspace=joined_ws,
                          StartWorkspaceIndex=first_positive_theta)

        self._progress.report('Treating the zero counting cells')
        self._find_zero_cells(joined_ws)

        if self._zero_counting_option == 'Crop':
            self._crop_zero_cells(joined_ws, self._zero_cells)
        elif self._zero_counting_option == 'Interpolate':
            self._interpolate_zero_cells(joined_ws, theta_axis)

        target = 'SignedTheta'
        if self._unit == 'MomentumTransfer':
            target = 'ElasticQ'
        elif self._unit == 'dSpacing':
            target = 'ElasticDSpacing'

        ConvertSpectrumAxis(InputWorkspace=joined_ws,
                            OutputWorkspace=joined_ws,
                            Target=target)
        Transpose(InputWorkspace=joined_ws, OutputWorkspace=joined_ws)

        if self._rebin_width > 0:
            self._group_spectra(joined_ws)

        RenameWorkspace(InputWorkspace=joined_ws,
                        OutputWorkspace=self._out_name)
        self.setProperty('OutputWorkspace', self._out_name)

    def _configure(self):
        """
            Configures the input properties
        """
        self._out_name = self.getPropertyValue('OutputWorkspace')
        self._observable = self.getPropertyValue('Observable')
        self._sort_x_axis = self.getProperty('SortObservableAxis').value
        self._normalise_option = self.getPropertyValue('NormaliseTo')
        self._calibration_file = self.getPropertyValue('CalibrationFile')
        self._roc_file = self.getPropertyValue('ROCCorrectionFile')
        self._unit = self.getPropertyValue('Unit')
        self._crop_negative = self.getProperty('CropNegative2Theta').value
        self._zero_counting_option = self.getPropertyValue('ZeroCountingCells')
        self._rebin_width = self.getProperty('ScanAxisBinWidth').value
        if self._normalise_option == 'ROI':
            self._region_of_interest = self.getProperty('ROI').value

    def _find_zero_cells(self, ws):
        """
            Finds the cells counting zeros
            @param ws: the input workspace
        """
        self._zero_cells = []
        size = mtd[ws].blocksize()
        for spectrum in range(mtd[ws].getNumberHistograms()):
            counts = mtd[ws].readY(spectrum)
            if np.count_nonzero(counts) < size / 5:
                self._zero_cells.append(spectrum)
        self._zero_cells.sort()
        self.log().information('Found zero counting cells at indices: ' +
                               str(self._zero_cells))
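    # Illustrative threshold: with blocksize() == 100, a spectrum needs at
    # least 20 non-zero bins to escape the zero-counting list, since
    # np.count_nonzero(counts) < 100 / 5 flags it otherwise.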

    def _crop_zero_cells(self, ws, wsIndexList):
        """
            Crops out the spectra corresponding to zero counting pixels
            @param ws: the input workspace
            @param wsIndexList: list of workspace indices to crop out
        """
        MaskDetectors(Workspace=ws, WorkspaceIndexList=wsIndexList)
        ExtractUnmaskedSpectra(InputWorkspace=ws, OutputWorkspace=ws)

    def _interpolate_zero_cells(self, ws, theta_axis):
        """
            Interpolates the counts of zero counting cells linearly from the
            nearest non-zero neighbour cells
            @param ws: the input workspace
            @param theta_axis: the unordered signed 2theta axis
        """
        unable_to_interpolate = []
        for cell in self._zero_cells:
            prev_cell = cell - 1
            next_cell = cell + 1

            while prev_cell in self._zero_cells:
                prev_cell -= 1
            while next_cell in self._zero_cells:
                next_cell += 1

            if prev_cell == -1:
                self.log().notice(
                    'Unable to interpolate for cell #' + str(cell) +
                    ': no non-zero neighbour cell was found on the left side. Bin will be cropped.'
                )
                unable_to_interpolate.append(cell)
            if next_cell == mtd[ws].getNumberHistograms():
                self.log().notice(
                    'Unable to interpolate for cell #' + str(cell) +
                    ': no non-zero neighbour cell was found on the right side. Bin will be cropped.'
                )
                unable_to_interpolate.append(cell)

            if prev_cell >= 0 and next_cell < mtd[ws].getNumberHistograms():
                theta_prev = theta_axis[prev_cell]
                theta = theta_axis[cell]
                theta_next = theta_axis[next_cell]
                counts_prev = mtd[ws].readY(prev_cell)
                errors_prev = mtd[ws].readE(prev_cell)
                counts_next = mtd[ws].readY(next_cell)
                errors_next = mtd[ws].readE(next_cell)
                coefficient = (theta - theta_prev) / (theta_next - theta_prev)
                counts = counts_prev + coefficient * (counts_next -
                                                      counts_prev)
                errors = errors_prev + coefficient * (errors_next -
                                                      errors_prev)
                mtd[ws].setY(cell, counts)
                mtd[ws].setE(cell, errors)

        self._crop_zero_cells(ws, unable_to_interpolate)
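    # Worked example of the interpolation above (made-up values): with
    # theta_prev = 10.0, theta = 11.0 and theta_next = 13.0 the coefficient is
    # (11 - 10) / (13 - 10) = 1/3, so counts = counts_prev + (counts_next - counts_prev) / 3;
    # errors are weighted the same way, i.e. plain linear interpolation in
    # signed 2theta.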

    def _normalise_to_roi(self, ws):
        """
            Normalises counts to the sum of counts in the region-of-interest
            @param ws : input workspace with raw spectrum axis
        """
        roi_ws = self._hide('roi')
        theta_ws = self._hide('theta_ROI')
        ConvertSpectrumAxis(InputWorkspace=ws,
                            OutputWorkspace=theta_ws,
                            Target='SignedTheta')
        roi_pattern = self._parse_roi(theta_ws)
        SumSpectra(InputWorkspace=ws,
                   OutputWorkspace=roi_ws,
                   ListOfWorkspaceIndices=roi_pattern)
        SumSpectra(InputWorkspace=roi_ws, OutputWorkspace=roi_ws)
        Divide(LHSWorkspace=ws, RHSWorkspace=roi_ws, OutputWorkspace=ws)
        DeleteWorkspace(roi_ws)
        DeleteWorkspace(theta_ws)

    def _parse_roi(self, ws):
        """
            Parses the regions of interest string from 2theta ranges to workspace indices
            @param ws : input workspace with 2theta as spectrum axis
            @returns: roi as workspace indices, e.g. 7-20,100-123
        """
        result = ''
        axis = mtd[ws].getAxis(1).extractValues()
        index = 0
        while index < len(self._region_of_interest):
            start = self._region_of_interest[index]
            end = self._region_of_interest[index + 1]
            start_index = np.argwhere(axis > start)
            end_index = np.argwhere(axis < end)
            result += str(start_index[0][0]) + '-' + str(end_index[-1][0])
            result += ','
            index += 2
        self.log().information('ROI summing pattern is ' + result[:-1])
        return result[:-1]
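    # Illustrative output (hypothetical axis): ROI = [10, 20, 45, 50] over a
    # 2theta axis spanning 0 to 153.6 degrees could yield '7-20,100-123',
    # exactly the ListOfWorkspaceIndices syntax SumSpectra expects.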

    def _group_spectra(self, ws):
        """
            Groups the spectrum axis by summing spectra
            @param ws : the input workspace
        """
        new_axis = []
        start_index = 0
        axis = mtd[ws].getAxis(1).extractValues()
        grouped = self._hide('grouped')
        name = grouped
        while start_index < len(axis):
            end = axis[start_index] + self._rebin_width
            end_index = np.argwhere(axis < end)[-1][0]
            SumSpectra(InputWorkspace=ws,
                       OutputWorkspace=name,
                       StartWorkspaceIndex=int(start_index),
                       EndWorkspaceIndex=int(end_index))
            count = end_index - start_index + 1
            Scale(InputWorkspace=name, OutputWorkspace=name, Factor=1. / count)
            new_axis.append(np.sum(axis[start_index:end_index + 1]) / count)
            if name != grouped:
                AppendSpectra(InputWorkspace1=grouped,
                              InputWorkspace2=name,
                              OutputWorkspace=grouped)
                DeleteWorkspace(name)
            start_index = end_index + 1
            name = self._hide('ws_{0}'.format(start_index))
        spectrum_axis = NumericAxis.create(len(new_axis))
        for i in range(len(new_axis)):
            spectrum_axis.setValue(i, new_axis[i])
        mtd[grouped].replaceAxis(1, spectrum_axis)
        RenameWorkspace(InputWorkspace=grouped, OutputWorkspace=ws)
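The spectrum grouping above walks the observable axis in windows of ScanAxisBinWidth, sums the spectra inside each window and divides by the window population, so every group stores an average rather than a sum. The axis walk reduces to this self-contained sketch (values are made up; an ascending axis is assumed):

import numpy as np

axis = np.array([1.0, 1.2, 1.9, 2.4, 3.1, 3.2])   # stand-in observable values
width = 1.0                                       # stands in for ScanAxisBinWidth
new_axis = []
start_index = 0
while start_index < len(axis):
    end = axis[start_index] + width
    end_index = np.argwhere(axis < end)[-1][0]    # last value still inside the window
    count = end_index - start_index + 1
    new_axis.append(axis[start_index:end_index + 1].sum() / count)
    start_index = end_index + 1
print(new_axis)                                   # [1.3666..., 2.9]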
Example #42
    def PyExec(self):
        self._setup()
        if not self._use_corrections:
            logger.information('Not using corrections')
        if not self._use_can:
            logger.information('Not using container')

        prog_container = Progress(self, start=0.0, end=0.2, nreports=4)
        prog_container.report('Starting algorithm')

        # Units should be wavelength
        sample_unit = self._sample_workspace.getAxis(0).getUnit().unitID()
        sample_ws_wavelength = self._convert_units_wavelength(
            self._sample_workspace)

        container_ws_wavelength = (self._process_container_workspace(
            self._container_workspace, prog_container)
                                   if self._use_can else None)

        prog_corr = Progress(self, start=0.2, end=0.6, nreports=2)
        if self._use_corrections:
            prog_corr.report('Preprocessing corrections')

            if self._use_can:
                # Use container factors
                prog_corr.report('Correcting sample and container')
                factor_workspaces = self._get_factor_workspaces()
                output_workspace = self._correct_sample_can(
                    sample_ws_wavelength, container_ws_wavelength,
                    factor_workspaces)
                correction_type = 'sample_and_can_corrections'
            else:
                # Use sample factor only
                output_workspace = self._correct_sample(
                    sample_ws_wavelength, self._corrections_workspace[0])
                correction_type = 'sample_corrections_only'
                # Add corrections filename to log values
                prog_corr.report('Correcting sample')
                AddSampleLog(Workspace=output_workspace,
                             LogName='corrections_filename',
                             LogType='String',
                             LogText=self._corrections_ws_name)
        else:
            # Do simple subtraction
            output_workspace = self._subtract(sample_ws_wavelength,
                                              container_ws_wavelength)
            correction_type = 'can_subtraction'
            # Add container filename to log values
            can_base = self.getPropertyValue("CanWorkspace")
            if '_' in can_base:
                can_base = can_base[:can_base.index('_')]
            prog_corr.report('Adding container filename')
            AddSampleLog(Workspace=output_workspace,
                         LogName='container_filename',
                         LogType='String',
                         LogText=can_base)

        prog_wrkflow = Progress(self, 0.6, 1.0, nreports=5)
        # Record the container scale factor
        if self._use_can and self._scale_can:
            prog_wrkflow.report('Adding container scaling')
            AddSampleLog(Workspace=output_workspace,
                         LogName='container_scale',
                         LogType='Number',
                         LogText=str(self._can_scale_factor))

        # Record the container shift amount
        if self._use_can and self._shift_can:
            prog_wrkflow.report('Adding container shift')
            AddSampleLog(Workspace=output_workspace,
                         LogName='container_shift',
                         LogType='Number',
                         LogText=str(self._can_shift_factor))

        # Record the type of corrections applied
        prog_wrkflow.report('Adding correction type')
        AddSampleLog(Workspace=output_workspace,
                     LogName='corrections_type',
                     LogType='String',
                     LogText=correction_type)

        # Add original sample as log entry
        sam_base = self.getPropertyValue("SampleWorkspace")

        if '_' in sam_base:
            sam_base = sam_base[:sam_base.index('_')]
            prog_wrkflow.report('Adding sample filename')
            AddSampleLog(Workspace=output_workspace,
                         LogName='sample_filename',
                         LogType='String',
                         LogText=sam_base)

        # Convert Units back to original
        emode = str(output_workspace.getEMode())
        efixed = 0.0
        if emode == "Indirect":
            efixed = self._get_e_fixed(output_workspace)
        if sample_unit != 'Label':
            output_workspace = self._convert_units(output_workspace,
                                                   sample_unit, emode, efixed)

        if output_workspace.name():
            RenameWorkspace(
                InputWorkspace=output_workspace,
                OutputWorkspace=self.getPropertyValue('OutputWorkspace'))
        self.setProperty('OutputWorkspace', output_workspace)
        prog_wrkflow.report('Algorithm Complete')
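When corrections are disabled, the branch above falls back to a plain can subtraction. A minimal numpy sketch of the assumed arithmetic (the _subtract helper itself is outside this excerpt; the scale factor appears above only as the container_scale log entry, and all numbers are made up):

import numpy as np

sample = np.array([120.0, 115.0, 130.0])      # sample-plus-container counts
can = np.array([40.0, 38.0, 42.0])            # container-only counts
can_scale_factor = 0.9                        # analogous to self._can_scale_factor
corrected = sample - can_scale_factor * can
print(corrected)                              # [84.   80.8  92.2]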
Example #44
 def PyExec(self):
     process = self.getPropertyValue('ProcessAs')
     processes = ['Absorber', 'Beam', 'Transmission', 'Container', 'Sample']
     progress = Progress(self, start=0.0, end=1.0, nreports=processes.index(process) + 1)
     ws = '__' + self.getPropertyValue('OutputWorkspace')
     # we do not want the summing done by LoadAndMerge since it would be pair-wise and slow;
     # instead we load the runs as a list and merge them once with MergeRuns
     LoadAndMerge(Filename=self.getPropertyValue('Run').replace('+',','), LoaderName='LoadILLSANS', OutputWorkspace=ws)
     if isinstance(mtd[ws], WorkspaceGroup):
         tmp = '__tmp'+ws
         MergeRuns(InputWorkspaces=ws, OutputWorkspace=tmp)
         DeleteWorkspaces(ws)
         RenameWorkspace(InputWorkspace=tmp, OutputWorkspace=ws)
     self._instrument = mtd[ws].getInstrument().getName()
     self._normalise(ws)
     run = mtd[ws].getRun()
     if run.hasProperty('tof_mode'):
         if run.getLogData('tof_mode').value == 'TOF':
             self._mode = 'TOF'
     progress.report()
     if process in ['Beam', 'Transmission', 'Container', 'Sample']:
         absorber_ws = self.getProperty('AbsorberInputWorkspace').value
         if absorber_ws:
             self._apply_absorber(ws, absorber_ws)
         if process == 'Beam':
             self._process_beam(ws)
             progress.report()
         else:
             beam_ws = self.getProperty('BeamInputWorkspace').value
             if beam_ws:
                 self._apply_beam(ws, beam_ws)
             if process == 'Transmission':
                 self._process_transmission(ws, beam_ws)
                 progress.report()
             else:
                 transmission_ws = self.getProperty('TransmissionInputWorkspace').value
                 if transmission_ws:
                     self._apply_transmission(ws, transmission_ws)
                 solid_angle = self._make_solid_angle_name(ws)
                 cache = self.getProperty('CacheSolidAngle').value
                 if (cache and not mtd.doesExist(solid_angle)) or not cache:
                     if self._instrument == "D16":
                         run = mtd[ws].getRun()
                         distance = run.getLogData('L2').value
                         CloneWorkspace(InputWorkspace=ws, OutputWorkspace=solid_angle)
                         MoveInstrumentComponent(Workspace=solid_angle, X=0, Y=0, Z=distance,
                                                 RelativePosition=False, ComponentName="detector")
                         RotateInstrumentComponent(Workspace=solid_angle, X=0, Y=1, Z=0, angle=0,
                                                   RelativeRotation=False, ComponentName="detector")
                         input_solid = solid_angle
                     else:
                         input_solid = ws
                     SolidAngle(InputWorkspace=input_solid, OutputWorkspace=solid_angle,
                                Method=self._get_solid_angle_method(self._instrument))
                 Divide(LHSWorkspace=ws, RHSWorkspace=solid_angle, OutputWorkspace=ws, WarnOnZeroDivide=False)
                 if not cache:
                     DeleteWorkspace(solid_angle)
                 progress.report()
                 if process == 'Sample':
                     container_ws = self.getProperty('ContainerInputWorkspace').value
                     if container_ws:
                         self._apply_container(ws, container_ws)
                     self._apply_masks(ws)
                     thickness = self.getProperty('SampleThickness').value
                     NormaliseByThickness(InputWorkspace=ws, OutputWorkspace=ws, SampleThickness=thickness)
                     # parallax (gondola) effect
                     if self._instrument in ['D22', 'D22lr', 'D33']:
                         self._apply_parallax(ws)
                     progress.report()
                     sensitivity_out = self.getPropertyValue('SensitivityOutputWorkspace')
                     if sensitivity_out:
                         self._process_sensitivity(ws, sensitivity_out)
                     self._process_sample(ws)
                     progress.report()
     self._finalize(ws, process)
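The solid-angle handling above is a compute-if-absent cache keyed on a workspace name: compute unless caching is on and the name already exists, and delete afterwards when caching is off. The control flow reduces to this sketch (the dictionary and names stand in for Mantid's AnalysisDataService; nothing here is the real API):

cache_store = {}                              # stands in for mtd / the ADS

def solid_angle_for(name, cache, compute):
    # mirrors: if (cache and not mtd.doesExist(name)) or not cache: SolidAngle(...)
    if (cache and name not in cache_store) or not cache:
        cache_store[name] = compute()
    value = cache_store[name]
    if not cache:
        del cache_store[name]                 # mirrors DeleteWorkspace when caching is off
    return value

print(solid_angle_for('sa_D22_8m', cache=True, compute=lambda: 0.73))   # computes 0.73
print(solid_angle_for('sa_D22_8m', cache=True, compute=lambda: 0.99))   # reuses cached 0.73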
    def PyExec(self):
        temporary_workspaces = []
        self.temp_ws = temp_workspace_generator(temporary_workspaces)  # name generator for temporary workspaces

        prefix_output = self.getProperty('OutputWorkspacesPrefix').value
        progress_percent_start, progress_percent_end, reports_count = 0.0, 0.01, 5
        progress = Progress(self, progress_percent_start, progress_percent_end,
                            reports_count)
        input_workspace = self.getPropertyValue('InputWorkspace')  # name of the input workspace
        adjustment_diagnostics = list()  # workspace names that analyze the orientation of the banks

        # Create a grouping workspace whereby we group detectors by banks
        grouping_workspace = self.temp_ws()  # a temporary name
        CreateGroupingWorkspace(InputWorkspace=input_workspace,
                                OutputWorkspace=grouping_workspace,
                                GroupDetectorsBy='bank')

        # Remove delayed emission time from the moderator
        kwargs = dict(InputWorkspace=input_workspace,
                      Emode='Elastic',
                      OutputWorkspace=input_workspace)
        self.run_algorithm('ModeratorTzero', 0, 0.02, soft_crash=True, **kwargs)
        progress.report('ModeratorTzero has been applied')

        # Find dSpacing to TOF conversion DIFC parameter
        difc_table = f'{prefix_output}PDCalibration_difc'
        diagnostics_workspaces = prefix_output + 'PDCalibration_diagnostics'  # group workspace
        kwargs = dict(InputWorkspace=input_workspace,
                      TofBinning=self.getProperty('TofBinning').value,
                      PeakFunction=self.getProperty('PeakFunction').value,
                      PeakPositions=self.getProperty('PeakPositions').value,
                      CalibrationParameters='DIFC',
                      OutputCalibrationTable=difc_table,
                      DiagnosticWorkspaces=diagnostics_workspaces)
        PDCalibration(**kwargs)
        progress.report('PDCalibration has been applied')

        # Create one spectra in d-spacing for each bank using the original instrument geometry
        self.fitted_in_dspacing(
            fitted_in_tof=prefix_output + 'PDCalibration_diagnostics_fitted',
            workspace_with_instrument=input_workspace,
            output_workspace=prefix_output + 'PDCalibration_peaks_original',
            grouping_workspace=grouping_workspace)
        adjustment_diagnostics.append(prefix_output +
                                      'PDCalibration_peaks_original')

        # Find the peak centers in TOF units, for the peaks found at each pixel
        peak_centers_in_tof = prefix_output + 'PDCalibration_diagnostics_tof'
        self.centers_in_tof(
            prefix_output + 'PDCalibration_diagnostics_dspacing', difc_table,
            peak_centers_in_tof)
        mtd[diagnostics_workspaces].add(peak_centers_in_tof)

        # Find the Histogram of peak deviations (in d-spacing units)
        # for each bank, using the original instrument geometry
        self.histogram_peak_deviations(
            prefix_output + 'PDCalibration_diagnostics_tof', input_workspace,
            prefix_output + 'peak_deviations_original', grouping_workspace)
        adjustment_diagnostics.append(prefix_output +
                                      'peak_deviations_original')

        # repeat with percent peak deviations for each bank, using the adjusted instrument geometry
        self.histogram_peak_deviations(
            prefix_output + 'PDCalibration_diagnostics_tof',
            input_workspace,
            prefix_output + 'percent_peak_deviations_original',
            grouping_workspace,
            deviation_params=[-10, 0.01, 10],
            percent_deviations=True)
        adjustment_diagnostics.append(prefix_output +
                                      'percent_peak_deviations_original')

        # store the DIFC and DIFC_mask workspace created by PDCalibration in the diagnostics workspace
        mtd[diagnostics_workspaces].add(difc_table)
        mtd[diagnostics_workspaces].add(difc_table + '_mask')

        adjustments_table_name = f'{prefix_output}adjustments'
        # Adjust the position of the source along the beam (Z) axis
        # The instrument in `input_workspace` is adjusted in-place
        if self.getProperty('AdjustSource').value is True:
            dz = self.getProperty('SourceMaxTranslation').value
            kwargs = dict(
                InputWorkspace=input_workspace,
                OutputWorkspace=input_workspace,
                PeakCentersTofTable=peak_centers_in_tof,
                PeakPositions=self.getProperty('PeakPositions').value,
                MaskWorkspace=f'{difc_table}_mask',
                FitSourcePosition=True,
                FitSamplePosition=False,
                Zposition=True,
                MinZPosition=-dz,
                MaxZPosition=dz,
                Minimizer='L-BFGS-B')
            self.run_algorithm('AlignComponents', 0.1, 0.2, **kwargs)
        else:
            # Impose the fixed position of the source and save into the adjustments table
            self._fixed_source_set_and_table(adjustments_table_name)
        # Translate and rotate each bank, only after the source has been adjusted
        # The instrument in `input_workspace` is adjusted in-place

        # Translation options to AlignComponents
        dt = self.getProperty('ComponentMaxTranslation').value  # maximum translation along either axis
        move_y = not self.getProperty('FixY').value
        kwargs_transl = dict(Xposition=True,
                             MinXPosition=-dt,
                             MaxXPosition=dt,
                             Yposition=move_y,
                             MinYPosition=-dt,
                             MaxYPosition=dt,
                             Zposition=True,
                             MinZPosition=-dt,
                             MaxZPosition=dt)

        # Rotation options for AlignComponents
        dr = self.getProperty('ComponentMaxRotation').value  # maximum rotation about either axis
        rot_z = not self.getProperty('FixYaw').value
        kwargs_rotat = dict(AlphaRotation=True,
                            MinAlphaRotation=-dr,
                            MaxAlphaRotation=dr,
                            BetaRotation=True,
                            MinBetaRotation=-dr,
                            MaxBetaRotation=dr,
                            GammaRotation=rot_z,
                            MinGammaRotation=-dr,
                            MaxGammaRotation=dr,
                            EulerConvention='YXZ')

        # Remaining options for AlignComponents
        displacements_table_name = f'{prefix_output}displacements'
        kwargs = dict(InputWorkspace=input_workspace,
                      OutputWorkspace=input_workspace,
                      PeakCentersTofTable=peak_centers_in_tof,
                      PeakPositions=self.getProperty('PeakPositions').value,
                      MaskWorkspace=f'{difc_table}_mask',
                      AdjustmentsTable=adjustments_table_name + '_banks',
                      DisplacementsTable=displacements_table_name,
                      FitSourcePosition=False,
                      FitSamplePosition=False,
                      ComponentList=self.getProperty('ComponentList').value,
                      Minimizer=self.getProperty('Minimizer').value,
                      MaxIterations=self.getProperty('MaxIterations').value)

        self.run_algorithm('AlignComponents', 0.2, 0.97, **kwargs,
                           **kwargs_transl, **kwargs_rotat)
        progress.report('AlignComponents has been applied')

        # AlignComponents produces two unwanted workspaces
        temporary_workspaces.append('calWS')

        # Append the banks table to the source table, then delete the banks table.
        self._append_second_to_first(adjustments_table_name,
                                     adjustments_table_name + '_banks')
        # Create one spectra in d-spacing for each bank using the adjusted instrument geometry.
        # The spectra can be compare to those of prefix_output + 'PDCalibration_peaks_original'
        self.fitted_in_dspacing(
            fitted_in_tof=prefix_output + 'PDCalibration_diagnostics_fitted',
            workspace_with_instrument=input_workspace,
            output_workspace=prefix_output + 'PDCalibration_peaks_adjusted',
            grouping_workspace=grouping_workspace)
        adjustment_diagnostics.append(prefix_output +
                                      'PDCalibration_peaks_adjusted')

        # Find the Histogram of peak deviations (in d-spacing units)
        # for each bank, using the adjusted instrument geometry
        self.histogram_peak_deviations(
            prefix_output + 'PDCalibration_diagnostics_tof', input_workspace,
            prefix_output + 'peak_deviations_adjusted', grouping_workspace)
        adjustment_diagnostics.append(prefix_output +
                                      'peak_deviations_adjusted')

        # repeat with percent peak deviations for each bank, using the adjusted instrument geometry
        self.histogram_peak_deviations(
            prefix_output + 'PDCalibration_diagnostics_tof',
            input_workspace,
            prefix_output + 'percent_peak_deviations_adjusted',
            grouping_workspace,
            deviation_params=[-10, 0.01, 10],
            percent_deviations=True)
        adjustment_diagnostics.append(prefix_output +
                                      'percent_peak_deviations_adjusted')

        # summarize the changes observed in the histogram of percent peak deviations
        self.peak_deviations_summarize(
            prefix_output + 'percent_peak_deviations_original',
            prefix_output + 'percent_peak_deviations_adjusted',
            prefix_output + 'percent_peak_deviations_summary')
        adjustment_diagnostics.append(prefix_output +
                                      'percent_peak_deviations_summary')

        # Create a WorkspaceGroup with the orientation diagnostics
        GroupWorkspaces(InputWorkspaces=adjustment_diagnostics,
                        OutputWorkspace=prefix_output +
                        'bank_adjustment_diagnostics')

        # clean up at the end (only happens if the algorithm completes successfully)
        [
            DeleteWorkspace(name) for name in temporary_workspaces
            if AnalysisDataService.doesExist(name)
        ]
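The AlignComponents call above assembles its argument list from three dictionaries, which keeps the translation and rotation bounds readable and lets each group be toggled independently. A minimal illustration of the merge semantics (the callee is a stand-in, not the Mantid algorithm):

def align(**kwargs):                          # stand-in for the AlignComponents call
    return sorted(kwargs)

kwargs = dict(InputWorkspace='ws', Minimizer='L-BFGS-B')
kwargs_transl = dict(Xposition=True, MinXPosition=-0.02, MaxXPosition=0.02)
kwargs_rotat = dict(AlphaRotation=True, MinAlphaRotation=-5, MaxAlphaRotation=5)
print(align(**kwargs, **kwargs_transl, **kwargs_rotat))
# a key repeated across the dicts would raise TypeError, so the groups must stay disjoint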
Example #46
    def PyExec(self):
        in_Runs = self.getProperty("RunNumbers").value
        progress = Progress(self, 0., .25, 3)
        finalUnits = self.getPropertyValue("FinalUnits")

        # default arguments for AlignAndFocusPowder
        self.alignAndFocusArgs = {'Tmin': 0,
                                  'TMax': 50000,
                                  'RemovePromptPulseWidth': 1600,
                                  'PreserveEvents': False,
                                  'Dspacing': True,  # binning parameters in d-space
                                  'Params': self.getProperty("Binning").value,
                                  }

        # workspace for loading metadata only to be used in LoadDiffCal and
        # CreateGroupingWorkspace
        metaWS = None

        # either type of file-based calibration is stored in the same variable
        calib = self.getProperty("Calibration").value
        detcalFile = None
        if calib == "Calibration File":
            metaWS = self._loadMetaWS(in_Runs[0])
            LoadDiffCal(Filename=self.getPropertyValue("CalibrationFilename"),
                        WorkspaceName='SNAP',
                        InputWorkspace=metaWS,
                        MakeGroupingWorkspace=False, MakeMaskWorkspace=False)
            self.alignAndFocusArgs['CalibrationWorkspace'] = 'SNAP_cal'
        elif calib == 'DetCal File':
            detcalFile = ','.join(self.getProperty('DetCalFilename').value)
        progress.report('loaded calibration')

        norm = self.getProperty("Normalization").value

        if norm == "From Processed Nexus":
            norm_File = self.getProperty("NormalizationFilename").value
            normalizationWS = 'normWS'
            LoadNexusProcessed(Filename=norm_File, OutputWorkspace=normalizationWS)
            progress.report('loaded normalization')
        elif norm == "From Workspace":
            normalizationWS = str(self.getProperty("NormalizationWorkspace").value)
            progress.report('')
        else:
            normalizationWS = None
            progress.report('')

        self.alignAndFocusArgs['GroupingWorkspace'] = self._generateGrouping(in_Runs[0], metaWS, progress)
        self.alignAndFocusArgs['MaskWorkspace'] = self._getMaskWSname(in_Runs[0], metaWS)  # can be empty string

        if metaWS is not None:
            DeleteWorkspace(Workspace=metaWS)

        Process_Mode = self.getProperty("ProcessingMode").value

        prefix = self.getProperty("OptionalPrefix").value

        Tag = 'SNAP'
        progStart = .25
        progDelta = (1.-progStart)/len(in_Runs)

        # --------------------------- PROCESS BACKGROUND ----------------------
        if not self.getProperty('Background').isDefault:
            progDelta = (1. - progStart) / (len(in_Runs) + 1)  # redefine to account for background

            background = 'SNAP_{}'.format(self.getProperty('Background').value)
            self.log().notice("processing run background {}".format(background))
            background, unfocussedBkgd = self._alignAndFocus(background,
                                                             background+'_bkgd_red',
                                                             detCalFilename=detcalFile,
                                                             withUnfocussed=(Process_Mode == 'Set-Up'),
                                                             progStart=progStart, progDelta=progDelta)
        else:
            background = None
            unfocussedBkgd = ''

        # --------------------------- REDUCE DATA -----------------------------

        for i, runnumber in enumerate(in_Runs):
            self.log().notice("processing run %s" % runnumber)
            self.log().information(str(self.get_IPTS_Local(runnumber)))

            # put together output names
            new_Tag = Tag
            if len(prefix) > 0:
                new_Tag = prefix + '_' + new_Tag
            basename = '%s_%s_%s' % (new_Tag, runnumber, self.alignAndFocusArgs['GroupingWorkspace'])
            self.log().warning('{}:{}:{}'.format(i, new_Tag, basename))
            redWS, unfocussedWksp = self._alignAndFocus('SNAP_{}'.format(runnumber),
                                                        basename + '_red',
                                                        detCalFilename=detcalFile,
                                                        withUnfocussed=(Process_Mode == 'Set-Up'),
                                                        progStart=progStart, progDelta=progDelta*.5)
            progStart += .5 * progDelta

            # subtract the background if it was supplied
            if background:
                self.log().information('subtracting {} from {}'.format(background, redWS))
                Minus(LHSWorkspace=redWS, RHSWorkspace=background, OutputWorkspace=redWS)
                # intentionally don't subtract the unfocussed workspace since it hasn't been normalized by counting time

            # the remaining steps take a quarter of this run's progress slice
            progress = Progress(self, progStart, progStart+.25*progDelta, 2)

            # AlignAndFocusPowder leaves the data in time-of-flight
            ConvertUnits(InputWorkspace=redWS, OutputWorkspace=redWS, Target='dSpacing', EMode='Elastic')

            # Edit instrument geometry to make final workspace smaller on disk
            det_table = PreprocessDetectorsToMD(Inputworkspace=redWS,
                                                OutputWorkspace='__SNAP_det_table')
            polar = np.degrees(det_table.column('TwoTheta'))
            azi = np.degrees(det_table.column('Azimuthal'))
            EditInstrumentGeometry(Workspace=redWS, L2=det_table.column('L2'),
                                   Polar=polar, Azimuthal=azi)
            mtd.remove('__SNAP_det_table')
            progress.report('simplify geometry')

            # AlignAndFocus doesn't necessarily rebin the data correctly
            if Process_Mode == "Set-Up":
                Rebin(InputWorkspace=unfocussedWksp, Params=self.alignAndFocusArgs['Params'],
                      Outputworkspace=unfocussedWksp)
                if background:
                    Rebin(InputWorkspace=unfocussedBkgd, Params=self.alignAndFocusArgs['Params'],
                          Outputworkspace=unfocussedBkgd)
            # normalize the data as requested
            normalizationWS = self._generateNormalization(redWS, norm, normalizationWS)
            normalizedWS = None
            if normalizationWS is not None:
                normalizedWS = basename + '_nor'
                Divide(LHSWorkspace=redWS, RHSWorkspace=normalizationWS,
                       OutputWorkspace=normalizedWS)
                ReplaceSpecialValues(Inputworkspace=normalizedWS,
                                     OutputWorkspace=normalizedWS,
                                     NaNValue='0', NaNError='0',
                                     InfinityValue='0', InfinityError='0')
                progress.report('normalized')
            else:
                progress.report()

            # rename everything as appropriate and determine output workspace name
            if normalizedWS is None:
                outputWksp = redWS
            else:
                outputWksp = normalizedWS

                if norm == "Extracted from Data" and Process_Mode == "Production":
                        DeleteWorkspace(Workspace=redWS)
                        DeleteWorkspace(Workspace=normalizationWS)

            # Save requested formats
            saveDir = self.getPropertyValue("OutputDirectory").strip()
            if len(saveDir) <= 0:
                self.log().notice('Using default save location')
                saveDir = os.path.join(self.get_IPTS_Local(runnumber), 'shared', 'data')
            self._save(saveDir, basename, outputWksp)

            # set workspace as an output so it gets history
            ConvertUnits(InputWorkspace=str(outputWksp), OutputWorkspace=str(outputWksp), Target=finalUnits,
                         EMode='Elastic')
            self._exportWorkspace('OutputWorkspace_' + str(outputWksp), outputWksp)

            # declare some things as extra outputs in set-up
            if Process_Mode != "Production":
                propprefix = 'OutputWorkspace_{:d}_'.format(i)
                propNames = [propprefix + it for it in ['d', 'norm', 'normalizer']]
                wkspNames = ['%s_%s_d' % (new_Tag, runnumber),
                             basename + '_red',
                             '%s_%s_normalizer' % (new_Tag, runnumber)]
                for (propName, wkspName) in zip(propNames, wkspNames):
                    self._exportWorkspace(propName, wkspName)

        if background:
            ConvertUnits(InputWorkspace=str(background), OutputWorkspace=str(background), Target=finalUnits,
                         EMode='Elastic')
            prefix = 'OutputWorkspace_{}'.format(len(in_Runs))
            propNames = [prefix + it for it in ['', '_d']]
            wkspNames = [background, unfocussedBkgd]
            for (propName, wkspName) in zip(propNames, wkspNames):
                self._exportWorkspace(propName, wkspName)
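The run loop above hands each run an equal slice of the remaining progress range, spending half of the slice on focusing and a quarter on the post-processing steps. The bookkeeping reduces to the sketch below; note that the start pointer advances by only half a slice per iteration, so the bar ends short of 1.0 (numbers are made up):

prog_start, n_runs = 0.25, 3                  # the first quarter covers setup
prog_delta = (1.0 - prog_start) / n_runs      # equal slice per run
for run in range(n_runs):
    # first half of the slice: align and focus
    prog_start += 0.5 * prog_delta
    # next quarter of the slice: geometry editing and normalisation
    rest = (prog_start, prog_start + 0.25 * prog_delta)
    print(run, rest)                          # (0.375, 0.4375), (0.5, 0.5625), (0.625, 0.6875)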
Example #47
    def PyExec(self):
        """Execute the data collection workflow."""
        progress = Progress(self, 0.0, 1.0, 9)
        self._report = utils.Report()
        self._subalgLogging = self.getProperty(common.PROP_SUBALG_LOGGING).value == common.SUBALG_LOGGING_ON
        namePrefix = self.getProperty(common.PROP_OUTPUT_WS).valueAsStr
        cleanupMode = self.getProperty(common.PROP_CLEANUP_MODE).value
        self._cleanup = utils.Cleanup(cleanupMode, self._subalgLogging)
        self._names = utils.NameSource(namePrefix, cleanupMode)

        # The variables 'mainWS' and 'monWS' hold the current main
        # data throughout the algorithm.

        # Get input workspace.
        progress.report('Loading inputs')
        mainWS = self._inputWS()

        # Extract monitors to a separate workspace.
        progress.report('Extracting monitors')
        mainWS, monWS = self._separateMons(mainWS)

        # Save the main workspace for later use, if needed.
        rawWS = None
        if not self.getProperty(common.PROP_OUTPUT_RAW_WS).isDefault:
            rawWS = mainWS
            self._cleanup.protect(rawWS)

        # Normalisation to monitor/time, if requested.
        progress.report('Normalising to monitor/time')
        monWS = self._flatBkgMon(monWS)
        monEPPWS = self._createEPPWSMon(monWS)
        mainWS = self._normalize(mainWS, monWS, monEPPWS)

        # Time-independent background.
        progress.report('Calculating backgrounds')
        mainWS = self._flatBkgDet(mainWS)

        # Calibrate incident energy, if requested.
        progress.report('Calibrating incident energy')
        mainWS, monWS = self._calibrateEi(mainWS, monWS, monEPPWS)
        self._cleanup.cleanup(monWS, monEPPWS)

        # Add the Ei as Efixed instrument parameter
        _addEfixedInstrumentParameter(mainWS)

        progress.report('Correcting TOF')
        mainWS = self._correctTOFAxis(mainWS)
        self._outputRaw(mainWS, rawWS)

        # Find elastic peak positions.
        progress.report('Calculating EPPs')
        self._outputDetEPPWS(mainWS)

        self._finalize(mainWS)
        progress.report('Done')
    def _get_progress(self):
        return Progress(self, start=0.0, end=1.0, nreports=10)
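
The workflows above all lean on the same Progress pattern: bind a reporter to a slice of the algorithm's progress bar, then advance it with named report() calls. A hedged, self-contained sketch; the algorithm itself is illustrative, not part of this example.

from mantid.api import AlgorithmFactory, Progress, PythonAlgorithm

class ProgressSketch(PythonAlgorithm):
    def category(self):
        return 'Examples'

    def PyInit(self):
        pass

    def PyExec(self):
        # Three reports share the 0.0-1.0 span of the progress bar.
        prog = Progress(self, start=0.0, end=1.0, nreports=3)
        for step in ('load', 'reduce', 'save'):
            prog.report('Running step: ' + step)

AlgorithmFactory.subscribe(ProgressSketch)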
Example #49
class PelicanCrystalProcessing(DataProcessorAlgorithm):
    def category(self):
        return "Workflow"

    def summary(self):
        return 'Performs crystal processing for ANSTO Pelican data.'

    def seeAlso(self):
        return ["NA"]

    def name(self):
        return "PelicanCrystalProcessing"

    def PyInit(self):

        mandatoryInputRuns = CompositeValidator()
        mandatoryInputRuns.add(StringArrayMandatoryValidator())
        self.declareProperty(StringArrayProperty('SampleRuns',
                                                 values=[],
                                                 validator=mandatoryInputRuns),
                             doc='Comma separated range of sample runs,\n'
                             ' eg [cycle::] 7333-7341,7345')

        self.declareProperty(
            name='EmptyRuns',
            defaultValue='',
            doc='Optional path followed by comma separated range of runs,\n'
            'looking for runs in the sample folder if path not included,\n'
            '  eg [cycle::] 6300-6308')

        self.declareProperty(name='ScaleEmptyRuns',
                             defaultValue=1.0,
                             doc='Scale the empty runs prior to subtraction')

        self.declareProperty(
            name='CalibrationRuns',
            defaultValue='',
            doc='Optional path followed by comma separated range of runs,\n'
            'looking for runs in the sample folder if path not included,\n'
            '  eg [cycle::] 6350-6365')

        self.declareProperty(
            name='EmptyCalibrationRuns',
            defaultValue='',
            doc='Optional path followed by comma separated range of runs,\n'
            'looking for runs in the sample folder if path not included,\n'
            '  eg [cycle::] 6370-6375')

        self.declareProperty(
            name='EnergyTransfer',
            defaultValue='0.0, 0.02, 3.0',
            doc='Energy transfer range in meV expressed as min, step, max')

        self.declareProperty(
            name='MomentumTransfer',
            defaultValue='',
            doc='Momentum transfer range in inverse Angstroms,\n'
            'expressed as min, step, max. Default estimates\n'
            'the max range based on energy transfer.')

        self.declareProperty(
            name='LambdaOnTwoMode',
            defaultValue=False,
            doc='Set if instrument running in lambda on two mode.')

        self.declareProperty(
            name='FrameOverlap',
            defaultValue=False,
            doc='Set if the energy transfer extends over a frame.')

        self.declareProperty(name='FixedDetector',
                             defaultValue=True,
                             doc='Fix detector positions to the first run')

        self.declareProperty(FileProperty('ScratchFolder',
                                          '',
                                          action=FileAction.OptionalDirectory,
                                          direction=Direction.Input),
                             doc='Path to save and restore merged workspaces.')

        mandatoryOutputFolder = CompositeValidator()
        mandatoryOutputFolder.add(StringArrayMandatoryValidator())
        self.declareProperty(FileProperty('OutputFolder',
                                          '',
                                          action=FileAction.Directory,
                                          direction=Direction.Input),
                             doc='Path to save the output nxspe files.')

        self.declareProperty(
            FileProperty('ConfigurationFile',
                         '',
                         action=FileAction.OptionalLoad,
                         extensions=['ini']),
            doc='Optional: INI file to override default processing values.')

        self.declareProperty(name='KeepReducedWorkspace',
                             defaultValue=False,
                             doc='Keep the last reduced workspace.')

    def PyExec(self):

        self._first_run = None

        # Get the list of data files from the runs
        sample_runs = self.getPropertyValue('SampleRuns')
        output_folder = self.getPropertyValue('OutputFolder')
        scratch_folder = self.getPropertyValue('ScratchFolder')
        keep_reduced_ws = self.getProperty('KeepReducedWorkspace').value
        cycle_runs = expand_as_cycle_runs(sample_runs)

        # set up progress bar
        steps = len(cycle_runs) + 1
        self._progress = Progress(self, start=0.0, end=1.0, nreports=steps)

        saveFolder = scratch_folder if scratch_folder else config['defaultsave.directory']
        for cycle, run in cycle_runs:

            if cycle:
                srun = str(cycle) + ':: ' + str(run)
            else:
                srun = str(run)
            self._progress.report("Processing run {}, ".format(run))

            PelicanReduction(
                SampleRuns=srun,
                EmptyRuns=self.getPropertyValue('EmptyRuns'),
                EnergyTransfer=self.getPropertyValue('EnergyTransfer'),
                MomentumTransfer=self.getPropertyValue('MomentumTransfer'),
                Processing='NXSPE',
                LambdaOnTwoMode=self.getProperty('LambdaOnTwoMode').value,
                FrameOverlap=self.getProperty('FrameOverlap').value,
                ScratchFolder=scratch_folder,
                OutputWorkspace='nxspe',
                ConfigurationFile=self.getPropertyValue('ConfigurationFile'))

            # the nxspe file named 'nxspe_spe_2D.nxspe' is moved to the output folder and renamed
            dfile = 'run_{:d}.nxspe'.format(run)
            dpath = os.path.join(output_folder, dfile)
            if os.path.isfile(dpath):
                os.remove(dpath)
            spath = os.path.join(saveFolder, 'nxspe_spe_2D.nxspe')
            os.rename(spath, dpath)

            if not self._first_run:
                self._first_run = dpath
            elif self.getProperty('FixedDetector').value:
                copy_datset_nodes(self._first_run, dpath, [
                    '/nxspe_spe_2Ddet/data/azimuthal',
                    '/nxspe_spe_2Ddet/data/azimuthal_width',
                    '/nxspe_spe_2Ddet/data/polar',
                    '/nxspe_spe_2Ddet/data/polar_width'
                ])

            # the pelican reduction saves temporary files 'PLN00nnnn_sample.nxs' and 'PLN00nnnn.nxs' to
            # speed up processing; remove these files to save space
            if scratch_folder:
                for sfile in ['PLN{:07d}_sample.nxs', 'PLN{:07d}.nxs']:
                    tfile = sfile.format(run)
                    tpath = os.path.join(scratch_folder, tfile)
                    os.remove(tpath)

        # delete the workspace, as it is only temporary
        if not keep_reduced_ws:
            self._progress.report("Cleaning up file,")
            DeleteWorkspace(Workspace='nxspe_spe_2D')
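
For reference, a hypothetical invocation of the algorithm defined above, assuming it has been registered with Mantid; the run ranges echo the examples in the property documentation, and the output folder is a placeholder.

PelicanCrystalProcessing(SampleRuns='7333-7341,7345',
                         EmptyRuns='6300-6308',
                         EnergyTransfer='0.0, 0.02, 3.0',
                         OutputFolder='/tmp/nxspe')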
Example #50
    def PyExec(self):
        self._eulerConvention = self.getProperty('EulerConvention').value
        calWS = self.getProperty('CalibrationTable').value
        calWS = api.SortTableWorkspace(calWS, Columns='detid')
        maskWS = self.getProperty("MaskWorkspace").value

        difc = calWS.column('difc')
        if maskWS is not None:
            self._masking = True
            mask = maskWS.extractY().flatten()
            difc = np.ma.masked_array(difc, mask)

        detID = calWS.column('detid')

        if self.getProperty("Workspace").value is not None:
            wks_name = self.getProperty("Workspace").value.name()
        else:
            wks_name = "alignedWorkspace"
            api.LoadEmptyInstrument(
                Filename=self.getProperty("InstrumentFilename").value,
                OutputWorkspace=wks_name)

        # Make a dictionary of what options are being refined for sample/source. No rotation.
        for opt in self._optionsList[:3]:
            self._optionsDict[opt] = self.getProperty(opt).value
        for opt in self._optionsList[3:]:
            self._optionsDict[opt] = False

        # First fit L1 if selected for Source and/or Sample
        for component in "Source", "Sample":
            if self.getProperty("Fit" + component + "Position").value:
                self._move = True
                if component == "Sample":
                    comp = api.mtd[wks_name].getInstrument().getSample()
                else:
                    comp = api.mtd[wks_name].getInstrument().getSource()
                componentName = comp.getFullName()
                logger.notice("Working on " + componentName +
                              " Starting position is " + str(comp.getPos()))
                firstIndex = 0
                lastIndex = len(difc)
                if self._masking:
                    mask_out = mask[firstIndex:lastIndex + 1]
                else:
                    mask_out = None

                self._initialPos = [
                    comp.getPos().getX(),
                    comp.getPos().getY(),
                    comp.getPos().getZ(), 0, 0, 0
                ]

                # Set up x0 and bounds lists
                x0List = []
                boundsList = []
                for iopt, opt in enumerate(self._optionsList[:3]):
                    if self._optionsDict[opt]:
                        x0List.append(self._initialPos[iopt])
                        boundsList.append(
                            (self._initialPos[iopt] +
                             self.getProperty("Min" + opt).value,
                             self._initialPos[iopt] +
                             self.getProperty("Max" + opt).value))

                results = minimize(self._minimisation_func,
                                   x0=x0List,
                                   method='L-BFGS-B',
                                   args=(wks_name, componentName, firstIndex,
                                         lastIndex, difc[firstIndex:lastIndex +
                                                         1], mask_out),
                                   bounds=boundsList)

                # Apply the results to the output workspace
                xmap = self._mapOptions(results.x)

                # Need to grab the component again, as things have changed
                api.MoveInstrumentComponent(wks_name,
                                            componentName,
                                            X=xmap[0],
                                            Y=xmap[1],
                                            Z=xmap[2],
                                            RelativePosition=False)
                comp = api.mtd[wks_name].getInstrument().getComponentByName(
                    componentName)
                logger.notice("Finished " + componentName +
                              " Final position is " + str(comp.getPos()))
                self._move = False

        # Now fit all the components if any
        components = self.getProperty("ComponentList").value

        # Make a dictionary of what options are being refined.
        for opt in self._optionsList:
            self._optionsDict[opt] = self.getProperty(opt).value

        self._move = (self._optionsDict["Xposition"]
                      or self._optionsDict["Yposition"]
                      or self._optionsDict["Zposition"])

        self._rotate = (self._optionsDict["AlphaRotation"]
                        or self._optionsDict["BetaRotation"]
                        or self._optionsDict["GammaRotation"])

        prog = Progress(self, start=0, end=1, nreports=len(components))
        for component in components:
            comp = api.mtd[wks_name].getInstrument().getComponentByName(
                component)
            firstDetID = self._getFirstDetID(comp)
            firstIndex = detID.index(firstDetID)
            lastDetID = self._getLastDetID(comp)
            lastIndex = detID.index(lastDetID)
            if lastDetID - firstDetID != lastIndex - firstIndex:
                raise RuntimeError(
                    "Calibration detid doesn't match instrument")

            eulerAngles = comp.getRotation().getEulerAngles(
                self._eulerConvention)

            logger.notice("Working on " + comp.getFullName() +
                          " Starting position is " + str(comp.getPos()) +
                          " Starting rotation is " + str(eulerAngles))

            x0List = []
            self._initialPos = [
                comp.getPos().getX(),
                comp.getPos().getY(),
                comp.getPos().getZ(), eulerAngles[0], eulerAngles[1],
                eulerAngles[2]
            ]

            boundsList = []

            if self._masking:
                mask_out = mask[firstIndex:lastIndex + 1]
                if mask_out.sum() == mask_out.size:
                    self.log().warning(
                        "All pixels in '%s' are masked. Skipping calibration."
                        % component)
                    continue
            else:
                mask_out = None

            for iopt, opt in enumerate(self._optionsList):
                if self._optionsDict[opt]:
                    x0List.append(self._initialPos[iopt])
                    boundsList.append((self._initialPos[iopt] +
                                       self.getProperty("Min" + opt).value,
                                       self._initialPos[iopt] +
                                       self.getProperty("Max" + opt).value))

            results = minimize(self._minimisation_func,
                               x0=x0List,
                               method='L-BFGS-B',
                               args=(wks_name, component, firstIndex,
                                     lastIndex, difc[firstIndex:lastIndex + 1],
                                     mask_out),
                               bounds=boundsList)

            # Apply the results to the output workspace
            xmap = self._mapOptions(results.x)

            if self._move:
                api.MoveInstrumentComponent(wks_name,
                                            component,
                                            X=xmap[0],
                                            Y=xmap[1],
                                            Z=xmap[2],
                                            RelativePosition=False)

            if self._rotate:
                (rotw, rotx, roty,
                 rotz) = self._eulerToAngleAxis(xmap[3], xmap[4], xmap[5],
                                                self._eulerConvention)
                api.RotateInstrumentComponent(wks_name,
                                              component,
                                              X=rotx,
                                              Y=roty,
                                              Z=rotz,
                                              Angle=rotw,
                                              RelativeRotation=False)

            # Need to grab the component again, as things have changed
            comp = api.mtd[wks_name].getInstrument().getComponentByName(
                component)
            logger.notice(
                "Finshed " + comp.getFullName() + " Final position is " +
                str(comp.getPos()) + " Final rotation is " +
                str(comp.getRotation().getEulerAngles(self._eulerConvention)))

            prog.report()
        logger.notice("Results applied to workspace " + wks_name)
Example #51
    def PyExec(self):
        """Executes the data reduction workflow."""
        progress = Progress(self, 0.0, 1.0, 7)
        report = common.Report()
        subalgLogging = self.getProperty(
            common.PROP_SUBALG_LOGGING).value == common.SUBALG_LOGGING_ON
        wsNamePrefix = self.getProperty(common.PROP_OUTPUT_WS).valueAsStr
        cleanupMode = self.getProperty(common.PROP_CLEANUP_MODE).value
        wsNames = common.NameSource(wsNamePrefix, cleanupMode)
        wsCleanup = common.IntermediateWSCleanup(cleanupMode, subalgLogging)

        progress.report('Loading inputs')
        mainWS = self._inputWS(wsNames, wsCleanup, subalgLogging)

        maskWSName = wsNames.withSuffix('combined_mask')
        maskWS = _createMaskWS(mainWS, maskWSName, subalgLogging)
        wsCleanup.cleanupLater(maskWS)

        reportWS = None
        if not self.getProperty(
                common.PROP_OUTPUT_DIAGNOSTICS_REPORT_WS).isDefault:
            reportWSName = self.getProperty(
                common.PROP_OUTPUT_DIAGNOSTICS_REPORT_WS).valueAsStr
            reportWS = _createDiagnosticsReportTable(
                reportWSName, mainWS.getNumberHistograms(), subalgLogging)

        progress.report('Loading default mask')
        defaultMaskWS = self._defaultMask(mainWS, wsNames, wsCleanup, report,
                                          subalgLogging)
        defaultMaskedSpectra = set()
        if defaultMaskWS is not None:
            defaultMaskedSpectra = _reportDefaultMask(reportWS, defaultMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=defaultMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(defaultMaskWS)

        progress.report('User-defined mask')
        userMaskWS = self._userMask(mainWS, wsNames, wsCleanup, subalgLogging)
        maskWS = Plus(LHSWorkspace=maskWS,
                      RHSWorkspace=userMaskWS,
                      EnableLogging=subalgLogging)
        wsCleanup.cleanup(userMaskWS)

        beamStopMaskedSpectra = set()
        if self._beamStopDiagnosticsEnabled(mainWS, report):
            progress.report('Diagnosing beam stop')
            beamStopMaskWS = self._beamStopDiagnostics(mainWS, maskWS, wsNames,
                                                       wsCleanup, report,
                                                       subalgLogging)
            beamStopMaskedSpectra = _reportBeamStopMask(
                reportWS, beamStopMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=beamStopMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(beamStopMaskWS)

        bkgMaskedSpectra = set()
        if self._bkgDiagnosticsEnabled(mainWS, report):
            progress.report('Diagnosing backgrounds')
            bkgMaskWS, bkgWS = self._bkgDiagnostics(mainWS, wsNames, wsCleanup,
                                                    report, subalgLogging)
            bkgMaskedSpectra = _reportBkgDiagnostics(reportWS, bkgWS,
                                                     bkgMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=bkgMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(bkgMaskWS)
            wsCleanup.cleanup(bkgWS)

        peakMaskedSpectra = set()
        if self._peakDiagnosticsEnabled(mainWS, report):
            progress.report('Diagnosing peaks')
            peakMaskWS, peakIntensityWS = self._peakDiagnostics(
                mainWS, wsNames, wsCleanup, report, subalgLogging)
            peakMaskedSpectra = _reportPeakDiagnostics(reportWS,
                                                       peakIntensityWS,
                                                       peakMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=peakMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(peakMaskWS)
            wsCleanup.cleanup(peakIntensityWS)

        self._outputReports(reportWS, defaultMaskedSpectra,
                            beamStopMaskedSpectra, peakMaskedSpectra,
                            bkgMaskedSpectra)

        self._finalize(maskWS, wsCleanup, report)
        progress.report('Done')
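
The diagnostics above fold every new mask into a running total with Plus; a toy illustration of that accumulation idiom, assuming a Mantid session (the single-bin workspaces are placeholders):

from mantid.simpleapi import CreateWorkspace, Plus

mask_a = CreateWorkspace(DataX=[0.0, 1.0], DataY=[1.0], NSpec=1)
mask_b = CreateWorkspace(DataX=[0.0, 1.0], DataY=[0.0], NSpec=1)
combined = Plus(LHSWorkspace=mask_a, RHSWorkspace=mask_b)
print(combined.readY(0))  # nonzero entries flag masked spectra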
Example #52
File: BayesQuasi.py  Project: mcvine/mantid
    def PyExec(self):

        # Check for platform support
        if not is_supported_f2py_platform():
            unsupported_msg = "This algorithm can only be run on valid platforms." \
                              + " please view the algorithm documentation to see" \
                              + " what platforms are currently supported"
            raise RuntimeError(unsupported_msg)

        from IndirectBayes import (CalcErange, GetXYE, ReadNormFile,
                                   ReadWidthFile, QLAddSampleLogs, C2Fw, C2Se,
                                   QuasiPlot)
        from IndirectCommon import (CheckXrange, CheckAnalysers, getEfixed,
                                    GetThetaQ, CheckHistZero, CheckHistSame,
                                    IndentifyDataBoundaries)
        setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
        self.log().information('BayesQuasi input')

        erange = [self._e_min, self._e_max]
        nbins = [self._sam_bins, self._res_bins]
        setup_prog.report('Converting to binary for Fortran')
        # convert True/False to 1/0 for Fortran
        o_el = 1 if self._elastic else 0
        o_w1 = 1 if self._width else 0
        o_res = 1 if self._res_norm else 0

        # Fortran code uses background choices defined using the following numbers
        setup_prog.report('Encoding input options')
        if self._background == 'Sloping':
            o_bgd = 2
        elif self._background == 'Flat':
            o_bgd = 1
        elif self._background == 'Zero':
            o_bgd = 0

        fitOp = [o_el, o_bgd, o_w1, o_res]

        setup_prog.report('Establishing save path')
        workdir = config['defaultsave.directory']
        if not os.path.isdir(workdir):
            workdir = os.getcwd()
            logger.information(
                'Default save directory is not set. Defaulting to current working directory: '
                + workdir)

        array_len = 4096  # length of array in Fortran
        setup_prog.report('Checking X Range')
        CheckXrange(erange, 'Energy')

        nbin, nrbin = nbins[0], nbins[1]

        logger.information('Sample is ' + self._samWS)
        logger.information('Resolution is ' + self._resWS)

        # Check for trailing and leading zeros in data
        setup_prog.report(
            'Checking for leading and trailing zeros in the data')
        first_data_point, last_data_point = IndentifyDataBoundaries(
            self._samWS)
        if first_data_point > self._e_min:
            logger.warning(
                "Sample workspace contains leading zeros within the energy range."
            )
            logger.warning("Updating eMin: eMin = " + str(first_data_point))
            self._e_min = first_data_point
        if last_data_point < self._e_max:
            logger.warning(
                "Sample workspace contains trailing zeros within the energy range."
            )
            logger.warning("Updating eMax: eMax = " + str(last_data_point))
            self._e_max = last_data_point

        # update erange with new values
        erange = [self._e_min, self._e_max]

        setup_prog.report('Checking Analysers')
        CheckAnalysers(self._samWS, self._resWS)
        setup_prog.report('Obtaining EFixed, theta and Q')
        efix = getEfixed(self._samWS)
        theta, Q = GetThetaQ(self._samWS)

        nsam, ntc = CheckHistZero(self._samWS)

        totalNoSam = nsam

        # check if we're performing a sequential fit
        if not self._loop:
            nsam = 1

        nres = CheckHistZero(self._resWS)[0]

        setup_prog.report('Checking Histograms')
        if self._program == 'QL':
            if nres == 1:
                prog = 'QLr'  # res file
            else:
                prog = 'QLd'  # data file
                CheckHistSame(self._samWS, 'Sample', self._resWS, 'Resolution')
        elif self._program == 'QSe':
            if nres == 1:
                prog = 'QSe'  # res file
            else:
                raise ValueError('Stretched Exp ONLY works with RES file')

        logger.information('Version is ' + prog)
        logger.information(' Number of spectra = ' + str(nsam))
        logger.information(' Erange : ' + str(erange[0]) + ' to ' +
                           str(erange[1]))

        setup_prog.report('Reading files')
        Wy, We = ReadWidthFile(self._width, self._wfile, totalNoSam)
        dtn, xsc = ReadNormFile(self._res_norm, self._resnormWS, totalNoSam)

        setup_prog.report('Establishing output workspace name')
        fname = self._samWS[:-4] + '_' + prog
        probWS = fname + '_Prob'
        fitWS = fname + '_Fit'
        wrks = os.path.join(workdir, self._samWS[:-4])
        logger.information(' lptfile : ' + wrks + '_' + prog + '.lpt')
        lwrk = len(wrks)
        wrks = wrks.ljust(140, ' ')  # ljust returns a new, padded string
        wrkr = self._resWS
        wrkr = wrkr.ljust(140, ' ')

        setup_prog.report('Initialising probability list')
        # initialise probability list
        if self._program == 'QL':
            prob0 = []
            prob1 = []
            prob2 = []
        xQ = np.array([Q[0]])
        for m in range(1, nsam):
            xQ = np.append(xQ, Q[m])
        xProb = xQ
        xProb = np.append(xProb, xQ)
        xProb = np.append(xProb, xQ)
        eProb = np.zeros(3 * nsam)

        group = ''
        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam * 3)
        for m in range(0, nsam):
            logger.information('Group ' + str(m) + ' at angle ' +
                               str(theta[m]))
            nsp = m + 1
            nout, bnorm, Xdat, Xv, Yv, Ev = CalcErange(self._samWS, m, erange,
                                                       nbin)
            Ndat = nout[0]
            Imin = nout[1]
            Imax = nout[2]
            if prog == 'QLd':
                mm = m
            else:
                mm = 0
            Nb, Xb, Yb, Eb = GetXYE(self._resWS, mm,
                                    array_len)  # get resolution data
            numb = [nsam, nsp, ntc, Ndat, nbin, Imin, Imax, Nb, nrbin]
            rscl = 1.0
            reals = [efix, theta[m], rscl, bnorm]

            if prog == 'QLr':
                workflow_prog.report(
                    'Processing Sample number %i as Lorentzian' % nsp)
                nd, xout, yout, eout, yfit, yprob = QLr.qlres(
                    numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, Wy, We, dtn,
                    xsc, wrks, wrkr, lwrk)
                message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(
                    yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
                logger.information(message)
            if prog == 'QLd':
                workflow_prog.report('Processing Sample number %i' % nsp)
                nd, xout, yout, eout, yfit, yprob = QLd.qldata(
                    numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, Eb, Wy, We,
                    wrks, wrkr, lwrk)
                message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(
                    yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
                logger.information(message)
            if prog == 'QSe':
                workflow_prog.report(
                    'Processing Sample number %i as Stretched Exp' % nsp)
                nd, xout, yout, eout, yfit, yprob = Qse.qlstexp(
                    numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, Wy, We,
                    dtn, xsc, wrks, wrkr, lwrk)
            dataX = xout[:nd]
            dataX = np.append(dataX, 2 * xout[nd - 1] - xout[nd - 2])
            yfit_list = np.split(yfit[:4 * nd], 4)
            dataF1 = yfit_list[1]
            if self._program == 'QL':
                dataF2 = yfit_list[2]
            workflow_prog.report('Processing data')
            dataG = np.zeros(nd)
            datX = dataX
            datY = yout[:nd]
            datE = eout[:nd]
            datX = np.append(datX, dataX)
            datY = np.append(datY, dataF1[:nd])
            datE = np.append(datE, dataG)
            res1 = dataF1[:nd] - yout[:nd]
            datX = np.append(datX, dataX)
            datY = np.append(datY, res1)
            datE = np.append(datE, dataG)
            nsp = 3
            names = 'data,fit.1,diff.1'
            res_plot = [0, 1, 2]
            if self._program == 'QL':
                workflow_prog.report('Processing Lorentzian result data')
                datX = np.append(datX, dataX)
                datY = np.append(datY, dataF2[:nd])
                datE = np.append(datE, dataG)
                res2 = dataF2[:nd] - yout[:nd]
                datX = np.append(datX, dataX)
                datY = np.append(datY, res2)
                datE = np.append(datE, dataG)
                nsp += 2
                names += ',fit.2,diff.2'
                res_plot.append(4)
                prob0.append(yprob[0])
                prob1.append(yprob[1])
                prob2.append(yprob[2])

            # create result workspace
            fitWS = fname + '_Workspaces'
            fout = fname + '_Workspace_' + str(m)

            workflow_prog.report('Creating OutputWorkspace')
            CreateWorkspace(OutputWorkspace=fout, DataX=datX, DataY=datY,
                            DataE=datE, NSpec=nsp, UnitX='DeltaE',
                            VerticalAxisUnit='Text', VerticalAxisValues=names)

            # append workspace to list of results
            group += fout + ','

        comp_prog = Progress(self, start=0.7, end=0.8, nreports=2)
        comp_prog.report('Creating Group Workspace')
        GroupWorkspaces(InputWorkspaces=group, OutputWorkspace=fitWS)

        if self._program == 'QL':
            comp_prog.report('Processing Lorentzian probability data')
            yPr0 = np.array([prob0[0]])
            yPr1 = np.array([prob1[0]])
            yPr2 = np.array([prob2[0]])
            for m in range(1, nsam):
                yPr0 = np.append(yPr0, prob0[m])
                yPr1 = np.append(yPr1, prob1[m])
                yPr2 = np.append(yPr2, prob2[m])
            yProb = yPr0
            yProb = np.append(yProb, yPr1)
            yProb = np.append(yProb, yPr2)
            probWs = CreateWorkspace(OutputWorkspace=probWS, DataX=xProb,
                                     DataY=yProb, DataE=eProb, NSpec=3,
                                     UnitX='MomentumTransfer')
            outWS = C2Fw(self._samWS[:-4], fname)
            if self._plot != 'None':
                QuasiPlot(fname, self._plot, res_plot, self._loop)
        if self._program == 'QSe':
            comp_prog.report('Running C2Se')
            outWS = C2Se(fname)
            if self._plot != 'None':
                QuasiPlot(fname, self._plot, res_plot, self._loop)

        log_prog = Progress(self, start=0.8, end=1.0, nreports=8)
        # Add some sample logs to the output workspaces
        log_prog.report('Copying Logs to outputWorkspace')
        CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=outWS)
        log_prog.report('Adding Sample logs to Output workspace')
        QLAddSampleLogs(outWS, self._resWS, prog, self._background,
                        self._elastic, erange, (nbin, nrbin), self._resnormWS,
                        self._wfile)
        log_prog.report('Copying logs to fit Workspace')
        CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=fitWS)
        log_prog.report('Adding sample logs to Fit workspace')
        QLAddSampleLogs(fitWS, self._resWS, prog, self._background,
                        self._elastic, erange, (nbin, nrbin), self._resnormWS,
                        self._wfile)
        log_prog.report('Finalising log copying')

        if self._save:
            log_prog.report('Saving workspaces')
            fit_path = os.path.join(workdir, fitWS + '.nxs')
            SaveNexusProcessed(InputWorkspace=fitWS, Filename=fit_path)
            out_path = os.path.join(workdir,
                                    outWS + '.nxs')  # path name for nxs file
            SaveNexusProcessed(InputWorkspace=outWS, Filename=out_path)
            logger.information('Output fit file created : ' + fit_path)
            logger.information('Output parameter file created : ' + out_path)

        self.setProperty('OutputWorkspaceFit', fitWS)
        self.setProperty('OutputWorkspaceResult', outWS)
        log_prog.report('Setting workspace properties')

        if self._program == 'QL':
            self.setProperty('OutputWorkspaceProb', probWS)
    def PyExec(self):
        from IndirectCommon import getEfixed

        self._setup()

        # Set up progress reporting
        n_prog_reports = 2
        if self._can_ws_name is not None:
            n_prog_reports += 1
        prog = Progress(self, 0.0, 1.0, n_prog_reports)

        efixed = getEfixed(self._sample_ws_name)

        sample_wave_ws = '__sam_wave'
        ConvertUnits(InputWorkspace=self._sample_ws_name,
                     OutputWorkspace=sample_wave_ws,
                     Target='Wavelength',
                     EMode='Indirect',
                     EFixed=efixed,
                     EnableLogging=False)

        sample_thickness = self._sample_outer_radius - self._sample_inner_radius
        logger.information('Sample thickness: ' + str(sample_thickness))

        prog.report('Calculating sample corrections')
        if self._sample_density_type == 'Mass Density':
            builder = MaterialBuilder()
            mat = builder.setFormula(
                self._sample_chemical_formula).setMassDensity(
                    self._sample_density).build()
            self._sample_density = mat.numberDensity
        SetSampleMaterial(sample_wave_ws,
                          ChemicalFormula=self._sample_chemical_formula,
                          SampleNumberDensity=self._sample_density)
        AnnularRingAbsorption(
            InputWorkspace=sample_wave_ws,
            OutputWorkspace=self._ass_ws,
            SampleHeight=3.0,
            SampleThickness=sample_thickness,
            CanInnerRadius=self._can_inner_radius,
            CanOuterRadius=self._can_outer_radius,
            SampleChemicalFormula=self._sample_chemical_formula,
            SampleNumberDensity=self._sample_density,
            NumberOfWavelengthPoints=10,
            EventsPerPoint=self._events)

        group = self._ass_ws

        if self._can_ws_name is not None:
            can1_wave_ws = '__can1_wave'
            can2_wave_ws = '__can2_wave'
            ConvertUnits(InputWorkspace=self._can_ws_name,
                         OutputWorkspace=can1_wave_ws,
                         Target='Wavelength',
                         EMode='Indirect',
                         EFixed=efixed,
                         EnableLogging=False)
            if self._can_scale != 1.0:
                logger.information('Scaling container by: ' +
                                   str(self._can_scale))
                Scale(InputWorkspace=can1_wave_ws,
                      OutputWorkspace=can1_wave_ws,
                      Factor=self._can_scale,
                      Operation='Multiply')
            CloneWorkspace(InputWorkspace=can1_wave_ws,
                           OutputWorkspace=can2_wave_ws,
                           EnableLogging=False)

            can_thickness_1 = self._sample_inner_radius - self._can_inner_radius
            can_thickness_2 = self._can_outer_radius - self._sample_outer_radius
            logger.information('Container thickness: %f & %f' %
                               (can_thickness_1, can_thickness_2))

            if self._use_can_corrections:
                prog.report('Calculating container corrections')
                Divide(LHSWorkspace=sample_wave_ws,
                       RHSWorkspace=self._ass_ws,
                       OutputWorkspace=sample_wave_ws)

                if self._can_density_type == 'Mass Density':
                    builder = MaterialBuilder()
                    mat = builder.setFormula(
                        self._can_chemical_formula).setMassDensity(
                            self._can_density).build()
                    self._can_density = mat.numberDensity
                SetSampleMaterial(can1_wave_ws,
                                  ChemicalFormula=self._can_chemical_formula,
                                  SampleNumberDensity=self._can_density)

                AnnularRingAbsorption(
                    InputWorkspace=can1_wave_ws,
                    OutputWorkspace='__Acc1',
                    SampleHeight=3.0,
                    SampleThickness=can_thickness_1,
                    CanInnerRadius=self._can_inner_radius,
                    CanOuterRadius=self._sample_outer_radius,
                    SampleChemicalFormula=self._can_chemical_formula,
                    SampleNumberDensity=self._can_density,
                    NumberOfWavelengthPoints=10,
                    EventsPerPoint=self._events)

                SetSampleMaterial(can2_wave_ws,
                                  ChemicalFormula=self._can_chemical_formula,
                                  SampleNumberDensity=self._can_density)
                AnnularRingAbsorption(
                    InputWorkspace=can2_wave_ws,
                    OutputWorkspace='__Acc2',
                    SampleHeight=3.0,
                    SampleThickness=can_thickness_2,
                    CanInnerRadius=self._sample_inner_radius,
                    CanOuterRadius=self._can_outer_radius,
                    SampleChemicalFormula=self._can_chemical_formula,
                    SampleNumberDensity=self._can_density,
                    NumberOfWavelengthPoints=10,
                    EventsPerPoint=self._events)

                Multiply(LHSWorkspace='__Acc1',
                         RHSWorkspace='__Acc2',
                         OutputWorkspace=self._acc_ws,
                         EnableLogging=False)
                DeleteWorkspace('__Acc1', EnableLogging=False)
                DeleteWorkspace('__Acc2', EnableLogging=False)

                Divide(LHSWorkspace=can1_wave_ws,
                       RHSWorkspace=self._acc_ws,
                       OutputWorkspace=can1_wave_ws)
                Minus(LHSWorkspace=sample_wave_ws,
                      RHSWorkspace=can1_wave_ws,
                      OutputWorkspace=sample_wave_ws)
                group += ',' + self._acc_ws

            else:
                prog.report('Calculating can scaling')
                Minus(LHSWorkspace=sample_wave_ws,
                      RHSWorkspace=can1_wave_ws,
                      OutputWorkspace=sample_wave_ws)
                Divide(LHSWorkspace=sample_wave_ws,
                       RHSWorkspace=self._ass_ws,
                       OutputWorkspace=sample_wave_ws)

            DeleteWorkspace(can1_wave_ws, EnableLogging=False)
            DeleteWorkspace(can2_wave_ws, EnableLogging=False)

        else:
            Divide(LHSWorkspace=sample_wave_ws,
                   RHSWorkspace=self._ass_ws,
                   OutputWorkspace=sample_wave_ws)

        ConvertUnits(InputWorkspace=sample_wave_ws,
                     OutputWorkspace=self._output_ws,
                     Target='DeltaE',
                     EMode='Indirect',
                     EFixed=efixed,
                     EnableLogging=False)
        DeleteWorkspace(sample_wave_ws, EnableLogging=False)

        prog.report('Recording sample logs')
        sample_log_workspaces = [self._output_ws, self._ass_ws]
        sample_logs = [('sample_shape', 'annulus'),
                       ('sample_filename', self._sample_ws_name),
                       ('sample_inner', self._sample_inner_radius),
                       ('sample_outer', self._sample_outer_radius),
                       ('can_inner', self._can_inner_radius),
                       ('can_outer', self._can_outer_radius)]

        if self._can_ws_name is not None:
            sample_logs.append(('container_filename', self._can_ws_name))
            sample_logs.append(('container_scale', self._can_scale))
            if self._use_can_corrections:
                sample_log_workspaces.append(self._acc_ws)
                sample_logs.append(('container_thickness_1', can_thickness_1))
                sample_logs.append(('container_thickness_2', can_thickness_2))

        log_names = [item[0] for item in sample_logs]
        log_values = [item[1] for item in sample_logs]

        for ws_name in sample_log_workspaces:
            AddSampleLogMultiple(Workspace=ws_name,
                                 LogNames=log_names,
                                 LogValues=log_values,
                                 EnableLogging=False)

        self.setProperty('OutputWorkspace', self._output_ws)

        # Output the Ass workspace if it is wanted, delete if not
        if self._abs_ws == '':
            DeleteWorkspace(self._ass_ws, EnableLogging=False)
            if self._can_ws_name is not None and self._use_can_corrections:
                DeleteWorkspace(self._acc_ws, EnableLogging=False)
        else:
            GroupWorkspaces(InputWorkspaces=group,
                            OutputWorkspace=self._abs_ws,
                            EnableLogging=False)
            self.setProperty('CorrectionsWorkspace', self._abs_ws)
    def PyExec(self):
        # setup progress reporting
        prog = Progress(self, 0.0, 1.0, 3)

        prog.report('Setting up sample environment')

        # set the beam shape
        set_beam_alg = self.createChildAlgorithm("SetBeam",
                                                 enableLogging=False)
        set_beam_alg.setProperty("InputWorkspace", self._input_ws)
        set_beam_alg.setProperty(
            "Geometry", {
                'Shape': 'Slit',
                'Width': self._beam_width,
                'Height': self._beam_height
            })
        set_beam_alg.execute()

        # set the sample geometry
        sample_geometry = dict()
        sample_geometry['Height'] = self._height

        if self._shape == 'FlatPlate':
            sample_geometry['Shape'] = 'FlatPlate'
            sample_geometry['Width'] = self._width
            sample_geometry['Thick'] = self._thickness
            sample_geometry['Center'] = [0.0, 0.0, self._center]
            sample_geometry['Angle'] = self._angle

        if self._shape == 'Cylinder':
            sample_geometry['Shape'] = 'Cylinder'
            sample_geometry['Radius'] = self._radius
            sample_geometry['Center'] = [0.0, 0.0, 0.0]

        if self._shape == 'Annulus':
            sample_geometry['Shape'] = 'HollowCylinder'
            sample_geometry['InnerRadius'] = self._inner_radius
            sample_geometry['OuterRadius'] = self._outer_radius
            sample_geometry['Center'] = [0.0, 0.0, 0.0]
            sample_geometry['Axis'] = 1

        set_sample_alg = self.createChildAlgorithm("SetSample",
                                                   enableLogging=False)
        set_sample_alg.setProperty("InputWorkspace", self._input_ws)
        set_sample_alg.setProperty("Geometry", sample_geometry)

        # set sample
        if self._material_defined:
            # set sample without sample material
            set_sample_alg.execute()

        else:
            # set the sample material
            sample_material = dict()
            sample_material['ChemicalFormula'] = self._chemical_formula

            if self._density_type == 'Mass Density':
                sample_material['SampleMassDensity'] = self._density
            if self._density_type == 'Number Density':
                sample_material['SampleNumberDensity'] = self._density

            set_sample_alg.setProperty("Material", sample_material)

            try:
                set_sample_alg.execute()
            except RuntimeError as exc:
                raise RuntimeError(
                    "Supplied chemical formula was invalid: \n" + str(exc))

        prog.report('Calculating sample corrections')

        monte_carlo_alg = self.createChildAlgorithm("MonteCarloAbsorption",
                                                    enableLogging=True)
        monte_carlo_alg.setProperty("InputWorkspace", self._input_ws)
        monte_carlo_alg.setProperty("OutputWorkspace", self._output_ws)
        monte_carlo_alg.setProperty("EventsPerPoint", self._events)
        monte_carlo_alg.setProperty("NumberOfWavelengthPoints",
                                    self._number_wavelengths)
        monte_carlo_alg.setProperty("Interpolation", self._interpolation)
        monte_carlo_alg.execute()

        output_ws = monte_carlo_alg.getProperty("OutputWorkspace").value

        prog.report('Recording Sample Logs')

        log_names = ['beam_height', 'beam_width']
        log_values = [self._beam_height, self._beam_width]

        # add sample geometry to sample logs
        for key, value in sample_geometry.items():
            log_names.append('sample_' + key.lower())
            log_values.append(value)

        add_sample_log_alg = self.createChildAlgorithm('AddSampleLogMultiple',
                                                       enableLogging=False)
        add_sample_log_alg.setProperty('Workspace', output_ws)
        add_sample_log_alg.setProperty('LogNames', log_names)
        add_sample_log_alg.setProperty('LogValues', log_values)
        add_sample_log_alg.execute()

        self.setProperty('OutputWorkspace', output_ws)
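
A minimal sketch of the geometry-dictionary pattern that SetBeam and SetSample consume above, assuming a Mantid session; the dimensions and material here are placeholders:

from mantid.simpleapi import CreateSampleWorkspace, SetBeam, SetSample

ws = CreateSampleWorkspace()
SetBeam(ws, Geometry={'Shape': 'Slit', 'Width': 2.0, 'Height': 3.0})
SetSample(ws,
          Geometry={'Shape': 'Cylinder', 'Height': 4.0, 'Radius': 1.0,
                    'Center': [0.0, 0.0, 0.0]},
          Material={'ChemicalFormula': 'V', 'SampleNumberDensity': 0.07})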
Example #55
class IndirectILLReductionQENS(PythonAlgorithm):

    _sample_files = None
    _alignment_files = None
    _background_files = None
    _calibration_files = None
    _background_calib_files = None
    _sum_all_runs = None
    _unmirror_option = None
    _back_scaling = None
    _back_calib_scaling = None
    _criteria = None
    _progress = None
    _red_ws = None
    _common_args = {}
    _peak_range = []
    _runs = None
    _spectrum_axis = None

    def category(self):
        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"

    def summary(self):
        return 'Performs quasi-elastic neutron scattering (QENS) multiple file reduction ' \
               'for ILL indirect geometry data, instrument IN16B.'

    def seeAlso(self):
        return ["IndirectILLReductionFWS", "IndirectILLEnergyTransfer"]

    def name(self):
        return "IndirectILLReductionQENS"

    def PyInit(self):

        self.declareProperty(MultipleFileProperty('Run', extensions=['nxs']),
                             doc='Run number(s) of sample run(s).')

        self.declareProperty(
            MultipleFileProperty('BackgroundRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc='Run number(s) of background (empty can) run(s).')

        self.declareProperty(
            MultipleFileProperty('CalibrationRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc='Run number(s) of vanadium calibration run(s).')

        self.declareProperty(
            MultipleFileProperty('CalibrationBackgroundRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc=
            'Run number(s) of background (empty can) run(s) for vanadium run.')

        self.declareProperty(
            MultipleFileProperty('AlignmentRun',
                                 action=FileAction.OptionalLoad,
                                 extensions=['nxs']),
            doc='Run number(s) of vanadium run(s) used for '
            'peak alignment for UnmirrorOption=[5, 7]')

        self.declareProperty(name='SumRuns',
                             defaultValue=False,
                             doc='Whether to sum all the input runs.')

        self.declareProperty(
            name='CropDeadMonitorChannels',
            defaultValue=False,
            doc='Whether or not to exclude the first and last few channels '
            'with 0 monitor count in the energy transfer formula.')

        self.declareProperty(
            name='UnmirrorOption',
            defaultValue=6,
            validator=IntBoundedValidator(lower=0, upper=7),
            doc='Unmirroring options : \n'
            '0 no unmirroring\n'
            '1 sum of left and right\n'
            '2 left\n'
            '3 right\n'
            '4 shift right according to left and sum\n'
            '5 like 4, but use alignment run for peak positions\n'
            '6 center both left and right at zero and sum\n'
            '7 like 6, but use alignment run for peak positions')

        self.declareProperty(name='BackgroundScalingFactor',
                             defaultValue=1.,
                             validator=FloatBoundedValidator(lower=0),
                             doc='Scaling factor for background subtraction')

        self.declareProperty(
            name='CalibrationBackgroundScalingFactor',
            defaultValue=1.,
            validator=FloatBoundedValidator(lower=0),
            doc=
            'Scaling factor for background subtraction for vanadium calibration'
        )

        self.declareProperty(
            name='CalibrationPeakRange',
            defaultValue=[-0.003, 0.003],
            validator=FloatArrayMandatoryValidator(),
            doc='Peak range for integration over calibration file peak (in meV)'
        )

        self.declareProperty(
            FileProperty('MapFile',
                         '',
                         action=FileAction.OptionalLoad,
                         extensions=['map', 'xml']),
            doc='Filename of the detector grouping map file to use. \n'
            'By default all the pixels will be summed per each tube. \n'
            'Use .map or .xml file (see GroupDetectors documentation) '
            'only if different range is needed for each tube.')

        self.declareProperty(
            name='ManualPSDIntegrationRange',
            defaultValue=[1, 128],
            doc='Integration range of vertical pixels in each PSD tube. \n'
            'By default all the pixels will be summed per each tube. \n'
            'Use this option if the same range (other than default) '
            'is needed for all the tubes.')

        self.declareProperty(name='Analyser',
                             defaultValue='silicon',
                             validator=StringListValidator(['silicon']),
                             doc='Analyser crystal.')

        self.declareProperty(name='Reflection',
                             defaultValue='111',
                             validator=StringListValidator(['111', '311']),
                             doc='Analyser reflection.')

        self.declareProperty(WorkspaceGroupProperty(
            'OutputWorkspace', '', direction=Direction.Output),
                             doc='Group name for the reduced workspace(s).')

        self.declareProperty(name='SpectrumAxis',
                             defaultValue='SpectrumNumber',
                             validator=StringListValidator(
                                 ['SpectrumNumber', '2Theta', 'Q', 'Q2']),
                             doc='The spectrum axis conversion target.')

    def validateInputs(self):

        issues = dict()

        uo = self.getProperty('UnmirrorOption').value

        if (uo == 5 or uo == 7) and not self.getPropertyValue('AlignmentRun'):
            issues[
                'AlignmentRun'] = 'Given UnmirrorOption requires alignment run to be set'

        if self.getPropertyValue('CalibrationRun'):
            range = self.getProperty('CalibrationPeakRange').value
            if len(range) != 2:
                issues['CalibrationPeakRange'] = 'Please provide valid calibration range ' \
                                                 '(comma separated 2 energy values).'
            elif range[0] >= range[1]:
                issues['CalibrationPeakRange'] = 'Please provide valid calibration range. ' \
                                                 'Start energy is bigger than end energy.'

        if self.getPropertyValue(
                'CalibrationBackgroundRun'
        ) and not self.getPropertyValue('CalibrationRun'):
            issues[
                'CalibrationRun'] = 'Calibration run is required when calibration background is given.'

        return issues

    def setUp(self):

        self._sample_file = self.getPropertyValue('Run')
        self._alignment_file = self.getPropertyValue('AlignmentRun').replace(
            ',', '+')  # automatic summing
        self._background_file = self.getPropertyValue('BackgroundRun').replace(
            ',', '+')  # automatic summing
        self._calibration_file = self.getPropertyValue(
            'CalibrationRun').replace(',', '+')  # automatic summing
        self._background_calib_files = self.getPropertyValue(
            'CalibrationBackgroundRun').replace(',', '+')  # automatic summing
        self._sum_all_runs = self.getProperty('SumRuns').value
        self._unmirror_option = self.getProperty('UnmirrorOption').value
        self._back_scaling = self.getProperty('BackgroundScalingFactor').value
        self._back_calib_scaling = self.getProperty(
            'CalibrationBackgroundScalingFactor').value
        self._peak_range = self.getProperty('CalibrationPeakRange').value
        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')

        self._red_ws = self.getPropertyValue('OutputWorkspace')

        suffix = ''
        if self._spectrum_axis == 'SpectrumNumber':
            suffix = '_red'
        elif self._spectrum_axis == '2Theta':
            suffix = '_2theta'
        elif self._spectrum_axis == 'Q':
            suffix = '_q'
        elif self._spectrum_axis == 'Q2':
            suffix = '_q2'

        self._red_ws += suffix

        # arguments to pass to IndirectILLEnergyTransfer
        self._common_args['MapFile'] = self.getPropertyValue('MapFile')
        self._common_args['Analyser'] = self.getPropertyValue('Analyser')
        self._common_args['Reflection'] = self.getPropertyValue('Reflection')
        self._common_args['ManualPSDIntegrationRange'] = self.getProperty(
            'ManualPSDIntegrationRange').value
        self._common_args['CropDeadMonitorChannels'] = self.getProperty(
            'CropDeadMonitorChannels').value
        self._common_args['SpectrumAxis'] = self._spectrum_axis

        if self._sum_all_runs:
            self.log().notice('All the sample runs will be summed')
            self._sample_file = self._sample_file.replace(',', '+')

        # Nexus metadata criteria for QENS type of data
        self._criteria = '$/entry0/instrument/Doppler/maximum_delta_energy$ != 0. and ' \
                         '$/entry0/instrument/Doppler/velocity_profile$ == 0'

        # empty list to store all final workspaces to group
        self._ws_list = []

    def _mask(self, ws, xstart, xend):
        """
        Masks the bins below x[xstart] and above x[xend]
        @param   ws           :: input workspace name
        @param   xstart       :: MaskBins between x[0] and x[xstart]
        @param   xend         :: MaskBins between x[xend] and x[-1]
        """
        x_values = mtd[ws].readX(0)

        if xstart > 0:
            self.log().debug('Mask bins smaller than {0}'.format(xstart))
            MaskBins(InputWorkspace=ws,
                     OutputWorkspace=ws,
                     XMin=x_values[0],
                     XMax=x_values[int(xstart)])

        if xend < len(x_values) - 1:
            self.log().debug('Mask bins larger than {0}'.format(xend))
            MaskBins(InputWorkspace=ws,
                     OutputWorkspace=ws,
                     XMin=x_values[int(xend) + 1],
                     XMax=x_values[-1])
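
# Standalone sketch (not Mantid code) of the edge-masking logic in _mask
# above: bins with indices below xstart or above xend are excluded. The
# sketch glosses over the fact that a histogram X-axis has one more point
# than there are bins.
import numpy as np

def kept_bins(n_bins, xstart, xend):
    keep = np.zeros(n_bins, dtype=bool)
    keep[int(xstart):int(xend) + 1] = True
    return keep

print(kept_bins(8, 2, 5))  # [False False  True  True  True  True False False]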

    def _filter_files(self, files, label):
        '''
        Filters the given list of files according to the nexus criteria
        @param  files :: list of input files (i.e. a ',' and '+' separated string)
        @param  label :: label for the error message if nothing is left after filtering
        @throws RuntimeError :: when nothing is left after filtering
        @return :: the list of input files that passed the criteria
        '''

        files = SelectNexusFilesByMetadata(files, self._criteria)

        if not files:
            raise RuntimeError(
                'None of the {0} runs are of QENS type. '
                'Check the files or reduction type.'.format(label))
        else:
            self.log().information('Filtered {0} runs are: \n{1}'.format(
                label, files.replace(',', '\n')))

        return files
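
# The QENS selection criteria (see setUp) wrap nexus entry paths in $...$.
# A plain-Python analogue of what the expression tests, with illustrative
# metadata values (this is not the SelectNexusFilesByMetadata implementation):
metadata = {'/entry0/instrument/Doppler/maximum_delta_energy': 0.5,
            '/entry0/instrument/Doppler/velocity_profile': 0}
is_qens = (metadata['/entry0/instrument/Doppler/maximum_delta_energy'] != 0.
           and metadata['/entry0/instrument/Doppler/velocity_profile'] == 0)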

    def _filter_all_input_files(self):
        '''
        Filters all the lists of input files needed for the reduction.
        '''

        self._sample_file = self._filter_files(self._sample_file, 'sample')

        if self._background_file:
            self._background_file = self._filter_files(self._background_file,
                                                       'background')

        if self._calibration_file:
            self._calibration_file = self._filter_files(
                self._calibration_file, 'calibration')

        if self._background_calib_files:
            self._background_calib_files = self._filter_files(
                self._background_calib_files, 'calibration background')

        if self._alignment_file:
            self._alignment_file = self._filter_files(self._alignment_file,
                                                      'alignment')

    def _warn_negative_integral(self, ws, message):
        '''
        Logs a warning if the integral of any spectrum in the given workspace is <= 0
        @param ws :: input workspace name
        @param message :: message suffix for the warning
        '''

        tmp_int = '__tmp_int' + ws
        Integration(InputWorkspace=ws, OutputWorkspace=tmp_int)

        for item in mtd[tmp_int]:
            for index in range(item.getNumberHistograms()):
                if item.readY(index)[0] <= 0:
                    self.log().warning(
                        'Negative or 0 integral in spectrum #{0} {1}'.format(
                            index, message))

        DeleteWorkspace(tmp_int)
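
# Plain-numpy analogue of the check above, with illustrative data: a spectrum
# whose integral is <= 0 (e.g. after over-subtracting a background) triggers
# the warning.
import numpy as np

counts = np.array([[1.2, 0.8], [-0.5, 0.1]])  # 2 spectra, 2 bins each
for index, integral in enumerate(counts.sum(axis=1)):
    if integral <= 0:
        print('Negative or 0 integral in spectrum #{0}'.format(index))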

    def PyExec(self):

        self.setUp()

        self._filter_all_input_files()

        if self._background_file:
            background = '__background_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._background_file,
                                      OutputWorkspace=background,
                                      **self._common_args)
            Scale(InputWorkspace=background,
                  Factor=self._back_scaling,
                  OutputWorkspace=background)

        if self._calibration_file:
            calibration = '__calibration_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._calibration_file,
                                      OutputWorkspace=calibration,
                                      **self._common_args)

            if self._background_calib_files:
                back_calibration = '__calibration_back_' + self._red_ws
                IndirectILLEnergyTransfer(Run=self._background_calib_files,
                                          OutputWorkspace=back_calibration,
                                          **self._common_args)
                Scale(InputWorkspace=back_calibration,
                      Factor=self._back_calib_scaling,
                      OutputWorkspace=back_calibration)
                Minus(LHSWorkspace=calibration,
                      RHSWorkspace=back_calibration,
                      OutputWorkspace=calibration)

            # MatchPeaks does not play nicely with the ws groups
            for ws in mtd[calibration]:
                MatchPeaks(InputWorkspace=ws.getName(),
                           OutputWorkspace=ws.getName(),
                           MaskBins=True,
                           BinRangeTable='')

            Integration(InputWorkspace=calibration,
                        RangeLower=self._peak_range[0],
                        RangeUpper=self._peak_range[1],
                        OutputWorkspace=calibration)
            self._warn_negative_integral(calibration, 'in calibration run.')

        if self._unmirror_option == 5 or self._unmirror_option == 7:
            alignment = '__alignment_' + self._red_ws
            IndirectILLEnergyTransfer(Run=self._alignment_file,
                                      OutputWorkspace=alignment,
                                      **self._common_args)

        runs = self._sample_file.split(',')

        self._progress = Progress(self, start=0.0, end=1.0, nreports=len(runs))

        for run in runs:
            self._reduce_run(run)

        if self._background_file:
            DeleteWorkspace(background)

        if self._calibration_file:
            DeleteWorkspace(calibration)

        if self._background_calib_files:
            DeleteWorkspace(back_calibration)

        if self._unmirror_option == 5 or self._unmirror_option == 7:
            DeleteWorkspace(alignment)

        GroupWorkspaces(InputWorkspaces=self._ws_list,
                        OutputWorkspace=self._red_ws)

        # unhide the final workspaces, i.e. remove __ prefix
        for ws in mtd[self._red_ws]:
            RenameWorkspace(InputWorkspace=ws,
                            OutputWorkspace=ws.getName()[2:])

        self.setProperty('OutputWorkspace', self._red_ws)

    def _reduce_run(self, run):
        '''
        Reduces the given (single or summed multiple) run
        @param run :: run path
        @throws RuntimeError : if an inconsistent mirror sense is found in the background or calibration run
        '''

        runs_list = run.split('+')

        runnumber = os.path.basename(runs_list[0]).split('.')[0]

        self._progress.report("Reducing run #" + runnumber)

        ws = '__' + runnumber

        if len(runs_list) > 1:
            ws += '_multiple'

        ws += '_' + self._red_ws

        back_ws = '__background_' + self._red_ws

        calib_ws = '__calibration_' + self._red_ws

        IndirectILLEnergyTransfer(Run=run,
                                  OutputWorkspace=ws,
                                  **self._common_args)

        wings = mtd[ws].getNumberOfEntries()

        if self._background_file:
            if wings == mtd[back_ws].getNumberOfEntries():
                Minus(LHSWorkspace=ws,
                      RHSWorkspace=back_ws,
                      OutputWorkspace=ws)
                self._warn_negative_integral(ws,
                                             'after background subtraction.')
            else:
                raise RuntimeError(
                    'Inconsistent mirror sense in background run. Unable to perform subtraction.'
                )

        if self._calibration_file:
            if wings == mtd[calib_ws].getNumberOfEntries():
                Divide(LHSWorkspace=ws,
                       RHSWorkspace=calib_ws,
                       OutputWorkspace=ws)
                self._scale_calibration(ws, calib_ws)
            else:
                raise RuntimeError(
                    'Inconsistent mirror sense in calibration run. Unable to perform calibration.'
                )

        self._perform_unmirror(ws, runnumber)

        # register to reduced runs list
        self._ws_list.append(ws)

    def _scale_calibration(self, ws, calib_ws):
        '''
        Scales the wings of calibrated sample ws with the maximum
        of the integrated intensities in each wing of calib ws
        @param ws       :: calibrated sample workspace
        @param calib_ws :: calibration workspace
        '''

        # number of wings are checked to be the same in ws and calib_ws here already

        for wing in range(mtd[ws].getNumberOfEntries()):
            sample = mtd[ws].getItem(wing).getName()
            integral = mtd[calib_ws].getItem(wing).getName()
            scale = numpy.max(mtd[integral].extractY()[:, 0])
            self.log().information(
                "Wing {0} will be scaled up with {1} after calibration".format(
                    wing, scale))
            Scale(InputWorkspace=sample,
                  Factor=scale,
                  OutputWorkspace=sample,
                  Operation='Multiply')
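
# extractY() returns the counts as a 2D (spectra x bins) array; since the
# calibration workspaces were reduced to a single integral bin, the scale is
# the maximum integrated intensity over all spectra of the wing. Standalone
# analogue with illustrative numbers:
import numpy
integrated = numpy.array([[0.7], [1.3], [0.9]])  # one integral per spectrum
scale = numpy.max(integrated[:, 0])              # 1.3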

    def _perform_unmirror(self, ws, run):
        '''
        Performs unmirroring, i.e. summing of the left and right wings
        for two-wing data, or centering of the one-wing data
        @param ws  :: workspace
        @param run :: run number
        @throws RuntimeError : if the left and right wing sizes do not match
                               in the two-wing case and the unmirror option is 1 or > 3
        @throws RuntimeError : if the mirror sense in the alignment run is inconsistent
        '''

        outname = ws + '_tmp'

        wings = mtd[ws].getNumberOfEntries()

        self.log().information(
            'Unmirroring workspace {0} with option {1}'.format(
                ws, self._unmirror_option))

        alignment = '__alignment_' + self._red_ws

        # make sure the sample and alignment runs have the same mirror sense for unmirror 5,7
        if self._unmirror_option == 5 or self._unmirror_option == 7:
            if wings != mtd[alignment].getNumberOfEntries():
                raise RuntimeError(
                    'Inconsistent mirror sense in alignment run. Unable to perform unmirror.'
                )

        if wings == 1:  # one wing

            name = mtd[ws].getItem(0).getName()

            if self._unmirror_option < 6:  # do unmirror 0, i.e. nothing
                CloneWorkspace(InputWorkspace=name, OutputWorkspace=outname)
            elif self._unmirror_option == 6:
                MatchPeaks(InputWorkspace=name,
                           OutputWorkspace=outname,
                           MaskBins=True,
                           BinRangeTable='')
            elif self._unmirror_option == 7:
                MatchPeaks(InputWorkspace=name,
                           InputWorkspace2=mtd[alignment].getItem(0).getName(),
                           MatchInput2ToCenter=True,
                           OutputWorkspace=outname,
                           MaskBins=True,
                           BinRangeTable='')

        elif wings == 2:  # two wing

            left = mtd[ws].getItem(0).getName()
            right = mtd[ws].getItem(1).getName()

            mask_min = 0
            mask_max = mtd[left].blocksize()

            if (self._common_args['CropDeadMonitorChannels'] and
                (self._unmirror_option == 1 or self._unmirror_option > 3)
                    and mtd[left].blocksize() != mtd[right].blocksize()):
                raise RuntimeError(
                    "Different number of bins found in the left and right wings"
                    " after cropping the dead monitor channels. "
                    "Unable to perform the requested unmirror option, consider using option "
                    "0, 2 or 3 or switch off the CropDeadMonitorChannels.")

            if self._unmirror_option == 0:
                left_out = '__' + run + '_' + self._red_ws + '_left'
                right_out = '__' + run + '_' + self._red_ws + '_right'
                CloneWorkspace(InputWorkspace=left, OutputWorkspace=left_out)
                CloneWorkspace(InputWorkspace=right, OutputWorkspace=right_out)
                GroupWorkspaces(InputWorkspaces=[left_out, right_out],
                                OutputWorkspace=outname)
            elif self._unmirror_option == 1:
                Plus(LHSWorkspace=left,
                     RHSWorkspace=right,
                     OutputWorkspace=outname)
                Scale(InputWorkspace=outname,
                      OutputWorkspace=outname,
                      Factor=0.5)
            elif self._unmirror_option == 2:
                CloneWorkspace(InputWorkspace=left, OutputWorkspace=outname)
            elif self._unmirror_option == 3:
                CloneWorkspace(InputWorkspace=right, OutputWorkspace=outname)
            elif self._unmirror_option == 4:
                bin_range_table = '__um4_' + right
                MatchPeaks(InputWorkspace=right,
                           InputWorkspace2=left,
                           OutputWorkspace=right,
                           MaskBins=True,
                           BinRangeTable=bin_range_table)
                mask_min = mtd[bin_range_table].row(0)['MinBin']
                mask_max = mtd[bin_range_table].row(0)['MaxBin']
                DeleteWorkspace(bin_range_table)
            elif self._unmirror_option == 5:
                bin_range_table = '__um5_' + right
                MatchPeaks(InputWorkspace=right,
                           InputWorkspace2=mtd[alignment].getItem(0).getName(),
                           InputWorkspace3=mtd[alignment].getItem(1).getName(),
                           OutputWorkspace=right,
                           MaskBins=True,
                           BinRangeTable=bin_range_table)
                mask_min = mtd[bin_range_table].row(0)['MinBin']
                mask_max = mtd[bin_range_table].row(0)['MaxBin']
                DeleteWorkspace(bin_range_table)
            elif self._unmirror_option == 6:
                bin_range_table_left = '__um6_' + left
                bin_range_table_right = '__um6_' + right
                MatchPeaks(InputWorkspace=left,
                           OutputWorkspace=left,
                           MaskBins=True,
                           BinRangeTable=bin_range_table_left)
                MatchPeaks(InputWorkspace=right,
                           OutputWorkspace=right,
                           MaskBins=True,
                           BinRangeTable=bin_range_table_right)
                mask_min = max(mtd[bin_range_table_left].row(0)['MinBin'],
                               mtd[bin_range_table_right].row(0)['MinBin'])
                mask_max = min(mtd[bin_range_table_left].row(0)['MaxBin'],
                               mtd[bin_range_table_right].row(0)['MaxBin'])
                DeleteWorkspace(bin_range_table_left)
                DeleteWorkspace(bin_range_table_right)
            elif self._unmirror_option == 7:
                bin_range_table_left = '__um7_' + left
                bin_range_table_right = '__um7_' + right
                MatchPeaks(InputWorkspace=left,
                           InputWorkspace2=mtd[alignment].getItem(0).getName(),
                           OutputWorkspace=left,
                           MatchInput2ToCenter=True,
                           MaskBins=True,
                           BinRangeTable=bin_range_table_left)
                MatchPeaks(InputWorkspace=right,
                           InputWorkspace2=mtd[alignment].getItem(1).getName(),
                           OutputWorkspace=right,
                           MatchInput2ToCenter=True,
                           MaskBins=True,
                           BinRangeTable=bin_range_table_right)
                mask_min = max(mtd[bin_range_table_left].row(0)['MinBin'],
                               mtd[bin_range_table_right].row(0)['MinBin'])
                mask_max = min(mtd[bin_range_table_left].row(0)['MaxBin'],
                               mtd[bin_range_table_right].row(0)['MaxBin'])
                DeleteWorkspace(bin_range_table_left)
                DeleteWorkspace(bin_range_table_right)

            if self._unmirror_option > 3:
                Plus(LHSWorkspace=left,
                     RHSWorkspace=right,
                     OutputWorkspace=outname)
                Scale(InputWorkspace=outname,
                      OutputWorkspace=outname,
                      Factor=0.5)
                self._mask(outname, mask_min, mask_max)

        DeleteWorkspace(ws)
        RenameWorkspace(InputWorkspace=outname, OutputWorkspace=ws)
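
# Summary of the unmirror options implemented above for two-wing data,
# derived from the branches in this method (not from external documentation):
UNMIRROR_OPTIONS = {
    0: 'keep the left and right wings as separate workspaces in a group',
    1: 'average the left and right wings without alignment',
    2: 'take the left wing only',
    3: 'take the right wing only',
    4: 'align the right wing to the left one, then average',
    5: 'align the right wing using the alignment run, then average',
    6: 'center both wings independently, then average',
    7: 'align both wings to the centered alignment run, then average',
}
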
Example #56
class SANSILLAutoProcess(DataProcessorAlgorithm):
    """
    Performs complete treatment of ILL SANS data; instruments D11, D16, D22, D33.
    """
    progress = None
    reduction_type = None
    sample = None
    absorber = None
    beam = None
    container = None
    stransmission = None
    ctransmission = None
    btransmission = None
    atransmission = None
    sensitivity = None
    mask = None
    flux = None
    default_mask = None
    output = None
    output_sens = None
    dimensionality = None
    reference = None
    normalise = None
    radius = None
    thickness = None
    theta_dependent = None

    def category(self):
        return 'ILL\\SANS;ILL\\Auto'

    def summary(self):
        return 'Performs complete SANS data reduction at the ILL.'

    def seeAlso(self):
        return ['SANSILLReduction', 'SANSILLIntegration']

    def name(self):
        return 'SANSILLAutoProcess'

    def validateInputs(self):
        result = dict()
        message = 'Wrong number of {0} runs: {1}. Provide one or as many as sample runs: {2}.'
        message_value = 'Wrong number of {0} values: {1}. Provide one or as ' \
                        'many as sample runs: {2}.'
        tr_message = 'Wrong number of {0} runs: {1}. Provide one or multiple runs summed with +.'
        sample_dim = self.getPropertyValue('SampleRuns').count(',')
        abs_dim = self.getPropertyValue('AbsorberRuns').count(',')
        beam_dim = self.getPropertyValue('BeamRuns').count(',')
        flux_dim = self.getPropertyValue('FluxRuns').count(',')
        can_dim = self.getPropertyValue('ContainerRuns').count(',')
        str_dim = self.getPropertyValue('SampleTransmissionRuns').count(',')
        ctr_dim = self.getPropertyValue('ContainerTransmissionRuns').count(',')
        btr_dim = self.getPropertyValue('TransmissionBeamRuns').count(',')
        atr_dim = self.getPropertyValue('TransmissionAbsorberRuns').count(',')
        mask_dim = self.getPropertyValue('MaskFiles').count(',')
        sens_dim = self.getPropertyValue('SensitivityMaps').count(',')
        ref_dim = self.getPropertyValue('ReferenceFiles').count(',')
        maxqxy_dim = self.getPropertyValue('MaxQxy').count(',')
        deltaq_dim = self.getPropertyValue('DeltaQ').count(',')
        output_type = self.getPropertyValue('OutputType')
        n_wedges = self.getProperty('NumberOfWedges').value
        if self.getPropertyValue('SampleRuns') == '':
            result['SampleRuns'] = 'Please provide at least one sample run.'
        if abs_dim != sample_dim and abs_dim != 0:
            result['AbsorberRuns'] = message.format('Absorber', abs_dim, sample_dim)
        if beam_dim != sample_dim and beam_dim != 0:
            result['BeamRuns'] = message.format('Beam', beam_dim, sample_dim)
        if can_dim != sample_dim and can_dim != 0:
            result['ContainerRuns'] = message.format('Container', can_dim, sample_dim)
        if str_dim != 0:
            result['SampleTransmissionRuns'] = tr_message.format('SampleTransmission', str_dim)
        if ctr_dim != 0:
            result['ContainerTransmissionRuns'] = tr_message.format('ContainerTransmission', ctr_dim)
        if btr_dim != 0:
            result['TransmissionBeamRuns'] = tr_message.format('TransmissionBeam', btr_dim)
        if atr_dim != 0:
            result['TransmissionAbsorberRuns'] = tr_message.format('TransmissionAbsorber', atr_dim)
        if mask_dim != sample_dim and mask_dim != 0:
            result['MaskFiles'] = message.format('Mask', mask_dim, sample_dim)
        if ref_dim != sample_dim and ref_dim != 0:
            result['ReferenceFiles'] = message.format('Reference', ref_dim, sample_dim)
        if sens_dim != sample_dim and sens_dim != 0:
            result['SensitivityMaps'] = message.format('Sensitivity', sens_dim, sample_dim)
        if flux_dim != sample_dim and flux_dim != 0:
            result['FluxRuns'] = message.format('Flux', flux_dim, sample_dim)
        if maxqxy_dim != sample_dim and maxqxy_dim != 0:
            result['MaxQxy'] = message_value.format('MaxQxy',
                                                    maxqxy_dim,
                                                    sample_dim)
        if deltaq_dim != sample_dim and deltaq_dim != 0:
            result['DeltaQ'] = message_value.format('DeltaQ',
                                                    deltaq_dim,
                                                    sample_dim)
        if output_type == 'I(Phi,Q)' and n_wedges == 0:
            result['NumberOfWedges'] = "For I(Phi,Q) processing, the number " \
                                       "of wedges must be different from 0."

        return result
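
# Every dimension check above applies the same rule: an auxiliary input must
# supply either a single entry (comma count == 0) or exactly one entry per
# sample run. A compact sketch of that rule (illustrative refactor only, not
# part of the algorithm):
def check_count(result, name, label, dim, sample_dim, message):
    if dim != 0 and dim != sample_dim:
        result[name] = message.format(label, dim, sample_dim)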

    def setUp(self):
        self.sample = self.getPropertyValue('SampleRuns').split(',')
        self.absorber = self.getPropertyValue('AbsorberRuns').split(',')
        self.beam = self.getPropertyValue('BeamRuns').split(',')
        self.flux = self.getPropertyValue('FluxRuns').split(',')
        self.container = self.getPropertyValue('ContainerRuns').split(',')
        self.stransmission = self.getPropertyValue('SampleTransmissionRuns')
        self.ctransmission = self.getPropertyValue('ContainerTransmissionRuns')
        self.btransmission = self.getPropertyValue('TransmissionBeamRuns')
        self.atransmission = self.getPropertyValue('TransmissionAbsorberRuns')
        self.sensitivity = self.getPropertyValue('SensitivityMaps') \
            .replace(' ', '').split(',')
        self.default_mask = self.getPropertyValue('DefaultMaskFile')
        self.mask = self.getPropertyValue('MaskFiles') \
            .replace(' ', '').split(',')
        self.reference = self.getPropertyValue('ReferenceFiles') \
            .replace(' ', '').split(',')
        self.output = self.getPropertyValue('OutputWorkspace')
        self.output_panels = self.output + "_panels"
        self.output_sens = self.getPropertyValue('SensitivityOutputWorkspace')
        self.normalise = self.getPropertyValue('NormaliseBy')
        self.theta_dependent = self.getProperty('ThetaDependent').value
        self.radius = self.getProperty('BeamRadius').value
        self.dimensionality = len(self.sample)
        self.progress = Progress(self, start=0.0, end=1.0, nreports=10 * self.dimensionality)
        self.cleanup = self.getProperty('ClearCorrected2DWorkspace').value
        self.n_wedges = self.getProperty('NumberOfWedges').value
        self.maxqxy = self.getPropertyValue('MaxQxy').split(',')
        self.deltaq = self.getPropertyValue('DeltaQ').split(',')
        self.output_type = self.getPropertyValue('OutputType')

    def PyInit(self):

        self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '',
                                                    direction=Direction.Output),
                             doc='The output workspace group containing reduced data.')

        self.declareProperty(MultipleFileProperty('SampleRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs'],
                                                  allow_empty=True),
                             doc='Sample run(s).')

        self.declareProperty(MultipleFileProperty('AbsorberRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Absorber (Cd/B4C) run(s).')

        self.declareProperty(MultipleFileProperty('BeamRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Empty beam run(s).')

        self.declareProperty(MultipleFileProperty('FluxRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Empty beam run(s) for flux calculation only; '
                                 'if left blank flux will be calculated from BeamRuns.')

        self.declareProperty(MultipleFileProperty('ContainerRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Empty container run(s).')

        self.setPropertyGroup('SampleRuns', 'Numors')
        self.setPropertyGroup('AbsorberRuns', 'Numors')
        self.setPropertyGroup('BeamRuns', 'Numors')
        self.setPropertyGroup('FluxRuns', 'Numors')
        self.setPropertyGroup('ContainerRuns', 'Numors')

        self.declareProperty(MultipleFileProperty('SampleTransmissionRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Sample transmission run(s).')

        self.declareProperty(MultipleFileProperty('ContainerTransmissionRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Container transmission run(s).')

        self.declareProperty(MultipleFileProperty('TransmissionBeamRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Empty beam run(s) for transmission.')

        self.declareProperty(MultipleFileProperty('TransmissionAbsorberRuns',
                                                  action=FileAction.OptionalLoad,
                                                  extensions=['nxs']),
                             doc='Absorber (Cd/B4C) run(s) for transmission.')

        self.setPropertyGroup('SampleTransmissionRuns', 'Transmissions')
        self.setPropertyGroup('ContainerTransmissionRuns', 'Transmissions')
        self.setPropertyGroup('TransmissionBeamRuns', 'Transmissions')
        self.setPropertyGroup('TransmissionAbsorberRuns', 'Transmissions')
        self.copyProperties('SANSILLReduction',
                            ['ThetaDependent'])
        self.setPropertyGroup('ThetaDependent', 'Transmissions')

        self.declareProperty('SensitivityMaps', '',
                             doc='File(s) or workspaces containing the maps of relative detector efficiencies.')

        self.declareProperty('DefaultMaskFile', '',
                             doc='File or workspace containing the default mask (typically the detector edges and dead pixels/tubes)'
                                 ' to be applied to all the detector configurations.')

        self.declareProperty('MaskFiles', '',
                             doc='File(s) or workspaces containing the detector mask (typically beam stop).')

        self.declareProperty('ReferenceFiles', '',
                             doc='File(s) or workspaces containing the corrected water data (in 2D) for absolute normalisation.')

        self.declareProperty(MatrixWorkspaceProperty('SensitivityOutputWorkspace', '',
                                                     direction=Direction.Output,
                                                     optional=PropertyMode.Optional),
                             doc='The output sensitivity map workspace.')

        self.copyProperties('SANSILLReduction', ['NormaliseBy'])

        self.declareProperty('SampleThickness', 0.1, validator=FloatBoundedValidator(lower=0.),
                             doc='Sample thickness [cm]')

        self.declareProperty('BeamRadius', 0.05, validator=FloatBoundedValidator(lower=0.),
                             doc='Beam radius [m]; used for beam center finding, transmission and flux calculations.')

        self.declareProperty('WaterCrossSection', 1., doc='Provide water cross-section; '
                                                          'used only if the absolute scale is done by dividing to water.')

        self.setPropertyGroup('SensitivityMaps', 'Options')
        self.setPropertyGroup('DefaultMaskFile', 'Options')
        self.setPropertyGroup('MaskFiles', 'Options')
        self.setPropertyGroup('ReferenceFiles', 'Options')
        self.setPropertyGroup('SensitivityOutputWorkspace', 'Options')
        self.setPropertyGroup('NormaliseBy', 'Options')
        self.setPropertyGroup('SampleThickness', 'Options')
        self.setPropertyGroup('BeamRadius', 'Options')
        self.setPropertyGroup('WaterCrossSection', 'Options')

        self.declareProperty(FloatArrayProperty('MaxQxy', values=[-1]),
                             doc='Maximum of absolute Qx and Qy.')
        self.declareProperty(FloatArrayProperty('DeltaQ', values=[-1]),
                             doc='The dimension of a Qx-Qy cell.')

        self.declareProperty('OutputPanels', False,
                             doc='Whether or not process the individual '
                             'detector panels.')

        self.copyProperties('SANSILLIntegration',
                            ['OutputType', 'CalculateResolution',
                             'DefaultQBinning', 'BinningFactor',
                             'OutputBinning', 'NPixelDivision',
                             'NumberOfWedges', 'WedgeAngle', 'WedgeOffset',
                             'AsymmetricWedges', 'IQxQyLogBinning'])

        self.setPropertyGroup('OutputType', 'Integration Options')
        self.setPropertyGroup('CalculateResolution', 'Integration Options')
        self.declareProperty('ClearCorrected2DWorkspace', True,
                             'Whether to clear the fully corrected 2D workspace.')

    def PyExec(self):

        self.setUp()
        outputs = []
        panel_output_groups = []

        container_transmission, sample_transmission = \
            self.processTransmissions()

        for d in range(self.dimensionality):
            if self.sample[d] != EMPTY_TOKEN:
                absorber = self.processAbsorber(d)
                flux = self.processFlux(d, absorber)
                if flux:
                    beam, _ = self.processBeam(d, absorber)
                else:
                    beam, flux = self.processBeam(d, absorber)
                container = self.processContainer(d, beam, absorber,
                                                  container_transmission)
                sample, panels = self.processSample(d, flux,
                                                    sample_transmission, beam,
                                                    absorber, container)
                outputs.append(sample)
                if panels:
                    panel_output_groups.append(panels)
            else:
                self.log().information('Skipping empty token run.')

        for output in outputs:
            ConvertToPointData(InputWorkspace=output,
                               OutputWorkspace=output)
        if len(outputs) > 1 and self.getPropertyValue('OutputType') == 'I(Q)':
            try:
                stitched = self.output + "_stitched"
                Stitch1DMany(InputWorkspaces=outputs,
                             OutputWorkspace=stitched)
                outputs.append(stitched)
            except RuntimeError as re:
                self.log().warning("Unable to stitch automatically, consider "
                                   "stitching manually: " + str(re))

        GroupWorkspaces(InputWorkspaces=outputs, OutputWorkspace=self.output)

        # group wedge workspaces
        if self.output_type == "I(Q)":
            for w in range(self.n_wedges):
                wedge_ws = [self.output + "_wedge_" + str(w + 1) + "_" + str(d + 1)
                            for d in range(self.dimensionality)]
                # convert to point data and remove nan and 0 from edges
                for ws in wedge_ws:
                    ConvertToPointData(InputWorkspace=ws,
                                       OutputWorkspace=ws)
                    ReplaceSpecialValues(InputWorkspace=ws,
                                         OutputWorkspace=ws,
                                         NaNValue=0)
                    y = mtd[ws].readY(0)
                    x = mtd[ws].readX(0)
                    nonzero = np.nonzero(y)

                    CropWorkspace(InputWorkspace=ws,
                                  XMin=x[nonzero][0] - 1,
                                  XMax=x[nonzero][-1],
                                  OutputWorkspace=ws)

                # and stitch if possible
                if len(wedge_ws) > 1:
                    try:
                        stitched = self.output + "_wedge_" + str(w + 1) \
                                   + "_stitched"
                        Stitch1DMany(InputWorkspaces=wedge_ws,
                                     OutputWorkspace=stitched)
                        wedge_ws.append(stitched)
                    except RuntimeError as re:
                        self.log().warning("Unable to stitch automatically, "
                                           "consider stitching manually: "
                                           + str(re))
                GroupWorkspaces(InputWorkspaces=wedge_ws,
                                OutputWorkspace=self.output + "_wedge_" + str(w + 1))

        self.setProperty('OutputWorkspace', mtd[self.output])
        if self.output_sens:
            self.setProperty('SensitivityOutputWorkspace', mtd[self.output_sens])

        # group panels
        if panel_output_groups:
            GroupWorkspaces(InputWorkspaces=panel_output_groups,
                            OutputWorkspace=self.output_panels)

    def processTransmissions(self):
        [process_transmission_absorber, transmission_absorber_name] = \
                needs_processing(self.atransmission, 'Absorber')
        self.progress.report('Processing transmission absorber')
        if process_transmission_absorber:
            SANSILLReduction(Run=self.atransmission,
                             ProcessAs='Absorber',
                             NormaliseBy=self.normalise,
                             OutputWorkspace=transmission_absorber_name)

        [process_transmission_beam, transmission_beam_name] = \
            needs_processing(self.btransmission, 'Beam')
        flux_name = transmission_beam_name + '_Flux'
        self.progress.report('Processing transmission beam')
        if process_transmission_beam:
            SANSILLReduction(Run=self.btransmission,
                             ProcessAs='Beam',
                             NormaliseBy=self.normalise,
                             OutputWorkspace=transmission_beam_name,
                             BeamRadius=self.radius,
                             FluxOutputWorkspace=flux_name,
                             AbsorberInputWorkspace=
                             transmission_absorber_name)

        [process_container_transmission, container_transmission_name] = \
            needs_processing(self.ctransmission, 'Transmission')
        self.progress.report('Processing container transmission')
        if process_container_transmission:
            SANSILLReduction(Run=self.ctransmission,
                             ProcessAs='Transmission',
                             OutputWorkspace=container_transmission_name,
                             AbsorberInputWorkspace=
                             transmission_absorber_name,
                             BeamInputWorkspace=transmission_beam_name,
                             NormaliseBy=self.normalise,
                             BeamRadius=self.radius)

        [process_sample_transmission, sample_transmission_name] = \
            needs_processing(self.stransmission, 'Transmission')
        self.progress.report('Processing sample transmission')
        if process_sample_transmission:
            SANSILLReduction(Run=self.stransmission,
                             ProcessAs='Transmission',
                             OutputWorkspace=sample_transmission_name,
                             AbsorberInputWorkspace=
                             transmission_absorber_name,
                             BeamInputWorkspace=transmission_beam_name,
                             NormaliseBy=self.normalise,
                             BeamRadius=self.radius)
        return container_transmission_name, sample_transmission_name
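
# needs_processing is a helper imported from the ILL SANS utilities and is
# not defined in this listing. A minimal sketch of its apparent contract --
# return whether the reduction must actually run, plus the cache workspace
# name -- assuming it merely derives a name and checks the workspace
# dictionary (an assumption, not the real implementation):
from mantid.simpleapi import mtd

def needs_processing_sketch(runs, process):
    # hypothetical cache-name derivation, for illustration only
    ws_name = runs.replace(',', '_').replace('+', '_') + '_' + process if runs else ''
    return bool(runs) and not mtd.doesExist(ws_name), ws_name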

    def processAbsorber(self, i):
        absorber = (self.absorber[i]
                    if len(self.absorber) == self.dimensionality
                    else self.absorber[0])
        [process_absorber, absorber_name] = \
            needs_processing(absorber, 'Absorber')
        self.progress.report('Processing absorber')
        if process_absorber:
            SANSILLReduction(Run=absorber,
                             ProcessAs='Absorber',
                             NormaliseBy=self.normalise,
                             OutputWorkspace=absorber_name)
        return absorber_name

    def processBeam(self, i, absorber_name):
        beam = (self.beam[i]
                if len(self.beam) == self.dimensionality
                else self.beam[0])
        [process_beam, beam_name] = needs_processing(beam, 'Beam')
        flux_name = beam_name + '_Flux' if not self.flux[0] else ''
        self.progress.report('Processing beam')
        if process_beam:
            SANSILLReduction(Run=beam,
                             ProcessAs='Beam',
                             OutputWorkspace=beam_name,
                             NormaliseBy=self.normalise,
                             BeamRadius=self.radius,
                             AbsorberInputWorkspace=absorber_name,
                             FluxOutputWorkspace=flux_name)
        return beam_name, flux_name

    def processFlux(self, i, absorber_name):
        if self.flux[0]:
            flux = (self.flux[i]
                    if len(self.flux) == self.dimensionality
                    else self.flux[0])
            [process_flux, flux_name] = needs_processing(flux, 'Flux')
            self.progress.report('Processing flux')
            if process_flux:
                SANSILLReduction(Run=flux,
                                 ProcessAs='Beam',
                                 OutputWorkspace=flux_name.replace('Flux',
                                                                   'Beam'),
                                 NormaliseBy=self.normalise,
                                 BeamRadius=self.radius,
                                 AbsorberInputWorkspace=absorber_name,
                                 FluxOutputWorkspace=flux_name)
            return flux_name
        else:
            return None

    def processContainer(self, i, beam_name, absorber_name,
                         container_transmission_name):
        container = (self.container[i]
                     if len(self.container) == self.dimensionality
                     else self.container[0])
        [process_container, container_name] = \
            needs_processing(container, 'Container')
        self.progress.report('Processing container')
        if process_container:
            SANSILLReduction(Run=container,
                             ProcessAs='Container',
                             OutputWorkspace=container_name,
                             AbsorberInputWorkspace=absorber_name,
                             BeamInputWorkspace=beam_name,
                             CacheSolidAngle=True,
                             TransmissionInputWorkspace=
                             container_transmission_name,
                             ThetaDependent=self.theta_dependent,
                             NormaliseBy=self.normalise)
        return container_name

    def processSample(self, i, flux_name, sample_transmission_name, beam_name,
                      absorber_name, container_name):
        # this is the default mask, the same for all the distance configurations
        [load_default_mask, default_mask_name] = \
                needs_loading(self.default_mask, 'DefaultMask')
        self.progress.report('Loading default mask')
        if load_default_mask:
            LoadNexusProcessed(Filename=self.default_mask,
                               OutputWorkspace=default_mask_name)

        # this is the beam stop mask, potentially different at each distance configuration
        mask = (self.mask[i]
                if len(self.mask) == self.dimensionality
                else self.mask[0])
        [load_mask, mask_name] = needs_loading(mask, 'Mask')
        self.progress.report('Loading mask')
        if load_mask:
            LoadNexusProcessed(Filename=mask, OutputWorkspace=mask_name)

        # sensitivity
        sens_input = ''
        ref_input = ''
        if self.sensitivity:
            sens = (self.sensitivity[i]
                    if len(self.sensitivity) == self.dimensionality
                    else self.sensitivity[0])
            [load_sensitivity, sensitivity_name] = \
                needs_loading(sens, 'Sensitivity')
            sens_input = sensitivity_name
            self.progress.report('Loading sensitivity')
            if load_sensitivity:
                LoadNexusProcessed(Filename=sens,
                                   OutputWorkspace=sensitivity_name)

        # reference
        if self.reference:
            reference = (self.reference[i]
                         if len(self.reference) == self.dimensionality
                         else self.reference[0])
            [load_reference, reference_name] = \
                needs_loading(reference, 'Reference')
            ref_input = reference_name
            self.progress.report('Loading reference')
            if load_reference:
                LoadNexusProcessed(Filename=reference,
                                   OutputWorkspace=reference_name)

        # sample
        [_, sample_name] = needs_processing(self.sample[i], 'Sample')
        output = self.output + '_' + str(i + 1)
        self.progress.report('Processing sample at detector configuration '
                             + str(i + 1))
        SANSILLReduction(
                Run=self.sample[i],
                ProcessAs='Sample',
                OutputWorkspace=sample_name,
                ReferenceInputWorkspace=ref_input,
                AbsorberInputWorkspace=absorber_name,
                BeamInputWorkspace=beam_name,
                CacheSolidAngle=True,
                ContainerInputWorkspace=container_name,
                TransmissionInputWorkspace=sample_transmission_name,
                MaskedInputWorkspace=mask_name,
                DefaultMaskedInputWorkspace=default_mask_name,
                SensitivityInputWorkspace=sens_input,
                SensitivityOutputWorkspace=self.output_sens,
                FluxInputWorkspace=flux_name,
                NormaliseBy=self.normalise,
                ThetaDependent=self.theta_dependent,
                SampleThickness=
                self.getProperty('SampleThickness').value,
                WaterCrossSection=
                self.getProperty('WaterCrossSection').value
                )

        if self.getProperty('OutputPanels').value:
            panel_ws_group = self.output_panels + '_' + str(i + 1)
        else:
            panel_ws_group = ""

        if self.n_wedges and self.output_type == "I(Q)":
            output_wedges = self.output + "_wedge_d" + str(i + 1)
        else:
            output_wedges = ""

        SANSILLIntegration(
                InputWorkspace=sample_name,
                OutputWorkspace=output,
                OutputType=self.output_type,
                CalculateResolution=
                self.getPropertyValue('CalculateResolution'),
                DefaultQBinning=self.getPropertyValue('DefaultQBinning'),
                BinningFactor=self.getProperty('BinningFactor').value,
                OutputBinning=self.getPropertyValue('OutputBinning'),
                NPixelDivision=self.getProperty('NPixelDivision').value,
                NumberOfWedges=self.n_wedges,
                WedgeAngle=self.getProperty('WedgeAngle').value,
                WedgeOffset=self.getProperty('WedgeOffset').value,
                WedgeWorkspace=output_wedges,
                AsymmetricWedges=self.getProperty('AsymmetricWedges').value,
                PanelOutputWorkspaces=panel_ws_group,
                MaxQxy=(self.maxqxy[i]
                        if len(self.maxqxy) == self.dimensionality
                        else self.maxqxy[0]),
                DeltaQ=(self.deltaq[i]
                        if len(self.deltaq) == self.dimensionality
                        else self.deltaq[0]),
                IQxQyLogBinning=self.getProperty('IQxQyLogBinning').value
                )

        # wedges ungrouping and renaming
        if self.n_wedges and self.output_type == "I(Q)":
            wedges_old_names = [output_wedges + "_" + str(w + 1)
                                for w in range(self.n_wedges)]
            wedges_new_names = [self.output + "_wedge_" + str(w + 1)
                                + "_" + str(i + 1)
                                for w in range(self.n_wedges)]
            UnGroupWorkspace(InputWorkspace=output_wedges)
            RenameWorkspaces(InputWorkspaces=wedges_old_names,
                             WorkspaceNames=wedges_new_names)

        if self.cleanup:
            DeleteWorkspace(sample_name)

        if not mtd.doesExist(panel_ws_group):
            panel_ws_group = ""

        return output, panel_ws_group

    def PyExec(self):
        self._setup()
        self._wave_range()

        setup_prog = Progress(self, start=0.0, end=0.2, nreports=2)
        # Set sample material form chemical formula
        setup_prog.report('Set sample material')
        self._sample_density = self._set_material(
            self._sample_ws_name, self._sample_chemical_formula,
            self._sample_density_type, self._sample_density)

        # If using a can, set sample material using chemical formula
        if self._use_can:
            setup_prog.report('Set container sample material')
            self._can_density = self._set_material(self._can_ws_name,
                                                   self._can_chemical_formula,
                                                   self._can_density_type,
                                                   self._can_density)

        # Holders for the corrected data
        data_ass = []
        data_assc = []
        data_acsc = []
        data_acc = []

        self._get_angles()
        num_angles = len(self._angles)
        workflow_prog = Progress(self,
                                 start=0.2,
                                 end=0.8,
                                 nreports=num_angles * 2)
        for angle_idx in range(num_angles):
            workflow_prog.report('Running flat correction for angle %s' %
                                 angle_idx)
            angle = self._angles[angle_idx]
            (ass, assc, acsc, acc) = self._flat_abs(angle)

            logger.information('Angle %d: %f successful' %
                               (angle_idx + 1, self._angles[angle_idx]))
            workflow_prog.report('Appending data for angle %s' % angle_idx)
            data_ass = np.append(data_ass, ass)
            data_assc = np.append(data_assc, assc)
            data_acsc = np.append(data_acsc, acsc)
            data_acc = np.append(data_acc, acc)

        log_prog = Progress(self, start=0.8, end=1.0, nreports=8)

        sample_logs = {
            'sample_shape': 'flatplate',
            'sample_filename': self._sample_ws_name,
            'sample_thickness': self._sample_thickness,
            'sample_angle': self._sample_angle
        }
        dataX = self._waves * num_angles

        # Create the output workspaces
        ass_ws = self._output_ws_name + '_ass'
        log_prog.report('Creating ass output Workspace')
        CreateWorkspace(OutputWorkspace=ass_ws,
                        DataX=dataX,
                        DataY=data_ass,
                        NSpec=num_angles,
                        UnitX='Wavelength',
                        VerticalAxisUnit='SpectraNumber',
                        ParentWorkspace=self._sample_ws_name,
                        EnableLogging=False)
        log_prog.report('Adding sample logs')
        self._add_sample_logs(ass_ws, sample_logs)

        workspaces = [ass_ws]

        if self._use_can:
            log_prog.report('Adding can sample logs')
            AddSampleLog(Workspace=ass_ws,
                         LogName='can_filename',
                         LogType='String',
                         LogText=str(self._can_ws_name),
                         EnableLogging=False)

            assc_ws = self._output_ws_name + '_assc'
            workspaces.append(assc_ws)
            log_prog.report('Creating assc output workspace')
            CreateWorkspace(OutputWorkspace=assc_ws,
                            DataX=dataX,
                            DataY=data_assc,
                            NSpec=num_angles,
                            UnitX='Wavelength',
                            VerticalAxisUnit='SpectraNumber',
                            ParentWorkspace=self._sample_ws_name,
                            EnableLogging=False)
            log_prog.report('Adding assc sample logs')
            self._add_sample_logs(assc_ws, sample_logs)
            AddSampleLog(Workspace=assc_ws,
                         LogName='can_filename',
                         LogType='String',
                         LogText=str(self._can_ws_name),
                         EnableLogging=False)

            acsc_ws = self._output_ws_name + '_acsc'
            workspaces.append(acsc_ws)
            log_prog.report('Creating acsc output workspace')
            CreateWorkspace(OutputWorkspace=acsc_ws,
                            DataX=dataX,
                            DataY=data_acsc,
                            NSpec=num_angles,
                            UnitX='Wavelength',
                            VerticalAxisUnit='SpectraNumber',
                            ParentWorkspace=self._sample_ws_name,
                            EnableLogging=False)
            log_prog.report('Adding acsc sample logs')
            self._add_sample_logs(acsc_ws, sample_logs)
            AddSampleLog(Workspace=acsc_ws,
                         LogName='can_filename',
                         LogType='String',
                         LogText=str(self._can_ws_name),
                         EnableLogging=False)

            acc_ws = self._output_ws_name + '_acc'
            workspaces.append(acc_ws)
            log_prog.report('Creating acc workspace')
            CreateWorkspace(OutputWorkspace=acc_ws,
                            DataX=dataX,
                            DataY=data_acc,
                            NSpec=num_angles,
                            UnitX='Wavelength',
                            VerticalAxisUnit='SpectraNumber',
                            ParentWorkspace=self._sample_ws_name,
                            EnableLogging=False)
            log_prog.report('Adding acc sample logs')
            self._add_sample_logs(acc_ws, sample_logs)
            AddSampleLog(Workspace=acc_ws,
                         LogName='can_filename',
                         LogType='String',
                         LogText=str(self._can_ws_name),
                         EnableLogging=False)

        if self._interpolate:
            self._interpolate_corrections(workspaces)
        log_prog.report('Grouping Output Workspaces')
        GroupWorkspaces(InputWorkspaces=','.join(workspaces),
                        OutputWorkspace=self._output_ws_name,
                        EnableLogging=False)
        self.setPropertyValue('OutputWorkspace', self._output_ws_name)
Example #58
    def _get_progress(self, number_of_reductions, overall_reduction_mode):
        number_from_merge = 1 if overall_reduction_mode is ReductionMode.Merged else 0
        number_of_progress_reports = number_of_reductions + number_from_merge + 1
        return Progress(self, start=0.0, end=1.0, nreports=number_of_progress_reports)
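
    # Worked example of the report count above: three component reductions in
    # Merged mode give nreports = 3 + 1 + 1 = 5 (the unconditional trailing +1
    # leaves room for one final report).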
    def PyExec(self):

        self.setUp()

        self._progress = Progress(self,
                                  start=0.0,
                                  end=1.0,
                                  nreports=self._run_file.count('+'))

        LoadAndMerge(Filename=self._run_file,
                     OutputWorkspace=self._red_ws,
                     LoaderName='LoadILLIndirect')

        self._instrument = mtd[self._red_ws].getInstrument()

        self._load_map_file()

        run = str(
            mtd[self._red_ws].getRun().getLogData('run_number').value)[:6]

        self._ws = self._red_ws + '_' + run

        if self._run_file.count('+') > 0:  # multiple summed files
            self._ws += '_multiple'

        RenameWorkspace(InputWorkspace=self._red_ws, OutputWorkspace=self._ws)

        LoadParameterFile(Workspace=self._ws, Filename=self._parameter_file)

        self._efixed = self._instrument.getNumberParameter('Efixed')[0]

        self._setup_run_properties()

        if self._reduction_type == 'BATS':
            self._reduce_bats(self._ws)
        else:
            if self._mirror_sense == 14:  # two wings, extract left and right

                size = mtd[self._ws].blocksize()
                left = self._ws + '_left'
                right = self._ws + '_right'
                _extract_workspace(self._ws, left, 0, int(size / 2))
                _extract_workspace(self._ws, right, int(size / 2), size)
                DeleteWorkspace(self._ws)
                self._reduce_one_wing_doppler(left)
                self._reduce_one_wing_doppler(right)
                GroupWorkspaces(InputWorkspaces=[left, right],
                                OutputWorkspace=self._red_ws)

            elif self._mirror_sense == 16:  # one wing

                self._reduce_one_wing_doppler(self._ws)
                GroupWorkspaces(InputWorkspaces=[self._ws],
                                OutputWorkspace=self._red_ws)

        if self._normalise_to == 'Monitor':
            for ws in mtd[self._red_ws]:
                AddSampleLog(Workspace=ws,
                             LogName="NormalisedTo",
                             LogType="String",
                             LogText="Monitor",
                             EnableLogging=False)

        self.setProperty('OutputWorkspace', self._red_ws)
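
# The two-wing split above halves each spectrum along the X (channel) axis;
# _extract_workspace itself is a helper defined elsewhere in the original
# file. Standalone numpy analogue of the index arithmetic:
import numpy as np

channels = np.arange(10)          # blocksize = 10
size = len(channels)
left, right = channels[:size // 2], channels[size // 2:]
print(left, right)                # [0 1 2 3 4] [5 6 7 8 9]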
Example #60
    def PyExec(self):
        in_Runs = self.getProperty("RunNumbers").value
        maskWSname = self._getMaskWSname()
        progress = Progress(self, 0., .25, 3)

        # default arguments for AlignAndFocusPowder
        alignAndFocusArgs = {
            'TMax': 50000,
            'RemovePromptPulseWidth': 1600,
            'PreserveEvents': False,
            'Dspacing': True,  # binning parameters in d-space
            'Params': self.getProperty("Binning").value
        }

        # workspace for loading metadata only to be used in LoadDiffCal and
        # CreateGroupingWorkspace
        metaWS = None

        # either type of file-based calibration is stored in the same variable
        calib = self.getProperty("Calibration").value
        detcalFile = None
        if calib == "Calibration File":
            metaWS = self._loadMetaWS(in_Runs[0])
            LoadDiffCal(Filename=self.getPropertyValue("CalibrationFilename"),
                        WorkspaceName='SNAP',
                        InputWorkspace=metaWS,
                        MakeGroupingWorkspace=False,
                        MakeMaskWorkspace=False)
            alignAndFocusArgs['CalibrationWorkspace'] = 'SNAP_cal'
        elif calib == 'DetCal File':
            detcalFile = ','.join(self.getProperty('DetCalFilename').value)
        progress.report('loaded calibration')

        norm = self.getProperty("Normalization").value

        if norm == "From Processed Nexus":
            norm_File = self.getProperty("NormalizationFilename").value
            normalizationWS = 'normWS'
            LoadNexusProcessed(Filename=norm_File,
                               OutputWorkspace=normalizationWS)
            progress.report('loaded normalization')
        elif norm == "From Workspace":
            normalizationWS = str(
                self.getProperty("NormalizationWorkspace").value)
            progress.report('')
        else:
            normalizationWS = None
            progress.report('')

        group = self._generateGrouping(in_Runs[0], metaWS, progress)

        if metaWS is not None:
            DeleteWorkspace(Workspace=metaWS)

        Process_Mode = self.getProperty("ProcessingMode").value

        prefix = self.getProperty("OptionalPrefix").value

        # --------------------------- REDUCE DATA -----------------------------

        Tag = 'SNAP'
        if self.getProperty("LiveData").value:
            Tag = 'Live'

        progStart = .25
        progDelta = (1. - progStart) / len(in_Runs)
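        # within each run's slice of the progress bar: ~.25 goes to Load,
        # .5 to AlignAndFocusPowder, and the final .25 to conversion,
        # normalization and saving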
        for i, runnumber in enumerate(in_Runs):
            self.log().notice("processing run %s" % runnumber)
            self.log().information(str(self.get_IPTS_Local(runnumber)))

            # put together output names
            new_Tag = Tag
            if len(prefix) > 0:
                new_Tag += '_' + prefix
            basename = '%s_%s_%s' % (new_Tag, runnumber, group)

            if self.getProperty("LiveData").value:
                raise RuntimeError('Live data is not currently supported')
            else:
                Load(Filename='SNAP' + str(runnumber),
                     OutputWorkspace=basename + '_red',
                     startProgress=progStart,
                     endProgress=progStart + .25 * progDelta)
                progStart += .25 * progDelta
            redWS = basename + '_red'

            # overwrite geometry with detcal files
            if calib == 'DetCal File':
                LoadIsawDetCal(InputWorkspace=redWS, Filename=detcalFile)

            # create unfocussed data if in set-up mode
            if Process_Mode == "Set-Up":
                unfocussedWksp = '{}_{}_d'.format(new_Tag, runnumber)
            else:
                unfocussedWksp = ''

            AlignAndFocusPowder(
                InputWorkspace=redWS,
                OutputWorkspace=redWS,
                MaskWorkspace=maskWSname,  # can be empty string
                GroupingWorkspace=group,
                UnfocussedWorkspace=unfocussedWksp,  # can be empty string
                startProgress=progStart,
                endProgress=progStart + .5 * progDelta,
                **alignAndFocusArgs)
            progStart += .5 * progDelta

            # the rest takes up the final .25 of this run's progress slice
            progress = Progress(self, progStart, progStart + .25 * progDelta,
                                2)

            # AlignAndFocusPowder leaves the data in time-of-flight
            ConvertUnits(InputWorkspace=redWS,
                         OutputWorkspace=redWS,
                         Target='dSpacing',
                         EMode='Elastic')

            # Edit instrument geometry to make final workspace smaller on disk
            det_table = PreprocessDetectorsToMD(
                InputWorkspace=redWS, OutputWorkspace='__SNAP_det_table')
            polar = np.degrees(det_table.column('TwoTheta'))
            azi = np.degrees(det_table.column('Azimuthal'))
            EditInstrumentGeometry(Workspace=redWS,
                                   L2=det_table.column('L2'),
                                   Polar=polar,
                                   Azimuthal=azi)
            mtd.remove('__SNAP_det_table')
            progress.report('simplify geometry')

            # AlignAndFocus doesn't necessarily rebin the data correctly
            if Process_Mode == "Set-Up":
                Rebin(InputWorkspace=unfocussedWksp,
                      Params=alignAndFocusArgs['Params'],
                      OutputWorkspace=unfocussedWksp)

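            # scale by the accumulated proton charge so runs of
            # different length are comparable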
            NormaliseByCurrent(InputWorkspace=redWS, OutputWorkspace=redWS)

            # normalize the data as requested
            normalizationWS = self._generateNormalization(
                redWS, norm, normalizationWS)
            normalizedWS = None
            if normalizationWS is not None:
                normalizedWS = basename + '_nor'
                Divide(LHSWorkspace=redWS,
                       RHSWorkspace=normalizationWS,
                       OutputWorkspace=normalizedWS)
                ReplaceSpecialValues(InputWorkspace=normalizedWS,
                                     OutputWorkspace=normalizedWS,
                                     NaNValue=0,
                                     NaNError=0,
                                     InfinityValue=0,
                                     InfinityError=0)
                progress.report('normalized')
            else:
                progress.report()

            # rename everything as appropriate and determine output workspace name
            if normalizedWS is None:
                outputWksp = redWS
            else:
                outputWksp = normalizedWS

                if norm == "Extracted from Data" and Process_Mode == "Production":
                    DeleteWorkspace(Workspace=redWS)
                    DeleteWorkspace(Workspace=normalizationWS)

            # Save requested formats
            saveDir = self.getPropertyValue("OutputDirectory").strip()
            if len(saveDir) <= 0:
                self.log().notice('Using default save location')
                saveDir = os.path.join(self.get_IPTS_Local(runnumber),
                                       'shared', 'data')
            self._save(saveDir, basename, outputWksp)

            # set workspace as an output so it gets history
            propertyName = 'OutputWorkspace_' + str(outputWksp)
            self.declareProperty(
                WorkspaceProperty(propertyName, outputWksp, Direction.Output))
            self.setProperty(propertyName, outputWksp)

            # declare some things as extra outputs in set-up
            if Process_Mode != "Production":
                # use a distinct name here: 'prefix' above holds the
                # OptionalPrefix value and is reused when naming the next
                # run's workspaces
                propPrefix = 'OutputWorkspace_{:d}_'.format(i)
                propNames = [propPrefix + it for it in ['d', 'norm', 'normalizer']]
                wkspNames = [
                    '%s_%s_d' % (new_Tag, runnumber), basename + '_red',
                    '%s_%s_normalizer' % (new_Tag, runnumber)
                ]
                for (propName, wkspName) in zip(propNames, wkspNames):
                    if mtd.doesExist(wkspName):
                        self.declareProperty(
                            WorkspaceProperty(propName, wkspName,
                                              Direction.Output))
                        self.setProperty(propName, wkspName)
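The dynamic-output pattern above (declaring a WorkspaceProperty inside PyExec and then setting it) is what attaches each reduced workspace to the algorithm's history. A minimal sketch of that pattern, assuming the standard Mantid Python-algorithm API; the DynamicOutputs name and its property names are illustrative:

from mantid.api import AlgorithmFactory, PythonAlgorithm, WorkspaceProperty
from mantid.kernel import Direction
from mantid.simpleapi import CreateSampleWorkspace


class DynamicOutputs(PythonAlgorithm):

    def PyInit(self):
        # nothing declared up front: outputs are created on the fly in PyExec
        pass

    def PyExec(self):
        for i in range(2):
            wksp = 'out_%d' % i
            CreateSampleWorkspace(OutputWorkspace=wksp)
            propName = 'OutputWorkspace_%d' % i
            # declaring the property at run time registers the workspace as
            # an output, so it picks up this algorithm in its history
            self.declareProperty(
                WorkspaceProperty(propName, wksp, Direction.Output))
            self.setProperty(propName, wksp)


AlgorithmFactory.subscribe(DynamicOutputs)

Once subscribed, each declared output appears as a named property of the executed algorithm, just as the per-run '_red' and '_nor' workspaces do above.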