    def runTest(self):
        UseCompatibilityMode()
        config['default.instrument'] = 'SANS2D'
        SANS2DTUBES()
        Set1D()
        Detector("rear-detector")
        # This contains two MASKFILE commands, each resulting in a separate call to MaskDetectors.
        MaskFile('SANS2DTube_ZerroErrorFreeTest.txt')

        # Save to a file; the resulting output file should not contain any zero errors
        csv_file = FileFinder.getFullPath("SANS2DTUBES_ZeroErrorFree_batch.csv")
        save_alg = {"SaveNexus": "nxs"}
        BatchReduce(csv_file, 'nxs', saveAlgs=save_alg, plotresults=False, save_as_zero_error_free=True)
        DeleteWorkspace('zero_free_out_rear_1D_1.75_12.5')

        # The zero correction only occurs for the saved files. Stephen King mentioned that the
        # original workspaces should not be tampered with
        self._final_output = os.path.join(config['defaultsave.directory'], 'zero_free_out_rear_1D_1.75_12.5.nxs')
        self._final_workspace = 'ws'
        Load(Filename=self._final_output, OutputWorkspace=self._final_workspace)
Example #2
@contextmanager  # from contextlib; required so this generator can be used in a with-statement
def pyexec_setup(new_options):
    """
    Backup keys of mantid.config and clean up temporary files and workspaces
    upon algorithm completion or exception raised.
    Workspaces with names beginning with '_t_' are assumed temporary.

    Parameters
    ----------
    new_options: dict
        Dictionary of mantid configuration options to be modified.
    """
    # Hold in this tuple all temporary objects to be removed after completion
    temp_objects = namedtuple('temp_objects', 'files workspaces')
    temps = temp_objects(list(), list())

    previous_config = dict()
    for key, value in new_options.items():
        previous_config[key] = mantid_config[key]
        mantid_config[key] = value
    try:
        yield temps
    finally:
        # reinstate the mantid options
        for key, value in previous_config.items():
            mantid_config[key] = value
        # delete temporary files
        for file_name in temps.files:
            os.remove(file_name)
        # remove any workspace added to temps.workspaces or whose name begins
        # with "_t_"
        to_be_removed = set()
        for name in AnalysisDataService.getObjectNames():
            if '_t_' == name[0:3]:
                to_be_removed.add(name)
        for workspace in temps.workspaces:
            if isinstance(workspace, str):
                to_be_removed.add(workspace)
            else:
                to_be_removed.add(workspace.name())
        for name in to_be_removed:
            DeleteWorkspace(name)
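
A minimal usage sketch for the generator above used as a context manager; it assumes the module-level imports the snippet relies on (contextmanager, os, namedtuple, mantid_config, AnalysisDataService, DeleteWorkspace) are in place, and the configuration key shown is only an example:

import os
import tempfile
from mantid.simpleapi import CreateSampleWorkspace

with pyexec_setup({'default.facility': 'SNS'}) as temps:
    CreateSampleWorkspace(OutputWorkspace='_t_scratch')  # '_t_' prefix marks it as temporary
    handle, path = tempfile.mkstemp(suffix='.dat')
    os.close(handle)
    temps.files.append(path)  # registered files are deleted during cleanup
# here '_t_scratch' is gone from the ADS and 'default.facility' holds its previous value
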
Example #3
    def PyExec(self):
        filename = self.getProperty("Filename").value
        wavelength = self.getProperty("wavelength").value
        outWS = self.getPropertyValue("OutputWorkspace")

        LoadEventNexus(Filename=filename,
                       OutputWorkspace=outWS,
                       LoadMonitors=True)
        Integration(InputWorkspace=outWS, OutputWorkspace=outWS)

        if self.getProperty("ApplyMask").value:
            MaskBTP(outWS, Bank='8', Tube='449-480')
            MaskBTP(outWS, Pixel='1,2,511,512')

        mtd[outWS].getAxis(0).setUnit("Wavelength")
        w = [wavelength - 0.001, wavelength + 0.001]
        for idx in range(mtd[outWS].getNumberHistograms()):
            mtd[outWS].setX(idx, w)

        SetGoniometer(outWS, Axis0="HB2C:Mot:s1,0,1,0,1")
        AddSampleLog(outWS,
                     LogName="gd_prtn_chrg",
                     LogType='Number',
                     NumberType='Double',
                     LogText=str(mtd[outWS + '_monitors'].getNumberEvents()))
        DeleteWorkspace(outWS + '_monitors')

        AddSampleLog(outWS,
                     LogName="Wavelength",
                     LogType='Number',
                     NumberType='Double',
                     LogText=str(wavelength))
        AddSampleLog(outWS,
                     LogName="Ei",
                     LogType='Number',
                     NumberType='Double',
                     LogText=str(
                         UnitConversion.run('Wavelength', 'Energy', wavelength,
                                            0, 0, 0, Elastic, 0)))

        self.setProperty('OutputWorkspace', outWS)
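
The Ei sample log above comes from Mantid's UnitConversion helper; as a sanity check, the same elastic conversion follows from E = h^2 / (2 m_n lambda^2), which is about 81.8042 meV A^2 divided by the squared wavelength. A small standalone check in plain Python (the example wavelength is only illustrative):

def wavelength_to_energy_mev(wavelength_angstrom):
    """Elastic neutron energy in meV for a wavelength in Angstrom."""
    return 81.8042 / wavelength_angstrom ** 2  # h^2 / (2 m_n) expressed in meV * A^2

print(wavelength_to_energy_mev(1.488))  # roughly 36.9 meV
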
    def test_number_density(self):
        # Mass Density for water is 1.0
        # Number Density for water is 0.033428
        # These should give similar results

        kwargs = self._arguments
        kwargs['DensityType'] = 'Number Density'
        kwargs['Density'] = 0.033428

        corrected_num = SimpleShapeMonteCarloAbsorption(
            InputWorkspace=self._red_ws,
            Shape='FlatPlate',
            Width=2.0,
            Thickness=2.0,
            **kwargs)

        # _corrected_flat_plate is with mass density 1.0
        CompareWorkspaces(self._corrected_flat_plate,
                          corrected_num,
                          Tolerance=1e-6)
        DeleteWorkspace(corrected_num)
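
A quick check of where the 0.033428 in the comment comes from, assuming the number density is expressed in formula units of H2O per cubic Angstrom; this arithmetic is only illustrative and is not part of the test:

avogadro = 6.02214076e23       # formula units per mole
molar_mass_h2o = 18.0153       # g/mol
mass_density = 1.0             # g/cm^3
cm3_in_angstrom3 = 1.0e24      # A^3 per cm^3

number_density = mass_density / molar_mass_h2o * avogadro / cm3_in_angstrom3
print(round(number_density, 6))  # ~0.033428 formula units per A^3
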
    def testInputFail(self):
        signal = range(0, 1000)
        error = range(0, 1000)
        samplews = CreateMDHistoWorkspace(
            Dimensionality=3,
            SignalInput=signal,
            ErrorInput=error,
            Extents='-3,3,-3,3,-3,3',
            NumberOfBins='10,10,10',
            Names='x,y,z',
            Units='MomentumTransfer,EnergyTransfer,EnergyTransfer')

        # A MDHisto WS with no experiment info should fail
        with self.assertRaises(RuntimeError):
            HB3AAdjustSampleNorm(InputWorkspaces=samplews,
                                 DetectorHeightOffset=0.0,
                                 DetectorDistanceOffset=0.0,
                                 OutputWorkspace="__tmpout",
                                 Wavelength=2.0)

        DeleteWorkspace(samplews)
Example #6
    def test_not_in_wavelength(self):
        red_ws_not_wavelength = Load('irs26176_graphite002_red.nxs')

        kwargs = {
            'InputWorkspace': red_ws_not_wavelength,
            'ChemicalFormula': 'H2-O',
            'DensityType': 'Mass Density',
            'Density': 1.0,
            'EventsPerPoint': 200,
            'BeamHeight': 3.5,
            'BeamWidth': 4.0,
            'Height': 2.0,
            'Shape': 'FlatPlate',
            'Width': 1.4,
            'Thickness': 2.1
        }

        with self.assertRaises(RuntimeError):
            SimpleShapeMonteCarloAbsorption(**kwargs)

        DeleteWorkspace(red_ws_not_wavelength)
    def test_sum(self):
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)

        # Check whether the sum is calculated correctly, for theta=0, dwf=1
        # The result should be somewhere between the full bin sums.
        y_sumMin = np.sum(self._input_ws.readY(0)[self._lowerBoundRange])
        y_sumMax = np.sum(self._input_ws.readY(0)[self._upperBoundRange])
        e_sumMin = np.sqrt(np.sum(np.square(self._input_ws.readE(0)[self._lowerBoundRange])))
        e_sumMax = np.sqrt(np.sum(np.square(self._input_ws.readE(0)[self._upperBoundRange])))
        self.assertLess(y_sumMin, wsoutput.readY(0)[0])
        self.assertGreater(y_sumMax, wsoutput.readY(0)[0])
        self.assertLess(e_sumMin, wsoutput.readE(0)[0])
        self.assertGreater(e_sumMax, wsoutput.readE(0)[0])

        DeleteWorkspace(wsoutput)
Example #8
 def test_table_to_workspace(self) -> None:
     r"""Test the conversion of a TableWorkspace containing the masked detector ID's to a MaskWorkspace object"""
     output_workspace = 'test_table_to_workspace_masked'
     # Have a fake mask table, masking bank 42
     mask_table = CreateEmptyTableWorkspace(OutputWorkspace=output_workspace)
     mask_table.addColumn(type='int', name='Detector ID')
     begin, end = 167936, 172030  # Bank 42 has detector ID's from 167936 to 172030
     for detector_id in range(begin, 1 + end):
         mask_table.addRow([detector_id])
     # Convert to MaskWorkspace
     mask_table = _table_to_workspace(mask_table)
     # Check the output workspace is of type MaskWorkspace
     assert isinstance(mask_table, MaskWorkspace)
     # Check the output workspace has 1 on workspace indexes for bank 42, and 0 elsewhere
     mask_flags = mask_table.extractY().flatten()
     offset = 3  # due to the detector monitors, workspace_index = detector_id + offset
     masked_workspace_indexes = slice(begin + offset, 1 + end + offset)
     assert np.all(mask_flags[masked_workspace_indexes])  # all values are 1
     mask_flags = np.delete(mask_flags, masked_workspace_indexes)
     assert not np.any(mask_flags)  # no value is 1
     DeleteWorkspace(output_workspace)
    def test_disabled_debye_waller_correction(self):
        outputWorkspaceName = "output_ws"

        # change theta to make dwf != 1
        EditInstrumentGeometry(self._input_ws,
                               L2="4,8",
                               Polar="0,15",
                               Azimuthal="0,0",
                               DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName,
                                 EnableDWF=False)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        for i in range(wsoutput.getNumberHistograms()):
            self.assertEqual(100., wsoutput.readY(i)[0])
            self.assertEqual(10., wsoutput.readE(i)[0])

        DeleteWorkspace(wsoutput)
Example #10
    def test_remove_if_correctly_removes_lines_associated_with_multiple_workspaces(
            self):
        second_ws = CreateSampleWorkspace()
        line_ws2d_histo_spec_2 = self.ax.plot(self.ws2d_histo,
                                              specNum=2,
                                              linewidth=6)[0]
        line_ws2d_histo_spec_3 = self.ax.plot(self.ws2d_histo,
                                              specNum=3,
                                              linewidth=6)[0]
        line_second_ws = self.ax.plot(second_ws, specNum=5)[0]
        self.assertEqual(3, len(self.ax.lines))

        is_empty = self.ax.remove_artists_if(lambda artist: artist.get_label(
        ) in ['ws2d_histo: 6', 'second_ws: spec 5'])
        self.assertEqual(1, len(self.ax.lines))
        self.assertTrue(line_ws2d_histo_spec_2 not in self.ax.lines)
        self.assertTrue(line_ws2d_histo_spec_3 in self.ax.lines)
        self.assertTrue(line_second_ws not in self.ax.lines)
        self.assertEqual(len(self.ax.tracked_workspaces), 1)
        self.assertFalse(is_empty)
        DeleteWorkspace(second_ws)
def applyPowder(ipts_list):
    from mantid.simpleapi import BASISPowderDiffraction, SaveAscii, DeleteWorkspace
    minimal_size = int(10e6)  # 10MB
    for ipt in ipts_list:
        print('ipts =', ipt)
        for root, dirs, files in os.walk('/SNS/BSS/IPTS-{}/0'.format(ipt)):
            print('nfiles =', len(files))
            time.sleep(3)
            for file in files:
                if file.endswith('event.nxs'):
                    full_path = os.path.join(root, file)
                    if os.stat(full_path).st_size > minimal_size:
                        print(full_path)
                        run_number = full_path.split('BSS_')[1].split(
                            '_event')[0]
                        print(run_number)
                        out_name = 'ipts_{}_run_{}'.format(ipt, run_number)
                        out_name = os.path.join(outdir, out_name)
                        title = 'IPTS {} RUN {}'.format(ipt, run_number)
                        print(out_name)
                        print(title)
                        try:
                            BASISPowderDiffraction(
                                RunNumbers=run_number,
                                MomentumTransferBins=[0.1, 0.0025, 3.0],
                                OutputWorkspace='w',
                                MonitorNormalization=0)
                            SaveAscii(InputWorkspace='w_angle',
                                      Filename=out_name + '_angle.dat',
                                      WriteSpectrumID=False,
                                      Separator='Space',
                                      ColumnHeader=False)
                            DeleteWorkspace(Workspace='w_angle')
                        except:
                            pass
                        finally:
                            print('\n\n\n**********************************')
                            print('\n   {}   '.format(title))
                            print('\n**********************************\n\n')
                            time.sleep(2)
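
applyPowder walks the IPTS folders under /SNS/BSS and reduces every sufficiently large event file it finds. A hedged usage sketch; it assumes applyPowder is defined in the same script so the module-level outdir it references is visible, and the IPTS numbers are placeholders:

import os

outdir = '/tmp/basis_powder'          # assumed output directory for the .dat files
os.makedirs(outdir, exist_ok=True)

applyPowder([21234, 21235])           # placeholder IPTS numbers
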
Example #12
 def case_restart_diff_order(self, order=None):
     """
     The considered testing scenario is as follows: calculations are first performed for
     self._quantum_order_event, then repeated for order (a different quantum order event). If
     order > self._quantum_order_event, S should be recalculated; otherwise it is loaded from an hdf file.
     :param order: number of the quantum order event for which the restart should be done.
     """
     self.case_from_scratch()
     DeleteWorkspace(self._output_name)
     Abins(AbInitioProgram=self._ab_initio_program,
           VibrationalOrPhononFile=self._system_name +
           self._extension[self._ab_initio_program],
           TemperatureInKelvin=self._temperature,
           SampleForm=self._sample_form,
           Instrument=self._instrument_name,
           BinWidthInWavenumber=self._bin_width,
           Atoms=self._atoms,
           SumContributions=self._sum_contributions,
           Scale=self._scale,
           QuantumOrderEventsNumber=str(order),
           ScaleByCrossSection=self._cross_section_factor,
           OutputWorkspace=self._output_name)
Example #13
def convertToHKL(ws,
                 OutputWorkspace='__md_hkl',
                 UB=None,
                 Append=False,
                 scale=None,
                 BinningDim0='-10.05,10.05,201',
                 BinningDim1='-10.05,10.05,201',
                 BinningDim2='-10.05,10.05,201',
                 Uproj=(1, 0, 0),
                 Vproj=(0, 1, 0),
                 Wproj=(0, 0, 1)):
    """Output MDHistoWorkspace in HKL
    """

    SetUB(ws, UB=UB)

    ConvertToMD(ws,
                QDimensions='Q3D',
                QConversionScales='HKL',
                dEAnalysisMode='Elastic',
                Q3DFrames='HKL',
                OutputWorkspace='__temp',
                Uproj=Uproj,
                Vproj=Vproj,
                Wproj=Wproj)

    if scale is not None:
        mtd['__temp'] *= scale

    BinMD(InputWorkspace='__temp',
          TemporaryDataWorkspace=OutputWorkspace
          if Append and mtd.doesExist(OutputWorkspace) else None,
          OutputWorkspace=OutputWorkspace,
          AlignedDim0=mtd['__temp'].getDimension(0).name + ',' + BinningDim0,
          AlignedDim1=mtd['__temp'].getDimension(1).name + ',' + BinningDim1,
          AlignedDim2=mtd['__temp'].getDimension(2).name + ',' + BinningDim2)
    DeleteWorkspace('__temp')

    return OutputWorkspace
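
A hedged usage sketch for convertToHKL; the file names and the UB matrix are placeholders, and any unit or frame preparation the instrument workflow normally performs before ConvertToMD is assumed to have been done already:

from mantid.simpleapi import Load

ws = Load('HB2C_12345.nxs.h5')           # placeholder run file
ub = [0.07, 0.0, 0.0,
      0.0, 0.07, 0.0,
      0.0, 0.0, 0.07]                    # placeholder UB matrix, 9 row-major elements
md_name = convertToHKL(ws, OutputWorkspace='md_hkl', UB=ub)

# append a second run into the same binned HKL histogram
ws2 = Load('HB2C_12346.nxs.h5')          # placeholder run file
convertToHKL(ws2, OutputWorkspace='md_hkl', UB=ub, Append=True)
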
Example #14
    def PyExec(self):
        input_ws = self.getProperty("InputWorkspace").value

        extents = self.getProperty("Extents").value
        bins = self.getProperty("Bins").value

        # Get the UB from either the PeaksWS if provided, or from the input workspace
        if not self.getProperty("PeaksWorkspace").isDefault:
            peak_ws = self.getProperty("PeaksWorkspace").value
            self._lattice = peak_ws.sample().getOrientedLattice()
        else:
            self._lattice = input_ws.getExperimentInfo(0).sample().getOrientedLattice()

        # Get axis names and units from u,v,w projections, as done in ConvertWANDSCDtoQ:
        w = np.eye(3)
        w[:, 0] = self.getProperty("Uproj").value
        w[:, 1] = self.getProperty("Vproj").value
        w[:, 2] = self.getProperty("Wproj").value
        char_dict = {0: '0', 1: '{1}', -1: '-{1}'}
        chars = ['H', 'K', 'L']
        names = ['[' + ','.join(char_dict.get(j, '{0}{1}')
                                .format(j, chars[np.argmax(np.abs(w[:, i]))]) for j in w[:, i]) + ']' for i in range(3)]

        q = [self._lattice.qFromHKL(w[i]) for i in range(3)]

        units = ['in {:.3f} A^-1'.format(q[i].norm()) for i in range(3)]

        mdhist = BinMD(InputWorkspace=input_ws, AxisAligned=False, NormalizeBasisVectors=False,
                       BasisVector0='{},{},{},{},{}'.format(names[0], units[0], q[0].X(), q[0].Y(), q[0].Z()),
                       BasisVector1='{},{},{},{},{}'.format(names[1], units[1], q[1].X(), q[1].Y(), q[1].Z()),
                       BasisVector2='{},{},{},{},{}'.format(names[2], units[2], q[2].X(), q[2].Y(), q[2].Z()),
                       OutputExtents=extents,
                       OutputBins=bins)

        SetMDFrame(mdhist, MDFrame='HKL', Axes='0, 1, 2')

        self.setProperty("OutputWorkspace", mdhist)

        DeleteWorkspace(mdhist)
Example #15
 def _run_focus(input_workspace, tof_output_name, curves, grouping_ws,
                region_calib) -> None:
     """Focus the processed full instrument workspace over the chosen region of interest
     :param input_workspace: Processed full instrument workspace converted to dSpacing
     :param tof_output_name: Name for the time-of-flight output workspace
     :param curves: Workspace containing the vanadium curves for this region of interest
     :param grouping_ws: Grouping workspace to pass to DiffractionFocussing
     :param region_calib: Region of interest calibration workspace (table ws output from PDCalibration)
     """
     # rename workspace prior to focussing to avoid errors later
     dspacing_output_name = tof_output_name + "_dSpacing"
     # focus sample over specified region of interest
     focused_sample = DiffractionFocussing(
         InputWorkspace=input_workspace,
         OutputWorkspace=dspacing_output_name,
         GroupingWorkspace=grouping_ws)
     curves_rebinned = RebinToWorkspace(WorkspaceToRebin=curves,
                                        WorkspaceToMatch=focused_sample)
     # flux correction - divide focused sample data by rebinned focused vanadium curve data
     Divide(LHSWorkspace=focused_sample,
            RHSWorkspace=curves_rebinned,
            OutputWorkspace=focused_sample,
            AllowDifferentNumberSpectra=True)
     # apply calibration from specified region of interest
     ApplyDiffCal(InstrumentWorkspace=focused_sample,
                  CalibrationWorkspace=region_calib)
     # set bankid for use in fit tab
     run = focused_sample.getRun()
     if region_calib.name() == "engggui_calibration_bank_1":
         run.addProperty("bankid", 1, True)
     elif region_calib.name() == "engggui_calibration_bank_2":
         run.addProperty("bankid", 2, True)
     else:
         run.addProperty("bankid", 3, True)
     # output in both dSpacing and TOF
     ConvertUnits(InputWorkspace=focused_sample,
                  OutputWorkspace=tof_output_name,
                  Target='TOF')
     DeleteWorkspace(curves_rebinned)
Example #16
    def _save(self, saveDir, basename, outputWksp):
        if not self.getProperty("SaveData").value:
            return

        self.log().notice('Writing to \'' + saveDir + '\'')

        SaveNexusProcessed(InputWorkspace=outputWksp,
                           Filename=os.path.join(saveDir, 'nexus', basename + '.nxs'))
        SaveAscii(InputWorkspace=outputWksp,
                  Filename=os.path.join(saveDir, 'd_spacing', basename + '.dat'))
        ConvertUnits(InputWorkspace=outputWksp, OutputWorkspace='WS_tof',
                     Target="TOF", AlignBins=False)

        # GSAS and FullProf require data in time-of-flight
        SaveGSS(InputWorkspace='WS_tof',
                Filename=os.path.join(saveDir, 'gsas', basename + '.gsa'),
                Format='SLOG', SplitFiles=False, Append=False, ExtendedHeader=True)
        SaveFocusedXYE(InputWorkspace='WS_tof',
                       Filename=os.path.join(
                           saveDir, 'fullprof', basename + '.dat'),
                       SplitFiles=True, Append=False)
        DeleteWorkspace(Workspace='WS_tof')
Example #17
def identify_bad_detectors(workspace_name):
    """
    Identify detectors which should be masked

    @param workspace_name Name of workspace to use to get masking detectors
    @return List of masked spectra
    """
    from mantid.simpleapi import (IdentifyNoisyDetectors, DeleteWorkspace)

    instrument = mtd[workspace_name].getInstrument()

    try:
        masking_type = instrument.getStringParameter('Workflow.Masking')[0]
    except IndexError:
        masking_type = 'None'

    logger.information('Masking type: %s' % masking_type)

    masked_spec = list()

    if masking_type == 'IdentifyNoisyDetectors':
        ws_mask = '__workspace_mask'
        IdentifyNoisyDetectors(InputWorkspace=workspace_name,
                               OutputWorkspace=ws_mask)

        # Convert workspace to a list of spectra
        num_spec = mtd[ws_mask].getNumberHistograms()
        masked_spec = [
            spec for spec in range(0, num_spec)
            if mtd[ws_mask].readY(spec)[0] == 0.0
        ]

        # Remove the temporary masking workspace
        DeleteWorkspace(ws_mask)

    logger.debug('Masked spectra for workspace %s: %s' %
                 (workspace_name, str(masked_spec)))

    return masked_spec
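
A hedged usage sketch for identify_bad_detectors; the reduced file name is reused from the test above, and spectra are only flagged when the instrument parameter Workflow.Masking is set to IdentifyNoisyDetectors:

from mantid.simpleapi import Load

Load(Filename='irs26176_graphite002_red.nxs', OutputWorkspace='sample_ws')
bad_spectra = identify_bad_detectors('sample_ws')
print('Spectra to mask:', bad_spectra)
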
Example #18
    def __determineCharacterizations(self, filename, wkspname):
        useCharac = bool(self.charac is not None)
        loadFile = not mtd.doesExist(wkspname)

        # input workspace is only needed to find a row in the characterizations table
        tempname = None
        if loadFile:
            if useCharac:
                tempname = '__%s_temp' % wkspname
                # set the loader for this file
                loader = self.__createLoader(filename, tempname)
                loader.setProperty(
                    'MetaDataOnly',
                    True)  # this is only supported by LoadEventNexus
                loader.execute()

                # get the underlying loader name if we used the generic one
                if self.__loaderName == 'Load':
                    self.__loaderName = loader.getPropertyValue('LoaderName')
        else:
            tempname = wkspname  # assume it is already loaded

        # put together argument list
        args = dict(ReductionProperties=self.getProperty(
            'ReductionProperties').valueAsStr)
        for name in PROPS_FOR_PD_CHARACTER:
            prop = self.getProperty(name)
            if not prop.isDefault:
                args[name] = prop.value
        if tempname is not None:
            args['InputWorkspace'] = tempname
        if useCharac:
            args['Characterizations'] = self.charac

        PDDetermineCharacterizations(**args)

        if loadFile and useCharac:
            DeleteWorkspace(Workspace=tempname)
Example #19
def get_ipf_parameters_from_run(run_number, instrument, analyser, reflection,
                                parameters):
    from IndirectCommon import getInstrumentParameter

    ipf_filename = os.path.join(
        config['instrumentDefinition.directory'],
        instrument + '_' + analyser + '_' + reflection + '_Parameters.xml')

    results = dict()
    try:
        run_workspace = '__temp'
        do_load(instrument + str(run_number), run_workspace, ipf_filename,
                False, {})

        for parameter in parameters:
            results[parameter] = getInstrumentParameter(
                run_workspace, parameter)

        DeleteWorkspace(run_workspace)
    except (ValueError, RuntimeError):
        pass

    return results
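
A hedged usage sketch; the run number, analyser settings and parameter name are examples only, and the do_load helper used above must be importable for the call to succeed:

# look up Efixed from the IRIS graphite 002 parameter file for an example run
params = get_ipf_parameters_from_run(26176, 'IRIS', 'graphite', '002', ['Efixed'])
print(params)  # empty dict if loading or the parameter lookup failed
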
Example #20
    def _save(self, runnumber, basename, outputWksp):
        if not self.getProperty("SaveData").value:
            return

        # determine where to save the data
        saveDir = self.getPropertyValue("OutputDirectory").strip()
        if len(saveDir) <= 0:
            self.log().notice('Using default save location')
            saveDir = os.path.join(self.get_IPTS_Local(runnumber), 'shared',
                                   'data')

        self.log().notice('Writing to \'' + saveDir + '\'')

        SaveNexusProcessed(InputWorkspace=outputWksp,
                           Filename=os.path.join(saveDir, 'nexus',
                                                 basename + '.nxs'))
        SaveAscii(InputWorkspace=outputWksp,
                  Filename=os.path.join(saveDir, 'd_spacing',
                                        basename + '.dat'))
        ConvertUnits(InputWorkspace=outputWksp,
                     OutputWorkspace='WS_tof',
                     Target="TOF",
                     AlignBins=False)

        # GSAS and FullProf require data in time-of-flight
        SaveGSS(InputWorkspace='WS_tof',
                Filename=os.path.join(saveDir, 'gsas', basename + '.gsa'),
                Format='SLOG',
                SplitFiles=False,
                Append=False,
                ExtendedHeader=True)
        SaveFocusedXYE(InputWorkspace='WS_tof',
                       Filename=os.path.join(saveDir, 'fullprof',
                                             basename + '.dat'),
                       SplitFiles=True,
                       Append=False)
        DeleteWorkspace(Workspace='WS_tof')
Example #21
def dynamicsusceptibility(workspace, temperature, outputName=None, zeroEnergyEpsilon=1e-6):
    """Convert :math:`S(Q,E)` to susceptibility :math:`\\chi''(Q,E)`.

    #. If the X units are not in DeltaE, the workspace is transposed
    #. The Y data in *workspace* is multiplied by :math:`1 - e^{\\Delta E / (kT)}`
    #. Y data in the bin closest to 0 meV and within -*zeroEnergyEpsilon* < :math:`\\Delta E` < *zeroEnergyEpsilon* is set to 0
    #. If the input was transposed, transpose the output as well

    :param workspace: a :math:`S(Q,E)` workspace to convert
    :type workspace: :class:`mantid.api.MatrixWorkspace`
    :param temperature: temperature in Kelvin
    :type temperature: float
    :param outputName: name of the output workspace. If :class:`None`, the output will be given some generated name.
    :type outputName: str or None
    :param zeroEnergyEpsilon: if a bin center is within this value from 0, the bin's value is set to zero.
    :type zeroEnergyEpsilon: float
    :returns: a :class:`mantid.api.MatrixWorkspace` containing :math:`\\chi''(Q,E)`
    """
    workspace = _normws(workspace)
    if not _validate._isSofQW(workspace):
        raise RuntimeError('Failed to calculate dynamic susceptibility. '
                           + "The workspace '{}' does not look like a S(Q,E).".format(str(workspace)))
    horAxis = workspace.getAxis(0)
    horUnit = horAxis.getUnit().unitID()
    doTranspose = horUnit != 'DeltaE'
    if outputName is None:
        outputName = 'CHIofQW_{}'.format(str(workspace))
    if doTranspose:
        workspace = Transpose(workspace, OutputWorkspace='__transposed_SofQW_', EnableLogging=False)
    c = 1e-3 * constants.e / constants.k / temperature
    outWS = OneMinusExponentialCor(workspace, OutputWorkspace=outputName, C=c, Operation='Multiply', EnableLogging=False)
    _removesingularity(outWS, zeroEnergyEpsilon)
    if doTranspose:
        outWS = Transpose(outWS, OutputWorkspace=outputName, EnableLogging=False)
        DeleteWorkspace('__transposed_SofQW_', EnableLogging=False)
    outWS.setYUnitLabel("Dynamic susceptibility")
    return outWS
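
A hedged usage sketch for dynamicsusceptibility, assuming an S(Q,E) workspace named 'sqw' already exists in the ADS (for example from an SofQW-style reduction) with DeltaE on one of its axes:

from mantid.simpleapi import mtd

chi = dynamicsusceptibility(mtd['sqw'], temperature=6.0, outputName='chi_of_QW')
print(chi.YUnitLabel())  # 'Dynamic susceptibility'
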
    def test_property_setup(input_mde: str, reduced_background: str):
        """Test set up properties

        :return:
        """
        # Test that an exception is raised for the wrong background MDE type
        try:
            # Create background workspace in Q lab
            ConvertToMD(InputWorkspace=reduced_background,
                        QDimensions='Q3D',
                        Q3DFrames='Q_sample',
                        OutputWorkspace='bkgd_md',
                        MinValues='-11,-11,-11,-25',
                        MaxValues='11,11,11,49')

            MDNorm(InputWorkspace=input_mde,
                   BackgroundWorkspace='bkgd_md',
                   Dimension0Name='QDimension1',
                   Dimension0Binning='-5,0.05,5',
                   Dimension1Name='QDimension2',
                   Dimension1Binning='-5,0.05,5',
                   Dimension2Name='DeltaE',
                   Dimension2Binning='-2,2',
                   Dimension3Name='QDimension0',
                   Dimension3Binning='-0.5,0.5',
                   SymmetryOperations='x,y,z;x,-y,z;x,y,-z;x,-y,-z',
                   OutputWorkspace='result',
                   OutputDataWorkspace='dataMD',
                   OutputNormalizationWorkspace='normMD')

            DeleteWorkspace(Workspace='bkgd_md')
        except RuntimeError:
            pass
        else:
            raise AssertionError(
                'Expected failure due to Background MD in Q_sample.')
Example #23
    def PyExec(self):
        # Retrieve all relevant properties

        in_Runs = self.getProperty("RunNumbers").value

        maskWSname = self._getMaskWSname()

        # either type of file-based calibration is stored in the same variable
        calib = self.getProperty("Calibration").value
        if calib == "Calibration File":
            cal_File = self.getProperty("CalibrationFilename").value
        elif calib == 'DetCal File':
            cal_File = self.getProperty('DetCalFilename').value
            cal_File = ','.join(cal_File)
        else:
            cal_File = None

        params = self.getProperty("Binning").value
        norm = self.getProperty("Normalization").value

        if norm == "From Processed Nexus":
            norm_File = self.getProperty("NormalizationFilename").value
            LoadNexusProcessed(Filename=norm_File, OutputWorkspace='normWS')
            normWS = 'normWS'
        elif norm == "From Workspace":
            normWS = str(self.getProperty("NormalizationWorkspace").value)
        else:
            normWS = None

        group_to_real = {
            'Banks': 'Group',
            'Modules': 'bank',
            '2_4 Grouping': '2_4Grouping'
        }
        group = self.getProperty('GroupDetectorsBy').value
        real_name = group_to_real.get(group, group)

        if not mtd.doesExist(group):
            if group == '2_4 Grouping':
                group = '2_4_Grouping'
            CreateGroupingWorkspace(InstrumentName='SNAP',
                                    GroupDetectorsBy=real_name,
                                    OutputWorkspace=group)

        Process_Mode = self.getProperty("ProcessingMode").value

        prefix = self.getProperty("OptionalPrefix").value

        # --------------------------- REDUCE DATA -----------------------------

        Tag = 'SNAP'
        for r in in_Runs:
            self.log().notice("processing run %s" % r)
            self.log().information(str(self.get_IPTS_Local(r)))
            if self.getProperty("LiveData").value:
                Tag = 'Live'
                LoadPreNexusLive(Instrument='SNAP', OutputWorkspace='WS')
            else:
                Load(Filename='SNAP' + str(r), OutputWorkspace='WS')
                NormaliseByCurrent(InputWorkspace='WS', OutputWorkspace='WS')

            CompressEvents(InputWorkspace='WS', OutputWorkspace='WS')
            CropWorkspace(InputWorkspace='WS',
                          OutputWorkspace='WS',
                          XMax=50000)
            RemovePromptPulse(InputWorkspace='WS',
                              OutputWorkspace='WS',
                              Width='1600',
                              Frequency='60.4')

            if maskWSname is not None:
                MaskDetectors(Workspace='WS', MaskedWorkspace=maskWSname)

            self._alignAndFocus(params, calib, cal_File, group)

            normWS = self._generateNormalization('WS_red', norm, normWS)
            WS_nor = None
            if normWS is not None:
                WS_nor = 'WS_nor'
                Divide(LHSWorkspace='WS_red',
                       RHSWorkspace=normWS,
                       OutputWorkspace='WS_nor')
                ReplaceSpecialValues(InputWorkspace='WS_nor',
                                     OutputWorkspace='WS_nor',
                                     NaNValue='0',
                                     NaNError='0',
                                     InfinityValue='0',
                                     InfinityError='0')

            new_Tag = Tag
            if len(prefix) > 0:
                new_Tag += '_' + prefix

            # Edit instrument geometry to make final workspace smaller on disk
            det_table = PreprocessDetectorsToMD(
                InputWorkspace='WS_red', OutputWorkspace='__SNAP_det_table')
            polar = np.degrees(det_table.column('TwoTheta'))
            azi = np.degrees(det_table.column('Azimuthal'))
            EditInstrumentGeometry(Workspace='WS_red',
                                   L2=det_table.column('L2'),
                                   Polar=polar,
                                   Azimuthal=azi)
            if WS_nor is not None:
                EditInstrumentGeometry(Workspace='WS_nor',
                                       L2=det_table.column('L2'),
                                       Polar=polar,
                                       Azimuthal=azi)
            mtd.remove('__SNAP_det_table')

            # Save requested formats
            basename = '%s_%s_%s' % (new_Tag, r, group)
            self._save(r, basename, norm)

            # temporary workspace no longer needed
            DeleteWorkspace(Workspace='WS')

            # rename everything as appropriate and determine output workspace name
            RenameWorkspace(InputWorkspace='WS_d',
                            OutputWorkspace='%s_%s_d' % (new_Tag, r))
            RenameWorkspace(InputWorkspace='WS_red',
                            OutputWorkspace=basename + '_red')
            if norm == 'None':
                outputWksp = basename + '_red'
            else:
                outputWksp = basename + '_nor'
                RenameWorkspace(InputWorkspace='WS_nor',
                                OutputWorkspace=basename + '_nor')
            if norm == "Extracted from Data":
                RenameWorkspace(InputWorkspace='peak_clip_WS',
                                OutputWorkspace='%s_%s_normalizer' %
                                (new_Tag, r))

            # delete some things in production
            if Process_Mode == "Production":
                DeleteWorkspace(Workspace='%s_%s_d' %
                                (new_Tag, r))  # was 'WS_d'

                if norm != "None":
                    DeleteWorkspace(Workspace=basename +
                                    '_red')  # was 'WS_red'

                if norm == "Extracted from Data":
                    DeleteWorkspace(Workspace='%s_%s_normalizer' %
                                    (new_Tag, r))  # was 'peak_clip_WS'

            propertyName = 'OutputWorkspace_' + str(outputWksp)
            self.declareProperty(
                WorkspaceProperty(propertyName, outputWksp, Direction.Output))
            self.setProperty(propertyName, outputWksp)
Example #24
def calibrate(ws, tubeSet, knownPositions, funcForm, **kwargs):
    """
    Define the calibrated positions of the detectors inside the tubes defined
    in tubeSet.

    Tubes may be considered a list of aligned detectors that can be treated
    as pixels when their values are displayed.

    The positions of these pixels are provided by the manufacturer, but the real
    positions depend on the electronics inside the tube and vary slightly
    from tube to tube. The calibrate method aims to find the real positions
    of the detectors (pixels) inside the tube.

    For this, it receives an integrated workspace from a special measurement
    designed to produce a pattern of peaks or troughs, where Gaussian peaks
    or edges can be found.


    The calibration follows these steps:

    1. Finding the peaks on each tube
    2. Fitting the peaks against the Known Positions
    3. Defining the new position for the pixels(detectors)

    Let's consider the simplest way of calling calibrate:

    .. code-block:: python

       from tube import calibrate
       ws = Load('WISH17701')
       ws = Integration(ws)
       known_pos = [-0.41,-0.31,-0.21,-0.11,-0.02, 0.09, 0.18, 0.28, 0.39 ]
       peaks_form = 9*[1] # all the peaks are gaussian peaks
       calibTable = calibrate(ws,'WISH/panel03',known_pos, peaks_form)

    In this example, the calibrate framework will consider all the
    tubes (152) from WISH/panel03.
    You may decide to look for a subset of the tubes, by passing the
    **rangeList** option.

    .. code-block:: python

       # This code will calibrate only the tube indexed as number 3
       # (usually tube0004)
       calibTable = calibrate(ws,'WISH/panel03',known_pos,
                              peaks_form, rangeList=[3])

    **Finding the peaks on each tube**

    **Dynamically fitting peaks**

    The framework expects that for each tube, it will find a peak pattern
    around the pixels corresponding to the known_pos positions.

    The way it will work out the estimated peak position (in pixels) is:

    1. Get the length of the tube: distance(first_detector,last_detector) in the tube.
    2. Get the number of detectors in the tube (nDets)
    3. It will be assumed that the center of the tube corresponds to the origin (0)

    .. code-block:: python

      centre_pixel = known_pos * nDets/tube_length + nDets/2

    It will then look for the real peak around the estimated value as:

    .. code-block:: python

       # consider tube_values the array of counts, and peak the estimated
       # position for the peak
       real_peak_pos = argmax(tube_values[peak-margin:peak+margin])

    After finding the real_peak_pos, it will try to fit the region around
    the peak to find the best expected position of the peak in a continuous
    space. It does this by fitting the region around the peak to a
    Gaussian function, and then extracting the PeakCentre returned by the
    fit.

    .. code-block:: python

       centre = real_peak_pos
       fit_start, fit_stop = centre-margin, centre+margin
       values = tube_values[fit_start,fit_stop]
       background = min(values)
       peak = max(values) - background
       width = len(where(values > peak/2+background))
       # It will fit to something like:
       # Fit(function=LinearBackground,A0=background;Gaussian,
       # Height=peak, PeakCentre=centre, Sigma=width,fit_start,fit_end)

    **Force Fitting Parameters**

    These dynamically estimated values can be overridden by defining the **fitPar** for
    the calibrate function

    .. code-block:: python

       eP = [57.5, 107.0, 156.5, 206.0, 255.5, 305.0, 354.5, 404.0, 453.5]
       # Expected Height of Gaussian Peaks (initial value of fit parameter)
       ExpectedHeight = 1000.0
       # Expected width of Gaussian peaks in pixels
       # (initial value of fit parameter)
       ExpectedWidth = 10.0
       fitPar = TubeCalibFitParams( eP, ExpectedHeight, ExpectedWidth )
       calibTable = calibrate(ws, 'WISH/panel03', known_pos, peaks_form, fitPar=fitPar)

    **Different Function Factors**

    Although the examples consider only Gaussian peaks, it is possible to
    change the function factors to edges by passing the index of the
    known_position through the **funcForm**. Hence, considering three special
    points, where there is one Gaussian peak and two edges, the calibrate
    could be configured as

    .. code-block:: python

       known_pos = [-0.1, 2, 2.3]
       # gaussian peak followed by two edges (troughs)
       form_factor = [1, 2, 2]
       calibTable = calibrate(ws,'WISH/panel03',known_pos, form_factor)


    **Override Peaks**

    It is possible to skip the peak-finding step by providing the peak
    positions through the **overridePeaks** parameter. The example below tests
    the calibration of a single tube (30) but skips the peak-finding step.

    .. code-block:: python

       known_pos = [-0.41,-0.31,-0.21,-0.11,-0.02, 0.09, 0.18, 0.28, 0.39 ]
       define_peaks = [57.5, 107.0, 156.5, 206.0, 255.5, 305.0, 354.5,
                      404.0, 453.5]
       calibTable = calibrate(ws, 'WISH/panel03', known_pos, peaks_form,
                        overridePeaks={30:define_peaks}, rangeList=[30])

    **Output Peaks Positions**

    Enabling the option **outputPeak**, a TableWorkspace will be produced with
    the first column as the tube name and the following columns holding the
    positions where the corresponding peaks were found, like the table below.

    +-------+-------+-----+-------+
    |TubeId | Peak1 | ... | PeakM |
    +=======+=======+=====+=======+
    |tube0  | 15.5  | ... | 370.3 |
    +-------+-------+-----+-------+
    |  ...  |  ...  | ... |  ...  |
    +-------+-------+-----+-------+
    |tubeN  | 14.9  | ... | 371.2 |
    +-------+-------+-----+-------+

    The signature changes to:

    .. code-block:: python

       calibTable, peakTable = calibrate(...)

    It is possible to give a peakTable directly to the **outputPeak** option,
    which will make the calibration append the peaks to the given table.

    .. hint::

      It is possible to save the peakTable to a file using the
      :meth:`savePeak` method.

    **Find the correct position along the tube**


    The second step of the calibration is to define the correct position of
    pixels along the tube. This is done by fitting the peak positions found
    in the previous step against the known_positions provided.

    ::

        known       |              *
        positions   |           *
                    |      *
                    |  *
                    |________________
                      pixels positions

    The default operation is to fit the pixel positions against the known
    positions with a quadratic function in order to define an operation to
    move all the pixels to their real positions. If necessary, the user may
    select to fit using a polynomial of 3rd order, through the parameter
    **fitPolyn**.

    .. note::

      The known positions are given in the same unit as the spatial position
      (3D), with the center of the tube as the origin.

    Hence, this section will define a function that:

    .. math:: F(pix) = RealRelativePosition

    The fitting framework of Mantid stores values and errors for the optimized coefficients of the polynomial
    in a table (of type TableWorkspace). These tables can be grouped into a WorkspaceGroup by passing
    the name of this workspace to option **parameters_table_group**. The name of each table workspace will be
    the string **parameters_table_group** plus a suffix which is the index of the tube in the input **tubeSet**.

    **Define the new position for the detectors**

    Finally, the positions of the detectors are defined by a vector operation
    like

    .. math::

      \\vec{p} = \\vec{c} + v \\vec{u}

    Where :math:`\\vec{p}` is the position in the 3D space, **v** is the
    RealRelativePosition deduced from the previous section, and finally,
    :math:`\\vec{u}` is the unitary vector in the direction of the tube.



    :param ws: Integrated workspace with tubes to be calibrated.
    :param tubeSet: Specification of Set of tubes to be calibrated. If a string is passed, a TubeSpec will be created \
    passing the string to setTubeSpecByString.

    This will be the case for TubeSpec as string

    .. code-block:: python

      self.tube_spec = TubeSpec(ws)
      self.tube_spec.setTubeSpecByString(tubeSet)

    If a list of strings is passed, the TubeSpec will be created with this list:

    .. code-block:: python

       self.tube_spec = TubeSpec(ws)
       self.tube_spec.setTubeSpecByStringArray(tubeSet)

    If a :class:`~tube_spec.TubeSpec` object is passed, it will be used as it is.


    :param knownPositions: The defined position for the peaks/edges, taking the center as the origin and having the \
    same units as the tube length in the 3D space.

    :param funcForm: list with special values to define the format of the peaks/edge (peaks=1, edge=2). If it is not \
    provided, it will be assumed that all the knownPositions are peaks.


    Optional parameters to tune the calibration:

    :param fitPar: Define the parameters to be used in the fit as a :class:`~tube_calib_fit_params.TubeCalibFitParams`. \
    If not provided, the dynamic mode is used. See :py:func:`~Examples.TubeCalibDemoMaps_All.provideTheExpectedValue`

    :param margin: value in pixels that will be used around the peaks/edges to fit them. Default = 15. See the code of \
    :py:mod:`~Examples.TubeCalibDemoMerlin` where **margin** is used to calibrate small tubes.

    .. code-block:: python

       fit_start, fit_end = centre - margin, centre + margin

    :param rangeList: list of tubes indexes that will be calibrated. As in the following code \
    (see: :py:func:`~Examples.TubeCalibDemoMaps_All.improvingCalibrationSingleTube`):

    .. code-block:: python

       for index in rangelist:
           do_calibrate(tubeSet.getTube(index))

    :param calibTable: Pass the calibration table, it will then append the values to the provided one and return it. \
    (see: :py:mod:`~Examples.TubeCalibDemoMerlin`)

    :param plotTube: If given, the tube whose index is in plotTube will be plotted as well as its fitted peaks, it can \
    receive a list of indexes to plot.(see: :py:func:`~Examples.TubeCalibDemoMaps_All.changeMarginAndExpectedValue`)

    :param excludeShortTubes: Do not calibrate tubes whose length is smaller than given value. (see at \
    Examples/TubeCalibDemoMerlin_Adjustable.py)

    :param overridePeaks: dictionary that defines an array of peaks positions (in pixels) to be used for the specific \
    tube(key). (see: :py:func:`~Examples.TubeCalibDemoMaps_All.improvingCalibrationSingleTube`)

    .. code-block:: python

       for index in rangelist:
         if index in overridePeaks:
           use_this_peaks = overridePeaks[index]
           # skip finding peaks
           fit_peaks_to_position()

    :param fitPolyn: Define the order of the polynomial to fit the pixels positions against the known positions. The \
    acceptable values are 1, 2 or 3. Default = 2.


    :param outputPeak: Enable the calibrate to output the peak table, relating the tubes with the pixels positions. It \
    may be passed as a boolean value (outputPeak=True) or as a peakTable value. The latter case is to inform calibrate \
    to append the new values to the given peakTable. This is useful when you have to operate in subsets of tubes. \
    (see :py:mod:`~Examples.TubeCalibDemoMerlin` that shows a nice inspection on this table).

    .. code-block:: python

      calibTable, peakTable = calibrate(ws, (omitted), rangeList=[1],
               outputPeak=True)
      # appending the result to peakTable
      calibTable, peakTable = calibrate(ws, (omitted), rangeList=[2],
               outputPeak=peakTable)
      # now, peakTable has information for tube[1] and tube[2]

    :rtype: calibrationTable, a TableWorkspace with two columns DetectorID(int) and DetectorPositions(V3D).

    """
    # Legacy code requires kwargs to contain only the list of parameters specified below. Thus, we pop other
    # arguments into temporary variables, such as `parameters_table_group`
    parameters_table_group = kwargs.pop('parameters_table_group', None)

    FITPAR = 'fitPar'
    MARGIN = 'margin'
    RANGELIST = 'rangeList'
    CALIBTABLE = 'calibTable'
    PLOTTUBE = 'plotTube'
    EXCLUDESHORT = 'excludeShortTubes'
    OVERRIDEPEAKS = 'overridePeaks'
    FITPOLIN = 'fitPolyn'
    OUTPUTPEAK = 'outputPeak'

    param_helper = _CalibrationParameterHelper(FITPAR, MARGIN, RANGELIST,
                                               CALIBTABLE, PLOTTUBE,
                                               EXCLUDESHORT, OVERRIDEPEAKS,
                                               FITPOLIN, OUTPUTPEAK)

    # check that only valid arguments were passed through kwargs
    param_helper.ensure_no_unknown_kwargs(kwargs)

    # check parameter ws: if it was given as string, transform it in
    # mantid object
    ws = _CalibrationParameterHelper.enforce_matrix_ws(ws)

    # check parameter tubeSet. It accepts string or preferable a TubeSpec
    tubeSet = _CalibrationParameterHelper.enforce_tube_spec(tubeSet, ws)

    # check the known_positions parameter
    # for old version compatibility, it also accepts IdealTube, even though
    # they should only be used internally
    _CalibrationParameterHelper.validate_known_positions(knownPositions)
    ideal_tube = IdealTube()
    ideal_tube.setArray(numpy.array(knownPositions))

    n_peaks = len(ideal_tube.getArray())
    # deal with funcForm parameter
    _CalibrationParameterHelper.validate_func_form(funcForm, n_peaks)

    # apply the functional form to the ideal Tube
    ideal_tube.setForm(funcForm)

    # check the FITPAR parameter (optional)
    # if FITPAR is given, it is simply passed on; if it is not given, a FITPAR
    # is created by 'guessing' the centre positions, allowing the peak-finding
    # calibration methods to adjust the parameters for the peaks automatically
    fit_par = param_helper.get_parameter(FITPAR,
                                         kwargs,
                                         tube_set=tubeSet,
                                         ideal_tube=ideal_tube)

    if MARGIN in kwargs:
        margin = param_helper.get_parameter(MARGIN, kwargs)
        fit_par.setMargin(margin)

    range_list = param_helper.get_parameter(RANGELIST,
                                            kwargs,
                                            default_range_list=list(
                                                range(tubeSet.getNumTubes())))
    calib_table = param_helper.get_parameter(CALIBTABLE, kwargs)
    plot_tube = param_helper.get_parameter(PLOTTUBE, kwargs)
    exclude_short_tubes = param_helper.get_parameter(EXCLUDESHORT, kwargs)
    override_peaks = param_helper.get_parameter(OVERRIDEPEAKS,
                                                kwargs,
                                                tube_set=tubeSet,
                                                ideal_tube=ideal_tube)
    polin_fit = param_helper.get_parameter(
        FITPOLIN, kwargs)  # order of the fitting polynomial. Default is 2
    output_peak, delete_peak_table_after = param_helper.get_parameter(
        OUTPUTPEAK, kwargs, ideal_tube=ideal_tube)

    getCalibration(ws,
                   tubeSet,
                   calib_table,
                   fit_par,
                   ideal_tube,
                   output_peak,
                   override_peaks,
                   exclude_short_tubes,
                   plot_tube,
                   range_list,
                   polin_fit,
                   parameters_table_group=parameters_table_group)

    if delete_peak_table_after:
        DeleteWorkspace(str(output_peak))
        return calib_table
    else:
        return calib_table, output_peak
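
The section of the docstring above on finding the correct position along the tube describes fitting the peak positions (in pixels) against the known positions with a quadratic, or optionally cubic, polynomial. A standalone sketch of that idea using numpy, reusing the example peak and known positions quoted in the docstring rather than the framework's own fitting code:

import numpy as np

# peak centres found along one tube, in pixel units (example values from the docstring)
found_pixels = np.array([57.5, 107.0, 156.5, 206.0, 255.5, 305.0, 354.5, 404.0, 453.5])
# corresponding known positions along the tube, centre of tube at 0 (same example values)
known_positions = np.array([-0.41, -0.31, -0.21, -0.11, -0.02, 0.09, 0.18, 0.28, 0.39])

# quadratic map F(pixel) -> real relative position, analogous to fitPolyn=2
coefficients = np.polyfit(found_pixels, known_positions, deg=2)
# evaluate the map for every pixel of a tube (512 pixels assumed here)
calibrated_positions = np.polyval(coefficients, np.arange(512))
print(calibrated_positions[0], calibrated_positions[-1])
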
    def PyExec(self):
        raw_ws = self.getProperty('InputWorkspace').value
        sample_geometry = self.getPropertyValue('SampleGeometry')
        sample_material = self.getPropertyValue('SampleMaterial')
        cal_file_name = self.getPropertyValue('CalFileName')
        SetSample(InputWorkspace=raw_ws,
                  Geometry=sample_geometry,
                  Material=sample_material)
        # find the closest monitor to the sample for incident spectrum
        raw_spec_info = raw_ws.spectrumInfo()
        incident_index = None
        for i in range(raw_spec_info.size()):
            if raw_spec_info.isMonitor(i):
                l2 = raw_spec_info.position(i)[2]
                if not incident_index:
                    incident_index = i
                else:
                    if raw_spec_info.position(incident_index)[2] < l2 < 0:
                        incident_index = i
        monitor = ExtractSpectra(InputWorkspace=raw_ws,
                                 WorkspaceIndexList=[incident_index])
        monitor = ConvertUnits(InputWorkspace=monitor, Target="Wavelength")
        x_data = monitor.dataX(0)
        min_x = np.min(x_data)
        max_x = np.max(x_data)
        width_x = (max_x - min_x) / x_data.size
        fit_spectra = FitIncidentSpectrum(
            InputWorkspace=monitor,
            BinningForCalc=[min_x, 1 * width_x, max_x],
            BinningForFit=[min_x, 10 * width_x, max_x],
            FitSpectrumWith="CubicSpline")
        self_scattering_correction = CalculatePlaczekSelfScattering(
            InputWorkspace=raw_ws, IncidentSpecta=fit_spectra)
        cal_workspace = LoadCalFile(InputWorkspace=self_scattering_correction,
                                    CalFileName=cal_file_name,
                                    WorkspaceName='cal_workspace',
                                    MakeOffsetsWorkspace=False,
                                    MakeMaskWorkspace=False)
        self_scattering_correction = DiffractionFocussing(
            InputWorkspace=self_scattering_correction,
            GroupingFilename=cal_file_name)

        n_pixel = np.zeros(self_scattering_correction.getNumberHistograms())

        for i in range(cal_workspace.getNumberHistograms()):
            grouping = cal_workspace.dataY(i)
            if grouping[0] > 0:
                n_pixel[int(grouping[0] - 1)] += 1
        correction_ws = CreateWorkspace(
            DataY=n_pixel,
            DataX=[0, 1],
            NSpec=self_scattering_correction.getNumberHistograms())
        self_scattering_correction = Divide(
            LHSWorkspace=self_scattering_correction,
            RHSWorkspace=correction_ws)
        ConvertToDistribution(Workspace=self_scattering_correction)
        self_scattering_correction = ConvertUnits(
            InputWorkspace=self_scattering_correction,
            Target="MomentumTransfer",
            EMode='Elastic')
        DeleteWorkspace('cal_workspace_group')
        DeleteWorkspace(correction_ws)
        DeleteWorkspace(fit_spectra)
        DeleteWorkspace(monitor)
        DeleteWorkspace(raw_ws)
        self.setProperty('OutputWorkspace', self_scattering_correction)
Example #26
def create_absorption_input(  # noqa: C901
    filename,
    props=None,
    num_wl_bins=1000,
    material={},
    geometry={},
    environment={},
    opt_wl_min=0,
    opt_wl_max=Property.EMPTY_DBL,
    metaws=None,
):
    """
    Create an input workspace for carpenter or other absorption corrections

    :param filename: Input file to retrieve properties from the sample log
    :param props: PropertyManager of run characterizations, obtained from PDDetermineCharacterizations
    :param num_wl_bins: The number of wavelength bins used for absorption correction
    :param material: Optional material to use in SetSample
    :param geometry: Optional geometry to use in SetSample
    :param environment: Optional environment to use in SetSample
    :param opt_wl_min: Optional minimum wavelength. If specified, this is used instead of from the props
    :param opt_wl_max: Optional maximum wavelength. If specified, this is used instead of from the props
    :param metaws: Optional workspace name with metadata to use for donor workspace instead of reading from filename
    :return: Name of the donor workspace created
    """
    def confirmProps(props):
        '''This function will throw an exception if the PropertyManager
        is not defined correctly. It should only be called if the value
        is needed.'''
        if props is None:
            raise ValueError(
                "props is required to create donor workspace, props is None")

        if not isinstance(props, PropertyManager):
            raise ValueError("props must be a PropertyManager object")

    log = Logger('CreateAbsorptionInput')

    # Load from file if no workspace with metadata has been given, otherwise avoid a duplicate load with the metaws
    absName = metaws
    if metaws is None:
        absName = '__{}_abs'.format(_getBasename(filename))
        allowed_log = ",".join([
            'SampleFormula', 'SampleDensity',
            "BL11A:CS:ITEMS:HeightInContainerUnits", "SampleContainer",
            "SampleMass"
        ])
        Load(Filename=filename,
             OutputWorkspace=absName,
             MetaDataOnly=True,
             AllowList=allowed_log)

    # attempt to get the wavelength from the function parameters
    if opt_wl_min > 0.:
        wl_min = opt_wl_min
    else:
        # or get it from the PropertyManager
        confirmProps(props)
        wl_min = props['wavelength_min'].value
    if opt_wl_max != Property.EMPTY_DBL:
        wl_max = opt_wl_max
    else:
        # or get it from the PropertyManager
        confirmProps(props)
        wl_max = props['wavelength_max'].value  # unset value is 0.

    # if it isn't found by this point, guess it from the time-of-flight range
    if wl_min == 0. or wl_max == 0.:
        confirmProps(props)
        tof_min = props['tof_min'].value
        tof_max = props['tof_max'].value
        if tof_min >= 0. and tof_max > tof_min:
            log.information('TOF range is {} to {} microseconds'.format(
                tof_min, tof_max))

            # determine L1
            instr = mtd[absName].getInstrument()
            L1 = instr.getSource().getDistance(instr.getSample())
            # determine L2 range
            PreprocessDetectorsToMD(InputWorkspace=absName,
                                    OutputWorkspace=absName + '_dets',
                                    GetMaskState=False)
            L2 = mtd[absName + '_dets'].column('L2')
            Lmin = np.min(L2) + L1
            Lmax = np.max(L2) + L1
            DeleteWorkspace(Workspace=absName + '_dets')

            log.information('Distance range is {} to {} meters'.format(
                Lmin, Lmax))

            # wavelength is h*TOF / m_n * L  values copied from Kernel/PhysicalConstants.h
            usec_to_sec = 1.e-6
            meter_to_angstrom = 1.e10
            h_m_n = meter_to_angstrom * usec_to_sec * 6.62606896e-34 / 1.674927211e-27
            if wl_min == 0.:
                wl_min = h_m_n * tof_min / Lmax
            if wl_max == 0.:
                wl_max = h_m_n * tof_max / Lmin

    # there isn't a good way to guess it so error out
    if wl_max <= wl_min:
        DeleteWorkspace(Workspace=absName)  # no longer needed
        raise RuntimeError('Invalid wavelength range min={}A max={}A'.format(
            wl_min, wl_max))
    log.information('Using wavelength range min={}A max={}A'.format(
        wl_min, wl_max))

    absorptionWS = WorkspaceFactory.create(
        mtd[absName],
        NVectors=mtd[absName].getNumberHistograms(),
        XLength=num_wl_bins + 1,
        YLength=num_wl_bins)
    xaxis = np.arange(0., float(num_wl_bins + 1)) * (wl_max - wl_min) / (
        num_wl_bins) + wl_min
    for i in range(absorptionWS.getNumberHistograms()):
        absorptionWS.setX(i, xaxis)
    absorptionWS.getAxis(0).setUnit('Wavelength')

    # this effectively deletes the metadata only workspace
    AnalysisDataService.addOrReplace(absName, absorptionWS)

    # cleanup inputs before delegating work
    if not material:
        material = {}
    if not geometry:
        geometry = {}
    if not environment:
        environment = {}

    # Make sure one is set before calling SetSample
    if material or geometry or environment:
        mantid.simpleapi.SetSampleFromLogs(InputWorkspace=absName,
                                           Material=material,
                                           Geometry=geometry,
                                           Environment=environment)

    return absName
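
A hedged usage sketch for create_absorption_input; the run file, material and geometry values are placeholders, and props is left as None because an explicit wavelength range is supplied, so the PropertyManager branch is never needed:

donor_name = create_absorption_input(
    'PG3_46577.nxs.h5',                      # placeholder run file
    num_wl_bins=1000,
    material={'ChemicalFormula': 'Si', 'SampleMassDensity': 2.33},      # placeholder sample
    geometry={'Shape': 'Cylinder', 'Height': 4.0, 'Radius': 0.3},       # placeholder geometry
    opt_wl_min=0.1,
    opt_wl_max=2.9)                          # explicit range, so props can stay None
print(donor_name)  # donor workspace name, e.g. '__PG3_46577_abs'
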
Example #27
 def tearDown(self):
     shutil.rmtree(self._test_dir)
     DeleteWorkspace(self._workspace)
 def tearDown(self):
     DeleteWorkspace(self._workspace1)
Example #29
 def cleanUp(self):
     if self._input_ws_base is not None:
         DeleteWorkspace(self._input_ws_base)
 def delete_if_present(workspace):
     if workspace in mtd:
         DeleteWorkspace(workspace)