Example 1
    def loadSpicePDData(self, expno, scanno, datafilename):
        """ Load SPICE powder diffraction data to MDEventsWorkspaces
        """
        # Create base workspace name
        try:
            basewsname = os.path.basename(datafilename).split(".")[0]
        except AttributeError as e:
            raise NotImplementedError("Unable to parse data file name due to %s." % (str(e)))

        # load SPICE
        tablewsname = basewsname + "_RawTable"
        infowsname  = basewsname + "ExpInfo"
        api.LoadSpiceAscii(Filename=datafilename,
                           OutputWorkspace=tablewsname, RunInfoWorkspace=infowsname)

        tablews = AnalysisDataService.retrieve(tablewsname)
        infows  = AnalysisDataService.retrieve(infowsname)
        if tablews is None or infows is None:
            raise NotImplementedError('Unable to retrieve either spice table workspace %s or log workspace %s' % (
                tablewsname, infowsname))

        # Create a reduction manager and add workspaces to it
        wsmanager = PDRManager(expno, scanno)
        wsmanager.set_raw_workspaces(tablews, infows)
        self._myWorkspaceDict[(int(expno), int(scanno))] = wsmanager

        return
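Several of these examples follow the same load-then-retrieve idiom against the AnalysisDataService (ADS). A minimal hedged sketch of a guarded retrieve (the helper name is hypothetical):

    from mantid.api import AnalysisDataService

    def retrieve_or_raise(wsname):
        """Fetch a workspace from the ADS, failing early with a clear message."""
        if not AnalysisDataService.doesExist(wsname):
            raise RuntimeError("Workspace %s is not in the ADS." % wsname)
        return AnalysisDataService.retrieve(wsname)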
Example 2
    def parseSpiceData(self, expno, scanno, detefftablews=None):
        """ Load SPICE data to MDWorkspaces from raw table workspace
        """
        # Get reduction manager
        try:
            wsmanager = self._myWorkspaceDict[(int(expno), int(scanno))]
        except KeyError:
            raise NotImplementedError("Exp %d Scan %d has not been loaded yet." % (int(expno),int(scanno)))

        # Convert to MDWorkspace
        tablews = wsmanager.getRawSpiceTable()
        infows  = wsmanager.getRawInfoMatrixWS()

        basewsname = tablews.name().split('_RawTable')[0]
        datamdwsname = basewsname + "_DataMD"
        monitorwsname = basewsname + "_MonitorMD"
        api.ConvertSpiceDataToRealSpace(InputWorkspace=tablews,
                                        RunInfoWorkspace=infows,
                                        OutputWorkspace=datamdwsname,
                                        OutputMonitorWorkspace=monitorwsname,
                                        DetectorEfficiencyTableWorkspace=detefftablews)

        datamdws = AnalysisDataService.retrieve(datamdwsname)
        monitormdws = AnalysisDataService.retrieve(monitorwsname)

        if datamdws is None or monitormdws is None:
            raise NotImplementedError("Failed to convert SPICE data to MDEventWorkspaces \
                    for experiment %d and scan %d." % (expno, scanno))

        # Manager:
        wsmanager.setupMDWrokspaces(datamdws, monitormdws)
        self._myWorkspaceDict[(expno, scanno)] = wsmanager

        return True
Example 3
 def make_UB_consistent(self, ws_ref, ws):
     # compare U matrix to perform TransformHKL to preserve indexing
     U_ref = AnalysisDataService.retrieve(
         ws_ref).sample().getOrientedLattice().getU()
     U = AnalysisDataService.retrieve(
         ws).sample().getOrientedLattice().getU()
     # find transform required  ( U_ref = U T^-1) - see TransformHKL docs for details
     transform = np.linalg.inv(
         getSignMaxAbsValInCol(np.linalg.inv(U) @ U_ref))
     self.child_TransformHKL(PeaksWorkspace=ws,
                             HKLTransform=transform,
                             FindError=False)
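A quick numerical check of the relation stated in the comment (U_ref = U T^-1). This is a standalone numpy sketch using toy rotation matrices in place of real oriented-lattice U matrices, and it skips the getSignMaxAbsValInCol sign/rounding helper used above:

    import numpy as np

    theta = np.deg2rad(5.0)
    U = np.eye(3)  # toy U matrix
    U_ref = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                      [np.sin(theta),  np.cos(theta), 0.0],
                      [0.0, 0.0, 1.0]])  # toy reference U matrix
    # the transform defined by U_ref = U T^-1, i.e. T = (U^-1 U_ref)^-1
    transform = np.linalg.inv(np.linalg.inv(U) @ U_ref)
    assert np.allclose(U_ref, U @ np.linalg.inv(transform))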
Example 4
    def scanEventWorkspaces(self):
        """
        """
        wsnames = AnalysisDataService.getObjectNames()

        eventwsnames = []
        for wsname in wsnames:
            wksp = AnalysisDataService.retrieve(wsname)
            if wksp.__class__.__name__.count("Event") == 1:
                eventwsnames.append(wsname)
        # ENDFOR

        if len(eventwsnames) > 0:
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(eventwsnames)
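Matching "Event" in the class name works but is fragile; if the goal is simply "is this an event workspace", an isinstance check against the IEventWorkspace interface is sturdier. A hedged sketch:

    from mantid.api import AnalysisDataService, IEventWorkspace

    eventwsnames = [name for name in AnalysisDataService.getObjectNames()
                    if isinstance(AnalysisDataService.retrieve(name), IEventWorkspace)]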
Example 5
    def _searchTableWorkspaces(self):
        """ Search table workspaces and add to 'comboBox_corrWS'
        """
        wsnames = AnalysisDataService.getObjectNames()

        tablewsnames = []
        for wsname in wsnames:
            wksp = AnalysisDataService.retrieve(wsname)
            if isinstance(wksp, mantid.api.ITableWorkspace):
                tablewsnames.append(wsname)
        # ENDFOR

        self.ui.comboBox_corrWS.clear()
        if len(tablewsnames) > 0:
            self.ui.comboBox_corrWS.addItems(tablewsnames)
Example 8
    def test_that_correctly_identifies_normalisation_for_artificial_double_pulse_data(
            self):
        delta = 0.33
        x = np.linspace(0., 15., 100)
        x_offset = np.linspace(delta / 2, 15. + delta / 2, 100)
        x_offset_neg = np.linspace(-delta / 2, 15. - delta / 2, 100)

        testFunction = GausOsc(Frequency=1.5, A=0.22)
        y1 = testFunction(x_offset_neg)
        y2 = testFunction(x_offset)
        N0 = 6.38
        y = N0 * (1 + y1 / 2 + y2 / 2)
        y_norm = y1 / 2 + y2 / 2
        CreateWorkspace(x, y, OutputWorkspace="unnormalised_workspace")
        CreateWorkspace(x, y, OutputWorkspace="ws_to_normalise")
        CreateWorkspace(x, y_norm, OutputWorkspace="ws_correctly_normalised")
        AddSampleLog(Workspace='ws_to_normalise',
                     LogName="analysis_asymmetry_norm",
                     LogText="1")

        innerFunction = FunctionFactory.createInitialized(
            'name=GausOsc,A=0.20,Sigma=0.2,Frequency=1.0,Phi=0')
        tf_function = ConvertFitFunctionForMuonTFAsymmetry(
            InputFunction=innerFunction, WorkspaceList=['ws_to_normalise'])

        CalculateMuonAsymmetry(
            MaxIterations=100,
            EnableDoublePulse=True,
            PulseOffset=delta,
            UnNormalizedWorkspaceList='unnormalised_workspace',
            ReNormalizedWorkspaceList='ws_to_normalise',
            OutputFitWorkspace='DoublePulseFit',
            StartX=0,
            InputFunction=str(tf_function),
            Minimizer='Levenberg-Marquardt')

        double_parameter_workspace = AnalysisDataService.retrieve(
            'DoublePulseFit_Parameters')
        values_column = double_parameter_workspace.column(1)
        # Check that the correct normalisation is found.
        self.assertAlmostEqual(values_column[0], N0, places=3)
        # Check that normalised data is correct
        result, message = CompareWorkspaces('ws_to_normalise',
                                            'ws_correctly_normalised',
                                            Tolerance=1e-3)
        self.assertTrue(result)

        AnalysisDataService.clear()
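The test above round-trips a fit function through a string: FunctionFactory.createInitialized builds an IFunction from its string form, and str() serialises it back for a string function property. Distilled as a sketch:

    from mantid.api import FunctionFactory

    func = FunctionFactory.createInitialized(
        'name=GausOsc,A=0.20,Sigma=0.2,Frequency=1.0,Phi=0')
    func_str = str(func)  # suitable for string function properties such as InputFunction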
Example 9
    def output_data(self, x, y, e, data, NSpec, UnitX, norm, outputfn,
                    metadata):
        """Save reduced data to file"""

        createWS_alg = self.createChildAlgorithm("CreateWorkspace",
                                                 enableLogging=False)
        createWS_alg.setProperty("DataX", x)
        createWS_alg.setProperty("DataY", y)
        createWS_alg.setProperty("DataE", e)
        createWS_alg.setProperty("NSpec", NSpec)
        createWS_alg.setProperty("UnitX", UnitX)
        createWS_alg.setProperty("YUnitLabel", "Counts")
        createWS_alg.setProperty("WorkspaceTitle",
                                 str(metadata['scan_title']) + "_norm_" + norm)
        createWS_alg.execute()
        outWS = createWS_alg.getProperty("OutputWorkspace").value
        AnalysisDataService.addOrReplace(outputfn + "_norm_" + norm, outWS)
        self.add_metadata(outWS, metadata, data)

        # save reduced workspace to requested format
        save_data = self.getProperty("SaveData").value
        if save_data:
            outputdir = self.getProperty("OutputDirectory").value
            outputdir = outputdir if outputdir != "" else f"/HFIR/HB2A/IPTS-{metadata['proposal']}/shared"
            _outputfunc = {
                'XYE': SaveFocusedXYE,
                'GSAS': SaveGSSCW
            }[self.getProperty('OutputFormat').value]
            _outputext = {
                "XYE": 'dat',
                "GSAS": 'gss',
            }[self.getProperty('OutputFormat').value]
            outputbase = os.path.join(outputdir, outputfn)
            if norm == "mon":
                out_f_name = outputbase
            else:
                out_f_name = outputbase + "_norm_" + norm
            if self.getProperty('OutputFormat').value == "GSAS":
                _outputfunc(
                    InputWorkspace=outWS,
                    OutputFilename=f"{out_f_name}.{_outputext}",
                )
            else:
                _outputfunc(
                    InputWorkspace=outWS,
                    Filename=f"{out_f_name}.{_outputext}",
                    SplitFiles=False,
                )
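output_data relies on the child-algorithm pattern: run CreateWorkspace as an unlogged child, then publish the result by hand with addOrReplace. Reduced to its core as a sketch; this must live inside a PythonAlgorithm, and the helper name is hypothetical:

    def _create_child_workspace(self, x, y, e):
        # run CreateWorkspace as a child so nothing is auto-added to the ADS
        alg = self.createChildAlgorithm("CreateWorkspace", enableLogging=False)
        alg.setProperty("DataX", x)
        alg.setProperty("DataY", y)
        alg.setProperty("DataE", e)
        alg.execute()
        return alg.getProperty("OutputWorkspace").value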
Example 10
 def _check_region_grouping_ws_exists(grouping_ws_name: str,
                                      inst_ws) -> bool:
     """
     Check that the required grouping workspace for this focus exists, and if not present for a North/South bank
     focus, retrieve them from the user directories or create them (expected if first focus with loaded calibration)
     :param grouping_ws_name: Name of the grouping workspace whose presence in the ADS is being checked
     :param inst_ws: Workspace containing the instrument data for use in making a bank grouping workspace
     :return: True if the required workspace exists (or has just been loaded/created), False if not
     """
     if not Ads.doesExist(grouping_ws_name):
         if "North" in grouping_ws_name:
             logger.notice(
                 "NorthBank grouping workspace not present in ADS, loading")
             EnggUtils.get_bank_grouping_workspace(1, inst_ws)
             return True
         elif "South" in grouping_ws_name:
             logger.notice(
                 "SouthBank grouping workspace not present in ADS, loading")
             EnggUtils.get_bank_grouping_workspace(2, inst_ws)
             return True
         else:
             logger.warning(
                 f"Cannot focus as the grouping workspace \"{grouping_ws_name}\" is not present."
             )
             return False
     return True
Example 11
    def getVectorProcessVanToPlot(self, exp, scan, tempdata=False):
        """ Get vec x and y for the processed vanadium spectrum
        """
        # get on hold of processed vanadium data workspace
        wsmanager = self.getWorkspace(exp, scan, raiseexception=True)

        if tempdata is True:
            procVanWs = wsmanager.getProcessedVanadiumWSTemp()
        else:
            procVanWs = wsmanager.getProcessedVanadiumWS()
            #procVanWs = wsmanager._processedVanWS

        if procVanWs is None:
            raise NotImplementedError(
                "Exp %d Scan %d does not have processed vanadium workspace." %
                (exp, scan))

        # convert to point data if necessary
        if len(procVanWs.readX(0)) != len(procVanWs.readY(0)):
            wsname = procVanWs.name() + "_pd"
            api.ConvertToPointData(InputWorkspace=procVanWs,
                                   OutputWorkspace=wsname)
            outws = AnalysisDataService.retrieve(wsname)
        else:
            outws = procVanWs

        # get vectors
        return outws.readX(0), outws.readY(0)
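The histogram-to-point-data guard above recurs in several of these examples (see also Examples 38-40). A hedged helper form, assuming api is mantid.simpleapi as in the snippets:

    from mantid import simpleapi as api
    from mantid.api import AnalysisDataService

    def as_point_data(ws):
        """Return ws as point data, converting only when it is histogrammed."""
        if len(ws.readX(0)) != len(ws.readY(0)):
            wsname = ws.name() + "_pd"
            api.ConvertToPointData(InputWorkspace=ws, OutputWorkspace=wsname)
            return AnalysisDataService.retrieve(wsname)
        return ws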
Example 12
    def PyExec(self):
        ws_list = self.getProperty('InputWorkspaces').value
        x_min = self.getProperty('XMin').value
        x_max = self.getProperty('XMax').value
        scale_bool = self.getProperty('CalculateScale').value
        offset_bool = self.getProperty('CalculateOffset').value
        flattened_list = self.unwrap_groups(ws_list)
        largest_range_spectrum, rebin_param = self.get_common_bin_range_and_largest_spectra(flattened_list)
        CloneWorkspace(InputWorkspace=flattened_list[0], OutputWorkspace='ws_conjoined')
        Rebin(InputWorkspace='ws_conjoined', OutputWorkspace='ws_conjoined', Params=rebin_param)
        for ws in flattened_list[1:]:
            temp = CloneWorkspace(InputWorkspace=ws)
            temp = Rebin(InputWorkspace=temp, Params=rebin_param)
            ConjoinWorkspaces(InputWorkspace1='ws_conjoined',
                              InputWorkspace2=temp,
                              CheckOverlapping=False)
        ws_conjoined = AnalysisDataService.retrieve('ws_conjoined')
        ref_spec = ws_conjoined.getSpectrum(largest_range_spectrum).getSpectrumNo()
        ws_conjoined, offset, scale, chisq = MatchSpectra(InputWorkspace=ws_conjoined,
                                                          ReferenceSpectrum=ref_spec,
                                                          CalculateScale=scale_bool,
                                                          CalculateOffset=offset_bool)
        x_min, x_max, bin_width = self.fit_x_lims_to_match_histogram_bins(ws_conjoined, x_min, x_max)

        ws_conjoined = CropWorkspaceRagged(InputWorkspace=ws_conjoined, XMin=x_min, XMax=x_max)
        ws_conjoined = Rebin(InputWorkspace=ws_conjoined, Params=[min(x_min), bin_width, max(x_max)])
        merged_ws = SumSpectra(InputWorkspace=ws_conjoined, WeightedSum=True, MultiplyBySpectra=False, StoreInADS=False)
        DeleteWorkspace(ws_conjoined)
        self.setProperty('OutputWorkspace', merged_ws)
Example 13
def load_full_instrument_calibration():
    if ADS.doesExist("full_inst_calib"):
        full_calib = ADS.retrieve("full_inst_calib")
    else:
        full_calib_path = get_setting(
            output_settings.INTERFACES_SETTINGS_GROUP,
            output_settings.ENGINEERING_PREFIX, "full_calibration")
        try:
            full_calib = Load(full_calib_path,
                              OutputWorkspace="full_inst_calib")
        except ValueError:
            logger.error(
                "Error loading Full instrument calibration - this is set in the interface settings."
            )
            return
    return full_calib
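load_full_instrument_calibration is a cache-or-load idiom against the ADS. In hedged generic form (names hypothetical):

    from mantid.api import AnalysisDataService as ADS
    from mantid.simpleapi import Load

    def get_or_load(ws_name, file_path):
        """Reuse a workspace already in the ADS, otherwise load it from disk."""
        if ADS.doesExist(ws_name):
            return ADS.retrieve(ws_name)
        return Load(file_path, OutputWorkspace=ws_name)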
Example 15
def generate_tof_fit_dictionary(cal_name=None) -> dict:
    """
    Generate a dictionary of data to plot showing the results of the calibration
    :param cal_name: Name of the region of interest of the calibration
    :return: dict, keys: x = expected peaks (dSpacing), y = fitted peaks (TOF), e = y error data,
                         y2 = calculated peaks (TOF), r = residuals (y - y2)
    """
    if not cal_name:
        # no region specified: generate and return the dictionaries for both banks
        return {"bank_1": generate_tof_fit_dictionary("bank_1"),
                "bank_2": generate_tof_fit_dictionary("bank_2")}
    if cal_name[-1:] == '1':  # bank_1
        diag_ws_name = "diag_bank_1"
    elif cal_name[-1:] == '2':
        diag_ws_name = "diag_bank_2"
    else:
        diag_ws_name = "diag_" + cal_name
    fitparam_ws_name = diag_ws_name + "_fitparam"
    fitted_ws_name = diag_ws_name + "_fitted"
    fiterror_ws_name = diag_ws_name + "_fiterror"
    fitparam_ws = ADS.retrieve(fitparam_ws_name)
    fitted_ws = ADS.retrieve(fitted_ws_name)
    fiterror_ws = ADS.retrieve(fiterror_ws_name)

    expected_dspacing_peaks = default_ceria_expected_peaks(final=True)

    expected_d_peaks_x = []
    fitted_tof_peaks_y = []
    tof_peaks_error_e = []
    calculated_tof_peaks_y2 = []
    residuals = []
    for irow in range(0, fitparam_ws.rowCount()):
        expected_d_peaks_x.append(expected_dspacing_peaks[-(irow + 1)])
        fitted_tof_peaks_y.append(fitparam_ws.cell(irow, 5))
        tof_peaks_error_e.append(fiterror_ws.cell(irow, 5))
        calculated_tof_peaks_y2.append(
            convert_single_value_dSpacing_to_TOF(expected_d_peaks_x[irow],
                                                 fitted_ws))
        residuals.append(fitted_tof_peaks_y[irow] -
                         calculated_tof_peaks_y2[irow])

    return {
        'x': expected_d_peaks_x,
        'y': fitted_tof_peaks_y,
        'e': tof_peaks_error_e,
        'y2': calculated_tof_peaks_y2,
        'r': residuals
    }
Example 16
    def PyExec(self):
        # setup progress bar
        prog_reporter = Progress(self, start=0.0, end=1.0, nreports=3)
        # Get input
        ws_list = self.getProperty("PeakWorkspaces").value
        a = self.getProperty('a').value
        b = self.getProperty('b').value
        c = self.getProperty('c').value
        alpha = self.getProperty('alpha').value
        beta = self.getProperty('beta').value
        gamma = self.getProperty('gamma').value
        self.tol = self.getProperty('Tolerance').value

        # Find initial UB and use to index peaks in all runs
        prog_reporter.report(1, "Find initial UB for peak indexing")
        self.find_initial_indexing(
            a, b, c, alpha, beta, gamma,
            ws_list)  # removes runs from ws_list if can't index

        # optimize the lattice parameters across runs (i.e. B matrix)
        prog_reporter.report(2, "Optimize B")

        def fobj(x):
            return self.calcResiduals(x, ws_list)

        alatt0 = [a, b, c, alpha, beta, gamma]
        try:
            alatt, cov, info, msg, ier = leastsq(fobj,
                                                 x0=alatt0,
                                                 full_output=True)
            # eval the fobj at optimal solution to set UB (leastsq iteration stops at a next sub-optimal solution)
            fobj(alatt)
        except ValueError:
            logger.error(
                "CalculateUMatrix failed - check initial lattice parameters and tolerance provided."
            )
            return

        success = ier in [1, 2, 3, 4] and cov is not None  # cov is None when matrix is singular
        if success:
            # calculate errors
            dof = sum(
                [self.child_IndexPeaks(ws, RoundHKLs=True)
                 for ws in ws_list]) - len(alatt0)
            err = np.sqrt(abs(np.diag(cov)) * (info['fvec']**2).sum() / dof)
            for wsname in ws_list:
                ws = AnalysisDataService.retrieve(wsname)
                ws.sample().getOrientedLattice().setError(*err)
            logger.notice(
                f"Lattice parameters successfully refined for workspaces: {ws_list}\n"
                f"Lattice Parameters: {np.array2string(alatt, precision=6)}\n"
                f"Parameter Errors  : {np.array2string(err, precision=6)}")
        else:
            logger.warning(
                f"Error in optimization of lattice parameters: {msg}")
        # complete progress
        prog_reporter.report(3, "Done")
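The error estimate above scales the covariance returned by scipy.optimize.leastsq by the reduced chi-squared. As a standalone sketch (fobj, x0 and dof are supplied by the caller; in the example dof is the peak count minus the number of lattice parameters):

    import numpy as np
    from scipy.optimize import leastsq

    def fit_with_errors(fobj, x0, dof):
        """Least-squares fit returning parameters and scaled 1-sigma errors."""
        x, cov, info, msg, ier = leastsq(fobj, x0=x0, full_output=True)
        if cov is None:  # singular matrix: no error estimate possible
            return x, None
        err = np.sqrt(np.abs(np.diag(cov)) * (info['fvec'] ** 2).sum() / dof)
        return x, err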
Example 17
 def _get_region_calib_ws(region: str):  # -> Workspace
     """
     Retrieve region calibration workspace from the ADS
     :param region: String describing region of interest
     :return: Region calibration workspace
     """
     ws_name = REGION_CALIB_WS_PREFIX + region
     return Ads.retrieve(ws_name)
Example 18
    def reduceSpicePDData(self, exp, scan, unit, xmin, xmax, binsize, wavelength=None, excludeddetlist=None, scalefactor=None):
        """ Reduce SPICE powder diffraction data.
        Return - True if the reduction succeeded
        """
        # Default to an empty exclusion list
        if excludeddetlist is None:
            excludeddetlist = []

        # Get reduction manager
        try:
            wsmanager = self._myWorkspaceDict[(int(exp), int(scan))]
        except KeyError:
            raise NotImplementedError("SPICE data for Exp %d Scan %d has not been loaded." % (
                int(exp), int(scan)))

        datamdws = wsmanager.datamdws
        monitormdws = wsmanager.monitormdws

        # binning from MD to single spectrum ws
        # set up binning parameters
        if xmin is None or xmax is None:
            binpar = "%.7f" % (binsize)
        else:
            binpar = "%.7f, %.7f, %.7f" % (xmin, binsize, xmax)

        # scale-factor
        if scalefactor is None:
            scalefactor = 1.
        else:
            scalefactor = float(scalefactor)

        basewsname = datamdws.name().split("_DataMD")[0]
        outwsname = basewsname + "_Reduced"
        print("[DB]", numpy.array(excludeddetlist))
        api.ConvertCWPDMDToSpectra(InputWorkspace=datamdws,
                                   InputMonitorWorkspace=monitormdws,
                                   OutputWorkspace=outwsname,
                                   BinningParams=binpar,
                                   UnitOutput=unit,
                                   NeutronWaveLength=wavelength,
                                   ExcludedDetectorIDs=numpy.array(excludeddetlist),
                                   ScaleFactor=scalefactor)

        print("[DB] Reduction is finished.  Data is in workspace %s. " % (outwsname))

        # Set up class variable for min/max and
        outws = AnalysisDataService.retrieve(outwsname)
        if outws is None:
            raise NotImplementedError("Failed to bin the MDEventWorkspaces to MatrixWorkspace.")

        # Manager:
        wsmanager = PDRManager(exp, scan)
        wsmanager.setup(datamdws, monitormdws, outws, unit, binsize)
        wsmanager.setWavelength(wavelength)

        self._myWorkspaceDict[(exp, scan)] = wsmanager

        return True
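The BinningParams string built above (and again in Example 41) follows Mantid's "step" or "min, step, max" convention; a small hedged helper distilling that logic:

    def binning_params(binsize, xmin=None, xmax=None):
        """Return a BinningParams string: 'step' or 'min, step, max'."""
        if xmin is None or xmax is None:
            return "%.7f" % binsize
        return "%.7f, %.7f, %.7f" % (xmin, binsize, xmax)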
Example 19
def fetch_correction_workspaces(vanadium_path: str, instrument: str):
    # -> Workspace2D, Workspace2D
    """
    Fetch workspaces from the ADS or create new ones.
    :param vanadium_path: The path to the requested vanadium run raw data.
    :param instrument: The instrument the data came from.
    """
    van_run_no = path_handling.get_run_number_from_path(
        vanadium_path, instrument)
    if not check_workspaces_exist(van_run_no):
        van_integration_ws, van_processed_inst_ws = create_vanadium_corrections(
            vanadium_path, instrument)
    else:
        van_integration_ws = Ads.retrieve(
            str(van_run_no) + '_' + INTEGRATED_WORKSPACE_NAME)
        van_processed_inst_ws = Ads.retrieve(
            str(van_run_no) + '_' + PROCESSED_WORKSPACE_NAME)
    return van_integration_ws, van_processed_inst_ws
Example 20
    def _locate_global_xlimit(self):
        """Find the global bin range across all spectra"""
        input_workspaces = self.getProperty("InputWorkspace").value
        mask = self.getProperty("MaskWorkspace").value
        mask_angle = self.getProperty("MaskAngle").value
        target = self.getProperty("Target").value
        e_fixed = self.getProperty("EFixed").value

        # NOTE:
        # Due to range differences among the incoming spectra, common bin parameters
        # are needed so that all data can be binned exactly the same way.
        _xMin, _xMax = 1e16, -1e16

        # BEGIN_FOR: located_global_xMin&xMax
        for n, _wsn in enumerate(input_workspaces):
            _ws = AnalysisDataService.retrieve(_wsn)
            _mskn = f"__mask_{n}"
            self.temp_workspace_list.append(_mskn)

            ExtractMask(_ws, OutputWorkspace=_mskn, EnableLogging=False)
            if mask_angle != Property.EMPTY_DBL:
                MaskAngle(
                    Workspace=_mskn,
                    MinAngle=mask_angle,
                    Angle="Phi",
                    EnableLogging=False,
                )
            if mask is not None:
                BinaryOperateMasks(
                    InputWorkspace1=_mskn,
                    InputWorkspace2=mask,
                    OperationType="OR",
                    OutputWorkspace=_mskn,
                    EnableLogging=False,
                )

            _ws_tmp = ExtractUnmaskedSpectra(
                InputWorkspace=_ws, MaskWorkspace=_mskn, EnableLogging=False
            )
            if isinstance(_ws_tmp, IEventWorkspace):
                _ws_tmp = Integration(InputWorkspace=_ws_tmp, EnableLogging=False)
            _ws_tmp = ConvertSpectrumAxis(
                InputWorkspace=_ws_tmp,
                Target=target,
                EFixed=e_fixed,
                EnableLogging=False,
            )
            _ws_tmp = Transpose(
                InputWorkspace=_ws_tmp, OutputWorkspace=f"__ws_{n}", EnableLogging=False
            )

            _xMin = min(_xMin, _ws_tmp.readX(0).min())
            _xMax = max(_xMax, _ws_tmp.readX(0).max())
        # END_FOR: located_global_xMin&xMax

        return _xMin, _xMax
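The running min/max accumulation at the heart of the loop, distilled into a standalone sketch (sentinel start values as in the example):

    def global_x_limits(workspaces):
        """Global x-range over the first spectrum of each workspace."""
        x_min, x_max = 1e16, -1e16
        for ws in workspaces:
            x_min = min(x_min, ws.readX(0).min())
            x_max = max(x_max, ws.readX(0).max())
        return x_min, x_max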
Example 21
 def test_MatchAndMergeWorkspaces_accepts_a_list_of_workspaces(self):
     x_min = np.array([2, 5, 10])
     x_max = np.array([10, 20, 30])
     ws_group = AnalysisDataService.retrieve('ws_group')
     ws_list = [ws_group[0], ws_group[1], ws_group[2]]
     ws_merged = MatchAndMergeWorkspaces(InputWorkspaces=ws_list, XMin=x_min, XMax=x_max)
     self.assertIsInstance(ws_merged, MatrixWorkspace)
     self.assertEqual(ws_merged.getNumberHistograms(), 1)
     self.assertAlmostEqual(min(ws_merged.dataX(0)), 2, places=0)
     self.assertAlmostEqual(max(ws_merged.dataX(0)), 30, places=0)
Example 22
 def unwrap_groups(inputs):
     output = []
     for name_in_list in inputs:
         ws_in_list = AnalysisDataService.retrieve(name_in_list)
         if isinstance(ws_in_list, Workspace2D):
             output.append(ws_in_list)
         if isinstance(ws_in_list, WorkspaceGroup):
             for ws_in_group in ws_in_list:
                 output.append(ws_in_group)
     return output
Example 23
 def plot_cut_ws(self, wsname):
     if len(self.figure.axes[0].tracked_workspaces) == 0:
         self.figure.axes[0].errorbar(ADS.retrieve(wsname),
                                      wkspIndex=None,
                                      marker='o',
                                      capsize=2,
                                      color='k',
                                      markersize=3)
     self._format_cut_figure()
     self.figure.canvas.draw()
Example 24
def _calculate_vanadium_correction(vanadium_path):
    """
    Runs the vanadium correction algorithm.
    :param vanadium_path: The path to the vanadium data.
    :return: The integrated workspace and the curves generated by the algorithm.
    """
    try:
        Load(Filename=vanadium_path, OutputWorkspace=VANADIUM_INPUT_WORKSPACE_NAME)
    except Exception as e:
        logger.error("Error when loading vanadium sample data. "
                     "Could not run Load algorithm with vanadium run number: " +
                     str(vanadium_path) + ". Error description: " + str(e))
        raise RuntimeError
    EnggVanadiumCorrections(VanadiumWorkspace=VANADIUM_INPUT_WORKSPACE_NAME,
                            OutIntegrationWorkspace=INTEGRATED_WORKSPACE_NAME,
                            OutCurvesWorkspace=CURVES_WORKSPACE_NAME)
    Ads.remove(VANADIUM_INPUT_WORKSPACE_NAME)
    integrated_workspace = Ads.Instance().retrieve(INTEGRATED_WORKSPACE_NAME)
    curves_workspace = Ads.Instance().retrieve(CURVES_WORKSPACE_NAME)
    return integrated_workspace, curves_workspace
Example 25
 def _expand_groups(self):
     """expand workspace groups"""
     workspaces = self.getProperty("InputWorkspace").value
     input_workspaces = []
     for wsname in workspaces:
         wks = AnalysisDataService.retrieve(wsname)
         if isinstance(wks, WorkspaceGroup):
             input_workspaces.extend(wks.getNames())
         else:
             input_workspaces.append(wsname)
     return input_workspaces
Example 26
    def testNoBackgroundNoiseDefaults(self):
        self.__setupTestWS()
        self.__createRandPeaksWS(self.peak_amplitude)

        basews = AnalysisDataService.retrieve("Baseline")
        peakws = AnalysisDataService.retrieve("PeakData")

        clippedws = ClipPeaks(peakws,
                              LLSCorrection=True,
                              IncreasingWindow=False,
                              SmoothingRange=10,
                              WindowSize=10,
                              OutputWorkspace="clipout")

        # Validate by subtracting peak clip results with baseline function
        np.testing.assert_allclose(clippedws.extractY(),
                                   basews.extractY(),
                                   rtol=self.tolerance)

        DeleteWorkspaces(WorkspaceList=["Baseline", "PeakData", "clipout"])
Example 27
    def __createRandPeaksWS(self, amplitude=1.0):
        """
        Creates a test WS with random peaks added to the baseline function
        """
        # Seed the global RNG, then draw random indices at which peaks are added to the data.
        np.random.seed(self.rand_seed)
        peaklist = np.random.randint(self.peak_border_lim,
                                     self.resolution - self.peak_border_lim,
                                     self.npeaks)

        self.assertTrue(AnalysisDataService.doesExist("Baseline"))
        basews = AnalysisDataService.retrieve("Baseline")
        x = basews.extractX()
        y = basews.extractY()

        # Add a simple peak to the indices chosen by the random permutation
        for i in peaklist:
            y[0][i] = y[0][i] + amplitude * np.abs(np.sin(x[0][i]))

        CreateWorkspace(DataX=x, DataY=y, OutputWorkspace="PeakData")
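With modern NumPy the same draw would use the Generator API rather than the legacy global seed. A hedged equivalent (seed and limits are hypothetical here, and the random stream differs from the legacy one):

    import numpy as np

    rng = np.random.default_rng(12345)
    peaklist = rng.integers(low=10, high=990, size=20)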
Example 28
def get_bank_grouping_workspace(bank: int, sample_raw):  # -> GroupingWorkspace
    """
    Retrieve the grouping workspace for the North/South bank from the user directories, or create a new one from the
    sample workspace instrument data if not found
    :param bank: integer denoting the bank, 1 or 2 for North/South respectively
    :param sample_raw: Workspace containing the instrument data that can be used to create a new grouping workspace
    :return: The loaded or created grouping workspace
    """
    if bank == 1:
        try:
            if ADS.doesExist("NorthBank_grouping"):
                return ADS.retrieve("NorthBank_grouping")
            grp_ws = mantid.LoadDetectorsGroupingFile(
                InputFile="ENGINX_North_grouping.xml",
                OutputWorkspace="NorthBank_grouping")
            return grp_ws
        except ValueError:
            logger.notice(
                "NorthBank grouping file not found in user directories - creating one"
            )
        bank_name = "NorthBank"
    elif bank == 2:
        try:
            if ADS.doesExist("SouthBank_grouping"):
                return ADS.retrieve("SouthBank_grouping")
            grp_ws = mantid.LoadDetectorsGroupingFile(
                InputFile="ENGINX_South_grouping.xml",
                OutputWorkspace="SouthBank_grouping")
            return grp_ws
        except ValueError:
            logger.notice(
                "SouthBank grouping file not found in user directories - creating one"
            )
        bank_name = "SouthBank"
    else:
        raise ValueError("Invalid bank number given")
    ws_name = bank_name + "_grouping"
    grp_ws, _, _ = mantid.CreateGroupingWorkspace(InputWorkspace=sample_raw,
                                                  GroupNames=bank_name,
                                                  OutputWorkspace=ws_name)
    return grp_ws
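get_bank_grouping_workspace chains three fallbacks: reuse from the ADS, load a grouping XML, then create from the instrument data. A hedged distillation (names hypothetical):

    from mantid.api import AnalysisDataService as ADS
    from mantid import simpleapi

    def get_grouping(ws_name, xml_file, sample_ws, bank_name):
        if ADS.doesExist(ws_name):
            return ADS.retrieve(ws_name)
        try:
            return simpleapi.LoadDetectorsGroupingFile(InputFile=xml_file,
                                                       OutputWorkspace=ws_name)
        except ValueError:
            grp_ws, _, _ = simpleapi.CreateGroupingWorkspace(InputWorkspace=sample_ws,
                                                             GroupNames=bank_name,
                                                             OutputWorkspace=ws_name)
            return grp_ws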
Example 29
    def _plot_focused_workspaces(focused_workspaces):
        fig = plt.figure()
        gs = gridspec.GridSpec(1, len(focused_workspaces))
        plots = [
            fig.add_subplot(gs[i], projection="mantid")
            for i in range(len(focused_workspaces))
        ]

        for ax, ws_name in zip(plots, focused_workspaces):
            ax.plot(Ads.retrieve(ws_name), wkspIndex=0)
            ax.set_title(ws_name)
        fig.show()
Example 30
 def test_that_plotting_ws_without_giving_spec_num_sets_correct_spec_num_after_spectra_removed(
         self):
     CreateWorkspace(DataX=[10, 20, 30],
                     DataY=[10, 20, 30],
                     DataE=[1, 1, 1],
                     NSpec=3,
                     OutputWorkspace="ws-with-3-spec")
     RemoveSpectra("ws-with-3-spec", [0, 1], OutputWorkspace='out_ws')
     out_ws = ADS.retrieve('out_ws')
     self.ax.plot(out_ws)
     ws_artist = self.ax.tracked_workspaces['out_ws'][0]
     self.assertEqual(3, ws_artist.spec_num)
Example 31
def get_detector_ids_for_bank(bank):
    """
    Find the detector IDs for an instrument bank. Note this is at this point specific to
    the ENGINX instrument.

    @param bank :: name/number as a string.

    @returns list of detector IDs corresponding to the specified Engg bank number
    """
    import os
    grouping_file_path = os.path.join(mantid.config.getInstrumentDirectory(),
                                      'Grouping', 'ENGINX_Grouping.xml')

    alg = AlgorithmManager.create('LoadDetectorsGroupingFile')
    alg.initialize()
    alg.setLogging(False)
    alg.setProperty('InputFile', grouping_file_path)
    group_name = '__EnginXGrouping'
    alg.setProperty('OutputWorkspace', group_name)
    alg.execute()

    # LoadDetectorsGroupingFile produces a 'Grouping' workspace.
    # PropertyWithValue<GroupingWorkspace> not working (GitHub issue 13437)
    # => cannot run as child and get outputworkspace property properly
    if not ADS.doesExist(group_name):
        raise RuntimeError(
            'LoadDetectorsGroupingFile did not run correctly. Could not '
            'find its output workspace: ' + group_name)
    grouping = mtd[group_name]

    detector_ids = set()

    # less than zero indicates both banks, from line 98
    bank_int = int(bank)
    if bank_int < 0:
        # both banks, north and south
        bank_int = [1, 2]
    else:
        # make into list so that the `if in` check works
        bank_int = [bank_int]

    for i in range(grouping.getNumberHistograms()):
        if grouping.readY(i)[0] in bank_int:
            detector_ids.add(grouping.getDetector(i).getID())

    mantid.DeleteWorkspace(grouping)

    if len(detector_ids) == 0:
        raise ValueError('Could not find any detector for this bank: ' + bank +
                         '. This looks like an unknown bank')

    return detector_ids
Example 32
 def test_fit_cubic_spline_via_mantid_produces_fit_with_same_range_as_binning_for_calc(
         self):
     binning_for_calc = "0.2,0.1,3.0"
     binning_for_fit = "0.2,0.1,4.0"
     alg_test = run_algorithm("FitIncidentSpectrum",
                              InputWorkspace=self.incident_wksp,
                              OutputWorkspace="fit_wksp",
                              BinningForCalc=binning_for_calc,
                              BinningForFit=binning_for_fit,
                              FitSpectrumWith="CubicSplineViaMantid")
     self.assertTrue(alg_test.isExecuted())
     fit_wksp = AnalysisDataService.retrieve("fit_wksp")
     self.assertTrue(np.allclose(fit_wksp.readX(0), np.arange(0.2, 3, 0.1)))
Example 33
 def _check_region_calib_ws_exists(region: str) -> bool:
     """
     Check that the required workspace for use in focussing the provided region of interest exist in the ADS
     :param region: String describing region of interest
     :return: True if present, False if not
     """
     region_ws_name = REGION_CALIB_WS_PREFIX + region
     present = Ads.doesExist(region_ws_name)
     if not present:
         logger.warning(
             f"Cannot focus as the region calibration workspace \"{region_ws_name}\" is not "
             f"present.")
     return present
Example 34
    def use_existWS(self):
        """ Set up workspace to an existing one
        """
        wsname = str(self.ui.comboBox.currentText())

        try:
            dataws = AnalysisDataService.retrieve(wsname)
            self._importDataWorkspace(dataws)
        except KeyError:
            pass

        # Reset GUI
        self._resetGUI(resetfilerun=True)
Example 35
 def test_MatchAndMergeWorkspaces_accepts_a_mixture_of_ws_size(self):
     x_min = np.array([2, 5, 10, 15, 20])
     x_max = np.array([10, 20, 30, 40, 45])
     ws_group = AnalysisDataService.retrieve('ws_group')
     ConjoinWorkspaces(InputWorkspace1=ws_group[3],
                       InputWorkspace2=ws_group[4],
                       CheckOverlapping=False)
     ws_list = [ws_group[0], ws_group[1], ws_group[2], ws_group[3]]
     ws_merged = MatchAndMergeWorkspaces(InputWorkspaces=ws_list, XMin=x_min, XMax=x_max)
     self.assertIsInstance(ws_merged, MatrixWorkspace)
     self.assertEqual(ws_merged.getNumberHistograms(), 1)
     self.assertAlmostEqual(min(ws_merged.dataX(0)), 2, places=0)
     self.assertAlmostEqual(max(ws_merged.dataX(0)), 45, places=0)
Example 37
def create_vanadium_corrections(vanadium_path: str,
                                instrument: str):  # -> Workspace, Workspace
    """
    Runs the vanadium correction algorithm.
    :param vanadium_path: The path to the vanadium data.
    :return: The integrated workspace and the processed instrument workspace generated.
    """
    try:
        run_no = path_handling.get_run_number_from_path(
            vanadium_path, instrument)
        van_ws = Load(Filename=vanadium_path,
                      OutputWorkspace=str(run_no) + '_' +
                      VANADIUM_INPUT_WORKSPACE_NAME)
    except Exception as e:
        logger.error(
            "Error when loading vanadium sample data. "
            "Could not run Load algorithm with vanadium run number: " +
            str(vanadium_path) + ". Error description: " + str(e))
        raise RuntimeError
    # get full instrument calibration for instrument processing calculation
    if Ads.doesExist("full_inst_calib"):
        full_calib_ws = Ads.retrieve("full_inst_calib")
    else:
        full_calib_path = get_setting(
            output_settings.INTERFACES_SETTINGS_GROUP,
            output_settings.ENGINEERING_PREFIX, "full_calibration")
        try:
            full_calib_ws = Load(full_calib_path,
                                 OutputWorkspace="full_inst_calib")
        except ValueError:
            logger.error(
                "Error loading Full instrument calibration - this is set in the interface settings."
            )
            return
    integral_ws = _calculate_vanadium_integral(van_ws, run_no)
    processed_ws = _calculate_vanadium_processed_instrument(
        van_ws, full_calib_ws, integral_ws, run_no)
    return integral_ws, processed_ws
Example 38
    def getMergedVector(self, mkey):
        """ Get vector X and Y from merged scans
        """
        if mkey in self._myMergedWSDict:
            wksp = self._myMergedWSDict[mkey]

            # convert to point data if necessary
            if len(wksp.readX(0)) != len(wksp.readY(0)):
                wsname = wksp.name() + "_pd"
                api.ConvertToPointData(InputWorkspace=wksp, OutputWorkspace=wsname)
                wksp = AnalysisDataService.retrieve(wsname)

            vecx = wksp.readX(0)
            vecy = wksp.readY(0)
        else:
            raise NotImplementedError("No merged workspace for key = %s." % (str(mkey)))

        return (vecx, vecy)
Example 39
    def getVectorToPlot(self, exp, scan):
        """ Get vec x and vec y of the reduced workspace to plot
        """
        # get on hold of reduced workspace
        wsmanager = self.getWorkspace(exp, scan, raiseexception=True)
        reducedws = wsmanager.reducedws
        if reducedws is None:
            raise NotImplementedError("Exp %d Scan %d does not have reduced workspace." % (exp, scan))

        # convert to point data if necessary
        if len(reducedws.readX(0)) != len(reducedws.readY(0)):
            wsname = reducedws.name() + "_pd"
            api.ConvertToPointData(InputWorkspace=reducedws, OutputWorkspace=wsname)
            outws = AnalysisDataService.retrieve(wsname)
        else:
            outws = reducedws

        # get vectors
        return outws.readX(0), outws.readY(0)
Example 40
    def _plotTimeCounts(self, wksp):
        """ Plot time/counts
        """
        import datetime
        # Rebin events by pulse time
        try:
            # Get run start and run stop
            if wksp.getRun().hasProperty("run_start"):
                runstart = wksp.getRun().getProperty("run_start").value
            else:
                runstart = wksp.getRun().getProperty("proton_charge").times[0]
            runstop = wksp.getRun().getProperty("proton_charge").times[-1]

            runstart = str(runstart).split(".")[0].strip()
            runstop = str(runstop).split(".")[0].strip()

            t0 = datetime.datetime.strptime(runstart, "%Y-%m-%dT%H:%M:%S")
            tf = datetime.datetime.strptime(runstop, "%Y-%m-%dT%H:%M:%S")

            # Calculate the duration in seconds
            dt = tf - t0
            timeduration = dt.days*3600*24 + dt.seconds

            timeres = float(timeduration)/MAXTIMEBINSIZE
            if timeres < 1.0:
                timeres = 1.0

            sumwsname = "_Summed_%s"%(str(wksp))
            if AnalysisDataService.doesExist(sumwsname) is False:
                sumws = api.SumSpectra(InputWorkspace=wksp, OutputWorkspace=sumwsname)
                sumws = api.RebinByPulseTimes(InputWorkspace=sumws, OutputWorkspace = sumwsname,
                                              Params="%f"%(timeres))
                sumws = api.ConvertToPointData(InputWorkspace=sumws, OutputWorkspace=sumwsname)
            else:
                sumws = AnalysisDataService.retrieve(sumwsname)
        except RuntimeError as e:
            return str(e)

        vecx = sumws.readX(0)
        vecy = sumws.readY(0)

        xmin = min(vecx)
        xmax = max(vecx)
        ymin = min(vecy)
        ymax = max(vecy)

        # Reset graph
        self.ui.mainplot.set_xlim(xmin, xmax)
        self.ui.mainplot.set_ylim(ymin, ymax)

        self.ui.mainplot.set_xlabel('Time (seconds)', fontsize=13)
        self.ui.mainplot.set_ylabel('Counts', fontsize=13)

        # Set up main line
        setp(self.mainline, xdata=vecx, ydata=vecy)

        # Reset slide
        newslidery = [min(vecy), max(vecy)]

        newleftx = xmin + (xmax-xmin)*self._leftSlideValue*0.01
        setp(self.leftslideline, xdata=[newleftx, newleftx], ydata=newslidery)

        newrightx = xmin + (xmax-xmin)*self._rightSlideValue*0.01
        setp(self.rightslideline, xdata=[newrightx, newrightx], ydata=newslidery)

        self.ui.graphicsView.draw()

        return
Example 41
    def mergeReduceSpiceData(self, expno, scannolist, unit, xmin, xmax, binsize):
        """ Merge and reduce SPICE data files
        Arguments:
         - expno: experiment number
         - scannolist: list of scan numbers to merge and reduce
        """
        # Collect data MD workspaces and monitor MD workspaces
        datamdwslist = []
        monitormdwslist = []
        self._lastWkspToMerge = []

        print("[Checkpoint 0] Scans = ", str(scannolist))
        for scanno in sorted(scannolist):
            try:
                wsmanager = self.getWorkspace(expno, scanno, True)
                datamdwslist.append(wsmanager.datamdws)
                monitormdwslist.append(wsmanager.monitormdws)
                self._lastWkspToMerge.append(wsmanager)
            except KeyError as ne:
                print('[Error] Unable to retrieve MDWorkspaces for Exp %d Scan %d due to %s.' % (
                    expno, scanno, str(ne)))
                scannolist.remove(scanno)
        # ENDFOR

        print("[Checkpoint 1] Scans = ", str(scannolist))

        # Merge and binning
        if len(datamdwslist) > 1:
            mg_datamdws = datamdwslist[0] + datamdwslist[1]
            mg_monitormdws = monitormdwslist[0] + monitormdwslist[1]
        else:
            mg_datamdws = datamdwslist[0]
            mg_monitormdws = monitormdwslist[0]
        for iw in range(2, len(datamdwslist)):
            mg_datamdws += datamdwslist[iw]
            mg_monitormdws += monitormdwslist[iw]

        # Set up binning parameters
        if xmin is None or xmax is None:
            binpar = "%.7f" % (binsize)
        else:
            binpar = "%.7f, %.7f, %.7f" % (xmin, binsize, xmax)

        # set up output workspace's name
        scannolist = sorted(scannolist)
        outwsname = "Merged_Exp%d_Scan%s_%s" % (expno, scannolist[0], scannolist[-1])

        # Merge
        wavelength = self.getWavelength(expno, scannolist[0])
        api.ConvertCWPDMDToSpectra(InputWorkspace=mg_datamdws,
                                   InputMonitorWorkspace=mg_monitormdws,
                                   OutputWorkspace=outwsname,
                                   BinningParams=binpar,
                                   UnitOutput=unit,
                                   NeutronWaveLength=wavelength)
        moutws = AnalysisDataService.retrieve(outwsname)
        if moutws is None:
            raise NotImplementedError("Merge failed.")

        key = (expno, str(scannolist))
        self._myMergedWSDict[key] = moutws

        return key