Example #1
    def test_that_can_load_isis_nexus_file_with_event_data_and_multi_period(self):
        # Arrange
        state = SANSLoadTest._get_simple_state(sample_scatter="LARMOR00013065.nxs",
                                               calibration="80tubeCalibration_18-04-2016_r9330-9335.nxs")

        # Act
        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
                                  output_workspace_names=output_workspace_names)

        # Assert
        expected_number_of_workspaces = [4, 0, 0, 0, 0, 0]
        expected_number_on_ads = 1
        workspace_type = [EventWorkspace, None, None, None, None, None]
        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)

        # Check that calibration is added
        self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))

        # Confirm that the ADS workspace contains the calibration file
        try:
            AnalysisDataService.retrieve("80tubeCalibration_18-04-2016_r9330-9335")
            on_ads = True
        except RuntimeError:
            on_ads = False
        self.assertTrue(on_ads)

        # Cleanup
        remove_all_workspaces_from_ads()
Example #2
    def testConvertUnits(self):
        # test whether CorrectTOF+ConvertUnits+ConvertToDistribution gives the same result as TOFTOFConvertTofToDeltaE
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)

        # convert units, convert to distribution
        alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName+'_dE')
        ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName+'_dE')
        alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)

        # create reference data for X axis
        tof1 = 2123.33867005
        dataX = self._input_ws.readX(0) - tof1
        tel = 8189.5 - tof1
        factor = m_n*1e+15/eV
        newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
        # compare
        self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))

        # create reference data for Y axis and compare to the output
        tof = dataX[:-1] + 5.25
        newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
        # compare
        self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))

        run_algorithm("DeleteWorkspace", Workspace=ws_dE)
        run_algorithm("DeleteWorkspace", Workspace=wscorr)
    def test_DNSFRSelfCorrection(self):
        outputWorkspaceName = "DNSFlippingRatioCorrTest_Test4"
        # consider normalization=1.0 as set in self._create_fake_workspace
        dataws_sf = self.__sf_nicrws - self.__sf_bkgrws
        dataws_nsf = self.__nsf_nicrws - self.__nsf_bkgrws
        alg_test = run_algorithm("DNSFlippingRatioCorr", SFDataWorkspace=dataws_sf,
                                 NSFDataWorkspace=dataws_nsf, SFNiCrWorkspace=self.__sf_nicrws.getName(),
                                 NSFNiCrWorkspace=self.__nsf_nicrws.getName(), SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                                 NSFBkgrWorkspace=self.__nsf_bkgrws.getName(), SFOutputWorkspace=outputWorkspaceName+'SF',
                                 NSFOutputWorkspace=outputWorkspaceName+'NSF')

        self.assertTrue(alg_test.isExecuted())
        # check whether the data are correct
        ws_sf = AnalysisDataService.retrieve(outputWorkspaceName + 'SF')
        ws_nsf = AnalysisDataService.retrieve(outputWorkspaceName + 'NSF')
        # dimensions
        self.assertEqual(24, ws_sf.getNumberHistograms())
        self.assertEqual(24, ws_nsf.getNumberHistograms())
        self.assertEqual(2,  ws_sf.getNumDims())
        self.assertEqual(2,  ws_nsf.getNumDims())
        # data array: spin-flip must be zero
        for i in range(24):
            self.assertAlmostEqual(0.0, ws_sf.readY(i)[0])
        # data array: non spin-flip must be nsf + sf (total intensity is conserved)
        nsf = np.array(dataws_nsf.extractY())
        sf = np.array(dataws_sf.extractY())
        refdata = nsf + sf
        for i in range(24):
            self.assertAlmostEqual(refdata[i][0], ws_nsf.readY(i)[0])

        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'SF')
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'NSF')
        run_algorithm("DeleteWorkspace", Workspace=dataws_sf)
        run_algorithm("DeleteWorkspace", Workspace=dataws_nsf)
        return
Example #4
    def edit_matrix_workspace(sq_name, scale_factor, shift, edited_sq_name=None):
        """
        Edit the matrix workspace of S(Q) by scaling and shift
        :param sq_name: name of the SofQ workspace
        :param scale_factor:
        :param shift:
        :param edited_sq_name: workspace for the edited S(Q)
        :return:
        """
        # get the workspace
        if AnalysisDataService.doesExist(sq_name) is False:
            raise RuntimeError('S(Q) workspace {0} cannot be found in ADS.'.format(sq_name))

        if edited_sq_name is not None:
            simpleapi.CloneWorkspace(InputWorkspace=sq_name, OutputWorkspace=edited_sq_name)
            sq_ws = AnalysisDataService.retrieve(edited_sq_name)
        else:
            sq_ws = AnalysisDataService.retrieve(sq_name)

        # scale and shift S(Q)
        sq_ws = sq_ws * scale_factor
        sq_ws = sq_ws + shift
        # the arithmetic above creates a new workspace, so replace the clone with it
        if edited_sq_name is not None and sq_ws.name() != edited_sq_name:
            simpleapi.DeleteWorkspace(Workspace=edited_sq_name)
            simpleapi.RenameWorkspace(InputWorkspace=sq_ws, OutputWorkspace=edited_sq_name)

        assert sq_ws is not None, 'S(Q) workspace cannot be None.'
        print('[DB...BAT] S(Q) workspace that was edited is {0}'.format(sq_ws))
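Example #5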
    def test_that_can_find_can_reduction_if_it_exists(self):
        # Arrange
        test_director = TestDirector()
        state = test_director.construct()
        tagged_workspace_names = {None: "test_ws",
                                  OutputParts.Count: "test_ws_count",
                                  OutputParts.Norm: "test_ws_norm"}
        SANSFunctionsTest._prepare_workspaces(number_of_workspaces=4,
                                              tagged_workspace_names=tagged_workspace_names,
                                              state=state,
                                              reduction_mode=ISISReductionMode.LAB)
        # Act
        workspace, workspace_count, workspace_norm = get_reduced_can_workspace_from_ads(state, output_parts=True,
                                                                              reduction_mode=ISISReductionMode.LAB)  # noqa

        # Assert
        self.assertTrue(workspace is not None)
        self.assertTrue(workspace.name() == AnalysisDataService.retrieve("test_ws").name())
        self.assertTrue(workspace_count is not None)
        self.assertTrue(workspace_count.name() == AnalysisDataService.retrieve("test_ws_count").name())
        self.assertTrue(workspace_norm is not None)
        self.assertTrue(workspace_norm.name() == AnalysisDataService.retrieve("test_ws_norm").name())

        # Clean up
        SANSFunctionsTest._remove_workspaces()
Example #6
    def _check_if_all_multi_period_workspaces_have_the_same_position(self, base_name, number_of_workspaces):

        reference_name = base_name + str(1)
        reference_workspace = AnalysisDataService.retrieve(reference_name)
        reference_position, reference_rotation = self._get_position_and_rotation(reference_workspace)
        for index in range(2, number_of_workspaces + 1):
            ws_name = base_name + str(index)
            workspace = AnalysisDataService.retrieve(ws_name)
            position, rotation = self._get_position_and_rotation(workspace)
            self.assertEqual(position, reference_position)
            self.assertEqual(rotation, reference_rotation)
Example #7
    def calculate_peak_center(self):
        """ Calculate peak's center by averaging the peaks found and stored in PeakWorkspace
        :return:
        """
        # Go through the peak workspaces to calculate peak center with weight (monitor and counts)
        peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)

        # spice table workspace
        spice_table_name = get_spice_table_name(self._myExpNumber, self._myScanNumber)
        spice_table_ws = AnalysisDataService.retrieve(spice_table_name)

        pt_spice_row_dict = build_pt_spice_table_row_map(spice_table_ws)
        det_col_index = spice_table_ws.getColumnNames().index('detector')
        monitor_col_index = spice_table_ws.getColumnNames().index('monitor')

        num_found_peaks = peak_ws.rowCount()

        q_sample_sum = numpy.array([0., 0., 0.])
        weight_sum = 0.

        for i_peak in range(num_found_peaks):
            # get peak
            peak_i = peak_ws.getPeak(i_peak)
            run_number = peak_i.getRunNumber()
            # get Pt. number
            pt_number = run_number % self._myScanNumber
            # get row number and then detector counts and monitor counts
            if pt_number not in pt_spice_row_dict:
                # skip
                print('[Error] Scan %d Peak %d Pt %d cannot be located.' % (self._myScanNumber, i_peak, pt_number))
                continue

            row_index = pt_spice_row_dict[pt_number]
            det_counts = spice_table_ws.cell(row_index, det_col_index)
            monitor_counts = spice_table_ws.cell(row_index, monitor_col_index)
            if monitor_counts < 1.:
                # skip zero-count
                continue
            # convert q sample from V3D to ndarray
            q_i = peak_i.getQSampleFrame()
            q_array = numpy.array([q_i.X(), q_i.Y(), q_i.Z()])
            # calculate weight
            weight_i = float(det_counts)/float(monitor_counts)
            # contribute to total
            weight_sum += weight_i
            q_sample_sum += q_array * weight_i
            # set non-normalized peak intensity as detector counts (roughly)
            peak_i.setIntensity(det_counts)
        # END-FOR (i_peak)

        self._avgPeakCenter = q_sample_sum/weight_sum

        return
Example #8
    def test_genHKLList(self):
        """ Test to load a .hkl file
        """
        # Set up
        alg_test = run_algorithm("CreateLeBailFitInput",
                ReflectionsFile         = "",
                MaxHKL                  = "12,12,12",
                FullprofParameterFile   = "2011B_HR60b2.irf",
                Bank                    = 2,
                LatticeConstant         = 4.66,
                GenerateBraggReflections        = True,
                InstrumentParameterWorkspace    = "PG3_Bank2_Foo2",
                BraggPeakParameterWorkspace     = "Arb_Peaks"
                )

        # Execute
        self.assertTrue(alg_test.isExecuted())

        # Verify some values
        # Profile parameter workspace
        paramws = AnalysisDataService.retrieve("PG3_Bank2_Foo2")

        paramname0 = paramws.cell(0, 0)

        # the parameter table has one extra row when the first entry is "bank"
        if paramname0.lower() == "bank":
            numrowgood = 28
        else:
            numrowgood = 27
        self.assertEqual(numrowgood, paramws.rowCount())

        paramnames = []
        for i in range(paramws.rowCount()):
            paramname = paramws.cell(i, 0)
            paramnames.append(paramname)
        self.assertEqual(paramnames.count("LatticeConstant"), 1)


        # Bragg peak list
        braggws = AnalysisDataService.retrieve("Arb_Peaks")
        self.assertEqual(braggws.rowCount() > 20, True)

        # 4. Clean up workspaces
        AnalysisDataService.remove("PG3_Bank2_Foo2")
        AnalysisDataService.remove("Arb_Peaks")

        return
Example #9
    def test_LoadPartiallyValidFilesMultipleLogValues(self):
        outputWorskapceName = "LoadLogPropertyTableTest_Test2"

        alg_test = run_algorithm(
            "LoadLogPropertyTable",
            FirstFile="emu00006473.nxs",
            LastFile="emu00006475.nxs",
            LogNames="Temp_Sample,dur",
            OutputWorkspace=outputWorkspaceName,
        )

        self.assertTrue(alg_test.isExecuted())

        # Verify some values
        tablews = AnalysisDataService.retrieve(outputWorkspaceName)
        self.assertEqual(2, tablews.rowCount())
        self.assertEqual(3, tablews.columnCount())

        self.assertEqual(6473, tablews.cell(0, 0))
        self.assertAlmostEqual(200.078, tablews.cell(0, 1), 2)
        self.assertEqual("8697", tablews.cell(0, 2))
        self.assertEqual(6475, tablews.cell(1, 0))
        self.assertAlmostEqual(283.523, tablews.cell(1, 1), 2)
        self.assertEqual("5647", tablews.cell(1, 2))

        run_algorithm("DeleteWorkspace", Workspace=outputWorskapceName)

        return
Example #10
    def test_LoadValidFilesComments(self):
        outputWorskapceName = "LoadLogPropertyTableTest_Test1"

        alg_test = run_algorithm(
            "LoadLogPropertyTable",
            FirstFile="MUSR00015189.nxs",
            LastFile="MUSR00015193.nxs",
            LogNames="comment",
            OutputWorkspace=outputWorkspaceName,
        )

        self.assertTrue(alg_test.isExecuted())

        # Verify some values
        tablews = AnalysisDataService.retrieve(outputWorkspaceName)
        self.assertEqual(5, tablews.rowCount())
        self.assertEqual(2, tablews.columnCount())

        self.assertEqual("18.95MHz 100W", tablews.cell(0, 1))
        self.assertEqual(15189, tablews.cell(0, 0))
        self.assertEqual(15193, tablews.cell(4, 0))

        run_algorithm("DeleteWorkspace", Workspace=outputWorskapceName)

        return
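Example #11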
    def test_temperature_log_is_time_series(self):
        outputWorkspaceName = "output_ws"
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        AddTimeSeriesLog(
            self._input_ws,
            'temperature',
            '2010-09-14T04:20:12',
            Value='0.0')
        AddTimeSeriesLog(
            self._input_ws,
            'temperature',
            '2010-09-14T04:20:13',
            Value='0.0')
        AddTimeSeriesLog(
            self._input_ws,
            'temperature',
            '2010-09-14T04:20:14',
            Value='0.0')
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)

        self._checkDWF(wsoutput, 0.0)
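Example #12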
def convert_fit_parameter_table_to_dict(table_name):
    """

    :param table_name:
    :return:
    """
    # table workspace
    assert isinstance(table_name, str), 'blabla'
    table_ws = AnalysisDataService.retrieve(table_name)

    # create dictionary
    fit_param_dict = dict()
    params_list = table_ws.getColumnNames()
    #  ['wsindex', 'peakindex', 'Height', 'PeakCentre', 'Sigma', 'A0', 'A1', 'chi2']

    # go through all lines
    num_rows = table_ws.rowCount()
    for irow in range(num_rows):
        value_dict = dict()
        ws_index = None
        for iparam, par_name in enumerate(params_list):
            value_i = table_ws.cell(irow, iparam)
            if par_name == 'wsindex':
                ws_index = int(value_i)
            else:
                value_dict[par_name] = float(value_i)
        # END-FOR (column)
        assert ws_index is not None
        fit_param_dict[ws_index] = value_dict
    # END-FOR (row)

    return fit_param_dict
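A minimal usage sketch (hypothetical: the table name "fit_params" and the 'PeakCentre' entry are assumptions, e.g. a FindEPP- or FitPeak-style parameter table already registered in the ADS):

    # Hypothetical usage: "fit_params" is assumed to be a fit-parameter
    # TableWorkspace already in the ADS (e.g. produced by FindEPP).
    fit_param_dict = convert_fit_parameter_table_to_dict('fit_params')
    for ws_index, param_values in fit_param_dict.items():
        # param_values maps parameter name -> float value
        print(ws_index, param_values.get('PeakCentre'))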
Example #13
    def test_LoadTOF(self):
        outputWorkspaceName = "LoadDNSLegacyTest_Test7"
        filename = "dnstof.d_dat"
        tof1 = 424.668     # must be changed if L1 changes
        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='no',
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())

        # Verify some values
        ws = AnalysisDataService.retrieve(outputWorkspaceName)
        # dimensions
        self.assertEqual(24, ws.getNumberHistograms())
        self.assertEqual(100,  ws.getNumberBins())
        # data array
        self.assertEqual(8, ws.readY(19)[37])       # to be updated once commissioning is finished
        self.assertAlmostEqual(tof1, ws.readX(0)[0], 3)
        self.assertAlmostEqual(tof1+40.1*100, ws.readX(0)[100], 3)
        # sample logs
        run = ws.getRun()
        self.assertEqual(-7.5, run.getProperty('deterota').value)
        self.assertEqual(100, run.getProperty('tof_channels').value)
        self.assertEqual(51428, run.getProperty('mon_sum').value)
        self.assertEqual('z', run.getProperty('polarisation').value)
        self.assertEqual(33, run.getProperty('EPP').value)  # check that EPP is taken from file
        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
        self.assertEqual('no', run.getProperty('normalized').value)
        # check whether detector bank is rotated
        det = ws.getDetector(0)
        self.assertAlmostEqual(7.5, ws.detectorSignedTwoTheta(det)*180/pi)
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
        return
Example #14
    def test_DNSMomentumTransfer(self):
        outputWorkspaceName = "DNSMergeRunsTest_Test4"
        alg_test = run_algorithm("DNSMergeRuns", WorkspaceNames=self.workspaces,
                                 OutputWorkspace=outputWorkspaceName, HorizontalAxis='|Q|')

        self.assertTrue(alg_test.isExecuted())
        # check whether the data are correct
        ws = AnalysisDataService.retrieve(outputWorkspaceName)
        # dimensions
        self.assertEqual(96, ws.blocksize())
        self.assertEqual(2,  ws.getNumDims())
        self.assertEqual(1,  ws.getNumberHistograms())
        # data array
        # reference values
        ttheta = np.round(np.radians(self.angles), 4)
        qarr = np.sort(4.0*np.pi*np.sin(0.5*ttheta)/4.2)
        # read the merged values
        dataX = ws.extractX()[0]
        for i in range(len(self.angles)):
            self.assertAlmostEqual(qarr[i], dataX[i])
        # check that the intensity has not been changed
        dataY = ws.extractY()[0]
        for i in range(len(dataY)):
            self.assertAlmostEqual(1.0, dataY[i])
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
        return
Example #15
    def test_TwoTheta(self):
        # check whether the 2theta angles the same as in the data workspace
        outputWorkspaceName = "DNSDetCorrVanaTest_Test5"
        # rotate detector bank to different angles
        api.LoadInstrument(self.__dataws, InstrumentName='DNS')
        api.LoadInstrument(self.__vanaws, InstrumentName='DNS')
        api.LoadInstrument(self.__bkgrws, InstrumentName='DNS')

        api.RotateInstrumentComponent(self.__dataws, "bank0", X=0, Y=1, Z=0, Angle=-7.53)
        api.RotateInstrumentComponent(self.__vanaws, "bank0", X=0, Y=1, Z=0, Angle=-8.02)
        api.RotateInstrumentComponent(self.__bkgrws, "bank0", X=0, Y=1, Z=0, Angle=-8.54)
        # run correction
        alg_test = run_algorithm("DNSDetEffCorrVana", InputWorkspace=self.__dataws.getName(),
                                 OutputWorkspace=outputWorkspaceName, VanaWorkspace=self.__vanaws.getName(),
                                 BkgWorkspace=self.__bkgrws.getName())
        self.assertTrue(alg_test.isExecuted())
        # check dimensions and angles
        ws = AnalysisDataService.retrieve(outputWorkspaceName)
        # dimensions
        self.assertEqual(24, ws.getNumberHistograms())
        self.assertEqual(2,  ws.getNumDims())
        # angles
        tthetas = np.array([7.53 + i*5 for i in range(24)])
        for i in range(24):
            det = ws.getDetector(i)
            self.assertAlmostEqual(tthetas[i], np.degrees(ws.detectorSignedTwoTheta(det)))

        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
        return
Example #16
    def test_LoadWavelength(self):
        outputWorkspaceName = "LoadDNSLegacyTest_Test8"
        filename = "dn134011vana.d_dat"
        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='no',
                                 OutputWorkspace=outputWorkspaceName, CoilCurrentsTable=self.curtable,
                                 Wavelength=5.7)

        self.assertTrue(alg_test.isExecuted())

        # Verify some values
        ws = AnalysisDataService.retrieve(outputWorkspaceName)
        # dimensions
        self.assertEqual(24, ws.getNumberHistograms())
        self.assertEqual(2,  ws.getNumDims())
        # data array
        self.assertEqual(31461, ws.readY(1)[0])
        self.assertEqual(13340, ws.readY(23)[0])
        self.assertAlmostEqual(5.7, ws.readX(1)[0], 3)
        self.assertAlmostEqual(5.7, ws.readX(23)[0], 3)
        # sample logs
        run = ws.getRun()
        self.assertEqual(5.7, run.getProperty('wavelength').value)
        self.assertAlmostEqual(2.51782, run.getProperty('Ei').value, 3)
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
        return
Example #17
    def save_workspaces(self, workspaces_to_save=None):
        """
        Use the private method _get_workspaces_to_save to get a list of workspaces that are present in the ADS to save
        to the directory that was passed at object creation time, it will also add each of them to the output_list
        private instance variable on the WorkspaceSaver class.
        :param workspaces_to_save: List of Strings; The workspaces that are to be saved to the project.
        """

        # Handle the case where nothing has been passed
        if workspaces_to_save is None:
            return

        for workspace_name in workspaces_to_save:
            # Get the workspace from the ADS
            workspace = ADS.retrieve(workspace_name)
            place_to_save_workspace = os.path.join(self.directory, workspace_name)

            from mantid.simpleapi import SaveMD, SaveNexusProcessed

            try:
                if isinstance(workspace, MDHistoWorkspace) or isinstance(workspace, IMDEventWorkspace):
                    # Save normally using SaveMD
                    SaveMD(InputWorkspace=workspace_name, Filename=place_to_save_workspace + ".nxs")
                else:
                    # Save normally using SaveNexusProcessed
                    SaveNexusProcessed(InputWorkspace=workspace_name, Filename=place_to_save_workspace + ".nxs")
            except Exception:
                logger.warning("Couldn't save workspace in project: " + workspace_name)

            self.output_list.append(workspace_name)
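A minimal usage sketch, assuming (this snippet does not show it) that WorkspaceSaver takes the target directory at construction and that the listed workspaces exist in the ADS:

    # Hypothetical usage: the WorkspaceSaver constructor signature is assumed,
    # and the workspace names are placeholders.
    saver = WorkspaceSaver(directory="/tmp/my_project")
    saver.save_workspaces(workspaces_to_save=["sample_scatter", "fit_params"])
    print(saver.output_list)  # names of the workspaces that were saved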
Example #18
    def retrieve_hkl_from_spice_table(self):
        """ Get averaged HKL from SPICE table
        HKL will be averaged from SPICE table by assuming the value in SPICE might be right
        :return:
        """
        # get SPICE table
        spice_table_name = get_spice_table_name(self._myExpNumber, self._myScanNumber)
        assert AnalysisDataService.doesExist(spice_table_name), 'Spice table for exp %d scan %d cannot be found.' \
                                                                '' % (self._myExpNumber, self._myScanNumber)

        spice_table_ws = AnalysisDataService.retrieve(spice_table_name)

        # get HKL column indexes
        h_col_index = spice_table_ws.getColumnNames().index('h')
        k_col_index = spice_table_ws.getColumnNames().index('k')
        l_col_index = spice_table_ws.getColumnNames().index('l')

        # scan each Pt.
        hkl = numpy.array([0., 0., 0.])

        num_rows = spice_table_ws.rowCount()
        for row_index in range(num_rows):
            mi_h = spice_table_ws.cell(row_index, h_col_index)
            mi_k = spice_table_ws.cell(row_index, k_col_index)
            mi_l = spice_table_ws.cell(row_index, l_col_index)
            hkl += numpy.array([mi_h, mi_k, mi_l])
        # END-FOR

        self._spiceHKL = hkl/num_rows

        return
Example #19
def get_average_omega(exp_number, scan_number):
    """Get average omega (omega-theta)

    :param exp_number:
    :param scan_number:
    :return:
    """
    # get table workspace
    spice_table_name = util4.get_spice_table_name(exp_number, scan_number)
    spice_table = AnalysisDataService.retrieve(spice_table_name)

    # column index
    col_omega_index = spice_table.getColumnNames().index('omega')
    col_2theta_index = spice_table.getColumnNames().index('2theta')

    # get the vectors
    vec_size = spice_table.rowCount()
    vec_omega = numpy.ndarray(shape=(vec_size, ), dtype='float')
    vec_2theta = numpy.ndarray(shape=(vec_size, ), dtype='float')

    for i_row in range(vec_size):
        vec_omega[i_row] = spice_table.cell(i_row, col_omega_index)
        vec_2theta[i_row] = spice_table.cell(i_row, col_2theta_index)
    # END-FOR

    vec_omega -= vec_2theta * 0.5

    # average (omega - theta) over all Pts
    return numpy.mean(vec_omega)
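A minimal usage sketch (the experiment and scan numbers are placeholders; the corresponding SPICE table must already be loaded so that util4.get_spice_table_name can resolve its name):

    # Hypothetical usage: exp 423, scan 82 are assumed to be loaded already.
    avg_omega = get_average_omega(exp_number=423, scan_number=82)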
Example #20
    def test_LoadPRFFile(self):
        """ Test to load a .prf file
        """
        # 1. Create  test .prf file
        prffilename = "test.prf"
        self._createPrfFile(prffilename)

        # 2. Execute the algorithm
        alg_test = run_algorithm("LoadFullprofFile",
                Filename = prffilename,
                OutputWorkspace = "Data",
                PeakParameterWorkspace = "Info")

        self.assertTrue(alg_test.isExecuted())

        # 3. Check data
        dataws = AnalysisDataService.retrieve("Data")
        self.assertEqual(dataws.getNumberHistograms(), 4)
        self.assertEqual(len(dataws.readX(0)), 36)

        #    value
        self.assertEqual(dataws.readX(0)[13], 5026.3223)
        self.assertEqual(dataws.readY(1)[30], 0.3819)

        # 4. Clean
        os.remove(prffilename)
        AnalysisDataService.remove("Data")
        AnalysisDataService.remove("Info")


        return
Example #21
    def test_saveGSS(self):
        """ Test to Save a GSAS file to match V-drive
        """
        # Create a test data file and workspace
        binfilename = "testbin.dat"
        self._createBinFile(binfilename)

        datawsname = "TestInputWorkspace"
        self._createDataWorkspace(datawsname)

        # Execute
        alg_test = run_algorithm("SaveVulcanGSS", 
                InputWorkspace = datawsname,
                BinFilename = binfilename,
                OutputWorkspace = datawsname+"_rebinned",
                GSSFilename = "tempout.gda")

        self.assertTrue(alg_test.isExecuted())

        # Verify that the rebinned output workspace exists
        outputws = AnalysisDataService.retrieve(datawsname+"_rebinned")
        self.assertTrue(outputws is not None)

        # Clean up the test bin file and workspaces
        os.remove(binfilename)
        AnalysisDataService.remove(datawsname)
        AnalysisDataService.remove(datawsname+"_rebinned")

        return
Example #22
    def get_weighted_peak_centres(self):
        """ Get the peak centers found in peak workspace.
        Guarantees: the peak centers and its weight (detector counts) are exported
        :return: 2-tuple: list of 3-tuple (Qx, Qy, Qz)
                          list of double (Det_Counts)
        """
        # get PeaksWorkspace
        if AnalysisDataService.doesExist(self._myPeakWorkspaceName) is False:
            raise RuntimeError('PeaksWorkspace %s does not exist.' % self._myPeakWorkspaceName)

        peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)

        # get peak center, peak intensity and etc.
        peak_center_list = list()
        peak_intensity_list = list()
        num_peaks = peak_ws.getNumberPeaks()
        for i_peak in range(num_peaks):
            peak_i = peak_ws.getPeak(i_peak)
            center_i = peak_i.getQSampleFrame()
            intensity_i = peak_i.getIntensity()
            peak_center_list.append((center_i.X(), center_i.Y(), center_i.Z()))
            peak_intensity_list.append(intensity_i)
        # END-FOR

        return peak_center_list, peak_intensity_list
Example #23
    def decode(self, obj_dic, project_path=None):
        """
        Decode an InstrumentView dictionary from a project save and return the created object
        :param obj_dic: Dict; A dictionary containing the information for an InstrumentView
        :param project_path: String; The location of the project save location
        :return: InstrumentView's View; The View object with correct state is returned.
        """
        load_mask = True

        if obj_dic is None:
            return None

        if project_path is None:
            project_path = ""
            load_mask = False

        # Make the widget
        ws = ADS.retrieve(obj_dic["workspaceName"])
        instrument_view = InstrumentViewPresenter(ws).container
        instrument_widget = instrument_view.widget

        # Then decode: set the values from the dictionary
        self.widget_decoder.decode(obj_dic, instrument_widget, project_path, load_mask)

        # Show the end result
        return instrument_view
Example #24
def get_moving_motor_information(spice_table_name):
    """

    :param spice_table_name:
    :return:
    """
    table = AnalysisDataService.retrieve(spice_table_name)

    col_names = table.getColumnNames()
    pt_index = col_names.index('Pt.')
    omega_index = col_names.index('omega')
    chi_index = col_names.index('chi')
    phi_index = col_names.index('phi')

    col_tup_dict = {'omega': omega_index, 'phi': phi_index, 'chi': chi_index}

    std_list = list()
    motor_vector_dict = dict()
    for motor in col_tup_dict:
        motor_index = col_tup_dict[motor]
        motor_vector = numpy.array(table.column(motor_index))
        motor_vector_dict[motor] = motor_vector
        std_list.append((motor_vector.std(), motor))
    std_list.sort()
    moving_motor = std_list[-1][1]
    pt_list = table.column(pt_index)

    motor_pos_dict = dict()
    for i_m in range(len(pt_list)):
        motor_pos_dict[pt_list[i_m]] = motor_vector_dict[moving_motor][i_m]

    return moving_motor, motor_pos_dict
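Example #25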
    def test_updateDouble(self):
        """ Test for update a double value
        """
        # tablews = self.create_TableWorkspace()

        alg_init = run_algorithm("CreateEmptyTableWorkspace", OutputWorkspace="TestTableWorkspace")
        self.assertTrue(alg_init.isExecuted())

        tablews = AnalysisDataService.retrieve("TestTableWorkspace")

        tablews.addColumn("str", "Name")
        tablews.addColumn("double", "Value")
        tablews.addColumn("str", "FitOrTie")

        tablews.addRow(["A", 1.34, "Fit"])
        tablews.addRow(["B", 2.34, "Tie"])
        tablews.addRow(["S", 3.34, "Tie"])

        alg_test = run_algorithm("UpdatePeakParameterTableValue", InputWorkspace=alg_init.getPropertyValue("OutputWorkspace"),
                Column="Value", ParameterNames=["A"], NewFloatValue=1.00)
        
        self.assertTrue(alg_test.isExecuted())

        newvalue_A = tablews.cell(0, 1)

        self.assertEqual(newvalue_A,  1.00)

        return
Example #26
    def __init__(self, data_file, workspace_name=None):
        self.errors = []
        if HAS_MANTID:
            try:
                if workspace_name is None:
                    self.data_ws = "__raw_data_file"
                else:
                    self.data_ws = str(workspace_name)
                api.HFIRLoad(Filename=str(data_file), OutputWorkspace=self.data_ws)
                ws = AnalysisDataService.retrieve(self.data_ws)
                x = ws.dataX(0)
                self.wavelength = (x[0]+x[1])/2.0
                self.wavelength_spread = x[1]-x[0]
                self.sample_detector_distance = ws.getRun().getProperty("sample-detector-distance").value
                self.sample_detector_distance_offset = ws.getRun().getProperty("sample-detector-distance-offset").value
                self.sample_si_window_distance = ws.getRun().getProperty("sample-si-window-distance").value
                self.sample_detector_distance_moved = ws.getRun().getProperty("sample_detector_distance").value

                self.sample_thickness = ws.getRun().getProperty("sample-thickness").value
                self.beam_diameter = ws.getRun().getProperty("beam-diameter").value

                logger.notice("Loaded data file: %s" % data_file)
            except Exception:
                logger.error("Error loading data file:\n%s" % sys.exc_info()[1])
                self.errors.append("Error loading data file:\n%s" % sys.exc_info()[1])
Example #27
    def test_LoadHKLFile(self):
        """ Test to load a .hkl file
        """
        # 1. Create a test file
        hklfilename = "test.hkl"
        self._createHKLFile(hklfilename)

        # 2.
        alg_test = run_algorithm("LoadFullprofFile", Filename = hklfilename,
                OutputWorkspace = "Foo", PeakParameterWorkspace = "PeakParameterTable")

        self.assertTrue(alg_test.isExecuted())

        # 3. Verify some values
        tablews = AnalysisDataService.retrieve("PeakParameterTable")
        self.assertEqual(4, tablews.rowCount())

        #   alpha of (11 5 1)/Row 0
        self.assertEqual(0.34252, tablews.cell(0, 3))

        # 4. Delete the test hkl file
        os.remove(hklfilename)
        AnalysisDataService.remove("PeakParameterTable")
        AnalysisDataService.remove("Foo")

        return
Example #28
    def test_calculation_fitsample(self):
        OutputWorkspaceName = "outputws"
        # generate EPP table
        table = FindEPP(self._input_ws)

        alg_test = run_algorithm("TOFTOFConvertTofToDeltaE", InputWorkspace=self._input_ws,
                                 EPPTable=table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)

        # create reference data for X axis
        dataX = np.linspace(4005.75, 7995.75, 381)
        tel = 6005.25
        factor = m_n*1e+15/eV
        newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
        # compare
        self.assertTrue(np.allclose(newX, wsoutput.readX(0)))           # sdd = 4.0
        self.assertTrue(np.allclose(4.0*newX, wsoutput.readX(1)))       # sdd = 8.0

        # create reference data for Y axis and compare to the output
        tof = dataX[:-1] + 5.25
        newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
        self.assertTrue(np.allclose(newY, wsoutput.readY(0)))           # sdd = 4.0
        self.assertTrue(np.allclose(newY/4.0, wsoutput.readY(1)))       # sdd = 8.0

        DeleteWorkspace(wsoutput)
        DeleteWorkspace(table)
Example #29
    def test_LoadNormalizeToDuration(self):
        outputWorkspaceName = "LoadDNSLegacyTest_Test1"
        filename = "dn134011vana.d_dat"
        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='duration',
                                 OutputWorkspace=outputWorkspaceName, CoilCurrentsTable=self.curtable)
        self.assertTrue(alg_test.isExecuted())

        # Verify some values
        ws = AnalysisDataService.retrieve(outputWorkspaceName)
        # dimensions
        self.assertEqual(24, ws.getNumberHistograms())
        self.assertEqual(2,  ws.getNumDims())
        # data array
        self.assertAlmostEqual(31461.0/600.0, ws.readY(1)[0])
        self.assertAlmostEqual(13340.0/600.0, ws.readY(23)[0])
        # sample logs
        run = ws.getRun()
        self.assertEqual(-8.54, run.getProperty('deterota').value)
        self.assertEqual(8332872, run.getProperty('mon_sum').value)
        self.assertEqual('duration', run.getProperty('normalized').value)
        # check whether detector bank is rotated
        det = ws.getDetector(0)
        self.assertAlmostEqual(8.54, ws.detectorSignedTwoTheta(det)*180/pi)
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
        return
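Example #30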
def _hasWorkspaceID(workspace_name, workspace_id):
    """Check that a workspace has the given type"""
    workspace = AnalysisDataService.retrieve(workspace_name)
    if isinstance(workspace, WorkspaceGroup):
        return workspace[0].id() == workspace_id
    else:
        return workspace.id() == workspace_id
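A minimal usage sketch (the workspace name is a placeholder; workspace IDs are the strings returned by Workspace.id(), such as "Workspace2D" or "EventWorkspace"):

    # Hypothetical usage: "sample_scatter" is assumed to be in the ADS.
    if _hasWorkspaceID('sample_scatter', 'EventWorkspace'):
        ws = AnalysisDataService.retrieve('sample_scatter')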
Example #31
 def display_workspace(self, name):
     from mantidqt.widgets.workspacedisplay.matrix.presenter import MatrixWorkspaceDisplay
     from mantidqt.widgets.workspacedisplay.table.presenter import TableWorkspaceDisplay
     from mantidqt.plotting.functions import plot
     import matplotlib.pyplot
     if AnalysisDataService.doesExist(name):
         ws = AnalysisDataService.retrieve(name)
         if isinstance(ws, MatrixWorkspace):
             presenter = MatrixWorkspaceDisplay(ws, plot=plot)
             presenter.show_view()
         elif isinstance(ws, ITableWorkspace):
             presenter = TableWorkspaceDisplay(ws, plot=matplotlib.pyplot)
             presenter.show_view()
Example #32
 def test_input_run_is_loaded_if_not_in_ADS(self):
     args = self._default_options
     args['InputRunList'] = '13460'
     outputs = ['IvsQ_13460', 'IvsQ_binned_13460', 'TOF_13460', 'TOF']
     self._assert_run_algorithm_succeeds(args, outputs)
     history = [
         'ReflectometryISISPreprocess', 'ReflectometryReductionOneAuto',
         'GroupWorkspaces'
     ]
     self._check_history(AnalysisDataService.retrieve('IvsQ_binned_13460'),
                         history)
Example #33
 def testTable(self):
     # tests that correct table is created
     OutputWorkspaceName = "outputws1"
     alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
     self.assertTrue(alg_test.isExecuted())
     wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
     self.assertEqual(2, wsoutput.rowCount())
     self.assertEqual(9, wsoutput.columnCount())
     columns = ['WorkspaceIndex', 'PeakCentre', 'PeakCentreError', 'Sigma', 'SigmaError', 'Height', 'HeightError', 'chiSq', 'FitStatus']
     self.assertEqual(columns, wsoutput.getColumnNames())
     DeleteWorkspace(wsoutput)
Example #34
 def runTest(self):
     enginx = EnginX(vanadium_run="ENGINX236516",
                     focus_runs=["ENGINX299080"],
                     save_dir=CWDIR,
                     full_inst_calib_path=FULL_CALIB,
                     ceria_run="ENGINX193749",
                     group=GROUP.TEXTURE20)
     enginx.main(plot_cal=False, plot_foc=False)
     # store workspaces for validation
     self._ws_foc = ADS.retrieve(
         "299080_engggui_focusing_output_ws_Texture20")
Example #35
 def test_debug_option_outputs_extra_workspaces_with_overridden_names(self):
     self._create_workspace(13460, 'TOF_')
     args = self._default_options
     args['InputRunList'] = '13460'
     args['Debug'] = True
     args.update(self._all_output_names)
     outputs = ['testIvsQ', 'testIvsQBin', 'testIvsLam', 'TOF_13460', 'TOF']
     self._assert_run_algorithm_succeeds(args, outputs)
     history = ['ReflectometryReductionOneAuto', 'GroupWorkspaces']
     self._check_history(AnalysisDataService.retrieve('testIvsQBin'),
                         history)
Example #36
 def update_fit(self, fit_props):
     for fit_prop in fit_props:
         wsname = fit_prop['properties']['InputWorkspace']
         self._fit_results[wsname] = {
             'model': fit_prop['properties']['Function'],
             'status': fit_prop['status']
         }
         self._fit_results[wsname]['results'] = defaultdict(
             list)  # {function_param: [[Y1, E1], [Y2,E2],...] }
         fnames = [
             x.split('=')[-1] for x in findall(
                 'name=[^,]*', fit_prop['properties']['Function'])
         ]
         # get num params for each function (first elem empty as str begins with 'name=')
         # need to remove ties and constraints which are enclosed in ()
         nparams = [
             s.count('=')
             for s in sub(r'=\([^)]*\)', '', fit_prop['properties']
                          ['Function']).split('name=')[1:]
         ]
         params_dict = ADS.retrieve(fit_prop['properties']['Output'] +
                                    '_Parameters').toDict()
         # loop over rows in output workspace to get value and error for each parameter
         istart = 0
         for ifunc, fname in enumerate(fnames):
             for iparam in range(0, nparams[ifunc]):
                 irow = istart + iparam
                 key = '_'.join([
                     fname, params_dict['Name'][irow].split('.')[-1]
                 ])  # funcname_param
                 self._fit_results[wsname]['results'][key].append([
                     params_dict['Value'][irow], params_dict['Error'][irow]
                 ])
                 if key in fit_prop['peak_centre_params']:
                     # param corresponds to a peak centre in TOF which we also need in dspacing
                     # add another entry into the results dictionary
                     key_d = key + "_dSpacing"
                     try:
                         dcen = self._convert_TOF_to_d(
                             params_dict['Value'][irow], wsname)
                         dcen_er = self._convert_TOFerror_to_derror(
                             params_dict['Error'][irow], dcen, wsname)
                         self._fit_results[wsname]['results'][key_d].append(
                             [dcen, dcen_er])
                     except (ValueError, RuntimeError) as e:
                         logger.warning(
                             f"Unable to output {key_d} parameters for TOF={params_dict['Value'][irow]}: "
                             + str(e))
             istart += nparams[ifunc]
         # append the cost function value (in this case always chisq/DOF) as don't let user change cost func
         # always last row in parameters table
         self._fit_results[wsname]['costFunction'] = params_dict['Value'][
             -1]
     self.create_fit_tables()
Example #37
    def split_to_single_bank(self, gss_ws_name):
        """
        Split a multiple-bank GSAS workspace to a set of single-spectrum MatrixWorkspace
        Parameters
        ----------
        gss_ws_name

        Returns
        -------
        3-tuple: name of the grouped workspace, list of single-bank workspace names, list of bank angles
        """
        # check
        assert isinstance(gss_ws_name, str)
        assert AnalysisDataService.doesExist(gss_ws_name)

        # get workspace
        gss_ws = AnalysisDataService.retrieve(gss_ws_name)

        ws_list = list()
        angle_list = list()

        if gss_ws.getNumberHistograms() == 1:
            # input is already a single-spectrum workspace
            ws_list.append(gss_ws_name)
        else:
            num_spec = gss_ws.getNumberHistograms()

            for i_ws in range(num_spec):
                # split this one to a single workspace
                out_ws_name = '%s_bank%d' % (gss_ws_name, i_ws + 1)
                # also can use ExtractSpectra()
                simpleapi.CropWorkspace(InputWorkspace=gss_ws_name,
                                        OutputWorkspace=out_ws_name,
                                        StartWorkspaceIndex=i_ws,
                                        EndWorkspaceIndex=i_ws)
                assert AnalysisDataService.doesExist(out_ws_name)
                ws_list.append(out_ws_name)

            # END-FOR
        # END-IF

        # calculate bank angles
        for ws_name in ws_list:
            bank_angle = calculate_bank_angle(ws_name)
            angle_list.append(bank_angle)

        # group all the workspace
        ws_group_name = gss_ws_name + '_group'
        simpleapi.GroupWorkspaces(InputWorkspaces=ws_list,
                                  OutputWorkspace=ws_group_name)

        self._braggDataDict[ws_group_name] = (gss_ws_name, ws_list)

        return ws_group_name, ws_list, angle_list
Example #38
 def test_loading_run_with_instrument_prefix_in_name(self):
     args = self._default_options
     args['InputRunList'] = 'INTER13460'
     outputs = ['IvsQ_13460', 'IvsQ_binned_13460', 'TOF_13460', 'TOF']
     self._assert_run_algorithm_succeeds(args, outputs)
     history = [
         'ReflectometryISISPreprocess', 'ReflectometryReductionOneAuto',
         'GroupWorkspaces'
     ]
     self._check_history(AnalysisDataService.retrieve('IvsQ_binned_13460'),
                         history)
Example #39
def _add_workspace_to_group(group_name, workspace_name):
    if AnalysisDataService.doesExist(group_name):
        workspaces_to_group = AnalysisDataService.retrieve(
            group_name).getNames()
    else:
        workspaces_to_group = []

    if workspace_name not in workspaces_to_group:
        workspaces_to_group.append(workspace_name)
        WorkspaceGroupDefinition().add_workspaces_to_group(
            group_name, workspaces_to_group)
        WorkspaceGroupDefinition().execute_grouping()
Example #40
    def _expand_groups(self):
        """expand workspace groups"""
        workspaces = self.getProperty("InputWorkspace").value
        input_workspaces = []
        for wsname in workspaces:
            wks = AnalysisDataService.retrieve(wsname)
            if isinstance(wks, WorkspaceGroup):
                input_workspaces.extend(wks.getNames())
            else:
                input_workspaces.append(wsname)

        return input_workspaces
Example #41
 def runTest(self):
     enginx = EnginX(vanadium_run="ENGINX236516",
                     focus_runs=["ENGINX299080"],
                     save_dir=CWDIR,
                     full_inst_calib_path=FULL_CALIB,
                     ceria_run="ENGINX193749",
                     group=GROUP.CROPPED,
                     spectrum_num="1-1200")  # North
     enginx.main(plot_cal=False, plot_foc=False)
     # store workspaces for validation
     self._ws_foc = ADS.retrieve(
         "299080_engggui_focusing_output_ws_Cropped")
Example #42
def process_vanadium(vanadium_path, calibration, full_calib):
    van_run = path_handling.get_run_number_from_path(
        vanadium_path, calibration.get_instrument())
    van_foc_name = CURVES_PREFIX + calibration.get_group_suffix()
    if ADS.doesExist(van_foc_name):
        ws_van_foc = ADS.retrieve(van_foc_name)
    else:
        if ADS.doesExist(van_run):
            ws_van = ADS.retrieve(
                van_run)  # will exist if have only changed the ROI
        else:
            ws_van = _load_run_and_convert_to_dSpacing(
                vanadium_path, calibration.get_instrument(), full_calib)
            if not ws_van:
                raise RuntimeError(
                    f"vanadium run {van_run} has no proton_charge - "
                    f"please supply a valid vanadium run to focus.")
        ws_van_foc = _focus_run_and_apply_roi_calibration(
            ws_van, calibration, ws_foc_name=van_foc_name)
        ws_van_foc = _smooth_vanadium(ws_van_foc)
    return ws_van_foc, van_run
Example #43
 def testFitOutputWorkspacesAreDeleted(self):
     OutputWorkspaceName = "outputws1"
     alg_test = run_algorithm("FindEPP",
                              InputWorkspace=self._input_ws,
                              OutputWorkspace=OutputWorkspaceName,
                              Version=1)
     wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
     DeleteWorkspace(wsoutput)
     oldOption = mantid.config['MantidOptions.InvisibleWorkspaces']
     mantid.config['MantidOptions.InvisibleWorkspaces'] = '1'
     self.assertEqual(mtd.size(), 1)  # Only self._input_ws exists.
     mantid.config['MantidOptions.InvisibleWorkspaces'] = oldOption
Example #44
    def test_key_operator_does_same_as_retrieve(self):
        wsname = 'ADSTest_test_key_operator_does_same_as_retrieve'
        self._run_createws(wsname)
        ws_from_op = AnalysisDataService[wsname]
        ws_from_method = AnalysisDataService.retrieve(wsname)

        self.do_check_for_matrix_workspace_type(ws_from_op)
        self.do_check_for_matrix_workspace_type(ws_from_method)

        self.assertEqual(ws_from_op.name(), ws_from_method.name())
        self.assertEqual(ws_from_op.getMemorySize(),
                         ws_from_method.getMemorySize())
Example #45
    def test_update_plot_guess_will_use_tmp_workspace_with_data_range_if_plot_range_type_is_selected(self):
        guess_workspace_name = "__frequency_domain_analysis_fitting_guessName1"
        self.model.dataset_names = self.dataset_names
        self.model.single_fit_functions = self.single_fit_functions
        self.model.start_xs = [0.0, 1.0]
        self.model.end_xs = [10.0, 11.0]
        self.model.current_dataset_index = 0
        self.model.plot_guess = True
        self.model.plot_guess_type = X_FROM_FIT_RANGE

        self.model.context = mock.Mock()
        self.model._double_pulse_enabled = mock.Mock(return_value=False)
        self.model._get_plot_guess_name = mock.Mock(return_value=guess_workspace_name)
        self.model._get_guess_parameters = mock.Mock(return_value=['func', 'ws'])
        self.model.update_plot_guess()

        guess_workspace = AnalysisDataService.retrieve(guess_workspace_name)
        data_workspace = AnalysisDataService.retrieve(self.dataset_names[0])
        self.assertEqual(guess_workspace.dataX(0).min(), 0.0)
        self.assertEqual(guess_workspace.dataX(0).max(), 10.0)
        self.assertEqual(guess_workspace.dataX(0).size, data_workspace.blocksize())
Example #46
def get_region_of_interest(mask_ws_name):
    """
    get region of interest from mask workspace
    :param mask_ws_name:
    :return:
    """
    # check input
    assert isinstance(mask_ws_name, str), 'Mask workspace name {0} must be a string but not a {1}' \
                                          ''.format(mask_ws_name, type(mask_ws_name))
    mask_ws = ADS.retrieve(mask_ws_name)

    # construct a 2D matrix
    size_x = size_y = int(math.sqrt(mask_ws.getNumberHistograms()))
    mask_matrix = numpy.ndarray(shape=(size_x, size_y), dtype='int')

    # mask or unmask all the matrix element according to mask workspace
    for iws in range(mask_ws.getNumberHistograms()):
        det_id = mask_ws.getDetector(iws).getID()
        pixel_2d_id = det_id // size_y, det_id % size_y
        mask_matrix[pixel_2d_id] = int(mask_ws.isMasked(iws))
    # END-FOR

    # find lower left corner
    lower_left_corner = None
    for ir in range(size_y):
        if mask_matrix[ir].min() == 0:
            ll_row = ir
            ret_value = numpy.where(mask_matrix[ir] == 0)
            ll_col = ret_value[0][0]
            lower_left_corner = ll_row, ll_col
            break
        # END-IF
    # END-FOR

    # find upper right corner
    upper_right_corner = None
    for ir in range(size_y - 1, -1, -1):
        if mask_matrix[ir].min() == 0:
            ur_row = ir
            ret_value = numpy.where(mask_matrix[ir] == 0)
            ur_col = ret_value[0][-1]
            upper_right_corner = ur_row, ur_col
            break
        # END-IF
    # END-FOR

    # check before return
    if lower_left_corner is None or upper_right_corner is None:
        raise RuntimeError(
            'Unable to find the lower left corner {0} or the upper right corner {1} of the region of interest'
            ''.format(lower_left_corner, upper_right_corner))

    return lower_left_corner, upper_right_corner
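A minimal usage sketch (the mask workspace name is a placeholder; the function assumes a square detector, e.g. 256 x 256 pixels, with an unmasked rectangular region):

    # Hypothetical usage: "roi_mask" is assumed to be a MaskWorkspace in the ADS.
    (ll_row, ll_col), (ur_row, ur_col) = get_region_of_interest('roi_mask')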
Example #47
    def test_sum(self):
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan", VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table, OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)

        # check whether sum is calculated correctly, for theta=0, dwf=1
        y_sum = sum(self._input_ws.readY(0)[27:75])
        self.assertAlmostEqual(y_sum, wsoutput.readY(0)[0])

        DeleteWorkspace(wsoutput)
Example #48
def _getRunNumberAsString(workspace_name):
    """Get the run number for a workspace. If it's a workspace group, get
    the run number from the first child workspace."""
    try:
        workspace = AnalysisDataService.retrieve(workspace_name)
        if not isinstance(workspace, WorkspaceGroup):
            return str(workspace.getRunNumber())
        # Get first child in the group
        return str(workspace[0].getRunNumber())
    except Exception:
        raise RuntimeError('Could not find run number for workspace ' +
                           workspace_name)
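Example #49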
 def test_overriding_output_names_includes_all_specified_outputs(self):
     self._create_workspace(13460, 'TOF_')
     args = self._default_options
     args['InputRunList'] = '13460'
     args.update(self._all_output_names)
     # Current behaviour is that the optional workspaces are output even if
     # Debug is not set
     outputs = ['testIvsQ', 'testIvsQBin', 'testIvsLam', 'TOF_13460', 'TOF']
     self._assert_run_algorithm_succeeds(args, outputs)
     history = ['ReflectometryReductionOneAuto', 'GroupWorkspaces']
     self._check_history(AnalysisDataService.retrieve('testIvsQBin'),
                         history)
Example #50
    def test_setGetMonitorWS(self):
        run_algorithm('CreateWorkspace', OutputWorkspace='ws1', DataX=[
                      1., 2., 3.], DataY=[2., 3.], DataE=[2., 3.], UnitX='TOF')
        run_algorithm('CreateWorkspace', OutputWorkspace='ws_mon', DataX=[
                      1., 2., 3.], DataY=[2., 3.], DataE=[2., 3.], UnitX='TOF')

        ws1 = AnalysisDataService.retrieve('ws1')
        try:
            monWs = ws1.getMonitorWorkspace()
            GotIt = True
        except RuntimeError:
            GotIt = False
        self.assertFalse(GotIt)

        monWs = AnalysisDataService.retrieve('ws_mon')
        ws1.setMonitorWorkspace(monWs)
        monWs.setTitle("My Fake Monitor workspace")

        monWs1 = ws1.getMonitorWorkspace()
        self.assertEqual(monWs.getTitle(), monWs1.getTitle())

        ws1.clearMonitorWorkspace()
        try:
            monWs1 = ws1.getMonitorWorkspace()
            GotIt = True
        except RuntimeError:
            GotIt = False
        self.assertFalse(GotIt)

        # Check weak pointer issues
        ws1.setMonitorWorkspace(monWs)
        wms = ws1.getMonitorWorkspace()

        allFine = False
        try:
            ws1.setMonitorWorkspace(wms)
            allFine = True
        except ValueError:
            pass
        self.assertTrue(allFine)
Example #51
    def test_DNSFRVanaCorrection(self):
        outputWorkspaceName = "DNSFlippingRatioCorrTest_Test6"
        # create fake vanadium data workspaces
        dataY = np.array([1811., 2407., 3558., 3658., 3352., 2321., 2240., 2617., 3245., 3340., 3338., 3310.,
                          2744., 3212., 1998., 2754., 2791., 2509., 3045., 3429., 3231., 2668., 3373., 2227.])
        __sf_vanaws = create_fake_dns_workspace('__sf_vanaws', dataY=dataY/58.0, flipper='ON')
        self.workspaces.append('__sf_vanaws')
        dataY = np.array([2050., 1910., 2295., 2236., 1965., 1393., 1402., 1589., 1902., 1972., 2091., 1957.,
                          1593., 1952., 1232., 1720., 1689., 1568., 1906., 2001., 2051., 1687., 1975., 1456.])
        __nsf_vanaws = create_fake_dns_workspace('__nsf_vanaws', dataY=dataY/58.0, flipper='OFF')
        self.workspaces.append('__nsf_vanaws')
        # consider normalization=1.0 as set in self._create_fake_workspace
        dataws_sf = __sf_vanaws - self.__sf_bkgrws
        dataws_nsf = __nsf_vanaws - self.__nsf_bkgrws
        alg_test = run_algorithm("DNSFlippingRatioCorr", SFDataWorkspace=dataws_sf,
                                 NSFDataWorkspace=dataws_nsf, SFNiCrWorkspace=self.__sf_nicrws.getName(),
                                 NSFNiCrWorkspace=self.__nsf_nicrws.getName(), SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                                 NSFBkgrWorkspace=self.__nsf_bkgrws.getName(), SFOutputWorkspace=outputWorkspaceName+'SF',
                                 NSFOutputWorkspace=outputWorkspaceName+'NSF')

        self.assertTrue(alg_test.isExecuted())
        # check whether the data are correct
        ws_sf = AnalysisDataService.retrieve(outputWorkspaceName + 'SF')
        ws_nsf = AnalysisDataService.retrieve(outputWorkspaceName + 'NSF')
        # dimensions
        self.assertEqual(24, ws_sf.getNumberHistograms())
        self.assertEqual(24, ws_nsf.getNumberHistograms())
        self.assertEqual(2, ws_sf.getNumDims())
        self.assertEqual(2, ws_nsf.getNumDims())
        # data array: for vanadium the ratio sf/nsf must be around 2
        ws = ws_sf/ws_nsf
        for i in range(24):
            self.assertAlmostEqual(2.0, np.around(ws.readY(i)))

        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'SF')
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'NSF')
        run_algorithm("DeleteWorkspace", Workspace=dataws_sf)
        run_algorithm("DeleteWorkspace", Workspace=dataws_nsf)
        run_algorithm("DeleteWorkspace", Workspace=ws)
        return
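The expected factor of 2 is the standard signature of (almost purely)
spin-incoherent scattering from vanadium: two thirds of the scattered
intensity appears in the spin-flip channel and one third in the non-spin-flip
channel. A quick numeric restatement of that expectation:

# Spin-incoherent scattering splits intensity 2/3 (SF) to 1/3 (NSF),
# hence the asserted ratio of about 2.
sf_fraction, nsf_fraction = 2.0 / 3.0, 1.0 / 3.0
assert round(sf_fraction / nsf_fraction) == 2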
Example no. 52
    def test_DNSFRSelfCorrection(self):
        outputWorkspaceName = "DNSFlippingRatioCorrTest_Test4"
        # consider normalization=1.0 as set in self._create_fake_workspace
        dataws_sf = self.__sf_nicrws - self.__sf_bkgrws
        dataws_nsf = self.__nsf_nicrws - self.__nsf_bkgrws
        alg_test = run_algorithm("DNSFlippingRatioCorr",
                                 SFDataWorkspace=dataws_sf,
                                 NSFDataWorkspace=dataws_nsf,
                                 SFNiCrWorkspace=self.__sf_nicrws.getName(),
                                 NSFNiCrWorkspace=self.__nsf_nicrws.getName(),
                                 SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                                 NSFBkgrWorkspace=self.__nsf_bkgrws.getName(),
                                 SFOutputWorkspace=outputWorkspaceName + 'SF',
                                 NSFOutputWorkspace=outputWorkspaceName + 'NSF')

        self.assertTrue(alg_test.isExecuted())
        # check whether the data are correct
        ws_sf = AnalysisDataService.retrieve(outputWorkspaceName + 'SF')
        ws_nsf = AnalysisDataService.retrieve(outputWorkspaceName + 'NSF')
        # dimensions
        self.assertEqual(24, ws_sf.getNumberHistograms())
        self.assertEqual(24, ws_nsf.getNumberHistograms())
        self.assertEqual(2, ws_sf.getNumDims())
        self.assertEqual(2, ws_nsf.getNumDims())
        # data array: spin-flip must be zero
        for i in range(24):
            self.assertAlmostEqual(0.0, ws_sf.readY(i)[0])
        # data array: non spin-flip must be the sum nsf + sf
        nsf = np.array(dataws_nsf.extractY())
        sf = np.array(dataws_sf.extractY())
        refdata = nsf + sf
        for i in range(24):
            self.assertAlmostEqual(refdata[i][0], ws_nsf.readY(i)[0])

        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'SF')
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'NSF')
        run_algorithm("DeleteWorkspace", Workspace=dataws_sf)
        run_algorithm("DeleteWorkspace", Workspace=dataws_nsf)
        return
Example no. 53
    def test_that_can_load_isis_nexus_file_with_event_data_and_multi_period(
            self):
        # Arrange
        state = SANSLoadTest._get_simple_state(
            sample_scatter="LARMOR00013065.nxs",
            calibration="80tubeCalibration_18-04-2016_r9330-9335.nxs")

        # Act
        output_workspace_names = {
            "SampleScatterWorkspace": "sample_scatter",
            "SampleScatterMonitorWorkspace": "sample_monitor_scatter"
        }
        load_alg = self._run_load(
            state,
            publish_to_cache=True,
            use_cached=True,
            move_workspace=False,
            output_workspace_names=output_workspace_names)

        # Assert
        expected_number_of_workspaces = [4, 0, 0, 0, 0, 0]
        expected_number_on_ads = 1
        workspace_type = [EventWorkspace, None, None, None, None, None]
        self._do_test_output(load_alg, expected_number_of_workspaces,
                             expected_number_on_ads, workspace_type)

        # Check that calibration is added
        self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))

        # Confirm that the ADS workspace contains the calibration file
        try:
            AnalysisDataService.retrieve(
                "80tubeCalibration_18-04-2016_r9330-9335")
            on_ads = True
        except RuntimeError:
            on_ads = False
        self.assertTrue(on_ads)

        # Cleanup
        remove_all_workspaces_from_ads()
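Since AnalysisDataService also exposes doesExist (used in a later snippet),
the try/except probe for the calibration workspace can be written as a single
assertion; a minimal equivalent sketch inside the same test method:

        # Equivalent existence check without the try/except scaffolding:
        self.assertTrue(AnalysisDataService.doesExist(
            "80tubeCalibration_18-04-2016_r9330-9335"))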
Example no. 54
    def test_slicing_loads_input_run_if_not_in_ADS(self):
        args = self._default_options
        args.update(self._default_slice_options_real_run)
        args['InputRunList'] = '38415'
        outputs = self._expected_real_time_sliced_outputs
        self._assert_run_algorithm_succeeds(args, outputs)
        history = [
            'ReflectometryReductionOneAuto', 'ReflectometryReductionOneAuto',
            'ReflectometryReductionOneAuto', 'GroupWorkspaces'
        ]
        self._check_history(
            AnalysisDataService.retrieve('IvsQ_binned_38415_sliced_0_210'),
            history, False)
Example no. 55
    def test_debug_option_outputs_extra_workspaces(self):
        self._create_workspace(13460, 'TOF_')
        args = self._default_options
        args['InputRunList'] = '13460'
        args['Debug'] = True
        outputs = [
            'IvsQ_13460', 'IvsQ_binned_13460', 'IvsLam_13460', 'TOF_13460',
            'TOF'
        ]
        self._assert_run_algorithm_succeeds(args, outputs)
        history = ['ReflectometryReductionOneAuto', 'GroupWorkspaces']
        self._check_history(AnalysisDataService.retrieve('IvsQ_binned_13460'),
                            history)
Example no. 56
    def test_input_run_is_reloaded_if_in_ADS_with_unknown_prefix(self):
        self._create_workspace(13460, 'TEST_')
        args = self._default_options
        args['InputRunList'] = '13460'
        outputs = [
            'IvsQ_13460', 'IvsQ_binned_13460', 'TEST_13460', 'TOF_13460', 'TOF'
        ]
        self._assert_run_algorithm_succeeds(args, outputs)
        history = [
            'LoadNexus', 'ReflectometryReductionOneAuto', 'GroupWorkspaces'
        ]
        self._check_history(AnalysisDataService.retrieve('IvsQ_binned_13460'),
                            history)
Example no. 57
def _create_flat_background_test_workspace(workspace_name):
    LoadNexusProcessed(Filename="LOQ48127", OutputWorkspace=workspace_name)
    workspace = AnalysisDataService.retrieve(workspace_name)
    # Rebin to only have four values at 11, 31, 51, 70.5
    workspace = Rebin(workspace, "1,20,80")
    # For each spectrum we set the first two entries to 2 and the other two entries to 4.
    for index in range(workspace.getNumberHistograms()):
        data_y = workspace.dataY(index)
        data_y[0] = 2.
        data_y[1] = 2.
        data_y[2] = 4.
        data_y[3] = 4.
    return workspace
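For reference, the Rebin parameter string "1,20,80" reads as start, step, end,
so the bin edges are 1, 21, 41, 61 and 80 (the last bin is clipped at the end
value), giving the four centres quoted in the comment. A minimal sketch, not
part of the test, verifying the arithmetic:

import numpy as np

edges = np.array([1., 21., 41., 61., 80.])  # edges implied by "1,20,80"
centres = 0.5 * (edges[:-1] + edges[1:])
print(centres)  # [11.  31.  51.  70.5]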
Example no. 58
    def _setUniformNumberOfSlices(self, alg, workspace_name):
        """If slicing by a specified number of slices is requested, find the time
        interval that yields this number of even time slices and set the relevant
        property on the given slicing algorithm."""
        if self.getProperty(Prop.NUMBER_OF_SLICES).isDefault:
            return
        number_of_slices = self.getProperty(Prop.NUMBER_OF_SLICES).value
        run = AnalysisDataService.retrieve(workspace_name).run()
        total_duration = (run.endTime() - run.startTime()).total_seconds()
        slice_duration = total_duration / number_of_slices
        alg.setProperty("TimeInterval", slice_duration)
        self.log().information('Slicing ' + workspace_name + ' into ' + str(number_of_slices)
                               + ' even slices of duration ' + str(slice_duration))
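The arithmetic is easy to check by hand. A minimal sketch with hypothetical
numbers (none of them come from the snippet above): a 210-second run split
into 3 slices gives a TimeInterval of 70 seconds.

# Hypothetical values for illustration only.
number_of_slices = 3      # stand-in for Prop.NUMBER_OF_SLICES
total_duration = 210.0    # seconds, as run.endTime() - run.startTime() would yield
slice_duration = total_duration / number_of_slices
assert slice_duration == 70.0  # the value set as "TimeInterval"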
Example no. 59
def _removeWorkspace(workspace_name):
    """Remove the workspace with the given name, including any child workspaces if it
    is a group. If a corresponding monitors workspace exists, remove that too."""
    if AnalysisDataService.doesExist(workspace_name):
        workspace = AnalysisDataService.retrieve(workspace_name)
        if isinstance(workspace, WorkspaceGroup):
            # Remove child workspaces first
            while workspace.getNumberOfEntries():
                _removeWorkspace(workspace[0].name())
        AnalysisDataService.remove(workspace_name)
    # If a corresponding monitors workspace also exists, remove that too
    if AnalysisDataService.doesExist(_monitorWorkspace(workspace_name)):
        _removeWorkspace(_monitorWorkspace(workspace_name))
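A hedged usage sketch: for a group, the children are removed one by one before
the group itself; afterwards the helper checks for a companion monitors
workspace via _monitorWorkspace, which is defined elsewhere and presumably
maps a name to its '_monitors' counterpart. The names below are hypothetical.

# Hypothetical workspace names for illustration.
_removeWorkspace('TOF_13460')  # plain workspace: removed directly
_removeWorkspace('TOF')        # a group: children removed first, then the group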
Example no. 60
    def clear_fit_result_lines(self):
        """
        Delete the fit curves.
        """
        for line in self.get_lines():
            if line.get_label() == self.sequential_fit_line:
                line.remove()

        if self.fit_result_ws_name:
            ws = AnalysisDataService.retrieve(self.fit_result_ws_name)
            self.get_axes().remove_workspace_artists(ws)
            self.fit_result_ws_name = ""
        self.update_legend()