Example #1
    def test_removing_from_ads_calls_any_change_handle(self):
        CreateSampleWorkspace(OutputWorkspace="ws1")

        self.project.anyChangeHandle = mock.MagicMock()
        ADS.remove("ws1")

        self.assertEqual(1, self.project.anyChangeHandle.call_count)
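Note: Example #1 exercises Mantid's ADS observer hooks. A minimal standalone sketch of such an observer (assuming the `AnalysisDataServiceObserver` base class exported by `mantid.api` in recent Mantid releases):

    from mantid.api import AnalysisDataServiceObserver

    class PrintingObserver(AnalysisDataServiceObserver):
        """Fires anyChangeHandle on every ADS add/remove/replace/clear."""
        def __init__(self):
            super(PrintingObserver, self).__init__()
            self.observeAll(True)  # subscribe to all notification types

        def anyChangeHandle(self):
            print("ADS contents changed")

    observer = PrintingObserver()  # keep a reference so the hook stays alive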
Example #2
    def tearDown(self):
        ADS.clear()
        if os.path.exists(self.pr.recovery_directory_hostname):
            shutil.rmtree(self.pr.recovery_directory_hostname)

        if os.path.exists(self.working_directory):
            shutil.rmtree(self.working_directory)
Example #3
    def test_saveGSS(self):
        """ Test to Save a GSAS file to match V-drive
        """
        # Create a test data file and workspace
        binfilename = "testbin.dat"
        self._createBinFile(binfilename)

        datawsname = "TestInputWorkspace"
        self._createDataWorkspace(datawsname)

        # Execute
        alg_test = run_algorithm("SaveVulcanGSS", 
                InputWorkspace = datawsname,
                BinFilename = binfilename,
                OutputWorkspace = datawsname+"_rebinned",
                GSSFilename = "tempout.gda")

        self.assertTrue(alg_test.isExecuted())

        # Verify ....
        outputws = AnalysisDataService.retrieve(datawsname+"_rebinned")
        #self.assertEqual(4, tablews.rowCount())

        # Clean up: delete the test bin file and the workspaces
        os.remove(binfilename)
        AnalysisDataService.remove(datawsname)
        AnalysisDataService.remove(datawsname+"_rebinned")

        return
Example #4
    def test_LoadHKLFile(self):
        """ Test to load a .hkl file
        """
        # 1. Create a test file
        hklfilename = "test.hkl"
        self._createHKLFile(hklfilename)

        # 2. Execute the algorithm
        alg_test = run_algorithm("LoadFullprofFile", Filename = hklfilename,
                OutputWorkspace = "Foo", PeakParameterWorkspace = "PeakParameterTable")

        self.assertTrue(alg_test.isExecuted())

        # 3. Verify some values
        tablews = AnalysisDataService.retrieve("PeakParameterTable")
        self.assertEqual(4, tablews.rowCount())

        #   alpha of (11 5 1)/Row 0
        self.assertEqual(0.34252, tablews.cell(0, 3))

        # 4. Delete the test hkl file
        os.remove(hklfilename)
        AnalysisDataService.remove("PeakParameterTable")
        AnalysisDataService.remove("Foo")

        return
Example #5
    def get_weighted_peak_centres(self):
        """ Get the peak centres found in the peak workspace.
        Guarantees: the peak centres and their weights (detector counts) are returned
        :return: 2-tuple: list of 3-tuple (Qx, Qy, Qz)
                          list of double (Det_Counts)
        """
        # get PeaksWorkspace
        if not AnalysisDataService.doesExist(self._myPeakWorkspaceName):
            raise RuntimeError('PeaksWorkspace %s does not exist.' % self._myPeakWorkspaceName)

        peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)

        # get peak center, peak intensity and etc.
        peak_center_list = list()
        peak_intensity_list = list()
        num_peaks = peak_ws.getNumberPeaks()
        for i_peak in range(num_peaks):
            peak_i = peak_ws.getPeak(i_peak)
            center_i = peak_i.getQSampleFrame()
            intensity_i = peak_i.getIntensity()
            peak_center_list.append((center_i.X(), center_i.Y(), center_i.Z()))
            peak_intensity_list.append(intensity_i)
        # END-FOR

        return peak_center_list, peak_intensity_list
Example #6
    def test_that_can_find_can_reduction_if_it_exists(self):
        # Arrange
        test_director = TestDirector()
        state = test_director.construct()
        tagged_workspace_names = {None: "test_ws",
                                  OutputParts.Count: "test_ws_count",
                                  OutputParts.Norm: "test_ws_norm"}
        SANSFunctionsTest._prepare_workspaces(number_of_workspaces=4,
                                              tagged_workspace_names=tagged_workspace_names,
                                              state=state,
                                              reduction_mode=ISISReductionMode.LAB)
        # Act
        workspace, workspace_count, workspace_norm = get_reduced_can_workspace_from_ads(state, output_parts=True,
                                                                              reduction_mode=ISISReductionMode.LAB)  # noqa

        # Assert
        self.assertTrue(workspace is not None)
        self.assertTrue(workspace.name() == AnalysisDataService.retrieve("test_ws").name())
        self.assertTrue(workspace_count is not None)
        self.assertTrue(workspace_count.name() == AnalysisDataService.retrieve("test_ws_count").name())
        self.assertTrue(workspace_norm is not None)
        self.assertTrue(workspace_norm.name() == AnalysisDataService.retrieve("test_ws_norm").name())

        # Clean up
        SANSFunctionsTest._remove_workspaces()
Example #7
    def test_that_can_load_isis_nexus_file_with_event_data_and_multi_period(self):
        # Arrange
        state = SANSLoadTest._get_simple_state(sample_scatter="LARMOR00013065.nxs",
                                               calibration="80tubeCalibration_18-04-2016_r9330-9335.nxs")

        # Act
        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
                                  output_workspace_names=output_workspace_names)

        # Assert
        expected_number_of_workspaces = [4, 0, 0, 0, 0, 0]
        expected_number_on_ads = 1
        workspace_type = [EventWorkspace, None, None, None, None, None]
        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)

        # Check that calibration is added
        self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))

        # Confirm that the ADS workspace contains the calibration file
        try:
            AnalysisDataService.retrieve("80tubeCalibration_18-04-2016_r9330-9335")
            on_ads = True
        except RuntimeError:
            on_ads = False
        self.assertTrue(on_ads)

        # Cleanup
        remove_all_workspaces_from_ads()
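Note: the try/retrieve/except RuntimeError check above can also be written without exception handling using `AnalysisDataService.doesExist`, the call several later examples use; a sketch assuming the same test context:

    # equivalent presence check for the calibration workspace
    on_ads = AnalysisDataService.doesExist("80tubeCalibration_18-04-2016_r9330-9335")
    self.assertTrue(on_ads)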
Example #8
 def _run_createws(self, wsname):
     """
         Run create workspace storing the output in the named workspace
     """
     data = [1.0,2.0,3.0]
     alg = run_algorithm('CreateWorkspace',DataX=data,DataY=data,NSpec=1,UnitX='Wavelength', child=True)
     AnalysisDataService.addOrReplace(wsname, alg.getProperty("OutputWorkspace").value)
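Note: with `child=True` the output workspace is only available through the algorithm property and is not published to the ADS automatically, which is why the helper registers it via `addOrReplace`. A usage sketch, assuming it runs inside the same test class (the workspace name is illustrative):

    self._run_createws("demo_ws")
    ws = AnalysisDataService.retrieve("demo_ws")   # visible on the ADS only after addOrReplace
    assert ws.getNumberHistograms() == 1
    AnalysisDataService.remove("demo_ws")          # clean up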
Example #9
    def edit_matrix_workspace(sq_name, scale_factor, shift, edited_sq_name=None):
        """
        Edit the matrix workspace of S(Q) by scaling and shift
        :param sq_name: name of the SofQ workspace
        :param scale_factor:
        :param shift:
        :param edited_sq_name: workspace for the edited S(Q)
        :return:
        """
        # get the workspace
        if not AnalysisDataService.doesExist(sq_name):
            raise RuntimeError('S(Q) workspace {0} cannot be found in ADS.'.format(sq_name))

        if edited_sq_name is not None:
            simpleapi.CloneWorkspace(InputWorkspace=sq_name, OutputWorkspace=edited_sq_name)
            sq_ws = AnalysisDataService.retrieve(edited_sq_name)
        else:
            sq_ws = AnalysisDataService.retrieve(sq_name)

        # get the vector of Y
        sq_ws = sq_ws * scale_factor
        sq_ws = sq_ws + shift
        if sq_ws.name() != edited_sq_name:
            simpleapi.DeleteWorkspace(Workspace=edited_sq_name)
            simpleapi.RenameWorkspace(InputWorkspace=sq_ws, OutputWorkspace=edited_sq_name)

        assert sq_ws is not None, 'S(Q) workspace cannot be None.'
        print('[DB...BAT] S(Q) workspace that was edited: {0}'.format(sq_ws))
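Note: the operator arithmetic above (`sq_ws * scale_factor`, `sq_ws + shift`) creates temporary workspaces that then have to be deleted and renamed. A sketch of the same edit done in place with Mantid's `Scale` algorithm, whose `Operation` property accepts 'Multiply' and 'Add':

    from mantid import simpleapi

    def scale_and_shift_in_place(sq_name, scale_factor, shift):
        # Y -> Y * scale_factor + shift, written back to the same workspace
        simpleapi.Scale(InputWorkspace=sq_name, OutputWorkspace=sq_name,
                        Factor=scale_factor, Operation='Multiply')
        simpleapi.Scale(InputWorkspace=sq_name, OutputWorkspace=sq_name,
                        Factor=shift, Operation='Add')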
Example #10
 def cleanup(self):
     if AnalysisDataService.doesExist(self._input_wksp):
         DeleteWorkspace(self._input_wksp)
     if AnalysisDataService.doesExist(self._output_wksp):
         DeleteWorkspace(self._output_wksp)
     if AnalysisDataService.doesExist(self._correction_wksp):
         DeleteWorkspace(self._correction_wksp)
Example #11
 def test_setTitle(self):
     run_algorithm('CreateWorkspace', OutputWorkspace='ws1', DataX=[1.,2.,3.], DataY=[2.,3.], DataE=[2.,3.], UnitX='TOF')
     ws1 = AnalysisDataService['ws1']
     title = 'test_title'
     ws1.setTitle(title)
     self.assertEqual(title, ws1.getTitle())
     AnalysisDataService.remove(ws1.getName())
Example #12
 def test_add_raises_error_if_name_exists(self):
     data = [1.0,2.0,3.0]
     alg = run_algorithm('CreateWorkspace',DataX=data,DataY=data,NSpec=1,UnitX='Wavelength', child=True)
     name = "testws"
     ws = alg.getProperty("OutputWorkspace").value
     AnalysisDataService.addOrReplace(name, ws)
     self.assertRaises(RuntimeError, AnalysisDataService.add, name, ws)
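Note: this is the behavioural difference the test pins down: `add` raises `RuntimeError` on a name collision, while `addOrReplace` silently swaps the workspace. A minimal sketch, assuming `ws` is any workspace handle:

    AnalysisDataService.addOrReplace("testws", ws)   # fine even if "testws" already exists
    try:
        AnalysisDataService.add("testws", ws)        # same name again
    except RuntimeError:
        print("name already in use")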
Example #13
    def test_batch_reduction_on_multiperiod_file(self):
        # Arrange
        # Build the data information
        data_builder = get_data_builder(SANSFacility.ISIS)
        data_builder.set_sample_scatter("SANS2D0005512")

        data_info = data_builder.build()

        # Get the rest of the state from the user file
        user_file_director = StateDirectorISIS(data_info)
        user_file_director.set_user_file("MASKSANS2Doptions.091A")
        # Set the reduction mode to LAB
        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
        state = user_file_director.construct()

        # Act
        states = [state]
        self._run_batch_reduction(states, use_optimizations=False)

        # Assert
        # We only assert that the expected workspaces exist on the ADS
        expected_workspaces = ["5512p1rear_1D_2.0_14.0Phi-45.0_45.0", "5512p2rear_1D_2.0_14.0Phi-45.0_45.0",
                               "5512p3rear_1D_2.0_14.0Phi-45.0_45.0", "5512p4rear_1D_2.0_14.0Phi-45.0_45.0",
                               "5512p5rear_1D_2.0_14.0Phi-45.0_45.0", "5512p6rear_1D_2.0_14.0Phi-45.0_45.0",
                               "5512p7rear_1D_2.0_14.0Phi-45.0_45.0", "5512p8rear_1D_2.0_14.0Phi-45.0_45.0",
                               "5512p9rear_1D_2.0_14.0Phi-45.0_45.0", "5512p10rear_1D_2.0_14.0Phi-45.0_45.0",
                               "5512p11rear_1D_2.0_14.0Phi-45.0_45.0", "5512p12rear_1D_2.0_14.0Phi-45.0_45.0",
                               "5512p13rear_1D_2.0_14.0Phi-45.0_45.0"]
        for element in expected_workspaces:
            self.assertTrue(AnalysisDataService.doesExist(element))

        # Clean up
        for element in expected_workspaces:
            AnalysisDataService.remove(element)
Example #14
 def setUp(self):
     self.working_directory = tempfile.mkdtemp()
     self.ws1_name = "ws1"
     self.project_ext = ".mtdproj"
     ADS.addOrReplace(self.ws1_name, CreateSampleWorkspace(OutputWorkspace=self.ws1_name))
     project_saver = projectsaver.ProjectSaver(self.project_ext)
     project_saver.save_project(workspace_to_save=[self.ws1_name], directory=self.working_directory)
Example #15
    def recover_selected_checkpoint(self, selected):
        """
        Recover the passed checkpoint
        :param selected: String; Checkpoint name to be recovered
        """
        # If this is a valid file then it should only be the checkpoint here
        if os.path.exists(selected):
            selected = os.path.basename(selected)

        self.is_recovery_running = True
        self.presenter.change_start_mantid_to_cancel_label()

        ADS.clear()

        # Recover given the checkpoint selected
        pid_dir = self.project_recovery.get_pid_folder_to_load_a_checkpoint_from()
        selected = selected.replace(" ", "T")
        checkpoint = os.path.join(pid_dir, selected)
        self.selected_checkpoint = selected

        try:
            self._start_recovery_of_checkpoint(checkpoint)
        except Exception as e:
            # Fail "Silently" by setting failed run to true, setting checkpoint to tried and closing the view.
            logger.debug("Project Recovery: " + str(e))
            self.has_failed_run = True
            self._update_checkpoint_tried(selected)
            self.presenter.close_view()
Example #16
 def test_len_increases_when_item_added(self):
     wsname = 'ADSTest_test_len_increases_when_item_added'
     current_len = len(AnalysisDataService)
     self._run_createws(wsname)
     self.assertEqual(len(AnalysisDataService), current_len + 1)
     # Remove to clean the test up
     AnalysisDataService.remove(wsname)
Example #17
    def open_selected_in_editor(self, selected):
        """
        Open the passed checkpoint in the editor
        :param selected: String; Checkpoint name to be opened
        """
        self.is_recovery_running = True
        ADS.clear()

        # Open editor for this checkpoint
        pid_dir = self.project_recovery.get_pid_folder_to_load_a_checkpoint_from()
        selected = selected.replace(" ", "T")
        checkpoint = os.path.join(pid_dir, selected)

        try:
            self.project_recovery.open_checkpoint_in_script_editor(checkpoint)
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                raise
            # Fail "silently"
            self.has_failed_run = True

        if self.has_failed_run:
            self._update_checkpoint_tried(selected)

        self.is_recovery_running = False
        self.presenter.close_view()
Example #18
    def retrieve_hkl_from_spice_table(self):
        """ Get averaged HKL from SPICE table
        HKL will be averaged from SPICE table by assuming the value in SPICE might be right
        :return:
        """
        # get SPICE table
        spice_table_name = get_spice_table_name(self._myExpNumber, self._myScanNumber)
        assert AnalysisDataService.doesExist(spice_table_name), 'Spice table for exp %d scan %d cannot be found.' \
                                                                '' % (self._myExpNumber, self._myScanNumber)

        spice_table_ws = AnalysisDataService.retrieve(spice_table_name)

        # get HKL column indexes
        h_col_index = spice_table_ws.getColumnNames().index('h')
        k_col_index = spice_table_ws.getColumnNames().index('k')
        l_col_index = spice_table_ws.getColumnNames().index('l')

        # scan each Pt.
        hkl = numpy.array([0., 0., 0.])

        num_rows = spice_table_ws.rowCount()
        for row_index in range(num_rows):
            mi_h = spice_table_ws.cell(row_index, h_col_index)
            mi_k = spice_table_ws.cell(row_index, k_col_index)
            mi_l = spice_table_ws.cell(row_index, l_col_index)
            hkl += numpy.array([mi_h, mi_k, mi_l])
        # END-FOR

        self._spiceHKL = hkl/num_rows

        return
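Note: `TableWorkspace.column()` returns a whole column at once, so the row loop above can be collapsed into a single numpy mean; a sketch over the same `spice_table_ws`:

    import numpy
    # stack the h, k, l columns and average over the Pt. rows
    hkl_columns = numpy.array([spice_table_ws.column(name) for name in ('h', 'k', 'l')])
    averaged_hkl = hkl_columns.mean(axis=1)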
Example #19
    def test_DNSFRSelfCorrection(self):
        outputWorkspaceName = "DNSFlippingRatioCorrTest_Test4"
        # consider normalization=1.0 as set in self._create_fake_workspace
        dataws_sf = self.__sf_nicrws - self.__sf_bkgrws
        dataws_nsf = self.__nsf_nicrws - self.__nsf_bkgrws
        alg_test = run_algorithm("DNSFlippingRatioCorr", SFDataWorkspace=dataws_sf,
                                 NSFDataWorkspace=dataws_nsf, SFNiCrWorkspace=self.__sf_nicrws.getName(),
                                 NSFNiCrWorkspace=self.__nsf_nicrws.getName(), SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                                 NSFBkgrWorkspace=self.__nsf_bkgrws.getName(), SFOutputWorkspace=outputWorkspaceName+'SF',
                                 NSFOutputWorkspace=outputWorkspaceName+'NSF')

        self.assertTrue(alg_test.isExecuted())
        # check whether the data are correct
        ws_sf = AnalysisDataService.retrieve(outputWorkspaceName + 'SF')
        ws_nsf = AnalysisDataService.retrieve(outputWorkspaceName + 'NSF')
        # dimensions
        self.assertEqual(24, ws_sf.getNumberHistograms())
        self.assertEqual(24, ws_nsf.getNumberHistograms())
        self.assertEqual(2,  ws_sf.getNumDims())
        self.assertEqual(2,  ws_nsf.getNumDims())
        # data array: spin-flip must be zero
        for i in range(24):
            self.assertAlmostEqual(0.0, ws_sf.readY(i)[0])
        # data array: non spin-flip must be the total nsf + sf (intensity is conserved)
        nsf = np.array(dataws_nsf.extractY())
        sf = np.array(dataws_sf.extractY())
        refdata = nsf + sf
        for i in range(24):
            self.assertAlmostEqual(refdata[i][0], ws_nsf.readY(i)[0])

        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'SF')
        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'NSF')
        run_algorithm("DeleteWorkspace", Workspace=dataws_sf)
        run_algorithm("DeleteWorkspace", Workspace=dataws_nsf)
        return
Example #20
    def test_exportFileNew(self):
        """ Test to export logs without header file
        """
        # Generate the matrix workspace with some logs
        ws = self.createTestWorkspace()
        AnalysisDataService.addOrReplace("TestMatrixWS", ws)

        # Test algorithm
        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS",
            OutputFilename = "TestRecord001.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge", "proton_charge", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge", "MinPCharge", "MeanPCharge"],
            SampleLogOperation = [None, None, "sum", "min", "average"],
            FileMode = "new")

        # Validate
        self.assertTrue(alg_test.isExecuted())

        # Locate file
        outfilename = alg_test.getProperty("OutputFilename").value
        try:
            ifile = open(outfilename)
            lines = ifile.readlines()
            ifile.close()
        except IOError:
            print("Unable to open file {0}.".format(outfilename))
            self.fail()
            return

        # Last line cannot be empty, i.e., before EOF '\n' is not allowed
        lastline = lines[-1]
        self.assertTrue(len(lastline.strip()) > 0)

        # Number of lines
        self.assertEqual(len(lines), 2)

        # Check line
        firstdataline = lines[1]
        terms = firstdataline.strip().split("\t")
        self.assertEqual(len(terms), 5)

        # Get property
        pchargelog = ws.getRun().getProperty("proton_charge").value
        sumpcharge = numpy.sum(pchargelog)
        minpcharge = numpy.min(pchargelog)
        avgpcharge = numpy.average(pchargelog)

        v2 = float(terms[2])
        self.assertAlmostEqual(sumpcharge, v2)
        v3 = float(terms[3])
        self.assertAlmostEqual(minpcharge, v3)
        v4 = float(terms[4])
        self.assertAlmostEqual(avgpcharge, v4)

        # Remove generated files
        os.remove(outfilename)
        AnalysisDataService.remove("TestMatrixWS")

        return
Example #21
 def _createTwoCurves(self, datawsname):
     """ Create data workspace
     """
     E = np.arange(-50, 50, 1.0)
     # curve 1
     I = 1000 * np.exp(-E**2/10**2)
     err = I ** .5
     # curve 2
     I2 = 1000 * (1+np.sin(E/5*np.pi))
     err2 = I2 ** .5
     # workspace
     ws = WorkspaceFactory.create(
         "Workspace2D", NVectors=2,
         XLength = E.size, YLength = I.size
         )
     # curve1
     ws.dataX(0)[:] = E
     ws.dataY(0)[:] = I
     ws.dataE(0)[:] = err
     # curve2
     ws.dataX(1)[:] = E
     ws.dataY(1)[:] = I2
     ws.dataE(1)[:] = err2
     # Add to data service
     AnalysisDataService.addOrReplace(datawsname, ws)
     return E, I, err, I2, err2
Example #22
    def _createDataWorkspace(self, datawsname):
        """ Create data workspace
        """
        import math

        tof0 = 4900.
        delta = 0.001
        numpts = 200

        vecx = []
        vecy = []
        vece = []

        tof = tof0
        for n in range(numpts):
            vecx.append(tof)
            vecy.append(math.sin(tof0))
            vece.append(1.)

            tof = tof * (1+delta)
        # ENDFOR
        vecx.append(tof)

        dataws = api.CreateWorkspace(DataX = vecx, DataY = vecy, DataE = vece, NSpec = 1, 
                UnitX = "TOF")

        # Add to data service
        AnalysisDataService.addOrReplace(datawsname, dataws)

        return dataws
Example #23
    def testConvertUnits(self):
        # test whether CorrectTof+ConvertUnits+ConvertToDistribution will give the same result as TOFTOFConvertTOFToDeltaE
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)

        # convert units, convert to distribution
        alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName+'_dE')
        ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName+'_dE')
        alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)

        # create reference data for X axis
        tof1 = 2123.33867005
        dataX = self._input_ws.readX(0) - tof1
        tel = 8189.5 - tof1
        factor = m_n*1e+15/eV
        newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
        # compare
        # self.assertEqual(newX[0], ws_dE.readX(0)[0])
        self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))

        # create reference data for Y axis and compare to the output
        tof = dataX[:-1] + 5.25
        newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
        # compare
        self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))

        run_algorithm("DeleteWorkspace", Workspace=ws_dE)
        run_algorithm("DeleteWorkspace", Workspace=wscorr)
Example #24
    def test_LoadPRFFile(self):
        """ Test to load a .prf file
        """
        # 1. Create a test .prf file
        prffilename = "test.prf"
        self._createPrfFile(prffilename)

        # 2. Execute the algorithm
        alg_test = run_algorithm("LoadFullprofFile",
                Filename = prffilename,
                OutputWorkspace = "Data",
                PeakParameterWorkspace = "Info")

        self.assertTrue(alg_test.isExecuted())

        # 3. Check data
        dataws = AnalysisDataService.retrieve("Data")
        self.assertEqual(dataws.getNumberHistograms(), 4)
        self.assertEqual(len(dataws.readX(0)), 36)

        #    value
        self.assertEqual(dataws.readX(0)[13], 5026.3223)
        self.assertEqual(dataws.readY(1)[30], 0.3819)

        # 4. Clean
        os.remove(prffilename)
        AnalysisDataService.remove("Data")
        AnalysisDataService.remove("Info")


        return
Example #25
    def test_exportFileAppend(self):
        """ Test to export logs without header file
        """
        # Generate the matrix workspace with some logs
        ws = self.createTestWorkspace()
        AnalysisDataService.addOrReplace("TestMatrixWS", ws)

        # Test algorithm
        # create new file
        alg_test = run_algorithm("ExportExperimentLog", 
            InputWorkspace = "TestMatrixWS",
            OutputFilename = "TestRecord.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge"],
            SampleLogOperation = [None, None, "sum"],
            FileMode = "new")     
      
        # append
        alg_test = run_algorithm("ExportExperimentLog", 
            InputWorkspace = "TestMatrixWS",
            OutputFilename = "TestRecord.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge"],
            SampleLogOperation = [None, None, "sum"],
            FileMode = "fastappend")

        # Validate
        self.assertTrue(alg_test.isExecuted())

        # Locate file
        outfilename = alg_test.getProperty("OutputFilename").value
        try:
            print("Output file is {0}.".format(outfilename))
            ifile = open(outfilename)
            lines = ifile.readlines()
            ifile.close()
        except IOError:
            print("Unable to open file {0}.".format(outfilename))
            self.fail()
            return

        # Last line cannot be empty, i.e., before EOF '\n' is not allowed
        lastline = lines[-1]
        self.assertTrue(len(lastline.strip()) > 0)
        
        # Number of lines
        self.assertEqual(len(lines), 3)

        # Check line
        firstdataline = lines[1]
        terms = firstdataline.strip().split("\t")
        self.assertEqual(len(terms), 3)

        # Remove generated files
        os.remove(outfilename)
        AnalysisDataService.remove("TestMatrixWS")
        
        return
Example #26
    def _determine_factors(self, q_high_angle, q_low_angle, mode, scale, shift):

        # We need to make sure that the fitting occurs only in the y direction

        # Determine the StartQ and EndQ values
        q_min, q_max = self._get_start_q_and_end_q_values(rear_data=q_low_angle, front_data=q_high_angle)

        # We need to transfer the errors from the front data to the rear data, as we are using the front data
        # as a model, but we want to take into account the errors of both workspaces.
        error_correction = ErrorTransferFromModelToData()
        front_data_corrected, rear_data_corrected = error_correction.get_error_corrected(rear_data=q_low_angle,
                                                                                         front_data=q_high_angle,
                                                                                         q_min=q_min, q_max=q_max)

        fit = self.createChildAlgorithm('Fit')

        # We currently have to put the front_data into the ADS so that the TabulatedFunction has access to it
        AnalysisDataService.addOrReplace('front_data_corrected', front_data_corrected)
        front_in_ads = AnalysisDataService.retrieve('front_data_corrected')

        function = 'name=TabulatedFunction, Workspace="' + str(
            front_in_ads.name()) + '"' + ";name=FlatBackground"

        fit.setProperty('Function', function)
        fit.setProperty('InputWorkspace', rear_data_corrected)

        constant_x_shift_and_scale = 'f0.Shift=0.0, f0.XScaling=1.0'
        if mode == Mode.BothFit:
            fit.setProperty('Ties', constant_x_shift_and_scale)
        elif mode == Mode.ShiftOnly:
            fit.setProperty('Ties', 'f0.Scaling=' + str(scale) + ',' + constant_x_shift_and_scale)
        elif mode == Mode.ScaleOnly:
            fit.setProperty('Ties', 'f1.A0=' + str(shift) + '*f0.Scaling,' + constant_x_shift_and_scale)
        else:
            raise RuntimeError('Unknown fitting mode requested.')

        fit.setProperty('StartX', q_min)
        fit.setProperty('EndX', q_max)
        fit.setProperty('CreateOutput', True)
        fit.execute()
        param = fit.getProperty('OutputParameters').value
        AnalysisDataService.remove(front_in_ads.name())

        # The output parameters are:
        # 1. Scaling in y direction
        # 2. Shift in x direction
        # 3. Scaling in x direction
        # 4. Shift in y direction

        scale = param.row(0)['Value']

        if scale == 0.0:
            raise RuntimeError('Fit scaling as part of stitching evaluated to zero')

        # In order to determine the shift, we need to remove the scale factor
        shift = param.row(3)['Value'] / scale

        return (shift, scale)
Example #27
    def test_observeClear_calls_clearHandle_when_set_on_ads_its_cleared(self):
        CreateSampleWorkspace(OutputWorkspace="ws")

        self.fake_class.observeClear(True)
        self.fake_class.clearHandle = mock.MagicMock()
        ADS.clear()

        self.assertEqual(self.fake_class.clearHandle.call_count, 1)
Example #28
 def tearDown(self):
     self.cleanup_names.append(self.wsname)
     for name in self.cleanup_names:
         try:
             AnalysisDataService.remove(name)
         except KeyError:
             pass
     self.cleanup_names = []
Example #29
def get_ads_workspace_references():
    """
    Gets a list of handles of available workspaces on the ADS

    @return: the workspaces on the ADS.
    """
    for workspace_name in AnalysisDataService.getObjectNames():
        yield AnalysisDataService.retrieve(workspace_name)
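Because this helper is a generator, workspace handles are produced lazily; typical consumption looks like:

    for workspace in get_ads_workspace_references():
        print(workspace.name(), workspace.getMemorySize())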
Example #30
 def populate_combobox(self, combo):
     ws_list = AnalysisDataService.getObjectNames()
     for ws in ws_list:
         ws_object = AnalysisDataService.retrieve(ws)
         if not ws.startswith("__") and combo.findText(ws) < 0 \
                 and hasattr(ws_object, "getNumberHistograms") \
                 and ws_object.getNumberHistograms() == 1:
             combo.addItem(ws)
Example #31
 def _assert_list_in_ADS(self, workspace_name_list):
     ads_list = AnalysisDataService.getObjectNames()
     for item in workspace_name_list:
         self.assertTrue(item in ads_list)
Example #32
 def tearDownClass(cls):
     AnalysisDataService.clear()
Example #33
 def _get_project_size(workspace_names):
     project_size = 0
     for name in workspace_names:
         project_size += AnalysisDataService.retrieve(name).getMemorySize()
     return project_size
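`getMemorySize()` reports bytes, so sizing everything currently on the ADS is, for example:

    total_bytes = _get_project_size(AnalysisDataService.getObjectNames())
    print("project size: {0:.1f} MB".format(total_bytes / 1024.0 / 1024.0))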
Example #34
def fit_tof_iteration(sample_data, container_data, runs, flags):
    """
    Performs a single iterations of the time of flight corrections and fitting
    workflow.

    :param sample_data: Loaded sample data workspaces
    :param container_data: Loaded container data workspaces
    :param runs: A string specifying the runs to process
    :param flags: A dictionary of flags to control the processing
    :return: Tuple of (workspace group name, pre correction fit parameters,
             final fit parameters, chi^2 values)
    """
    # Transform inputs into something the algorithm can understand
    if isinstance(flags['masses'][0], list):
        mass_values = _create_profile_strs_and_mass_list(
            copy.deepcopy(flags['masses'][0]))[0]
        profiles_strs = []
        for mass_spec in flags['masses']:
            profiles_strs.append(
                _create_profile_strs_and_mass_list(mass_spec)[1])
    else:
        mass_values, profiles_strs = _create_profile_strs_and_mass_list(
            flags['masses'])
    background_str = _create_background_str(flags.get('background', None))
    intensity_constraints = _create_intensity_constraint_str(
        flags['intensity_constraints'])
    ties = _create_user_defined_ties_str(flags['masses'])

    num_spec = sample_data.getNumberHistograms()
    pre_correct_pars_workspace = None
    pars_workspace = None
    max_fit_iterations = flags.get('max_fit_iterations', 5000)

    output_groups = []
    chi2_values = []
    for index in range(num_spec):
        if isinstance(profiles_strs, list):
            profiles = profiles_strs[index]
        else:
            profiles = profiles_strs

        suffix = _create_fit_workspace_suffix(index, sample_data,
                                              flags['fit_mode'],
                                              flags['spectra'],
                                              flags.get('iteration', None))

        # Corrections
        corrections_args = dict()

        # Need to do a fit first to obtain the parameter table
        pre_correction_pars_name = runs + "_params_pre_correction" + suffix
        corrections_fit_name = "__vesuvio_corrections_fit"
        ms.VesuvioTOFFit(InputWorkspace=sample_data,
                         WorkspaceIndex=index,
                         Masses=mass_values,
                         MassProfiles=profiles,
                         Background=background_str,
                         IntensityConstraints=intensity_constraints,
                         Ties=ties,
                         OutputWorkspace=corrections_fit_name,
                         FitParameters=pre_correction_pars_name,
                         MaxIterations=max_fit_iterations,
                         Minimizer=flags['fit_minimizer'])
        ms.DeleteWorkspace(corrections_fit_name)
        corrections_args['FitParameters'] = pre_correction_pars_name

        # Add the multiple scattering arguments
        corrections_args.update(flags['ms_flags'])

        corrected_data_name = runs + "_tof_corrected" + suffix
        linear_correction_fit_params_name = runs + "_correction_fit_scale" + suffix

        if flags.get('output_verbose_corrections', False):
            corrections_args[
                "CorrectionWorkspaces"] = runs + "_correction" + suffix
            corrections_args[
                "CorrectedWorkspaces"] = runs + "_corrected" + suffix

        if container_data is not None:
            corrections_args["ContainerWorkspace"] = container_data

        ms.VesuvioCorrections(
            InputWorkspace=sample_data,
            OutputWorkspace=corrected_data_name,
            LinearFitResult=linear_correction_fit_params_name,
            WorkspaceIndex=index,
            GammaBackground=flags.get('gamma_correct', False),
            Masses=mass_values,
            MassProfiles=profiles,
            IntensityConstraints=intensity_constraints,
            MultipleScattering=True,
            GammaBackgroundScale=flags.get('fixed_gamma_scaling', 0.0),
            ContainerScale=flags.get('fixed_container_scaling', 0.0),
            **corrections_args)

        # Final fit
        fit_ws_name = runs + "_data" + suffix
        pars_name = runs + "_params" + suffix
        fit_result = ms.VesuvioTOFFit(
            InputWorkspace=corrected_data_name,
            WorkspaceIndex=0,
            Masses=mass_values,
            MassProfiles=profiles,
            Background=background_str,
            IntensityConstraints=intensity_constraints,
            Ties=ties,
            OutputWorkspace=fit_ws_name,
            FitParameters=pars_name,
            MaxIterations=max_fit_iterations,
            Minimizer=flags['fit_minimizer'])
        chi2_values.append(fit_result[-1])
        ms.DeleteWorkspace(corrected_data_name)

        # Process parameter tables
        if pre_correct_pars_workspace is None:
            pre_correct_pars_workspace = _create_param_workspace(
                num_spec, mtd[pre_correction_pars_name])

        if pars_workspace is None:
            pars_workspace = _create_param_workspace(num_spec, mtd[pars_name])

        spec_num_str = str(sample_data.getSpectrum(index).getSpectrumNo())
        current_spec = 'spectrum_' + spec_num_str

        _update_fit_params(pre_correct_pars_workspace, index,
                           mtd[pre_correction_pars_name], current_spec)
        _update_fit_params(pars_workspace, index, mtd[pars_name], current_spec)

        ms.DeleteWorkspace(pre_correction_pars_name)
        ms.DeleteWorkspace(pars_name)

        # Process spectrum group
        # Note the ordering of operations here gives the order in the WorkspaceGroup
        group_name = runs + suffix
        output_workspaces = [fit_ws_name, linear_correction_fit_params_name]
        if flags.get('output_verbose_corrections', False):
            output_workspaces += mtd[
                corrections_args["CorrectionWorkspaces"]].getNames()
            output_workspaces += mtd[
                corrections_args["CorrectedWorkspaces"]].getNames()
            ms.UnGroupWorkspace(corrections_args["CorrectionWorkspaces"])
            ms.UnGroupWorkspace(corrections_args["CorrectedWorkspaces"])

        output_groups.append(
            ms.GroupWorkspaces(InputWorkspaces=output_workspaces,
                               OutputWorkspace=group_name))

        # Output the parameter workspaces
        params_pre_corr = runs + "_params_pre_correction_iteration_" + str(
            flags['iteration'])
        params_name = runs + "_params_iteration_" + str(flags['iteration'])
        AnalysisDataService.Instance().addOrReplace(
            params_pre_corr, pre_correct_pars_workspace)
        AnalysisDataService.Instance().addOrReplace(params_name,
                                                    pars_workspace)

    if len(output_groups) > 1:
        result_ws = output_groups
    else:
        result_ws = output_groups[0]

    return (result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values)
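The `flags` dictionary drives the whole workflow; the keys the function reads are visible in the body above. A hedged sketch of the call shape only, where the mass-profile entry and run string are illustrative placeholders, not a tested configuration:

    flags = {
        'masses': [{'value': 1.0079, 'function': 'GramCharlier'}],  # placeholder profile spec
        'intensity_constraints': [],
        'background': None,
        'fit_mode': 'spectrum',
        'spectra': '135-198',
        'fit_minimizer': 'Levenberg-Marquardt',
        'ms_flags': {},               # multiple-scattering settings forwarded to VesuvioCorrections
        'iteration': 0,
        'gamma_correct': False,
        'output_verbose_corrections': False,
    }
    # sample_ws: a previously loaded sample workspace; no container data here
    result_ws, pre_pars, pars, chi2 = fit_tof_iteration(sample_ws, None, "15039-15045", flags)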
Example #35
    def test_project_loading(self):
        project_loader = projectloader.ProjectLoader(project_file_ext)

        self.assertTrue(project_loader.load_project(working_project_file))

        self.assertEqual(ADS.getObjectNames(), ["ws1"])
Example #36
 def tearDown(self):
     AnalysisDataService.Instance().clear()
Example #37
 def test_pcolormesh_from_names(self):
     ws_name = 'test_pcolormesh_from_names-1'
     AnalysisDataService.Instance().addOrReplace(ws_name, self._test_ws)
     fig = pcolormesh_from_names([ws_name])
     self.assertEqual(1, len(fig.gca().images))
Example #38
 def test_pcolormesh_from_names_calls_pcolormesh(self, pcolormesh_mock):
     ws_name = 'test_pcolormesh_from_names_calls_pcolormesh-1'
     AnalysisDataService.Instance().addOrReplace(ws_name, self._test_ws)
     pcolormesh_from_names([ws_name])
     self.assertEqual(1, pcolormesh_mock.call_count)
Example #39
    def PyExec(self):
        fn = self.getPropertyValue("Filename")
        wsn = self.getPropertyValue("OutputWorkspace")
        monitor_workspace_name = self.getPropertyValue(
            "OutputMonitorWorkspace")
        if monitor_workspace_name == "":
            self.setPropertyValue("OutputMonitorWorkspace", wsn + '_Monitors')
        # print (fn, wsn)
        self.override_angle = self.getPropertyValue("AngleOverride")
        self.fxml = self.getPropertyValue("InstrumentXML")

        # load data

        parms_dict, det_udet, det_count, det_tbc, data = self.read_file(fn)
        nrows = int(parms_dict['NDET'])
        # nbins=int(parms_dict['NTC'])
        xdata = np.array(det_tbc)
        xdata_mon = np.linspace(xdata[0], xdata[-1], len(xdata))
        ydata = data.astype(np.float64)
        ydata = ydata.reshape(nrows, -1)
        edata = np.sqrt(ydata)
        # CreateWorkspace(OutputWorkspace=wsn,DataX=xdata,DataY=ydata,DataE=edata,
        #                NSpec=nrows,UnitX='TOF',WorkspaceTitle='Data',YUnitLabel='Counts')
        nr, nc = ydata.shape
        ws = WorkspaceFactory.create("Workspace2D",
                                     NVectors=nr,
                                     XLength=nc + 1,
                                     YLength=nc)
        for i in range(nrows):
            ws.setX(i, xdata)
            ws.setY(i, ydata[i])
            ws.setE(i, edata[i])
        ws.getAxis(0).setUnit('tof')
        AnalysisDataService.addOrReplace(wsn, ws)

        # self.setProperty("OutputWorkspace", wsn)
        # print ("ws:", wsn)
        # ws=mtd[wsn]

        # fix the x values for the monitor
        for i in range(nrows - 2, nrows):
            ws.setX(i, xdata_mon)
        self.log().information("set detector IDs")
        # set detector IDs
        for i in range(nrows):
            ws.getSpectrum(i).setDetectorID(det_udet[i])
        # Sample logs: the header values are written into the sample logs
        log_names = [
            str(sl.encode('ascii', 'ignore').decode())
            for sl in parms_dict.keys()
        ]
        log_values = [
            str(sl.encode('ascii', 'ignore').decode()) if isinstance(
                sl, UnicodeType) else str(sl) for sl in parms_dict.values()
        ]
        for i in range(len(log_values)):
            if ('nan' in log_values[i]) or ('NaN' in log_values[i]):
                log_values[i] = '-1.0'
        AddSampleLogMultiple(Workspace=wsn,
                             LogNames=log_names,
                             LogValues=log_values)
        SetGoniometer(Workspace=wsn, Goniometers='Universal')
        if (self.fxml == ""):
            LoadInstrument(Workspace=wsn,
                           InstrumentName="Exed",
                           RewriteSpectraMap=True)
        else:
            LoadInstrument(Workspace=wsn,
                           Filename=self.fxml,
                           RewriteSpectraMap=True)
        try:
            RotateInstrumentComponent(
                Workspace=wsn,
                ComponentName='Tank',
                Y=1,
                Angle=-float(parms_dict['phi'].encode('ascii', 'ignore')),
                RelativeRotation=False)
        except Exception:
            self.log().warning(
                "The instrument does not contain a 'Tank' component. "
                "This means that you are using a custom XML instrument definition. "
                "OMEGA_MAG will be ignored.")
            self.log().warning(
                "Please make sure that the detector positions in the instrument definition are correct."
            )
        # Split the monitors into a separate workspace
        __temp_monitors = ExtractSpectra(
            InputWorkspace=wsn,
            WorkspaceIndexList=','.join(
                [str(s) for s in range(nrows - 2, nrows)]),
            OutputWorkspace=self.getPropertyValue("OutputMonitorWorkspace"))
        # ExtractSpectra(InputWorkspace = wsn, WorkspaceIndexList = ','.join([str(s) for s in range(nrows-2, nrows)]),
        # OutputWorkspace = wsn + '_Monitors')
        MaskDetectors(Workspace=wsn,
                      WorkspaceIndexList=','.join(
                          [str(s) for s in range(nrows - 2, nrows)]))
        RemoveMaskedSpectra(InputWorkspace=wsn, OutputWorkspace=wsn)

        self.setProperty("OutputWorkspace", wsn)
        self.setProperty("OutputMonitorWorkspace", __temp_monitors)
Example #40
 def tearDown(self):
     ADS.clear()
Example #41
def get_all_workspace_names(group_names):
    workspace_names = []
    for group_name in group_names:
        workspace_names = add_workspace_names(
            workspace_names, AnalysisDataService.retrieve(group_name))
    return workspace_names
Example #42
 def tearDown(self):
     AnalysisDataService.clear()
Example #43
    def test_exportFileUTC(self):
        """ Test to export logs without header file
        """
        # Generate the matrix workspace with some logs
        ws = self.createTestWorkspace()
        AnalysisDataService.addOrReplace("TestMatrixWS", ws)

        # Test algorithm
        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS",
            OutputFilename = "TestRecord001utc.txt",
            SampleLogNames = ["run_number", "duration", "run_start", "proton_charge", "proton_charge", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "StartTime", "ProtonCharge", "MinPCharge", "MeanPCharge"],
            SampleLogOperation = [None, None, "time", "sum", "min", "average"],
            TimeZone = 'UTC',
            FileMode = "new")

        # Validate
        self.assertTrue(alg_test.isExecuted())

        # Locate file
        outfilename = alg_test.getProperty("OutputFilename").value
        try:
            ifile = open(outfilename)
            lines = ifile.readlines()
            ifile.close()
        except IOError:
            print("Unable to open file {0}.".format(outfilename))
            self.fail()
            return

        # Last line cannot be empty, i.e., before EOF '\n' is not allowed
        lastline = lines[-1]
        self.assertGreater(len(lastline.strip()), 0)

        # Number of lines
        self.assertEqual(len(lines), 2)

        # Check line
        firstdataline = lines[1]
        terms = firstdataline.strip().split("\t")
        self.assertEqual(len(terms), 6)

        # Get property
        runstarttime = ws.run().getProperty("run_start").value
        pchargelog = ws.getRun().getProperty("proton_charge").value
        sumpcharge = numpy.sum(pchargelog)
        minpcharge = numpy.min(pchargelog)
        avgpcharge = numpy.average(pchargelog)

        # run start time
        v2 = str(terms[2])
        self.assertEqual(runstarttime, v2.split("UTC")[0].strip())

        v3 = float(terms[3])
        self.assertAlmostEqual(sumpcharge, v3)

        v4 = float(terms[4])
        self.assertAlmostEqual(minpcharge, v4)

        v5 = float(terms[5])
        self.assertAlmostEqual(avgpcharge, v5)

        # Remove generated files
        os.remove(outfilename)

        AnalysisDataService.remove("TestMatrixWS")

        return
Example #44
 def test_plot_from_names_with_non_plottable_workspaces_returns_None(self):
     table = WorkspaceFactory.Instance().createTable()
     table_name = 'test_plot_from_names_with_non_plottable_workspaces_returns_None'
     AnalysisDataService.Instance().addOrReplace(table_name, table)
     result = plot_from_names([table_name], errors=False, overplot=False)
     self.assertEqual(result, None)
Example #45
 def tearDown(self):
     ADS.clear()
     if isdir(working_directory):
         rmtree(working_directory)
Example #46
 def test_that_manage_workspace_names_raises_on_mix_of_workspaces_and_names(
         self):
     ws = ["some_workspace", self._test_ws]
     AnalysisDataService.Instance().addOrReplace("some_workspace",
                                                 self._test_ws)
     self.assertRaises(TypeError, workspace_names_dummy_func, ws)
Example #47
    def test_exportFileAppend2(self):
        """ Test to export file in appending mode
        In this case, the original file will be renamed and a new file will
        be creatd
        """
        import datetime
        import time

        # Generate the matrix workspace with some logs
        ws = self.createTestWorkspace()
        AnalysisDataService.addOrReplace("TestMatrixWS", ws)

        # Test algorithm
        # create new file
        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS",
            OutputFilename = "TestRecord.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge"],
            SampleLogOperation = [None, None, "sum"],
            FileMode = "new")

        # append
        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS",
            OutputFilename = "TestRecord.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge", "SensorA"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge", "SensorA"],
            SampleLogOperation = [None, None, "sum", "0"],
            FileMode = "append")

        # Validate
        self.assertTrue(alg_test.isExecuted())

        # Locate file
        outfilename = alg_test.getProperty("OutputFilename").value
        try:
            ifile = open(outfilename)
            lines = ifile.readlines()
            ifile.close()
        except IOError:
            print("Unable to open file {0}.".format(outfilename))
            self.fail()
            return

        # Last line cannot be empty, i.e., before EOF '\n' is not allowed
        lastline = lines[-1]
        self.assertGreater(len(lastline.strip()), 0)

        # Number of lines
        self.assertEqual(len(lines), 2)

        # Check line
        firstdataline = lines[1]
        terms = firstdataline.strip().split("\t")
        self.assertEqual(len(terms), 4)

        # Locate the previous file: appending with a changed set of sample logs
        # makes the algorithm rename the old file with a date suffix and start a new one.

        # Reconstruct the old file name: split the path from the file and insert the timestamp
        fileName, fileExtension = os.path.splitext(outfilename)
        now = datetime.datetime.now()
        nowstr = time.strftime("%Y_%B_%d_%H_%M")
        oldfilename = fileName + "_" + nowstr + fileExtension
        print("Saved old file is {0}. ".format(oldfilename))
        self.assertTrue(os.path.exists(oldfilename))

        # Remove generated files
        os.remove(outfilename)
        os.remove(oldfilename)
        AnalysisDataService.remove("TestMatrixWS")

        return
Example #48
def remove_all_workspaces_from_ads():
    workspaces_on_the_ads = AnalysisDataService.getObjectNames()
    for name in workspaces_on_the_ads:
        AnalysisDataService.remove(name)
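For reference, the same effect as this loop is available in one call, as several tearDown examples in this listing use:

    AnalysisDataService.clear()  # drops every workspace on the ADS in one call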
Example #49
 def tearDown(self):
     AnalysisDataService.Instance().clear()
     plt.close('all')
Example #50
 def tearDown(self):
     if AnalysisDataService.doesExist('ws1'):
         DeleteWorkspace('ws1')
     if AnalysisDataService.doesExist('ws2'):
         DeleteWorkspace('ws2')
Example #51
 def cleanup(self):
     ADS.clear()
     _try_delete_cal_and_focus_dirs(CWDIR)
Example #52
 def _remove_workspaces():
     for element in AnalysisDataService.getObjectNames():
         AnalysisDataService.remove(element)
Example #53
def add_to_ads(workspace_name, workspace):
    return AnalysisDataService.addOrReplace(workspace_name, workspace)
Example #54
 def cleanup(self):
     ADS.clear()
     try:
         os.remove(self._peaks_file)
     except:
         pass
Example #55
def get_ads_workspace(workspace_name):
    return AnalysisDataService.retrieve(workspace_name)
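`retrieve` raises `RuntimeError` when the name is absent (as Example #7 catches), so callers that cannot guarantee existence usually guard it; a sketch:

    def get_ads_workspace_or_none(workspace_name):
        # return None instead of raising when the workspace is missing
        if AnalysisDataService.doesExist(workspace_name):
            return AnalysisDataService.retrieve(workspace_name)
        return None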
Example #56
 def cleanup(self):
     ADS.clear()
     try:
         os.remove(self._filepath)
     except:
         pass
Example #57
    def test_that_instantiated_WorkspaceGroup_can_be_added_to_the_ADS(self):
        ws_group = WorkspaceGroup()
        mtd.add("group1", ws_group)

        self.assertEqual(AnalysisDataService.getObjectNames(), ["group1"])
        self.assertIsInstance(mtd["group1"], WorkspaceGroup)
Example #58
 def cleanup(self):
     ADS.clear()
Example #59
def BatchReduce(
        filename,
        format,
        plotresults=False,
        saveAlgs=None,
        verbose=False,  # noqa
        centreit=False,
        reducer=None,
        combineDet=None,
        save_as_zero_error_free=False):  # noqa
    """
        @param filename: the CSV file with the list of runs to analyse
        @param format: type of file to load, nxs for Nexus, etc.
        @param plotresults: if true and this function is run from Mantidplot a graph will be created for the results of each reduction
        @param saveAlgs: this named algorithm will be passed the name of the results workspace and filename (default = 'SaveRKH').
            Pass a tuple of strings to save to multiple file formats
        @param verbose: set to true to write more information to the log (default=False)
        @param centreit: do centre finding (default=False)
        @param reducer: if to use the command line (default) or GUI reducer object
        @param combineDet: that will be forward to WavRangeReduction (rear, front, both, merged, None)
        @param save_as_zero_error_free: Should the reduced workspaces contain zero errors or not
        @return final_setings: A dictionary with some values of the Reduction - Right Now:(scale, shift)
    """
    if saveAlgs is None:
        saveAlgs = {'SaveRKH': 'txt'}

    # From the old interface
    _ = format
    _ = reducer
    _ = verbose

    if centreit:
        raise RuntimeError(
            "The beam centre finder is currently not supported.")
    if plotresults:
        raise RuntimeError("Plotting the results is currently not supported.")

    # Set up the save algorithms
    save_algs = []

    if saveAlgs:
        for key, _ in list(saveAlgs.items()):
            if key == "SaveRKH":
                save_algs.append(SaveType.RKH)
            elif key == "SaveNexus":
                save_algs.append(SaveType.Nexus)
            elif key == "SaveNistQxy":
                save_algs.append(SaveType.NistQxy)
            elif key == "SaveCanSAS" or key == "SaveCanSAS1D":
                save_algs.append(SaveType.CanSAS)
            elif key == "SaveCSV":
                save_algs.append(SaveType.CSV)
            elif key == "SaveNXcanSAS":
                save_algs.append(SaveType.NXcanSAS)
            else:
                raise RuntimeError(
                    "The save format {0} is not known.".format(key))
        output_mode = OutputMode.Both
    else:
        output_mode = OutputMode.PublishToADS

    # Get the information from the csv file
    batch_csv_parser = BatchCsvParser(filename)
    parsed_batch_entries = batch_csv_parser.parse_batch_file()

    # Get a state with all existing settings
    for parsed_batch_entry in parsed_batch_entries:
        # A new user file. If one is provided it overwrites all settings from the previous user file;
        # otherwise we might have cross-talk between user files.
        if BatchReductionEntry.UserFile in list(parsed_batch_entry.keys()):
            user_file = parsed_batch_entry[BatchReductionEntry.UserFile]
            MaskFile(user_file)

        # Sample scatter
        sample_scatter = parsed_batch_entry[BatchReductionEntry.SampleScatter]
        sample_scatter_period = parsed_batch_entry[
            BatchReductionEntry.SampleScatterPeriod]
        AssignSample(sample_run=sample_scatter, period=sample_scatter_period)

        # Sample transmission
        if (BatchReductionEntry.SampleTransmission in list(
                parsed_batch_entry.keys()) and BatchReductionEntry.SampleDirect
                in list(parsed_batch_entry.keys())):
            sample_transmission = parsed_batch_entry[
                BatchReductionEntry.SampleTransmission]
            sample_transmission_period = parsed_batch_entry[
                BatchReductionEntry.SampleTransmissionPeriod]
            sample_direct = parsed_batch_entry[
                BatchReductionEntry.SampleDirect]
            sample_direct_period = parsed_batch_entry[
                BatchReductionEntry.SampleDirectPeriod]
            TransmissionSample(sample=sample_transmission,
                               direct=sample_direct,
                               period_t=sample_transmission_period,
                               period_d=sample_direct_period)

        # Can scatter
        if BatchReductionEntry.CanScatter in list(parsed_batch_entry.keys()):
            can_scatter = parsed_batch_entry[BatchReductionEntry.CanScatter]
            can_scatter_period = parsed_batch_entry[
                BatchReductionEntry.CanScatterPeriod]
            AssignCan(can_run=can_scatter, period=can_scatter_period)

        # Can transmission
        if (BatchReductionEntry.CanTransmission in list(
                parsed_batch_entry.keys()) and BatchReductionEntry.CanDirect
                in list(parsed_batch_entry.keys())):
            can_transmission = parsed_batch_entry[
                BatchReductionEntry.CanTransmission]
            can_transmission_period = parsed_batch_entry[
                BatchReductionEntry.CanTransmissionPeriod]
            can_direct = parsed_batch_entry[BatchReductionEntry.CanDirect]
            can_direct_period = parsed_batch_entry[
                BatchReductionEntry.CanDirectPeriod]
            TransmissionCan(can=can_transmission,
                            direct=can_direct,
                            period_t=can_transmission_period,
                            period_d=can_direct_period)

        # Name of the output. We need to modify the name according to the setup of the old reduction mechanism
        output_name = parsed_batch_entry[BatchReductionEntry.Output]

        # In addition to the output name the user can specify with combineDet an additional suffix (on top of the
        # suffix that the user can set already -- this was there previously, so we have to provide it)
        use_reduction_mode_as_suffix = combineDet is not None

        # Apply save options
        if save_algs:
            set_save(save_algorithms=save_algs,
                     save_as_zero_error_free=save_as_zero_error_free)

        # Run the reduction for a single batch entry
        reduced_workspace_name = WavRangeReduction(
            combineDet=combineDet,
            output_name=output_name,
            output_mode=output_mode,
            use_reduction_mode_as_suffix=use_reduction_mode_as_suffix)

        # Remove the settings which were very specific for this single reduction which are:
        # 1. The last user file (if any was set)
        # 2. The last scatter entry
        # 3. The last scatter transmission and direct entry (if any were set)
        # 4. The last can scatter ( if any was set)
        # 5. The last can transmission and direct entry (if any were set)
        if BatchReductionEntry.UserFile in list(parsed_batch_entry.keys()):
            director.remove_last_user_file()
        director.remove_last_scatter_sample()

        if (BatchReductionEntry.SampleTransmission in list(
                parsed_batch_entry.keys()) and BatchReductionEntry.SampleDirect
                in list(parsed_batch_entry.keys())):  # noqa
            director.remove_last_sample_transmission_and_direct()

        if BatchReductionEntry.CanScatter in list(parsed_batch_entry.keys()):
            director.remove_last_scatter_can()

        if (BatchReductionEntry.CanTransmission in list(
                parsed_batch_entry.keys()) and BatchReductionEntry.CanDirect
                in list(parsed_batch_entry.keys())):
            director.remove_last_can_transmission_and_direct()

        # Plot the results if that was requested; the flag value 1 is from the old version.
        if plotresults == 1:
            if AnalysisDataService.doesExist(reduced_workspace_name):
                workspace = AnalysisDataService.retrieve(
                    reduced_workspace_name)
                if isinstance(workspace, WorkspaceGroup):
                    for ws in workspace:
                        PlotResult(ws.getName())
                else:
                    PlotResult(workspace.getName())
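A minimal invocation sketch, assuming the ISIS SANS command interface has been configured; the user file and CSV names here are placeholders:

    MaskFile('MASKSANS2Doptions.091A')   # user file; can also be set per row in the CSV
    BatchReduce('sans_batch.csv', 'nxs',
                saveAlgs={'SaveRKH': 'txt', 'SaveNexus': 'nxs'},
                combineDet='rear',
                save_as_zero_error_free=True)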
Example #60
    def test_sortRecordFileOverride(self):
        """ Test to append logs and sort the log record file
        """
        # Record 0
        ws1 = self.createTestWorkspace(run=10000)
        AnalysisDataService.addOrReplace("TestMatrixWS1", ws1)

        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS1",
            OutputFilename = "TestRecord10.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge", "ProtonCharge-Avg"],
            SampleLogOperation = [None, None, "min", "average"],
            FileMode = "new",
            FileFormat = "tab",
            OverrideLogValue = ["Duration", "12345", "ProtonCharge-Avg", "32.921"],
            OrderByTitle = 'RUN')


        # Record 1
        ws2 = self.createTestWorkspace(run=11000)
        AnalysisDataService.addOrReplace("TestMatrixWS2", ws2)

        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS2",
            OutputFilename = "TestRecord10.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge", "ProtonCharge-Avg"],
            SampleLogOperation = [None, None, "min", "average"],
            FileMode = "fastappend",
            FileFormat = "tab",
            OverrideLogValue = ["Duration", "23456", "ProtonCharge-Avg", "22.921"],
            OrderByTitle = 'RUN')

        # Record 2
        ws3 = self.createTestWorkspace(run=10023)
        AnalysisDataService.addOrReplace("TestMatrixWS3", ws3)

        alg_test = run_algorithm("ExportExperimentLog",
            InputWorkspace = "TestMatrixWS3",
            OutputFilename = "TestRecord10.txt",
            SampleLogNames = ["run_number", "duration", "proton_charge", "proton_charge"],
            SampleLogTitles = ["RUN", "Duration", "ProtonCharge", "ProtonCharge-Avg"],
            SampleLogOperation = [None, None, "min", "average"],
            FileMode = "fastappend",
            FileFormat = "tab",
            OverrideLogValue = ["Duration", "34567", "ProtonCharge-Avg", "12.921"],
            OrderByTitle = 'RUN')

        # Verify
        # Locate file
        outfilename = alg_test.getProperty("OutputFilename").value
        try:
            ifile = open(outfilename)
            lines = ifile.readlines()
            ifile.close()
        except IOError:
            print("Unable to open file {0}.".format(outfilename))
            self.fail()
            return

        # Last line cannot be empty, i.e., before EOF '\n' is not allowed
        lastline = lines[-1]
        self.assertGreater(len(lastline.strip()), 0)

        # Number of lines
        self.assertEqual(len(lines), 4)

        # Check value
        for i in range(1, 3):
            currline = lines[i]
            curr_run = int(currline.split("\t")[0])
            curr_min = float(currline.split("\t")[2])
            nextline = lines[i+1]
            next_run = int(nextline.split('\t')[0])
            next_min = float(nextline.split('\t')[2])
            self.assertLess(curr_run, next_run)
            self.assertLess(curr_min, next_min)

        line2 = lines[2]
        terms = line2.split("\t")
        duration = int(terms[1])
        self.assertEqual(duration, 34567)
        pchargeavg = float(terms[3])
        self.assertAlmostEqual(pchargeavg, 12.921)


        # Remove generated files
        os.remove(outfilename)
        AnalysisDataService.remove("TestMatrixWS1")
        AnalysisDataService.remove("TestMatrixWS2")
        AnalysisDataService.remove("TestMatrixWS3")

        return