Example #1
def test_simple(self):
     input_x = np.array([0.1,0.2,0.3,0.4,0.5])
     input_y = np.array([1.,2,3,2,1])
     input_e = np.array([0.1,0.14,0.17,0.14,0.1])
     sq = CreateWorkspace(DataX=input_x,
                          DataY=input_y,
                          DataE=input_e,
                          UnitX='MomentumTransfer')
     SetSampleMaterial(InputWorkspace=sq, ChemicalFormula='Ar')
     fq = PDConvertReciprocalSpace(InputWorkspace=sq,
                                   To='F(Q)',
                                   From='S(Q)')
     x=fq.readX(0)
     y=fq.readY(0)
     e=fq.readE(0)
     self.assertTrue(np.array_equal(x, input_x))
     self.assertTrue(np.array_equal(y, input_x*(input_y-1)))
     self.assertTrue(np.array_equal(e, input_x*input_e))
     fkq = PDConvertReciprocalSpace(InputWorkspace=sq,
                                    To='FK(Q)',
                                    From='S(Q)')
     x=fkq.readX(0)
     y=fkq.readY(0)
     e=fkq.readE(0)
     bsq = sq.sample().getMaterial().cohScatterLengthSqrd()
     self.assertTrue(np.array_equal(x, input_x))
     self.assertTrue(np.allclose(y, bsq*(input_y-1)))
     self.assertTrue(np.allclose(e, bsq*input_e))
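The assertions above encode the standard total-scattering relations F(Q) = Q*(S(Q)-1) and FK(Q) = <b_coh>^2*(S(Q)-1), with the errors scaled by the same factors. A numpy-only sketch of that algebra (illustrative; 'bsq' is an assumed value, whereas the test reads it from the sample material):

import numpy as np

q = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
sq = np.array([1., 2., 3., 2., 1.])
e_sq = np.array([0.1, 0.14, 0.17, 0.14, 0.1])
bsq = 0.4  # assumed coherent scattering length squared; not the real Ar value

fq = q * (sq - 1.0)     # F(Q) = Q * (S(Q) - 1)
e_fq = q * e_sq         # errors scale with the same factor
fkq = bsq * (sq - 1.0)  # FK(Q) = <b_coh>^2 * (S(Q) - 1)
e_fkq = bsq * e_sq
print(fq, e_fq, fkq, e_fkq)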
Example #2
class SampleTest(unittest.TestCase):

    def setUp(self):
        self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")

    def test_geometry_getters_and_setters(self):
        sample = self._ws.sample()

        sample.setThickness(12.5)
        self.assertEqual(sample.getThickness(), 12.5)
        sample.setHeight(10.2)
        self.assertEqual(sample.getHeight(), 10.2)
        sample.setWidth(5.9)
        self.assertEqual(sample.getWidth(), 5.9)

    def test_crystal_structure_handling(self):
        sample = self._ws.sample()

        self.assertEqual(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

        cs = CrystalStructure('5.43 5.43 5.43',
                              'F d -3 m',
                              'Si 0 0 0 1.0 0.01')

        sample.setCrystalStructure(cs)

        self.assertEqual(sample.hasCrystalStructure(), True)

        cs_from_sample = sample.getCrystalStructure()

        self.assertEqual(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
        self.assertEqual(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
        self.assertEqual(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
        self.assertEqual(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])


        sample.clearCrystalStructure()

        self.assertEqual(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

    def test_material(self):
        SetSampleMaterial(self._ws,"Al2 O3",SampleMassDensity=4)
        material = self._ws.sample().getMaterial()

        self.assertAlmostEqual(material.numberDensity, 0.0236, places=4)
        self.assertAlmostEqual(material.relativeMolecularMass(), 101.961, places=3)

        atoms, numatoms = material.chemicalFormula()

        self.assertEqual(len(atoms), len(numatoms))
        self.assertEqual(len(atoms), 2)
        self.assertEqual(numatoms[0], 2)
        self.assertEqual(numatoms[1], 3)

        xs0 = atoms[0].neutron()
        xs1 = atoms[1].neutron()
        xs = ( xs0['coh_scatt_xs']*2 + xs1['coh_scatt_xs']*3 ) / 5
        self.assertAlmostEqual(material.cohScatterXSection(), xs, places=4)
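The last assertion computes the stoichiometry-weighted coherent cross-section by hand for Al2 O3. A generic version using only the accessors exercised above (a sketch, not part of the original test):

def weighted_coh_xs(material):
    # Stoichiometry-weighted average of the per-atom coherent scattering cross-sections.
    atoms, numatoms = material.chemicalFormula()
    return sum(a.neutron()['coh_scatt_xs'] * n for a, n in zip(atoms, numatoms)) / sum(numatoms)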
Example #3
    def test_disable_history(self):
        ws_name = '__tmp_test_algorithm_history'
        ws = CreateWorkspace([0, 1, 2], [0, 1, 2], OutputWorkspace=ws_name)
        alg = self._run_algorithm('ParentAlg', child_algorithm=True, record_history=False, Workspace=ws_name)

        history = ws.getHistory()
        alg_hists = history.getAlgorithmHistories()

        self.assertEqual(history.size(), 1)
        self.assertEqual(len(alg_hists), 1)

    def createTestWorkspace(self):
        """ Create a workspace for testing against with ideal log values
        """
        from mantid.simpleapi import CreateWorkspace
        from mantid.simpleapi import AddSampleLog
        from time import gmtime, strftime, mktime
        import numpy as np
      
        # Create a matrix workspace
        x = np.array([1.,2.,3.,4.])
        y = np.array([1.,2.,3.])
        e = np.sqrt(np.array([1.,2.,3.]))
        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')

        # Add run_start 
        tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime())))
        AddSampleLog(Workspace=wksp,LogName='run_start',LogText=str(tmptime))

        tsp_a = kernel.FloatTimeSeriesProperty("proton_charge")
        tsp_b = kernel.FloatTimeSeriesProperty("SensorA")
        for i in np.arange(25):
            tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime()) + i))
            tsp_a.addValue(tmptime, 1.0 * i * i)
            tsp_b.addValue(tmptime, 1.234 * (i + 1))

        wksp.mutableRun()['run_number'] = "23456"
        wksp.mutableRun()['duration'] = 342.3
        wksp.mutableRun()['SensorA'] = tsp_b
        wksp.mutableRun()['proton_charge'] = tsp_a

        return wksp
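To sanity-check the helper, the logs can be read back off the run object. A hedged sketch (Run.getProperty and the time-series size()/value accessors are assumed from standard Mantid usage):

wksp = createTestWorkspace()                 # however the helper above is invoked
run = wksp.run()
print(run.getProperty('run_number').value)   # "23456"
print(run.getProperty('duration').value)     # 342.3
print(run.getProperty('SensorA').size())     # 25 time-series entries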
 def test_container_rebinning_enabled(self):
     xs = numpy.array([0.0, 1.0, 0.0, 1.1])
     ys = numpy.array([2.2, 3.3])
     sample_1 = CreateWorkspace(DataX=xs, DataY=ys, NSpec=2,
                                UnitX='Wavelength')
     xs = numpy.array([-1.0, 0.0, 1.0, 2.0, -1.0, 0.0, 1.0, 2.0])
     ys = numpy.array([0.101, 0.102, 0.103, 0.104, 0.105, 0.106])
     container_1 = CreateWorkspace(DataX=xs, DataY=ys, NSpec=2,
                                   UnitX='Wavelength')
     corrected = ApplyPaalmanPingsCorrection(SampleWorkspace=sample_1,
                                             CanWorkspace=container_1,
                                             RebinCanToSample=True)
     self.assertTrue(numpy.all(sample_1.extractY() > corrected.extractY()))
     DeleteWorkspace(sample_1)
     DeleteWorkspace(container_1)
     DeleteWorkspace(corrected)
 def test_container_input_workspace_not_unintentionally_rebinned(self):
     xs = numpy.array([0.0, 1.0, 0.0, 1.1])
     ys = numpy.array([2.2, 3.3])
     sample_1 = CreateWorkspace(DataX=xs, DataY=ys, NSpec=2,
                                UnitX='Wavelength')
     ys = numpy.array([0.11, 0.22])
     container_1 = CreateWorkspace(DataX=xs, DataY=ys, NSpec=2,
                                   UnitX='Wavelength')
     corrected = ApplyPaalmanPingsCorrection(SampleWorkspace=sample_1,
                                             CanWorkspace=container_1)
     numHisto = container_1.getNumberHistograms()
     for i in range(numHisto):
         container_xs = container_1.readX(i)
         for j in range(len(container_xs)):
             self.assertEqual(container_xs[j], xs[i * numHisto + j])
     DeleteWorkspace(sample_1)
     DeleteWorkspace(container_1)
     DeleteWorkspace(corrected)
Example #7
 def _parseStructure(self, structure):
     from mantid.simpleapi import mtd, LoadCIF, CreateWorkspace, DeleteWorkspace
     import uuid
     self._fromCIF = False
     if isinstance(structure, string_types):
         if mtd.doesExist(structure):
             try:
                 self._cryst = self._copyCrystalStructure(mtd[structure].sample().getCrystalStructure())
                 self._getUniqueAtoms()
             except RuntimeError:
                 raise ValueError('Workspace "%s" has no valid CrystalStructure' % (structure))
         else:
             tmpws = CreateWorkspace(1, 1, OutputWorkspace='_tempPointCharge_'+str(uuid.uuid4())[:8])
             try:
                 LoadCIF(tmpws, structure)
                 # Attached CrystalStructure object gets destroyed when workspace is deleted
                 self._cryst = self._copyCrystalStructure(tmpws.sample().getCrystalStructure())
             except:
                 DeleteWorkspace(tmpws)
                 raise
             else:
                 DeleteWorkspace(tmpws)
                 self._getUniqueAtoms()
     elif isinstance(structure, list):
         if (len(structure) == 4 and all([isinstance(x, (int, float)) for x in structure])):
             structure = [structure]
         if (all([isinstance(x, list) and (len(x) == 4) and
            all([isinstance(y, (int, float)) for y in x]) for x in structure])):
             self._ligands = structure
         else:
             raise ValueError('Incorrect ligands direct input. Must be a 4-element list or a list '
                              'of 4-element lists. Each ligand must be of the form [charge, x, y, z]')
     elif hasattr(structure, 'getScatterers'):
         self._cryst = structure
         self._getUniqueAtoms()
     else:
         if not hasattr(structure, 'sample'):
             raise ValueError('First input must be a Mantid CrystalStructure object, workspace or string '
                              '(name of CIF file or workspace)')
         try:
             self._cryst = self._copyCrystalStructure(structure.sample().getCrystalStructure())
             self._getUniqueAtoms()
         except RuntimeError:
             raise ValueError('Workspace "%s" has no valid CrystalStructure' % (structure.name()))
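Putting the branches together, the parser accepts four input forms. A hedged usage sketch (PointCharge is the class this parser belongs to elsewhere in these examples; the workspace name and structure object below are hypothetical):

from CrystalField import PointCharge

pc = PointCharge([[-2, 0, 0, -4], [-2, 0, 0, 4]], 'Nd')  # ligand list, each [charge, x, y, z]
pc = PointCharge('Sm2O3.cif')                            # CIF file, loaded via a temporary workspace
pc = PointCharge('workspace_with_structure')             # name of a workspace carrying a CrystalStructure
pc = PointCharge(my_crystal_structure)                   # CrystalStructure object (or a workspace object)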
Example #8
class SampleTest(unittest.TestCase):

    def setUp(self):
        self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")

    def test_geometry_getters_and_setters(self):
        sample = self._ws.sample()

        sample.setThickness(12.5)
        self.assertEqual(sample.getThickness(), 12.5)
        sample.setHeight(10.2)
        self.assertEqual(sample.getHeight(), 10.2)
        sample.setWidth(5.9)
        self.assertEqual(sample.getWidth(), 5.9)

    def test_crystal_structure_handling(self):
        sample = self._ws.sample()

        self.assertEqual(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

        cs = CrystalStructure('5.43 5.43 5.43',
                              'F d -3 m',
                              'Si 0 0 0 1.0 0.01')

        sample.setCrystalStructure(cs)

        self.assertEqual(sample.hasCrystalStructure(), True)

        cs_from_sample = sample.getCrystalStructure()

        self.assertEqual(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
        self.assertEqual(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
        self.assertEqual(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
        self.assertEqual(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])


        sample.clearCrystalStructure()

        self.assertEqual(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)
    def test_simple(self):
        input_x = np.array([0.1,0.2,0.3,0.4,0.5])
        input_y = np.array([1.,2,3,2,1])
        input_e = np.array([0.1,0.14,0.17,0.14,0.1])
        Gr = CreateWorkspace(DataX=input_x,
                             DataY=input_y,
                             DataE=input_e)
        SetSampleMaterial(InputWorkspace=Gr, ChemicalFormula='Ar')
        GKr = PDConvertRealSpace(InputWorkspace=Gr,
                                 To='GK(r)',
                                 From='G(r)')
        x=GKr.readX(0)
        y=GKr.readY(0)
        e=GKr.readE(0)

        bsq = Gr.sample().getMaterial().cohScatterLengthSqrd()
        rho = Gr.sample().getMaterial().numberDensity
        factor = bsq / (4. * np.pi *rho)
        self.assertTrue(np.array_equal(x, input_x))
        self.assertTrue(np.allclose(y, factor*input_y/input_x))
        self.assertTrue(np.allclose(e, factor*input_e/input_x))
    def createTestWorkspace(self):
        """ Create a workspace for testing against with ideal log values
        """
        from mantid.simpleapi import CreateWorkspace
        from mantid.simpleapi import AddSampleLog
        from time import gmtime, strftime, mktime
        import numpy as np

        # Create a matrix workspace
        x = np.array([1.0, 2.0, 3.0, 4.0])
        y = np.array([1.0, 2.0, 3.0])
        e = np.sqrt(np.array([1.0, 2.0, 3.0]))
        wksp = CreateWorkspace(DataX=x, DataY=y, DataE=e, NSpec=1, UnitX="TOF")

        # Add run_start
        tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime())))
        AddSampleLog(Workspace=wksp, LogName="run_start", LogText=str(tmptime))

        tsp_a = kernel.FloatTimeSeriesProperty("SensorA")
        tsp_b = kernel.FloatTimeSeriesProperty("SensorB")
        tsp_c = kernel.FloatTimeSeriesProperty("SensorC")
        for i in arange(25):
            tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime()) + i))
            tsp_a.addValue(tmptime, 1.0 * i * i)
            tsp_b.addValue(tmptime, 2.0 * i * i)
            tsp_c.addValue(tmptime, 3.0 * i * i)

        wksp.mutableRun()["SensorA"] = tsp_a
        wksp.mutableRun()["SensorB"] = tsp_b
        wksp.mutableRun()["SensorC"] = tsp_c

        return wksp
Example #11
    def test_nested_history(self):
        ws_name = '__tmp_test_algorithm_history'
        ws = CreateWorkspace([0, 1, 2], [0, 1, 2], OutputWorkspace=ws_name)
        alg = self._run_algorithm("ParentAlg", Workspace=ws_name)

        history = ws.getHistory()
        alg_hists = history.getAlgorithmHistories()

        self.assertEqual(history.size(), 2)
        self.assertEqual(len(alg_hists), 2)

        parent_alg = history.getAlgorithmHistory(1)

        self.assertEquals(parent_alg.name(), "ParentAlg")
        self.assertEquals(parent_alg.version(), 1)
        self.assertEquals(parent_alg.childHistorySize(), 1)

        child_alg = parent_alg.getChildAlgorithmHistory(0)

        self.assertEquals(child_alg.name(), "ChildAlg")
        self.assertEquals(child_alg.version(), 1)
        self.assertEquals(child_alg.childHistorySize(), 0)
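The same accessors generalise to deeper nesting. A small sketch (using only the methods exercised above) that walks and prints the whole history tree:

def print_history_tree(alg_hist, depth=0):
    # Recursively print an algorithm history and its child algorithm histories.
    print('  ' * depth + '{} v{}'.format(alg_hist.name(), alg_hist.version()))
    for i in range(alg_hist.childHistorySize()):
        print_history_tree(alg_hist.getChildAlgorithmHistory(i), depth + 1)

for hist in ws.getHistory().getAlgorithmHistories():
    print_history_tree(hist)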
Example #12
class SampleTest(unittest.TestCase):

    def setUp(self):
        self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")

    def test_geometry_getters_and_setters(self):
        sample = self._ws.sample()

        sample.setThickness(12.5)
        self.assertEqual(sample.getThickness(), 12.5)
        sample.setHeight(10.2)
        self.assertEqual(sample.getHeight(), 10.2)
        sample.setWidth(5.9)
        self.assertEqual(sample.getWidth(), 5.9)
Example #13
    def test_create_with_1D_numpy_array(self):
        x = np.array([1.,2.,3.,4.])
        y = np.array([1.,2.,3.])
        e = np.sqrt(np.array([1.,2.,3.]))

        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')
        self.assertTrue(isinstance(wksp, MatrixWorkspace))
        self.assertEqual(wksp.getNumberHistograms(), 1)

        self.assertEqual(len(wksp.readY(0)), len(y))
        self.assertEqual(len(wksp.readX(0)), len(x))
        self.assertEqual(len(wksp.readE(0)), len(e))

        for index in range(len(y)):
            self.assertEqual(wksp.readY(0)[index], y[index])
            self.assertEqual(wksp.readE(0)[index], e[index])
            self.assertEqual(wksp.readX(0)[index], x[index])
        # Last X value
        self.assertEqual(wksp.readX(0)[len(x)-1], x[len(x)-1])
        AnalysisDataService.remove("wksp")
Example #14
    def test_create_with_2D_numpy_array(self):
        x = np.array([1.,2.,3.,4.])
        y = np.array([[1.,2.,3.],[4.,5.,6.]])
        e = np.sqrt(y)

        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=2,UnitX='TOF')
        self.assertTrue(isinstance(wksp, MatrixWorkspace))
        self.assertEqual(wksp.getNumberHistograms(), 2)

        for i in [0,1]:
            for j in range(len(y[0])):
                self.assertEqual(wksp.readY(i)[j], y[i][j])
                self.assertEqual(wksp.readE(i)[j], e[i][j])
                self.assertEqual(wksp.readX(i)[j], x[j])
            # Last X value
            self.assertEqual(wksp.readX(i)[len(x)-1], x[len(x)-1])

        AnalysisDataService.remove("wksp")
Example #15
def create_group_populated_by_two_workspace():
    group = MuonGroup(group_name="group1")
    counts_workspace_22222 = CreateWorkspace([0], [0])
    asymmetry_workspace_22222 = CreateWorkspace([0], [0])
    asymmetry_workspace_unnorm_22222 = CreateWorkspace([0], [0])

    group.update_workspaces([22222], counts_workspace_22222,
                            asymmetry_workspace_22222,
                            asymmetry_workspace_unnorm_22222, False)
    group.show_raw([22222], 'counts_name_22222', 'asymmetry_name_22222',
                   'asymmetry_name_22222_unnorm')
    counts_workspace_33333 = CreateWorkspace([0], [0])
    asymmetry_workspace_33333 = CreateWorkspace([0], [0])
    asymmetry_workspace_unnorm_33333 = CreateWorkspace([0], [0])

    group.update_workspaces([33333], counts_workspace_33333,
                            asymmetry_workspace_33333,
                            asymmetry_workspace_unnorm_33333, False)
    group.show_raw([33333], 'counts_name_33333', 'asymmetry_name_33333',
                   'asymmetry_name_33333_unnorm')

    return group
Example #16
    def test_with_data_from_other_workspace(self):
        wsname = 'LOQ'
        alg = run_algorithm('Load', Filename='LOQ48127.raw', OutputWorkspace=wsname, SpectrumMax=2, child=True)
        loq = alg.getProperty("OutputWorkspace").value
        
        x = loq.extractX()
        y = loq.extractY()
        e = loq.extractE()
        
        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=2,UnitX='Wavelength')
        self.assertTrue(isinstance(wksp, MatrixWorkspace))
        self.assertEqual(wksp.getNumberHistograms(), 2)

        for i in [0,1]:
            for j in range(len(y[0])):
                self.assertEqual(wksp.readY(i)[j], loq.readY(i)[j])
                self.assertEqual(wksp.readE(i)[j], loq.readE(i)[j])
                self.assertEqual(wksp.readX(i)[j], loq.readX(i)[j])
            # Last X value
            self.assertEqual(wksp.readX(i)[len(x)-1], loq.readX(i)[len(x)-1])

        AnalysisDataService.remove("wksp")
Example #17
    def test_do_simultaneous_fit_adds_multi_input_workspace_to_fit_context(self):
        # create function
        single_func = ';name=FlatBackground,$domains=i,A0=0'
        multi_func = 'composite=MultiDomainFunction,NumDeriv=1' + single_func + single_func + ";"
        trial_function = FunctionFactory.createInitialized(multi_func)
        x_data = range(0, 100)
        y_data = [5 + x * x for x in x_data]
        workspace1 = CreateWorkspace(x_data, y_data)
        workspace2 = CreateWorkspace(x_data, y_data)
        parameter_dict = {
            'Function': trial_function,
            'InputWorkspace': [workspace1.name(),
                               workspace2.name()],
            'Minimizer': 'Levenberg-Marquardt',
            'StartX': [0.0] * 2,
            'EndX': [100.0] * 2,
            'EvaluationType': 'CentrePoint'
        }
        self.model.do_simultaneous_fit(parameter_dict, global_parameters=[])

        fit_context = self.model.context.fitting_context
        self.assertEqual(1, len(fit_context))
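The multi-domain function string above is just a single-domain fragment repeated once per input workspace. A sketch generalising the construction to N domains (the helper name is illustrative):

def build_multi_domain_function(n_domains):
    # Repeat one flat-background domain fragment n times inside a MultiDomainFunction.
    single_func = ';name=FlatBackground,$domains=i,A0=0'
    return 'composite=MultiDomainFunction,NumDeriv=1' + single_func * n_domains + ';'

trial_function = FunctionFactory.createInitialized(build_multi_domain_function(2))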
Example #18
    def PyExec(self):
        """ Alg execution. """
        instrument         = self.getProperty(INSTRUMENT_PROP).value
        run_number         = self.getProperty(RUN_NUM_PROP).value
        fit_deadtime       = self.getProperty(FIT_DEADTIME_PROP).value
        fix_phases         = self.getProperty(FIX_PHASES_PROP).value
        default_level      = self.getProperty(DEFAULT_LEVEL).value
        sigma_looseness    = self.getProperty(SIGMA_LOOSENESS_PROP).value
        groupings_file     = self.getProperty(GROUPINGS_PROP).value
        in_phases_file     = self.getProperty(PHASES_PROP).value
        in_deadtimes_file  = self.getProperty(DEADTIMES_PROP).value
        out_phases_file    = self.getProperty(PHASES_RESULT_PROP).value
        out_deadtimes_file = self.getProperty(DEADTIMES_RESULT_PROP).value

        isis = config.getFacility('ISIS')
        padding = isis.instrument(instrument).zeroPadding(0)
        run_name = instrument + str(run_number).zfill(padding)

        try:
            run_number = int(run_number)
        except ValueError:
            raise RuntimeError("'%s' is not an integer run number." % run_number)
        try:
            run_file_path = FileFinder.findRuns(run_name)[0]
        except Exception:
            raise RuntimeError("Unable to find file for run %i" % run_number)

        if groupings_file == "":
            groupings_file = DEFAULT_GROUPINGS_FILENAME % instrument

        # Load data and other info from input files.

        def temp_hidden_ws_name():
            """Generate a unique name for a temporary, hidden workspace."""
            selection = string.ascii_lowercase + string.ascii_uppercase + string.digits
            return '__temp_MaxEnt_' + ''.join(random.choice(selection) for _ in range(20))

        input_data_ws_name = temp_hidden_ws_name()
        LoadMuonNexus(Filename=run_file_path, OutputWorkspace=input_data_ws_name)
        input_data_ws = mtd[input_data_ws_name]
        
        if isinstance(input_data_ws, WorkspaceGroup):
            Logger.get("MaxEnt").warning("Multi-period data is not currently supported.  Just using first period.")
            input_data_ws = input_data_ws[0]

        groupings_ws_name = temp_hidden_ws_name()
        LoadDetectorsGroupingFile(InputFile=groupings_file, OutputWorkspace=groupings_ws_name)
        groupings_ws = mtd[groupings_ws_name]

        def yield_floats_from_file(path):
            """Given a path to a file with a float on each line, will return
            the floats one at a time.  Throws otherwise.  Strips whitespace
            and ignores empty lines."""
            with open(path, 'r') as f:
                for i, line in enumerate(line.strip() for line in f):
                    if line == "":
                        continue
                    try:
                        yield float(line)
                    except ValueError:
                        raise RuntimeError("Parsing error in '%s': Line %d: '%s'." % 
                                           (path, i, line))

        input_phases         = np.array(list(yield_floats_from_file(in_phases_file)))
        input_phases_size    = len(input_phases)
        input_deadtimes      = np.array(list(yield_floats_from_file(in_deadtimes_file)))
        input_deadtimes_size = len(input_deadtimes)

        n_bins      = input_data_ws.blocksize()
        n_detectors = input_data_ws.getNumberHistograms()

        def time_value_to_time_channel_index(value):
            """Given a time value, will return the index of the time channel in
            which the value falls."""
            bin_width = input_data_ws.readX(0)[1] - input_data_ws.readX(0)[0]
            diff = value - input_data_ws.readX(0)[0]
            return int(diff / bin_width)

        # Mantid corrects for time zero on loading, so we want to find the actual channels
        # where 0.0 occurs, and where we have values of 0.1 onwards.
        time_zero_channel  = time_value_to_time_channel_index(0.0)
        first_good_channel = time_value_to_time_channel_index(0.1)

        input_data = np.concatenate([input_data_ws.readY(i) for i in range(n_detectors)])

        groupings = [groupings_ws.readY(row)[0] for row in range(groupings_ws.getNumberHistograms())]
        groupings = list(map(int, groupings))
        n_groups = len(set(groupings))

        # Cleanup.

        input_data_ws.delete()
        groupings_ws.delete()

        # We're faced with the problem of providing more than a dozen parameters to
        # the Fortran, which can be a bit messy (especially on the Fortran side of
        # things where we need to make "Cf2py" declarations).  A cleaner way of
        # doing this is to simply pass in a few callbacks -- one for each input
        # type -- and have the Fortran provide the name of the variable it wants
        # to the callback.  The callback will then look up the corresponding value
        # and feed it back to the Fortran.
        #
        # We also have a callback for printing to the results log.

        self.int_vars = {
            "RunNo"       : run_number,
            "frames"      : FRAMES,
            "res"         : RES,
            "Tzeroch"     : time_zero_channel,
            "firstgoodch" : first_good_channel,
            "ptstofit"    : POINTS_TO_FIT,
            "histolen"    : n_bins,
            "nhisto"      : n_detectors,
            "n_groups"    : n_groups,
        }

        self.float_vars = {
            "deflevel" : default_level,
            "sigloose" : sigma_looseness,
        }

        self.bool_vars = {
            "fixphase" : fix_phases,
            "fitdt"    : fit_deadtime,
        }

        self._assert_map_values_are_of_expected_type()

        def lookup(par_name, par_map, default):
            """The basis of the callbacks passed to the Fortran.  Given a parameter
            name it will consult the appropriate variable map, and return the
            corresponding value of the parameter.  Else return a default and log a
            warning if a parameter with the name does not exist."""
            par_name = par_name.strip()
            if par_name in par_map:
                return par_map[par_name]
            msg = """WARNING: tried to find a value for parameter with name %s but
            could not find one.  Default of \"%s\" provided.""" % (par_name, default)
            Logger.get("MaxEnt").warning(msg)
            return default

        def log(priority, message):
            """Log the given message with given priority."""
            try:
                logger = getattr(Logger.get("MaxEnt"), priority.lower())
            except AttributeError:
                # If we don't recognise the priority, use warning() as a default.
                logger = getattr(Logger.get("MaxEnt"), "warning")
            logger(message)
            return True

        # The Fortran expects arrays to be of a certain size, so any arrays that
        # aren't big enough need to be padded.
        input_phases    = self._pad_to_length_with_zeros(input_phases, MAX_HISTOS)
        input_deadtimes = self._pad_to_length_with_zeros(input_deadtimes, MAX_HISTOS)
        input_data      = self._pad_to_length_with_zeros(input_data, MAX_INPUT_DATA_SIZE)
        groupings       = self._pad_to_length_with_zeros(groupings, MAX_HISTOS)

        # TODO: Return the contents of "NNNNN.max", instead of writing to file.
        f_out, fchan_out, output_deadtimes, output_phases, chi_sq = maxent.mantid_maxent(
            # Input data and other info:
            input_data,
            groupings,
            input_deadtimes,
            input_phases,
            # Variable-lookup callbacks:
            lambda par_name: lookup(par_name, self.int_vars,   0),
            lambda par_name: lookup(par_name, self.float_vars, 0.0),
            lambda par_name: lookup(par_name, self.bool_vars,  False),
            # Callback for logging:
            log
        )

        def write_items_to_file(path, items):
            """Given a path to a file and a list of items, will write the items
            to the file, one on each line."""
            with open(path, 'w') as f:
                for item in items:
                    f.write(str(item) + "\n")

        # Chop the padded outputs back down to the correct size.
        output_phases    = output_phases[:input_phases_size]
        output_deadtimes = output_deadtimes[:input_deadtimes_size]
        input_phases     = input_phases[:input_phases_size]
        input_deadtimes  = input_deadtimes[:input_deadtimes_size]
        fchan_out        = fchan_out[:n_bins]
        f_out            = f_out[:n_bins]

        write_items_to_file(out_phases_file,    output_phases)
        write_items_to_file(out_deadtimes_file, output_deadtimes)

        log_output = "\nDead times in:\n" + str(input_deadtimes) + "\n" +\
                     "\nDead times out:\n" + str(output_deadtimes) + "\n" +\
                     "\nPhases in:\n" +      str(input_phases) + "\n" +\
                     "\nPhases out:\n" +     str(output_phases) + "\n" + \
                     "\nGroupings:\n" +      str(groupings) + "\n" +\
                     "\nChi Squared:\n" +    str(chi_sq) + "\n" +\
                     "\nInput variables:\n"

        for type_map in self.int_vars, self.float_vars, self.bool_vars:
            for name, value in type_map.items():
                log_output += str(name) + " = " + str(value) + "\n"

        Logger.get("MaxEnt").notice(log_output)

        # Generate our own output ws name if the user has not provided one.
        out_ws_name = self.getPropertyValue(OUT_WS_PROP)
        if out_ws_name == "":
            out_ws_name = run_name + "; MaxEnt"
            self.setPropertyValue(OUT_WS_PROP, out_ws_name)

        out_ws = CreateWorkspace(OutputWorkspace=out_ws_name,
                                 DataX=fchan_out[:n_bins],
                                 DataY=f_out[:n_bins])
        self.setProperty(OUT_WS_PROP, out_ws)

        # MaxEnt inputs table.
        input_table_name = run_name + "; MaxEnt Input"
        input_table = CreateEmptyTableWorkspace(OutputWorkspace = input_table_name)
        input_table.addColumn("str", "Name")
        input_table.addColumn("str", "Value")
        inputs = itertools.chain(self.int_vars.items(), 
                                 self.float_vars.items(),
                                 self.bool_vars.items())
        for name, value in inputs:
            input_table.addRow([str(name), str(value)])

        # Deadtimes and phases input/output table.
        dead_phases_table_name = run_name + "; MaxEnt Deadtimes & Phases"
        dead_phases_table = CreateEmptyTableWorkspace(OutputWorkspace = dead_phases_table_name)
        for column_name in "Deadtimes In", "Deadtimes Out", "Phases In", "Phases Out":
          dead_phases_table.addColumn("double", column_name)
        for row in zip(input_deadtimes, output_deadtimes, input_phases, output_phases):
            dead_phases_table.addRow(list(map(float, row)))

        # Chi-squared output table.
        chisq_table_name = run_name + "; MaxEnt Chi^2"
        chisq_table = CreateEmptyTableWorkspace(OutputWorkspace = chisq_table_name)
        chisq_table.addColumn("int", "Cycle")
        for iteration in range(10):
            chisq_table.addColumn("double", "Iter " + str(iteration + 1))
        for cycle, data in enumerate(chi_sq):
            chisq_table.addRow([cycle + 1] + list(map(float,data)))

        all_output_ws = [input_table_name,
                         dead_phases_table_name,
                         chisq_table_name,
                         out_ws_name]

        # The output workspaces of this algorithm belong in the same groups
        # that are created by the muon interface.  If the appropriate group
        # doesn't exist already then it needs to be created.
        if run_name not in mtd:
            GroupWorkspaces(InputWorkspaces = all_output_ws,
                            OutputWorkspace = run_name)
        else:
            group = mtd[run_name]
            for output_ws in all_output_ws:
                if not group.contains(output_ws):
                    group.add(output_ws)

        out_ws.getAxis(0).getUnit().setLabel("Field", "G")
        out_ws.setYUnitLabel("P(B)")

        if INSIDE_MANTIDPLOT:
            mantidplot.plotSpectrum(out_ws, 0)
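_pad_to_length_with_zeros is called throughout but not shown in this snippet. A minimal numpy sketch of what such a helper might look like (an assumption, not the original implementation):

import numpy as np

def _pad_to_length_with_zeros(values, length):
    # Right-pad a 1-D sequence with zeros up to 'length'; longer inputs pass through.
    values = np.asarray(values)
    if values.size >= length:
        return values
    return np.pad(values, (0, length - values.size), mode='constant')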
Example #19
 def test_scale_is_correct_on_pcolourmesh_of_ragged_workspace(self):
     ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8],
                          DataY=[2] * 8,
                          NSpec=2)
     fig = pcolormesh_from_names([ws])
     self.assertEqual((1.8, 2.2), fig.axes[0].images[0].get_clim())
Example #20
 def setUp(self):
     self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")
Example #21
 def create_workspace(self, name):
     x_range = range(1, 100)
     y_range = [x * x for x in x_range]
     return CreateWorkspace(DataX=x_range,
                            DataY=y_range,
                            OutputWorkspace=name)
Example #22
 def setUpClass(cls):
     cls.g1da = config['graph1d.autodistribution']
     config['graph1d.autodistribution'] = 'On'
     cls.ws2d_histo = CreateWorkspace(DataX=[10, 20, 30, 10, 20, 30],
                                      DataY=[2, 3, 4, 5],
                                      DataE=[1, 2, 3, 4],
                                      NSpec=2,
                                      Distribution=True,
                                      UnitX='Wavelength',
                                      VerticalAxisUnit='DeltaE',
                                      VerticalAxisValues=[4, 6, 8],
                                      OutputWorkspace='ws2d_histo')
     cls.ws2d_point = CreateWorkspace(
         DataX=[1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
         DataY=[2] * 12,
         NSpec=3,
         OutputWorkspace='ws2d_point')
     cls.ws1d_point = CreateWorkspace(DataX=[1, 2],
                                      DataY=[1, 2],
                                      NSpec=1,
                                      Distribution=False,
                                      OutputWorkspace='ws1d_point')
     cls.ws2d_histo_rag = CreateWorkspace(
         DataX=[1, 2, 3, 4, 5, 2, 4, 6, 8, 10],
         DataY=[2] * 8,
         NSpec=2,
         VerticalAxisUnit='DeltaE',
         VerticalAxisValues=[5, 7, 9],
         OutputWorkspace='ws2d_histo_rag')
     cls.ws2d_point_rag = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8],
                                          DataY=[2] * 8,
                                          NSpec=2,
                                          OutputWorkspace='ws2d_point_rag')
     cls.ws_MD_2d = CreateMDHistoWorkspace(
         Dimensionality=3,
         Extents='-3,3,-10,10,-1,1',
         SignalInput=range(25),
         ErrorInput=range(25),
         NumberOfEvents=10 * np.ones(25),
         NumberOfBins='5,5,1',
         Names='Dim1,Dim2,Dim3',
         Units='MomentumTransfer,EnergyTransfer,Angstrom',
         OutputWorkspace='ws_MD_2d')
     cls.ws_MD_1d = CreateMDHistoWorkspace(
         Dimensionality=3,
         Extents='-3,3,-10,10,-1,1',
         SignalInput=range(5),
         ErrorInput=range(5),
         NumberOfEvents=10 * np.ones(5),
         NumberOfBins='1,5,1',
         Names='Dim1,Dim2,Dim3',
         Units='MomentumTransfer,EnergyTransfer,Angstrom',
         OutputWorkspace='ws_MD_1d')
     cls.ws2d_point_uneven = CreateWorkspace(
         DataX=[10, 20, 30],
         DataY=[1, 2, 3],
         NSpec=1,
         OutputWorkspace='ws2d_point_uneven')
     wp = CreateWorkspace(DataX=[15, 25, 35, 45],
                          DataY=[1, 2, 3, 4],
                          NSpec=1)
     ConjoinWorkspaces(cls.ws2d_point_uneven, wp, CheckOverlapping=False)
     cls.ws2d_point_uneven = mantid.mtd['ws2d_point_uneven']
     cls.ws2d_histo_uneven = CreateWorkspace(
         DataX=[10, 20, 30, 40],
         DataY=[1, 2, 3],
         NSpec=1,
         OutputWorkspace='ws2d_histo_uneven')
     wp = CreateWorkspace(DataX=[15, 25, 35, 45, 55],
                          DataY=[1, 2, 3, 4],
                          NSpec=1)
     ConjoinWorkspaces(cls.ws2d_histo_uneven, wp, CheckOverlapping=False)
     cls.ws2d_histo_uneven = mantid.mtd['ws2d_histo_uneven']
     newYAxis = mantid.api.NumericAxis.create(3)
     newYAxis.setValue(0, 10)
     newYAxis.setValue(1, 15)
     newYAxis.setValue(2, 25)
     cls.ws2d_histo_uneven.replaceAxis(1, newYAxis)
     AddTimeSeriesLog(cls.ws2d_histo,
                      Name="my_log",
                      Time="2010-01-01T00:00:00",
                      Value=100)
     AddTimeSeriesLog(cls.ws2d_histo,
                      Name="my_log",
                      Time="2010-01-01T00:30:00",
                      Value=15)
     AddTimeSeriesLog(cls.ws2d_histo,
                      Name="my_log",
                      Time="2010-01-01T00:50:00",
                      Value=100.2)
Example #23
 def test_shorter_trans_data(self):
     Trans = CreateWorkspace([0.3,1,2],[1,2])
     self.assertRaises(RuntimeError, SANSWideAngleCorrection, self._sample, Trans, OutputWorkspace='out')
Example #24
    def plot(self, **kwargs):
        """
        Plot the function

        :param workspace: workspace upon whose x values
                          the function is plotted.
        """
        from mantid import mtd
        try:
            from mantidplot import plot
        except ImportError:
            raise RuntimeError(
                "mantidplot must be importable to plot functions.")
        from mantid.simpleapi import CreateWorkspace
        import numpy as np

        isWorkspace = False
        extractSpectrum = False
        workspaceIndex = 0
        haveXValues = False
        haveStartX = False
        haveEndX = False
        nSteps = 20
        plotName = self.name

        def inRange(x):
            return x >= xMin and x <= xMax

        for key in kwargs:
            if key == "workspace":
                isWorkspace = True
                ws = kwargs[key]
                if isinstance(ws, str):
                    ws = mtd[ws]
            if key == "workspaceIndex":
                workspaceIndex = kwargs[key]
                if workspaceIndex > 0:
                    extractSpectrum = True
            if key == "xValues":
                xvals = kwargs[key]
                haveXValues = True
            if key == "startX":
                xMin = kwargs[key]
                haveStartX = True
            if key == "endX":
                xMax = kwargs[key]
                haveEndX = True
            if key == "nSteps":
                nSteps = kwargs[key]
                if nSteps < 1:
                    raise RuntimeError("nSteps must be at least 1")
            if key == "name":
                plotName = kwargs[key]

        if haveStartX and haveEndX:
            if xMin >= xMax:
                raise RuntimeError("startX must be less than EndX")

        if haveXValues:
            spectrumWs = self._execute_algorithm('CreateWorkspace',
                                                 DataX=xvals,
                                                 DataY=xvals)
        elif isWorkspace:
            xvals = ws.readX(workspaceIndex)
            if haveStartX and haveEndX:
                xvals = list(filter(inRange, xvals))
            if extractSpectrum or (haveStartX and haveEndX):
                spectrumWs = self._execute_algorithm('CreateWorkspace',
                                                     DataX=xvals,
                                                     DataY=xvals)
            else:
                spectrumWs = ws
        elif haveStartX and haveEndX:
            xvals = np.linspace(start=xMin, stop=xMax, num=nSteps)
            spectrumWs = self._execute_algorithm('CreateWorkspace',
                                                 DataX=xvals,
                                                 DataY=xvals)
        else:
            if not haveStartX:
                raise RuntimeError(
                    "startX must be defined if no workspace or xValues are defined."
                )
            if not haveEndX:
                raise RuntimeError(
                    "endX must be defined if no workspace or xValues are defined."
                )
            else:
                raise RuntimeError(
                    "insufficient plotting arguments")  # Should not occur.

        outWs = self(spectrumWs)
        vals = outWs.readY(1)
        function = CreateWorkspace(DataX=xvals,
                                   DataY=vals,
                                   OutputWorkspace=plotName)
        plot(plotName, 0)
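Given the keywords parsed above, typical calls would look like the following (the function object and workspace name are illustrative only):

func.plot(startX=0.0, endX=10.0, nSteps=50, name='my_function_plot')  # sample a range
func.plot(workspace='ws_name', workspaceIndex=1)                      # reuse a workspace's x values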
Example #25
def int3samples(runs, name, masks, binning='0.5, 0.05, 8.0'):
    """
    Finds the polarisation versus wavelength for a set of detector tubes.

    Parameters
    ----------
    runs: list of RunData objects
      The runs whose polarisation we are interested in.

    name: string
      The name of this set of runs

    masks: list of string
      The file names of the masks for the sequential tubes that are being used
      for the SEMSANS measurements.

    binning: string
      The binning values to use for the wavelength bins.  The default value is
      '0.5, 0.05, 8.0'.
    """
    for tube, _ in enumerate(masks):
        for i in [1, 2]:
            final_state = "{}_{}_{}".format(name, tube, i)
            if final_state in mtd.getObjectNames():
                DeleteWorkspace(final_state)

    for rnum in runs:
        w1 = Load(BASE.format(rnum.number), LoadMonitors=True)
        w1mon = ExtractSingleSpectrum('w1_monitors', 0)
        w1 = ConvertUnits('w1', 'Wavelength', AlignBins=1)
        w1mon = ConvertUnits(w1mon, 'Wavelength')
        w1 = Rebin(w1, binning, PreserveEvents=False)
        w1mon = Rebin(w1mon, binning)
        w1 = w1 / w1mon
        for tube, mask in enumerate(masks):
            Mask_Tube = LoadMask('LARMOR', mask)
            w1temp = CloneWorkspace(w1)
            MaskDetectors(w1temp, MaskedWorkspace="Mask_Tube")
            Tube_Sum = SumSpectra(w1temp)
            for i in [1, 2]:
                final_state = "{}_{}_{}".format(name, tube, i)
                if final_state in mtd.getObjectNames():
                    mtd[final_state] += mtd["Tube_Sum_{}".format(i)]
                else:
                    mtd[final_state] = mtd["Tube_Sum_{}".format(i)]

    x = mtd["{}_0_1".format(name)].extractX()[0]
    dx = (x[1:] + x[:-1]) / 2
    pols = []

    for run in runs:
        he_stat = he3_stats(run)
        start = (run.start - he_stat.dt).seconds / 3600 / he_stat.t1
        end = (run.end - he_stat.dt).seconds / 3600 / he_stat.t1
        for time in np.linspace(start, end, 10):
            temp = he3pol(he_stat.scale, time)(dx)
            pols.append(temp)
    wpol = CreateWorkspace(
        x,
        np.mean(pols, axis=0),
        # and the blank
        UnitX="Wavelength",
        YUnitLabel="Counts")

    for tube, _ in enumerate(masks):
        up = mtd["{}_{}_2".format(name, tube)]
        dn = mtd["{}_{}_1".format(name, tube)]
        pol = (up - dn) / (up + dn)
        pol /= wpol
        DeleteWorkspaces(
            ["{}_{}_{}".format(name, tube, i) for i in range(1, 3)])
        RenameWorkspace("pol", OutputWorkspace="{}_{}".format(name, tube))
    DeleteWorkspaces(["Tube_Sum_1", "Tube_Sum_2"])

    GroupWorkspaces([
        "{}_{}".format(name, tube) for tube, _ in enumerate(masks)
        for i in range(1, 3)
    ],
                    OutputWorkspace=str(name))
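In formula form, the per-tube quantity computed by the last loop is P_tube(lambda) = (N_2 - N_1) / (N_2 + N_1) / P_He(lambda), where N_1 and N_2 are the summed spectra for the two spin states and P_He is the time-averaged 3He analyser polarisation held in wpol.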
Example #26
0
    def test_with_data_from_other_workspace(self):
        wsname = 'LOQ'
        x1 = np.array([1., 2., 3., 4.])
        y1 = np.array([[1., 2., 3.], [4., 5., 6.]])
        e1 = np.sqrt(y1)
        loq = CreateWorkspace(DataX=x1,
                              DataY=y1,
                              DataE=e1,
                              NSpec=2,
                              UnitX='Wavelength')

        x2 = loq.extractX()
        y2 = loq.extractY()
        e2 = loq.extractE()

        wksp = CreateWorkspace(DataX=x2,
                               DataY=y2,
                               DataE=e2,
                               NSpec=2,
                               UnitX='Wavelength')
        self.assertTrue(isinstance(wksp, MatrixWorkspace))
        self.assertEqual(wksp.getNumberHistograms(), 2)

        for i in [0, 1]:
            for j in range(len(y2[0])):
                self.assertEqual(wksp.readY(i)[j], loq.readY(i)[j])
                self.assertEqual(wksp.readE(i)[j], loq.readE(i)[j])
                self.assertEqual(wksp.readX(i)[j], loq.readX(i)[j])
            # Last X value
            self.assertEqual(
                wksp.readX(i)[len(x2) - 1],
                loq.readX(i)[len(x2) - 1])

        AnalysisDataService.remove("wksp")
Example #27
    def createTestWorkspace2(self):
        """ Create a workspace for testing against with more situation
        """
        from mantid.simpleapi import CreateWorkspace
        from mantid.simpleapi import AddSampleLog
        from distutils.version import LooseVersion
        import numpy
        from numpy import datetime64, timedelta64
        #from time import gmtime, strftime, mktime  # used in debug prints

        # Create a matrix workspace
        x = np.array([1., 2., 3., 4.])
        y = np.array([1., 2., 3.])
        e = np.sqrt(np.array([1., 2., 3.]))
        wksp = CreateWorkspace(DataX=x, DataY=y, DataE=e, NSpec=1, UnitX='TOF')

        # Add run_start
        dtimesec = 0.0010
        timefluc = 0.0001
        runstart = '2014-02-15T13:34:03'
        # older numpy assumes local timezone
        if LooseVersion(numpy.__version__) < LooseVersion('1.9'):
            runstart = runstart + 'Z'
        runstart = datetime64(runstart, 'us')  # microsecond needed for deltas

        AddSampleLog(Workspace=wksp,
                     LogName='run_start',
                     LogText=str(runstart))

        tsp_a = kernel.FloatTimeSeriesProperty("SensorA")
        tsp_b = kernel.FloatTimeSeriesProperty("SensorB")
        tsp_c = kernel.FloatTimeSeriesProperty("SensorC")
        tsp_d = kernel.FloatTimeSeriesProperty("SensorD")
        logs = [tsp_a, tsp_b, tsp_c, tsp_d]

        dbbuf = ""

        np.random.seed(0)
        for i in np.arange(25):
            # Randomly pick up log without records
            # first iteration must have all the record
            skiploglist = []
            if i > 0:
                numnorecord = np.random.randint(-1, 4)
                if numnorecord > 0:
                    for j in range(numnorecord):
                        logindex = np.random.randint(0, 6)
                        skiploglist.append(logindex)
                    # ENDFOR (j)
                # ENDIF (numnorecord)
            # ENDIF (i)

            dbbuf += "----------- %d -------------\n" % (i)

            # Record
            for j in range(4):
                # Skip if selected
                if j in skiploglist:
                    continue

                # get random time shifts
                timeshift = (np.random.random() - 0.5) * timefluc

                if i == 0:
                    # first record should have the 'exactly' same time stamps
                    timeshift *= 0.0001

                deltatime = i * dtimesec + timeshift  # fraction of a day
                deltatime = timedelta64(int(deltatime * 24 * 3600 * 1e6),
                                        'us')  # timedelta64 requires int
                tmptime = runstart + deltatime
                tmpvalue = float(i * i * 6) + j
                logs[j].addValue(tmptime, tmpvalue)

                #dbbuf += "{}: {} = {}\n".format(logs[j].name, tmptime, tmpvalue)

            # ENDFOR (j)
        # ENDFOR (i)

        #print(dbbuf)

        wksp.mutableRun()['SensorA'] = tsp_a
        wksp.mutableRun()['SensorB'] = tsp_b
        wksp.mutableRun()['SensorC'] = tsp_c
        wksp.mutableRun()['SensorD'] = tsp_d

        return wksp
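The timedelta arithmetic above converts fractional-day offsets to whole microseconds because numpy's timedelta64 only accepts integer counts. The conversion in isolation:

import numpy as np

def fractional_days_to_timedelta64(days):
    # timedelta64 needs an integer, so convert days -> whole microseconds first.
    return np.timedelta64(int(days * 24 * 3600 * 1e6), 'us')

print(np.datetime64('2014-02-15T13:34:03', 'us') + fractional_days_to_timedelta64(0.0010))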
Example #28
    def runTest(self):
        from CrystalField import CrystalField, CrystalFieldFit, CrystalFieldMultiSite, Background, Function, ResolutionModel

        cf = CrystalField('Ce', 'C2v')
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770)
        cf['B40'] = -0.031
        b = cf['B40']

        # Calculate and return the Hamiltonian matrix as a 2D numpy array.
        h = cf.getHamiltonian()
        print(h)
        # Calculate and return the eigenvalues of the Hamiltonian as a 1D numpy array.
        e = cf.getEigenvalues()
        print(e)
        # Calculate and return the eigenvectors of the Hamiltonian as a 2D numpy array.
        w = cf.getEigenvectors()
        print(w)
        # Using the keyword argument
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, Temperature=44)
        # Using the property
        cf.Temperature = 44

        print(cf.getPeakList())
        #[[  0.00000000e+00   2.44006198e+01   4.24977124e+01   1.80970926e+01 -2.44006198e+01]
        # [  2.16711565e+02   8.83098530e+01   5.04430056e+00   1.71153708e-01  1.41609425e-01]]
        cf.ToleranceIntensity = 1
        print(cf.getPeakList())
        #[[   0.           24.40061976   42.49771237]
        # [ 216.71156467   88.30985303    5.04430056]]
        cf.PeakShape = 'Gaussian'
        cf.FWHM = 0.9
        sp = cf.getSpectrum()
        print(cf.function)
        CrystalField_Ce = CreateWorkspace(*sp)
        print(CrystalField_Ce)

        # If the peak shape is Gaussian
        cf.peaks.param[1]['Sigma'] = 2.0
        cf.peaks.param[2]['Sigma'] = 0.01

        # If the peak shape is Lorentzian
        cf.PeakShape = 'Lorentzian'
        cf.peaks.param[1]['FWHM'] = 2.0
        cf.peaks.param[2]['FWHM'] = 0.01

        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                          Temperature=44.0, FWHM=1.1)
        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=1),
                                   background=Function('LinearBackground', A0=1.0, A1=0.01))
        h = cf.background.peak.param['Height']
        a1 = cf.background.background.param['A1']
        print(h)
        print(a1)

        cf.ties(B20=1.0, B40='B20/2')
        cf.constraints('1 < B22 <= 2', 'B22 < 4')

        print(cf.function[1].getTies())
        print(cf.function[1].getConstraints())

        cf.background.peak.ties(Height=10.1)
        cf.background.peak.constraints('Sigma > 0')
        cf.background.background.ties(A0=0.1)
        cf.background.background.constraints('A1 > 0')

        print(cf.function[0][0].getConstraints())
        print(cf.function[0][1].getConstraints())
        print(cf.function.getTies())
        print(cf.function.getConstraints())

        cf.peaks.ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
        cf.peaks.constraints('f0.FWHM < 2.2', 'f1.FWHM >= 0.1')

        cf.PeakShape = 'Gaussian'
        cf.peaks.tieAll('Sigma=0.1', 3)
        cf.peaks.constrainAll('0 < Sigma < 0.1', 4)
        cf.peaks.tieAll('Sigma=f0.Sigma', 1, 3)
        cf.peaks.ties({'f1.Sigma': 'f0.Sigma', 'f2.Sigma': 'f0.Sigma', 'f3.Sigma': 'f0.Sigma'})

        rm = ResolutionModel(([1, 2, 3, 100], [0.1, 0.3, 0.35, 2.1]))
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, Temperature=44.0, ResolutionModel=rm)

        rm = ResolutionModel(self.my_func, xstart=0.0, xend=24.0, accuracy=0.01)
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, Temperature=44.0, ResolutionModel=rm)

        marires = Instrument('MARI')
        marires.setChopper('S')
        marires.setFrequency(250)
        marires.setEi(30)
        rm = ResolutionModel(marires.getResolution, xstart=0.0, xend=29.0, accuracy=0.01)
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, Temperature=44.0, ResolutionModel=rm)

        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, Temperature=44.0, ResolutionModel=rm, FWHMVariation=0.1)

        # ---------------------------

        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                          Temperature=[44.0, 50.], FWHM=[1.1, 0.9])
        cf.PeakShape = 'Lorentzian'
        cf.peaks[0].param[0]['FWHM'] = 1.11
        cf.peaks[1].param[1]['FWHM'] = 1.12
        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
                                   background=Function('FlatBackground', A0=1.0))
        cf.background[1].peak.param['Sigma'] = 0.8
        cf.background[1].background.param['A0'] = 1.1

        # The B parameters are common for all spectra - syntax doesn't change
        cf.ties(B20=1.0, B40='B20/2')
        cf.constraints('1 < B22 <= 2', 'B22 < 4')

        # Backgrounds and peaks are different for different spectra - must be indexed
        cf.background[0].peak.ties(Height=10.1)
        cf.background[0].peak.constraints('Sigma > 0.1')
        cf.background[1].peak.ties(Height=20.2)
        cf.background[1].peak.constraints('Sigma > 0.2')
        cf.peaks[1].tieAll('FWHM=2*f1.FWHM', 2, 5)
        cf.peaks[0].constrainAll('FWHM < 2.2', 1, 4)

        rm = ResolutionModel([self.my_func, marires.getResolution], 0, 100, accuracy = 0.01)
        cf.ResolutionModel = rm

        # Calculate second spectrum, use the generated x-values
        sp = cf.getSpectrum(1)
        # Calculate second spectrum, use the first spectrum of a workspace
        sp = cf.getSpectrum(1, 'CrystalField_Ce')
        # Calculate first spectrum, use the i-th spectrum of a workspace
        i=0
        sp = cf.getSpectrum(0, 'CrystalField_Ce', i)

        print(cf.function)

        cf.Temperature = [5, 50, 150]

        print()
        print(cf.function)

        ws = 'CrystalField_Ce'
        ws1 = 'CrystalField_Ce'
        ws2 = 'CrystalField_Ce'
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544, Temperature=5)

        # In case of a single spectrum (ws is a workspace)
        fit = CrystalFieldFit(Model=cf, InputWorkspace=ws)

        # Or for multiple spectra
        fit = CrystalFieldFit(Model=cf, InputWorkspace=[ws1, ws2])
        cf.Temperature = [5, 50]
        fit.fit()

        params = {'B20': 0.377, 'B22': 3.9, 'B40': -0.03, 'B42': -0.116, 'B44': -0.125,
                  'Temperature': [44.0, 50.], 'FWHM': [1.1, 0.9]}
        cf1 = CrystalField('Ce', 'C2v', **params)
        cf2 = CrystalField('Pr', 'C2v', **params)
        cfms = cf1 + cf2
        cf = 2*cf1 + cf2

        cfms = CrystalFieldMultiSite(Ions=['Ce', 'Pr'], Symmetries=['C2v', 'C2v'], Temperatures=[44.0], FWHMs=[1.1])
        cfms['ion0.B40'] = -0.031
        cfms['ion1.B20'] = 0.37737
        b = cfms['ion0.B22']

        print(b)
        print(cfms.function)

        cfms = CrystalFieldMultiSite(Ions=['Ce', 'Pr'], Symmetries=['C2v', 'C2v'], Temperatures=[44.0], FWHMs=[1.1],
                                     parameters={'ion0.B20': 0.37737, 'ion0.B22': 3.9770, 'ion1.B40':-0.031787,
                                                 'ion1.B42':-0.11611, 'ion1.B44':-0.12544})
        cfms = CrystalFieldMultiSite(Ions='Ce', Symmetries='C2v', Temperatures=[20], FWHMs=[1.0],
                                     Background='name=Gaussian,Height=0,PeakCentre=1,Sigma=0;name=LinearBackground,A0=0,A1=0')
        cfms = CrystalFieldMultiSite(Ions=['Ce'], Symmetries=['C2v'], Temperatures=[50], FWHMs=[0.9],
                                     Background=LinearBackground(A0=1.0), BackgroundPeak=Gaussian(Height=10, Sigma=0.3))
        cfms = CrystalFieldMultiSite(Ions='Ce', Symmetries='C2v', Temperatures=[20], FWHMs=[1.0],
                                     Background= Gaussian(PeakCentre=1) + LinearBackground())
        cfms = CrystalFieldMultiSite(Ions=['Ce','Pr'], Symmetries=['C2v', 'C2v'], Temperatures=[44, 50], FWHMs=[1.1, 0.9],
                                     Background=FlatBackground(), BackgroundPeak=Gaussian(Height=10, Sigma=0.3),
                                     parameters={'ion0.B20': 0.37737, 'ion0.B22': 3.9770, 'ion1.B40':-0.031787,
                                                 'ion1.B42':-0.11611, 'ion1.B44':-0.12544})
        cfms.ties({'sp0.bg.f0.Height': 10.1})
        cfms.constraints('sp0.bg.f0.Sigma > 0.1')
        cfms.constraints('ion0.sp0.pk1.FWHM < 2.2')
        cfms.ties({'ion0.sp1.pk2.FWHM': '2*ion0.sp1.pk1.FWHM', 'ion1.sp1.pk3.FWHM': '2*ion1.sp1.pk2.FWHM'})

        # --------------------------

        params = {'ion0.B20': 0.37737, 'ion0.B22': 3.9770, 'ion1.B40':-0.031787, 'ion1.B42':-0.11611, 'ion1.B44':-0.12544}
        cf = CrystalFieldMultiSite(Ions=['Ce', 'Pr'], Symmetries=['C2v', 'C2v'], Temperatures=[44.0, 50.0],
                                   FWHMs=[1.0, 1.0], ToleranceIntensity=6.0, ToleranceEnergy=1.0,  FixAllPeaks=True,
                                   parameters=params)

        cf.fix('ion0.BmolX', 'ion0.BmolY', 'ion0.BmolZ', 'ion0.BextX', 'ion0.BextY', 'ion0.BextZ', 'ion0.B40',
               'ion0.B42', 'ion0.B44', 'ion0.B60', 'ion0.B62', 'ion0.B64', 'ion0.B66', 'ion0.IntensityScaling',
               'ion1.BmolX', 'ion1.BmolY', 'ion1.BmolZ', 'ion1.BextX', 'ion1.BextY', 'ion1.BextZ', 'ion1.B40',
               'ion1.B42', 'ion1.B44', 'ion1.B60', 'ion1.B62', 'ion1.B64', 'ion1.B66', 'ion1.IntensityScaling',
               'sp0.IntensityScaling', 'sp1.IntensityScaling')

        chi2 = CalculateChiSquared(str(cf.function), InputWorkspace=ws1, InputWorkspace_1=ws2)[1]

        fit = CrystalFieldFit(Model=cf, InputWorkspace=[ws1, ws2], MaxIterations=10)
        fit.fit()

        print(chi2)

        cfms = CrystalFieldMultiSite(Ions='Ce', Symmetries='C2', Temperatures=[25], FWHMs=[1.0], PeakShape='Gaussian',
                                     BmolX=1.0, B40=-0.02)
        print(str(cfms.function).split(',')[0])

        # --------------------------

        # Create some crystal field data
        origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                              Temperature=44.0, FWHM=1.1)
        x, y = origin.getSpectrum()
        ws = makeWorkspace(x, y)

        # Define a CrystalField object with parameters slightly shifted.
        cf = CrystalField('Ce', 'C2v', B20=0, B22=0, B40=0, B42=0, B44=0,
                          Temperature=44.0, FWHM=1.0, ResolutionModel=([0, 100], [1, 1]), FWHMVariation=0)

        # Set any ties on the field parameters.
        cf.ties(B20=0.37737)
        # Create a fit object
        fit = CrystalFieldFit(cf, InputWorkspace=ws)
        # Find initial values for the field parameters.
        # You need to define the energy splitting and names of parameters to estimate.
        # Optionally additional constraints can be set on tied parameters (eg, peak centres).
        fit.estimate_parameters(EnergySplitting=50,
                                Parameters=['B22', 'B40', 'B42', 'B44'],
                                Constraints='20<f1.PeakCentre<45,20<f2.PeakCentre<45',
                                NSamples=1000)
        print('Returned', fit.get_number_estimates(), 'sets of parameters.')
        # The first set (the smallest chi squared) is selected by default.
        # Select a different parameter set if required
        fit.select_estimated_parameters(3)
        print(cf['B22'], cf['B40'], cf['B42'], cf['B44'])
        # Run fit
        fit.fit()

        # --------------------------

        from CrystalField import PointCharge
        axial_pc_model = PointCharge([[-2, 0, 0, -4], [-2, 0, 0, 4]], 'Nd')
        axial_blm = axial_pc_model.calculate()
        print(axial_blm)

        from CrystalField import PointCharge
        from mantid.geometry import CrystalStructure
        perovskite_structure = CrystalStructure('4 4 4 90 90 90', 'P m -3 m', 'Ce 0 0 0 1 0; Al 0.5 0.5 0.5 1 0; O 0.5 0.5 0 1 0')
        cubic_pc_model = PointCharge(perovskite_structure, 'Ce', Charges={'Ce':3, 'Al':3, 'O':-2}, MaxDistance=7.5)

        cubic_pc_model = PointCharge(perovskite_structure, 'Ce', Charges={'Ce':3, 'Al':3, 'O':-2}, Neighbour=2)
        print(cubic_pc_model)

        cif_pc_model = PointCharge('Sm2O3.cif')
        print(cif_pc_model.getIons())

        cif_pc_model.Charges = {'O1':-2, 'O2':-2, 'Sm1':3, 'Sm2':3, 'Sm3':3}
        cif_pc_model.IonLabel = 'Sm2'
        cif_pc_model.Neighbour = 1
        cif_blm = cif_pc_model.calculate()
        print(cif_blm)
        bad_pc_model = PointCharge('Sm2O3.cif', MaxDistance=7.5, Neighbour=2)
        print(bad_pc_model.Neighbour)
        print(bad_pc_model.MaxDistance)

        cif_pc_model.Charges = {'O':-2, 'Sm':3}
        cif_blm = cif_pc_model.calculate()
        print(cif_blm)

        cf = CrystalField('Sm', 'C2', Temperature=5, FWHM=10, **cif_pc_model.calculate())
        fit = CrystalFieldFit(cf, InputWorkspace=ws)
        fit.fit()

        # --------------------------

        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, Temperature=44.0)
        Cv = cf.getHeatCapacity()       # Calculates Cv(T) for 1<T<300K in 1K steps  (default)

        T = np.arange(1,900,5)
        Cv = cf.getHeatCapacity(T)      # Calculates Cv(T) for specified values of T (1 to 900K in 5K steps here)

        # Temperatures from a single spectrum workspace
        ws = CreateWorkspace(T, T, T)
        Cv = cf.getHeatCapacity(ws)     # Use the x-values of a workspace as the temperatures
        ws_cp = CreateWorkspace(*Cv)

        # Temperatures from a multi-spectrum workspace
        ws = CreateWorkspace(T, T, T, NSpec=2)
        Cv = cf.getHeatCapacity(ws, 1)  # Uses the second spectrum's x-values for T (e.g. 450<T<900)

        chi_v = cf.getSusceptibility(T, Hdir=[1, 1, 1])
        chi_v_powder = cf.getSusceptibility(T, Hdir='powder')
        chi_v_cgs = cf.getSusceptibility(T, Hdir=[1, 1, 0], Unit='SI')
        chi_v_bohr = cf.getSusceptibility(T, Unit='bohr')
        print(type([chi_v, chi_v_powder, chi_v_cgs, chi_v_bohr]))
        moment_t = cf.getMagneticMoment(Temperature=T, Hdir=[1, 1, 1], Hmag=0.1) # Calcs M(T) in a 0.1T field || [111]
        H = np.linspace(0, 30, 121)
        moment_h = cf.getMagneticMoment(Hmag=H, Hdir='powder', Temperature=10)   # Calcs M(H) at 10K for powder sample
        moment_SI = cf.getMagneticMoment(H, [1, 1, 1], Unit='SI')         # M(H) in Am^2/mol at 1K for H||[111]
        moment_cgs = cf.getMagneticMoment(100, Temperature=T, Unit='cgs') # M(T) in emu/mol in a field of 100G || [001]
        print(type([moment_t, moment_h, moment_SI, moment_cgs]))

        # --------------------------

        from CrystalField import CrystalField, CrystalFieldFit, PhysicalProperties
        # Fits a heat capacity dataset - you must have subtracted the phonon contribution by some method already
        # and the data must be in J/mol/K.
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                          PhysicalProperty=PhysicalProperties('Cv'))
        fitcv = CrystalFieldFit(Model=cf, InputWorkspace=ws)
        fitcv.fit()

        params = {'B20':0.37737, 'B22':3.9770, 'B40':-0.031787, 'B42':-0.11611, 'B44':-0.12544}
        cf = CrystalField('Ce', 'C2v', **params)
        cf.PhysicalProperty = PhysicalProperties('Cv')
        fitcv = CrystalFieldFit(Model=cf, InputWorkspace=ws)
        fitcv.fit()

        # Fits a susceptibility dataset. Data is the volume susceptibility in SI units
        cf = CrystalField('Ce', 'C2v', **params)
        cf.PhysicalProperty = PhysicalProperties('susc', Hdir='powder', Unit='SI')
        fit_chi = CrystalFieldFit(Model=cf, InputWorkspace=ws)
        fit_chi.fit()

        # Fits a magnetisation dataset. Data is in emu/mol, and was measured at 5K with the field || [111].
        cf = CrystalField('Ce', 'C2v', **params)
        cf.PhysicalProperty = PhysicalProperties('M(H)', Temperature=5, Hdir=[1, 1, 1], Unit='cgs')
        fit_mag = CrystalFieldFit(Model=cf, InputWorkspace=ws)
        fit_mag.fit()

        # Fits a magnetisation vs temperature dataset. Data is in Am^2/mol, measured with a 0.1T field || [110]
        cf = CrystalField('Ce', 'C2v', **params)
        cf.PhysicalProperty = PhysicalProperties('M(T)', Hmag=0.1, Hdir=[1, 1, 0], Unit='SI')
        fit_moment = CrystalFieldFit(Model=cf, InputWorkspace=ws)
        fit_moment.fit()

        # --------------------------

        # Pregenerate the required workspaces
        for tt in [10, 44, 50]:
            cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544, Temperature=tt, FWHM=0.5)
            x, y = cf.getSpectrum()
            self.my_create_ws('ws_ins_'+str(tt)+'K', x, y)
        ws_ins_10K = mtd['ws_ins_10K']
        ws_ins_44K = mtd['ws_ins_44K']
        ws_ins_50K = mtd['ws_ins_50K']
        ws_cp = self.my_create_ws('ws_cp', *cf.getHeatCapacity())
        ws_chi = self.my_create_ws('ws_chi', *cf.getSusceptibility(np.linspace(1,300,100), Hdir='powder', Unit='cgs'))
        ws_mag = self.my_create_ws('ws_mag', *cf.getMagneticMoment(Hmag=np.linspace(0, 30, 100), Hdir=[1,1,1], Unit='bohr', Temperature=5))

        # --------------------------

        # Fits an INS spectrum (at 10K) and the heat capacity simultaneously
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544)
        cf.Temperature = 10
        cf.FWHM = 1.5
        cf.PhysicalProperty = PhysicalProperties('Cv')
        fit = CrystalFieldFit(Model=cf, InputWorkspace=[ws_ins_10K, ws_cp])
        fit.fit()

        # Fits two INS spectra (at 44K and 50K) and the heat capacity, susceptibility and magnetisation simultaneously.
        PPCv = PhysicalProperties('Cv')
        PPchi = PhysicalProperties('susc', 'powder', Unit='cgs')
        PPMag = PhysicalProperties('M(H)', [1, 1, 1], 5, 'bohr')
        cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                          Temperature=[44.0, 50.], FWHM=[1.1, 0.9], PhysicalProperty=[PPCv, PPchi, PPMag] )
        fit = CrystalFieldFit(Model=cf, InputWorkspace=[ws_ins_44K, ws_ins_50K, ws_cp, ws_chi, ws_mag])
        fit.fit()
Example #29
0
 def my_create_ws(self, outwsname, x, y):
     jitter = (np.random.rand(np.shape(y)[0])-0.5)*np.max(y)/100
     CreateWorkspace(x, y + jitter, y*0+np.max(y)/100, Distribution=True, OutputWorkspace=outwsname)
     return mtd[outwsname]
    def createTestWorkspace2(self):
        """ Create a workspace for testing against with more situation
        """
        from mantid.simpleapi import CreateWorkspace
        from mantid.simpleapi import AddSampleLog
        from mantid import kernel
        from distutils.version import LooseVersion
        import numpy
        import numpy as np
        from numpy import datetime64, timedelta64
        #from time import gmtime, strftime, mktime  # used only in debug prints

        # Create a matrix workspace
        x = np.array([1.,2.,3.,4.])
        y = np.array([1.,2.,3.])
        e = np.sqrt(np.array([1.,2.,3.]))
        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')

        # Add run_start
        dtimesec = 0.0010
        timefluc = 0.0001
        runstart = '2014-02-15T13:34:03'
        # older numpy assumes local timezone
        if LooseVersion(numpy.__version__) < LooseVersion('1.9'):
            runstart = runstart + 'Z'
        runstart = datetime64(runstart, 'us') # microsecond needed for deltas

        AddSampleLog(Workspace=wksp,LogName='run_start',LogText=str(runstart))

        tsp_a = kernel.FloatTimeSeriesProperty("SensorA")
        tsp_b = kernel.FloatTimeSeriesProperty("SensorB")
        tsp_c = kernel.FloatTimeSeriesProperty("SensorC")
        tsp_d = kernel.FloatTimeSeriesProperty("SensorD")
        logs = [tsp_a, tsp_b, tsp_c, tsp_d]

        dbbuf = ""

        np.random.seed(0)
        for i in np.arange(25):
            # Randomly pick up log without records
            # first iteration must have all the record
            skiploglist = []
            if i > 0:
                numnorecord = np.random.randint(-1, 4)
                if numnorecord > 0:
                    for j in range(numnorecord):
                        logindex = np.random.randint(0, 6)
                        skiploglist.append(logindex)
                    # ENDFOR (j)
                # ENDIF (numnorecord)
            # ENDIF (i)

            dbbuf += "----------- %d -------------\n" % (i)

            # Record
            for j in range(4):
                # Skip if selected
                if j in skiploglist:
                    continue

                # get random time shifts
                timeshift = (np.random.random()-0.5)*timefluc

                if i == 0:
                    # first record should have the 'exactly' same time stamps
                    timeshift *= 0.0001

                deltatime = i*dtimesec + timeshift # fraction of a day
                deltatime = timedelta64(int(deltatime * 24 * 3600 * 1e6), 'us') # timedelta64 requires int
                tmptime = runstart + deltatime
                tmpvalue = float(i*i*6)+j
                logs[j].addValue(tmptime, tmpvalue)

                #dbbuf += "{}: {} = {}\n".format(logs[j].name, tmptime, tmpvalue)

            # ENDFOR (j)
        # ENDFOR (i)

        #print(dbbuf)

        wksp.mutableRun()['SensorA']=tsp_a
        wksp.mutableRun()['SensorB']=tsp_b
        wksp.mutableRun()['SensorC']=tsp_c
        wksp.mutableRun()['SensorD']=tsp_d

        return wksp
Example #31
0
    def PyExec(self):
        runs = self.getProperty("Filename").value

        if not runs:
            ipts = self.getProperty("IPTS").value
            runs = [
                '/HFIR/HB2C/IPTS-{}/nexus/HB2C_{}.nxs.h5'.format(ipts, run)
                for run in self.getProperty("RunNumbers").value
            ]

        wavelength = self.getProperty("wavelength").value
        outWS = self.getPropertyValue("OutputWorkspace")
        group_names = []

        grouping = self.getProperty("Grouping").value
        if grouping == 'None':
            grouping = 1
        else:
            grouping = 2 if grouping == '2x2' else 4

        for i, run in enumerate(runs):
            data = np.zeros((512 * 480 * 8), dtype=np.int64)
            with h5py.File(run, 'r') as f:
                monitor_count = f['/entry/monitor1/total_counts'].value[0]
                run_number = f['/entry/run_number'].value[0]
                for b in range(8):
                    data += np.bincount(f['/entry/bank' + str(b + 1) +
                                          '_events/event_id'].value,
                                        minlength=512 * 480 * 8)
            data = data.reshape((480 * 8, 512))
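            # Optionally coarsen the detector image by summing grouping x grouping pixel blocks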
            if grouping == 2:
                data = data[::2, ::2] + data[
                    1::2, ::2] + data[::2, 1::2] + data[1::2, 1::2]
            elif grouping == 4:
                data = (data[::4, ::4] + data[1::4, ::4] + data[2::4, ::4] +
                        data[3::4, ::4] + data[::4, 1::4] + data[1::4, 1::4] +
                        data[2::4, 1::4] + data[3::4, 1::4] + data[::4, 2::4] +
                        data[1::4, 2::4] + data[2::4, 2::4] +
                        data[3::4, 2::4] + data[::4, 3::4] + data[1::4, 3::4] +
                        data[2::4, 3::4] + data[3::4, 3::4])

            CreateWorkspace(DataX=[wavelength - 0.001, wavelength + 0.001],
                            DataY=data,
                            DataE=np.sqrt(data),
                            UnitX='Wavelength',
                            YUnitLabel='Counts',
                            NSpec=1966080 // grouping**2,
                            OutputWorkspace='__tmp_load',
                            EnableLogging=False)
            LoadNexusLogs('__tmp_load', Filename=run, EnableLogging=False)
            AddSampleLog('__tmp_load',
                         LogName="monitor_count",
                         LogType='Number',
                         NumberType='Double',
                         LogText=str(monitor_count),
                         EnableLogging=False)
            AddSampleLog('__tmp_load',
                         LogName="gd_prtn_chrg",
                         LogType='Number',
                         NumberType='Double',
                         LogText=str(monitor_count),
                         EnableLogging=False)
            AddSampleLog('__tmp_load',
                         LogName="Wavelength",
                         LogType='Number',
                         NumberType='Double',
                         LogText=str(wavelength),
                         EnableLogging=False)
            AddSampleLog('__tmp_load',
                         LogName="Ei",
                         LogType='Number',
                         NumberType='Double',
                         LogText=str(
                             UnitConversion.run('Wavelength', 'Energy',
                                                wavelength, 0, 0, 0, Elastic,
                                                0)),
                         EnableLogging=False)
            AddSampleLog('__tmp_load',
                         LogName="run_number",
                         LogText=run_number,
                         EnableLogging=False)

            if grouping > 1:  # Fix detector IDs per spectrum before loading instrument
                __tmp_load = mtd['__tmp_load']
                for n in range(__tmp_load.getNumberHistograms()):
                    s = __tmp_load.getSpectrum(n)
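                    # attach the grouping x grouping block of native detector IDs that
                    # was summed into this spectrum (note: 'i' here shadows the run index)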
                    for i in range(grouping):
                        for j in range(grouping):
                            s.addDetectorID(
                                int(n * grouping % 512 + n //
                                    (512 / grouping) * 512 * grouping + j +
                                    i * 512))

                LoadInstrument('__tmp_load',
                               InstrumentName='WAND',
                               RewriteSpectraMap=False,
                               EnableLogging=False)
            else:
                LoadInstrument('__tmp_load',
                               InstrumentName='WAND',
                               RewriteSpectraMap=True,
                               EnableLogging=False)

            SetGoniometer('__tmp_load',
                          Axis0="HB2C:Mot:s1,0,1,0,1",
                          EnableLogging=False)

            if self.getProperty("ApplyMask").value:
                MaskBTP('__tmp_load', Pixel='1,2,511,512', EnableLogging=False)
                if mtd['__tmp_load'].getRunNumber(
                ) > 26600:  # They changed pixel mapping and bank name order here
                    MaskBTP('__tmp_load',
                            Bank='1',
                            Tube='479-480',
                            EnableLogging=False)
                    MaskBTP('__tmp_load',
                            Bank='8',
                            Tube='1-2',
                            EnableLogging=False)
                else:
                    MaskBTP('__tmp_load',
                            Bank='8',
                            Tube='475-480',
                            EnableLogging=False)

            if len(runs) == 1:
                RenameWorkspace('__tmp_load', outWS, EnableLogging=False)
            else:
                outName = outWS + "_" + str(mtd['__tmp_load'].getRunNumber())
                group_names.append(outName)
                RenameWorkspace('__tmp_load', outName, EnableLogging=False)

        if len(runs) > 1:
            GroupWorkspaces(group_names,
                            OutputWorkspace=outWS,
                            EnableLogging=False)

        self.setProperty('OutputWorkspace', outWS)
Example #32
0
 def _create_workspace(self,
                       ws_2D=True,
                       sample=True,
                       xAx=True,
                       yAxSpec=True,
                       yAxMt=True,
                       instrument=True):
     """ create Workspace
     :param ws_2D: should workspace be 2D?
     :param sample: should workspace have sample logs?
     :param xAx: should x axis be DeltaE?
     :param yAxMt: should y axis be MomentumTransfer?
     :param yAxSpec: should y axis be SpectrumAxis?
     :param instrument: should workspace have an instrument?
     """
     # Event Workspace
     if not ws_2D:
         ws = CreateSampleWorkspace("Event", "One Peak", XUnit="DeltaE")
         return ws
     if not xAx:
         ws = CreateWorkspace(DataX=self.data_x,
                              DataY=self.data_y,
                              DataE=np.sqrt(self.data_y),
                              NSpec=1,
                              UnitX="TOF")
         return ws
     if not instrument:
         ws = CreateWorkspace(DataX=self.data_x,
                              DataY=self.data_y,
                              DataE=np.sqrt(self.data_y),
                              NSpec=1,
                              UnitX="DeltaE")
         return ws
     if not yAxMt and not yAxSpec:
         ws = CreateWorkspace(DataX=self.data_x,
                              DataY=self.data_y,
                              DataE=np.sqrt(self.data_y),
                              NSpec=1,
                              UnitX="DeltaE")
         LoadInstrument(ws, True, InstrumentName="TOFTOF")
         ConvertSpectrumAxis(InputWorkspace=ws,
                             OutputWorkspace=ws,
                             Target="theta",
                             EMode="Direct")
         return ws
     if not yAxSpec and yAxMt:
         ws = CreateWorkspace(DataX=self.data_x,
                              DataY=self.data_y,
                              DataE=np.sqrt(self.data_y),
                              NSpec=1,
                              UnitX="DeltaE")
         LoadInstrument(ws, True, InstrumentName="TOFTOF")
         self._add_all_sample_logs(ws)
         ConvertSpectrumAxis(InputWorkspace=ws,
                             OutputWorkspace="ws2",
                             Target="ElasticQ",
                             EMode="Direct")
         ws2 = mtd["ws2"]
         return ws2
     if not sample:
         ws = CreateWorkspace(DataX=self.data_x,
                              DataY=self.data_y,
                              DataE=np.sqrt(self.data_y),
                              NSpec=1,
                              UnitX="DeltaE")
         LoadInstrument(ws, False, InstrumentName="TOFTOF")
         for i in range(ws.getNumberHistograms()):
             ws.getSpectrum(i).setDetectorID(i + 1)
         return ws
     else:
         ws = CreateWorkspace(DataX=self.data_x,
                              DataY=self.data_y,
                              DataE=np.sqrt(self.data_y),
                              NSpec=1,
                              UnitX="DeltaE")
         LoadInstrument(ws, True, InstrumentName="TOFTOF")
         self._add_all_sample_logs(ws)
         return ws
Example #33
0
 def setUp(self):
     if self._raw_ws is None:
         dataX = np.linspace(start=5.6e4, stop=5.701e4, num=101)
         dataY = 5 * dataX + 4
         ws = CreateWorkspace(DataX=dataX, DataY=dataY, NSpec=1)
         self.__class__._raw_ws = ws
 def test_no_instrument_associated(self):
     Sample = CreateWorkspace([1,2,3],[1,2])
     Trans = CreateWorkspace([1,2,3],[1,2])
     self.assertRaises(RuntimeError, SANSWideAngleCorrection, Sample, Trans, OutputWorkspace='out')
Example #35
0
class SampleTest(unittest.TestCase):

    def setUp(self):
        self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")

    def test_geometry_getters_and_setters(self):
        sample = self._ws.sample()

        sample.setThickness(12.5)
        self.assertEquals(sample.getThickness(), 12.5)
        sample.setHeight(10.2)
        self.assertEquals(sample.getHeight(), 10.2)
        sample.setWidth(5.9)
        self.assertEquals(sample.getWidth(), 5.9)

    def test_crystal_structure_handling(self):
        sample = self._ws.sample()

        self.assertEquals(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

        cs = CrystalStructure('5.43 5.43 5.43',
                              'F d -3 m',
                              'Si 0 0 0 1.0 0.01')

        sample.setCrystalStructure(cs)

        self.assertEquals(sample.hasCrystalStructure(), True)

        cs_from_sample = sample.getCrystalStructure()

        self.assertEquals(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
        self.assertEquals(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
        self.assertEquals(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
        self.assertEquals(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])


        sample.clearCrystalStructure()

        self.assertEquals(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

    def test_material(self):
        SetSampleMaterial(self._ws,"Al2 O3",SampleMassDensity=4)
        material = self._ws.sample().getMaterial()

        self.assertAlmostEqual(material.numberDensity, 0.1181, places=4)
        self.assertAlmostEqual(material.relativeMolecularMass(), 101.961, places=3)

        atoms, numatoms = material.chemicalFormula()

        self.assertEquals(len(atoms), len(numatoms))
        self.assertEquals(len(atoms), 2)
        self.assertEquals(numatoms[0], 2)
        self.assertEquals(numatoms[1], 3)

        xs0 = atoms[0].neutron()
        xs1 = atoms[1].neutron()
        # the correct way to calculate for coherent cross section
        # is to average the scattering lengths then convert to a cross section
        b_real = (xs0['coh_scatt_length_real']*2 + xs1['coh_scatt_length_real']*3) / 5
        b_imag = (xs0['coh_scatt_length_img']*2 + xs1['coh_scatt_length_img']*3) / 5
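        # sigma_coh = 4*pi*|b|^2 with b in fm; 1 barn = 100 fm^2, hence the 0.04*pi factor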
        xs = .04 * pi * (b_real * b_real + b_imag * b_imag)
        self.assertAlmostEquals(material.cohScatterXSection(), xs, places=4)

    def test_get_shape(self):
        sample = self._ws.sample()
        self.assertEquals(type(sample.getShape()), CSGObject)

    def test_get_shape_xml(self):
        sample = self._ws.sample()
        shape = sample.getShape()
        xml = shape.getShapeXML()
        self.assertEquals(type(xml), str)
Example #36
0
    def test_with_data_from_other_workspace(self):
        wsname = 'LOQ'
        x1 = np.array([1.,2.,3.,4.])
        y1 = np.array([[1.,2.,3.],[4.,5.,6.]])
        e1 = np.sqrt(y1)
        loq = CreateWorkspace(DataX=x1, DataY=y1,DataE=e1,NSpec=2,UnitX='Wavelength')

        x2 = loq.extractX()
        y2 = loq.extractY()
        e2 = loq.extractE()

        wksp = CreateWorkspace(DataX=x2, DataY=y2,DataE=e2,NSpec=2,UnitX='Wavelength')
        self.assertTrue(isinstance(wksp, MatrixWorkspace))
        self.assertEquals(wksp.getNumberHistograms(), 2)

        for i in [0,1]:
            for j in range(len(y2[0])):
                self.assertEquals(wksp.readY(i)[j], loq.readY(i)[j])
                self.assertEquals(wksp.readE(i)[j], loq.readE(i)[j])
                self.assertEquals(wksp.readX(i)[j], loq.readX(i)[j])
            # Last X value
            self.assertEquals(wksp.readX(i)[len(x2[0])-1], loq.readX(i)[len(x2[0])-1])

        AnalysisDataService.remove("wksp")
Example #37
0
    def PyExec(self):
        try:
            import refl1d  # noqa: F401
        except ImportError:
            err_msg = 'Refl1D not installed, unable to run this algorithm'
            raise RuntimeError(err_msg)

        # Get properties we copied to run LiquidsReflectometryReduction algorithms
        kwargs = dict()
        for prop in PROPS_TO_COPY:
            kwargs[prop] = self.getProperty(prop).value

        # Process the reference normalization run
        norm_wksp = LiquidsReflectometryReduction(
            RunNumbers=kwargs['RunNumbers'],
            InputWorkspace=kwargs['InputWorkspace'],
            NormalizationRunNumber=kwargs['NormalizationRunNumber'],
            SignalPeakPixelRange=kwargs['NormPeakPixelRange'],
            SubtractSignalBackground=kwargs['SubtractNormBackground'],
            SignalBackgroundPixelRange=kwargs['NormBackgroundPixelRange'],
            NormFlag=False,
            NormPeakPixelRange=kwargs['NormPeakPixelRange'],
            SubtractNormBackground=kwargs['SubtractNormBackground'],
            NormBackgroundPixelRange=kwargs['NormBackgroundPixelRange'],
            LowResDataAxisPixelRangeFlag=kwargs[
                'LowResNormAxisPixelRangeFlag'],
            LowResDataAxisPixelRange=kwargs['LowResNormAxisPixelRange'],
            LowResNormAxisPixelRangeFlag=kwargs[
                'LowResNormAxisPixelRangeFlag'],
            LowResNormAxisPixelRange=kwargs['LowResNormAxisPixelRange'],
            TOFRange=kwargs['TOFRange'],
            TOFRangeFlag=kwargs['TOFRangeFlag'],
            QMin=kwargs['QMin'],
            QStep=kwargs['QStep'],
            AngleOffset=kwargs['AngleOffset'],
            AngleOffsetError=kwargs['AngleOffsetError'],
            OutputWorkspace=kwargs['OutputWorkspace'],
            ApplyScalingFactor=False,
            ScalingFactorFile=kwargs['ScalingFactorFile'],
            SlitTolerance=kwargs['SlitTolerance'],
            SlitsWidthFlag=kwargs['SlitsWidthFlag'],
            IncidentMediumSelected=kwargs['IncidentMediumSelected'],
            GeometryCorrectionFlag=kwargs['GeometryCorrectionFlag'],
            FrontSlitName=kwargs['FrontSlitName'],
            BackSlitName=kwargs['BackSlitName'],
            TOFSteps=kwargs['TOFSteps'],
            CropFirstAndLastPoints=kwargs['CropFirstAndLastPoints'],
            ApplyPrimaryFraction=kwargs['ApplyPrimaryFraction'],
            PrimaryFractionRange=kwargs['PrimaryFractionRange'])

        # Calculate the theoretical reflectivity for normalization using Refl1D
        q = norm_wksp.readX(0)
        model_json = self.getProperty("Refl1DModelParameters").value
        model_dict = json.loads(model_json)
        model_reflectivity = self.calculate_reflectivity(model_dict, q)

        model_wksp = CreateWorkspace(
            DataX=q,
            DataY=model_reflectivity,
            DataE=np.zeros(len(q)),
            UnitX=norm_wksp.getAxis(0).getUnit().unitID())

        # Calculate the incident flux ( measured / model) for reference
        incident_flux = Divide(norm_wksp, model_wksp)

        # Process the sample run(s)
        kwargs['NormFlag'] = False
        kwargs['ApplyScalingFactor'] = False
        sample_wksp = LiquidsReflectometryReduction(**kwargs)

        # Normalize using the incident flux
        out_wksp = Divide(sample_wksp, incident_flux)

        # Output
        self.setProperty('OutputWorkspace', out_wksp)

        # Clean up
        DeleteWorkspace(model_wksp)
        DeleteWorkspace(norm_wksp)
        DeleteWorkspace(incident_flux)
Example #38
0
def reduce_to_2theta(hb2b_builder,
                     pixel_matrix,
                     hb2b_data_ws_name,
                     counts_array,
                     mask_vec,
                     mask_ws_name,
                     num_bins=1000):
    """
    Reduce to 2theta with Masks
    :param hb2b_builder:
    :param pixel_matrix:
    :param hb2b_data_ws_name:
    :param counts_array:
    :param mask_vec:
    :param num_bins:
    :return:
    """
    # reduce by PyRS using the supplied counts array
    vec_counts = counts_array.astype('float64')

    # mask: astype() returns a copy, so the result must be reassigned
    if mask_vec is not None:
        vec_counts = vec_counts.astype('float64')
        mask_vec = mask_vec.astype('float64')
        vec_counts *= mask_vec
    # reduce
    bin_edges, histogram = hb2b_builder.reduce_to_2theta_histogram(
        pixel_matrix, vec_counts, num_bins)
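    # bin_edges has one more entry than histogram, so CreateWorkspace below
    # produces a histogram-mode (bin-edge) workspace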

    # create workspace
    pyrs_reduced_name = '{}_pyrs_reduced'.format(hb2b_data_ws_name)
    CreateWorkspace(DataX=bin_edges,
                    DataY=histogram,
                    NSpec=1,
                    OutputWorkspace=pyrs_reduced_name)
    SaveNexusProcessed(InputWorkspace=pyrs_reduced_name,
                       Filename='{}.nxs'.format(pyrs_reduced_name),
                       Title='PyRS reduced: {}'.format(hb2b_data_ws_name))

    if True:
        # Mantid route: convert to 2theta and resample, for comparison with PyRS
        two_theta_ws_name = '{}_2theta'.format(hb2b_data_ws_name)

        # Mask
        if mask_ws_name:
            # Multiply by masking workspace
            masked_ws_name = '{}_masked'.format(hb2b_data_ws_name)
            Multiply(LHSWorkspace=hb2b_data_ws_name,
                     RHSWorkspace=mask_ws_name,
                     OutputWorkspace=masked_ws_name,
                     ClearRHSWorkspace=False)
            hb2b_data_ws_name = masked_ws_name
            SaveNexusProcessed(InputWorkspace=hb2b_data_ws_name,
                               Filename='{}_raw.nxs'.format(hb2b_data_ws_name))
        # END-IF

        # # this is for test only!
        # ConvertSpectrumAxis(InputWorkspace=hb2b_data_ws_name, OutputWorkspace=two_theta_ws_name, Target='Theta',
        #                     OrderAxis=False)
        # Transpose(InputWorkspace=two_theta_ws_name, OutputWorkspace=two_theta_ws_name)
        # two_theta_ws = mtd[two_theta_ws_name]
        # for i in range(10):
        #     print ('{}: x = {}, y = {}'.format(i, two_theta_ws.readX(0)[i], two_theta_ws.readY(0)[i]))
        # for i in range(10010, 10020):
        #     print ('{}: x = {}, y = {}'.format(i, two_theta_ws.readX(0)[i], two_theta_ws.readY(0)[i]))

        ConvertSpectrumAxis(InputWorkspace=hb2b_data_ws_name,
                            OutputWorkspace=two_theta_ws_name,
                            Target='Theta')
        Transpose(InputWorkspace=two_theta_ws_name,
                  OutputWorkspace=two_theta_ws_name)
        # final:
        mantid_reduced_name = '{}_mtd_reduced'.format(hb2b_data_ws_name)
        ResampleX(InputWorkspace=two_theta_ws_name,
                  OutputWorkspace=mantid_reduced_name,
                  NumberBins=num_bins,
                  PreserveEvents=False)
        mantid_ws = mtd[mantid_reduced_name]

        SaveNexusProcessed(
            InputWorkspace=mantid_reduced_name,
            Filename='{}.nxs'.format(mantid_reduced_name),
            Title='Mantid reduced: {}'.format(hb2b_data_ws_name))

        plt.plot(mantid_ws.readX(0),
                 mantid_ws.readY(0),
                 color='blue',
                 marker='o')

    # END-IF

    plt.plot(bin_edges[:-1], histogram, color='red')

    plt.show()

    return
    def histogram_peak_deviations(
            self,
            peak_centers_in_tof: Union[str, TableWorkspace],
            workspace_with_instrument: Union[str, Workspace],
            output_workspace: str,
            grouping_workspace: Union[str, WorkspaceGroup],
            deviation_params: List[float] = [-0.1, 0.0001, 0.1],
            percent_deviations: bool = False):
        r"""
        Find deviations of the fitted peak centers with respect to the reference values,
        in d-spacing units. Histogram these deviations for all peaks found on each bank

        @param peak_centers_in_tof: table containing the centers of the fitted peaks found
            on each pixel, in TOF units
        @param workspace_with_instrument: any workspace whose embedded instrument will
            be used to calculate the DIFC values per pixel
        @param output_workspace: workspace containing the histograms of peak deviations per bank
        @param grouping_workspace: workspace assigning group ID's (bank numbers) to each pixel
        @param deviation_params: a triad of first histogram boundary, bin width, and
            last histogram boundary, in Angstroms
        @param percent_deviations: each deviation from the reference d-spacing will be divided by the
            d-spacing value and multiplied by 100. Adjust `deviation_params` accordingly
        """
        # Find DIFC values using the geometry of the instrument embedded in `workspace_with_instrument`
        difc_workspace = self.temp_ws()  # we need a temporary workspace
        CalculateDIFC(workspace_with_instrument,
                      OutputWorkspace=difc_workspace)
        difc_values = mtd[difc_workspace].extractY().flatten()

        # Save the contents of the table to a python dictionary which we can overwrite easily
        tof_table = mtd[str(peak_centers_in_tof)]
        tof_dict = tof_table.toDict(
        )  # the table as a data structure we can modify

        # Calculate the peak deviations with respect to the reference peak centers, in d-spacing or percent units
        column_names = tof_table.getColumnNames()
        column_peaks = [name for name in column_names
                        if '@' in name]  # column containing the peak centers
        for column in column_peaks:
            dspacing_reference = float(column.replace(
                '@', ''))  # the column name has the reference peak center
            # peak deviations, in d-spacing units, that we use to overwrite tof_dic
            tof_dict[column] = np.array(
                tof_dict[column]) / difc_values - dspacing_reference
            # switch to percent deviations if so required
            if percent_deviations is True:
                tof_dict[column] *= 100 / dspacing_reference
            tof_dict[column] = tof_dict[column].tolist()  # cast to list

        # Extract the group ID's, which typically corresponds to bank numbers
        # grouping_workspace_y contain the group ID (bank number) of each pixel
        grouping_workspace_y = [
            int(n) for n in mtd[str(grouping_workspace)].extractY().flatten()
        ]
        group_ids = sorted(list(
            set(grouping_workspace_y)))  # list of group ID's (bank numbers)

        # List all the peak deviations within a group (bank)
        deviations_in_group = {group_id: [] for group_id in group_ids}
        for row_index in range(
                tof_table.rowCount()):  # iterate over each pixel
            group_id = grouping_workspace_y[
                row_index]  # group ID (bank number) of the current pixel
            # find the peak deviations for all peaks that were found in the current pixel
            deviations_in_pixel = np.array(
                [tof_dict[column][row_index] for column in column_peaks])
            # `nan` is assigned to listed peaks missing in the current pixel. We must get rid of them
            deviations_in_group[group_id].extend(
                deviations_in_pixel[~np.isnan(deviations_in_pixel)].tolist())

        # Histogram the deviations for all pixels within a group (bank)
        start, step, stop = deviation_params  # start and stop are first and last histogram boundaries
        bins = np.arange(start, stop + step / 2, step)
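        # pad the upper edge by half a bin so floating-point round-off cannot drop the last boundary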
        histograms = dict()  # one histogram per group ID (bank)
        histogram_empty = np.zeros(
            len(bins) -
            1)  # this for banks with no peaks, for instance, for masked banks
        for group_id, deviations in deviations_in_group.items():
            if len(deviations
                   ) == 0:  # no peaks in the group (bank), thus no deviations
                histogram = histogram_empty
            else:
                histogram = np.histogram(deviations, bins)[0]
            histograms[group_id] = histogram

        # Create a workspace with the histograms
        spectra = np.array(list(
            histograms.values())).flatten()  # flatten all histograms into a single list
        unit_x = 'dSpacing' if percent_deviations is False else 'Empty'
        title_prefix = 'Peak ' if percent_deviations is False else 'Percent peak '
        CreateWorkspace(DataX=bins,
                        DataY=spectra,
                        NSpec=len(group_ids),
                        UnitX=unit_x,
                        WorkspaceTitle=title_prefix +
                        'deviations per pixel group',
                        OutputWorkspace=output_workspace)
        insert_bank_numbers(
            output_workspace,
            grouping_workspace)  # label each spectrum with the bank number
Example #40
0
class SampleTest(unittest.TestCase):

    def setUp(self):
        self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")

    def test_geometry_getters_and_setters(self):
        sample = self._ws.sample()

        sample.setThickness(12.5)
        self.assertEquals(sample.getThickness(), 12.5)
        sample.setHeight(10.2)
        self.assertEquals(sample.getHeight(), 10.2)
        sample.setWidth(5.9)
        self.assertEquals(sample.getWidth(), 5.9)

    def test_crystal_structure_handling(self):
        sample = self._ws.sample()

        self.assertEquals(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

        cs = CrystalStructure('5.43 5.43 5.43',
                              'F d -3 m',
                              'Si 0 0 0 1.0 0.01')

        sample.setCrystalStructure(cs)

        self.assertEquals(sample.hasCrystalStructure(), True)

        cs_from_sample = sample.getCrystalStructure()

        self.assertEquals(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
        self.assertEquals(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
        self.assertEquals(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
        self.assertEquals(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])


        sample.clearCrystalStructure()

        self.assertEquals(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

    def test_material(self):
        SetSampleMaterial(self._ws,"Al2 O3",SampleMassDensity=4)
        material = self._ws.sample().getMaterial()

        self.assertAlmostEqual(material.numberDensity, 0.1181, places=4)
        self.assertAlmostEqual(material.relativeMolecularMass(), 101.961, places=3)

        atoms, numatoms = material.chemicalFormula()

        self.assertEquals(len(atoms), len(numatoms))
        self.assertEquals(len(atoms), 2)
        self.assertEquals(numatoms[0], 2)
        self.assertEquals(numatoms[1], 3)

        xs0 = atoms[0].neutron()
        xs1 = atoms[1].neutron()
        xs = ( xs0['coh_scatt_xs']*2 + xs1['coh_scatt_xs']*3 ) / 5
        self.assertAlmostEquals(material.cohScatterXSection(), xs, places=4)

    def test_get_shape(self):
        sample = self._ws.sample()
        self.assertEquals(type(sample.getShape()), CSGObject)

    def test_get_shape_xml(self):
        sample = self._ws.sample()
        shape = sample.getShape()
        xml = shape.getShapeXML()
        self.assertEquals(type(xml), str)
Example #41
0
 def test_that_errorbar_autoscales_by_default(self):
     ws = CreateWorkspace(DataX=[10, 20], DataY=[10, 20], DataE=[1, 1], OutputWorkspace="ws")
     self.ax.errorbar(ws)
     ws2 = CreateWorkspace(DataX=[10, 20], DataY=[10, 5000], DataE=[1, 1], OutputWorkspace="ws2")
     self.ax.errorbar(ws2)
     self.assertGreaterEqual(self.ax.get_ylim()[1], 5000)
Example #42
0
 def setUp(self):
     self._ws = CreateWorkspace(DataX=[1, 2, 3, 4, 5], DataY=[1, 2, 3, 4, 5], OutputWorkspace="dummy")
Example #43
0
 def test_that_errorbar_autoscaling_can_be_turned_off_when_plotting_multiple_workspaces(self):
     ws = CreateWorkspace(DataX=[10, 20], DataY=[10, 20])
     self.ax.errorbar(ws)
     ws2 = CreateWorkspace(DataX=[10, 20], DataY=[10, 5000])
     self.ax.errorbar(ws2, autoscale_on_update=False)
     self.assertLess(self.ax.get_ylim()[1], 5000)
Example #44
0
 def PyExec(self):
     raw_ws = self.getProperty('InputWorkspace').value
     sample_geometry = self.getPropertyValue('SampleGeometry')
     sample_material = self.getPropertyValue('SampleMaterial')
     cal_file_name = self.getPropertyValue('CalFileName')
     SetSample(InputWorkspace=raw_ws,
               Geometry=sample_geometry,
               Material=sample_material)
     # find the closest monitor to the sample for incident spectrum
     raw_spec_info = raw_ws.spectrumInfo()
     incident_index = None
     for i in range(raw_spec_info.size()):
         if raw_spec_info.isMonitor(i):
             l2 = raw_spec_info.position(i)[2]
             if not incident_index:
                 incident_index = i
             else:
                 if raw_spec_info.position(incident_index)[2] < l2 < 0:
                     incident_index = i
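     # incident_index now holds the monitor with the largest z that is still upstream of the sample (z < 0)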
     monitor = ExtractSpectra(InputWorkspace=raw_ws, WorkspaceIndexList=[incident_index])
     monitor = ConvertUnits(InputWorkspace=monitor, Target="Wavelength")
     x_data = monitor.dataX(0)
     min_x = np.min(x_data)
     max_x = np.max(x_data)
     width_x = (max_x - min_x) / x_data.size
     fit_spectra = FitIncidentSpectrum(InputWorkspace=monitor,
                                       BinningForCalc=[min_x, 1 * width_x, max_x],
                                       BinningForFit=[min_x, 10 * width_x, max_x],
                                       FitSpectrumWith="CubicSpline")
     self_scattering_correction = CalculatePlaczekSelfScattering(InputWorkspace=raw_ws,
                                                                 IncidentSpectra=fit_spectra,
                                                                 ScaleByPackingFraction=False,
                                                                 Version=1)
     # Convert to Q
     self_scattering_correction = ConvertUnits(InputWorkspace=self_scattering_correction,
                                               Target="MomentumTransfer", EMode='Elastic')
     cal_workspace = LoadCalFile(InputWorkspace=self_scattering_correction,
                                 CalFileName=cal_file_name,
                                 Workspacename='cal_workspace',
                                 MakeOffsetsWorkspace=False,
                                 MakeMaskWorkspace=False,
                                 MakeGroupingWorkspace=True)
     ssc_min_x, ssc_max_x = float('inf'), float('-inf')
     for index in range(self_scattering_correction.getNumberHistograms()):
         spec_info = self_scattering_correction.spectrumInfo()
         if not spec_info.isMasked(index) and not spec_info.isMonitor(index):
             ssc_x_data = np.ma.masked_invalid(self_scattering_correction.dataX(index))
             if np.min(ssc_x_data) < ssc_min_x:
                 ssc_min_x = np.min(ssc_x_data)
             if np.max(ssc_x_data) > ssc_max_x:
                 ssc_max_x = np.max(ssc_x_data)
     ssc_width_x = (ssc_max_x - ssc_min_x) / ssc_x_data.size
     # TO DO: calculate rebin parameters per group
     # and run GroupDetectors on each separately
     self_scattering_correction = Rebin(InputWorkspace=self_scattering_correction,
                                        Params=[ssc_min_x, ssc_width_x, ssc_max_x],
                                        IgnoreBinErrors=True)
     self_scattering_correction = GroupDetectors(InputWorkspace=self_scattering_correction,
                                                 CopyGroupingFromWorkspace='cal_workspace_group')
     n_pixel = np.zeros(self_scattering_correction.getNumberHistograms())
     for i in range(cal_workspace.getNumberHistograms()):
         grouping = cal_workspace.dataY(i)
         if grouping[0] > 0:
             n_pixel[int(grouping[0] - 1)] += 1
     correction_ws = CreateWorkspace(DataY=n_pixel, DataX=[0, 1],
                                     NSpec=self_scattering_correction.getNumberHistograms())
     self_scattering_correction = Divide(LHSWorkspace=self_scattering_correction, RHSWorkspace=correction_ws)
     DeleteWorkspace('cal_workspace_group')
     DeleteWorkspace(correction_ws)
     DeleteWorkspace(fit_spectra)
     DeleteWorkspace(monitor)
     DeleteWorkspace(raw_ws)
     self.setProperty('OutputWorkspace', self_scattering_correction)
Example #45
0
 def test_that_errorbar_autoscaling_can_be_turned_off(self):
     ws = CreateWorkspace(DataX=[10, 20], DataY=[10, 20], DataE=[1, 2], OutputWorkspace="ws")
     self.ax.errorbar(ws)
     ws2 = CreateWorkspace(DataX=[10, 20], DataY=[10, 5000], DataE=[1, 1], OutputWorkspace="ws2")
     self.ax.errorbar(ws2, autoscale_on_update=False)
     self.assertLess(self.ax.get_ylim()[1], 5000)
    def PyExec(self):
        raw_ws = self.getProperty('InputWorkspace').value
        sample_geometry = self.getPropertyValue('SampleGeometry')
        sample_material = self.getPropertyValue('SampleMaterial')
        cal_file_name = self.getPropertyValue('CalFileName')
        SetSample(InputWorkspace=raw_ws,
                  Geometry=sample_geometry,
                  Material=sample_material)
        # find the closest monitor to the sample for incident spectrum
        raw_spec_info = raw_ws.spectrumInfo()
        incident_index = None
        for i in range(raw_spec_info.size()):
            if raw_spec_info.isMonitor(i):
                l2 = raw_spec_info.position(i)[2]
                if not incident_index:
                    incident_index = i
                else:
                    if raw_spec_info.position(incident_index)[2] < l2 < 0:
                        incident_index = i
        monitor = ExtractSpectra(InputWorkspace=raw_ws,
                                 WorkspaceIndexList=[incident_index])
        monitor = ConvertUnits(InputWorkspace=monitor, Target="Wavelength")
        x_data = monitor.dataX(0)
        min_x = np.min(x_data)
        max_x = np.max(x_data)
        width_x = (max_x - min_x) / x_data.size
        fit_spectra = FitIncidentSpectrum(
            InputWorkspace=monitor,
            BinningForCalc=[min_x, 1 * width_x, max_x],
            BinningForFit=[min_x, 10 * width_x, max_x],
            FitSpectrumWith="CubicSpline")
        self_scattering_correction = CalculatePlaczekSelfScattering(
            InputWorkspace=raw_ws, IncidentSpectra=fit_spectra)
        cal_workspace = LoadCalFile(InputWorkspace=self_scattering_correction,
                                    CalFileName=cal_file_name,
                                    Workspacename='cal_workspace',
                                    MakeOffsetsWorkspace=False,
                                    MakeMaskWorkspace=False)
        self_scattering_correction = DiffractionFocussing(
            InputWorkspace=self_scattering_correction,
            GroupingFilename=cal_file_name)

        n_pixel = np.zeros(self_scattering_correction.getNumberHistograms())

        for i in range(cal_workspace.getNumberHistograms()):
            grouping = cal_workspace.dataY(i)
            if grouping[0] > 0:
                n_pixel[int(grouping[0] - 1)] += 1
        correction_ws = CreateWorkspace(
            DataY=n_pixel,
            DataX=[0, 1],
            NSpec=self_scattering_correction.getNumberHistograms())
        self_scattering_correction = Divide(
            LHSWorkspace=self_scattering_correction,
            RHSWorkspace=correction_ws)
        ConvertToDistribution(Workspace=self_scattering_correction)
        self_scattering_correction = ConvertUnits(
            InputWorkspace=self_scattering_correction,
            Target="MomentumTransfer",
            EMode='Elastic')
        DeleteWorkspace('cal_workspace_group')
        DeleteWorkspace(correction_ws)
        DeleteWorkspace(fit_spectra)
        DeleteWorkspace(monitor)
        DeleteWorkspace(raw_ws)
        self.setProperty('OutputWorkspace', self_scattering_correction)
Example #47
0
def create_workspace_wrapper_stub_object(name):
    workspace = CreateWorkspace([0], [0])
    wrapped_workspace = MuonWorkspaceWrapper(workspace)
    wrapped_workspace.show(name)
    return wrapped_workspace
Example #48
0
def sel_const(runs,
              dist=4.0,
              thickness=5e-3,
              show_fits=False,
              show_quality=False):
    """Calculate the spin echo length of the instrument

    Parameters
    ----------
    runs: list of Workspaces
      A list of the workspaces containing the polarisation versus wavelength
      for consecutive detector tubes
    dist: float
      The distance from the sample to the detector in meters.  The default
      is 4.0
    thickness: float
      The distance between detector tubes in meters.  Defaults to 5mm.
    show_fits: bool
      If true, plots the sinusoid fits used to calculate the frequency
    show_quality: bool
      If true, plots the frequency versus tube position to confirm that the
      frequency grows linearly with position.

    Returns
    -------
    A float containing the spin echo length, in nanometers, of a one angstrom
    neutron.
    """
    freqs = []
    for run in runs:
        x = run.extractX()[0]
        x = (x[1:] + x[:-1]) / 2
        p = run.extractY()[0]
        p[np.isnan(p)] = 0
        fp = np.fft.fft(p)

        conv = len(x) / (np.max(x) - np.min(x))
        max_arg = (np.nanargmax(np.abs(fp[1:int(len(p) / 2)])) + 1)
        amp = np.abs(fp[max_arg]) / len(x)
        max_arg = max_arg * conv / len(x)

        model = "name=UserFunction,Formula=e*cos(x*f),f={},e={}"
        Fit(Function=model.format(max_arg * 2 * np.pi, amp),
            InputWorkspace=run,
            StartX=3,
            EndX=7,
            CreateOutput=True)

        result = mtd[run.getName() + "_Parameters"]

        freqs.append(np.abs(result.column(1)[1] / 2 / np.pi))
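        # column 1 of the Fit '_Parameters' table holds the fitted values; row 1 is f,
        # the angular frequency of the cosine, so divide by 2*pi to get the frequency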
        if not show_fits:
            DeleteWorkspace(run.getName() + "_Parameters")
            DeleteWorkspace(run.getName() + "_NormalisedCovarianceMatrix")
            DeleteWorkspace(run.getName() + "_Workspace")

    def model(x, m, b):
        """A simple linear model"""
        return m * x + b

    xs = np.arange(len(runs)) * thickness
    fit, _ = curve_fit(model, xs, freqs)

    sel_fits = CreateWorkspace(xs, freqs)
    Fit(Function="name=LinearBackground",
        InputWorkspace=sel_fits,
        CreateOutput=True)
    result = 0.1 * mtd["sel_fits_Parameters"].column(1)[1] * dist
    if not show_quality:
        DeleteWorkspace("sel_fits_Parameters")
        DeleteWorkspace("sel_fits_NormalisedCovarianceMatrix")
        DeleteWorkspace("sel_fits_Workspace")
        DeleteWorkspace("sel_fits")

    return result
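
# A minimal usage sketch (workspace names here are hypothetical): with polarisation
# vs wavelength loaded for consecutive tubes as 'tube_0' ... 'tube_7',
#   echo_length = sel_const([mtd['tube_%d' % i] for i in range(8)], dist=4.0)
# would return the spin echo length, in nm, of a one angstrom neutron.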
Example #49
0
 def _parseStructure(self, structure):
     from mantid.simpleapi import mtd, LoadCIF, CreateWorkspace, DeleteWorkspace
     from six import string_types
     import uuid
     self._fromCIF = False
     if isinstance(structure, string_types):
         if mtd.doesExist(structure):
             try:
                 self._cryst = self._copyCrystalStructure(
                     mtd[structure].sample().getCrystalStructure())
                 self._getUniqueAtoms()
             except RuntimeError:
                 raise ValueError('Workspace '
                                  '%s'
                                  ' has no valid CrystalStructure' %
                                  (structure))
         else:
             tmpws = CreateWorkspace(1,
                                     1,
                                     OutputWorkspace='_tempPointCharge_' +
                                     str(uuid.uuid4())[:8])
             try:
                 LoadCIF(tmpws, structure)
                 # Attached CrystalStructure object gets destroyed when workspace is deleted
                 self._cryst = self._copyCrystalStructure(
                     tmpws.sample().getCrystalStructure())
             except:
                 DeleteWorkspace(tmpws)
                 raise
             else:
                 DeleteWorkspace(tmpws)
                 self._getUniqueAtoms()
     elif isinstance(structure, list):
         if (len(structure) == 4
                 and all([isinstance(x, (int, float)) for x in structure])):
             structure = [structure]
         if (all([
                 isinstance(x, list) and (len(x) == 4)
                 and all([isinstance(y, (int, float)) for y in x])
                 for x in structure
         ])):
             self._ligands = structure
         else:
             raise ValueError(
                 'Incorrect ligands direct input. Must be a 4-element list or a list '
                 'of 4-element list. Each ligand must be of the form [charge, x, y, z]'
             )
     elif hasattr(structure, 'getScatterers'):
         self._cryst = structure
         self._getUniqueAtoms()
     else:
         if not hasattr(structure, 'sample'):
             raise ValueError(
                 'First input must be a Mantid CrystalStructure object, workspace or string '
                 '(name of CIF file or workspace)')
         try:
             self._cryst = self._copyCrystalStructure(
                 structure.sample().getCrystalStructure())
             self._getUniqueAtoms()
         except RuntimeError:
             raise ValueError('Workspace '
                              '%s'
                              ' has no valid CrystalStructure' %
                              (structure.name()))
    def createTestWorkspace2(self):
        """ Create a workspace for testing against with more situation
        """
        from mantid.simpleapi import CreateWorkspace
        from mantid.simpleapi import AddSampleLog
        from mantid import kernel
        from time import gmtime, strftime, mktime
        from datetime import datetime, timedelta
        import numpy as np

        # Create a matrix workspace
        x = np.array([1.,2.,3.,4.])
        y = np.array([1.,2.,3.])
        e = np.sqrt(np.array([1.,2.,3.]))
        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')

        # Add run_start
        year = 2014
        month = 2
        day = 15
        hour = 13
        minute = 34
        second = 3
        dtimesec = 0.0010

        timefluc = 0.0001

        #tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime())))
        runstart = datetime(year, month, day, hour, minute, second)
        AddSampleLog(Workspace=wksp,LogName='run_start',LogText=str(runstart))

        tsp_a = kernel.FloatTimeSeriesProperty("SensorA")
        tsp_b = kernel.FloatTimeSeriesProperty("SensorB")
        tsp_c = kernel.FloatTimeSeriesProperty("SensorC")
        tsp_d = kernel.FloatTimeSeriesProperty("SensorD")
        logs = [tsp_a, tsp_b, tsp_c, tsp_d]

        dbbuf = ""

        np.random.seed(0)
        for i in np.arange(25):
            # Randomly pick up log without records
            # first iteration must have all the record
            skiploglist = []
            if i > 0:
                numnorecord = random.randint(-1, 4)
                if numnorecord > 0:
                    for j in range(numnorecord):
                        # indices 4-6 match no sensor, so some draws skip nothing
                        logindex = random.randint(0, 6)
                        skiploglist.append(logindex)
                    # ENDFOR (j)
                # ENDIF (numnorecord)
            # ENDIF (i)

            dbbuf += "----------- %d -------------\n" % (i)

            # Record
            for j in range(4):
                # Skip if selected
                if j in skiploglist:
                    continue

                # get random time shifts
                timeshift = (random.random()-0.5)*timefluc

                if i == 0:
                    # the first record keeps (almost) exactly the same time stamps
                    timeshift *= 0.0001

                # dtimesec is a time step in seconds, so pass it as seconds
                deltatime = timedelta(seconds=i * dtimesec + timeshift)
                tmptime = str(runstart + deltatime)
                tmpvalue = float(i*i*6)+j
                logs[j].addValue(tmptime, tmpvalue)

                dbbuf += "%s: %s = %d\n" % (logs[j].name, tmptime, tmpvalue)

            # ENDFOR (j)
        # ENDFOR (i)

        # print(dbbuf)  # uncomment to inspect the generated log records

        wksp.mutableRun()['SensorA']=tsp_a
        wksp.mutableRun()['SensorB']=tsp_b
        wksp.mutableRun()['SensorC']=tsp_c
        wksp.mutableRun()['SensorD']=tsp_d

        return wksp
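
# A minimal read-back sketch for the time-series logs attached above, assuming
# a live Mantid session (e.g. called from another test method on the same class):
#
#     wksp = self.createTestWorkspace2()
#     for name in ('SensorA', 'SensorB', 'SensorC', 'SensorD'):
#         log = wksp.run().getProperty(name)
#         print(name, log.size(), log.value[:3], log.times[:3])
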
class FindPeaksAutomaticTest(unittest.TestCase):
    data_ws = None
    peak_guess_table = None
    peak_table_header = [
        'centre', 'error centre', 'height', 'error height', 'sigma',
        'error sigma', 'area', 'error area'
    ]
    alg_instance = None
    x_values = None
    y_values = None

    def setUp(self):
        # Create two Gaussian peaks on a flat background
        self.x_values = np.linspace(0, 100, 1001)
        self.centre = [25, 75]
        self.height = [35, 20]
        self.width = [10, 5]
        self.y_values = self.gaussian(self.x_values, self.centre[0],
                                      self.height[0], self.width[0])
        self.y_values += self.gaussian(self.x_values, self.centre[1],
                                       self.height[1], self.width[1])
        self.background = 10 * np.ones(len(self.x_values))
        self.y_values += self.background

        # Generating a table with a guess of the position of the centre of the peaks
        peak_table = CreateEmptyTableWorkspace()
        peak_table.addColumn(type='float', name='Approximated Centre')
        peak_table.addRow([self.centre[0] + 2])
        peak_table.addRow([self.centre[1] - 3])

        self.peakids = [
            np.argwhere(self.x_values == self.centre[0])[0, 0],
            np.argwhere(self.x_values == self.centre[1])[0, 0]
        ]

        # Generating a workspace with the data and a flat background
        self.raw_ws = CreateWorkspace(DataX=self.x_values,
                                      DataY=self.y_values,
                                      OutputWorkspace='raw_ws')
        self.data_ws = CreateWorkspace(
            DataX=np.concatenate((self.x_values, self.x_values)),
            DataY=np.concatenate((self.y_values, self.background)),
            DataE=np.sqrt(np.concatenate((self.y_values, self.background))),
            NSpec=2,
            OutputWorkspace='data_ws')

        self.peak_guess_table = peak_table

        self.alg_instance = _FindPeaksAutomatic.FindPeaksAutomatic()

    def tearDown(self):
        self.delete_if_present('data_ws')
        self.delete_if_present('peak_guess_table')
        self.delete_if_present('peak_table')
        self.delete_if_present('refit_peak_table')
        self.delete_if_present('fit_cost')
        self.delete_if_present('fit_result_NormalisedCovarianceMatrix')
        self.delete_if_present('fit_result_Parameters')
        self.delete_if_present('fit_result_Workspace')
        self.delete_if_present('fit_table')
        self.delete_if_present('data_table')
        self.delete_if_present('refit_data_table')
        self.delete_if_present('tmp_table')

        self.alg_instance = None
        self.peak_guess_table = None
        self.data_ws = None

    @staticmethod
    def gaussian(xvals, centre, height, sigma):
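        # unnormalised Gaussian: height * exp(-(x - centre)^2 / (2 * sigma^2))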
        exponent = (xvals - centre) / (np.sqrt(2) * sigma)
        return height * np.exp(-exponent * exponent)

    @staticmethod
    def delete_if_present(workspace):
        if workspace in mtd:
            DeleteWorkspace(workspace)

    def assertTableEqual(self, expected, actual):
        self.assertEqual(expected.columnCount(), actual.columnCount())
        self.assertEqual(expected.rowCount(), actual.rowCount())
        for i in range(expected.rowCount()):
            self.assertEqual(expected.row(i), actual.row(i))

    def assertPeakFound(self, peak_params, centre, height, sigma, tolerance=0.01):
        # compare each fitted parameter with a relative tolerance and fail
        # the test with a descriptive message on the first mismatch
        for name, expected in (('centre', centre), ('height', height),
                               ('sigma', sigma)):
            if not np.isclose(peak_params[name], expected, rtol=tolerance):
                self.fail('Expected {} of {}, got {}. Difference greater than '
                          'tolerance {}'.format(name, expected,
                                                peak_params[name], tolerance))

    def test_algorithm_with_no_input_workspace_raises_exception(self):
        with self.assertRaises(RuntimeError):
            FindPeaksAutomatic()

    def test_algorithm_with_negative_acceptance_threshold_throws(self):
        with self.assertRaises(ValueError):
            FindPeaksAutomatic(InputWorkspace=self.data_ws,
                               AcceptanceThreshold=-0.1,
                               PlotPeaks=False)

    def test_algorithm_with_negative_smooth_window_throws(self):
        with self.assertRaises(ValueError):
            FindPeaksAutomatic(InputWorkspace=self.data_ws,
                               SmoothWindow=-5,
                               PlotPeaks=False)

    def test_algorithm_with_negative_num_bad_peaks_to_consider_throws(self):
        with self.assertRaises(ValueError):
            FindPeaksAutomatic(InputWorkspace=self.data_ws,
                               BadPeaksToConsider=-3,
                               PlotPeaks=False)

    def test_algorithm_with_negative_estimate_of_peak_sigma_throws(self):
        with self.assertRaises(ValueError):
            FindPeaksAutomatic(InputWorkspace=self.data_ws,
                               EstimatePeakSigma=-3,
                               PlotPeaks=False)

    def test_algorithm_with_negative_min_peak_sigma_throws(self):
        with self.assertRaises(ValueError):
            FindPeaksAutomatic(InputWorkspace=self.data_ws,
                               MinPeakSigma=-0.1,
                               PlotPeaks=False)

    def test_algorithm_with_negative_max_peak_sigma_throws(self):
        with self.assertRaises(ValueError):
            FindPeaksAutomatic(InputWorkspace=self.data_ws,
                               MaxPeakSigma=-0.1,
                               PlotPeaks=False)

    def test_algorithm_creates_all_output_workspaces(self):
        ws_name = self.raw_ws.getName()
        FindPeaksAutomatic(self.raw_ws)

        self.assertIn('{}_with_errors'.format(ws_name), mtd)
        self.assertIn('{}_properties'.format(ws_name), mtd)
        self.assertIn('{}_refit_properties'.format(ws_name), mtd)

    def test_algorithm_does_not_create_temporary_workspaces(self):
        FindPeaksAutomatic(self.raw_ws)

        self.assertNotIn('ret', mtd)
        self.assertNotIn('raw_data_ws', mtd)
        self.assertNotIn('flat_ws', mtd)
        self.assertNotIn('fit_result_NormalisedCovarianceMatrix', mtd)
        self.assertNotIn('fit_result_Parameters', mtd)
        self.assertNotIn('fit_result_Workspace', mtd)
        self.assertNotIn('fit_cost', mtd)

    def test_output_tables_are_correctly_formatted(self):
        FindPeaksAutomatic(self.raw_ws, FitToBaseline=True)

        peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'properties')]
        refit_peak_table = mtd['{}_{}'.format(self.raw_ws.getName(),
                                              'refit_properties')]
        self.assertEqual(self.peak_table_header, peak_table.getColumnNames())
        self.assertEqual(self.peak_table_header,
                         refit_peak_table.getColumnNames())
        self.assertEqual(2, peak_table.rowCount())
        self.assertEqual(0, refit_peak_table.rowCount())

    def test_single_erosion_returns_correct_result(self):
        yvals = np.array([-2, 3, 1, 0, 4])

        self.assertEqual(-2, self.alg_instance._single_erosion(yvals, 2, 2))

    def test_single_erosion_checks_extremes_of_list_correctly(self):
        yvals = np.array([-5, -3, 0, 1, -2, 2, 9])

        self.assertEqual(-2, self.alg_instance._single_erosion(yvals, 3, 1))
        self.assertEqual(-3, self.alg_instance._single_erosion(yvals, 3, 2))

    def test_single_erosion_with_zero_window_does_nothing(self):
        yvals = np.array([-5, -3, 0, 1, -2, 2, 9])

        self.assertEqual(0, self.alg_instance._single_erosion(yvals, 2, 0))

    def test_single_dilation_returns_correct_result(self):
        yvals = np.array([-2, 3, 1, 0, 4])

        self.assertEqual(4, self.alg_instance._single_dilation(yvals, 2, 2))

    def test_single_dilation_checks_extremes_of_list_correctly(self):
        yvals = np.array([-5, 3, 0, -7, 2, -2, 9])

        self.assertEqual(2, self.alg_instance._single_dilation(yvals, 3, 1))
        self.assertEqual(3, self.alg_instance._single_dilation(yvals, 3, 2))

    def test_single_dilation_with_zero_window_does_nothing(self):
        yvals = np.array([-5, -3, 0, 1, -2, 2, 9])

        self.assertEqual(0, self.alg_instance._single_dilation(yvals, 2, 0))
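
    # A reference sketch (not the production code) of the semantics the
    # erosion/dilation tests above pin down: _single_erosion takes the window
    # minimum around an index, _single_dilation the window maximum, and a
    # zero-size window leaves the point unchanged.
    @staticmethod
    def _reference_single_erosion(yvals, index, window):
        lo, hi = max(0, index - window), min(len(yvals), index + window + 1)
        return np.min(yvals[lo:hi])

    @staticmethod
    def _reference_single_dilation(yvals, index, window):
        lo, hi = max(0, index - window), min(len(yvals), index + window + 1)
        return np.max(yvals[lo:hi])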

    def test_erosion_with_zero_window_is_an_invariant(self):
        np.testing.assert_equal(self.y_values,
                                self.alg_instance.erosion(self.y_values, 0))

    def test_erosion_calls_single_erosion_the_correct_number_of_times(self):
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic._single_erosion'
        ) as mock_single_erosion:
            times = len(self.y_values)
            win_size = 2
            call_list = []
            for i in range(times):
                call_list.append(mock.call(self.y_values, i, win_size))

            self.alg_instance.erosion(self.y_values, win_size)

            self.assertEqual(times, mock_single_erosion.call_count)
            mock_single_erosion.assert_has_calls(call_list, any_order=True)

    def test_dilation_with_zero_window_is_an_invariant(self):
        np.testing.assert_equal(self.y_values,
                                self.alg_instance.dilation(self.y_values, 0))

    def test_dilation_calls_single_erosion_the_correct_number_of_times(self):
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic._single_dilation'
        ) as mock_single_dilation:
            times = len(self.y_values)
            win_size = 2
            call_list = []
            for i in range(times):
                call_list.append(mock.call(self.y_values, i, win_size))

            self.alg_instance.dilation(self.y_values, win_size)

            self.assertEqual(times, mock_single_dilation.call_count)
            mock_single_dilation.assert_has_calls(call_list, any_order=True)

    @mock.patch(
        'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.erosion'
    )
    @mock.patch(
        'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.dilation'
    )
    def test_opening_calls_correct_functions_in_correct_order(
            self, mock_dilation, mock_erosion):
        win_size = 3

        self.alg_instance.opening(self.y_values, win_size)
        self.assertEqual(mock_erosion.call_count, 1)
        self.assertEqual(mock_dilation.call_count, 1)

        erosion_ret = self.alg_instance.erosion(self.y_values, win_size)
        mock_erosion.assert_called_with(self.y_values, win_size)
        mock_dilation.assert_called_with(erosion_ret, win_size)

    @mock.patch(
        'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.opening'
    )
    @mock.patch(
        'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.dilation'
    )
    @mock.patch(
        'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.erosion'
    )
    def test_average_calls_right_functions_in_right_order(
            self, mock_erosion, mock_dilation, mock_opening):
        win_size = 3

        self.alg_instance.average(self.y_values, win_size)
        self.assertEqual(mock_erosion.call_count, 1)
        self.assertEqual(mock_dilation.call_count, 1)
        self.assertEqual(mock_opening.call_count, 2)

        op_ret = self.alg_instance.opening(self.y_values, win_size)
        mock_opening.assert_called_with(self.y_values, win_size)
        mock_dilation.assert_called_with(op_ret, win_size)
        mock_erosion.assert_called_with(op_ret, win_size)
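
    # From the call pattern asserted above, a plausible reading of 'average'
    # (a sketch under that assumption, not necessarily the production formula):
    @staticmethod
    def _reference_average(alg, yvals, win_size):
        opened = alg.opening(yvals, win_size)  # opening = dilation(erosion(y))
        return (alg.dilation(opened, win_size) +
                alg.erosion(opened, win_size)) / 2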

    def test_generate_peak_guess_table_correctly_formats_table(self):
        peakids = [2, 4, 10, 34]

        peak_guess_table = self.alg_instance.generate_peak_guess_table(
            self.x_values, peakids)

        self.assertEqual(peak_guess_table.getColumnNames(), ['centre'])

    def test_generate_peak_guess_table_with_no_peaks_generates_empty_table(
            self):
        peak_guess_table = self.alg_instance.generate_peak_guess_table(
            self.x_values, [])

        self.assertEqual(peak_guess_table.rowCount(), 0)

    def test_generate_peak_guess_table_adds_correct_values_of_peak_centre(
            self):
        peakids = [2, 23, 19, 34, 25, 149, 234]
        peak_guess_table = self.alg_instance.generate_peak_guess_table(
            self.x_values, peakids)

        for i, pid in enumerate(sorted(peakids)):
            self.assertAlmostEqual(
                peak_guess_table.row(i)['centre'], self.x_values[pid], 5)

    def test_find_good_peaks_calls_fit_gaussian_peaks_twice_if_no_peaks_given(
            self):
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks'
        ) as mock_fit:
            tmp_table = CreateEmptyTableWorkspace()
            tmp_table.addColumn(type='float', name='chi2')
            tmp_table.addColumn(type='float', name='poisson')
            tmp_table.addRow([10, 20])
            mock_fit.return_value = (mock.MagicMock(), mock.MagicMock(),
                                     tmp_table)
            self.alg_instance.min_sigma = 1
            self.alg_instance.max_sigma = 10

            self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, False,
                                              self.data_ws, 5)

            self.assertEqual(2, mock_fit.call_count)

    def _table_side_effect(self, idx):
        raise ValueError('Index = %d' % idx)

    def test_find_good_peaks_selects_correct_column_for_error(self):
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks'
        ) as mock_fit:
            mock_table = mock.Mock()
            mock_table.column.side_effect = self._table_side_effect
            mock_fit.return_value = None, None, mock_table

            # chi2 cost
            with self.assertRaises(ValueError) as chi2:
                self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5,
                                                  False, self.data_ws, 5)

            # poisson cost
            with self.assertRaises(ValueError) as poisson:
                self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5,
                                                  True, self.data_ws, 5)

            self.assertIn('Index = 0', chi2.exception.args)
            self.assertNotIn('Index = 1', chi2.exception.args)
            self.assertNotIn('Index = 0', poisson.exception.args)
            self.assertIn('Index = 1', poisson.exception.args)

    def test_find_good_peaks_returns_correct_peaks(self):
        self.alg_instance._min_sigma = 1
        self.alg_instance._max_sigma = 10
        actual_peaks, peak_table, refit_peak_table = self.alg_instance.find_good_peaks(
            self.x_values, self.peakids, 0, 5, False, self.data_ws, 5)
        peak1 = peak_table.row(0)
        peak2 = peak_table.row(1)

        self.assertEqual(self.peakids, actual_peaks)
        self.assertEqual(0, refit_peak_table.rowCount())
        self.assertEqual(refit_peak_table.getColumnNames(),
                         peak_table.getColumnNames())

        self.assertPeakFound(peak1, self.centre[0], self.height[0] + 10,
                             self.width[0], 0.05)
        self.assertPeakFound(peak2, self.centre[1], self.height[1] + 10,
                             self.width[1], 0.05)

    def test_find_peaks_is_called_if_scipy_version_at_least_1_1_0(self):
        mock_scipy = mock.MagicMock()
        mock_scipy.__version__ = '1.1.0'
        mock_scipy.signal.find_peaks.return_value = (
            self.peakids, {'prominences': self.peakids})
        with mock.patch.dict('sys.modules', scipy=mock_scipy):
            self.alg_instance.process(self.x_values,
                                      self.y_values,
                                      raw_error=np.sqrt(self.y_values),
                                      acceptance=0,
                                      average_window=50,
                                      bad_peak_to_consider=2,
                                      use_poisson=False,
                                      peak_width_estimate=5,
                                      fit_to_baseline=False,
                                      prog_reporter=mock.Mock())

            self.assertEqual(2, mock_scipy.signal.find_peaks.call_count)
            self.assertEqual(0, mock_scipy.signal.find_peaks_cwt.call_count)

    def test_find_peaks_cwt_is_called_if_scipy_version_below_1_1_0(self):
        mock_scipy = mock.MagicMock()
        mock_scipy.__version__ = '1.0.0'
        mock_scipy.signal.find_peaks.return_value = (
            self.peakids, {'prominences': self.peakids})
        with mock.patch.dict('sys.modules', scipy=mock_scipy):
            self.alg_instance.process(self.x_values,
                                      self.y_values,
                                      raw_error=np.sqrt(self.y_values),
                                      acceptance=0,
                                      average_window=50,
                                      bad_peak_to_consider=2,
                                      use_poisson=False,
                                      peak_width_estimate=5,
                                      fit_to_baseline=False,
                                      prog_reporter=mock.Mock())

            self.assertEqual(0, mock_scipy.signal.find_peaks.call_count)
            self.assertEqual(1, mock_scipy.signal.find_peaks_cwt.call_count)
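
    # A minimal sketch of the version dispatch the two tests above exercise,
    # assuming the algorithm branches on scipy.__version__ at 1.1.0 (this
    # helper is illustrative only, not part of the algorithm's API):
    @staticmethod
    def _pick_peak_finder():
        from distutils.version import LooseVersion
        import scipy
        import scipy.signal
        if LooseVersion(scipy.__version__) >= LooseVersion('1.1.0'):
            return scipy.signal.find_peaks
        return scipy.signal.find_peaks_cwt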

    def test_process_calls_find_good_peaks(self):
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.CreateWorkspace'
        ) as mock_create_ws:
            mock_create_ws.return_value = self.data_ws
            self.alg_instance.find_good_peaks = mock.Mock()

            self.alg_instance.process(self.x_values,
                                      self.y_values,
                                      raw_error=np.sqrt(self.y_values),
                                      acceptance=0,
                                      average_window=50,
                                      bad_peak_to_consider=2,
                                      use_poisson=False,
                                      peak_width_estimate=5,
                                      fit_to_baseline=False,
                                      prog_reporter=mock.Mock())

            base = self.alg_instance.average(self.y_values, 50)
            base += self.alg_instance.average(self.y_values - base, 50)
            flat = self.y_values - base

            self.assertEqual(1, self.alg_instance.find_good_peaks.call_count)
            self.alg_instance.find_good_peaks.assert_called_with(
                self.x_values,
                flat,
                acceptance=0,
                bad_peak_to_consider=2,
                use_poisson=False,
                fit_ws=self.data_ws,
                peak_width_estimate=5)

    def test_process_returns_the_return_value_of_find_good_peaks(self):
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.CreateWorkspace'
        ) as mock_create_ws:
            mock_create_ws.return_value = self.data_ws
            win_size = 500

            actual_return = self.alg_instance.process(
                self.x_values,
                self.y_values,
                raw_error=np.sqrt(self.y_values),
                acceptance=0,
                average_window=win_size,
                bad_peak_to_consider=2,
                use_poisson=False,
                peak_width_estimate=5,
                fit_to_baseline=False,
                prog_reporter=mock.Mock())
            import copy
            actual_return = copy.deepcopy(actual_return)

            base = self.alg_instance.average(self.y_values, win_size)
            base += self.alg_instance.average(self.y_values - base, win_size)
            expected_return = (self.alg_instance.find_good_peaks(
                self.x_values,
                self.peakids,
                acceptance=0,
                bad_peak_to_consider=2,
                use_poisson=False,
                fit_ws=self.data_ws,
                peak_width_estimate=5), base)

            self.assertEqual(expected_return[0][0], actual_return[0][0])
            self.assertTableEqual(expected_return[0][1], actual_return[0][1])
            np.testing.assert_almost_equal(expected_return[1],
                                           actual_return[1])

    def _assert_matplotlib_not_present(self, *args):
        import sys
        self.assertNotIn('matplotlib.pyplot', sys.modules)

    # If matplotlib.pyplot is imported other tests fail on windows and ubuntu
    def test_matplotlib_pyplot_is_not_imported(self):
        self.alg_instance.dilation = mock.Mock(
            side_effect=self._assert_matplotlib_not_present)
        self.alg_instance.opening(self.y_values, 0)

    def test_that_algorithm_finds_peaks_correctly(self):
        FindPeaksAutomatic(
            InputWorkspace=self.raw_ws,
            SmoothWindow=500,
            EstimatePeakSigma=5,
            MinPeakSigma=3,
            MaxPeakSigma=15,
        )
        peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'properties')]
        refit_peak_table = mtd['{}_{}'.format(self.raw_ws.getName(),
                                              'refit_properties')]

        self.assertEqual(2, peak_table.rowCount())
        self.assertEqual(0, refit_peak_table.rowCount())
        self.assertPeakFound(peak_table.row(0), self.centre[0], self.height[0],
                             self.width[0], 0.05)
        self.assertPeakFound(peak_table.row(1), self.centre[1], self.height[1],
                             self.width[1], 0.05)