Example #1
    def _add_log_to_workspace(self, ws, log_name, log_value):
        if isinstance(log_value, list):
            ws.mutableRun()[log_name] = self._create_time_series_log(
                log_name, log_value)

        else:
            mantid.AddSampleLog(
                Workspace=ws,
                LogName=log_name,
                LogText=str(log_value),
                LogType="String" if isinstance(log_value, str) else "Number")
Example #2
    def test_subtract_summed_runs_throw_on_tof_mismatch(self):
        # Create a sample workspace which will have mismatched TOF range
        sample_ws = mantid.CreateSampleWorkspace()
        mantid.AddSampleLog(Workspace=sample_ws, LogName='gd_prtn_chrg', LogText="10.0", LogType='Number')
        ws_file_name = "100"  # Load POL100

        # This should throw as the TOF ranges do not match
        with assertRaisesRegex(self, ValueError, "specified for this file do not have matching binning. Do the "):
            common.subtract_summed_runs(ws_to_correct=sample_ws, instrument=ISISPowderMockInst(),
                                        empty_sample_ws_string=ws_file_name)

        mantid.DeleteWorkspace(sample_ws)
Example #3
    def test_DNSSameNormalization(self):
        outputWorkspaceName = "DNSMergeRunsTest_Test2"
        ws = api.AnalysisDataService.retrieve(self.workspaces[0])
        api.AddSampleLog(ws,
                         LogName='normalized',
                         LogText='no',
                         LogType='String')
        self.assertRaises(RuntimeError,
                          DNSMergeRuns,
                          WorkspaceNames=self.workspaces,
                          OutputWorkspace=outputWorkspaceName)
        return
Example #4
    def test_DNSSameWavelength(self):
        outputWorkspaceName = "DNSMergeRunsTest_Test1"
        ws = api.AnalysisDataService.retrieve(self.workspaces[0])
        api.AddSampleLog(ws,
                         LogName='wavelength',
                         LogText=str(5.0),
                         LogType='Number',
                         LogUnit='Angstrom')
        self.assertRaises(RuntimeError,
                          DNSMergeRuns,
                          WorkspaceNames=self.workspaces,
                          OutputWorkspace=outputWorkspaceName)
        return
Example #5
    def test_run_normalise_by_current(self):
        initial_value = 17
        prtn_charge = '10.0'
        expected_value = initial_value / float(prtn_charge)

        # Create two workspaces
        ws = mantid.CreateWorkspace(DataX=0, DataY=initial_value)

        # Add Good Proton Charge Log
        mantid.AddSampleLog(Workspace=ws, LogName='gd_prtn_chrg', LogText=prtn_charge, LogType='Number')

        self.assertEqual(initial_value, ws.dataY(0)[0])
        common.run_normalise_by_current(ws)
        self.assertAlmostEqual(expected_value, ws.dataY(0)[0], delta=1e-8)
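
For comparison, the same normalisation can be written against Mantid's stock NormaliseByCurrent algorithm, which divides the data by the gd_prtn_chrg log; a minimal sketch, assuming that is what common.run_normalise_by_current wraps:

    # Sketch: normalise a one-point workspace by its good proton charge.
    import mantid.simpleapi as mantid

    ws = mantid.CreateWorkspace(DataX=[0], DataY=[17])
    mantid.AddSampleLog(Workspace=ws, LogName='gd_prtn_chrg',
                        LogText='10.0', LogType='Number')
    ws = mantid.NormaliseByCurrent(InputWorkspace=ws)
    print(ws.readY(0)[0])  # expected: 17 / 10.0 = 1.7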
Example #6
    def test_unitAreAddedIfPresent(self):
        input_ws = self._create_sample_workspace()
        mantid.AddSampleLog(Workspace=input_ws,
                            LogName="TestLog",
                            LogText="1",
                            LogType="Number",
                            LogUnit="uAmps")
        run_algorithm(self.ALG_NAME,
                      InputWorkspace=input_ws,
                      Filename=self.TEMP_FILE_NAME)

        with h5py.File(self.TEMP_FILE_NAME, "r") as output_file:
            logs_group = output_file["Sample Logs"]
            self.assertEqual(logs_group["TestLog"].attrs["Units"], "uAmps")
Example #7
    def test_DNSPolarisationValid(self):
        outputWorkspaceName = "DNSFlippingRatioCorrTest_Test3"
        api.AddSampleLog(Workspace=self.__nsf_nicrws,
                         LogName='polarisation',
                         LogText='y',
                         LogType='String')
        self.assertRaises(RuntimeError,
                          DNSFlippingRatioCorr,
                          SFDataWorkspace=self.__sf_nicrws.getName(),
                          NSFDataWorkspace=self.__nsf_nicrws.getName(),
                          SFNiCrWorkspace=self.__sf_nicrws.getName(),
                          NSFNiCrWorkspace=self.__nsf_nicrws.getName(),
                          SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                          NSFBkgrWorkspace=self.__nsf_bkgrws.getName(),
                          SFOutputWorkspace=outputWorkspaceName + 'SF',
                          NSFOutputWorkspace=outputWorkspaceName + 'NSF')
        return
Example #8
    def test_no_warning_raised_explicitly_dimensionless_run_log(self):
        import mantid.simpleapi as mantid
        target = mantid.CloneWorkspace(self.base_event_ws)
        with warnings.catch_warnings(record=True) as caught_warnings:
            scn.mantid.convert_EventWorkspace_to_data_array(target, False)
            original_number_of_warnings = len(caught_warnings)

        # Add an explicitly dimensionless log
        mantid.AddSampleLog(Workspace=target,
                            LogName='dimensionless_log',
                            LogText='1',
                            LogType='Number',
                            LogUnit='dimensionless')

        with warnings.catch_warnings(record=True) as caught_warnings:
            scn.mantid.convert_EventWorkspace_to_data_array(target, False)
            assert len(caught_warnings) == original_number_of_warnings,\
                "Expected no extra warning about unrecognised units " \
                "from explicitly dimensionless log"
Example #9
    def test_subtract_summed_runs(self):
        # Load a vanadium workspace for this test
        sample_empty_number = "100"
        ws_file_name = "POL" + sample_empty_number
        original_ws = mantid.Load(ws_file_name)
        # add a proton charge log so subtract_summed_runs gets past its initial check
        mantid.AddSampleLog(Workspace=original_ws,
                            LogName='gd_prtn_chrg',
                            LogText="10.0",
                            LogType='Number')
        no_scale_ws = mantid.CloneWorkspace(InputWorkspace=original_ws)
        empty_ws = mantid.CloneWorkspace(InputWorkspace=original_ws)
        empty_ws = empty_ws * 0.3

        returned_ws = common.subtract_summed_runs(ws_to_correct=no_scale_ws,
                                                  empty_sample=empty_ws)
        y_values = returned_ws.readY(0)
        original_y_values = original_ws.readY(0)
        for i in range(returned_ws.blocksize()):
            self.assertAlmostEqual(y_values[i], original_y_values[i] * 0.7)

        mantid.DeleteWorkspace(no_scale_ws)
        mantid.DeleteWorkspace(empty_ws)
        mantid.DeleteWorkspace(returned_ws)
Example #10
    def PyExec(self):
        """ Main execution body
        """
        # get list of input workspaces
        input_workspace_list = self._expand_groups()
        workspaceCount = len(input_workspace_list)
        self.log().information("Workspaces to merge " + str(workspaceCount))
        wsOutput = self.getPropertyValue("OutputWorkspace")

        if workspaceCount < 2:
            api.CloneWorkspace(InputWorkspace=input_workspace_list[0],
                               OutputWorkspace=wsOutput)
            self.log().warning(
                "Cannot merge a single workspace; a clone is produced.")
            self.setProperty("OutputWorkspace", wsOutput)
            return

        # check whether given workspaces can be merged
        self._can_merge(input_workspace_list)

        # delete output workspace if it exists
        if api.mtd.doesExist(wsOutput):
            api.DeleteWorkspace(Workspace=wsOutput)

        #  Merge runs
        api.MergeRuns(InputWorkspaces=input_workspace_list,
                      OutputWorkspace=wsOutput)

        # Merge logs
        # MergeRuns by default copies all logs from the first workspace
        pdict = {}
        for prop in self.properties_to_merge:
            pdict[prop] = []

        for wsname in input_workspace_list:
            wks = api.AnalysisDataService.retrieve(wsname)
            run = wks.getRun()
            for prop in self.properties_to_merge:
                if run.hasProperty(prop):
                    pdict[prop].append(run.getProperty(prop).value)

        # take average for temperatures
        nentries = len(pdict['temperature'])
        if nentries > 0:
            temps = [float(temp) for temp in pdict['temperature']]
            tmean = sum(temps) / nentries
            api.AddSampleLog(Workspace=wsOutput,
                             LogName='temperature',
                             LogText=str(tmean),
                             LogType='Number',
                             LogUnit='K')
        # sum monitor counts
        mcounts = [int(mco) for mco in pdict['monitor_counts']]
        # check for zero monitor counts
        zeros = np.where(np.array(mcounts) == 0)[0]
        if len(zeros) > 0:
            for index in zeros:
                self.log().warning("Workspace " + input_workspace_list[index] +
                                   " has zero monitor counts.")
        # create sample log
        api.AddSampleLog(Workspace=wsOutput,
                         LogName='monitor_counts',
                         LogText=str(sum(mcounts)),
                         LogType='Number')
        # sum durations
        durations = [int(dur) for dur in pdict['duration']]
        api.AddSampleLog(Workspace=wsOutput,
                         LogName='duration',
                         LogText=str(sum(durations)),
                         LogType='Number',
                         LogUnit='s')
        # get minimal run_start
        fmt = "%Y-%m-%dT%H:%M:%S%z"
        run_start = [parse(entry) for entry in pdict['run_start']]
        api.AddSampleLog(Workspace=wsOutput,
                         LogName='run_start',
                         LogText=min(run_start).strftime(fmt),
                         LogType='String')
        # get maximal run_end
        run_end = [parse(entry) for entry in pdict['run_end']]
        api.AddSampleLog(Workspace=wsOutput,
                         LogName='run_end',
                         LogText=max(run_end).strftime(fmt),
                         LogType='String')
        # list of run_numbers
        api.AddSampleLog(Workspace=wsOutput,
                         LogName='run_number',
                         LogText=str(pdict['run_number']),
                         LogType='String')

        self.setProperty("OutputWorkspace", wsOutput)
Example #11
    def PyExec(self):
        # Input
        filename = self.getPropertyValue("Filename")
        outws_name = self.getPropertyValue("OutputWorkspace")
        norm = self.getPropertyValue("Normalization")

        # load data array from the given file
        data_array = np.loadtxt(filename)
        if not data_array.size:
            message = "File " + filename + " does not contain any data!"
            self.log().error(message)
            raise RuntimeError(message)
        # sample logs
        logs = {"names": [], "values": [], "units": []}

        # load run information
        metadata = DNSdata()
        try:
            metadata.read_legacy(filename)
        except RuntimeError as err:
            message = "Error of loading of file " + filename + ": " + str(err)
            self.log().error(message)
            raise RuntimeError(message)

        tmp = api.LoadEmptyInstrument(InstrumentName='DNS')
        self.instrument = tmp.getInstrument()
        api.DeleteWorkspace(tmp)

        # load polarisation table and determine polarisation
        poltable = self.get_polarisation_table()
        pol = self.get_polarisation(metadata, poltable)
        if not pol:
            pol = ['0', 'undefined']
            self.log().warning("Failed to determine polarisation for " +
                               filename +
                               ". Values have been set to undefined.")
        ndet = 24
        unitX = "Wavelength"
        if metadata.tof_channel_number < 2:
            dataX = np.zeros(2 * ndet)
            dataX.fill(metadata.wavelength + 0.00001)
            dataX[::2] -= 0.000002
        else:
            unitX = "TOF"

            # get instrument parameters
            l1 = np.linalg.norm(self.instrument.getSample().getPos() -
                                self.instrument.getSource().getPos())
            self.log().notice("L1 = {} m".format(l1))
            dt_factor = float(
                self.instrument.getStringParameter("channel_width_factor")[0])

            # channel width
            dt = metadata.tof_channel_width * dt_factor
            # calculate tof1
            velocity = h / (m_n * metadata.wavelength * 1e-10)  # m/s
            tof1 = 1e+06 * l1 / velocity  # microseconds
            self.log().debug("TOF1 = {} microseconds".format(tof1))
            self.log().debug("Delay time = {} microsecond".format(
                metadata.tof_delay_time))
            # create dataX array
            x0 = tof1 + metadata.tof_delay_time
            self.log().debug("TOF1 = {} microseconds".format(tof1))
            dataX = np.linspace(x0, x0 + metadata.tof_channel_number * dt,
                                metadata.tof_channel_number + 1)

            # sample logs
            logs["names"].extend(
                ["channel_width", "TOF1", "delay_time", "tof_channels"])
            logs["values"].extend([
                dt, tof1, metadata.tof_delay_time, metadata.tof_channel_number
            ])
            logs["units"].extend(
                ["microseconds", "microseconds", "microseconds", ""])
            if metadata.tof_elastic_channel:
                logs["names"].append("EPP")
                logs["values"].append(metadata.tof_elastic_channel)
                logs["units"].append("")
            if metadata.chopper_rotation_speed:
                logs["names"].append("chopper_speed")
                logs["values"].append(metadata.chopper_rotation_speed)
                logs["units"].append("Hz")
            if metadata.chopper_slits:
                logs["names"].append("chopper_slits")
                logs["values"].append(metadata.chopper_slits)
                logs["units"].append("")

        # data normalization
        factor = 1.0
        yunit = "Counts"
        ylabel = "Intensity"
        if norm == 'duration':
            factor = metadata.duration
            yunit = "Counts/s"
            ylabel = "Intensity normalized to duration"
            if factor <= 0:
                raise RuntimeError("Duration is invalid for file " + filename +
                                   ". Cannot normalize.")
        if norm == 'monitor':
            factor = metadata.monitor_counts
            yunit = "Counts/monitor"
            ylabel = "Intensity normalized to monitor"
            if factor <= 0:
                raise RuntimeError("Monitor counts are invalid for file " +
                                   filename + ". Cannot normalize.")
        # set values for dataY and dataE
        dataY = data_array[0:ndet, 1:] / factor
        dataE = np.sqrt(data_array[0:ndet, 1:]) / factor
        # create workspace
        api.CreateWorkspace(OutputWorkspace=outws_name,
                            DataX=dataX,
                            DataY=dataY,
                            DataE=dataE,
                            NSpec=ndet,
                            UnitX=unitX)
        outws = api.AnalysisDataService.retrieve(outws_name)
        api.LoadInstrument(outws, InstrumentName='DNS', RewriteSpectraMap=True)

        run = outws.mutableRun()
        if metadata.start_time and metadata.end_time:
            run.setStartAndEndTime(DateAndTime(metadata.start_time),
                                   DateAndTime(metadata.end_time))
        # add name of file as a run title
        fname = os.path.splitext(os.path.split(filename)[1])[0]
        run.addProperty('run_title', fname, True)

        # rotate the detector bank to the proper position
        api.RotateInstrumentComponent(outws,
                                      "bank0",
                                      X=0,
                                      Y=1,
                                      Z=0,
                                      Angle=metadata.deterota)
        # add sample log Ei and wavelength
        logs["names"].extend(["Ei", "wavelength"])
        logs["values"].extend([metadata.incident_energy, metadata.wavelength])
        logs["units"].extend(["meV", "Angstrom"])

        # add other sample logs
        logs["names"].extend([
            "deterota", "mon_sum", "duration", "huber", "omega", "T1", "T2",
            "Tsp"
        ])
        logs["values"].extend([
            metadata.deterota, metadata.monitor_counts, metadata.duration,
            metadata.huber, metadata.huber - metadata.deterota, metadata.temp1,
            metadata.temp2, metadata.tsp
        ])
        logs["units"].extend([
            "Degrees", "Counts", "Seconds", "Degrees", "Degrees", "K", "K", "K"
        ])

        # flipper, coil currents and polarisation
        flipper_status = 'OFF'  # flipper OFF
        if abs(metadata.flipper_precession_current) > sys.float_info.epsilon:
            flipper_status = 'ON'  # flipper ON
        logs["names"].extend([
            "flipper_precession", "flipper_z_compensation", "flipper", "C_a",
            "C_b", "C_c", "C_z", "polarisation", "polarisation_comment"
        ])
        logs["values"].extend([
            metadata.flipper_precession_current,
            metadata.flipper_z_compensation_current, flipper_status,
            metadata.a_coil_current, metadata.b_coil_current,
            metadata.c_coil_current, metadata.z_coil_current,
            str(pol[0]),
            str(pol[1])
        ])
        logs["units"].extend(["A", "A", "", "A", "A", "A", "A", "", ""])

        # slits
        logs["names"].extend([
            "slit_i_upper_blade_position", "slit_i_lower_blade_position",
            "slit_i_left_blade_position", "slit_i_right_blade_position"
        ])
        logs["values"].extend([
            metadata.slit_i_upper_blade_position,
            metadata.slit_i_lower_blade_position,
            metadata.slit_i_left_blade_position,
            metadata.slit_i_right_blade_position
        ])
        logs["units"].extend(["mm", "mm", "mm", "mm"])

        # record whether the data are normalized (duration/monitor/no)
        api.AddSampleLog(outws,
                         LogName='normalized',
                         LogText=norm,
                         LogType='String')
        api.AddSampleLogMultiple(outws,
                                 LogNames=logs["names"],
                                 LogValues=logs["values"],
                                 LogUnits=logs["units"])

        outws.setYUnit(yunit)
        outws.setYUnitLabel(ylabel)

        self.setProperty("OutputWorkspace", outws)
        self.log().debug('LoadDNSLegacy: data are loaded to the workspace ' +
                         outws_name)

        return
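
Note the batched AddSampleLogMultiple call above, which replaces the one-call-per-log pattern of the older loader in Example #13 below; a minimal standalone sketch of the parallel-list pattern, assuming api is mantid.simpleapi as in the loader:

    # Sketch: add several sample logs at once from parallel lists.
    ws = api.CreateSampleWorkspace()
    api.AddSampleLogMultiple(Workspace=ws,
                             LogNames=['Ei', 'duration', 'comment'],
                             LogValues=[4.64, 600, 'test'],
                             LogUnits=['meV', 'Seconds', ''])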
Example #12
    def PyExec(self):
        if not self._use_corrections:
            logger.information('Not using corrections')
        if not self._use_can:
            logger.information('Not using container')

        prog_container = Progress(self, start=0.0, end=0.2, nreports=4)
        prog_container.report('Starting algorithm')

        # Units should be wavelength
        sample_unit = self._sample_workspace.getAxis(0).getUnit().unitID()
        sample_ws_wavelength = self._convert_units_wavelength(
            self._sample_workspace)

        container_ws_wavelength = (self._process_container_workspace(
            self._container_workspace, prog_container)
                                   if self._use_can else None)

        prog_corr = Progress(self, start=0.2, end=0.6, nreports=2)
        if self._use_corrections:
            prog_corr.report('Preprocessing corrections')

            if self._use_can:
                # Use container factors
                prog_corr.report('Correcting sample and container')
                factor_workspaces = self._get_factor_workspaces()
                output_workspace = self._correct_sample_can(
                    sample_ws_wavelength, container_ws_wavelength,
                    factor_workspaces)
                correction_type = 'sample_and_can_corrections'
            else:
                # Use sample factor only
                output_workspace = self._correct_sample(
                    sample_ws_wavelength, self._corrections_workspace[0])
                correction_type = 'sample_corrections_only'
                # Add corrections filename to log values
                prog_corr.report('Correcting sample')
                s_api.AddSampleLog(Workspace=output_workspace,
                                   LogName='corrections_filename',
                                   LogType='String',
                                   LogText=self._corrections_ws_name)
        else:
            # Do simple subtraction
            output_workspace = self._subtract(sample_ws_wavelength,
                                              container_ws_wavelength)
            correction_type = 'can_subtraction'
            # Add container filename to log values
            can_base = self.getPropertyValue("CanWorkspace")
            can_base = can_base[:can_base.index('_')]
            prog_corr.report('Adding container filename')
            s_api.AddSampleLog(Workspace=output_workspace,
                               LogName='container_filename',
                               LogType='String',
                               LogText=can_base)

        prog_wrkflow = Progress(self, 0.6, 1.0, nreports=5)
        # Record the container scale factor
        if self._use_can and self._scale_can:
            prog_wrkflow.report('Adding container scaling')
            s_api.AddSampleLog(Workspace=output_workspace,
                               LogName='container_scale',
                               LogType='Number',
                               LogText=str(self._can_scale_factor))

        # Record the container shift amount
        if self._use_can and self._shift_can:
            prog_wrkflow.report('Adding container shift')
            s_api.AddSampleLog(Workspace=output_workspace,
                               LogName='container_shift',
                               LogType='Number',
                               LogText=str(self._can_shift_factor))

        # Record the type of corrections applied
        prog_wrkflow.report('Adding correction type')
        s_api.AddSampleLog(Workspace=output_workspace,
                           LogName='corrections_type',
                           LogType='String',
                           LogText=correction_type)

        # Add original sample as log entry
        sam_base = self.getPropertyValue("SampleWorkspace")

        if '_' in sam_base:
            sam_base = sam_base[:sam_base.index('_')]
            prog_wrkflow.report('Adding sample filename')
            s_api.AddSampleLog(Workspace=output_workspace,
                               LogName='sample_filename',
                               LogType='String',
                               LogText=sam_base)

        # Convert Units back to original
        emode = str(output_workspace.getEMode())
        efixed = 0.0
        if emode == "Indirect":
            efixed = self._get_e_fixed(output_workspace)
        output_workspace = self._convert_units(output_workspace, sample_unit,
                                               emode, efixed)

        self.setProperty('OutputWorkspace', output_workspace)
        prog_wrkflow.report('Algorithm Complete')
Example #13
    def PyExec(self):
        # Input
        filename = self.getPropertyValue("Filename")
        outws_name = self.getPropertyValue("OutputWorkspace")
        norm = self.getPropertyValue("Normalization")

        # load data array from the given file
        data_array = np.loadtxt(filename)
        if not data_array.size:
            message = "File " + filename + " does not contain any data!"
            self.log().error(message)
            raise RuntimeError(message)

        # load run information
        metadata = DNSdata()
        try:
            metadata.read_legacy(filename)
        except RuntimeError as err:
            message = "Error of loading of file " + filename + ": " + str(err)
            self.log().error(message)
            raise RuntimeError(message)

        # load polarisation table and determine polarisation
        poltable = self.get_polarisation_table()
        pol = self.get_polarisation(metadata, poltable)
        if not pol:
            pol = ['0', 'undefined']
            self.log().warning("Failed to determine polarisation for " + filename +
                               ". Values have been set to undefined.")
        ndet = 24
        # this is needed to be able to use ConvertToMD
        dataX = np.zeros(2*ndet)
        dataX.fill(metadata.wavelength + 0.00001)
        dataX[::2] -= 0.000002
        # data normalization
        factor = 1.0
        yunit = "Counts"
        ylabel = "Intensity"
        if norm == 'duration':
            factor = metadata.duration
            yunit = "Counts/s"
            ylabel = "Intensity normalized to duration"
            if factor <= 0:
                raise RuntimeError("Duration is invalid for file " + filename + ". Cannot normalize.")
        if norm == 'monitor':
            factor = metadata.monitor_counts
            yunit = "Counts/monitor"
            ylabel = "Intensity normalized to monitor"
            if factor <= 0:
                raise RuntimeError("Monitor counts are invalid for file " + filename + ". Cannot normalize.")
        # set values for dataY and dataE
        dataY = data_array[0:ndet, 1:]/factor
        dataE = np.sqrt(data_array[0:ndet, 1:])/factor
        # create workspace
        api.CreateWorkspace(OutputWorkspace=outws_name, DataX=dataX, DataY=dataY,
                            DataE=dataE, NSpec=ndet, UnitX="Wavelength")
        outws = api.AnalysisDataService.retrieve(outws_name)
        api.LoadInstrument(outws, InstrumentName='DNS', RewriteSpectraMap=True)

        run = outws.mutableRun()
        if metadata.start_time and metadata.end_time:
            run.setStartAndEndTime(DateAndTime(metadata.start_time),
                                   DateAndTime(metadata.end_time))
        # add name of file as a run title
        fname = os.path.splitext(os.path.split(filename)[1])[0]
        run.addProperty('run_title', fname, True)

        # rotate the detector bank to the proper position
        api.RotateInstrumentComponent(outws, "bank0", X=0, Y=1, Z=0, Angle=metadata.deterota)
        # add sample log Ei and wavelength
        api.AddSampleLog(outws, LogName='Ei', LogText=str(metadata.incident_energy),
                         LogType='Number', LogUnit='meV')
        api.AddSampleLog(outws, LogName='wavelength', LogText=str(metadata.wavelength),
                         LogType='Number', LogUnit='Angstrom')
        # add other sample logs
        api.AddSampleLog(outws, LogName='deterota', LogText=str(metadata.deterota),
                         LogType='Number', LogUnit='Degrees')
        api.AddSampleLog(outws, 'mon_sum',
                         LogText=str(float(metadata.monitor_counts)), LogType='Number')
        api.AddSampleLog(outws, LogName='duration', LogText=str(metadata.duration),
                         LogType='Number', LogUnit='Seconds')
        api.AddSampleLog(outws, LogName='huber', LogText=str(metadata.huber),
                         LogType='Number', LogUnit='Degrees')
        api.AddSampleLog(outws, LogName='omega', LogText=str(metadata.huber - metadata.deterota),
                         LogType='Number', LogUnit='Degrees')
        api.AddSampleLog(outws, LogName='T1', LogText=str(metadata.temp1),
                         LogType='Number', LogUnit='K')
        api.AddSampleLog(outws, LogName='T2', LogText=str(metadata.temp2),
                         LogType='Number', LogUnit='K')
        api.AddSampleLog(outws, LogName='Tsp', LogText=str(metadata.tsp),
                         LogType='Number', LogUnit='K')
        # flipper
        api.AddSampleLog(outws, LogName='flipper_precession',
                         LogText=str(metadata.flipper_precession_current),
                         LogType='Number', LogUnit='A')
        api.AddSampleLog(outws, LogName='flipper_z_compensation',
                         LogText=str(metadata.flipper_z_compensation_current),
                         LogType='Number', LogUnit='A')
        flipper_status = 'OFF'    # flipper OFF
        if abs(metadata.flipper_precession_current) > sys.float_info.epsilon:
            flipper_status = 'ON'    # flipper ON
        api.AddSampleLog(outws, LogName='flipper',
                         LogText=flipper_status, LogType='String')
        # coil currents
        api.AddSampleLog(outws, LogName='C_a', LogText=str(metadata.a_coil_current),
                         LogType='Number', LogUnit='A')
        api.AddSampleLog(outws, LogName='C_b', LogText=str(metadata.b_coil_current),
                         LogType='Number', LogUnit='A')
        api.AddSampleLog(outws, LogName='C_c', LogText=str(metadata.c_coil_current),
                         LogType='Number', LogUnit='A')
        api.AddSampleLog(outws, LogName='C_z', LogText=str(metadata.z_coil_current),
                         LogType='Number', LogUnit='A')
        # type of polarisation
        api.AddSampleLog(outws, 'polarisation', LogText=pol[0], LogType='String')
        api.AddSampleLog(outws, 'polarisation_comment', LogText=str(pol[1]), LogType='String')
        # slits
        api.AddSampleLog(outws, LogName='slit_i_upper_blade_position',
                         LogText=str(metadata.slit_i_upper_blade_position),
                         LogType='Number', LogUnit='mm')
        api.AddSampleLog(outws, LogName='slit_i_lower_blade_position',
                         LogText=str(metadata.slit_i_lower_blade_position),
                         LogType='Number', LogUnit='mm')
        api.AddSampleLog(outws, LogName='slit_i_left_blade_position',
                         LogText=str(metadata.slit_i_left_blade_position),
                         LogType='Number', LogUnit='mm')
        api.AddSampleLog(outws, 'slit_i_right_blade_position',
                         LogText=str(metadata.slit_i_right_blade_position),
                         LogType='Number', LogUnit='mm')

        # record whether the data are normalized (duration/monitor/no)
        api.AddSampleLog(outws, LogName='normalized', LogText=norm, LogType='String')

        outws.setYUnit(yunit)
        outws.setYUnitLabel(ylabel)

        self.setProperty("OutputWorkspace", outws)
        self.log().debug('LoadDNSLegacy: data are loaded to the workspace ' + outws_name)

        return
Example #14
    def PyExec(self):
        self._setup()

        if not self._use_corrections:
            logger.information('Not using corrections')
        if not self._use_can:
            logger.information('Not using container')

        prog_container = Progress(self, start=0.0, end=0.2, nreports=4)
        prog_container.report('Starting algorithm')

        # Units should be wavelength
        sample_unit = s_api.mtd[self._sample_ws_name].getAxis(
            0).getUnit().unitID()
        self._convert_units_wavelength(sample_unit, self._sample_ws_name,
                                       self._sample_ws_wavelength,
                                       "Wavelength")

        if self._use_can:

            # Apply container shift if needed
            if self._shift_can:
                # Use temp workspace so we don't modify data
                prog_container.report('Shifting can')
                s_api.ScaleX(InputWorkspace=self._can_ws_name,
                             OutputWorkspace=self._shifted_container,
                             Factor=self._can_shift_factor,
                             Operation='Add')
                logger.information('Container data shifted by %f' %
                                   self._can_shift_factor)
            else:
                prog_container.report('Cloning Workspace')
                s_api.CloneWorkspace(InputWorkspace=self._can_ws_name,
                                     OutputWorkspace=self._shifted_container)

            # Apply container scale factor if needed
            if self._scale_can:
                # Use temp workspace so we don't modify original data
                prog_container.report('Scaling can')
                s_api.Scale(InputWorkspace=self._shifted_container,
                            OutputWorkspace=self._scaled_container,
                            Factor=self._can_scale_factor,
                            Operation='Multiply')
                logger.information('Container scaled by %f' %
                                   self._can_scale_factor)
            else:
                prog_container.report('Cloning Workspace')
                s_api.CloneWorkspace(InputWorkspace=self._shifted_container,
                                     OutputWorkspace=self._scaled_container)

            # Units should be wavelength
            can_unit = s_api.mtd[self._scaled_container].getAxis(
                0).getUnit().unitID()
            self._convert_units_wavelength(can_unit, self._scaled_container,
                                           self._scaled_container_wavelength,
                                           "Wavelength")

        prog_corr = Progress(self, start=0.2, end=0.6, nreports=2)
        if self._use_corrections:
            prog_corr.report('Preprocessing corrections')
            self._pre_process_corrections()

            if self._use_can:
                # Use container factors
                prog_corr.report('Correcting sample and can')
                self._correct_sample_can()
                correction_type = 'sample_and_can_corrections'
            else:
                # Use sample factor only
                self._correct_sample()
                correction_type = 'sample_corrections_only'
                # Add corrections filename to log values
                prog_corr.report('Correcting sample')
                s_api.AddSampleLog(Workspace=self._output_ws_name,
                                   LogName='corrections_filename',
                                   LogType='String',
                                   LogText=self._corrections_ws_name)

        else:
            # Do simple subtraction
            self._subtract()
            correction_type = 'can_subtraction'
            # Add container filename to log values
            can_cut = self._can_ws_name.index('_')
            can_base = self._can_ws_name[:can_cut]
            prog_corr.report('Adding container filename')
            s_api.AddSampleLog(Workspace=self._output_ws_name,
                               LogName='container_filename',
                               LogType='String',
                               LogText=can_base)

        prog_wrkflow = Progress(self, 0.6, 1.0, nreports=5)
        # Record the container scale factor
        if self._use_can and self._scale_can:
            prog_wrkflow.report('Adding container scaling')
            s_api.AddSampleLog(Workspace=self._output_ws_name,
                               LogName='container_scale',
                               LogType='Number',
                               LogText=str(self._can_scale_factor))

        # Record the container shift amount
        if self._use_can and self._shift_can:
            prog_wrkflow.report('Adding container shift')
            s_api.AddSampleLog(Workspace=self._output_ws_name,
                               LogName='container_shift',
                               LogType='Number',
                               LogText=str(self._can_shift_factor))

        # Record the type of corrections applied
        prog_wrkflow.report('Adding correction type')
        s_api.AddSampleLog(Workspace=self._output_ws_name,
                           LogName='corrections_type',
                           LogType='String',
                           LogText=correction_type)

        # Add original sample as log entry
        sam_cut = self._sample_ws_name.index('_')
        sam_base = self._sample_ws_name[:sam_cut]
        prog_wrkflow.report('Adding sample filename')
        s_api.AddSampleLog(Workspace=self._output_ws_name,
                           LogName='sample_filename',
                           LogType='String',
                           LogText=sam_base)

        # Convert Units back to original
        self._convert_units_wavelength(sample_unit, self._output_ws_name,
                                       self._output_ws_name, sample_unit)

        self.setPropertyValue('OutputWorkspace', self._output_ws_name)

        # Remove temporary workspaces
        prog_wrkflow.report('Deleting Workspaces')
        if self._corrections in s_api.mtd:
            s_api.DeleteWorkspace(self._corrections)
        if self._scaled_container in s_api.mtd:
            s_api.DeleteWorkspace(self._scaled_container)
        if self._shifted_container in s_api.mtd:
            s_api.DeleteWorkspace(self._shifted_container)
        if self._scaled_container_wavelength in s_api.mtd:
            s_api.DeleteWorkspace(self._scaled_container_wavelength)
        if self._sample_ws_wavelength in s_api.mtd:
            s_api.DeleteWorkspace(self._sample_ws_wavelength)
        prog_wrkflow.report('Algorithm Complete')