Example 1
    def refine(self, dataws, wsindex, parametersToFit, numcycles, startx, endx,
               laststepindex):
        """ Refine parameters
        """
        # Range of fit
        if startx <= 0.:
            startx = dataws.readX(wsindex)[0]
        if endx <= 0.:
            endx = dataws.readX(wsindex)[-1]

        # Set up RefineProfileParameters object
        runner = RefineProfileParameters(self.glog)

        # Locate refinement record table
        profilewsname, braggpeakwsname, bkgdtype, bkgdparamwsname, laststep = self._parseRecordTable(
            laststepindex)

        # Set up the runner for Monte Carlo refinement
        runner.setupMonteCarloRefine(numcycles, parametersToFit)

        outwsname, outprofilewsname, outbraggpeakwsname = self._genOutputWorkspace(
            str(dataws), profilewsname, braggpeakwsname)

        # Set up input and output
        runner.setInputs(str(dataws), self._peakType, profilewsname,
                         braggpeakwsname, bkgdtype, bkgdparamwsname)
        # FIXME - Need to verify whether input and output background parameter ws name can be same
        runner.setOutputs(outwsname, outprofilewsname, outbraggpeakwsname,
                          bkgdparamwsname)

        # Refine and record pre and post refinement information
        self._recordPreRefineInfo(runner, laststep)
        runner.refine(numcycles, parametersToFit, startx, endx)
        self._recordPostRefineInfo(runner)

        # Group newly generated workspaces and add the name to the repository
        if self._wsgroupCreated:
            api.GroupWorkspaces(InputWorkspaces="%s, %s, %s, %s" %
                                (outwsname, outprofilewsname,
                                 outbraggpeakwsname, self._wsgroupName),
                                OutputWorkspace=self._wsgroupName)
        else:
            wsgroup = AnalysisDataService.retrieve(self._wsgroupName)
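            # Count how many times the background parameter workspace already
            # appears in the group, to decide whether to include it again when
            # re-grouping.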
            hasbkgd = list(wsgroup.getNames()).count(bkgdparamwsname)
            if hasbkgd == 1:
                api.GroupWorkspaces(
                    InputWorkspaces="%s, %s, %s" %
                    (outwsname, outprofilewsname, outbraggpeakwsname),
                    OutputWorkspace=self._wsgroupName)
            elif hasbkgd == 0:
                api.GroupWorkspaces(InputWorkspaces="%s, %s, %s, %s" %
                                    (outwsname, outprofilewsname,
                                     outbraggpeakwsname, bkgdparamwsname),
                                    OutputWorkspace=self._wsgroupName)
            else:
                raise NotImplementedError(
                    "A workspace cannot appear twice in a workspace group.")

        return
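A minimal usage sketch (the driver object seq, the loaded workspace dataws, and the parameter names are illustrative, not taken from the original source):

    # Hypothetical call: refine Alph0 and Beta0 for 1000 Monte Carlo cycles
    # over the full data range (startx/endx <= 0. fall back to the data
    # limits), continuing from refinement step 2 of the record table.
    seq.refine(dataws, wsindex=0, parametersToFit=['Alph0', 'Beta0'],
               numcycles=1000, startx=-1., endx=-1., laststepindex=2)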
Example 2
    def load_legacy_cross_Sections(self, file_path):
        """
            For legacy MR data, we need to load each cross-section independently.
            :param str file_path: data file path
        """
        ws_base_name = os.path.basename(file_path)
        cross_sections = list()
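        # Legacy files store each polarization cross-section in its own
        # NXentry, named 'entry-Off_Off', 'entry-On_Off', etc.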

        for entry in ['Off_Off', 'On_Off', 'Off_On', 'On_On']:
            try:
                ws_name = "%s_%s" % (ws_base_name, entry)
                ws = api.LoadEventNexus(Filename=file_path,
                                        NXentryName='entry-%s' % entry,
                                        OutputWorkspace=ws_name)
                api.AddSampleLog(Workspace=ws,
                                 LogName='cross_section_id',
                                 LogText=entry)
                cross_sections.append(ws_name)
            except Exception:
                api.logger.information(
                    "Could not load %s from legacy data file" % entry)

        # Prepare output workspace group
        output_wsg = self.getPropertyValue("CrossSectionWorkspaces")

        api.GroupWorkspaces(InputWorkspaces=cross_sections,
                            OutputWorkspace=output_wsg)
        self.setProperty("CrossSectionWorkspaces", output_wsg)
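A short sketch of how the grouped output might be consumed afterwards; the group name 'xs_group' is illustrative, and only Mantid calls already used above are assumed:

    # Retrieve the workspace group and read back the cross-section tag that
    # AddSampleLog attached to each member.
    wsg = AnalysisDataService.retrieve('xs_group')
    for name in wsg.getNames():
        ws = AnalysisDataService.retrieve(name)
        xs_id = ws.getRun().getProperty('cross_section_id').value
        api.logger.information('%s -> %s' % (name, xs_id))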
Example 3
    def initSetup(self, dataws, wsindex, peaktype, profilews, braggpeakws,
                  bkgdtype, bkgdparws, startx, endx):
        """ Set up the properties for LeBailFit as the first time including
        do a Le bail calculation based on the input parameters
        including profilews, braggpeakws, and etc
        """
        # Data and data range
        self._datawsname = str(dataws)
        if startx <= 0.:
            startx = dataws.readX(wsindex)[0]
        if endx <= 0.:
            endx = dataws.readX(wsindex)[-1]

        # Profile
        self._peakType = peaktype
        self._profileWS = profilews
        self._braggpeakws = braggpeakws
        self._bkgdtype = bkgdtype
        self._bkgdparws = bkgdparws

        # Generate record table
        self._genRecordTable()

        # Verify the input parameters
        runner = RefineProfileParameters(self.glog)

        outwsname = self._datawsname + "_Init"

        runner.setInputs(self._datawsname, self._peakType, self._profileWS,
                         self._braggpeakws, self._bkgdtype, self._bkgdparws)
        # FIXME - Need to verify whether input and output background parameter ws name can be same
        runner.setOutputs(outwsname, self._profileWS, self._braggpeakws,
                          self._bkgdparws)

        self._recordPreRefineInfo(runner, -1)
        runner.calculate(startx, endx)
        self._recordPostRefineInfo(runner)

        # Group the newly generated workspaces and record them
        api.GroupWorkspaces(
            InputWorkspaces="%s, %s, %s, %s" %
            (outwsname, self._profileWS, self._braggpeakws, self._bkgdparws),
            OutputWorkspace=self._wsgroupName)
        self._wsgroupCreated = True

        # Update the record table
        # Replace the 'Refine' cell of step 0 with the ID (it is always empty)
        self._recordws.setCell(0, 5, self._ID)
        # Replace 'InputProfileWorkspace' with the profile type (it is always the same as the output)
        self._recordws.setCell(0, 9, self._peakType)

        self._isSetup = True

        return
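As a rough sanity check of the bookkeeping above, the two record-table cells written in initSetup can be read back with the table workspace's cell method; the table name 'RecordInfoTable' is illustrative:

    # Hypothetical check: read back the cells set by setCell(0, 5, ...) and
    # setCell(0, 9, ...) in initSetup.
    recordws = AnalysisDataService.retrieve('RecordInfoTable')
    print(recordws.cell(0, 5))  # the refinement ID
    print(recordws.cell(0, 9))  # the peak/profile type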
Example 4
    def filter_cross_sections(self, file_path):
        """
            Filter events according to the polarization states
            :param str file_path: data file path
        """
        output_wsg = self.getPropertyValue("CrossSectionWorkspaces")
        pol_state = self.getProperty("PolState").value
        pol_veto = self.getProperty("PolVeto").value
        ana_state = self.getProperty("AnaState").value
        ana_veto = self.getProperty("AnaVeto").value
        ws_event_data = self.getProperty("InputWorkspace").value

        if ws_event_data is not None:
            ws_raw_name = str(ws_event_data)
            ws_raw = ws_event_data
        else:
            ws_raw_name = os.path.basename(file_path)
            ws_raw = api.LoadEventNexus(Filename=file_path,
                                        OutputWorkspace=ws_raw_name)

        if self.getProperty("CheckDevices").value:
            # Check whether we have a polarizer
            polarizer = ws_raw.getRun().getProperty("Polarizer").value[0]
            # Check whether we have an analyzer
            analyzer = ws_raw.getRun().getProperty("Analyzer").value[0]
        else:
            polarizer = 1
            analyzer = 1

        change_list = []
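        # Each entry appended below is a tuple whose first element is a time
        # stamp; the list is sorted on that time before being turned into a
        # splitter table.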
        if polarizer > 0:
            # SF1 ON
            splitws, _ = api.GenerateEventsFilter(
                InputWorkspace=ws_raw_name,
                LogName=pol_state,
                MinimumLogValue=0.99,
                MaximumLogValue=1.01,
                TimeTolerance=0,
                OutputWorkspace='filter',
                InformationWorkspace='filter_info',
                LogBoundary='Left',
                UnitOfTime='Seconds')
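            # The splitter table converts to a dict with 'start' and 'stop'
            # time arrays for the intervals where the log was within
            # [0.99, 1.01], i.e. SF1 ON.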
            time_dict = splitws.toDict()
            change_list.extend(
                extract_times(time_dict['start'], True, is_sf1=True))
            change_list.extend(
                extract_times(time_dict['stop'], False, is_sf1=True))

            # SF1 OFF
            splitws, _ = api.GenerateEventsFilter(
                InputWorkspace=ws_raw_name,
                LogName=pol_state,
                MinimumLogValue=-0.01,
                MaximumLogValue=0.01,
                TimeTolerance=0,
                OutputWorkspace='filter',
                InformationWorkspace='filter_info',
                LogBoundary='Left',
                UnitOfTime='Seconds')
            time_dict = splitws.toDict()
            change_list.extend(
                extract_times(time_dict['start'], False, is_sf1=True))
            change_list.extend(
                extract_times(time_dict['stop'], True, is_sf1=True))

            # SF1 VETO
            if pol_veto != '':
                splitws, _ = api.GenerateEventsFilter(
                    InputWorkspace=ws_raw_name,
                    LogName=pol_veto,
                    MinimumLogValue=0.99,
                    MaximumLogValue=1.01,
                    TimeTolerance=0,
                    OutputWorkspace='filter',
                    InformationWorkspace='filter_info',
                    LogBoundary='Left',
                    UnitOfTime='Seconds')
                time_dict = splitws.toDict()
                change_list.extend(
                    extract_times(time_dict['start'], True, is_veto1=True))
                change_list.extend(
                    extract_times(time_dict['stop'], False, is_veto1=True))

        if analyzer > 0:
            # SF2 ON
            splitws, _ = api.GenerateEventsFilter(
                InputWorkspace=ws_raw_name,
                LogName=ana_state,
                MinimumLogValue=0.99,
                MaximumLogValue=1.01,
                TimeTolerance=0,
                OutputWorkspace='filter',
                InformationWorkspace='filter_info',
                LogBoundary='Left',
                UnitOfTime='Seconds')
            time_dict = splitws.toDict()
            change_list.extend(
                extract_times(time_dict['start'], True, is_sf2=True))
            change_list.extend(
                extract_times(time_dict['stop'], False, is_sf2=True))

            # SF2 OFF
            splitws, _ = api.GenerateEventsFilter(
                InputWorkspace=ws_raw_name,
                LogName=ana_state,
                MinimumLogValue=-0.01,
                MaximumLogValue=0.01,
                TimeTolerance=0,
                OutputWorkspace='filter',
                InformationWorkspace='filter_info',
                LogBoundary='Left',
                UnitOfTime='Seconds')
            time_dict = splitws.toDict()
            change_list.extend(
                extract_times(time_dict['start'], False, is_sf2=True))
            change_list.extend(
                extract_times(time_dict['stop'], True, is_sf2=True))

            # SF2 VETO
            if ana_veto != '':
                splitws, _ = api.GenerateEventsFilter(
                    InputWorkspace=ws_raw_name,
                    LogName=ana_veto,
                    MinimumLogValue=0.99,
                    MaximumLogValue=1.01,
                    TimeTolerance=0,
                    OutputWorkspace='filter',
                    InformationWorkspace='filter_info',
                    LogBoundary='Left',
                    UnitOfTime='Seconds')
                time_dict = splitws.toDict()
                change_list.extend(
                    extract_times(time_dict['start'], True, is_veto2=True))
                change_list.extend(
                    extract_times(time_dict['stop'], False, is_veto2=True))

        start_time = ws_raw.run().startTime().totalNanoseconds()

        change_list = sorted(change_list, key=itemgetter(0))
        split_table_ws = self.create_table(change_list,
                                           start_time,
                                           has_polarizer=polarizer > 0,
                                           has_analyzer=analyzer > 0)

        # Filter events if we found enough information to do so
        if split_table_ws.rowCount() > 0:
            outputs = api.FilterEvents(InputWorkspace=ws_raw,
                                       SplitterWorkspace=split_table_ws,
                                       OutputWorkspaceBaseName=output_wsg,
                                       GroupWorkspaces=True,
                                       FilterByPulseTime=False,
                                       OutputWorkspaceIndexedFrom1=False,
                                       CorrectionToSample="None",
                                       SpectrumWithoutDetector="Skip",
                                       SplitSampleLogs=True,
                                       RelativeTime=True,
                                       ExcludeSpecifiedLogs=True,
                                       OutputTOFCorrectionWorkspace='_tmp')
            AnalysisDataService.remove('_tmp')
            for ws in outputs[-1]:
                pol_state = str(ws).replace(output_wsg + '_', '')
                api.AddSampleLog(Workspace=ws,
                                 LogName='cross_section_id',
                                 LogText=pol_state)

            if ws_event_data is None:
                AnalysisDataService.remove(ws_raw_name)
            self.setProperty("CrossSectionWorkspaces", output_wsg)

        # If we don't have a splitter table, it might be because we don't have analyzer/polarizer
        # information. In this case don't filter and return the raw workspace.
        elif polarizer <= 0 and analyzer <= 0:
            api.logger.warning("No polarizer/analyzer information available")
            self.setProperty("CrossSectionWorkspaces",
                             api.GroupWorkspaces([ws_raw]))
        else:
            api.logger.error("No events remained after filtering")
            if ws_event_data is None:
                AnalysisDataService.remove(ws_raw_name)
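The extract_times helper used throughout is defined elsewhere in the module. A minimal sketch consistent with how it is called above (each call sets one flag, and the first tuple element must be the time so that sorted(..., key=itemgetter(0)) works) might be:

    # Sketch only: the real helper lives in the original module; the exact
    # tuple layout consumed by create_table is an assumption here.
    def extract_times(times, is_start, is_sf1=False, is_sf2=False,
                      is_veto1=False, is_veto2=False):
        return [(t, is_start, is_sf1, is_sf2, is_veto1, is_veto2)
                for t in times]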