Example #1
0
    def _add_row_to_table(self,
                          ws_name,
                          row,
                          run_no=None,
                          bank=None,
                          plotted=False,
                          bgsub=False,
                          niter=50,
                          xwindow=None,
                          SG=True):

        words = ws_name.split("_")
        # find xwindow from ws xunit if not specified
        if not xwindow:
            ws = self.model.get_loaded_workspaces()[ws_name]
            if ws.getAxis(0).getUnit().unitID() == "TOF":
                xwindow = 600
            else:
                xwindow = 0.02
        if run_no is not None and bank is not None:
            self.view.add_table_row(run_no, bank, plotted, bgsub, niter,
                                    xwindow, SG)
        elif len(words) == 4 and words[2] == "bank":
            logger.notice(
                "No sample logs present, determining information from workspace name."
            )
            self.view.add_table_row(words[1], words[3], plotted, bgsub, niter,
                                    xwindow, SG)
        else:
            logger.warning(
                "The workspace '{}' was not in the correct naming format. Files should be named in the following way: "
                "INSTRUMENT_RUNNUMBER_bank_BANK. Using workspace name as identifier."
                .format(ws_name))
            self.view.add_table_row(ws_name, "N/A", plotted, bgsub, niter,
                                    xwindow, SG)
Example #2
0
    def do_fit_all(self, ws_list, do_sequential=True):
        fitprop_list = []
        prev_fitprop = self.view.read_fitprop_from_browser()
        for ws in ws_list:
            logger.notice(f'Starting to fit workspace {ws}')
            fitprop = deepcopy(prev_fitprop)
            # update I/O workspace name
            fitprop['properties']['Output'] = ws
            fitprop['properties']['InputWorkspace'] = ws
            # do fit
            fit_output = Fit(**fitprop['properties'])
            # update results
            fitprop['status'] = fit_output.OutputStatus
            funcstr = str(fit_output.Function.fun)
            fitprop['properties']['Function'] = funcstr
            if "success" in fitprop['status'].lower() and do_sequential:
                # update function in prev fitprop to use for next workspace
                prev_fitprop['properties']['Function'] = funcstr
            # update last fit in fit browser and save setup
            self.view.update_browser(fit_output.OutputStatus, funcstr, ws)
            # append a deep copy to output list (will be initial parameters if not successful)
            fitprop_list.append(fitprop)

        logger.notice('Sequential fitting finished.')
        self.fit_all_done_notifier.notify_subscribers(fitprop_list)
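
A minimal sketch of the fitprop dictionary that do_fit_all assumes read_fitprop_from_browser returns, inferred from the keys read and written above; every value shown is an illustrative assumption, not part of the original code.

    # Shape implied by the keys used in do_fit_all; all values are placeholders.
    example_fitprop = {
        'properties': {
            'Function': 'name=Gaussian,Height=10,PeakCentre=1.2,Sigma=0.01',  # assumed example function string
            'InputWorkspace': 'ENGINX_12345_bank_1',  # overwritten for each workspace in the loop
            'Output': 'ENGINX_12345_bank_1',          # overwritten for each workspace in the loop
        },
        'status': None,  # filled in from fit_output.OutputStatus after each fit
    }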
Example #3
0
 def _save_output(self,
                  instrument,
                  sample_run,
                  van_run,
                  bank,
                  sample_workspace,
                  rb_num,
                  unit="TOF"):
     """
     Save a focused workspace to the file system. Saves separate copies to a User directory if an rb number has
     been set.
     :param instrument: The instrument the data is from.
     :param sample_run: The sample run number that was focused.
     :param van_run: The vanadium run number used in the focusing.
     :param bank: The name of the bank being saved.
     :param sample_workspace: The name of the workspace to be saved.
     :param rb_num: Usually an experiment id, defines the name of the user directory.
     :param unit: The x-axis unit of the data being saved (default "TOF").
     """
     self._save_focused_output_files_as_nexus(instrument, sample_run,
                                              van_run, bank,
                                              sample_workspace, rb_num,
                                              unit)
     self._save_focused_output_files_as_gss(instrument, sample_run, van_run,
                                            bank, sample_workspace, rb_num,
                                            unit)
     self._save_focused_output_files_as_topas_xye(instrument, sample_run,
                                                  van_run, bank,
                                                  sample_workspace, rb_num,
                                                  unit)
     output_path = path.join(output_settings.get_output_path(), 'Focus')
     logger.notice(f"\n\nFocus files saved to: \"{output_path}\"\n\n")
     if rb_num:
         output_path = path.join(output_settings.get_output_path(), 'User',
                                 rb_num, 'Focus')
         logger.notice(
             f"\n\nFocus files also saved to: \"{output_path}\"\n\n")
Example #4
0
 def _check_region_grouping_ws_exists(grouping_ws_name: str,
                                      inst_ws) -> bool:
     """
     Check that the required grouping workspace for this focus exists, and if not present for a North/South bank
     focus, retrieve them from the user directories or create them (expected if first focus with loaded calibration)
     :param grouping_ws_name: Name of the grouping workspace whose presence in the ADS is being checked
     :param inst_ws: Workspace containing the instrument data for use in making a bank grouping workspace
     :return: True if the required workspace exists (or has just been loaded/created), False if not
     """
     if not Ads.doesExist(grouping_ws_name):
         if "North" in grouping_ws_name:
             logger.notice(
                 "NorthBank grouping workspace not present in ADS, loading")
             EnggUtils.get_bank_grouping_workspace(1, inst_ws)
             return True
         elif "South" in grouping_ws_name:
             logger.notice(
                 "SouthBank grouping workspace not present in ADS, loading")
             EnggUtils.get_bank_grouping_workspace(2, inst_ws)
             return True
         else:
             logger.warning(
                 f"Cannot focus as the grouping workspace \"{grouping_ws_name}\" is not present."
             )
             return False
     return True
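
A hedged usage sketch (the workspace names are illustrative, not from the original code): the boolean return is meant to gate the focus step, since a warning has already been logged when it returns False.

    # Illustrative guard around a focus step; grouping_ws_name and inst_ws as in the docstring.
    if not _check_region_grouping_ws_exists("NorthBank_grouping", inst_ws):
        return  # skip focusing this region; the reason was logged above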
Example #5
0
    def PyExec(self):
        # setup progress bar
        prog_reporter = Progress(self, start=0.0, end=1.0, nreports=3)
        # Get input
        ws_list = self.getProperty("PeakWorkspaces").value
        a = self.getProperty('a').value
        b = self.getProperty('b').value
        c = self.getProperty('c').value
        alpha = self.getProperty('alpha').value
        beta = self.getProperty('beta').value
        gamma = self.getProperty('gamma').value
        self.tol = self.getProperty('Tolerance').value

        # Find initial UB and use to index peaks in all runs
        prog_reporter.report(1, "Find initial UB for peak indexing")
        self.find_initial_indexing(
            a, b, c, alpha, beta, gamma,
            ws_list)  # removes runs from ws_list if can't index

        # optimize the lattice parameters across runs (i.e. B matrix)
        prog_reporter.report(2, "Optimize B")

        def fobj(x):
            return self.calcResiduals(x, ws_list)

        alatt0 = [a, b, c, alpha, beta, gamma]
        try:
            alatt, cov, info, msg, ier = leastsq(fobj,
                                                 x0=alatt0,
                                                 full_output=True)
            # evaluate fobj at the optimal solution to set the UB (the final leastsq iterate can be slightly sub-optimal)
            fobj(alatt)
        except ValueError:
            logger.error(
                "CalculateUMatrix failed - check initial lattice parameters and tolerance provided."
            )
            return

        success = ier in [
            1, 2, 3, 4
        ] and cov is not None  # cov is None when matrix is singular
        if success:
            # calculate errors
            dof = sum(
                [self.child_IndexPeaks(ws, RoundHKLs=True)
                 for ws in ws_list]) - len(alatt0)
            err = np.sqrt(abs(np.diag(cov)) * (info['fvec']**2).sum() / dof)
            for wsname in ws_list:
                ws = AnalysisDataService.retrieve(wsname)
                ws.sample().getOrientedLattice().setError(*err)
            logger.notice(
                f"Lattice parameters successfully refined for workspaces: {ws_list}\n"
                f"Lattice Parameters: {np.array2string(alatt, precision=6)}\n"
                f"Parameter Errors  : {np.array2string(err, precision=6)}")
        else:
            logger.warning(
                f"Error in optimization of lattice parameters: {msg}")
        # complete progress
        prog_reporter.report(3, "Done")
Example #6
0
 def create_or_update_bgsub_ws(self, ws_name, bg_params):
     ws = self._loaded_workspaces[ws_name]
     ws_bg = self._bg_sub_workspaces[ws_name]
     if not ws_bg or self._bg_params[ws_name] == [] or bg_params[1:] != self._bg_params[ws_name][1:]:
         background = self.estimate_background(ws_name, *bg_params[1:])
         self._bg_params[ws_name] = bg_params
         bgsub_ws_name = ws_name + "_bgsub"
         bgsub_ws = Minus(LHSWorkspace=ws, RHSWorkspace=background, OutputWorkspace=bgsub_ws_name)
         self._bg_sub_workspaces[ws_name] = bgsub_ws
         DeleteWorkspace(background)
     else:
         logger.notice("Background workspace already calculated")
Example #7
0
def needs_loading(property_value, loading_reduction_type):
    """
    Checks whether a given unary input needs loading or is already loaded in
    ADS.
    @param property_value: the string value of the corresponding FileProperty
    @param loading_reduction_type : the reduction_type of input to load
    """
    loading = False
    ws_name = ''
    if property_value:
        ws_name = path.splitext(path.basename(property_value))[0]
        if mtd.doesExist(ws_name):
            logger.notice('Reusing {0} workspace: {1}'.format(
                loading_reduction_type, ws_name))
        else:
            loading = True
    return [loading, ws_name]
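
A minimal usage sketch (not part of the original example; the property name 'BeamInputFile' is a hypothetical assumption and the call is assumed to run inside an algorithm's PyExec): load the file only when no matching workspace is already in the ADS.

    # Illustrative only; 'BeamInputFile' is a hypothetical FileProperty name.
    load_beam, beam_ws = needs_loading(self.getPropertyValue('BeamInputFile'), 'beam')
    if load_beam:
        Load(Filename=self.getPropertyValue('BeamInputFile'), OutputWorkspace=beam_ws)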
Example #8
0
 def _add_row_to_table(self,
                       ws_name,
                       row,
                       run_no=None,
                       bank=None,
                       checked=False):
     words = ws_name.split("_")
     if run_no is not None and bank is not None:
         self.view.add_table_row(run_no, bank, checked)
         self.row_numbers[ws_name] = row
     elif len(words) == 4 and words[2] == "bank":
         logger.notice(
             "No sample logs present, determining information from workspace name."
         )
         self.view.add_table_row(words[1], words[3], checked)
         self.row_numbers[ws_name] = row
     else:
         logger.warning(
             "The workspace '{}' was not in the correct naming format. Files should be named in the following way: "
             "INSTRUMENT_RUNNUMBER_bank_BANK. Using workspace name as identifier."
             .format(ws_name))
         self.view.add_table_row(ws_name, "N/A", checked)
         self.row_numbers[ws_name] = row
Example #9
0
def needs_processing(property_value, process_reduction_type):
    """
    Checks whether a given unary reduction needs processing or is already cached
    in ADS with expected name.
    @param property_value: the string value of the corresponding MultipleFile
                           input property
    @param process_reduction_type: the reduction_type of process
    """
    do_process = False
    ws_name = ''
    if property_value:
        run_number = get_run_number(property_value)
        ws_name = run_number + '_' + process_reduction_type
        if mtd.doesExist(ws_name):
            if isinstance(mtd[ws_name], WorkspaceGroup):
                run = mtd[ws_name][0].getRun()
            else:
                run = mtd[ws_name].getRun()
            if run.hasProperty('ProcessedAs'):
                process = run.getLogData('ProcessedAs').value
                if process == process_reduction_type:
                    logger.notice('Reusing {0} workspace: {1}'.format(
                        process_reduction_type, ws_name))
                else:
                    logger.warning('{0} workspace found, but processed '
                                   'differently: {1}'.format(
                                       process_reduction_type, ws_name))
                    do_process = True
            else:
                logger.warning('{0} workspace found, but missing the '
                               'ProcessedAs flag: {1}'.format(
                                   process_reduction_type, ws_name))
                do_process = True
        else:
            do_process = True
    return [do_process, ws_name]
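
A hedged usage sketch (the property name, reduction type and logging step are assumptions, and the snippet is assumed to run inside an algorithm's PyExec): after processing, stamping the workspace with a ProcessedAs log is what lets the cache check above reuse it next time.

    # Illustrative only; 'SampleRuns' and 'Sample' are assumed names.
    do_process, ws_name = needs_processing(self.getPropertyValue('SampleRuns'), 'Sample')
    if do_process:
        # ... run the actual reduction into ws_name, then tag it for reuse
        AddSampleLog(Workspace=ws_name, LogName='ProcessedAs', LogText='Sample')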
Example #10
0
 def on_calibrate_clicked(self):
     plot_output = self.view.get_plot_output()
     if self.view.get_new_checked() and self._validate():
         sample_file = self.view.get_sample_filename()
         if self.view.get_crop_checked():
             self.start_cropped_calibration_worker(sample_file,
                                                   plot_output, self.rb_num)
         else:
              self.start_calibration_worker(sample_file, plot_output, self.rb_num)
     elif self.view.get_load_checked():
         if not self.validate_path():
             logger.notice("Invalid calibration path")
             return
         filename = self.view.get_path_filename()
         try:
             instrument, sample_file, grp_ws_name, roi_text, banks = \
                 self.model.load_existing_calibration_files(filename)
          except Exception:
             return
         self.pending_calibration.set_calibration(sample_file, instrument)
         self.pending_calibration.set_roi_info_load(banks, grp_ws_name, roi_text)
         self.set_current_calibration()
         set_setting(output_settings.INTERFACES_SETTINGS_GROUP, output_settings.ENGINEERING_PREFIX,
                     "last_calibration_path", filename)
Example #11
0
def process_json(json_filename):
    """This will read a json file, process the data and save the calibration.

    Only ``Calibrant`` and ``Groups`` are required.

    An example input showing every possible option is:

    .. code-block:: JSON

      {
        "Calibrant": "12345",
        "Groups": "/path/to/groups.xml",
        "Mask": "/path/to/mask.xml",
        "Instrument": "NOM",
        "Date" : "2019_09_04",
        "SampleEnvironment": "shifter",
        "PreviousCalibration": "/path/to/cal.h5",
        "CalDirectory": "/path/to/output_directory",
        "CrossCorrelate": {"Step": 0.001,
                           "DReference: 1.5,
                           "Xmin": 1.0,
                           "Xmax": 3.0,
                           "MaxDSpaceShift": 0.25},
        "PDCalibration": {"PeakPositions": [1, 2, 3],
                          "TofBinning": (300,0.001,16666),
                          "PeakFunction": 'Gaussian',
                          "PeakWindow": 0.1,
                          "PeakWidthPercent": 0.001}
      }
    """
    with open(json_filename) as json_file:
        args = json.load(json_file)

    calibrant_file = args.get('CalibrantFile', None)
    if calibrant_file is None:
        calibrant = args['Calibrant']
    groups = args['Groups']
    out_groups_by = args.get('OutputGroupsBy', 'Group')
    sample_env = args.get('SampleEnvironment', 'UnknownSampleEnvironment')
    mask = args.get('Mask')
    instrument = args.get('Instrument', 'NOM')
    cc_kwargs = args.get('CrossCorrelate', {})
    pdcal_kwargs = args.get('PDCalibration', {})
    previous_calibration = args.get('PreviousCalibration')

    date = str(args.get('Date', datetime.datetime.now().strftime('%Y_%m_%d')))
    caldirectory = str(args.get('CalDirectory', os.path.abspath('.')))

    if calibrant_file is not None:
        ws = Load(calibrant_file)
        calibrant = ws.getRun().getProperty('run_number').value
    else:
        filename = f'{instrument}_{calibrant}'
        ws = Load(filename)

    calfilename = f'{caldirectory}/{instrument}_{calibrant}_{date}_{sample_env}.h5'
    logger.notice(f'going to create calibration file: {calfilename}')

    groups = LoadDetectorsGroupingFile(groups, InputWorkspace=ws)

    if mask:
        mask = LoadMask(instrument, mask)
        MaskDetectors(ws, MaskedWorkspace=mask)

    if previous_calibration:
        previous_calibration = LoadDiffCal(previous_calibration,
                                           MakeGroupingWorkspace=False,
                                           MakeMaskWorkspace=False)

    diffcal = do_group_calibration(ws,
                                   groups,
                                   previous_calibration,
                                   cc_kwargs=cc_kwargs,
                                   pdcal_kwargs=pdcal_kwargs)
    mask = mtd['group_calibration_pd_diffcal_mask']

    CreateGroupingWorkspace(InputWorkspace=ws,
                            GroupDetectorsBy=out_groups_by,
                            OutputWorkspace='out_groups')
    SaveDiffCal(CalibrationWorkspace=diffcal,
                MaskWorkspace=mask,
                GroupingWorkspace=mtd['out_groups'],
                Filename=calfilename)
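
A minimal invocation sketch: per the docstring only "Calibrant" and "Groups" are required, with everything else falling back to defaults; the file paths below are placeholders.

    # Write a minimal input file and run the calibration; paths are illustrative.
    import json
    minimal_args = {"Calibrant": "12345", "Groups": "/path/to/groups.xml"}
    with open("/tmp/minimal_calibration.json", "w") as handle:
        json.dump(minimal_args, handle)
    process_json("/tmp/minimal_calibration.json")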
Example #12
0
def cc_calibrate_groups(data_ws,
                        group_ws,
                        output_basename="_tmp_group_cc_calibration",
                        previous_calibration=None,
                        Step=0.001,
                        DReference=1.2615,
                        Xmin=1.22,
                        Xmax=1.30,
                        MaxDSpaceShift=None,
                        OffsetThreshold=1E-4,
                        SkipCrossCorrelation=[],
                        PeakFunction="Gaussian",
                        SmoothNPoints=0):
    """This will perform the CrossCorrelate/GetDetectorOffsets on a group
    of detector pixel.

    It works by looping over the different groups in the group_ws,
    extracting all unmasked spectra of a group, then running
    CrossCorrelate and GetDetectorOffsets on just that group, and
    combinning the results at the end. When running a group,
    CrossCorrelate and GetDetectorOffsets could be cycled until
    converging of offsets is reached, given the user input offset
    threshold. If offset threshold is specified to be equal to or
    larger than 1.0, no cycling will be carried out.

    The first unmasked spectra of the group will be used for the
    ReferenceSpectra in CrossCorrelate.

    :param data_ws: Input calibration raw data (in TOF), assumed to already be correctly masked
    :param group_ws: grouping workspace, e.g. output from LoadDetectorsGroupingFile
    :param output_basename: Optional name to use for temporary and output workspaces
    :param previous_calibration: Optional previous diffcal workspace
    :param Step: step size for binning of data and input for GetDetectorOffsets, default 0.001
    :param DReference: DReference parameter for GetDetectorOffsets, default 1.2615
    :param Xmin: Xmin parameter for CrossCorrelate, default 1.22
    :param Xmax: Xmax parameter for CrossCorrelate, default 1.30
    :param MaxDSpaceShift: MaxDSpaceShift parameter for CrossCorrelate, default None
    :param OffsetThreshold: Convergence threshold for cycling cross correlation, default 1E-4
    :param SkipCrossCorrelation: Skip cross correlation for specified groups
    :param PeakFunction: Peak function to use for extracting the offset
    :param SmoothNPoints: Number of points for smoothing spectra, for cross correlation ONLY
    :return: Combined DiffCal workspace from all the different groups
    """
    if previous_calibration:
        ApplyDiffCal(data_ws, CalibrationWorkspace=previous_calibration)

    data_d = ConvertUnits(data_ws, Target='dSpacing', OutputWorkspace='data_d')

    group_list = np.unique(group_ws.extractY())

    _accum_cc = None
    to_skip = []
    for group in group_list:
        # Work out the input parameters for CrossCorrelate and GetDetectorOffsets, specifically
        # those parameters that accept either a single value or a list. If a list is given,
        # a different parameter setup is used for each group.
        Xmin_group = Xmin[int(group) - 1] if isinstance(Xmin, list) else Xmin
        Xmax_group = Xmax[int(group) - 1] if isinstance(Xmax, list) else Xmax
        MDS_group = MaxDSpaceShift[int(group) - 1] if isinstance(MaxDSpaceShift, list) else MaxDSpaceShift
        DRef_group = DReference[int(group) - 1] if isinstance(DReference, list) else DReference
        OT_group = OffsetThreshold[int(group) - 1] if isinstance(OffsetThreshold, list) else OffsetThreshold
        pf_group = PeakFunction[int(group) - 1] if isinstance(PeakFunction, list) else PeakFunction
        snpts_group = SmoothNPoints[int(group) - 1] if isinstance(SmoothNPoints, list) else SmoothNPoints
        cycling = OT_group < 1.0

        indexes = np.where(group_ws.extractY().flatten() == group)[0]
        sn = np.array(group_ws.getSpectrumNumbers())[indexes]
        try:
            ws_indexes = [data_d.getIndexFromSpectrumNumber(int(i)) for i in sn]
        except RuntimeError:
            # data does not contain spectrum in group
            continue

        if group in SkipCrossCorrelation:
            to_skip.extend(ws_indexes)

        ExtractSpectra(data_d, WorkspaceIndexList=ws_indexes, OutputWorkspace='_tmp_group_cc')
        ExtractUnmaskedSpectra('_tmp_group_cc', OutputWorkspace='_tmp_group_cc')
        ExtractSpectra(data_ws, WorkspaceIndexList=ws_indexes, OutputWorkspace='_tmp_group_cc_raw')
        ExtractUnmaskedSpectra('_tmp_group_cc_raw', OutputWorkspace='_tmp_group_cc_raw')
        num_spectra = mtd['_tmp_group_cc'].getNumberHistograms()
        if num_spectra < 2:
            continue
        Rebin('_tmp_group_cc', Params=f'{Xmin_group},{Step},{Xmax_group}', OutputWorkspace='_tmp_group_cc')
        if snpts_group >= 3:
            SmoothData('_tmp_group_cc', NPoints=snpts_group, OutputWorkspace='_tmp_group_cc')

        # Figure out the brightest spectrum to use as the reference for cross correlation.
        CloneWorkspace('_tmp_group_cc_raw', OutputWorkspace='_tmp_group_cc_raw_tmp')
        intg = Integration('_tmp_group_cc_raw_tmp',
                           StartWorkspaceIndex=0,
                           EndWorkspaceIndex=num_spectra-1,
                           OutputWorkspace='_tmp_group_intg')
        brightest_spec_index = int(np.argmax(np.array([intg.readY(i)[0] for i in range(num_spectra)])))

        # Cycling cross correlation. At each step, we will use the obtained offsets and DIFC's from
        # previous step to obtain new DIFC's. In this way, spectra in group will come closer and closer
        # to each other as the cycle goes. This will continue until converging criterion is reached. The
        # converging criterion is set in such a way that the median value of all the non-zero offsets
        # should be smaller than the threshold (user tuned parameter, default to 1E-4, meaning 0.04%
        # relative offset).
        num_cycle = 1
        while True:
            CrossCorrelate('_tmp_group_cc',
                           Xmin=Xmin_group, XMax=Xmax_group,
                           MaxDSpaceShift=MDS_group,
                           ReferenceSpectra=brightest_spec_index,
                           WorkspaceIndexMin=0,
                           WorkspaceIndexMax=num_spectra-1,
                           OutputWorkspace='_tmp_group_cc')

            bin_range = (Xmax_group-Xmin_group)/Step
            GetDetectorOffsets(InputWorkspace='_tmp_group_cc',
                               Step=Step,
                               Xmin=-bin_range, XMax=bin_range,
                               DReference=DRef_group,
                               MaxOffset=1,
                               PeakFunction=pf_group,
                               OutputWorkspace='_tmp_group_cc')

            if group not in SkipCrossCorrelation:
                offsets_tmp = []
                for item in ws_indexes:
                    if abs(mtd['_tmp_group_cc'].readY(item)[0]) != 0:
                        offsets_tmp.append(abs(mtd['_tmp_group_cc'].readY(item)[0]))
                offsets_tmp = np.array(offsets_tmp)
                logger.notice(f'Running group-{group}, cycle-{num_cycle}.')
                logger.notice(f'Median offset (no sign) = {np.median(offsets_tmp)}')
                converged = np.median(offsets_tmp) < OT_group
            else:
                for item in ws_indexes:
                    mtd['_tmp_group_cc'].dataY(item)[0] = 0.0
                logger.notice(f'Cross correlation skipped for group-{group}.')
                converged = True

            if not cycling or converged:
                if cycling and converged:
                    if group not in SkipCrossCorrelation:
                        logger.notice(f'Cross correlation for group-{group} converged '
                                      f'with offset threshold {OT_group}.')
                break
            else:
                previous_calibration = ConvertDiffCal('_tmp_group_cc',
                                                      PreviousCalibration=previous_calibration,
                                                      OutputWorkspace='_tmp_group_cc_diffcal')
                ApplyDiffCal('_tmp_group_cc_raw', CalibrationWorkspace='_tmp_group_cc_diffcal')
                ConvertUnits('_tmp_group_cc_raw', Target='dSpacing', OutputWorkspace='_tmp_group_cc')
                Rebin('_tmp_group_cc', Params=f'{Xmin_group},{Step},{Xmax_group}', OutputWorkspace='_tmp_group_cc')

            num_cycle += 1

        if not _accum_cc:
            _accum_cc = RenameWorkspace('_tmp_group_cc')
        else:
            _accum_cc += mtd['_tmp_group_cc']
            # DeleteWorkspace('_tmp_group_cc')

    previous_calibration = ConvertDiffCal('_accum_cc',
                                          PreviousCalibration=previous_calibration,
                                          OutputWorkspace=f'{output_basename}_cc_diffcal')

    DeleteWorkspace('_accum_cc')
    DeleteWorkspace('_tmp_group_cc')
    DeleteWorkspace('_tmp_group_cc_raw')
    if cycling and '_tmp_group_cc_diffcal' in mtd:
        DeleteWorkspace('_tmp_group_cc_diffcal')

    return mtd[f'{output_basename}_cc_diffcal'], to_skip
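
A hedged usage sketch (workspace handles and numeric values are illustrative): as the docstring describes, per-group parameters may be passed as lists with one entry per group, and groups listed in SkipCrossCorrelation are passed through with zero offsets.

    # Illustrative call for a two-group grouping workspace; all values are assumptions.
    diffcal_ws, skipped_indices = cc_calibrate_groups(data_ws,
                                                      group_ws,
                                                      output_basename='NOM_group_cal',
                                                      Xmin=[1.22, 0.90],
                                                      Xmax=[1.30, 1.10],
                                                      OffsetThreshold=1E-4,
                                                      SkipCrossCorrelation=[2])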
Example #13
0
 def _log_xunit_change(self, xunit):
     logger.notice(
         "Subsequent files will be loaded with the x-axis unit:\t{}".format(
             xunit))