Code example #1
File: data_model.py Project: robertapplin/mantid
 def on_draw(event):
     if event.canvas.signalsBlocked():
         # This stops an infinite loop, as draw() is called within this handler (with signalsBlocked == True)
         # Resets signalsBlocked to False (the default value)
         event.canvas.blockSignals(False)
     else:
         axes = event.canvas.figure.get_axes()
         data_line = next(
             (line for line in axes[0].get_tracked_artists()), None)
         bg_line = next((line for line in axes[0].get_lines()
                         if line not in axes[0].get_tracked_artists()),
                        None)
         bgsub_line = next(
             (line for line in axes[1].get_tracked_artists()), None)
         if data_line and bg_line and bgsub_line:
             event.canvas.blockSignals(
                 True
             )  # this doesn't stop this handler being called again on canvas.draw()
             bg_line.set_ydata(data_line.get_ydata() -
                               bgsub_line.get_ydata())
             event.canvas.draw()
         else:
             # would like to close the fig at this point but this interferes with the mantid ADS observers when
             # any of the tracked workspaces are deleted and causes mantid to hard crash - so just print warning
             logger.warning(
                 f"Inspect background figure {event.canvas.figure.number} has been invalidated - the "
                 f"background curve will no longer be updated.")
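
For context, a draw-event handler like this is registered on the figure canvas. Below is a minimal sketch of the wiring, assuming a Qt-based matplotlib backend (so the canvas inherits blockSignals from QObject); the two axes are hypothetical stand-ins for Mantid's tracked-artist plotting:

import matplotlib
matplotlib.use("Qt5Agg")  # a Qt backend, so the canvas has QObject.blockSignals
import matplotlib.pyplot as plt

fig, (ax_data, ax_bgsub) = plt.subplots(2)
# 'draw_event' fires after every canvas.draw(), which is why the handler
# above must block signals before redrawing to avoid infinite recursion.
cid = fig.canvas.mpl_connect("draw_event", on_draw)
fig.canvas.draw()
# fig.canvas.mpl_disconnect(cid) detaches the handler again.
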
Code example #2
def old_python():
    """Check if Python has a proper version."""
    is_python_old = AbinsModules.AbinsTestHelpers.old_python()
    if is_python_old:
        logger.warning(
            "Skipping AbinsLoadCRYSTALTest because Python is too old.")
    return is_python_old
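
A guard like this is typically consumed at the top of a test module. A hedged sketch using the standard-library unittest.skipIf decorator (the test class body is illustrative):

import unittest

@unittest.skipIf(old_python(), "Python is too old.")
class AbinsLoadCRYSTALTest(unittest.TestCase):
    def test_loader_runs(self):
        pass  # actual test assertions would go here
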
Code example #3
    def _add_row_to_table(self,
                          ws_name,
                          row,
                          run_no=None,
                          bank=None,
                          plotted=False,
                          bgsub=False,
                          niter=50,
                          xwindow=None,
                          SG=True):

        words = ws_name.split("_")
        # find xwindow from ws xunit if not specified
        if not xwindow:
            ws = self.model.get_loaded_workspaces()[ws_name]
            if ws.getAxis(0).getUnit().unitID() == "TOF":
                xwindow = 600
            else:
                xwindow = 0.02
        if run_no is not None and bank is not None:
            self.view.add_table_row(run_no, bank, plotted, bgsub, niter,
                                    xwindow, SG)
        elif len(words) == 4 and words[2] == "bank":
            logger.notice(
                "No sample logs present, determining information from workspace name."
            )
            self.view.add_table_row(words[1], words[3], plotted, bgsub, niter,
                                    xwindow, SG)
        else:
            logger.warning(
                "The workspace '{}' was not in the correct naming format. Files should be named in the following way: "
                "INSTRUMENT_RUNNUMBER_bank_BANK. Using workspace name as identifier."
                .format(ws_name))
            self.view.add_table_row(ws_name, "N/A", plotted, bgsub, niter,
                                    xwindow, SG)
Code example #4
    def PyExec(self):
        logger.warning("EnggCalibrateFull is deprecated as of May 2021. Please use PDCalibration instead.")
        # Get peaks in dSpacing from file, and check we have what we need, before doing anything
        expected_peaks_d = EnggUtils.read_in_expected_peaks(self.getPropertyValue("ExpectedPeaksFromFile"),
                                                            self.getProperty('ExpectedPeaks').value)

        if len(expected_peaks_d) < 1:
            raise ValueError("Cannot run this algorithm without any input expected peaks")

        in_wks = self.getProperty('Workspace').value
        wks_indices = EnggUtils.get_ws_indices_from_input_properties(in_wks, self.getProperty('Bank').value,
                                                                     self.getProperty(self.INDICES_PROP_NAME).value)

        van_wks = self.getProperty("VanadiumWorkspace").value
        van_integ_wks = self.getProperty('VanIntegrationWorkspace').value
        van_curves_wks = self.getProperty('VanCurvesWorkspace').value
        # These corrections rely on ToF<->Dspacing conversions, so ideally they'd be done after the
        # calibration step, which creates a cycle / chicken-and-egg issue.
        EnggUtils.apply_vanadium_corrections(self, in_wks, wks_indices, van_wks, van_integ_wks, van_curves_wks)

        rebinned_ws = self._prepare_ws_for_fitting(in_wks, self.getProperty('RebinBinWidth').value)
        pos_tbl, peaks_tbl = self._calculate_calib_positions_tbl(rebinned_ws, wks_indices, expected_peaks_d)

        # Produce 2 results: 'output table' and 'apply calibration' + (optional) calibration file
        self.setProperty("OutDetPosTable", pos_tbl)
        self.setProperty("FittedPeaks", peaks_tbl)
        self._apply_calibration_table(in_wks, pos_tbl)
        self._output_det_pos_file(self.getPropertyValue('OutDetPosFilename'), pos_tbl)
Code example #5
File: data_model.py Project: robertapplin/mantid
    def update_log_workspace_group(self):
        # both ws and name are needed in the event a ws is renamed, when ws.name() is no longer correct

        if not self._data_workspaces:
            self.delete_logs()
            return

        if not self._log_workspaces:
            self.create_log_workspace_group()
        else:
            for log in self._log_names:
                if not ADS.doesExist(log):
                    self.make_log_table(log)
                    self._log_workspaces.add(log)
            if not ADS.doesExist("run_info"):
                self.make_runinfo_table()
                self._log_workspaces.add("run_info")
        # update log tables
        self.remove_all_log_rows()
        for irow, (ws_name, ws) in enumerate(
                self._data_workspaces.get_loaded_ws_dict().items()):
            try:
                self.add_log_to_table(ws_name, ws, irow)
            except Exception as e:
                logger.warning(
                    f"Unable to output log workspaces for workspace {ws_name}: "
                    + str(e))
Code example #6
File: data_model.py Project: stuartcampbell/mantid
    def add_log_to_table(self, ws_name, ws, irow):
        # both ws and name are needed in the event a ws is renamed, when ws.name() is no longer correct
        # make a dict for the run if one doesn't exist
        if ws_name not in self._log_values:
            self._log_values[ws_name] = dict()
        # add run info
        run = ws.getRun()
        row = [ws.getInstrument().getFullName(), ws.getRunNumber(), run.getProperty('bankid').value,
               run.getProtonCharge(), ws.getTitle()]
        self.write_table_row(ADS.retrieve("run_info"), row, irow)
        # add log data - loop over existing log workspaces not logs in settings as these might have changed
        currentRunLogs = [l.name for l in run.getLogData()]
        nullLogValue = full(2, nan)  # default nan if can't read/average log data
        if run.getProtonCharge() > 0 and "proton_charge" in currentRunLogs:
            for log in self._log_names:
                if log in self._log_values[ws_name]:
                    avg, stdev = self._log_values[ws_name][log]  # already averaged
                elif log in currentRunLogs:
                    avg, stdev = AverageLogData(ws_name, LogName=log, FixZero=False)
                else:
                    avg, stdev = nullLogValue
                self._log_values[ws_name][log] = [avg, stdev]  # update model dict (even if nan)
        else:
            self._log_values[ws_name] = {log: nullLogValue for log in self._log_names}
            logger.warning(f"{ws.name()} does not contain a proton charge log - log values cannot be averaged.")

        # write log values to table (nan if log could not be averaged)
        for log, avg_and_stdev in self._log_values[ws_name].items():
            self.write_table_row(ADS.retrieve(log), avg_and_stdev, irow)
        self.update_log_group_name()
Code example #7
File: data_model.py Project: stuartcampbell/mantid
 def load_files(self, filenames_string):
     self._last_added = []
     filenames = [name.strip() for name in filenames_string.split(",")]
     for filename in filenames:
         ws_name = self._generate_workspace_name(filename)
         if ws_name not in self._loaded_workspaces:
             try:
                 if not ADS.doesExist(ws_name):
                     ws = Load(filename, OutputWorkspace=ws_name)
                 else:
                     ws = ADS.retrieve(ws_name)
                 if ws.getNumberHistograms() == 1:
                     self._loaded_workspaces[ws_name] = ws
                     if ws_name not in self._bg_sub_workspaces:
                         self._bg_sub_workspaces[ws_name] = None
                     if ws_name not in self._bg_params:
                         self._bg_params[ws_name] = []
                     self._last_added.append(ws_name)
                 else:
                     logger.warning(
                         f"Invalid number of spectra in workspace {ws_name}. Skipping loading of file.")
             except RuntimeError as e:
                 logger.error(
                     f"Failed to load file: {filename}. Error: {e}. \n Continuing loading of other files.")
         else:
             logger.warning(f"File {ws_name} has already been loaded")
     self.update_log_workspace_group()
Code example #8
File: model.py Project: stuartcampbell/mantid
 def _check_region_grouping_ws_exists(grouping_ws_name: str,
                                      inst_ws) -> bool:
     """
     Check that the required grouping workspace for this focus exists; if one is not present for a North/South bank
     focus, retrieve it from the user directories or create it (expected on the first focus with a loaded calibration)
     :param grouping_ws_name: Name of the grouping workspace whose presence in the ADS is being checked
     :param inst_ws: Workspace containing the instrument data for use in making a bank grouping workspace
     :return: True if the required workspace exists (or has just been loaded/created), False if not
     """
     if not Ads.doesExist(grouping_ws_name):
         if "North" in grouping_ws_name:
             logger.notice(
                 "NorthBank grouping workspace not present in ADS, loading")
             EnggUtils.get_bank_grouping_workspace(1, inst_ws)
             return True
         elif "South" in grouping_ws_name:
             logger.notice(
                 "SouthBank grouping workspace not present in ADS, loading")
             EnggUtils.get_bank_grouping_workspace(2, inst_ws)
             return True
         else:
             logger.warning(
                 f"Cannot focus as the grouping workspace \"{grouping_ws_name}\" is not present."
             )
             return False
     return True
Code example #9
File: model.py Project: stuartcampbell/mantid
 def _whole_inst_prefocus(input_workspace, vanadium_integration_ws,
                          full_calib) -> bool:
     """This is used to perform the operations done on the whole instrument workspace, before the chosen region of
     interest is focused using _run_focus
     :param input_workspace: Raw sample run to process prior to focussing over a region of interest
     :param vanadium_integration_ws: Integral of the supplied vanadium run
     :param full_calib: Full instrument calibration workspace (table ws output from PDCalibration)
     :return True if successful, False if aborted
     """
     if input_workspace.getRun().getProtonCharge() > 0:
         NormaliseByCurrent(InputWorkspace=input_workspace,
                            OutputWorkspace=input_workspace)
     else:
         logger.warning(
             f"Skipping focus of run {input_workspace.name()} because it has invalid proton charge."
         )
         return False
     input_workspace /= vanadium_integration_ws
     # replace nans created in sensitivity correction
     ReplaceSpecialValues(InputWorkspace=input_workspace,
                          OutputWorkspace=input_workspace,
                          NaNValue=0,
                          InfinityValue=0)
     ApplyDiffCal(InstrumentWorkspace=input_workspace,
                  CalibrationWorkspace=full_calib)
     ConvertUnits(InputWorkspace=input_workspace,
                  OutputWorkspace=input_workspace,
                  Target='dSpacing')
     return True
Code example #10
def old_python():
    """Check if Python has a proper version."""
    is_python_old = AbinsTestHelpers.old_python()
    if is_python_old:
        logger.warning(
            "Skipping AbinsAtomsDataTest because Python is too old.")
    return is_python_old
Code example #11
File: data_model.py Project: TakudzwaMakoni/mantid
 def load_files(self, filenames_string, xunit):
     self._last_added = []
     filenames = [name.strip() for name in filenames_string.split(",")]
     for filename in filenames:
         ws_name = self._generate_workspace_name(filename, xunit)
         if ws_name not in self._loaded_workspaces:
             try:
                 if not ADS.doesExist(ws_name):
                     ws = Load(filename, OutputWorkspace=ws_name)
                     if xunit != "TOF":
                         ConvertUnits(InputWorkspace=ws, OutputWorkspace=ws_name, Target=xunit)
                 else:
                     ws = ADS.retrieve(ws_name)
                 if ws.getNumberHistograms() == 1:
                     self._loaded_workspaces[ws_name] = ws
                     if ws_name not in self._background_workspaces:
                         self._background_workspaces[ws_name] = None
                     self._last_added.append(ws_name)
                     self.add_log_to_table(ws_name, ws)
                 else:
                     logger.warning(
                         f"Invalid number of spectra in workspace {ws_name}. Skipping loading of file.")
             except RuntimeError as e:
                 logger.error(
                     f"Failed to load file: {filename}. Error: {e}. \n Continuing loading of other files.")
         else:
             logger.warning(f"File {ws_name} has already been loaded")
Code example #12
    def PyExec(self):
        # setup progress bar
        prog_reporter = Progress(self, start=0.0, end=1.0, nreports=3)
        # Get input
        ws_list = self.getProperty("PeakWorkspaces").value
        a = self.getProperty('a').value
        b = self.getProperty('b').value
        c = self.getProperty('c').value
        alpha = self.getProperty('alpha').value
        beta = self.getProperty('beta').value
        gamma = self.getProperty('gamma').value
        self.tol = self.getProperty('Tolerance').value

        # Find initial UB and use to index peaks in all runs
        prog_reporter.report(1, "Find initial UB for peak indexing")
        self.find_initial_indexing(
            a, b, c, alpha, beta, gamma,
            ws_list)  # removes runs from ws_list if can't index

        # optimize the lattice parameters across runs (i.e. B matrix)
        prog_reporter.report(2, "Optimize B")

        def fobj(x):
            return self.calcResiduals(x, ws_list)

        alatt0 = [a, b, c, alpha, beta, gamma]
        try:
            alatt, cov, info, msg, ier = leastsq(fobj,
                                                 x0=alatt0,
                                                 full_output=True)
            # evaluate fobj at the optimal solution to set the UB (leastsq's last internal evaluation may not be at the returned optimum)
            fobj(alatt)
        except ValueError:
            logger.error(
                "CalculateUMatrix failed - check initial lattice parameters and tolerance provided."
            )
            return

        success = ier in [
            1, 2, 3, 4
        ] and cov is not None  # cov is None when matrix is singular
        if success:
            # calculate errors
            dof = sum(
                [self.child_IndexPeaks(ws, RoundHKLs=True)
                 for ws in ws_list]) - len(alatt0)
            err = np.sqrt(abs(np.diag(cov)) * (info['fvec']**2).sum() / dof)
            for wsname in ws_list:
                ws = AnalysisDataService.retrieve(wsname)
                ws.sample().getOrientedLattice().setError(*err)
            logger.notice(
                f"Lattice parameters successfully refined for workspaces: {ws_list}\n"
                f"Lattice Parameters: {np.array2string(alatt, precision=6)}\n"
                f"Parameter Errors  : {np.array2string(err, precision=6)}")
        else:
            logger.warning(
                f"Error in optimization of lattice parameters: {msg}")
        # complete progress
        prog_reporter.report(3, "Done")
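
The leastsq call above follows the standard scipy.optimize pattern. A self-contained sketch of the same full_output unpacking and success check, on a toy residual function with made-up line-fit data:

import numpy as np
from scipy.optimize import leastsq

def fobj(x):
    # residuals of the model y = x[0]*t + x[1] against synthetic data
    t = np.linspace(0, 1, 20)
    return x[0] * t + x[1] - (2.0 * t + 1.0)

alatt, cov, info, msg, ier = leastsq(fobj, x0=[0.0, 0.0], full_output=True)
success = ier in [1, 2, 3, 4] and cov is not None  # cov is None if singular
if success:
    dof = len(fobj(alatt)) - len(alatt)
    err = np.sqrt(abs(np.diag(cov)) * (info['fvec'] ** 2).sum() / dof)
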
Code example #13
def old_python():
    """Check if Python has a proper version."""
    is_python_old = AbinsTestHelpers.old_python()
    if is_python_old:
        logger.warning(
            "Skipping AbinsFrequencyPowderGeneratorTest because Python is too old."
        )
    return is_python_old
Code example #14
def old_python():
    """Check if Python has a proper version."""
    is_python_old = AbinsTestHelpers.old_python()
    if is_python_old:
        logger.warning(
            "Skipping AbinsCalculateSingleCrystalTest because Python is too old."
        )
    return is_python_old
Code example #15
def check_processed_flag(ws, exp_value):
    """
        Logs a warning if the workspace was not processed as expected
        @param ws : workspace
        @param exp_value : the expected value of the ProcessedAs log
    """
    if ws.getRun().getLogData('ProcessedAs').value != exp_value:
        logger.warning(f'{exp_value} workspace is not processed as such.')
Code example #16
File: data_model.py Project: robertapplin/mantid
 def update_workspace_name(self, old_name, new_name):
     if new_name not in self.get_all_workspace_names():
         self._data_workspaces.rename(old_name, new_name)
         if old_name in self._log_values:
             self._log_values[new_name] = self._log_values.pop(old_name)
     else:
         logger.warning(
             f"There already exists a workspace with name {new_name}.")
     self.update_log_workspace_group()
Code example #17
 def remove_workspace(self, ws_name):
     if ws_name in self.get_loaded_workspaces():
         removed = self.get_loaded_workspaces().pop(ws_name)
         self.plot_removed_notifier.notify_subscribers(removed)
         self.plotted.discard(ws_name)
         self._repopulate_table()
         self.model.repopulate_logs()  # so matches new table
     elif ws_name in self.model.get_log_workspaces_name():
         logger.warning(
             'Deleting the log workspace may cause unexpected errors.')
Code example #18
File: data_model.py Project: TakudzwaMakoni/mantid
 def update_workspace_name(self, old_name, new_name):
     if new_name not in self._loaded_workspaces:
         self._loaded_workspaces[new_name] = self._loaded_workspaces.pop(old_name)
         if old_name in self._background_workspaces:
             self._background_workspaces[new_name] = self._background_workspaces.pop(old_name)
         if old_name in self._bg_params:
             self._bg_params[new_name] = self._bg_params.pop(old_name)
         if old_name in self._log_values:
             self._log_values[new_name] = self._log_values.pop(old_name)
     else:
         logger.warning(f"There already exists a workspace with name {new_name}.")
Code example #19
File: data_model.py Project: robertapplin/mantid
 def update_fit(self, fit_props):
     for fit_prop in fit_props:
         wsname = fit_prop['properties']['InputWorkspace']
         self._fit_results[wsname] = {
             'model': fit_prop['properties']['Function'],
             'status': fit_prop['status']
         }
         self._fit_results[wsname]['results'] = defaultdict(
             list)  # {function_param: [[Y1, E1], [Y2,E2],...] }
         fnames = [
             x.split('=')[-1] for x in findall(
                 'name=[^,]*', fit_prop['properties']['Function'])
         ]
         # get num params for each function (first elem empty as str begins with 'name=')
         # need to remove ties and constraints which are enclosed in ()
         nparams = [
             s.count('=')
             for s in sub(r'=\([^)]*\)', '', fit_prop['properties']
                          ['Function']).split('name=')[1:]
         ]
         params_dict = ADS.retrieve(fit_prop['properties']['Output'] +
                                    '_Parameters').toDict()
         # loop over rows in output workspace to get value and error for each parameter
         istart = 0
         for ifunc, fname in enumerate(fnames):
             for iparam in range(0, nparams[ifunc]):
                 irow = istart + iparam
                 key = '_'.join([
                     fname, params_dict['Name'][irow].split('.')[-1]
                 ])  # funcname_param
                 self._fit_results[wsname]['results'][key].append([
                     params_dict['Value'][irow], params_dict['Error'][irow]
                 ])
                 if key in fit_prop['peak_centre_params']:
                     # param corresponds to a peak centre in TOF which we also need in dspacing
                     # add another entry into the results dictionary
                     key_d = key + "_dSpacing"
                     try:
                         dcen = self._convert_TOF_to_d(
                             params_dict['Value'][irow], wsname)
                         dcen_er = self._convert_TOFerror_to_derror(
                             params_dict['Error'][irow], dcen, wsname)
                         self._fit_results[wsname]['results'][key_d].append(
                             [dcen, dcen_er])
                     except (ValueError, RuntimeError) as e:
                         logger.warning(
                             f"Unable to output {key_d} parameters for TOF={params_dict['Value'][irow]}: "
                             + str(e))
             istart += nparams[ifunc]
         # append the cost function value (in this case always chisq/DOF) as don't let user change cost func
         # always last row in parameters table
         self._fit_results[wsname]['costFunction'] = params_dict['Value'][
             -1]
     self.create_fit_tables()
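
The two regular expressions above pull the function names and per-function parameter counts out of a Mantid Function string. A standalone sketch with a hypothetical two-function model, showing what findall and sub produce:

from re import findall, sub

func_str = ("name=Gaussian,Height=10,PeakCentre=5,Sigma=1;"
            "name=LinearBackground,A0=0,A1=0,ties=(A1=0)")
fnames = [x.split('=')[-1] for x in findall('name=[^,]*', func_str)]
# -> ['Gaussian', 'LinearBackground']
# ties/constraints in parentheses are stripped before counting '=' signs
nparams = [s.count('=')
           for s in sub(r'=\([^)]*\)', '', func_str).split('name=')[1:]]
# -> [3, 2]
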
Code example #20
def old_modules():
    """Check if there are proper versions of Python and numpy."""
    is_python_old = AbinsTestHelpers.old_python()
    if is_python_old:
        logger.warning("Skipping AbinsBasicTest because Python is too old.")

    is_numpy_old = AbinsTestHelpers.is_numpy_valid(np.__version__)
    if is_numpy_old:
        logger.warning("Skipping AbinsBasicTest because numpy is too old.")

    return is_python_old or is_numpy_old
Code example #21
File: model.py Project: stuartcampbell/mantid
 def _check_region_calib_ws_exists(region: str) -> bool:
     """
     Check that the required workspace for use in focussing the provided region of interest exists in the ADS
     :param region: String describing region of interest
     :return: True if present, False if not
     """
     region_ws_name = REGION_CALIB_WS_PREFIX + region
     present = Ads.doesExist(region_ws_name)
     if not present:
         logger.warning(
             f"Cannot focus as the region calibration workspace \"{region_ws_name}\" is not "
             f"present.")
     return present
Code example #22
def check_distances_match(ws1, ws2):
    """
        Checks if the detector distances of the two workspaces are close enough
        @param ws1 : workspace 1
        @param ws2 : workspace 2
    """
    tolerance = 0.01  # m
    l2_1 = ws1.getRun().getLogData('L2').value
    l2_2 = ws2.getRun().getLogData('L2').value
    r1 = ws1.getRunNumber()
    r2 = ws2.getRunNumber()
    if fabs(l2_1 - l2_2) > tolerance:
        logger.warning(
            f'Distance difference out of tolerance {r1}: {l2_1}, {r2}: {l2_2}')
Code example #23
def check_wavelengths_match(ws1, ws2):
    """
        Checks if the wavelengths of the two workspaces are close enough
        @param ws1 : workspace 1
        @param ws2 : workspace 2
    """
    tolerance = 0.01  # AA
    wavelength_1 = get_wavelength(ws1)
    wavelength_2 = get_wavelength(ws2)
    r1 = ws1.getRunNumber()
    r2 = ws2.getRunNumber()
    if fabs(wavelength_1 - wavelength_2) > tolerance:
        logger.warning(
            f'Wavelength difference out of tolerance {r1}: {wavelength_1}, {r2}: {wavelength_2}'
        )
Code example #24
def get_vertical_grouping_pattern(ws):
    """
    Provides vertical grouping pattern and crops to the main detector panel where counts from the beam are measured.
    Used for fitting the horizontal incident beam profile for q resolution calculation.
    TODO: These are static and can be turned to grouping files in instrument/Grouping folder
    :param ws: Empty beam workspace.
    """
    inst_name = mtd[ws].getInstrument().getName()
    min_id = 0
    if 'D11' in inst_name:
        if 'lr' in inst_name:
            step = 128
            max_id = 16384
        elif 'B' in inst_name:
            CropToComponent(InputWorkspace=ws,
                            OutputWorkspace=ws,
                            ComponentNames='detector_center')
            max_id = 49152
            step = 192
        else:
            step = 256
            max_id = 65536
    elif 'D22' in inst_name:
        max_id = 32768
        step = 256
        if 'lr' in inst_name:
            step = 128
            max_id = 16384
        elif 'B' in inst_name:
            CropToComponent(InputWorkspace=ws,
                            OutputWorkspace=ws,
                            ComponentNames='detector_back')
    elif 'D33' in inst_name:
        CropToComponent(InputWorkspace=ws,
                        OutputWorkspace=ws,
                        ComponentNames='back_detector')
        max_id = 32768
        step = 128
    else:
        logger.warning(
            'Instruments other than D11, D22, and D33 are not yet supported for direct beam width fitting.'
        )
        return
    return ','.join([
        "{}-{}".format(start, start + step - 1)
        for start in range(min_id, max_id, step)
    ])
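
The returned string is a Mantid grouping pattern in which each "start-end" entry becomes one vertical tube group. With hypothetical values min_id=0, max_id=512 and step=128 the join would produce:

pattern = ','.join("{}-{}".format(start, start + 127)
                   for start in range(0, 512, 128))
# -> '0-127,128-255,256-383,384-511'
# A pattern like this is then consumed by a grouping algorithm, e.g.
# GroupDetectors(InputWorkspace=ws, OutputWorkspace=ws, GroupingPattern=pattern)
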
Code example #25
File: data_model.py Project: ethoeng/mantid
 def load_files(self, filenames_string):
     self._last_added = []
     filenames = [name.strip() for name in filenames_string.split(",")]
     for filename in filenames:
         ws_name = self._generate_workspace_name(filename)
         try:
             ws = Load(filename, OutputWorkspace=ws_name)
             if ws.getNumberHistograms() == 1:
                 self._loaded_workspaces[ws_name] = ws
                 self._last_added.append(ws_name)
             else:
                 logger.warning(
                     "Invalid number of spectra in workspace {}. Skipping loading of file."
                     .format(ws_name))
         except RuntimeError as e:
             logger.error(
                 "Failed to load file: {}. Error: {}. \n Continuing loading of other files."
                 .format(filename, e))
Code example #26
 def _hyphen_range(self, s):
     """ Takes a range in the form "a-b" and generates a list of numbers between a and b inclusive.
     Also accepts comma-separated ranges: "a-b,c-d,f" will build a list which includes
     numbers from a to b, c to d, and f."""
     s = "".join(s.split())  # removes white space
     r = set()
     for x in s.split(','):
         t = x.split('-')
         if len(t) not in [1, 2]:
             logger.warning(
                 "_hyphen_range was given the argument '{}', which does not seem to be correctly formatted."
                 .format(s))
         if len(t) == 1:
             r.add(int(t[0]))
         else:
             r.update(range(int(t[0]), int(t[1]) + 1))
     return sorted(r)
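
A quick illustration of the expected behaviour (presenter is a hypothetical instance of the class owning _hyphen_range):

presenter._hyphen_range("1-3, 7, 10-12")  # -> [1, 2, 3, 7, 10, 11, 12]
presenter._hyphen_range("5,5,4-5")        # duplicates collapse -> [4, 5]
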
Code example #27
File: data_model.py Project: stuartcampbell/mantid
 def restore_files(self, ws_names):
     for ws_name in ws_names:
         try:
             ws = ADS.retrieve(ws_name)
             if ws.getNumberHistograms() == 1:
                 self._loaded_workspaces[ws_name] = ws
                 # ensure the bg params entry exists before it is checked
                 if ws_name not in self._bg_params:
                     self._bg_params[ws_name] = []
                 if self._bg_params[ws_name]:
                     self._bg_sub_workspaces[ws_name] = ADS.retrieve(ws_name + "_bgsub")
                 else:
                     self._bg_sub_workspaces[ws_name] = None
                 self._last_added.append(ws_name)
             else:
                 logger.warning(
                     f"Invalid number of spectra in workspace {ws_name}. Skipping restoration of workspace.")
         except RuntimeError as e:
             logger.error(
                 f"Failed to restore workspace: {ws_name}. Error: {e}. \n Continuing loading of other files.")
     self.update_log_workspace_group()
Code example #28
File: data_model.py Project: robertapplin/mantid
 def restore_files(self, ws_names):
     self._data_workspaces.add_from_names_dict(ws_names)
     for ws_name in ws_names:
         try:
             ws = ADS.retrieve(ws_name)
             if ws.getNumberHistograms() == 1:
                 bgsubws = None
                 if self._data_workspaces[ws_name].bg_params:
                     bgsubws = ADS.retrieve(
                         self._data_workspaces[ws_name].bgsub_ws_name)
                 self._last_added.append(ws_name)
                 self._data_workspaces[ws_name].loaded_ws = ws
                 self._data_workspaces[ws_name].bgsub_ws = bgsubws
             else:
                 logger.warning(
                     f"Invalid number of spectra in workspace {ws_name}. Skipping restoration of workspace."
                 )
         except RuntimeError as e:
             logger.error(
                 f"Failed to restore workspace: {ws_name}. Error: {e}. \n Continuing loading of other files."
             )
     self.update_log_workspace_group()
Code example #29
 def _add_row_to_table(self,
                       ws_name,
                       row,
                       run_no=None,
                       bank=None,
                       checked=False):
     words = ws_name.split("_")
     if run_no is not None and bank is not None:
         self.view.add_table_row(run_no, bank, checked)
         self.row_numbers[ws_name] = row
     elif len(words) == 4 and words[2] == "bank":
         logger.notice(
             "No sample logs present, determining information from workspace name."
         )
         self.view.add_table_row(words[1], words[3], checked)
         self.row_numbers[ws_name] = row
     else:
         logger.warning(
             "The workspace '{}' was not in the correct naming format. Files should be named in the following way: "
             "INSTRUMENT_RUNNUMBER_bank_BANK. Using workspace name as identifier."
             .format(ws_name))
         self.view.add_table_row(ws_name, "N/A", checked)
         self.row_numbers[ws_name] = row
Code example #30
def needs_processing(property_value, process_reduction_type):
    """
    Checks whether a given unary reduction needs processing or is already cached
    in the ADS under the expected name.
    @param property_value: the string value of the corresponding MultipleFile
                           input property
    @param process_reduction_type: the reduction_type of process
    """
    do_process = False
    ws_name = ''
    if property_value:
        run_number = get_run_number(property_value)
        ws_name = run_number + '_' + process_reduction_type
        if mtd.doesExist(ws_name):
            if isinstance(mtd[ws_name], WorkspaceGroup):
                run = mtd[ws_name][0].getRun()
            else:
                run = mtd[ws_name].getRun()
            if run.hasProperty('ProcessedAs'):
                process = run.getLogData('ProcessedAs').value
                if process == process_reduction_type:
                    logger.notice('Reusing {0} workspace: {1}'.format(
                        process_reduction_type, ws_name))
                else:
                    logger.warning('{0} workspace found, but processed '
                                   'differently: {1}'.format(
                                       process_reduction_type, ws_name))
                    do_process = True
            else:
                logger.warning('{0} workspace found, but missing the '
                               'ProcessedAs flag: {1}'.format(
                                   process_reduction_type, ws_name))
                do_process = True
        else:
            do_process = True
    return [do_process, ws_name]
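
A typical call pattern for this helper; the file path and reduction type are hypothetical, and the caller is expected to stamp the ProcessedAs sample log after reducing so the cache check succeeds on the next call:

do_process, ws_name = needs_processing('/data/d11_absorber_run.nxs', 'Absorber')
if do_process:
    # ... run the actual reduction into ws_name, then record the flag, e.g.
    # AddSampleLog(Workspace=ws_name, LogName='ProcessedAs', LogText='Absorber')
    pass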