def get_sub_splitters(self, split_start_index, split_stop_index, run_start_ns):
    """ Chop the splitters workspace into a sub-splitters workspace covering a subset of the splits
    :param split_start_index: index of the first split to include
    :param split_stop_index: index one past the last split to include (exclusive)
    :param run_start_ns: run start (epoch time) in nanoseconds
    :return: name of the sub-splitters workspace
    """
    # get splitting workspace
    split_ws_name, info_ws_name = self._reductionSetup.get_splitters(throw_not_set=True)
    split_ws = AnalysisDataService.retrieve(split_ws_name)
    sub_split_ws_name = split_ws.name() + '_{0}'.format(split_start_index)

    # split
    if isinstance(split_ws, SplittersWorkspace):
        # splitters workspace: convert absolute times (ns) to seconds relative to run start
        mantidsimple.CreateEmptyTableWorkspace(OutputWorkspace=sub_split_ws_name)
        sub_split_ws = AnalysisDataService.retrieve(sub_split_ws_name)
        sub_split_ws.addColumn('float', 'start')
        sub_split_ws.addColumn('float', 'stop')
        sub_split_ws.addColumn('str', 'index')

        num_rows = split_ws.rowCount()
        for i_row in range(split_start_index, min(split_stop_index, num_rows)):
            start_time = (split_ws.cell(i_row, 0) - run_start_ns) * 1.E-9
            stop_time = (split_ws.cell(i_row, 1) - run_start_ns) * 1.E-9
            target = str(split_ws.cell(i_row, 2))
            sub_split_ws.addRow([start_time, stop_time, target])
        # END-FOR

    elif isinstance(split_ws, MatrixWorkspace):
        # Matrix workspace
        vec_x = split_ws.readX(0)[split_start_index:split_stop_index+1]
        vec_y = split_ws.readY(0)[split_start_index:split_stop_index]
        vec_e = split_ws.readE(0)[split_start_index:split_stop_index]
        mantidsimple.CreateWorkspace(DataX=vec_x, DataY=vec_y, DataE=vec_e, NSpec=1,
                                     OutputWorkspace=sub_split_ws_name)

    elif isinstance(split_ws, ITableWorkspace):
        # Table workspace
        mantidsimple.CreateEmptyTableWorkspace(OutputWorkspace=sub_split_ws_name)
        sub_split_ws = AnalysisDataService.retrieve(sub_split_ws_name)
        sub_split_ws.addColumn('float', 'start')
        sub_split_ws.addColumn('float', 'stop')
        sub_split_ws.addColumn('str', 'index')

        num_rows = split_ws.rowCount()
        for i_row in range(split_start_index, min(split_stop_index, num_rows)):
            start_time = split_ws.cell(i_row, 0)
            stop_time = split_ws.cell(i_row, 1)
            target = split_ws.cell(i_row, 2)
            sub_split_ws.addRow([start_time, stop_time, target])

    else:
        # unsupported format
        raise RuntimeError('Splitting workspace of type {0} is not supported.'.format(type(split_ws)))

    return sub_split_ws_name
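# A minimal sketch (illustrative names and values only) of the matrix-workspace splitter layout that
# the MatrixWorkspace branch above slices: X holds the time boundaries in seconds relative to the run
# start, and Y holds the target index for each interval (-1 is commonly used by Mantid event filtering
# to mean "discard").
import numpy as np
from mantid.simpleapi import CreateWorkspace

boundaries = np.array([0.0, 60.0, 120.0, 180.0])   # 3 intervals
targets = np.array([0.0, -1.0, 1.0])                # keep, skip, keep

demo_splitter = CreateWorkspace(DataX=boundaries, DataY=targets, NSpec=1,
                                OutputWorkspace='demo_matrix_splitter')
# Slicing splits 1..2 would then take X[1:3] and Y[1:2], mirroring the readX/readY logic above.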
def _build_output_table(self, parm_dict, tbl_prop_name):
    """ Build a table workspace with one row of fitted peak parameters per peak found in parm_dict. """
    tbl_name = self.getPropertyValue(tbl_prop_name)
    if not tbl_name:
        return

    par_names = ['Center', 'Intensity', 'Alpha', 'Beta', 'Sigma', 'Gamma']
    par_prefixes = ['pos', 'int', 'alp', 'bet', 'sig', 'gam']
    table = msapi.CreateEmptyTableWorkspace(OutputWorkspace=tbl_name)

    num_peaks = 0
    while par_prefixes[0] + str(num_peaks) in parm_dict:
        num_peaks += 1

    for name in par_names:
        table.addColumn('double', name)

    for idx in range(0, num_peaks):
        par_values = [
            parm_dict[par_prefix + str(idx)] for par_prefix in par_prefixes
        ]
        self.log().debug("par_values: {0}".format(par_values))
        table.addRow(par_values)

    for parm in parm_dict:
        self.log().debug("Parameters for output table: {0}".format(parm))
def test_issues_with_properties(self):
    """
    Tests proper error handling when passing wrong properties or not passing required ones.
    """
    # No InputWorkspace property (required)
    self.assertRaises(RuntimeError, sapi.EnggCalibrate, File='foo', Bank='1')

    # Wrong (misspelled) InputWorkspace property
    self.assertRaises(RuntimeError, sapi.EnggCalibrate, InputWorkpace='anything_goes', Bank='2')

    # misspelled ExpectedPeaks
    tbl = sapi.CreateEmptyTableWorkspace(OutputWorkspace='test_table')
    self.assertRaises(RuntimeError, sapi.EnggCalibrate, Inputworkspace=self.__class__._data_ws,
                      DetectorPositions=tbl, Bank='2', Peaks='2')

    # misspelled DetectorPositions
    self.assertRaises(RuntimeError, sapi.EnggCalibrate, InputWorkspace=self.__class__._data_ws,
                      Detectors=tbl, Bank='2', Peaks='2')

    # There's no output workspace
    self.assertRaises(RuntimeError, sapi.EnggCalibrate, InputWorkspace=self.__class__._data_ws,
                      Bank='1')
def PyExec(self):
    in_ws = mtd[self.getPropertyValue('InputWorkspace')]
    out_ws_name = self.getPropertyValue('OutputWorkspace')

    out_ws = ms.CreateEmptyTableWorkspace(OutputWorkspace=out_ws_name)
    out_ws.addColumn('str', 'statistic')

    stats = {
        'standard_deviation': dict(),
        'maximum': dict(),
        'minimum': dict(),
        'mean': dict(),
        'median': dict(),
    }

    for name in in_ws.getColumnNames():
        try:
            col_stats = _stats_to_dict(
                Stats.getStatistics(
                    np.array([float(v) for v in in_ws.column(name)])))
            for statname in stats.keys():
                stats[statname][name] = col_stats[statname]
            out_ws.addColumn('float', name)
        except ValueError:
            logger.notice('Column \'%s\' is not numerical, skipping' % name)

    for name, stat in stats.items():
        stat1 = dict(stat)
        stat1['statistic'] = name
        out_ws.addRow(stat1)

    self.setProperty('OutputWorkspace', out_ws_name)
def _create_ion_table(self, unit_cell, ions): """ Creates an ion table from the ions and unit cell in the file_data object populated when the phonon/castep file is parsed. @param unit_cell :: The unit cell read from the castep/phonon file @param ions :: The ion data obtained from the castep/phonon file """ ion_table = s_api.CreateEmptyTableWorkspace( OutputWorkspace=self._out_ws_name) ion_table.addColumn('str', 'Species') ion_table.addColumn('int', 'FileIndex') ion_table.addColumn('int', 'Number') ion_table.addColumn('float', 'FractionalX') ion_table.addColumn('float', 'FractionalY') ion_table.addColumn('float', 'FractionalZ') ion_table.addColumn('float', 'CartesianX') ion_table.addColumn('float', 'CartesianY') ion_table.addColumn('float', 'CartesianZ') ion_table.addColumn('float', 'Isotope') self._convert_to_cartesian_coordinates(unit_cell, ions) for ion in ions: ion_table.addRow([ ion['species'], ion['index'], ion['bond_number'], ion['fract_coord'][0], ion['fract_coord'][1], ion['fract_coord'][2], ion['cartesian_coord'][0], ion['cartesian_coord'][1], ion['cartesian_coord'][2], ion['isotope_number'] ])
def generateSplitterWorkspace(self, fragment):
    r"""
    Create a table workspace with time intervals to keep

    Parameters
    ----------
    fragment: str
        a-b start and end of time fragment to filter out
    """
    inf = 172800  # a run two full days long
    a, b = fragment.split('-')
    b = inf if 'end' in b else float(b)
    a = float(a)
    splitter = sapi.CreateEmptyTableWorkspace(
        OutputWorkspace=tws('splitter'))
    splitter.addColumn('double', 'start')
    splitter.addColumn('double', 'stop')
    splitter.addColumn('str', 'target')
    if a == 0.0:
        splitter.addRow([b, inf, '0'])
    elif b == inf:
        splitter.addRow([0, a, '0'])
    else:
        splitter.addRow([0, a, '0'])
        splitter.addRow([b, inf, '0'])
    self._temps.extend(['splitted_unfiltered', 'TOFCorrectWS'])
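# A minimal standalone sketch (assumed names, not part of the class above) of the same
# start/stop/target table convention: filtering out the window 300-600 s leaves two "keep"
# intervals, [0, 300] and [600, inf], both routed to target '0'.
from mantid.simpleapi import CreateEmptyTableWorkspace

demo_splitter = CreateEmptyTableWorkspace(OutputWorkspace='demo_splitter')
for col_type, col_name in [('double', 'start'), ('double', 'stop'), ('str', 'target')]:
    demo_splitter.addColumn(col_type, col_name)
demo_splitter.addRow([0.0, 300.0, '0'])       # keep everything before the excluded fragment
demo_splitter.addRow([600.0, 172800.0, '0'])  # keep everything after it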
def _calcIntegrationSpectra(self, vanWS): """ This does the real calculations behind _applySensitivityCorrection(), essentially a call to the 'Integration' algorithm, for when we are given raw data from a Vanadium run. @param vanWS :: workspace with data from a Vanadium run @returns Integration workspace with Vanadium spectra integration values, as a table workspace with one row per spectrum """ expectedDim = 'Time-of-flight' dimType = vanWS.getXDimension().getName() if expectedDim != dimType: raise ValueError( "This algorithm expects a workspace with %s X dimension, but " "the X dimension of the input workspace is: '%s'" % (expectedDim, dimType)) integWS = self._integrateSpectra(vanWS) if 1 != integWS.blocksize() or integWS.getNumberHistograms( ) < vanWS.getNumberHistograms(): raise RuntimeError( "Error while integrating vanadium workspace, the Integration algorithm " "produced a workspace with %d bins and %d spectra. The workspace " "being integrated has %d spectra." % (integWS.blocksize(), integWS.getNumberHistograms(), vanWS.getNumberHistograms())) integTbl = sapi.CreateEmptyTableWorkspace( OutputWorkspace='__vanIntegTbl') integTbl.addColumn('double', 'Spectra Integration') for i in range(integWS.getNumberHistograms()): integTbl.addRow([integWS.readY(i)[0]]) return integTbl
def create_params_table(difc, tzero, difa): """ create the params table from the output from the calibration @param difc :: the list of difc values to add to the table @param tzero :: the list of tzero values to add to the table @param difa :: the list of difa values to add to the table """ param_table = simple.CreateEmptyTableWorkspace( OutputWorkspace="engg_calibration_banks_parameters") # setup table param_table.addColumn("int", "bankid") param_table.addColumn("double", "difc") param_table.addColumn("double", "difa") param_table.addColumn("double", "tzero") # add data to table for i in range(len(difc)): next_row = { "bankid": i, "difc": difc[i], "difa": difa[i], "tzero": tzero[i] } param_table.addRow(next_row)
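# Hypothetical usage of create_params_table: one entry per bank in each list, so a two-bank
# calibration would be written out as two rows (the values below are made up for illustration).
create_params_table(difc=[18400.0, 18450.0],
                    tzero=[4.0, -2.5],
                    difa=[0.0, 0.0])
# The resulting "engg_calibration_banks_parameters" table then has columns
# bankid | difc | difa | tzero, with bankid running 0, 1, ...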
def _create_fit_results_table(self, rows): table = mantid.CreateEmptyTableWorkspace(OutputWorkspace=self.FIT_RESULTS_TABLE_NAME) for col in self.FIT_PARAMS: table.addColumn("double", col) for row in rows: table.addRow(row) return table
def _build_output_lattice_table(self, lattice_params): table_name = self.getPropertyValue(self.PROP_OUT_LATTICE_PARAMS) table = mantid.CreateEmptyTableWorkspace(OutputWorkspace=table_name, StoreInADS=False) for param in self.LATTICE_TABLE_PARAMS: table.addColumn("double", param.split("_")[-1]) table.addRow([float(lattice_params[param]) for param in self.LATTICE_TABLE_PARAMS]) return table
def _create_lattice_params_table(self, output_name=LATTICE_PARAMS_TABLE_NAME):
    lattice_params = mantid.CreateEmptyTableWorkspace(
        OutputWorkspace=output_name)
    for param in self.LATTICE_PARAMS:
        lattice_params.addColumn("double", param)
    lattice_params.addRow([random.random() for _ in self.LATTICE_PARAMS])
    return lattice_params
def _createOutputWorkspaces(self):
    """ Create the output table workspaces: one holding per-pixel detector information and one
    mapping scan/Pt. numbers to data files and starting detector IDs.
    """
    self._myPixelInfoTableWS = api.CreateEmptyTableWorkspace(
        OutputWorkspace=self.getPropertyValue('OutputWorkspace'))
    self._myPixelInfoTableWS.addColumn("int", "DetectorID")
    self._myPixelInfoTableWS.addColumn("double", "X")
    self._myPixelInfoTableWS.addColumn("double", "Y")
    self._myPixelInfoTableWS.addColumn("double", "Z")
    self._myPixelInfoTableWS.addColumn("int", "OriginalDetID")

    self._myScanPtFileTableWS = api.CreateEmptyTableWorkspace(
        OutputWorkspace=self.getPropertyValue('DetectorTableWorkspace'))
    self._myScanPtFileTableWS.addColumn("int", "Scan")
    self._myScanPtFileTableWS.addColumn("int", "Pt")
    self._myScanPtFileTableWS.addColumn("str", "Filename")
    self._myScanPtFileTableWS.addColumn("int", "StartDetID")

    return
def apply_vanadium_corrections(parent, ws, indices, vanadium_ws, van_integration_ws, van_curves_ws,
                               progress_range=None):
    """
    DEPRECATED: not used in UI, only in deprecated functions (EnggCalibrateFull, EnggVanadiumCorrections
    and EnggFocus)

    Apply the EnggVanadiumCorrections algorithm on the workspace given, by using the algorithm
    EnggVanadiumCorrections

    @param parent :: parent (Mantid) algorithm that wants to run this
    @param ws :: workspace to correct (modified in place)
    @param indices :: workspace indices that are being processed (those not included will be ignored)
    @param vanadium_ws :: workspace with data from a Vanadium run
    @param van_integration_ws :: alternatively to vanadium_ws, pre-calculated integration from Vanadium data
    @param van_curves_ws :: alternatively to vanadium_ws, pre-calculated bank curves from Vanadium data
    @param progress_range :: pair for (startProgress, endProgress) with respect to the parent algorithm
    """
    if vanadium_ws and vanadium_ws.getNumberHistograms() < len(indices):
        raise ValueError(
            "Inconsistency in inputs: the Vanadium workspace has fewer spectra (%d) than "
            "the number of workspace indices to process (%d)" %
            (vanadium_ws.getNumberHistograms(), len(indices)))
    elif van_integration_ws and van_curves_ws:
        # filter only indices from van_integration_ws (crop the table)
        tbl = mantid.CreateEmptyTableWorkspace(
            OutputWorkspace="__vanadium_integration_ws")
        tbl.addColumn('double', 'Spectra Integration')
        for i in indices:
            tbl.addRow([van_integration_ws.cell(i, 0)])
        van_integration_ws = tbl

    # These corrections rely on ToF<->Dspacing conversions, so they're done after the calibration step
    progress_params = dict()
    if progress_range:
        progress_params["startProgress"] = progress_range[0]
        progress_params["endProgress"] = progress_range[1]

    alg = parent.createChildAlgorithm('EnggVanadiumCorrections', **progress_params)
    if ws:
        alg.setProperty('Workspace', ws)
    if vanadium_ws:
        alg.setProperty('VanadiumWorkspace', vanadium_ws)
    if van_integration_ws:
        alg.setProperty('IntegrationWorkspace', van_integration_ws)
    if van_curves_ws:
        alg.setProperty('CurvesWorkspace', van_curves_ws)

    alg.execute()
def generateOutputParTable(name, difc, zero): """ Produces a table workspace with the two fitted calibration parameters @param name :: the name to use for the table workspace that is created here @param difc :: difc calibration parameter @param zero :: zero calibration parameter """ tbl = sapi.CreateEmptyTableWorkspace(OutputWorkspace=name) tbl.addColumn('double', 'difc') tbl.addColumn('double', 'zero') tbl.addRow([float(difc), float(zero)])
def _generateTableWS(self, vancorrdict): """ Create table workspace """ tablews = api.CreateEmptyTableWorkspace( OutputWorkspace="tempcorrtable") tablews.addColumn('int', 'DetID') tablews.addColumn('double', 'Correction') for detid in sorted(vancorrdict.keys()): tablews.addRow([detid, vancorrdict[detid]]) return tablews
def create_indexed_workspace(self, fractional_peaks, ndim, hklm):
    """Create a TableWorkspace that contains indexed peak data.

    This produces a TableWorkspace that looks like a PeaksWorkspace but
    with the additional index columns included. In future releases support
    for indexing should be added to the PeaksWorkspace data type itself.

    :param fractional_peaks: the peaks workspace containing peaks with fractional HKL values.
    :param ndim: the number of additional indexing columns to add.
    :param hklm: the new higher dimensional Miller indices to add.
    :returns: a table workspace with the indexed peak data
    """
    # Create table with the number of columns we need
    types = [
        'int', 'long64', 'double', 'double', 'double', 'double', 'double',
        'double', 'double', 'double', 'double', 'float', 'str', 'float',
        'float', 'V3D', 'V3D'
    ]
    name = self.getPropertyValue("OutputWorkspace")
    indexed = api.CreateEmptyTableWorkspace(OutputWorkspace=name)
    names = fractional_peaks.getColumnNames()

    # Insert the extra columns for the additional indices
    for i in range(ndim - 3):
        names.insert(5 + i, 'm{}'.format(i + 1))
        types.insert(5 + i, 'double')

    names = np.array(names)
    types = np.array(types)

    # Create columns in the table workspace
    for name, column_type in zip(names, types):
        indexed.addColumn(column_type, name)

    # Copy all columns from original workspace, ignoring HKLs
    column_data = []
    idx = np.arange(0, names.size)
    hkl_mask = (idx < 2) | (idx > 4 + (ndim - 3))
    for name in names[hkl_mask]:
        column_data.append(fractional_peaks.column(name))

    # Insert the additional HKL columns into the data
    for i, col in enumerate(hklm.T.tolist()):
        column_data.insert(i + 2, col)

    # Insert the columns into the table workspace
    for i in range(fractional_peaks.rowCount()):
        row = [column_data[j][i] for j in range(indexed.columnCount())]
        indexed.addRow(row)

    return indexed
def _generate_props_table(self): """ Creates a table workspace with values calculated in algorithm. """ props_table = ms.CreateEmptyTableWorkspace(OutputWorkspace=self._props_output_workspace) props_table.addColumn('int', 'NegativeXMinIndex') props_table.addColumn('int', 'PositiveXMinIndex') props_table.addColumn('int', 'PositiveXMaxIndex') props_table.addRow([int(self._negative_min_index), int(self._positive_min_index), int(self._positive_max_index)]) self.setProperty('OutputPropertiesTable', self._props_output_workspace)
def generate_output_param_table(name, difa, difc, tzero):
    """
    Produces a table workspace with the three fitted calibration parameters

    @param name :: the name to use for the table workspace that is created here
    @param difa :: DIFA calibration parameter (GSAS parameter)
    @param difc :: DIFC calibration parameter
    @param tzero :: TZERO calibration parameter
    """
    tbl = mantid.CreateEmptyTableWorkspace(OutputWorkspace=name)
    tbl.addColumn('double', 'DIFA')
    tbl.addColumn('double', 'DIFC')
    tbl.addColumn('double', 'TZERO')
    tbl.addRow([float(difa), float(difc), float(tzero)])
def transposeFitParametersTable(params_table, output_table=None): """ Transpose the parameter table created from a multi domain Fit. This function will make the output consistent with PlotPeakByLogValue. @param params_table - the parameter table output from Fit. @param output_table - name to call the transposed table. If omitted, the output_table will be the same as the params_table """ params_table = s_api.mtd[params_table] table_ws = '__tmp_table_ws' table_ws = s_api.CreateEmptyTableWorkspace(OutputWorkspace=table_ws) param_names = params_table.column(0)[:-1] # -1 to remove cost function param_values = params_table.column(1)[:-1] param_errors = params_table.column(2)[:-1] # Find the number of parameters per function func_index = param_names[0].split('.')[0] num_params = 0 for i, name in enumerate(param_names): if name.split('.')[0] != func_index: num_params = i break # Create columns with parameter names for headers column_names = [ '.'.join(name.split('.')[1:]) for name in param_names[:num_params] ] column_error_names = [name + '_Err' for name in column_names] column_names = list(zip(column_names, column_error_names)) table_ws.addColumn('double', 'axis-1') for name, error_name in column_names: table_ws.addColumn('double', name) table_ws.addColumn('double', error_name) # Output parameter values to table row for i in range(0, params_table.rowCount() - 1, num_params): row_values = param_values[i:i + num_params] row_errors = param_errors[i:i + num_params] row = [value for pair in zip(row_values, row_errors) for value in pair] row = [i / num_params] + row table_ws.addRow(row) if output_table is None: output_table = params_table.name() s_api.RenameWorkspace(table_ws.name(), OutputWorkspace=output_table)
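# Illustrative sketch of the table layout transposeFitParametersTable expects: the Name/Value/Error
# table produced by a multi-domain Fit, with parameters grouped per domain (f0., f1., ...) and a
# final 'Cost function value' row.  All names and numbers below are made up.
from mantid.simpleapi import CreateEmptyTableWorkspace

demo_params = CreateEmptyTableWorkspace(OutputWorkspace='demo_multidomain_params')
demo_params.addColumn('str', 'Name')
demo_params.addColumn('double', 'Value')
demo_params.addColumn('double', 'Error')
for row in [['f0.Height', 10.0, 0.5], ['f0.Sigma', 1.2, 0.1],
            ['f1.Height', 9.0, 0.4], ['f1.Sigma', 1.1, 0.1],
            ['Cost function value', 2.3, 0.0]]:
    demo_params.addRow(row)

transposeFitParametersTable('demo_multidomain_params', 'demo_params_transposed')
# 'demo_params_transposed' now has one row per domain, with columns
# axis-1 | Height | Height_Err | Sigma | Sigma_Err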
def generate_output_param_table(name, difa, difc, tzero):
    """
    DEPRECATED: not used in UI, only in deprecated functions (EnggCalibrate, EnggFitTOFFromPeaks)

    Produces a table workspace with the three fitted calibration parameters

    @param name :: the name to use for the table workspace that is created here
    @param difa :: DIFA calibration parameter (GSAS parameter)
    @param difc :: DIFC calibration parameter
    @param tzero :: TZERO calibration parameter
    """
    tbl = mantid.CreateEmptyTableWorkspace(OutputWorkspace=name)
    tbl.addColumn('double', 'DIFA')
    tbl.addColumn('double', 'DIFC')
    tbl.addColumn('double', 'TZERO')
    tbl.addRow([float(difa), float(difc), float(tzero)])
def write_diff_consts_to_table_from_prm(prm_filepath):
    """
    Read diffraction constants from a prm file and write them to a table workspace
    :param prm_filepath: path to prm file
    """
    diff_consts = read_diff_constants_from_prm(prm_filepath)
    # make table
    table = mantid.CreateEmptyTableWorkspace(
        OutputWorkspace=DIFF_CONSTS_TABLE_NAME)
    table.addColumn("int", "Index")
    table.addColumn("double", "DIFA")
    table.addColumn("double", "DIFC")
    table.addColumn("double", "TZERO")
    # add one row per spectrum to the table
    for ispec in range(len(diff_consts)):
        table.addRow([ispec, *diff_consts[ispec, :]])
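# A minimal sketch of the layout write_diff_consts_to_table_from_prm assumes:
# read_diff_constants_from_prm appears to return an N x 3 array (one row per spectrum, columns in
# the DIFA/DIFC/TZERO order used above).  The values and table name below are made up.
import numpy as np
from mantid.simpleapi import CreateEmptyTableWorkspace

demo_consts = np.array([[0.0, 18400.0, 4.0],
                        [0.0, 18450.0, -2.5]])
demo_table = CreateEmptyTableWorkspace(OutputWorkspace='demo_diff_consts')
for col_type, col_name in [("int", "Index"), ("double", "DIFA"),
                           ("double", "DIFC"), ("double", "TZERO")]:
    demo_table.addColumn(col_type, col_name)
for ispec in range(len(demo_consts)):
    # the star-unpacking spreads the three constants across the DIFA/DIFC/TZERO columns
    demo_table.addRow([ispec, *demo_consts[ispec, :]])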
def _initialize_adjustments_table(self, table_name): r"""Create a table with appropriate column names for saving the adjustments to each component""" table = api.CreateEmptyTableWorkspace(OutputWorkspace=table_name) item_types = [ 'str', # component name 'double', 'double', 'double', # cartesian coordinates 'double', 'double', 'double', # direction cosines of axis of rotation 'double' ] # angle of rotation for column_name, column_type in zip(self.adjustment_items, item_types): table.addColumn(name=column_name, type=column_type) return table
def _initialize_displacements_table(self, table_name): r"""Create a table with appropriate column names for saving the relative displacements to each component""" table = api.CreateEmptyTableWorkspace(OutputWorkspace=table_name) item_types = [ 'str', # component name 'double', # change in the distance between the component and the sample 'double', 'double', 'double', # relative displacement in cartesian coordinates 'double', 'double', 'double' ] # relative displacement in Euler angles for column_name, column_type in zip(self.displacement_items, item_types): table.addColumn(name=column_name, type=column_type) return table
def PyExec(self):
    in_ws = mtd[self.getPropertyValue('InputWorkspace')]
    indices_list = self.getPropertyValue('ColumnIndices')
    out_ws_name = self.getPropertyValue('OutputWorkspace')
    column_names = in_ws.getColumnNames()

    # If column indices are not provided, then default to _ALL_ columns
    if len(indices_list) > 0:
        indices_list = [int(x) for x in indices_list.split(',')]
    else:
        indices_list = range(len(column_names))

    out_ws = ms.CreateEmptyTableWorkspace(OutputWorkspace=out_ws_name)
    out_ws.addColumn('str', 'Statistic')

    stats = collections.OrderedDict([
        ('StandardDev', collections.OrderedDict()),
        ('Minimum', collections.OrderedDict()),
        ('Median', collections.OrderedDict()),
        ('Maximum', collections.OrderedDict()),
        ('Mean', collections.OrderedDict()),
    ])

    for index in indices_list:
        column_name = column_names[index]
        try:
            column_data = np.array([float(v) for v in in_ws.column(index)])
            col_stats = _stats_to_dict(Stats.getStatistics(column_data))
            for stat_name in stats:
                stats[stat_name][column_name] = col_stats[stat_name]
            out_ws.addColumn('float', column_name)
        except (ValueError, RuntimeError):
            logger.notice('Column \'%s\' is not numerical, skipping' % column_name)

    for stat_name, stat_values in iteritems(stats):
        stat_row = collections.OrderedDict(stat_values)
        stat_row['Statistic'] = stat_name
        out_ws.addRow(stat_row)

    self.setProperty('OutputWorkspace', out_ws)
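# Both statistics builders above rely on TableWorkspace.addRow accepting a dict keyed by column
# name, so each statistic's per-column values can be written without tracking column order.
# Minimal illustration with made-up names and values:
from mantid.simpleapi import CreateEmptyTableWorkspace

demo_stats = CreateEmptyTableWorkspace(OutputWorkspace='demo_stats_table')
demo_stats.addColumn('str', 'Statistic')
demo_stats.addColumn('float', 'ColumnA')
demo_stats.addColumn('float', 'ColumnB')
demo_stats.addRow({'Statistic': 'Mean', 'ColumnA': 1.5, 'ColumnB': 4.0})
demo_stats.addRow({'Statistic': 'Maximum', 'ColumnA': 3.0, 'ColumnB': 7.5})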
def correctMisalignedTubes(ws, calibrationTable, peaksTable, spec, idealTube, fitPar, threshold=10):
    """ Correct misaligned tubes due to poor fitting results during the first round of calibration.

    Misaligned tubes are first identified according to a tolerance applied to the absolute difference
    between the fitted tube positions and the mean across all tubes.

    The FindPeaks algorithm is then used to find a better fit with the ideal tube positions as starting
    parameters for the peak centers.

    From the refitted peaks the positions of the detectors in the tube are recalculated.

    @param ws: the workspace to get the tube geometry from
    @param calibrationTable: the calibration table output from running calibration
    @param peaksTable: the table containing the fitted peak centers from calibration
    @param spec: the tube spec for the instrument
    @param idealTube: the ideal tube for the instrument
    @param fitPar: the fitting parameters for calibration
    @param threshold: tolerance defining whether a peak is outside the acceptable range
    @return table of corrected detector positions
    """
    table_name = calibrationTable.name() + 'Corrected'
    corrections_table = mantid.CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    corrections_table.addColumn('int', "Detector ID")
    corrections_table.addColumn('V3D', "Detector Position")

    mean_peaks, bad_tubes = findBadPeakFits(peaksTable, threshold)

    for index in bad_tubes:
        print("Refitting tube %s" % spec.getTubeName(index))
        tube_dets, _ = spec.getTube(index)
        getPoints(ws, idealTube.getFunctionalForms(), fitPar, tube_dets)
        tube_ws = mantid.mtd['TubePlot']
        fit_ws = mantid.FindPeaks(InputWorkspace=tube_ws, WorkspaceIndex=0,
                                  PeakPositions=fitPar.getPeaks(), PeaksList='RefittedPeaks')
        centers = [row['centre'] for row in fit_ws]
        detIDList, detPosList = getCalibratedPixelPositions(ws, centers, idealTube.getArray(), tube_dets)

        for det_id, pos in zip(detIDList, detPosList):
            corrections_table.addRow({'Detector ID': det_id, 'Detector Position': kernel.V3D(*pos)})

    return corrections_table
def _create_vulcan_binning_table(binning_table_name, binning_workspace_low_res, binning_workspace_high_res):
    """ Create a binning table for binning data into various resolutions
    :param binning_table_name: name of the binning TableWorkspace to create
    :param binning_workspace_low_res: workspace providing the low-resolution binning
    :param binning_workspace_high_res: workspace providing the high-resolution binning
    :return: the binning TableWorkspace
    """
    # create a TableWorkspace
    api.CreateEmptyTableWorkspace(OutputWorkspace=binning_table_name)
    bin_table_ws = AnalysisDataService.retrieve(binning_table_name)
    bin_table_ws.addColumn('str', 'WorkspaceIndexes')
    bin_table_ws.addColumn('str', 'BinningParameters')

    # add a row for simple case
    bin_table_ws.addRow(['0, 1', '{0}: {1}'.format(binning_workspace_low_res, 0)])
    bin_table_ws.addRow(['2', '{0}: {1}'.format(binning_workspace_high_res, 0)])

    return bin_table_ws
def _create_dummy_fit_parameters_no_hydrogen(): params = ms.CreateEmptyTableWorkspace(OutputWorkspace='__VesuvioCorrections_test_fit_params') params.addColumn('str', 'Name') params.addColumn('float', 'Value') params.addColumn('float', 'Error') params.addRow(['f0.Mass', 16.0, 0.0]) params.addRow(['f0.Width', 10.0, 0.0]) params.addRow(['f0.Intensity', 4.03064, 0.41762]) params.addRow(['f1.Mass', 27.0, 0.0]) params.addRow(['f1.Width', 13.0, 0.0]) params.addRow(['f1.Intensity', 3.23823, 0.447593]) params.addRow(['f2.Mass', 133.0, 0.0]) params.addRow(['f2.Width', 30.0, 0.0]) params.addRow(['f2.Intensity', 0.882613, 0.218913]) params.addRow(['Cost function value', 3.19573, 0.0]) return params
def applyVanadiumCorrections(parent, ws, indices, vanWS, vanIntegWS, vanCurvesWS):
    """
    Apply the EnggVanadiumCorrections algorithm on the workspace given, by using the algorithm
    EnggVanadiumCorrections

    @param parent :: parent (Mantid) algorithm that wants to run this
    @param ws :: workspace to correct (modified in place)
    @param indices :: workspace indices that are being processed (those not included will be ignored)
    @param vanWS :: workspace with data from a Vanadium run
    @param vanIntegWS :: alternatively to vanWS, pre-calculated integration from Vanadium data
    @param vanCurvesWS :: alternatively to vanWS, pre-calculated bank curves from Vanadium data
    """
    if vanWS and vanWS.getNumberHistograms() < len(indices):
        raise ValueError(
            "Inconsistency in inputs: the Vanadium workspace has fewer spectra (%d) than "
            "the number of workspace indices to process (%d)" %
            (vanWS.getNumberHistograms(), len(indices)))
    elif vanIntegWS and vanCurvesWS:
        # filter only indices from vanIntegWS (crop the table)
        tbl = sapi.CreateEmptyTableWorkspace(
            OutputWorkspace="__vanadium_integration_ws")
        tbl.addColumn('double', 'Spectra Integration')
        for i in indices:
            tbl.addRow([vanIntegWS.cell(i, 0)])
        vanIntegWS = tbl

    # These corrections rely on ToF<->Dspacing conversions, so they're done after the calibration step
    # sapi.EnggVanadiumCorrections(Workspace=ws, VanadiumWorkspace=vanWS,
    #                              IntegrationWorkspace=vanIntegWS,
    #                              CurvesWorkspace=vanCurvesWS)
    alg = parent.createChildAlgorithm('EnggVanadiumCorrections')
    if ws:
        alg.setProperty('Workspace', ws)
    if vanWS:
        alg.setProperty('VanadiumWorkspace', vanWS)
    if vanIntegWS:
        alg.setProperty('IntegrationWorkspace', vanIntegWS)
    if vanCurvesWS:
        alg.setProperty('CurvesWorkspace', vanCurvesWS)

    alg.execute()
def _create_bond_table(self, bonds): """ Creates a bond table from the bond data obtained when the castep file is read @param bonds :: The bond data read from the castep file """ if bonds is None or len(bonds) == 0: raise RuntimeError('No bonds found in CASTEP file') bond_table = s_api.CreateEmptyTableWorkspace( OutputWorkspace=self._out_ws_name) bond_table.addColumn('str', 'SpeciesA') bond_table.addColumn('int', 'NumberA') bond_table.addColumn('str', 'SpeciesB') bond_table.addColumn('int', 'NumberB') bond_table.addColumn('float', 'Length') bond_table.addColumn('float', 'Population') for bond in bonds: bond_table.addRow([ bond['atom_a'][0], bond['atom_a'][1], bond['atom_b'][0], bond['atom_b'][1], bond['length'], bond['population'] ])
def _create_simple_binning_table(binning_table_name):
    """ Create a simple binning table
    :return: the binning TableWorkspace
    """
    # TODO FIXME : make the binning parameters more flexible, e.g.
    #   tof0 = 10000.
    #   delta = 0.001
    #   num_pts = 200

    # create a TableWorkspace
    api.CreateEmptyTableWorkspace(OutputWorkspace=binning_table_name)
    bin_table_ws = AnalysisDataService.retrieve(binning_table_name)
    bin_table_ws.addColumn('str', 'WorkspaceIndexes')
    bin_table_ws.addColumn('str', 'BinningParameters')

    # add a row for simple case
    bin_table_ws.addRow(['0', '10000, -0.002, 13000'])

    return bin_table_ws
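# Hypothetical usage of the helper above: the second column appears to hold Rebin-style
# "start, step, stop" parameters (a negative step conventionally meaning logarithmic binning in
# Mantid), keyed to the workspace indexes listed in the first column.
from mantid.api import AnalysisDataService

_create_simple_binning_table('demo_binning_table')
demo_binning = AnalysisDataService.retrieve('demo_binning_table')
print(demo_binning.row(0))  # {'WorkspaceIndexes': '0', 'BinningParameters': '10000, -0.002, 13000'}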