def test_algorithm_uses_right_fit_window(self):
    x_val = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
    y_val = [0, 0, 0, 0, 1, 7, 0, 0, 0, 0, 10, 7]
    ws = CreateWorkspace(x_val * 2, y_val + [0] * len(y_val), NSpec=2)
    table = CreateEmptyTableWorkspace()
    table.addColumn("float", "Centre")
    table.addRow([20])

    with mock.patch('plugins.algorithms.WorkflowAlgorithms.FitGaussianPeaks.FitGaussianPeaks.'
                    'estimate_single_parameters') as mock_estimate_params:
        mock_estimate_params.return_value = None
        FitGaussianPeaks(InputWorkspace=ws, PeakGuessTable=table, EstimateFitWindow=False, FitWindowSize=11)
        centre_index = 10
        # win_size is (FitWindowSize - 1) / 2, which is the form estimate_single_parameters expects
        win_size = 5

        arguments = mock_estimate_params.call_args_list[0][0]
        self.assertSequenceEqual(list(arguments[0]), x_val)
        self.assertSequenceEqual(list(arguments[1]), y_val)
        self.assertEqual(arguments[2], centre_index)
        self.assertEqual(arguments[3], win_size)
        self.assertEqual(len(arguments), 4)
def test_algorithm_estimates_fit_window(self):
    x_val = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
    y_val = [0, 0, 0, 0, 1, 7, 0, 0, 0, 0, 10, 7]
    ws = CreateWorkspace(x_val * 2, y_val + [0] * len(y_val), NSpec=2)
    table = CreateEmptyTableWorkspace()
    table.addColumn("float", "Centre")
    table.addRow([20])

    with mock.patch('plugins.algorithms.WorkflowAlgorithms.FitGaussianPeaks.FitGaussianPeaks.'
                    'estimate_single_parameters') as mock_estimate_params:
        mock_estimate_params.return_value = None
        FitGaussianPeaks(InputWorkspace=ws, PeakGuessTable=table, EstimateFitWindow=True, FitWindowSize=11)
        centre_index = 10
        # win_size in this case is calculated from EstimatePeakSigma (estimated to be 2);
        # FitWindowSize is ignored when EstimateFitWindow=True
        win_size = 2

        arguments = mock_estimate_params.call_args_list[0][0]
        self.assertSequenceEqual(list(arguments[0]), x_val)
        self.assertSequenceEqual(list(arguments[1]), y_val)
        self.assertEqual(arguments[2], centre_index)
        self.assertEqual(arguments[3], win_size)
        self.assertEqual(len(arguments), 4)
def trim_calibration_table(input_workspace: InputTable, output_workspace: Optional[str] = None) -> TableWorkspace:
    r"""
    Discard the X and Z pixel coordinates, since we are only interested in the calibrated Y-coordinate

    :param input_workspace: calibration table with columns for detector ID and V3D detector position
    :param output_workspace: name of the trimmed table; defaults to overwriting the input table
    :return: handle to the trimmed table workspace
    """
    if output_workspace is None:
        output_workspace = str(input_workspace)  # overwrite the input table
    # Extract detector ID's and Y-coordinates from the input table
    table = mtd[str(input_workspace)]
    detector_ids = table.column(0)
    y_coordinates = [v.Y() for v in table.column(1)]
    # create the (empty) trimmed table
    table_trimmed = CreateEmptyTableWorkspace(OutputWorkspace=output_workspace)
    table_trimmed.addColumn(type='int', name='Detector ID')
    table_trimmed.addColumn(type='double', name='Detector Y Coordinate')
    # fill the rows of the trimmed table
    for detector_id, y_coordinate in zip(detector_ids, y_coordinates):
        table_trimmed.addRow([detector_id, y_coordinate])
    return table_trimmed
def _create_indexed_workspace(self, fractional_peaks, ndim, hklm):
    # Create table with the number of columns we need
    indexed = CreateEmptyTableWorkspace()
    names = fractional_peaks.getColumnNames()
    types = fractional_peaks.columnTypes()

    # Insert the extra columns for the additional indices
    for i in range(ndim - 3):
        names.insert(5 + i, 'm{}'.format(i + 1))
        types.insert(5 + i, 'double')

    names = np.array(names)
    types = np.array(types)

    # Create columns in the table workspace
    for name, column_type in zip(names, types):
        indexed.addColumn(column_type, name)

    # Copy all columns from original workspace, ignoring HKLs
    column_data = []
    idx = np.arange(0, names.size)
    hkl_mask = (idx < 5) | (idx > 4 + (ndim - 3))
    for name in names[hkl_mask]:
        column_data.append(fractional_peaks.column(name))

    # Insert the additional HKL columns into the data
    for i, col in enumerate(hklm.T.tolist()):
        column_data.insert(i + 2, col)

    # Insert the columns into the table workspace
    for i in range(fractional_peaks.rowCount()):
        row = [column_data[j][i] for j in range(indexed.columnCount())]
        indexed.addRow(row)

    return indexed
def generate_peak_guess_table(self, xvals, peakids):
    peak_table = CreateEmptyTableWorkspace(StoreInADS=False)
    peak_table.addColumn(type='float', name='centre')
    for peak_idx in sorted(peakids):
        peak_table.addRow([xvals[peak_idx]])
    return peak_table
def PyExec(self):
    self.workspace = self.getProperty("InputWorkspace").value
    outws_name = self.getPropertyValue("OutputWorkspace")

    # create table and columns
    outws = CreateEmptyTableWorkspace(OutputWorkspace=outws_name)
    columns = ["PeakCentre", "PeakCentreError", "Sigma", "SigmaError", "Height", "HeightError", "chiSq"]
    nextrow = dict.fromkeys(["WorkspaceIndex"] + columns + ["FitStatus"])

    outws.addColumn(type="int", name="WorkspaceIndex", plottype=1)  # x
    for col in columns:
        outws.addColumn(type="double", name=col)
    outws.addColumn(type="str", name="FitStatus")

    nb_hist = self.workspace.getNumberHistograms()
    for idx in range(nb_hist):
        nextrow["WorkspaceIndex"] = idx
        result = self.do_fit_gaussian(idx)
        if not result:
            for col in columns:
                nextrow[col] = 0
            nextrow["FitStatus"] = "failed"
        else:
            nextrow["FitStatus"] = result[0]
            nextrow["chiSq"] = result[1]
            ptable = result[3]
            # the last row of the parameter table holds the cost function value, so skip it
            for num in range(ptable.rowCount() - 1):
                row = ptable.row(num)
                name = row["Name"]
                nextrow[name] = row["Value"]
                nextrow[name + "Error"] = row["Error"]
            DeleteWorkspace(result.OutputParameters)
            DeleteWorkspace(result.OutputNormalisedCovarianceMatrix)
        outws.addRow(nextrow)

    self.setProperty("OutputWorkspace", outws)
    return
def readCalibrationFile(table_name, in_path):
    """Read a calibration table from file

    This loads a calibration TableWorkspace from a CSV file.

    Example of usage:

    .. code-block:: python

       readCalibrationFile('CalibTable', '/tmp/myCalibTable.txt')

    :param table_name: name to call the TableWorkspace
    :param in_path: the path to the calibration file
    """
    DET = 'Detector ID'
    POS = 'Detector Position'
    re_float = re.compile(r"[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?")

    calibTable = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    calibTable.addColumn(type='int', name=DET)
    calibTable.addColumn(type='V3D', name=POS)

    with open(in_path, 'r') as file_p:
        for line in file_p:
            values = re.findall(re_float, line)
            if len(values) != 4:
                continue
            nextRow = {
                DET: int(values[0]),
                POS: V3D(float(values[1]), float(values[2]), float(values[3]))
            }
            calibTable.addRow(nextRow)
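# A minimal round-trip sketch of the file format readCalibrationFile parses:
# each useful line must contain exactly four numbers (detector ID then the
# X, Y, Z position); lines with any other count, such as a header, are
# skipped. The file contents below are hypothetical and purely illustrative;
# this assumes a Mantid session where readCalibrationFile is defined as above.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("Detector ID, Detector Position\n")  # header line: no 4-number match, skipped
    f.write("3, [1.000, 0.100, 0.200]\n")        # detector 3 at (1.0, 0.1, 0.2)
    f.write("4, [1.000, 0.100, 0.300]\n")
    path = f.name

readCalibrationFile('CalibTable', path)  # creates a 2-row TableWorkspace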
def test_sample_tof(self):
    # creates table workspace with mock elastic peak positions and widths:
    table_ws = CreateEmptyTableWorkspace()
    table_ws.addColumn("float", "PeakCentre")
    table_ws.addColumn("float", "Sigma")
    for row in range(132):
        table_ws.addRow([1645.2, 15.0])
    sampleProperties = {'SampleMass': 2.93, 'FormulaUnitMass': 50.94, 'EPWidth': 15}
    yig_calibration_file = "D7_YIG_calibration_TOF.xml"
    PolDiffILLReduction(Run='395639', ProcessAs='Sample',
                        OutputWorkspace='sample_tof',
                        SampleAndEnvironmentProperties=sampleProperties,
                        SampleGeometry='None',
                        OutputTreatment='Individual',
                        InstrumentCalibration=yig_calibration_file,
                        ElasticChannelsWorkspace='table_ws',
                        MeasurementTechnique='TOF')
    self._check_output(mtd['sample_tof'], 339, 132, 2, 'Energy transfer', 'DeltaE', 'Spectrum', 'Label')
    self._check_process_flag(mtd['sample_tof'], 'Sample')
def setUp(self):
    # Creating two gaussian peaks; a flat background is stored separately
    self.x_values = np.linspace(0, 100, 1001)
    self.centre = [25, 75]
    self.height = [35, 20]
    self.width = [10, 5]
    self.y_values = self.gaussian(self.x_values, self.centre[0], self.height[0], self.width[0])
    self.y_values += self.gaussian(self.x_values, self.centre[1], self.height[1], self.width[1])
    self.background = 10 * np.ones(len(self.x_values))

    # Generating a table with a guess of the position of the centre of the peaks
    peak_table = CreateEmptyTableWorkspace()
    peak_table.addColumn(type='float', name='Approximated Centre')
    peak_table.addRow([self.centre[0] + 2])
    peak_table.addRow([self.centre[1] - 3])

    # Generating a workspace with the data and a flat background
    data_ws = CreateWorkspace(DataX=np.concatenate((self.x_values, self.x_values)),
                              DataY=np.concatenate((self.y_values, self.background)),
                              DataE=np.sqrt(np.concatenate((self.y_values, self.background))),
                              NSpec=2)

    self.data_ws = data_ws
    self.peak_guess_table = peak_table
    self.alg_instance = _FitGaussianPeaks.FitGaussianPeaks()
    self.alg_instance.initialize()
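# The `gaussian` helper used in setUp is defined elsewhere in the test class;
# a minimal sketch of the functional form it presumably evaluates (name and
# signature taken from the calls above):
import numpy as np

def gaussian(xvals, centre, height, sigma):
    # Gaussian peak of the given height, centred at `centre`
    return height * np.exp(-(xvals - centre) ** 2 / (2 * sigma ** 2))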
def _createFakePeakPositionTable(self, peakPos):
    """Create a peak position TableWorkspace with a single column for peakPos."""
    tableName = self._names.withSuffix('peak_position_table')
    table = CreateEmptyTableWorkspace(OutputWorkspace=tableName,
                                      EnableLogging=self._subalgLogging)
    table.addColumn('double', 'PeakCentre')
    table.addRow((peakPos,))
    return table
def test_gui_updated_when_row_added_from_sequence(self):
    ws = CreateEmptyTableWorkspace()
    ws.addColumn("double", "l")
    presenter = TableWorkspaceDisplay(ws)
    current_rows = presenter.view.rowCount()
    ws.addRow([1.0])
    self.assertEqual(current_rows + 1, presenter.view.rowCount())
def test_gui_updated_when_row_added_from_dictionary(self):
    ws = CreateEmptyTableWorkspace()
    ws.addColumn("double", "test_col")
    presenter = TableWorkspaceDisplay(ws)
    current_rows = presenter.view.rowCount()
    ws.addRow({'test_col': 1.0})
    self.assertEqual(current_rows + 1, presenter.view.rowCount())
def create_output_table(centre1, centre2):
    Centre_position = CreateEmptyTableWorkspace()
    Centre_position.addColumn(type="double", name="X Centre Position")
    Centre_position.addColumn(type="double", name="Y Centre Position")
    Centre_position.addRow({"X Centre Position": centre1, "Y Centre Position": centre2})
def test_gui_updated_when_row_added_from_dictionary_batch(self):
    ws = CreateEmptyTableWorkspace()
    ws.addColumn("double", "test_col")
    presenter = TableWorkspaceDisplay(ws, batch=True)
    current_rows = presenter.view.rowCount()
    ws.addRow({'test_col': 1.0})
    self.assertEqual(current_rows + 1, presenter.view.model().max_rows())
    presenter.close(ws.name())
def test_gui_updated_when_row_added_from_sequence_standard(self):
    ws = CreateEmptyTableWorkspace()
    ws.addColumn("double", "l")
    presenter = TableWorkspaceDisplay(ws, batch=False)
    current_rows = presenter.view.rowCount()
    ws.addRow([1.0])
    self.assertEqual(current_rows + 1, presenter.view.model().rowCount())
    presenter.close(ws.name())
def correctMisalignedTubes(ws, calibrationTable, peaksTable, spec, idealTube, fitPar, threshold=10):
    """ Correct misaligned tubes due to poor fitting results
    during the first round of calibration.

    Misaligned tubes are first identified according to a tolerance
    applied to the absolute difference between the fitted tube
    positions and the mean across all tubes.

    The FindPeaks algorithm is then used to find a better fit with the
    ideal tube positions as starting parameters for the peak centers.

    From the refitted peaks the positions of the detectors in the tube
    are recalculated.

    @param ws: the workspace to get the tube geometry from
    @param calibrationTable: the calibration table output from running calibration
    @param peaksTable: the table containing the fitted peak centers from calibration
    @param spec: the tube spec for the instrument
    @param idealTube: the ideal tube for the instrument
    @param fitPar: the fitting parameters for calibration
    @param threshold: tolerance defining if a peak is outside of the acceptable range
    @return table of corrected detector positions
    """
    table_name = calibrationTable.name() + 'Corrected'
    corrections_table = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    corrections_table.addColumn('int', "Detector ID")
    corrections_table.addColumn('V3D', "Detector Position")

    mean_peaks, bad_tubes = findBadPeakFits(peaksTable, threshold)

    for index in bad_tubes:
        print("Refitting tube %s" % spec.getTubeName(index))
        tube_dets, _ = spec.getTube(index)
        getPoints(ws, idealTube.getFunctionalForms(), fitPar, tube_dets)
        tube_ws = mtd['TubePlot']
        fit_ws = FindPeaks(InputWorkspace=tube_ws, WorkspaceIndex=0,
                           PeakPositions=fitPar.getPeaks(), PeaksList='RefittedPeaks')
        centers = [row['centre'] for row in fit_ws]
        detIDList, detPosList = getCalibratedPixelPositions(ws, centers, idealTube.getArray(), tube_dets)

        for det_id, pos in zip(detIDList, detPosList):
            corrections_table.addRow({'Detector ID': det_id, 'Detector Position': V3D(*pos)})

        cleanUpFit()

    return corrections_table
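# findBadPeakFits is defined elsewhere; below is a minimal numpy sketch of the
# identification rule described in the docstring (an assumed, illustrative
# implementation, not the actual helper): a tube is flagged when any of its
# fitted peak centres deviates from the column-wise mean by more than the
# threshold.
import numpy as np

def find_bad_peak_fits_sketch(peak_centres, threshold):
    # peak_centres: (n_tubes, n_peaks) array of fitted peak positions
    mean_peaks = peak_centres.mean(axis=0)
    worst_deviation = np.abs(peak_centres - mean_peaks).max(axis=1)
    bad_tubes = np.where(worst_deviation > threshold)[0]
    return mean_peaks, bad_tubes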
def test_gui_updated_when_row_added_from_dictionary_standard(self):
    ws = CreateEmptyTableWorkspace()
    ws.addColumn("double", "test_col")
    presenter = TableWorkspaceDisplay(ws, batch=False)
    presenter.model.block_model_replace = False
    current_rows = presenter.view.rowCount()
    ws.addRow({'test_col': 1.0})
    self.assertEqual(current_rows + 1, presenter.view.model().rowCount())
    presenter.close(ws.name())
def generateCropingTable(qmin, qmax):
    mask_info = CreateEmptyTableWorkspace()
    mask_info.addColumn("str", "SpectraList")
    mask_info.addColumn("double", "XMin")
    mask_info.addColumn("double", "XMax")
    for (i, value) in enumerate(qmin):
        mask_info.addRow([str(i), 0.0, value])  # mask everything below qmin for spectrum i
    for (i, value) in enumerate(qmax):
        mask_info.addRow([str(i), value, 100.0])  # mask everything above qmax for spectrum i
    return mask_info
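# Hypothetical usage (values are illustrative): spectrum i keeps data in
# [qmin[i], qmax[i]], and the two rows written per spectrum mask the regions
# below and above that window; 0 and 100 act as effective axis limits here.
mask_table = generateCropingTable(qmin=[0.5, 0.7], qmax=[9.0, 8.5])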
def test_ion_table(self):
    ws = DensityOfStates(File=self._file_name, SpectrumType='IonTable')

    # Build the expected output
    expected = CreateEmptyTableWorkspace()
    expected.addColumn('str', 'Ion')
    expected.addColumn('int', 'Count')
    expected.addRow(['H', 4])
    expected.addRow(['C', 8])
    expected.addRow(['O', 8])

    self.assertEqual(CheckWorkspacesMatch(ws, expected), 'Success!')
def test_ion_table(self):
    ws = SimulatedDensityOfStates(File=self._file_name, SpectrumType='IonTable')

    # Build the expected output
    expected = CreateEmptyTableWorkspace()
    expected.addColumn('str', 'Ion')
    expected.addColumn('int', 'Count')
    expected.addRow(['H', 4])
    expected.addRow(['C', 8])
    expected.addRow(['O', 8])

    self.assertEqual(CheckWorkspacesMatch(ws, expected), 'Success!')
def parameters_optimized_table(table_name, values=None, errors=None):
    table = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    for column_type, column_name in [('str', 'Name'), ('float', 'Value'), ('float', 'Error')]:
        table.addColumn(type=column_type, name=column_name)
    if values is not None and errors is not None:
        assert len(values) == 4 and len(errors) == 4  # A0, A1, A2, 'Cost function value'
        for index, row_name in enumerate(['A0', 'A1', 'A2', 'Cost function value']):
            table.addRow([row_name, values[index], errors[index]])
    return table
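# Hypothetical call (values are illustrative, not real fit output): build a
# table named 'fit_params' mimicking Fit's parameter table for a quadratic
# background (A0, A1, A2) plus the cost-function value.
table = parameters_optimized_table('fit_params',
                                   values=[1.0, 0.1, 0.01, 2.5],
                                   errors=[0.05, 0.01, 0.001, 0.0])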
def _generate_props_table(self):
    """
    Creates a table workspace with values calculated in algorithm.
    """
    props_table = CreateEmptyTableWorkspace(OutputWorkspace=self._props_output_workspace)

    props_table.addColumn('int', 'NegativeXMinIndex')
    props_table.addColumn('int', 'PositiveXMinIndex')
    props_table.addColumn('int', 'PositiveXMaxIndex')

    props_table.addRow([int(self._negative_min_index), int(self._positive_min_index),
                        int(self._positive_max_index)])

    self.setProperty('OutputPropertiesTable', self._props_output_workspace)
def test_algorithm_does_not_throw_an_error_when_no_valid_peaks_fitted(self):
    x_val = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
    y_val = [0, 0, 0, 0, 1, 7, 0, 0, 0, 0, 10, 7]
    ws = CreateWorkspace(x_val * 2, y_val + [0] * len(y_val), NSpec=2)
    table = CreateEmptyTableWorkspace()
    table.addColumn("float", "Centre")
    table.addRow([20])
    FitGaussianPeaks(InputWorkspace=ws, PeakGuessTable=table)
    self.assertEqual(mtd["peak_table"].rowCount(), 0)
    self.assertEqual(mtd["refit_peak_table"].rowCount(), 0)
def test_find_good_peaks_calls_fit_gaussian_peaks_twice_if_no_peaks_given(self):
    with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks') as mock_fit:
        tmp_table = CreateEmptyTableWorkspace()
        tmp_table.addColumn(type='float', name='chi2')
        tmp_table.addColumn(type='float', name='poisson')
        tmp_table.addRow([10, 20])
        mock_fit.return_value = (mock.MagicMock(), mock.MagicMock(), tmp_table)
        self.alg_instance.min_sigma = 1
        self.alg_instance.max_sigma = 10

        self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, False, self.data_ws, 5)

        self.assertEqual(2, mock_fit.call_count)
def PyExec(self):
    d_spacing = self.getProperty("DSpacing").value
    temp = self.getProperty("T").value
    p_calc = calculate_pressure(d_spacing, temp)
    self.log().notice("The calculated pressure is " + str(p_calc) + " GPa")

    ws = CreateEmptyTableWorkspace()
    ws.addColumn(type='double', name="Input dSpacing-111 (A)")
    ws.addColumn(type='double', name="Temperature (K)")
    ws.addColumn(type='double', name="Calculated Pressure (GPa)")

    p_target = self.getProperty("TargetPressure").value
    use_input_target = True
    if p_target == 0.0:
        use_input_target = False
        p_target = p_calc

    # Grid search: scan d-spacing over 2-2.95 A and pick the value whose
    # calculated pressure is closest to the target.
    test_dspacing = np.arange(2, 2.95, 0.0001)
    pressure = calculate_pressure(test_dspacing, temp)
    diff = abs(pressure - p_target)
    index = np.argmin(diff)
    diff = diff[index]
    if diff < TOL:
        found_d = test_dspacing[index]
    else:
        diff, found_d = 0, 0

    if found_d != 0:
        if use_input_target:
            self.log().notice("Temperature: " + str(temp) + " K")
            self.log().notice("Target pressure: " + str(round(p_target, 6)) + " GPa")
            self.log().notice("Pressure difference: " + str(round(diff, 6)) + " GPa")
            self.log().notice("d(111): " + str(round(found_d, 6)) + " A")
        else:
            self.log().notice("Temperature: " + str(temp) + " K")
            self.log().notice("Target pressure (calculated): " + str(round(p_target, 6)) + " GPa")
            self.log().notice("Pressure difference: " + str(round(diff, 6)) + " GPa")
            self.log().notice("d(111): " + str(round(found_d, 6)) + " A")
        ws.addColumn(type='double', name="Pressure Target (GPa)")
        ws.addColumn(type='double', name="Pressure difference (GPa)")
        ws.addColumn(type='double', name="dSpacing found (A)")
        row = {"Input dSpacing-111 (A)": d_spacing,
               "Temperature (K)": temp,
               "Calculated Pressure (GPa)": p_calc,
               "Pressure Target (GPa)": p_target,
               "Pressure difference (GPa)": diff,
               "dSpacing found (A)": found_d}
    else:
        self.log().notice("dSpacing corresponding to the Target Pressure and Temperature given not found in range"
                          " 2-2.95. Please try different parameters.")
        row = {"Input dSpacing-111 (A)": d_spacing,
               "Temperature (K)": temp,
               "Calculated Pressure (GPa)": p_calc}

    ws.addRow(row)
    self.setProperty("OutputWorkspace", ws)
    DeleteWorkspace(ws)
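# The inverse lookup above is easy to exercise in isolation. calculate_pressure
# is defined elsewhere; pressure_stub below is a made-up monotonic stand-in
# used purely to illustrate the argmin search over the d-spacing grid.
import numpy as np

def pressure_stub(d_spacing, temp):
    return 100.0 * (2.95 - d_spacing)  # hypothetical pressure(d) curve

test_dspacing = np.arange(2, 2.95, 0.0001)
p_target = 40.0
diff = np.abs(pressure_stub(test_dspacing, 300.0) - p_target)
found_d = test_dspacing[np.argmin(diff)]  # d whose pressure is closest to target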
def mask_bank(bank_name: str, tubes_fit_success: np.ndarray, output_table: str) -> Optional[TableWorkspace]:
    r"""
    Creates a single-column `TableWorkspace` object containing the detector ID's of the
    unsuccessfully fitted tubes

    If all tubes were fit successfully, no `TableWorkspace` is created, and `None` is returned.

    :param bank_name: a string of the form 'bankI' where 'I' is a bank number
    :param tubes_fit_success: array of booleans containing a True/False entry for each tube,
        indicating whether the tube was successfully calibrated.
    :param output_table: name of the output TableWorkspace containing one column for detector ID
        from tubes not successfully calibrated.
    :raise AssertionError: the string bank_name does not follow the pattern 'bankI' where 'I' is an integer
    :return: handle to the mask TableWorkspace. Returns `None` if no TableWorkspace is created.
    """
    assert re.match(r'^bank\d+$', bank_name), 'The bank name must be of the form "bankI" where "I" is an integer'
    if False not in tubes_fit_success:
        return None  # all tubes were fit successfully
    bank_number = bank_name[4:]  # drop 'bank' from bank_name
    tube_numbers = 1 + np.where(tubes_fit_success == False)[0]  # noqa E712 unsuccessfully fitted tube numbers
    tube_numbers = ','.join([str(n) for n in tube_numbers])  # failing tubes as a string
    detector_ids = MaskBTP(Instrument='CORELLI', Bank=bank_number, Tube=tube_numbers)
    table = CreateEmptyTableWorkspace(OutputWorkspace=output_table)
    table.addColumn('long64', 'Detector ID')
    [table.addRow([detector_id]) for detector_id in detector_ids.tolist()]
    if AnalysisDataService.doesExist('CORELLIMaskBTP'):
        DeleteWorkspaces(['CORELLIMaskBTP'])
    return mtd[output_table]
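# The tube-number arithmetic above can be checked in isolation: tube numbers
# are 1-based, so failures at indices 2 and 5 map to tubes 3 and 6.
import numpy as np

tubes_fit_success = np.array([True, True, False, True, True, False])
tube_numbers = 1 + np.where(~tubes_fit_success)[0]
print(','.join(str(n) for n in tube_numbers))  # prints "3,6"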
def peak_pixels_table(table_name, peak_count, tube_names=None, pixel_positions=None):
    table = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    table.addColumn(type='str', name='TubeId')
    for i in range(peak_count):
        table.addColumn(type='float', name='Peak%d' % (i + 1))
    if tube_names is not None and pixel_positions is not None:
        assert len(tube_names) == len(pixel_positions), 'tube_names and pixel_positions have different length'
        for tube_index in range(len(tube_names)):
            # tube_names is a list of str values; pixel_positions is a list of lists of float values
            table.addRow([tube_names[tube_index]] + pixel_positions[tube_index])
    return table
def test_update_match_table_when_table_has_less_than_3_rows(self):
    correct_table_entries = ["9999", "Detector 3", "Al , C"]

    # Create source tables
    likelihood_table = CreateEmptyTableWorkspace(OutputWorkspace="likelihood")
    likelihood_table.addColumn("str", "Element")
    likelihood_table.addColumn("int", "Likelihood")
    likelihood_table.addRow(["Al", 20])
    likelihood_table.addRow(["C", 18])

    # Run function
    self.model.update_match_table("likelihood", "9999; Detector 3")

    # Assert statements
    self.assertEqual(self.model.table_entries.get(), correct_table_entries)

    # Delete tables from ADS
    self.delete_if_present("likelihood")
def update_calibration_params_table(params_table):
    if len(params_table) == 0:
        return

    # Create blank, or clear rows from existing, params table.
    if Ads.doesExist(CALIB_PARAMS_WORKSPACE_NAME):
        workspace = Ads.retrieve(CALIB_PARAMS_WORKSPACE_NAME)
        workspace.setRowCount(0)
    else:
        workspace = CreateEmptyTableWorkspace(OutputWorkspace=CALIB_PARAMS_WORKSPACE_NAME)
        workspace.addColumn("int", "bankid")
        workspace.addColumn("double", "difc")
        workspace.addColumn("double", "difa")
        workspace.addColumn("double", "tzero")

    for row in params_table:
        workspace.addRow(row)
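# Hypothetical usage: one [bankid, difc, difa, tzero] row per bank (the
# numbers here are illustrative, not real calibration output).
update_calibration_params_table([
    [1, 18000.0, 0.0, -10.0],
    [2, 18150.0, 0.0, -12.0],
])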
def test_trim_calibration_table(self):
    # create a table with detector id and detector XYZ positions
    table = CreateEmptyTableWorkspace(OutputWorkspace='CalibTable')
    table.addColumn(type='int', name='Detector ID')
    table.addColumn(type='V3D', name='Detector Position')
    table.addRow([0, V3D(0, 1, 2)])  # add two detectors with ID's 0 and 1
    table.addRow([1, V3D(3, 4, 5)])
    y_values = [1, 4]
    # call trim_calibration_table and save to new table
    table_calibrated = trim_calibration_table(table, output_workspace='table_calibrated')
    # assert the Y-coordinate hasn't changed
    assert_allclose(table_calibrated.column(1), y_values, atol=0.0001)
    # call trim_calibration_table and overwrite the table
    table_calibrated = trim_calibration_table(table)
    assert table_calibrated.name() == 'CalibTable'  # table workspace has been overwritten with the calibrated one
    assert_allclose(table_calibrated.column(1), y_values, atol=0.0001)
def simulate_fit_parameter_output(values, cost):
    table = CreateEmptyTableWorkspace()
    table.addColumn(type='str', name='Name')
    table.addColumn(type='float', name='Value')
    table.addColumn(type='float', name='Error')
    for i, (val, err) in enumerate(values):
        name = ''
        if i % 3 == 0:
            name = 'centre'
        elif i % 3 == 1:
            name = 'height'
        elif i % 3 == 2:
            name = 'sigma'
        table.addRow([name, val, err])
    # Record the `cost` argument as the final row, mirroring Fit's parameter
    # table layout (the snippet as given left `cost` unused; the row label
    # here is an assumption).
    table.addRow(['Cost function value', cost, 0.0])
    return table
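# Hypothetical usage: `values` is a sequence of (value, error) pairs ordered
# centre, height, sigma for each simulated peak (numbers are illustrative).
params = simulate_fit_parameter_output([(25.0, 0.1), (35.0, 0.5), (10.0, 0.2)], cost=1.2)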
def _fixed_source_set_and_table(self, table_name):
    r"""Create a table with appropriate column names for saving the location of the source"""
    # collect info on the source
    input_workspace = self.getPropertyValue('InputWorkspace')  # name of the input workspace
    source = mtd[self.getPropertyValue('InputWorkspace')].getInstrument().getSource()
    source_name, source_full_name = source.getName(), source.getFullName()

    # Update the position of the source
    z_position = -abs(self.getProperty('SourceToSampleDistance').value)
    MoveInstrumentComponent(input_workspace, source_full_name, X=0.0, Y=0.0, Z=z_position,
                            RelativePosition=False)

    # Initialize the table of adjustments for the source
    table = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    item_types = ['str', 'double', 'double', 'double', 'double', 'double', 'double', 'double']
    item_names = ['ComponentName', 'Xposition', 'Yposition', 'Zposition',
                  'XdirectionCosine', 'YdirectionCosine', 'ZdirectionCosine', 'RotationAngle']
    for column_name, column_type in zip(item_names, item_types):
        table.addColumn(name=column_name, type=column_type)

    # Add the appropriate row in the table for the source
    table.addRow([source_name,
                  0.0, 0.0, z_position,  # new position for the source
                  0.0, 0.0, 0.0,         # no rotation axis
                  0.0])                  # no rotation angle
    return table
def get_he3_log(path):
    """
    Load the ³He log data into a Mantid table named "helium_log"

    Parameters
    ----------
    path
      A string with the path to the ³He log as a tsv file
    """
    hetemp = load_helium_file(path)
    my_table = CreateEmptyTableWorkspace()
    my_table.addColumn("int", "Number")
    my_table.addColumn("str", "Cell")
    my_table.addColumn("float", "scale")
    my_table.addColumn("str", "Start time")
    my_table.addColumn("float", "fid")
    my_table.addColumn("float", "Time Constant")

    for run in hetemp:
        my_table.addRow([run.run, run.cell, run.scale, run.dt.isoformat(), run.fid, run.t1])
    RenameWorkspace(my_table, "helium_log")
def _create_indexed_workspace(self, fractional_peaks, ndim, hklm):
    # Create table with the number of columns we need
    types = ['int', 'long64', 'double', 'double', 'double', 'double', 'double', 'double',
             'double', 'double', 'double', 'float', 'str', 'float', 'float', 'V3D', 'V3D']
    indexed = CreateEmptyTableWorkspace()
    names = fractional_peaks.getColumnNames()

    # Insert the extra columns for the additional indices
    for i in range(ndim - 3):
        names.insert(5 + i, 'm{}'.format(i + 1))
        types.insert(5 + i, 'double')

    names = np.array(names)
    types = np.array(types)

    # Create columns in the table workspace
    for name, column_type in zip(names, types):
        indexed.addColumn(column_type, name)

    # Copy all columns from original workspace, ignoring HKLs
    column_data = []
    idx = np.arange(0, names.size)
    hkl_mask = (idx < 5) | (idx > 4 + (ndim - 3))
    for name in names[hkl_mask]:
        column_data.append(fractional_peaks.column(name))

    # Insert the additional HKL columns into the data
    for i, col in enumerate(hklm.T.tolist()):
        column_data.insert(i + 2, col)

    # Insert the columns into the table workspace
    for i in range(fractional_peaks.rowCount()):
        row = [column_data[j][i] for j in range(indexed.columnCount())]
        indexed.addRow(row)

    return indexed
def PyExec(self):
    """ Alg execution. """
    instrument = self.getProperty(INSTRUMENT_PROP).value
    run_number = self.getProperty(RUN_NUM_PROP).value
    fit_deadtime = self.getProperty(FIT_DEADTIME_PROP).value
    fix_phases = self.getProperty(FIX_PHASES_PROP).value
    default_level = self.getProperty(DEFAULT_LEVEL).value
    sigma_looseness = self.getProperty(SIGMA_LOOSENESS_PROP).value
    groupings_file = self.getProperty(GROUPINGS_PROP).value
    in_phases_file = self.getProperty(PHASES_PROP).value
    in_deadtimes_file = self.getProperty(DEADTIMES_PROP).value
    out_phases_file = self.getProperty(PHASES_RESULT_PROP).value
    out_deadtimes_file = self.getProperty(DEADTIMES_RESULT_PROP).value

    isis = config.getFacility('ISIS')
    padding = isis.instrument(instrument).zeroPadding(0)
    run_name = instrument + str(run_number).zfill(padding)

    try:
        run_number = int(run_number)
    except Exception:
        raise RuntimeError("'%s' is not an integer run number." % run_number)
    try:
        run_file_path = FileFinder.findRuns(run_name)[0]
    except Exception:
        raise RuntimeError("Unable to find file for run %i" % run_number)

    if groupings_file == "":
        groupings_file = DEFAULT_GROUPINGS_FILENAME % instrument

    # Load data and other info from input files.

    def temp_hidden_ws_name():
        """Generate a unique name for a temporary, hidden workspace."""
        selection = string.ascii_lowercase + string.ascii_uppercase + string.digits
        return '__temp_MaxEnt_' + ''.join(random.choice(selection) for _ in range(20))

    input_data_ws_name = temp_hidden_ws_name()
    LoadMuonNexus(Filename=run_file_path, OutputWorkspace=input_data_ws_name)
    input_data_ws = mtd[input_data_ws_name]

    if isinstance(input_data_ws, WorkspaceGroup):
        Logger.get("MaxEnt").warning("Multi-period data is not currently supported. "
                                     "Just using first period.")
        input_data_ws = input_data_ws[0]

    groupings_ws_name = temp_hidden_ws_name()
    LoadDetectorsGroupingFile(InputFile=groupings_file, OutputWorkspace=groupings_ws_name)
    groupings_ws = mtd[groupings_ws_name]

    def yield_floats_from_file(path):
        """Given a path to a file with a float on each line, will return the
        floats one at a time.  Throws otherwise.  Strips whitespace and
        ignores empty lines."""
        with open(path, 'r') as f:
            for i, line in enumerate(line.strip() for line in f):
                if line == "":
                    continue
                try:
                    yield float(line)
                except ValueError:
                    raise RuntimeError("Parsing error in '%s': Line %d: '%s'." % (path, i, line))

    input_phases = np.array(list(yield_floats_from_file(in_phases_file)))
    input_phases_size = len(input_phases)
    input_deadtimes = np.array(list(yield_floats_from_file(in_deadtimes_file)))
    input_deadtimes_size = len(input_deadtimes)

    n_bins = input_data_ws.blocksize()
    n_detectors = input_data_ws.getNumberHistograms()

    def time_value_to_time_channel_index(value):
        """Given a time value, will return the index of the time channel in
        which the value falls."""
        bin_width = input_data_ws.readX(0)[1] - input_data_ws.readX(0)[0]
        diff = value - input_data_ws.readX(0)[0]
        return int(diff / bin_width)

    # Mantid corrects for time zero on loading, so we want to find the actual channels
    # where 0.0 occurs, and where we have values of 0.1 onwards.
    time_zero_channel = time_value_to_time_channel_index(0.0)
    first_good_channel = time_value_to_time_channel_index(0.1)

    input_data = np.concatenate([input_data_ws.readY(i) for i in range(n_detectors)])

    groupings = [groupings_ws.readY(row)[0] for row in range(groupings_ws.getNumberHistograms())]
    # materialise the list: in Python 3 a bare map() would be exhausted by the
    # len(set(...)) call below and could not be padded afterwards
    groupings = list(map(int, groupings))
    n_groups = len(set(groupings))

    # Cleanup.
    input_data_ws.delete()
    groupings_ws.delete()

    # We're faced with the problem of providing more than a dozen parameters to
    # the Fortran, which can be a bit messy (especially on the Fortran side of
    # things where we need to make "Cf2py" declarations).  A cleaner way of
    # doing this is to simply pass in a few callbacks -- one for each input
    # type -- and have the Fortran provide the name of the variable it wants
    # to the callback.  The callback will then look up the corresponding value
    # and feed it back to the Fortran.
    #
    # We also have a callback for printing to the results log.

    self.int_vars = {
        "RunNo": run_number,
        "frames": FRAMES,
        "res": RES,
        "Tzeroch": time_zero_channel,
        "firstgoodch": first_good_channel,
        "ptstofit": POINTS_TO_FIT,
        "histolen": n_bins,
        "nhisto": n_detectors,
        "n_groups": n_groups,
    }

    self.float_vars = {
        "deflevel": default_level,
        "sigloose": sigma_looseness,
    }

    self.bool_vars = {
        "fixphase": fix_phases,
        "fitdt": fit_deadtime,
    }

    self._assert_map_values_are_of_expected_type()

    def lookup(par_name, par_map, default):
        """The basis of the callbacks passed to the Fortran.  Given a
        parameter name it will consult the appropriate variable map, and
        return the corresponding value of the parameter.  Else return a
        default and log a warning if a parameter with the name does not
        exist."""
        par_name = par_name.strip()
        if par_name in par_map:
            return par_map[par_name]
        msg = """WARNING: tried to find a value for parameter with name %s but
        could not find one.  Default of \"%s\" provided.""" % (par_name, default)
        Logger.get("MaxEnt").warning(msg)
        return default

    def log(priority, message):
        """Log the given message with given priority."""
        try:
            logger = getattr(Logger.get("MaxEnt"), priority.lower())
        except AttributeError:
            # If we don't recognise the priority, use warning() as a default.
            logger = getattr(Logger.get("MaxEnt"), "warning")
        logger(message)
        return True

    # The Fortran expects arrays to be of a certain size, so any arrays that
    # aren't big enough need to be padded.
    input_phases = self._pad_to_length_with_zeros(input_phases, MAX_HISTOS)
    input_deadtimes = self._pad_to_length_with_zeros(input_deadtimes, MAX_HISTOS)
    input_data = self._pad_to_length_with_zeros(input_data, MAX_INPUT_DATA_SIZE)
    groupings = self._pad_to_length_with_zeros(groupings, MAX_HISTOS)

    # TODO: Return the contents of "NNNNN.max", instead of writing to file.
    f_out, fchan_out, output_deadtimes, output_phases, chi_sq = maxent.mantid_maxent(
        # Input data and other info:
        input_data,
        groupings,
        input_deadtimes,
        input_phases,
        # Variable-lookup callbacks:
        lambda par_name: lookup(par_name, self.int_vars, 0),
        lambda par_name: lookup(par_name, self.float_vars, 0.0),
        lambda par_name: lookup(par_name, self.bool_vars, False),
        # Callback for logging:
        log)

    def write_items_to_file(path, items):
        """Given a path to a file and a list of items, will write the items
        to the file, one on each line."""
        with open(path, 'w') as f:
            for item in items:
                f.write(str(item) + "\n")

    # Chop the padded outputs back down to the correct size.
    output_phases = output_phases[:input_phases_size]
    output_deadtimes = output_deadtimes[:input_deadtimes_size]
    input_phases = input_phases[:input_phases_size]
    input_deadtimes = input_deadtimes[:input_deadtimes_size]
    fchan_out = fchan_out[:n_bins]
    f_out = f_out[:n_bins]

    write_items_to_file(out_phases_file, output_phases)
    write_items_to_file(out_deadtimes_file, output_deadtimes)

    log_output = "\nDead times in:\n" + str(input_deadtimes) + "\n" + \
                 "\nDead times out:\n" + str(output_deadtimes) + "\n" + \
                 "\nPhases in:\n" + str(input_phases) + "\n" + \
                 "\nPhases out:\n" + str(output_phases) + "\n" + \
                 "\nGroupings:\n" + str(groupings) + "\n" + \
                 "\nChi Squared:\n" + str(chi_sq) + "\n" + \
                 "\nInput variables:\n"

    for type_map in self.int_vars, self.float_vars, self.bool_vars:
        for name, value in type_map.items():
            log_output += str(name) + " = " + str(value) + "\n"

    Logger.get("MaxEnt").notice(log_output)

    # Generate our own output ws name if the user has not provided one.
    out_ws_name = self.getPropertyValue(OUT_WS_PROP)
    if out_ws_name == "":
        out_ws_name = run_name + "; MaxEnt"
        self.setPropertyValue(OUT_WS_PROP, out_ws_name)

    out_ws = CreateWorkspace(OutputWorkspace=out_ws_name,
                             DataX=fchan_out[:n_bins],
                             DataY=f_out[:n_bins])
    self.setProperty(OUT_WS_PROP, out_ws)

    # MaxEnt inputs table.
    input_table_name = run_name + "; MaxEnt Input"
    input_table = CreateEmptyTableWorkspace(OutputWorkspace=input_table_name)
    input_table.addColumn("str", "Name")
    input_table.addColumn("str", "Value")
    inputs = itertools.chain(self.int_vars.items(),
                             self.float_vars.items(),
                             self.bool_vars.items())
    for name, value in inputs:
        input_table.addRow([str(name), str(value)])

    # Deadtimes and phases input/output table.
    dead_phases_table_name = run_name + "; MaxEnt Deadtimes & Phases"
    dead_phases_table = CreateEmptyTableWorkspace(OutputWorkspace=dead_phases_table_name)
    for column_name in "Deadtimes In", "Deadtimes Out", "Phases In", "Phases Out":
        dead_phases_table.addColumn("double", column_name)
    for row in zip(input_deadtimes, output_deadtimes, input_phases, output_phases):
        dead_phases_table.addRow(list(map(float, row)))

    # Chi-squared output table.
    chisq_table_name = run_name + "; MaxEnt Chi^2"
    chisq_table = CreateEmptyTableWorkspace(OutputWorkspace=chisq_table_name)
    chisq_table.addColumn("int", "Cycle")
    for iteration in range(10):
        chisq_table.addColumn("double", "Iter " + str(iteration + 1))
    for cycle, data in enumerate(chi_sq):
        chisq_table.addRow([cycle + 1] + list(map(float, data)))

    all_output_ws = [input_table_name, dead_phases_table_name, chisq_table_name, out_ws_name]

    # The output workspaces of this algorithm belong in the same groups
    # that are created by the muon interface.  If the appropriate group
    # doesn't exist already then it needs to be created.
    if run_name not in mtd:
        GroupWorkspaces(InputWorkspaces=all_output_ws, OutputWorkspace=run_name)
    else:
        group = mtd[run_name]
        for output_ws in all_output_ws:
            if not group.contains(output_ws):
                group.add(output_ws)

    out_ws.getAxis(0).getUnit().setLabel("Field", "G")
    out_ws.setYUnitLabel("P(B)")

    if INSIDE_MANTIDPLOT:
        mantidplot.plotSpectrum(out_ws, 0)
def testAlignComponentsPositionXY(self):
    CreateSampleWorkspace(OutputWorkspace='testWS', NumBanks=1, BankPixelWidth=4)
    component = 'bank1'
    MoveInstrumentComponent(Workspace='testWS', ComponentName=component,
                            X=0.06, Y=0.04, Z=4.98, RelativePosition=False)

    ### Detector should move to [0.05,0.03,4.98]
    ### Calibration table generated with:
    # CreateSampleWorkspace(OutputWorkspace='sample', NumBanks=1, BankPixelWidth=4)
    # MoveInstrumentComponent(Workspace='sample', ComponentName='bank1', X=0.05, Y=0.03, Z=4.98, RelativePosition=False)
    # CalculateDIFC(InputWorkspace='sample', OutputWorkspace='sample')
    # d = mtd['sample'].extractY()
    # for i in range(len(d)):
    #     print "calTable.addRow(["+str(i+16)+", "+str(d[i][0])+"])"

    calTable = CreateEmptyTableWorkspace()
    calTable.addColumn("int", "detid")
    calTable.addColumn("double", "difc")
    calTable.addRow([16, 44.3352831346])
    calTable.addRow([17, 47.7503426493])
    calTable.addRow([18, 51.6581064544])
    calTable.addRow([19, 55.9553976608])
    calTable.addRow([20, 49.6495672525])
    calTable.addRow([21, 52.7214213944])
    calTable.addRow([22, 56.285004349])
    calTable.addRow([23, 60.2530897937])
    calTable.addRow([24, 55.1227558338])
    calTable.addRow([25, 57.9048914599])
    calTable.addRow([26, 61.1671229038])
    calTable.addRow([27, 64.8369848035])
    calTable.addRow([28, 60.7118272387])
    calTable.addRow([29, 63.2484968666])
    calTable.addRow([30, 66.2480051141])
    calTable.addRow([31, 69.650545037])

    ws = mtd["testWS"]
    startPos = ws.getInstrument().getComponentByName(component).getPos()
    startRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles()

    AlignComponents(CalibrationTable="calTable",
                    Workspace="testWS",
                    ComponentList=component,
                    Xposition=True,
                    Yposition=True)

    ws = mtd["testWS"]
    endPos = ws.getInstrument().getComponentByName(component).getPos()
    endRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles()

    self.assertAlmostEqual(endPos.getX(), 0.05)
    self.assertAlmostEqual(endPos.getY(), 0.03)
    self.assertEqual(startPos.getZ(), endPos.getZ())
    self.assertEqual(startRot[0], endRot[0])
    self.assertEqual(startRot[1], endRot[1])
    self.assertEqual(startRot[2], endRot[2])
def testAlignComponentsRotationY(self):
    CreateSampleWorkspace(OutputWorkspace='testWS', NumBanks=1, BankPixelWidth=4)
    component = 'bank1'
    MoveInstrumentComponent(Workspace='testWS', ComponentName=component,
                            X=2.00, Y=0, Z=2.00, RelativePosition=False)
    RotateInstrumentComponent(Workspace='testWS', ComponentName='bank1',
                              X=0, Y=1, Z=0, Angle=50, RelativeRotation=False)

    ### Detector should rotate to +45deg around Y
    ### Calibration table generated with:
    # CreateSampleWorkspace(OutputWorkspace='sample2', NumBanks=1, BankPixelWidth=4)
    # MoveInstrumentComponent(Workspace='sample2', ComponentName='bank1', X=2.0, Y=0.0, Z=2.0, RelativePosition=False)
    # RotateInstrumentComponent(Workspace='sample2', ComponentName='bank1', X=0, Y=1, Z=0, Angle=45, RelativeRotation=False)
    # CalculateDIFC(InputWorkspace='sample2', OutputWorkspace='sample2')
    # d = mtd['sample2'].extractY()
    # for i in range(len(d)):
    #     print "calTable.addRow(["+str(i+16)+", "+str(d[i][0])+"])"

    calTable = CreateEmptyTableWorkspace()
    calTable.addColumn("int", "detid")
    calTable.addColumn("double", "difc")
    calTable.addRow([16, 2481.89300158])
    calTable.addRow([17, 2481.90717397])
    calTable.addRow([18, 2481.94969])
    calTable.addRow([19, 2482.02054626])
    calTable.addRow([20, 2490.36640334])
    calTable.addRow([21, 2490.38050851])
    calTable.addRow([22, 2490.42282292])
    calTable.addRow([23, 2490.49334316])
    calTable.addRow([24, 2498.83911141])
    calTable.addRow([25, 2498.85314962])
    calTable.addRow([26, 2498.89526313])
    calTable.addRow([27, 2498.96544859])
    calTable.addRow([28, 2507.31101837])
    calTable.addRow([29, 2507.32498986])
    calTable.addRow([30, 2507.36690322])
    calTable.addRow([31, 2507.43675513])

    ws = mtd["testWS"]
    startPos = ws.getInstrument().getComponentByName(component).getPos()
    startRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles("YZX")  # YZX

    AlignComponents(CalibrationTable="calTable",
                    Workspace="testWS",
                    ComponentList=component,
                    AlphaRotation=True)

    ws = mtd["testWS"]
    endPos = ws.getInstrument().getComponentByName(component).getPos()
    endRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles("YZX")  # YZX

    self.assertEqual(startPos, endPos)
    self.assertAlmostEqual(endRot[0], 45.0, places=0)
    self.assertEqual(startRot[1], endRot[1])
    self.assertEqual(startRot[2], endRot[2])
class CorrectTOFTest(unittest.TestCase):

    def setUp(self):
        # create sample workspace
        self.xmin = 2123.33867005 + 4005.75
        self.xmax = 2123.33867005 + 7995.75
        self._input_ws = CreateSampleWorkspace(Function="User Defined",
                                               UserDefinedFunction="name=LinearBackground, A0=0.3;"
                                                                   "name=Gaussian, PeakCentre=8190, Height=5, Sigma=75",
                                               NumBanks=2, BankPixelWidth=1,
                                               XMin=self.xmin, XMax=self.xmax, BinWidth=10.5,
                                               BankDistanceFromSample=4.0, SourceDistanceFromSample=1.4,
                                               OutputWorkspace="ws")
        lognames = "wavelength,TOF1"
        logvalues = "6.0,2123.33867005"
        AddSampleLogMultiple(self._input_ws, lognames, logvalues)

        # create EPP table
        self._table = CreateEmptyTableWorkspace(OutputWorkspace="epptable")
        self._table.addColumn(type="double", name="PeakCentre")
        table_row = {'PeakCentre': 8189.5}
        for i in range(2):
            self._table.addRow(table_row)

    def tearDown(self):
        for wsname in ['ws', 'epptable']:
            if AnalysisDataService.doesExist(wsname):
                run_algorithm("DeleteWorkspace", Workspace=wsname)

    def testCorrection(self):
        # tests that correction is done properly
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws,
                                 EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)

        velocity = h / (m_n * 6.0e-10)
        t_el = 4.0e+6 / velocity
        t_corr = np.arange(self.xmin, self.xmax + 1.0, 10.5) + t_el - (8189.5 - 2123.33867005)
        self.assertTrue(np.allclose(t_corr, wsoutput.readX(0)))          # sdd = 4
        self.assertTrue(np.allclose(t_corr + t_el, wsoutput.readX(1)))   # sdd = 8

        run_algorithm("DeleteWorkspace", Workspace=wsoutput)

    def testGroup(self):
        # tests whether the group of workspaces is accepted as an input
        ws2 = CloneWorkspace(self._input_ws)
        group = GroupWorkspaces([self._input_ws, ws2])
        OutputWorkspaceName = "output_wsgroup"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace='group',
                                 EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
        self.assertTrue(isinstance(wsoutput, WorkspaceGroup))
        self.assertEqual(2, wsoutput.getNumberOfEntries())

        run_algorithm("DeleteWorkspace", Workspace=group)
        run_algorithm("DeleteWorkspace", Workspace=wsoutput)

    def testConvertUnits(self):
        # test whether CorrectTOF + ConvertUnits + ConvertToDistribution will give
        # the same result as TOFTOFConvertTOFToDeltaE
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws,
                                 EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)

        # convert units, convert to distribution
        alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE',
                               EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName + '_dE')
        ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName + '_dE')
        alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)

        # create reference data for X axis
        tof1 = 2123.33867005
        dataX = self._input_ws.readX(0) - tof1
        tel = 8189.5 - tof1
        factor = m_n * 1e+15 / eV
        newX = 0.5 * factor * 16.0 * (1 / tel**2 - 1 / dataX**2)
        # compare
        # self.assertEqual(newX[0], ws_dE.readX(0)[0])
        self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))

        # create reference data for Y axis and compare to the output
        tof = dataX[:-1] + 5.25
        newY = self._input_ws.readY(0) * tof**3 / (factor * 10.5 * 16.0)
        # compare
        self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))

        run_algorithm("DeleteWorkspace", Workspace=ws_dE)
        run_algorithm("DeleteWorkspace", Workspace=wscorr)
def get_log(runs):
    """
    Uses the run journal to identify which run numbers are associated
    with which samples, and creates a table for each sample containing
    all the information needed to analyse each run.

    Parameters
    ----------
    runs
      A list of integer run numbers
    """
    log_file = JPATH + "\\" + get_relevant_log(min(runs))
    results = []
    with open(log_file, "r") as infile:
        journal = xml.etree.cElementTree.iterparse(infile)
        for _, child in journal:
            if "NXentry" in child.tag:
                num = get_xml_run_number(child)
                if num in runs:
                    for param in child:
                        if "title" in param.tag:
                            sample = param.text
                        elif "start_time" in param.tag:
                            start = datetime.datetime.strptime(param.text, "%Y-%m-%dT%H:%M:%S")
                        elif "end_time" in param.tag:
                            stop = datetime.datetime.strptime(param.text, "%Y-%m-%dT%H:%M:%S")
                        elif "duration" in param.tag:
                            duration = datetime.timedelta(seconds=int(param.text))
                        elif "proton_charge" in param.tag:
                            proton_charge = float(param.text)
                    results.append(QuickData(num, sample, start, stop, duration, proton_charge))
                child.clear()
                if num > max(runs):
                    break

    trans = [run for run in results if re.match(RUN_IDENTIFIERS["trans"], run[1])]
    csans = [run for run in results if re.match(RUN_IDENTIFIERS["can_sans"], run[1])]
    ctrans = [run for run in results if re.match(RUN_IDENTIFIERS["can_trans"], run[1])]
    dtrans = [run for run in results if re.match(RUN_IDENTIFIERS["direct_trans"], run[1])]
    temp = [convert_run(run, trans, csans, ctrans, dtrans)
            for run in results
            if (re.match(RUN_IDENTIFIERS["run"], run.sample)
                or re.match(RUN_IDENTIFIERS["can_sans"], run.sample)
                or re.match(RUN_IDENTIFIERS["direct_sans"], run.sample))
            and run.charge / run.duration.seconds > 0.005]

    # group the converted runs by sample name
    d = {}
    for run in temp:
        if run.sample in d.keys():
            d[run.sample].append(run)
        else:
            d[run.sample] = [run]

    # one table workspace per sample, named "<sample>_runs"
    for k, v in d.items():
        my_table = CreateEmptyTableWorkspace()
        my_table.addColumn("int", "Run Number")
        my_table.addColumn("str", "Sample")
        my_table.addColumn("str", "Start time")
        my_table.addColumn("str", "End time")
        my_table.addColumn("int", "Trans run")
        my_table.addColumn("int", "Can Sans run")
        my_table.addColumn("int", "Can Trans run")
        my_table.addColumn("int", "Direct Trans run")
        for run in v:
            my_table.addRow([run.number, run.sample, run.start.isoformat(), run.end.isoformat(),
                             run.trans, run.csans, run.ctrans, run.direct])
        RenameWorkspace(my_table, k + "_runs")