Example #1
    def _get_output_peak(self, args, ideal_tube):
        delete_peak_table_after = False
        if self.OUTPUTPEAK in args:
            output_peak = args[self.OUTPUTPEAK]
        else:
            output_peak = False

        if isinstance(output_peak, ITableWorkspace):
            if output_peak.columnCount() < len(ideal_tube.getArray()):
                raise RuntimeError(
                    "Wrong argument {0}. "
                    "It expects a boolean flag, or a ITableWorksapce with columns (TubeId, Peak1,...,"
                    "PeakM) for M = number of peaks given in knownPositions".format(self.OUTPUTPEAK))
            return output_peak, delete_peak_table_after

        else:
            if not output_peak:
                delete_peak_table_after = True

            # create the output peak table
            output_peak = CreateEmptyTableWorkspace(OutputWorkspace="PeakTable")
            output_peak.addColumn(type='str', name='TubeId')
            for i in range(len(ideal_tube.getArray())):
                output_peak.addColumn(type='float', name='Peak%d' % (i + 1))
            return output_peak, delete_peak_table_after
Example #2
def mask_bank(bank_name: str, tubes_fit_success: np.ndarray, output_table: str) -> Optional[TableWorkspace]:
    r"""
    Creates a single-column `TableWorkspace` object containing the detector IDs of the
    unsuccessfully fitted tubes.

    If all tubes were fit successfully, no `TableWorkspace` is created, and `None` is returned.

    :param bank_name: a string of the form 'bankI' where 'I' is a bank number
    :param tubes_fit_success: array of booleans containing a True/False entry for each tube, indicating whether
    the tube was successfully calibrated.
    :param output_table: name of the output TableWorkspace containing one column for detector ID from tubes
    not successfully calibrated.

    :raise AssertionError: the string bank_name does not follow the pattern 'bankI' where 'I' is an integer
    :return: name of the mask TableWorkspace. Returns `None` if no TableWorkspace is created.
    """
    assert re.match(r'^bank\d+$', bank_name), 'The bank name must be of the form "bankI" where "I" is an integer'
    if False not in tubes_fit_success:
        return None  # all tubes were fit successfully
    bank_number = bank_name[4:]  # drop 'bank' from bank_name
    tube_numbers = 1 + np.where(tubes_fit_success == False)[0]  # noqa E712 unsuccessfully fitted tube numbers
    tube_numbers = ','.join([str(n) for n in tube_numbers])  # failing tubes as a string
    detector_ids = MaskBTP(Instrument='CORELLI', Bank=bank_number, Tube=tube_numbers)
    table = CreateEmptyTableWorkspace(OutputWorkspace=output_table)
    table.addColumn('long64', 'Detector ID')
    for detector_id in detector_ids.tolist():
        table.addRow([detector_id])
    if AnalysisDataService.doesExist('CORELLIMaskBTP'):
        DeleteWorkspaces(['CORELLIMaskBTP'])
    return mtd[output_table]
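
A minimal usage sketch (not part of the original example): the bank name, success flags and output table name below are illustrative only, and MaskBTP needs the CORELLI instrument definition to be available.

import numpy as np

# hypothetical calibration results: tubes 3 and 7 of this bank failed to calibrate
tubes_fit_success = np.array([True, True, False, True, True, True, False, True])
mask_table = mask_bank('bank42', tubes_fit_success, 'MaskTable_bank42')
if mask_table is not None:
    print('number of masked detectors:', mask_table.rowCount())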
Example #3
 def _get_calib_table(self, args):
     if self.CALIBTABLE in args:
         calib_table = args[self.CALIBTABLE]
         # ensure the correct type is passed
         # if a string was passed, transform it in mantid object
         if isinstance(calib_table, str):
             calib_table = mtd[calib_table]
         # check that calib_table has the expected form
         try:
             if not isinstance(calib_table, ITableWorkspace):
                 raise TypeError('not an ITableWorkspace')
             if calib_table.columnCount() != 2:
                 raise ValueError('wrong number of columns')
             colNames = calib_table.getColumnNames()
             if colNames[0] != 'Detector ID' or colNames[1] != 'Detector Position':
                 raise ValueError('unexpected column names')
         except (TypeError, ValueError):
             raise RuntimeError(
                 "Invalid type for {0}. "
                 "The expected type was ITableWorkspace with 2 columns (Detector ID and Detector Position)".
                 format(self.CALIBTABLE))
         else:
             return calib_table
     else:
         calib_table = CreateEmptyTableWorkspace(OutputWorkspace="CalibTable")
         # "Detector ID" column required by ApplyCalibration
         calib_table.addColumn(type="int", name="Detector ID")
         # "Detector Position" column required by ApplyCalibration
         calib_table.addColumn(type="V3D", name="Detector Position")
         return calib_table
Example #4
def readCalibrationFile(table_name, in_path):
    """Read a calibration table from file

    This loads a calibration TableWorkspace from a CSV file.

    Example of usage:

    .. code-block:: python

       readCalibrationFile('CalibTable', '/tmp/myCalibTable.txt')

    :param table_name: name to call the TableWorkspace
    :param in_path: the path to the calibration file

    """
    DET = 'Detector ID'
    POS = 'Detector Position'
    re_float = re.compile(r"[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?")
    calibTable = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    calibTable.addColumn(type='int', name=DET)
    calibTable.addColumn(type='V3D', name=POS)

    with open(in_path, 'r') as file_p:
        for line in file_p:
            values = re.findall(re_float, line)
            if len(values) != 4:
                continue

            nextRow = {
                DET: int(values[0]),
                POS: V3D(float(values[1]), float(values[2]), float(values[3]))
            }

            calibTable.addRow(nextRow)
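
The docstring above mentions a `saveCalibration` helper that is not shown in this example. The following is a hedged sketch of what such a writer could look like, assuming the same two-column ('Detector ID', 'Detector Position') layout that readCalibrationFile parses; it is not the original implementation.

def saveCalibration(table_name, out_path):
    """Sketch only: write a calibration TableWorkspace to a text file readable by readCalibrationFile."""
    table = mtd[table_name]
    detector_ids = table.column('Detector ID')
    positions = table.column('Detector Position')  # V3D objects
    with open(out_path, 'w') as file_p:
        for det_id, pos in zip(detector_ids, positions):
            # one 'detID x y z' line per detector
            file_p.write('{0} {1} {2} {3}\n'.format(det_id, pos.X(), pos.Y(), pos.Z()))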
Example #5
    def generate_peak_guess_table(self, xvals, peakids):
        peak_table = CreateEmptyTableWorkspace(StoreInADS=False)
        peak_table.addColumn(type='float', name='centre')
        for peak_idx in sorted(peakids):
            peak_table.addRow([xvals[peak_idx]])

        return peak_table
Example #6
    def test_sample_tof(self):
        # creates table workspace with mock elastic peak positions and widths:
        table_ws = CreateEmptyTableWorkspace()
        table_ws.addColumn("float", "PeakCentre")
        table_ws.addColumn("float", "Sigma")
        for row in range(132):
            table_ws.addRow([1645.2, 15.0])

        sampleProperties = {
            'SampleMass': 2.93,
            'FormulaUnitMass': 50.94,
            'EPWidth': 15
        }
        yig_calibration_file = "D7_YIG_calibration_TOF.xml"
        PolDiffILLReduction(Run='395639',
                            ProcessAs='Sample',
                            OutputWorkspace='sample_tof',
                            SampleAndEnvironmentProperties=sampleProperties,
                            SampleGeometry='None',
                            OutputTreatment='Individual',
                            InstrumentCalibration=yig_calibration_file,
                            ElasticChannelsWorkspace='table_ws',
                            MeasurementTechnique='TOF')
        self._check_output(mtd['sample_tof'], 339, 132, 2, 'Energy transfer',
                           'DeltaE', 'Spectrum', 'Label')
        self._check_process_flag(mtd['sample_tof'], 'Sample')
Example #7
    def _create_indexed_workspace(self, fractional_peaks, ndim, hklm):
        # Create table with the number of columns we need
        indexed = CreateEmptyTableWorkspace()
        names = fractional_peaks.getColumnNames()
        types = fractional_peaks.columnTypes()

        # Insert the extra columns for the additional indices
        for i in range(ndim - 3):
            names.insert(5 + i, 'm{}'.format(i + 1))
            types.insert(5 + i, 'double')

        names = np.array(names)
        types = np.array(types)

        # Create columns in the table workspace
        for name, column_type in zip(names, types):
            indexed.addColumn(column_type, name)

        # Copy all columns from original workspace, ignoring HKLs
        column_data = []
        idx = np.arange(0, names.size)
        hkl_mask = (idx < 5) | (idx > 4 + (ndim - 3))
        for name in names[hkl_mask]:
            column_data.append(fractional_peaks.column(name))

        # Insert the additional HKL columns into the data
        for i, col in enumerate(hklm.T.tolist()):
            column_data.insert(i + 2, col)

        # Insert the columns into the table workspace
        for i in range(fractional_peaks.rowCount()):
            row = [column_data[j][i] for j in range(indexed.columnCount())]
            indexed.addRow(row)

        return indexed
Example #8
 def _get_calib_table(self, args):
     if self.CALIBTABLE in args:
         calib_table = args[self.CALIBTABLE]
         # ensure the correct type is passed
         # if a string was passed, transform it in mantid object
         if isinstance(calib_table, str):
             calib_table = mtd[calib_table]
         # check that calib_table has the expected form
         try:
             if not isinstance(calib_table, ITableWorkspace):
                 raise TypeError('not an ITableWorkspace')
             if calib_table.columnCount() != 2:
                 raise ValueError('wrong number of columns')
             colNames = calib_table.getColumnNames()
             if colNames[0] != 'Detector ID' or colNames[1] != 'Detector Position':
                 raise ValueError('unexpected column names')
         except (TypeError, ValueError):
             raise RuntimeError(
                 "Invalid type for {0}. "
                 "The expected type was ITableWorkspace with 2 columns (Detector ID and Detector Position)"
                 .format(self.CALIBTABLE))
         else:
             return calib_table
     else:
         calib_table = CreateEmptyTableWorkspace(
             OutputWorkspace="CalibTable")
         # "Detector ID" column required by ApplyCalibration
         calib_table.addColumn(type="int", name="Detector ID")
         # "Detector Position" column required by ApplyCalibration
         calib_table.addColumn(type="V3D", name="Detector Position")
         return calib_table
Example #9
    def test_algorithm_estimates_fit_window(self):
        x_val = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
        y_val = [0, 0, 0, 0, 1, 7, 0, 0, 0, 0, 10, 7]
        ws = CreateWorkspace(x_val * 2, y_val + [0] * len(y_val), NSpec=2)

        table = CreateEmptyTableWorkspace()
        table.addColumn("float", "Centre")
        table.addRow([20])
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FitGaussianPeaks.FitGaussianPeaks.'
                'estimate_single_parameters') as mock_estimate_params:
            mock_estimate_params.return_value = None
            FitGaussianPeaks(InputWorkspace=ws,
                             PeakGuessTable=table,
                             EstimateFitWindow=True,
                             FitWindowSize=11)

            centre_index = 10
            """
                win_size in this case is calculated from EstimatePeakSigma and is estimated to be 2 and FitWindowSize
                is ignored
            """
            win_size = 2
            arguements = mock_estimate_params.call_args_list[0][0]
            self.assertSequenceEqual(list(arguements[0]), x_val)
            self.assertSequenceEqual(list(arguements[1]), y_val)
            self.assertEqual(arguements[2], centre_index)
            self.assertEqual(arguements[3], win_size)
            self.assertEqual(len(arguements), 4)
Example #11
def trim_calibration_table(input_workspace: InputTable, output_workspace: Optional[str] = None) -> TableWorkspace:
    r"""
    Discard the X and Z pixel coordinates, since we are only interested in the calibrated Y-coordinate

    :param input_workspace: name of (or handle to) the calibration table to be trimmed
    :param output_workspace: name for the trimmed table; if not given, the input table is overwritten

    :return: handle to the trimmed table workspace
    """
    if output_workspace is None:
        output_workspace = str(input_workspace)  # overwrite the input table

    # Extract detector ID's and Y-coordinates from the input table
    table = mtd[str(input_workspace)]
    detector_ids = table.column(0)
    y_coordinates = [v.Y() for v in table.column(1)]

    # create the (empty) trimmed table
    table_trimmed = CreateEmptyTableWorkspace(OutputWorkspace=output_workspace)
    table_trimmed.addColumn(type='int', name='Detector ID')
    table_trimmed.addColumn(type='double', name='Detector Y Coordinate')

    # fill the rows of the trimmed table
    for detector_id, y_coordinate in zip(detector_ids, y_coordinates):
        table_trimmed.addRow([detector_id, y_coordinate])

    return table_trimmed
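
A small usage sketch (illustrative detector IDs and positions only; assumes V3D is imported from mantid.kernel): build a toy two-row calibration table of the kind trim_calibration_table expects and trim it.

full_table = CreateEmptyTableWorkspace(OutputWorkspace='toy_calibration')
full_table.addColumn(type='int', name='Detector ID')
full_table.addColumn(type='V3D', name='Detector Position')
full_table.addRow([100, V3D(0.01, 0.95, 1.20)])
full_table.addRow([101, V3D(0.01, 0.97, 1.20)])

trimmed = trim_calibration_table('toy_calibration', output_workspace='toy_calibration_trimmed')
print(trimmed.column('Detector Y Coordinate'))  # expected: [0.95, 0.97]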
Example #12
    def test_algorithm_uses_right_fit_window(self):
        x_val = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
        y_val = [0, 0, 0, 0, 1, 7, 0, 0, 0, 0, 10, 7]
        ws = CreateWorkspace(x_val * 2, y_val + [0] * len(y_val), NSpec=2)

        table = CreateEmptyTableWorkspace()
        table.addColumn("float", "Centre")
        table.addRow([20])
        with mock.patch(
                'plugins.algorithms.WorkflowAlgorithms.FitGaussianPeaks.FitGaussianPeaks.'
                'estimate_single_parameters') as mock_estimate_params:
            mock_estimate_params.return_value = None
            FitGaussianPeaks(InputWorkspace=ws,
                             PeakGuessTable=table,
                             EstimateFitWindow=False,
                             FitWindowSize=11)
            centre_index = 10
            # win_size is (FitWindowSize - 1) / 2, the form expected by estimate_single_parameters
            win_size = 5
            arguments = mock_estimate_params.call_args_list[0][0]
            self.assertSequenceEqual(list(arguments[0]), x_val)
            self.assertSequenceEqual(list(arguments[1]), y_val)
            self.assertEqual(arguments[2], centre_index)
            self.assertEqual(arguments[3], win_size)
            self.assertEqual(len(arguments), 4)
Example #13
    def action_copy_spectrum_to_table(self, table):
        selected_rows = [i.row() for i in table.selectionModel().selectedRows()]
        if not selected_rows:
            self.notify_no_selection_to_copy()
            return
        ws = table.model().ws
        table_ws = CreateEmptyTableWorkspace(OutputWorkspace=ws.name() + "_spectra")
        num_rows = ws.blocksize()
        table_ws.setRowCount(num_rows)
        for i, row in enumerate(selected_rows):
            table_ws.addColumn("double", "XS" + str(row))
            table_ws.addColumn("double", "YS" + str(row))
            table_ws.addColumn("double", "ES" + str(row))

            col_x = 3 * i
            col_y = 3 * i + 1
            col_e = 3 * i + 2

            data_y = ws.readY(row)
            data_x = ws.readX(row)
            data_e = ws.readE(row)

            for j in range(num_rows):
                table_ws.setCell(j, col_x, data_x[j])
                table_ws.setCell(j, col_y, data_y[j])
                table_ws.setCell(j, col_e, data_e[j])
Example #14
    def action_copy_bin_to_table(self, table):
        selected_cols = [i.column() for i in table.selectionModel().selectedColumns()]
        if not selected_cols:
            self.notify_no_selection_to_copy()
            return
        ws = table.model().ws
        table_ws = CreateEmptyTableWorkspace(OutputWorkspace=ws.name() + "_bins")
        num_rows = ws.getNumberHistograms()
        table_ws.setRowCount(num_rows)
        table_ws.addColumn("double", "X")
        for i, col in enumerate(selected_cols):
            table_ws.addColumn("double", "YB" + str(col))
            table_ws.addColumn("double", "YE" + str(col))

            col_y = 2 * i + 1
            col_e = 2 * i + 2

            for j in range(num_rows):
                data_y = ws.readY(j)
                data_e = ws.readE(j)

                if i == 0:
                    if ws.axes() > 1:
                        table_ws.setCell(j, 0, ws.getAxis(1).getValue(j))
                    else:
                        table_ws.setCell(j, 0, j)
                table_ws.setCell(j, col_y, data_y[col])
                table_ws.setCell(j, col_e, data_e[col])
Example #15
    def setUp(self):
        # Creating two peaks on an exponential background with gaussian noise
        self.x_values = np.linspace(0, 100, 1001)
        self.centre = [25, 75]
        self.height = [35, 20]
        self.width = [10, 5]
        self.y_values = self.gaussian(self.x_values, self.centre[0],
                                      self.height[0], self.width[0])
        self.y_values += self.gaussian(self.x_values, self.centre[1],
                                       self.height[1], self.width[1])
        self.background = 10 * np.ones(len(self.x_values))

        # Generating a table with a guess of the position of the centre of the peaks
        peak_table = CreateEmptyTableWorkspace()
        peak_table.addColumn(type='float', name='Approximated Centre')
        peak_table.addRow([self.centre[0] + 2])
        peak_table.addRow([self.centre[1] - 3])

        # Generating a workspace with the data and a flat background
        data_ws = CreateWorkspace(
            DataX=np.concatenate((self.x_values, self.x_values)),
            DataY=np.concatenate((self.y_values, self.background)),
            DataE=np.sqrt(np.concatenate((self.y_values, self.background))),
            NSpec=2)

        self.data_ws = data_ws
        self.peak_guess_table = peak_table

        self.alg_instance = _FitGaussianPeaks.FitGaussianPeaks()
        self.alg_instance.initialize()
Example #16
    def _get_output_peak(self, args, ideal_tube):
        delete_peak_table_after = False
        if self.OUTPUTPEAK in args:
            output_peak = args[self.OUTPUTPEAK]
        else:
            output_peak = False

        if isinstance(output_peak, ITableWorkspace):
            if output_peak.columnCount() < len(ideal_tube.getArray()):
                raise RuntimeError(
                    "Wrong argument {0}. "
                    "It expects a boolean flag, or a ITableWorksapce with columns (TubeId, Peak1,...,"
                    "PeakM) for M = number of peaks given in knownPositions".
                    format(self.OUTPUTPEAK))
            return output_peak, delete_peak_table_after

        else:
            if not output_peak:
                delete_peak_table_after = True

            # create the output peak table
            output_peak = CreateEmptyTableWorkspace(
                OutputWorkspace="PeakTable")
            output_peak.addColumn(type='str', name='TubeId')
            for i in range(len(ideal_tube.getArray())):
                output_peak.addColumn(type='float', name='Peak%d' % (i + 1))
            return output_peak, delete_peak_table_after
 def _createFakePeakPositionTable(self, peakPos):
     """Create a peak position TableWorkspace with a single column for peakPos."""
     tableName = self._names.withSuffix('peak_position_table')
     table = CreateEmptyTableWorkspace(OutputWorkspace=tableName,
                                       EnableLogging=self._subalgLogging)
     table.addColumn('double', 'PeakCentre')
     table.addRow((peakPos,))
     return table
 def _createFakePeakPositionTable(self, peakPos):
     """Create a peak position TableWorkspace with a single column for peakPos."""
     tableName = self._names.withSuffix('peak_position_table')
     table = CreateEmptyTableWorkspace(OutputWorkspace=tableName,
                                       EnableLogging=self._subalgLogging)
     table.addColumn('double', 'PeakCentre')
     table.addRow((peakPos, ))
     return table
Example #19
 def make_runinfo_table(self):
     run_info = CreateEmptyTableWorkspace()
     run_info.addColumn(type="str", name="Instrument")
     run_info.addColumn(type="int", name="Run")
     run_info.addColumn(type="int", name="Bank")
     run_info.addColumn(type="float", name="uAmps")
     run_info.addColumn(type="str", name="Title")
     return run_info
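
A one-row usage sketch (instrument, run number, bank, charge and title are made up), called from within the same class, showing the column order and types the table above defines:

run_info = self.make_runinfo_table()
# hypothetical values matching the str/int/int/float/str column types
run_info.addRow(['ENGINX', 123456, 1, 120.5, 'Ceria calibration run'])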
    def test_gui_updated_when_column_removed(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "test_col")

        presenter = TableWorkspaceDisplay(ws)
        ws.removeColumn('test_col')

        self.assertEqual(0, presenter.view.columnCount())
Example #21
def create_output_table(centre1, centre2):
    Centre_position = CreateEmptyTableWorkspace()

    Centre_position.addColumn(type="double", name="X Centre Position")
    Centre_position.addColumn(type="double", name="Y Centre Position")
    Centre_position.addRow({
        "X Centre Position": centre1,
        "Y Centre Position": centre2
    })
    def test_gui_updated_when_row_added_from_sequence(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "l")

        presenter = TableWorkspaceDisplay(ws)
        current_rows = presenter.view.rowCount()
        ws.addRow([1.0])

        self.assertEqual(current_rows + 1, presenter.view.rowCount())
    def test_gui_updated_when_row_added_from_dictionary(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "test_col")

        presenter = TableWorkspaceDisplay(ws)
        current_rows = presenter.view.rowCount()
        ws.addRow({'test_col': 1.0})

        self.assertEqual(current_rows + 1, presenter.view.rowCount())
    def test_gui_updated_when_row_added_from_dictionary_batch(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "test_col")

        presenter = TableWorkspaceDisplay(ws, batch=True)
        current_rows = presenter.view.rowCount()
        ws.addRow({'test_col': 1.0})

        self.assertEqual(current_rows + 1, presenter.view.model().max_rows())
        presenter.close(ws.name())
    def test_gui_updated_when_row_added_from_sequence_standard(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "l")

        presenter = TableWorkspaceDisplay(ws, batch=False)
        current_rows = presenter.view.rowCount()
        ws.addRow([1.0])

        self.assertEqual(current_rows + 1, presenter.view.model().rowCount())
        presenter.close(ws.name())
    def test_gui_updated_when_column_removed_batch(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "test_col")

        presenter = TableWorkspaceDisplay(ws, batch=True)
        presenter.model.block_model_replace = False
        ws.removeColumn('test_col')

        self.assertEqual(0, presenter.view.columnCount())
        presenter.close(ws.name())
Example #27
def correctMisalignedTubes(ws, calibrationTable, peaksTable, spec, idealTube,
                           fitPar, threshold=10):
    """ Correct misaligned tubes due to poor fitting results
    during the first round of calibration.

    Misaligned tubes are first identified according to a tolerance
    applied to the absolute difference between the fitted tube
    positions and the mean across all tubes.

    The FindPeaks algorithm is then used to find a better fit
    with the ideal tube positions as starting parameters
    for the peak centers.

    From the refitted peaks the positions of the detectors in the
    tube are recalculated.

    @param ws: the workspace to get the tube geometry from
    @param calibrationTable: the calibration table output from running
    calibration
    @param peaksTable: the table containing the fitted peak centers from
    calibration
    @param spec: the tube spec for the instrument
    @param idealTube: the ideal tube for the instrument
    @param fitPar: the fitting parameters for calibration
    @param threshold: tolerance defining whether a peak is outside of the acceptable
    range
    @return table of corrected detector positions
    """
    table_name = calibrationTable.name() + 'Corrected'
    corrections_table = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
    corrections_table.addColumn('int', "Detector ID")
    corrections_table.addColumn('V3D', "Detector Position")

    mean_peaks, bad_tubes = findBadPeakFits(peaksTable, threshold)

    for index in bad_tubes:
        print("Refitting tube %s" % spec.getTubeName(index))
        tube_dets, _ = spec.getTube(index)
        getPoints(ws, idealTube.getFunctionalForms(), fitPar, tube_dets)
        tube_ws = mtd['TubePlot']
        fit_ws = FindPeaks(InputWorkspace=tube_ws, WorkspaceIndex=0,
                           PeakPositions=fitPar.getPeaks(),
                           PeaksList='RefittedPeaks')
        centers = [row['centre'] for row in fit_ws]
        detIDList, detPosList = \
            getCalibratedPixelPositions(ws, centers, idealTube.getArray(),
                                        tube_dets)

        for det_id, pos in zip(detIDList, detPosList):
            corrections_table.addRow({'Detector ID': det_id,
                                      'Detector Position': V3D(*pos)})

        cleanUpFit()

    return corrections_table
 def test_correct_number_of_rows_fetched_initially_batch(self):
     ws = CreateEmptyTableWorkspace()
     ws.addColumn("double", "l")
     list(map(ws.addRow, ([i] for i in range(5 * BATCH_SIZE))))
     presenter = TableWorkspaceDisplay(ws, batch=True)
     # fetch more starting at index 0,0
     index = presenter.view.model().index(0, 0)
     presenter.view.model().fetchMore(index)
     self.assertEqual(5 * BATCH_SIZE, presenter.view.model().max_rows())
     self.assertEqual(BATCH_SIZE, presenter.view.model().rowCount())
     presenter.close(ws.name())
    def test_gui_updated_when_row_added_from_dictionary_standard(self):
        ws = CreateEmptyTableWorkspace()
        ws.addColumn("double", "test_col")

        presenter = TableWorkspaceDisplay(ws, batch=False)
        presenter.model.block_model_replace = False
        current_rows = presenter.view.rowCount()
        ws.addRow({'test_col': 1.0})

        self.assertEqual(current_rows + 1, presenter.view.model().rowCount())
        presenter.close(ws.name())
def generateCropingTable(qmin, qmax):
    mask_info = CreateEmptyTableWorkspace()
    mask_info.addColumn("str", "SpectraList")
    mask_info.addColumn("double", "XMin")
    mask_info.addColumn("double", "XMax")
    for (i, value) in enumerate(qmin):
        mask_info.addRow([str(i), 0.0, value])
    for (i, value) in enumerate(qmax):
        mask_info.addRow([str(i), value, 100.0])

    return mask_info
Example #31
    def test_ion_table(self):
        ws = DensityOfStates(File=self._file_name, SpectrumType='IonTable')

        # Build the expected output
        expected = CreateEmptyTableWorkspace()
        expected.addColumn('str', 'Ion')
        expected.addColumn('int', 'Count')
        expected.addRow(['H', 4])
        expected.addRow(['C', 8])
        expected.addRow(['O', 8])

        self.assertEquals(CheckWorkspacesMatch(ws, expected), 'Success!')
    def test_ion_table(self):
        ws = SimulatedDensityOfStates(File=self._file_name, SpectrumType='IonTable')

        # Build the expected output
        expected = CreateEmptyTableWorkspace()
        expected.addColumn('str', 'Ion')
        expected.addColumn('int', 'Count')
        expected.addRow(['H', 4])
        expected.addRow(['C', 8])
        expected.addRow(['O', 8])

        self.assertEquals(CheckWorkspacesMatch(ws, expected), 'Success!')
Example #33
    def testAlignComponentsPositionXY(self):
        CreateSampleWorkspace(OutputWorkspace='testWS', NumBanks=1,BankPixelWidth=4)
        component='bank1'
        MoveInstrumentComponent(Workspace='testWS',ComponentName=component,X=0.06,Y=0.04,Z=4.98,RelativePosition=False)

        ### Detector should move to [0.05,0.03,4.98]
        ### Calibration table generated with:
        # CreateSampleWorkspace(OutputWorkspace='sample', NumBanks=1,BankPixelWidth=4)
        # MoveInstrumentComponent(Workspace='sample',ComponentName='bank1',X=0.05,Y=0.03,Z=4.98,RelativePosition=False)
        # CalculateDIFC(InputWorkspace='sample', OutputWorkspace='sample')
        # d=mtd['sample'].extractY()
        # for i in range(len(d)):
        #        print "calTable.addRow(["+str(i+16)+", "+str(d[i][0])+"])"

        calTable = CreateEmptyTableWorkspace()
        calTable.addColumn("int", "detid")
        calTable.addColumn("double", "difc")

        calTable.addRow([16, 44.3352831346])
        calTable.addRow([17, 47.7503426493])
        calTable.addRow([18, 51.6581064544])
        calTable.addRow([19, 55.9553976608])
        calTable.addRow([20, 49.6495672525])
        calTable.addRow([21, 52.7214213944])
        calTable.addRow([22, 56.285004349])
        calTable.addRow([23, 60.2530897937])
        calTable.addRow([24, 55.1227558338])
        calTable.addRow([25, 57.9048914599])
        calTable.addRow([26, 61.1671229038])
        calTable.addRow([27, 64.8369848035])
        calTable.addRow([28, 60.7118272387])
        calTable.addRow([29, 63.2484968666])
        calTable.addRow([30, 66.2480051141])
        calTable.addRow([31, 69.650545037])

        ws = mtd["testWS"]
        startPos = ws.getInstrument().getComponentByName(component).getPos()
        startRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles()
        AlignComponents(CalibrationTable="calTable",
                        Workspace="testWS",
                        ComponentList=component,
                        Xposition=True,
                        Yposition=True)
        ws = mtd["testWS"]
        endPos = ws.getInstrument().getComponentByName(component).getPos()
        endRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles()
        self.assertAlmostEqual(endPos.getX(), 0.05)
        self.assertAlmostEqual(endPos.getY(), 0.03)
        self.assertEqual(startPos.getZ(), endPos.getZ())
        self.assertEqual(startRot[0], endRot[0])
        self.assertEqual(startRot[1], endRot[1])
        self.assertEqual(startRot[2], endRot[2])
 def test_scrolling_updates_number_of_rows_fetched_batch(self):
     ws = CreateEmptyTableWorkspace()
     ws.addColumn("double", "l")
     list(map(ws.addRow, ([i] for i in range(5 * BATCH_SIZE))))
     presenter = TableWorkspaceDisplay(ws, batch=True)
     # fetch more starting at index 0,0
     index = presenter.view.model().index(0, 0)
     presenter.view.model().fetchMore(index)
     self.assertEqual(BATCH_SIZE, presenter.view.model().rowCount())
     # scrolling should update our batch size to 2*BATCH_SIZE
     presenter.view.scrollToBottom()
     self.assertEqual(2 * BATCH_SIZE, presenter.view.model().rowCount())
     presenter.close(ws.name())
Example #35
    def _generate_props_table(self):
        """
        Creates a table workspace with values calculated in algorithm.
        """
        props_table = CreateEmptyTableWorkspace(OutputWorkspace=self._props_output_workspace)

        props_table.addColumn('int', 'NegativeXMinIndex')
        props_table.addColumn('int', 'PositiveXMinIndex')
        props_table.addColumn('int', 'PositiveXMaxIndex')

        props_table.addRow([int(self._negative_min_index), int(self._positive_min_index), int(self._positive_max_index)])

        self.setProperty('OutputPropertiesTable', self._props_output_workspace)
Example #36
    def test_algorithm_does_not_throw_an_error_when_no_valid_peaks_fitted(
            self):
        x_val = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
        y_val = [0, 0, 0, 0, 1, 7, 0, 0, 0, 0, 10, 7]
        ws = CreateWorkspace(x_val * 2, y_val + [0] * len(y_val), NSpec=2)
        table = CreateEmptyTableWorkspace()
        table.addColumn("float", "Centre")
        table.addRow([20])

        FitGaussianPeaks(InputWorkspace=ws, PeakGuessTable=table)

        self.assertEqual(mtd["peak_table"].rowCount(), 0)
        self.assertEqual(mtd["refit_peak_table"].rowCount(), 0)
Example #37
 def parameters_optimized_table(table_name, values=None, errors=None):
     table = CreateEmptyTableWorkspace(OutputWorkspace=table_name)
     for column_type, column_name in [('str', 'Name'),
                                      ('float', 'Value'),
                                      ('float', 'Error')]:
         table.addColumn(type=column_type, name=column_name)
     if values is not None and errors is not None:
         # four rows are expected: A0, A1, A2 and 'Cost function value'
         assert len(values) == 4 and len(errors) == 4
         for index, row_name in enumerate(['A0', 'A1', 'A2', 'Cost function value']):
             table.addRow([row_name, values[index], errors[index]])
     return table
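
A short usage sketch with made-up numbers; the four values/errors map onto the A0, A1, A2 rows and the 'Cost function value' row asserted above.

table = parameters_optimized_table('background_fit_parameters',
                                   values=[0.5, -0.02, 1.3e-4, 0.97],
                                   errors=[0.05, 0.004, 2.0e-5, 0.0])
print(table.row(0))  # first row holds A0 together with its value and error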
Example #39
    def testAlignComponentsRotationY(self):
        CreateSampleWorkspace(OutputWorkspace='testWS', NumBanks=1,BankPixelWidth=4)
        component='bank1'
        MoveInstrumentComponent(Workspace='testWS',ComponentName=component,X=2.00,Y=0,Z=2.00,RelativePosition=False)
        RotateInstrumentComponent(Workspace='testWS',ComponentName='bank1',X=0,Y=1,Z=0,Angle=50,RelativeRotation=False)

        ### Detector should rotate to +45deg around Y
        ### Calibration table generated with:
        # CreateSampleWorkspace(OutputWorkspace='sample2', NumBanks=1,BankPixelWidth=4)
        # MoveInstrumentComponent(Workspace='sample2',ComponentName='bank1',X=2.0,Y=0.0,Z=2.0,RelativePosition=False)
        # RotateInstrumentComponent(Workspace='sample2',ComponentName='bank1',X=0,Y=1,Z=0,Angle=45,RelativeRotation=False)
        # CalculateDIFC(InputWorkspace='sample2', OutputWorkspace='sample2')
        # d=mtd['sample2'].extractY()
        # for i in range(len(d)):
        #        print "calTable.addRow(["+str(i+16)+", "+str(d[i][0])+"])"

        calTable = CreateEmptyTableWorkspace()
        calTable.addColumn("int", "detid")
        calTable.addColumn("double", "difc")

        calTable.addRow([16, 2481.89300158])
        calTable.addRow([17, 2481.90717397])
        calTable.addRow([18, 2481.94969])
        calTable.addRow([19, 2482.02054626])
        calTable.addRow([20, 2490.36640334])
        calTable.addRow([21, 2490.38050851])
        calTable.addRow([22, 2490.42282292])
        calTable.addRow([23, 2490.49334316])
        calTable.addRow([24, 2498.83911141])
        calTable.addRow([25, 2498.85314962])
        calTable.addRow([26, 2498.89526313])
        calTable.addRow([27, 2498.96544859])
        calTable.addRow([28, 2507.31101837])
        calTable.addRow([29, 2507.32498986])
        calTable.addRow([30, 2507.36690322])
        calTable.addRow([31, 2507.43675513])

        ws = mtd["testWS"]
        startPos = ws.getInstrument().getComponentByName(component).getPos()
        startRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles("YZX") #YZX
        AlignComponents(CalibrationTable="calTable",
                        Workspace="testWS",
                        ComponentList=component,
                        AlphaRotation=True)
        ws = mtd["testWS"]
        endPos = ws.getInstrument().getComponentByName(component).getPos()
        endRot = ws.getInstrument().getComponentByName(component).getRotation().getEulerAngles("YZX") #YZX
        self.assertEqual(startPos, endPos)
        self.assertAlmostEqual(endRot[0],45.0,places=0)
        self.assertEqual(startRot[1], endRot[1])
        self.assertEqual(startRot[2], endRot[2])
Example #40
def _createDiagnosticsReportTable(reportWSName, numberHistograms, algorithmLogging):
    """Return a table workspace for detector diagnostics reporting."""
    if mtd.doesExist(reportWSName):
        reportWS = mtd[reportWSName]
    else:
        reportWS = CreateEmptyTableWorkspace(OutputWorkspace=reportWSName,
                                             EnableLogging=algorithmLogging)
    existingColumnNames = reportWS.getColumnNames()
    if 'WorkspaceIndex' not in existingColumnNames:
        reportWS.addColumn('int', 'WorkspaceIndex', _PLOT_TYPE_X)
    reportWS.setRowCount(numberHistograms)
    for i in range(numberHistograms):
        reportWS.setCell('WorkspaceIndex', i, i)
    return reportWS
Example #41
    def PyExec(self):
        self.workspace = self.getProperty("InputWorkspace").value
        outws_name = self.getPropertyValue("OutputWorkspace")

        # create table and columns
        outws = CreateEmptyTableWorkspace(OutputWorkspace=outws_name)
        columns = ["PeakCentre", "PeakCentreError", "Sigma", "SigmaError", "Height", "HeightError", "chiSq"]
        nextrow = dict.fromkeys(["WorkspaceIndex"] + columns + ["FitStatus"])
        outws.addColumn(type="int", name="WorkspaceIndex", plottype=1)  # x
        for col in columns:
            outws.addColumn(type="double", name=col)
        outws.addColumn(type="str", name="FitStatus")

        nb_hist = self.workspace.getNumberHistograms()
        for idx in range(nb_hist):
            nextrow["WorkspaceIndex"] = idx
            result = self.do_fit_gaussian(idx)
            if not result:
                for col in columns:
                    nextrow[col] = 0
                nextrow["FitStatus"] = "failed"
            else:
                nextrow["FitStatus"] = result[0]
                nextrow["chiSq"] = result[1]
                ptable = result[3]
                for num in range(ptable.rowCount() - 1):
                    row = ptable.row(num)
                    name = row["Name"]
                    nextrow[name] = row["Value"]
                    nextrow[name+"Error"] = row["Error"]
                DeleteWorkspace(result.OutputParameters)
                DeleteWorkspace(result.OutputNormalisedCovarianceMatrix)
            outws.addRow(nextrow)
        self.setProperty("OutputWorkspace", outws)
        return
Example #42
    def _create_indexed_workspace(self, fractional_peaks, ndim, hklm):
        # Create table with the number of columns we need
        types = ['int', 'long64', 'double', 'double', 'double', 'double',  'double', 'double',
                 'double', 'double', 'double', 'float', 'str', 'float', 'float', 'V3D', 'V3D']
        indexed = CreateEmptyTableWorkspace()
        names = fractional_peaks.getColumnNames()

        # Insert the extra columns for the additional indices
        for i in range(ndim - 3):
            names.insert(5 + i, 'm{}'.format(i + 1))
            types.insert(5 + i, 'double')

        names = np.array(names)
        types = np.array(types)

        # Create columns in the table workspace
        for name, column_type in zip(names, types):
            indexed.addColumn(column_type, name)

        # Copy all columns from original workspace, ignoring HKLs
        column_data = []
        idx = np.arange(0, names.size)
        hkl_mask = (idx < 5) | (idx > 4 + (ndim - 3))
        for name in names[hkl_mask]:
            column_data.append(fractional_peaks.column(name))

        # Insert the additional HKL columns into the data
        for i, col in enumerate(hklm.T.tolist()):
            column_data.insert(i + 2, col)

        # Insert the columns into the table workspace
        for i in range(fractional_peaks.rowCount()):
            row = [column_data[j][i] for j in range(indexed.columnCount())]
            indexed.addRow(row)

        return indexed
Example #43
    def PyExec(self):
        """ Alg execution. """
        instrument         = self.getProperty(INSTRUMENT_PROP).value
        run_number         = self.getProperty(RUN_NUM_PROP).value
        fit_deadtime       = self.getProperty(FIT_DEADTIME_PROP).value
        fix_phases         = self.getProperty(FIX_PHASES_PROP).value
        default_level      = self.getProperty(DEFAULT_LEVEL).value
        sigma_looseness    = self.getProperty(SIGMA_LOOSENESS_PROP).value
        groupings_file     = self.getProperty(GROUPINGS_PROP).value
        in_phases_file     = self.getProperty(PHASES_PROP).value
        in_deadtimes_file  = self.getProperty(DEADTIMES_PROP).value
        out_phases_file    = self.getProperty(PHASES_RESULT_PROP).value
        out_deadtimes_file = self.getProperty(DEADTIMES_RESULT_PROP).value

        isis = config.getFacility('ISIS')
        padding = isis.instrument(instrument).zeroPadding(0)
        run_name = instrument + str(run_number).zfill(padding)

        try:
            run_number = int(run_number)
        except (TypeError, ValueError):
            raise RuntimeError("'%s' is not an integer run number." % run_number)
        try:
            run_file_path = FileFinder.findRuns(run_name)[0]
        except Exception:
            raise RuntimeError("Unable to find file for run %i" % run_number)

        if groupings_file == "":
            groupings_file = DEFAULT_GROUPINGS_FILENAME % instrument

        # Load data and other info from input files.

        def temp_hidden_ws_name():
            """Generate a unique name for a temporary, hidden workspace."""
            selection = string.ascii_lowercase + string.ascii_uppercase + string.digits
            return '__temp_MaxEnt_' + ''.join(random.choice(selection) for _ in range(20))

        input_data_ws_name = temp_hidden_ws_name()
        LoadMuonNexus(Filename=run_file_path, OutputWorkspace=input_data_ws_name)
        input_data_ws = mtd[input_data_ws_name]
        
        if isinstance(input_data_ws, WorkspaceGroup):
            Logger.get("MaxEnt").warning("Multi-period data is not currently supported.  Just using first period.")
            input_data_ws = input_data_ws[0]

        groupings_ws_name = temp_hidden_ws_name()
        LoadDetectorsGroupingFile(InputFile=groupings_file, OutputWorkspace=groupings_ws_name)
        groupings_ws = mtd[groupings_ws_name]

        def yield_floats_from_file(path):
            """Given a path to a file with a float on each line, will return
            the floats one at a time.  Throws otherwise.  Strips whitespace
            and ignores empty lines."""
            with open(path, 'r') as f:
                for i, line in enumerate(line.strip() for line in f):
                    if line == "":
                        continue
                    try:
                        yield float(line)
                    except ValueError:
                        raise RuntimeError("Parsing error in '%s': Line %d: '%s'." %
                                           (path, i, line))

        input_phases         = np.array(list(yield_floats_from_file(in_phases_file)))
        input_phases_size    = len(input_phases)
        input_deadtimes      = np.array(list(yield_floats_from_file(in_deadtimes_file)))
        input_deadtimes_size = len(input_deadtimes)

        n_bins      = input_data_ws.blocksize()
        n_detectors = input_data_ws.getNumberHistograms()

        def time_value_to_time_channel_index(value):
            """Given a time value, will return the index of the time channel in
            which the value falls."""
            bin_width = input_data_ws.readX(0)[1] - input_data_ws.readX(0)[0]
            diff = value - input_data_ws.readX(0)[0]
            return int(diff / bin_width)

        # Mantid corrects for time zero on loading, so we want to find the actual channels
        # where 0.0 occurs, and where we have values of 0.1 onwards.
        time_zero_channel  = time_value_to_time_channel_index(0.0)
        first_good_channel = time_value_to_time_channel_index(0.1)

        input_data = np.concatenate([input_data_ws.readY(i) for i in range(n_detectors)])

        groupings = [groupings_ws.readY(row)[0] for row in range(groupings_ws.getNumberHistograms())]
        groupings = list(map(int, groupings))
        n_groups = len(set(groupings))

        # Cleanup.

        input_data_ws.delete()
        groupings_ws.delete()

        # We're faced with the problem of providing more than a dozen parameters to
        # the Fortran, which can be a bit messy (especially on the Fortran side of
        # things where we need to make "Cf2py" declarations).  A cleaner way of
        # doing this is to simply pass in a few callbacks -- one for each input
        # type -- and have the Fortran provide the name of the variable it wants
        # to the callback.  The callback will then look up the corresponding value
        # and feed it back to the Fortran.
        #
        # We also have a callback for printing to the results log.

        self.int_vars = {
            "RunNo"       : run_number,
            "frames"      : FRAMES,
            "res"         : RES,
            "Tzeroch"     : time_zero_channel,
            "firstgoodch" : first_good_channel,
            "ptstofit"    : POINTS_TO_FIT,
            "histolen"    : n_bins,
            "nhisto"      : n_detectors,
            "n_groups"    : n_groups,
        }

        self.float_vars = {
            "deflevel" : default_level,
            "sigloose" : sigma_looseness,
        }

        self.bool_vars = {
            "fixphase" : fix_phases,
            "fitdt"    : fit_deadtime,
        }

        self._assert_map_values_are_of_expected_type()

        def lookup(par_name, par_map, default):
            """The basis of the callbacks passed to the Fortran.  Given a parameter
            name it will consult the appropriate variable map, and return the
            corresponding value of the parameter.  Else return a default and log a
            warning if a parameter with the name does not exist."""
            par_name = par_name.strip()
            if par_name in par_map:
                return par_map[par_name]
            msg = """WARNING: tried to find a value for parameter with name %s but
            could not find one.  Default of \"%s\" provided.""" % (par_name, default)
            Logger.get("MaxEnt").warning(msg)
            return default

        def log(priority, message):
            """Log the given message with given priority."""
            try:
                logger = getattr(Logger.get("MaxEnt"), priority.lower())
            except AttributeError:
                # If we don't recognise the priority, use warning() as a default.
                logger = getattr(Logger.get("MaxEnt"), "warning")
            logger(message)
            return True

        # The Fortran expects arrays to be of a certain size, so any arrays that
        # aren't big enough need to be padded.
        input_phases    = self._pad_to_length_with_zeros(input_phases, MAX_HISTOS)
        input_deadtimes = self._pad_to_length_with_zeros(input_deadtimes, MAX_HISTOS)
        input_data      = self._pad_to_length_with_zeros(input_data, MAX_INPUT_DATA_SIZE)
        groupings       = self._pad_to_length_with_zeros(groupings, MAX_HISTOS)

        # TODO: Return the contents of "NNNNN.max", instead of writing to file.
        f_out, fchan_out, output_deadtimes, output_phases, chi_sq = maxent.mantid_maxent(
            # Input data and other info:
            input_data,
            groupings,
            input_deadtimes,
            input_phases,
            # Variable-lookup callbacks:
            lambda par_name: lookup(par_name, self.int_vars,   0),
            lambda par_name: lookup(par_name, self.float_vars, 0.0),
            lambda par_name: lookup(par_name, self.bool_vars,  False),
            # Callback for logging:
            log
        )

        def write_items_to_file(path, items):
            """Given a path to a file and a list of items, will write the items
            to the file, one on each line."""
            with open(path, 'w') as f:
                for item in items:
                    f.write(str(item) + "\n")

        # Chop the padded outputs back down to the correct size.
        output_phases    = output_phases[:input_phases_size]
        output_deadtimes = output_deadtimes[:input_deadtimes_size]
        input_phases     = input_phases[:input_phases_size]
        input_deadtimes  = input_deadtimes[:input_deadtimes_size]
        fchan_out        = fchan_out[:n_bins]
        f_out            = f_out[:n_bins]

        write_items_to_file(out_phases_file,    output_phases)
        write_items_to_file(out_deadtimes_file, output_deadtimes)
                 
        log_output = "\nDead times in:\n" +  str(input_deadtimes) + "\n" +\
                     "\nDead times out:\n" + str(output_deadtimes) + "\n" +\
                     "\nPhases in:\n" +      str(input_phases) + "\n" +\
                     "\nPhases out:\n" +     str(output_phases) + "\n" + \
                     "\nGroupings:\n" +      str(groupings) + "\n" +\
                     "\nChi Squared:\n" +    str(chi_sq) + "\n" +\
                     "\nInput variables:\n"

        for type_map in self.int_vars, self.float_vars, self.bool_vars:
            for name, value in type_map.items():
                log_output += str(name) + " = " + str(value) + "\n"

        Logger.get("MaxEnt").notice(log_output)

        # Generate our own output ws name if the user has not provided one.
        out_ws_name = self.getPropertyValue(OUT_WS_PROP)
        if out_ws_name == "":
            out_ws_name = run_name + "; MaxEnt"
            self.setPropertyValue(OUT_WS_PROP, out_ws_name)

        out_ws = CreateWorkspace(OutputWorkspace=out_ws_name,
                                 DataX=fchan_out[:n_bins],
                                 DataY=f_out[:n_bins])
        self.setProperty(OUT_WS_PROP, out_ws)

        # MaxEnt inputs table.
        input_table_name = run_name + "; MaxEnt Input"
        input_table = CreateEmptyTableWorkspace(OutputWorkspace = input_table_name)
        input_table.addColumn("str", "Name")
        input_table.addColumn("str", "Value")
        inputs = itertools.chain(self.int_vars.items(), 
                                 self.float_vars.items(),
                                 self.bool_vars.items())
        for name, value in inputs:
            input_table.addRow([str(name), str(value)])

        # Deadtimes and phases input/output table.
        dead_phases_table_name = run_name + "; MaxEnt Deadtimes & Phases"
        dead_phases_table = CreateEmptyTableWorkspace(OutputWorkspace = dead_phases_table_name)
        for column_name in "Deadtimes In", "Deadtimes Out", "Phases In", "Phases Out":
            dead_phases_table.addColumn("double", column_name)
        for row in zip(input_deadtimes, output_deadtimes, input_phases, output_phases):
            dead_phases_table.addRow(list(map(float, row)))

        # Chi-squared output table.
        chisq_table_name = run_name + "; MaxEnt Chi^2"
        chisq_table = CreateEmptyTableWorkspace(OutputWorkspace = chisq_table_name)
        chisq_table.addColumn("int", "Cycle")
        for iteration in range(10):
            chisq_table.addColumn("double", "Iter " + str(iteration + 1))
        for cycle, data in enumerate(chi_sq):
            chisq_table.addRow([cycle + 1] + list(map(float,data)))

        all_output_ws = [input_table_name,
                         dead_phases_table_name,
                         chisq_table_name,
                         out_ws_name]

        # The output workspaces of this algorithm belong in the same groups
        # that are created by the muon interface.  If the appropriate group
        # doesn't exist already then it needs to be created.
        if run_name not in mtd:
            GroupWorkspaces(InputWorkspaces = all_output_ws,
                            OutputWorkspace = run_name)
        else:
            group = mtd[run_name]
            for output_ws in all_output_ws:
                if not group.contains(output_ws):
                    group.add(output_ws)

        out_ws.getAxis(0).getUnit().setLabel("Field", "G")
        out_ws.setYUnitLabel("P(B)")

        if INSIDE_MANTIDPLOT:
            mantidplot.plotSpectrum(out_ws, 0)
Example #44
def create_output_table(centre1, centre2):
    Centre_position = CreateEmptyTableWorkspace()

    Centre_position.addColumn(type="double", name="X Centre Position")
    Centre_position.addColumn(type="double", name="Y Centre Position")
    Centre_position.addRow({"X Centre Position": centre1, "Y Centre Position": centre2})
Example #45
def get_he3_log(path):
    """
    Load the ³He log data into Mantid Table named "helium_log"

    Parameters
    ----------
    path
      A string with the path to the ³He as a tsv file
    """
    hetemp = load_helium_file(path)
    my_table = CreateEmptyTableWorkspace()
    my_table.addColumn("int", "Number")
    my_table.addColumn("str", "Cell")
    my_table.addColumn("float", "scale")
    my_table.addColumn("str", "Start time")
    my_table.addColumn("float", "fid")
    my_table.addColumn("float", "Time Constant")

    for run in hetemp:
        my_table.addRow([run.run, run.cell,
                         run.scale,
                         run.dt.isoformat(),
                         run.fid, run.t1])
    RenameWorkspace(my_table, "helium_log")
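
load_helium_file is not part of this example; judging from the attributes accessed above (run, cell, scale, dt, fid, t1), each entry behaves like a small record. A hypothetical sketch of such a record, for illustration only:

from collections import namedtuple
import datetime

# hypothetical record shape inferred from the attribute access above;
# the real load_helium_file may return something different
HeliumRun = namedtuple('HeliumRun', ['run', 'cell', 'scale', 'dt', 'fid', 't1'])

example_entry = HeliumRun(run=12345, cell='cell_A', scale=1.0,
                          dt=datetime.datetime(2020, 1, 1, 12, 0, 0),
                          fid=30.0, t1=60.0)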
Example #46
def get_log(runs):
    """
    Uses the run journal to identify which run numbers are associated with
    which samples and create a table for each sample, containing all the
    information needed to analyse each run.

    Parameters
    ----------
    runs
      A list of integer run numbers
    """
    log_file = JPATH + "\\" + get_relevant_log(min(runs))
    results = []
    with open(log_file, "r") as infile:
        journal = xml.etree.cElementTree.iterparse(infile)
        for _, child in journal:
            if "NXentry" in child.tag:
                num = get_xml_run_number(child)
                if num in runs:
                    for param in child:
                        if "title" in param.tag:
                            sample = param.text
                        elif "start_time" in param.tag:
                            start = datetime.datetime.strptime(
                                param.text,
                                "%Y-%m-%dT%H:%M:%S")
                        elif "end_time" in param.tag:
                            stop = datetime.datetime.strptime(
                                param.text,
                                "%Y-%m-%dT%H:%M:%S")
                        elif "duration" in param.tag:
                            duration = datetime.timedelta(
                                seconds=int(param.text))
                        elif "proton_charge" in param.tag:
                            proton_charge = float(param.text)
                    results.append(
                        QuickData(num, sample, start, stop, duration,
                                  proton_charge))
                child.clear()
                if num > max(runs):
                    break
    trans = [run for run in results
             if re.match(RUN_IDENTIFIERS["trans"], run[1])]
    csans = [run for run in results
             if re.match(RUN_IDENTIFIERS["can_sans"], run[1])]
    ctrans = [run for run in results
              if re.match(RUN_IDENTIFIERS["can_trans"], run[1])]
    dtrans = [run for run in results
              if re.match(RUN_IDENTIFIERS["direct_trans"], run[1])]
    temp = [convert_run(run, trans, csans, ctrans, dtrans)
            for run in results
            if (re.match(RUN_IDENTIFIERS["run"], run.sample) or
                re.match(RUN_IDENTIFIERS["can_sans"], run.sample) or
                re.match(RUN_IDENTIFIERS["direct_sans"], run.sample))
            and run.charge/run.duration.seconds > 0.005]

    d = {}
    for run in temp:
        if run.sample in d.keys():
            d[run.sample].append(run)
        else:
            d[run.sample] = [run]

    for k, v in d.items():
        my_table = CreateEmptyTableWorkspace()
        my_table.addColumn("int", "Run Number")
        my_table.addColumn("str", "Sample")
        my_table.addColumn("str", "Start time")
        my_table.addColumn("str", "End time")
        my_table.addColumn("int", "Trans run")
        my_table.addColumn("int", "Can Sans run")
        my_table.addColumn("int", "Can Trans run")
        my_table.addColumn("int", "Direct Trans run")

        for run in v:
            my_table.addRow(
                [run.number, run.sample,
                 run.start.isoformat(),
                 run.end.isoformat(),
                 run.trans, run.csans, run.ctrans,
                 run.direct])
        RenameWorkspace(my_table, k+"_runs")
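
# A minimal usage sketch (the run numbers below are placeholders, not real
# runs): this call would create one "<sample>_runs" TableWorkspace per sample
# found in the journal, using the module-level JPATH and RUN_IDENTIFIERS
# constants assumed above.
#
# get_log([54321, 54322, 54323])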
Example #47
def calibrate(ws, tubeSet, knownPositions, funcForm, **kwargs):
    """

      Define the calibrated positions of the detectors inside the tubes defined
      in tubeSet.

      Tubes may be considered a list of aligned detectors, which can be treated
      as pixels when their values are displayed.

      The positions of these pixels are provided by the manufacturer, but their
      real positions depend on the electronics inside the tube and vary slightly
      from tube to tube. The calibrate method aims to find the real positions
      of the detectors (pixels) inside the tube.

      For this, it receives an integrated workspace, in which a special
      measurement was performed so as to produce a pattern of peaks or troughs,
      where Gaussian peaks or edges can be found.


      The calibration follows these steps:

      1. Finding the peaks on each tube
      2. Fitting the peaks against the known positions
      3. Defining the new positions for the pixels (detectors)

      Let's consider the simplest way of calling calibrate:

      .. code-block:: python

         from tube import calibrate
         ws = Load('WISH17701')
         ws = Integration(ws)
         known_pos = [-0.41,-0.31,-0.21,-0.11,-0.02, 0.09, 0.18, 0.28, 0.39 ]
         peaks_form = 9*[1] # all the peaks are gaussian peaks
         calibTable = calibrate(ws,'WISH/panel03',known_pos, peaks_form)

      In this example, the calibrate framework will consider all the
      tubes (152) from WISH/panel03.
      You may decide to calibrate only a subset of the tubes by passing the
      **rangeList** option.

      .. code-block:: python

         # This code will calibrate only the tube indexed as number 3
         # (usually tube0004)
         calibTable = calibrate(ws,'WISH/panel03',known_pos,
                                peaks_form, rangeList=[3])

      **Finding the peaks on each tube**

      * Dynamically fitting peaks

       The framework expects that, for each tube, it will find a peak pattern
       around the pixels corresponding to the known_pos positions.

       The way it works out the estimated peak position (in pixels) is:

       1. Get the length of the tube: distance(first_detector, last_detector) in the tube.
       2. Get the number of detectors in the tube (nDets).
       3. Assume that the center of the tube corresponds to the origin (0).

       .. code-block:: python

          centre_pixel = known_pos * nDets/tube_length + nDets/2

       It will then look for the real peak around the estimated value as:

       .. code-block:: python

          # consider tube_values the array of counts, and peak the estimated
          # position for the peak
          real_peak_pos = argmax(tube_values[peak-margin:peak+margin])

       After finding the real_peak_pos, it will try to fit the region around
       the peak to find the best expected position of the peak in a continuous
       space. It does this by fitting the region around the peak to a
       Gaussian function, and then extracting the PeakCentre returned by the
       fit.

       .. code-block:: python

          centre = real_peak_pos
          fit_start, fit_stop = centre - margin, centre + margin
          values = tube_values[fit_start:fit_stop]
          background = min(values)
          peak = max(values) - background
          width = len(where(values > peak/2 + background)[0])
          # It will fit to something like:
          # Fit(function=LinearBackground,A0=background;Gaussian,
          # Height=peak, PeakCentre=centre, Sigma=width,fit_start,fit_end)

      * Force Fitting Parameters


       These dynamically estimated values can be avoided by defining **fitPar**
       for the calibrate function

       .. code-block:: python

          eP = [57.5, 107.0, 156.5, 206.0, 255.5, 305.0, 354.5, 404.0, 453.5]
          # Expected Height of Gaussian Peaks (initial value of fit parameter)
          ExpectedHeight = 1000.0
          # Expected width of Gaussian peaks in pixels
          # (initial value of fit parameter)
          ExpectedWidth = 10.0
          fitPar = TubeCalibFitParams( eP, ExpectedHeight, ExpectedWidth )
          calibTable = calibrate(ws, 'WISH/panel03', known_pos, peaks_form, fitPar=fitPar)

      * Different Function Factors


       Although the examples consider only Gaussian peaks, it is possible to
       change the function factor to edges by passing the index of the
       known_position through **funcForm**. Hence, considering three special
       points, with one Gaussian peak and two edges, the calibrate call could
       be configured as:

       .. code-block:: python

          known_pos = [-0.1, 2, 2.3]
          # Gaussian peak followed by two edges (troughs)
          form_factor = [1, 2, 2]
          calibTable = calibrate(ws,'WISH/panel03',known_pos,
                                 form_factor)

      * Override Peaks


       It is possible to skip the peak-finding step by providing the peak
       positions through the **overridePeaks** parameter. The example below
       tests the calibration of a single tube (30) but skips the peak-finding
       step.

       .. code-block:: python

          known_pos = [-0.41,-0.31,-0.21,-0.11,-0.02, 0.09, 0.18, 0.28, 0.39 ]
          define_peaks = [57.5, 107.0, 156.5, 206.0, 255.5, 305.0, 354.5,
                         404.0, 453.5]
          calibTable = calibrate(ws, 'WISH/panel03', known_pos, peaks_form,
                           overridePeaks={30:define_peaks}, rangeList=[30])

      * Output Peaks Positions

       Enabling the **outputPeak** option produces a TableWorkspace whose first
       column is the tube name and whose following columns hold the positions
       where the corresponding peaks were found, like the table below.

       +-------+-------+-----+-------+
       |TubeId | Peak1 | ... | PeakM |
       +=======+=======+=====+=======+
       |tube0  | 15.5  | ... | 370.3 |
       +-------+-------+-----+-------+
       |  ...  |  ...  | ... |  ...  |
       +-------+-------+-----+-------+
       |tubeN  | 14.9  | ... | 371.2 |
       +-------+-------+-----+-------+

       The signature changes to:

       .. code-block:: python

          calibTable, peakTable = calibrate(...)

       It is possible to give a peakTable directly to the **outputPeak** option,
       which makes the calibration append the peaks to the given table.

       .. hint::

         It is possible to save the peakTable to a file using the
         :meth:`savePeak` method.
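
       For illustration, a minimal sketch of saving the peak table to a file
       (this assumes :meth:`savePeak` takes the table and a file path; the
       file name is hypothetical):

       .. code-block:: python

          calibTable, peakTable = calibrate(ws, 'WISH/panel03', known_pos,
                                            peaks_form, outputPeak=True)
          # assumed signature: savePeak(peakTable, filePath)
          savePeak(peakTable, 'wish_panel03_peaks.txt')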

      **Find the correct position along the tube**


       The second step of the calibration is to define the correct position of
       pixels along the tube. This is done by fitting the peak positions found
       in the previous step against the known_positions provided.

       ::

        known       |              *
        positions   |           *
                    |      *
                    |  *
                    |________________
                      pixels positions

       The default operation is to fit the pixel positions against the known
       positions with a quadratic function in order to define an operation to
       move all the pixels to their real positions. If necessary, the user may
       choose to fit using a polynomial of 3rd order, through the parameter
       **fitPolyn**.

       .. note::

         The known positions are given in the same unit as the spatial position
         (3D), with the center of the tube as the origin.

       Hence, this section will define a function that:

       .. math:: F(pix) = RealRelativePosition
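
       Conceptually, this is equivalent to a least-squares polynomial fit of
       the found peak pixel positions against the known positions. The sketch
       below is only illustrative (it is not the internal implementation, and
       the arrays are made-up numbers):

       .. code-block:: python

          import numpy

          # pixel positions of the peaks found in one tube (illustrative values)
          peak_pixels = numpy.array([57.5, 107.0, 156.5, 206.0, 255.5])
          # corresponding known positions relative to the tube centre
          known_positions = numpy.array([-0.41, -0.31, -0.21, -0.11, -0.02])
          # quadratic fit defining F(pix) -> real relative position
          coefficients = numpy.polyfit(peak_pixels, known_positions, deg=2)
          F = numpy.poly1d(coefficients)
          # evaluate the correction for every pixel of a (hypothetical) 512-pixel tube
          new_relative_positions = F(numpy.arange(512))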

      **Define the new position for the detectors**

       Finally, the positions of the detectors are defined through a vector
       operation:

       .. math::

         \\vec{p} = \\vec{c} + v \\vec{u}

       Where :math:`\\vec{p}` is the position in 3D space, :math:`\\vec{c}` is
       the centre of the tube, **v** is the RealRelativePosition deduced in the
       previous step, and :math:`\\vec{u}` is the unit vector in the direction
       of the tube.
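
       A small numpy sketch of this operation (all vectors are illustrative;
       the real values come from the instrument geometry and the fit above):

       .. code-block:: python

          import numpy

          tube_centre = numpy.array([1.2, 0.0, 3.4])       # vector c (illustrative)
          tube_direction = numpy.array([0.0, 1.0, 0.0])    # unit vector u along the tube
          v = 0.125                                        # RealRelativePosition of one pixel
          new_position = tube_centre + v * tube_direction  # vector p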



      :param ws: Integrated workspace with tubes to be calibrated.
      :param tubeSet: Specification of the set of tubes to be calibrated. If a string is passed, a TubeSpec will be created and the string passed to setTubeSpecByString.

       This will be the case for TubeSpec as string

       .. code-block:: python

         self.tube_spec = TubeSpec(ws)
         self.tube_spec.setTubeSpecByString(tubeSet)

       If a list of strings is passed, the TubeSpec will be created with this list:

       .. code-block:: python

          self.tube_spec = TubeSpec(ws)
          self.tube_spec.setTubeSpecByStringArray(tubeSet)

       If a :class:`~tube_spec.TubeSpec` object is passed, it will be used as it is.


      :param knownPositions: The defined position for the peaks/edges, taking the center as the origin and having the same units as the tube length in the 3D space.

      :param funcForm: list of values defining the form of each known position (peak=1, edge=2). If it is not provided, it will be assumed that all the knownPositions are peaks.


      Optional parameters to tune the calibration:

      :param fitPar: Define the parameters to be used in the fit as a :class:`~tube_calib_fit_params.TubeCalibFitParams`. If not provided, the dynamic mode is used. See :py:func:`~Examples.TubeCalibDemoMaps_All.provideTheExpectedValue`

      :param margin: value in pixels that will be used around the peaks/edges to fit them. Default = 15. See the code of :py:mod:`~Examples.TubeCalibDemoMerlin` where **margin** is used to calibrate small tubes.

       .. code-block:: python

          fit_start, fit_end = centre - margin, centre + margin

      :param rangeList: list of tubes indexes that will be calibrated. As in the following code (see: :py:func:`~Examples.TubeCalibDemoMaps_All.improvingCalibrationSingleTube`):

       .. code-block:: python

          for index in rangelist:
              do_calibrate(tubeSet.getTube(index))

      :param calibTable: Pass the calibration table; calibrate will then append the values to the provided one and return it. (see: :py:mod:`~Examples.TubeCalibDemoMerlin`)

      :param plotTube: If given, the tube whose index is in plotTube will be plotted along with its fitted peaks; it can receive a list of indexes to plot. (see: :py:func:`~Examples.TubeCalibDemoMaps_All.changeMarginAndExpectedValue`)

      :param excludeShortTubes: Do not calibrate tubes whose length is smaller than the given value. (see: Examples/TubeCalibDemoMerlin_Adjustable.py)

      :param overridePeaks: dictionary that defines an array of peaks positions (in pixels) to be used for the specific tube(key). (see: :py:func:`~Examples.TubeCalibDemoMaps_All.improvingCalibrationSingleTube`)

       .. code-block:: python

          for index in rangelist:
            if index in overridePeaks:
              use_this_peaks = overridePeaks[index]
              # skip finding peaks
              fit_peaks_to_position()

      :param fitPolyn: Define the order of the polynomial used to fit the pixel positions against the known positions. The acceptable values are 1, 2 or 3. Default = 2.


      :param outputPeak: Enable calibrate to output the peak table, relating the tubes with the pixel positions. It may be passed as a boolean value (outputPeak=True) or as a peakTable value. The latter case tells calibrate to append the new values to the given peakTable. This is useful when you have to operate on subsets of tubes. (see :py:mod:`~Examples.TubeCalibDemoMerlin`, which shows a nice inspection of this table).

       .. code-block:: python

         calibTable, peakTable = calibrate(ws, (omitted), rangeList=[1],
                  outputPeak=True)
         # appending the result to peakTable
         calibTable, peakTable = calibrate(ws, (omitted), rangeList=[2],
                  outputPeak=peakTable)
         # now, peakTable has information for tube[1] and tube[2]

      :rtype: calibrationTable, a TableWorkspace with two columns: Detector ID (int) and Detector Position (V3D).

    """
    FITPAR = 'fitPar'
    MARGIN = 'margin'
    RANGELIST = 'rangeList'
    CALIBTABLE = 'calibTable'
    PLOTTUBE = 'plotTube'
    EXCLUDESHORT = 'excludeShortTubes'
    OVERRIDEPEAKS = 'overridePeaks'
    FITPOLIN = 'fitPolyn'
    OUTPUTPEAK = 'outputPeak'

    #check that only valid arguments were passed through kwargs
    for key in kwargs.keys():
        if key not in [FITPAR, MARGIN, RANGELIST, CALIBTABLE, PLOTTUBE,
                       EXCLUDESHORT, OVERRIDEPEAKS, FITPOLIN,
                       OUTPUTPEAK]:
            msg = "Wrong argument: '%s'! This argument is not defined in the signature of this function. Hint: remember that arguments are case sensitive" % key
            raise RuntimeError(msg)


    # check parameter ws: if it was given as string, transform it in
    # mantid object
    if isinstance(ws,str):
        ws = mtd[ws]
    if not isinstance(ws,MatrixWorkspace):
        raise RuntimeError("Wrong argument ws = %s. It must be a MatrixWorkspace" % (str(ws)))

    # check parameter tubeSet. It accepts a string or, preferably, a TubeSpec
    if isinstance(tubeSet,str):
        selectedTubes = tubeSet
        tubeSet = TubeSpec(ws)
        tubeSet.setTubeSpecByString(selectedTubes)
    elif isinstance(tubeSet, list):
        selectedTubes = tubeSet
        tubeSet = TubeSpec(ws)
        tubeSet.setTubeSpecByStringArray(selectedTubes)
    elif not isinstance(tubeSet,TubeSpec):
        raise RuntimeError("Wrong argument tubeSet. It must be a TubeSpec or a string that defines the set of tubes to be calibrated. For example: WISH/panel03")

    # check the known_positions parameter
    # for old version compatibility, it also accepts IdealTube, even though
    # it should only be used internally
    if not (isinstance(knownPositions, list) or
            isinstance(knownPositions, tuple) or
            isinstance(knownPositions, numpy.ndarray)):
        raise RuntimeError("Wrong argument knownPositions. It expects a list of values for the positions expected for the peaks in relation to the center of the tube")
    else:
        idealTube = IdealTube()
        idealTube.setArray(numpy.array(knownPositions))


    #deal with funcForm parameter
    try:
        nPeaks = len(idealTube.getArray())
        if len(funcForm) != nPeaks:
            raise 1
        for val in funcForm:
            if val not in [1,2]:
                raise 2
    except:
        raise RuntimeError("Wrong argument FuncForm. It expects a list of values describing the form of everysingle peaks. So, for example, if there are three peaks where the first is a peak and the followers as edge, funcForm = [1, 2, 2]. Currently, it is defined 1-Gaussian Peak, 2 - Edge. The knownPos has %d elements and the given funcForm has %d."%(nPeaks, len(funcForm)))

    #apply the functional form to the ideal Tube
    idealTube.setForm(funcForm)

    # check the FITPAR parameter (optional)
    # if FITPAR is given, it is used as-is; if FITPAR is not given,
    # a FITPAR is created by 'guessing' the centre positions and
    # allowing the peak-finding calibration methods to adjust the
    # parameters for the peaks automatically
    if FITPAR in kwargs:
        fitPar = kwargs[FITPAR]
        #fitPar must be a TubeCalibFitParams
        if not isinstance(fitPar, TubeCalibFitParams):
            raise RuntimeError("Wrong argument %s. This argument, when given, must be a valid TubeCalibFitParams object"%FITPAR)
    else:
        # create a fit parameters guessing centre positions
        # the guessing obeys the following rule:
        #
        # centre_pixel = known_pos * ndets/tube_length + ndets / 2
        #
        # Get tube length and number of detectors
        tube_length = tubeSet.getTubeLength(0)
        #ndets = len(wsp_index_for_tube0)
        id1, ndets, step = tubeSet.getDetectorInfoFromTube(0)

        known_pos = idealTube.getArray()
        # position of the peaks in pixels
        centre_pixel = known_pos * ndets/tube_length + ndets * 0.5

        fitPar = TubeCalibFitParams(centre_pixel)
        # make it automatic, it means, that for every tube,
        # the parameters for fit will be re-evaluated, from the first
        # guess positions given by centre_pixel
        fitPar.setAutomatic(True)


    # check the MARGIN parameter (optional)
    if MARGIN in kwargs:
        try:
            margin = float(kwargs[MARGIN])
        except:
            raise RuntimeError("Wrong argument %s. It was expected a number!"%MARGIN)
        fitPar.setMargin(margin)

    # deal with RANGELIST parameter
    if RANGELIST in kwargs:
        rangeList = kwargs[RANGELIST]
        if isinstance(rangeList,int):
            rangeList = [rangeList]
        try:
            # this deals with list and tuples and iterables to make sure
            # rangeList becomes a list
            rangeList = list(rangeList)
        except:
            raise RuntimeError("Wrong argument %s. It expects a list of indexes for calibration"%RANGELIST)
    else:
        rangeList = range(tubeSet.getNumTubes())

    # check if the user passed the option calibTable
    if CALIBTABLE in kwargs:
        calibTable = kwargs[CALIBTABLE]
        #ensure the correct type is passed
        # if a string was passed, transform it in mantid object
        if isinstance(calibTable,str):
            calibTable = mtd[calibTable]
        #check that calibTable has the expected form
        try:
            if not isinstance(calibTable,ITableWorkspace):
                raise 1
            if calibTable.columnCount() != 2:
                raise 2
            colNames = calibTable.getColumnNames()
            if colNames[0] != 'Detector ID' or colNames[1] != 'Detector Position':
                raise 3
        except:
            raise RuntimeError("Invalid type for %s. The expected type was ITableWorkspace with 2 columns(Detector ID and Detector Positions)" % CALIBTABLE)
    else:
        calibTable = CreateEmptyTableWorkspace(OutputWorkspace="CalibTable")
        # "Detector ID" column required by ApplyCalibration
        calibTable.addColumn(type="int",name="Detector ID")
        # "Detector Position" column required by ApplyCalibration
        calibTable.addColumn(type="V3D",name="Detector Position")


    # deal with plotTube option
    if PLOTTUBE in kwargs:
        plotTube = kwargs[PLOTTUBE]
        if isinstance(plotTube, int):
            plotTube = [plotTube]
        try:
            plotTube = list(plotTube)
        except:
            raise RuntimeError("Wrong argument %s. It expects an index (int) or a list of indexes" %PLOTTUBE)
    else:
        plotTube = []

    # deal with minimum tube sizes
    if EXCLUDESHORT in kwargs:
        excludeShortTubes = kwargs[EXCLUDESHORT]
        try:
            excludeShortTubes = float(excludeShortTubes)
        except:
            raise RuntimeError("Wrong argument %s. It expects a float value for the minimum size of tubes to be calibrated" % EXCLUDESHORT)
    else:
        # a tube with length 0 cannot be calibrated; this is the minimum value
        excludeShortTubes = 0.0

    # deal with OVERRIDEPEAKS parameter
    if OVERRIDEPEAKS in kwargs:
        overridePeaks = kwargs[OVERRIDEPEAKS]
        try:
            nPeaks = len(idealTube.getArray())
            # check the format of override peaks
            if not isinstance(overridePeaks, dict):
                raise 1
            for key in overridePeaks.keys():
                if not isinstance(key,int):
                    raise 2
                if key < 0 or key >= tubeSet.getNumTubes():
                    raise 3
                if len(overridePeaks[key]) != nPeaks:
                    raise 4
        except:
            raise RuntimeError("Wrong argument %s. It expects a dictionary with key as the tube index and the value as a list of peaks positions. Ex (3 peaks): overridePeaks = {1:[2,5.4,500]}"%OVERRIDEPEAKS)
    else:
        overridePeaks = dict()


    # deal with FITPOLIN parameter
    if FITPOLIN in kwargs:
        polinFit = kwargs[FITPOLIN]
        if polinFit not in [1, 2, 3]:
            raise RuntimeError("Wrong argument %s. It expects 1 for linear, 2 for quadratic, or 3 for 3rd order polynomial when fitting the pixel positions against the known positions" % FITPOLIN)
    else:
        polinFit = 2

    # deal with OUTPUT PEAK
    deletePeakTableAfter = False
    if OUTPUTPEAK in kwargs:
        outputPeak = kwargs[OUTPUTPEAK]
    else:
        outputPeak = False
    if isinstance(outputPeak, ITableWorkspace):
        if outputPeak.columnCount() < len(idealTube.getArray()):
            raise RuntimeError("Wrong argument %s. It expects a boolean flag, or a ITableWorksapce with columns (TubeId, Peak1,...,PeakM) for M = number of peaks given in knownPositions" % OUTPUTPEAK)
    else:
        if not outputPeak:
            deletePeakTableAfter = True
        # create the output peak table
        outputPeak = CreateEmptyTableWorkspace(OutputWorkspace="PeakTable")
        outputPeak.addColumn(type='str',name='TubeId')
        for i in range(len(idealTube.getArray())):
            outputPeak.addColumn(type='float',name='Peak%d'%(i+1))

    getCalibration(ws, tubeSet, calibTable, fitPar, idealTube, outputPeak,
                   overridePeaks, excludeShortTubes, plotTube, rangeList, polinFit)

    if deletePeakTableAfter:
        DeleteWorkspace(str(outputPeak))
        return calibTable
    else:
        return calibTable, outputPeak
Example #48
class CorrectTOFTest(unittest.TestCase):

    def setUp(self):
        # create sample workspace
        self.xmin = 2123.33867005 + 4005.75
        self.xmax = 2123.33867005 + 7995.75

        self._input_ws = CreateSampleWorkspace(Function="User Defined", UserDefinedFunction="name=LinearBackground, \
                                               A0=0.3;name=Gaussian, PeakCentre=8190, Height=5, Sigma=75", NumBanks=2,
                                               BankPixelWidth=1, XMin=self.xmin, XMax=self.xmax, BinWidth=10.5,
                                               BankDistanceFromSample=4.0, SourceDistanceFromSample=1.4, OutputWorkspace="ws")
        lognames = "wavelength,TOF1"
        logvalues = "6.0,2123.33867005"
        AddSampleLogMultiple(self._input_ws, lognames, logvalues)
        # create EPP table
        self._table = CreateEmptyTableWorkspace(OutputWorkspace="epptable")
        self._table.addColumn(type="double", name="PeakCentre")
        table_row = {'PeakCentre': 8189.5}
        for i in range(2):
            self._table.addRow(table_row)

    def tearDown(self):
        for wsname in ['ws', 'epptable']:
            if AnalysisDataService.doesExist(wsname):
                run_algorithm("DeleteWorkspace", Workspace=wsname)

    def testCorrection(self):
        # tests that correction is done properly
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
        velocity = h/(m_n*6.0e-10)
        t_el = 4.0e+6/velocity
        t_corr = np.arange(self.xmin, self.xmax + 1.0, 10.5) + t_el - (8189.5 - 2123.33867005)
        self.assertTrue(np.allclose(t_corr, wsoutput.readX(0)))            #sdd = 4
        self.assertTrue(np.allclose(t_corr + t_el, wsoutput.readX(1)))     #sdd = 8

        run_algorithm("DeleteWorkspace", Workspace=wsoutput)

    def testGroup(self):
        # tests whether the group of workspaces is accepted as an input
        ws2 = CloneWorkspace(self._input_ws)
        group = GroupWorkspaces([self._input_ws, ws2])
        OutputWorkspaceName = "output_wsgroup"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace='group', EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
        self.assertTrue(isinstance(wsoutput, WorkspaceGroup))
        self.assertEqual(2, wsoutput.getNumberOfEntries())

        run_algorithm("DeleteWorkspace", Workspace=group)
        run_algorithm("DeleteWorkspace", Workspace=wsoutput)

    def testConvertUnits(self):
        # test whether CorrectTof+ConvertUnits+ConvertToDistribution will give the same result as TOFTOFConvertTOFToDeltaE
        OutputWorkspaceName = "outputws1"
        alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)

        # convert units, convert to distribution
        alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName+'_dE')
        ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName+'_dE')
        alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)

        # create reference data for X axis
        tof1 = 2123.33867005
        dataX =  self._input_ws.readX(0) - tof1
        tel = 8189.5 - tof1
        factor = m_n*1e+15/eV
        newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
        # compare
        # self.assertEqual(newX[0], ws_dE.readX(0)[0])
        self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))

        # create reference data for Y axis and compare to the output
        tof = dataX[:-1] + 5.25
        newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
        # compare
        self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))

        run_algorithm("DeleteWorkspace", Workspace=ws_dE)
        run_algorithm("DeleteWorkspace", Workspace=wscorr)