Example no. 1
0
    def __init__(self, *args, **kwargs):
        """Initialization.

        Sets up the data source addresses, the processing parameters and
        the pulse-resolved data histories used in spectrum calculation.
        """
        super().__init__(*args, **kwargs)

        # Source addresses: XGM (pulse-resolved intensity), digitizer
        # (APD pulse integral, one property per channel) and soft mono
        # (photon energy).
        self._xgm_output_channel = ""
        self._xgm_ppt = "data.intensitySa3TD"
        self._digitizer_output_channel = ""
        self._digitizer_ppts = [
            f"digitizers.channel_1_{ch}.apd.pulseIntegral"
            for ch in _DIGITIZER_CHANNEL_NAMES
        ]
        self._mono_device_id = ""
        self._mono_ppt = "actualEnergy"

        # Flags marking which digitizer channels are requested.
        self._digitizer_channels = [False] * 4

        # Processing parameters; updated via the corresponding slots.
        self._n_pulses_per_train = _DEFAULT_N_PULSES_PER_TRAIN
        self._apd_stride = 1
        self._i0_threshold = _DEFAULT_I0_THRESHOLD
        self._window = _MAX_WINDOW
        self._correlation_window = _MAX_CORRELATION_WINDOW
        self._n_bins = _DEFAULT_N_BINS

        # Pulse-resolved histories: XGM intensity (i0), per-channel
        # digitizer pulse integrals (i1) and mono energy.
        self._i0 = SimpleSequence(max_len=_MAX_WINDOW)
        self._i1 = [
            SimpleSequence(max_len=_MAX_WINDOW)
            for _ in _DIGITIZER_CHANNEL_NAMES
        ]
        self._energy = SimpleSequence(max_len=_MAX_WINDOW)

        # Train-resolved (train ID, energy) history for the scan plot.
        self._energy_scan = SimplePairSequence(max_len=_MAX_WINDOW)
Example no. 2
0
    def __init__(self, *args, **kwargs):
        """Initialization.

        Sets up the two slow-data sources, the train-resolved data
        histories, the 1D/2D binning caches and the binning parameters.
        """
        super().__init__(*args, **kwargs)

        # Slow-data sources as (device ID, property) pairs.
        self._device_id1 = ""
        self._ppt1 = ""
        self._device_id2 = ""
        self._ppt2 = ""

        # Train-resolved histories of the slow data and the three
        # absorption signals.
        self._slow1 = SimpleSequence(max_len=self._MAX_POINTS)
        self._slow2 = SimpleSequence(max_len=self._MAX_POINTS)
        self._a13 = SimpleSequence(max_len=self._MAX_POINTS)
        self._a23 = SimpleSequence(max_len=self._MAX_POINTS)
        self._a21 = SimpleSequence(max_len=self._MAX_POINTS)

        # 1D binning results with respect to source 1.
        self._edges1 = None
        self._counts1 = None
        self._a13_stats = None
        self._a23_stats = None
        self._a21_stats = None

        # 2D binning results with respect to sources 1 and 2.
        self._edges2 = None
        self._a21_heat = None
        self._a21_heat_count = None

        # Binning parameters; the "auto" flags mark unbounded (inf)
        # range edges which are then derived from the data.
        self._bin_range1 = self.str2range(_DEFAULT_BIN_RANGE)
        self._actual_range1 = None
        self._auto_range1 = [True, True]
        self._n_bins1 = _DEFAULT_N_BINS
        self._bin_range2 = self.str2range(_DEFAULT_BIN_RANGE)
        self._actual_range2 = None
        self._auto_range2 = [True, True]
        self._n_bins2 = _DEFAULT_N_BINS

        # Flags requesting a full 1D / 2D re-binning.
        self._bin1d = True
        self._bin2d = True
Example no. 3
0
    def __init__(self, *args, **kwargs):
        """Initialization.

        Sets up the two vector source names and the bounded histories
        used to accumulate the full vectors over trains.
        """
        super().__init__(*args, **kwargs)

        # Names of the two selected vector sources (empty = not set).
        self._vector1 = ''
        self._vector2 = ''

        # Accumulated pulse-resolved history of the two vectors.
        self._vector1_full = SimpleSequence(max_len=6000)
        self._vector2_full = SimpleSequence(max_len=6000)
Example no. 4
0
    def __init__(self, *args, **kwargs):
        """Initialization."""
        super().__init__(*args, **kwargs)

        # Magnet data source.
        self._magnet_device_id = ""
        self._magnet_ppt = "value"  # amazing property name

        # Pulses with |current| below this threshold are dropped.
        self._current_threshold = _DEFAULT_CURRENT_THRESHOLD

        # Pulse-resolved magnet-current history.
        self._current = SimpleSequence(max_len=_MAX_WINDOW)

        # Train-resolved (train ID, current) history for the scan plot.
        self._current_scan = SimplePairSequence(max_len=_MAX_WINDOW)
Example no. 5
0
class VectorViewProcessor(QThreadWorker):
    """Vector view processor.

    Accumulates two user-selected pulse-resolved vectors over trains and
    publishes both the per-train and the accumulated data.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Names of the two selected vector sources (empty = not set).
        self._vector1 = ''
        self._vector2 = ''

        # Accumulated history of the two vectors.
        self._vector1_full = SimpleSequence(max_len=6000)
        self._vector2_full = SimpleSequence(max_len=6000)

    def onVector1Change(self, value: str):
        """Slot: select the first vector source."""
        self._vector1 = value

    def onVector2Change(self, value: str):
        """Slot: select the second vector source."""
        self._vector2 = value

    @profiler("Vector view processor")
    def process(self, data):
        """Override."""
        processed = data["processed"]

        vec1, vec2 = self._fetch_data(processed)

        if vec1 is not None and vec2 is not None:
            n1, n2 = len(vec1), len(vec2)
            if n1 != n2:
                raise ProcessingError(
                    f"Vectors have different lengths: {n1} and {n2}!")

            # The two histories must stay aligned; start over if they
            # have diverged (e.g. after a source change).
            if len(self._vector1_full) != len(self._vector2_full):
                self.reset()
            self._vector1_full.extend(vec1)
            self._vector2_full.extend(vec2)

        self.log.info(f"Train {processed.tid} processed")

        return {
            "vector1": vec1,
            "vector2": vec2,
            "vector1_full": self._vector1_full.data(),
            "vector2_full": self._vector2_full.data(),
        }

    def _fetch_data(self, processed):
        """Look up the two selected vectors in the processed data.

        Returns a list [vec1, vec2]; an unselected/unknown source name
        yields None. Raises ProcessingError when a selected source is
        not available.
        """
        vectors = []
        for name in (self._vector1, self._vector2):
            if name == 'ROI FOM':
                vec = processed.pulse.roi.fom
                if vec is None:
                    raise ProcessingError(
                        "Pulse-resolved ROI FOM is not available!")
            elif name == 'XGM intensity':
                vec = processed.pulse.xgm.intensity
                if vec is None:
                    raise ProcessingError("XGM intensity is not available!")
            elif name == 'Digitizer pulse integral':
                digit = processed.pulse.digitizer
                vec = digit[digit.ch_normalizer].pulse_integral
                if vec is None:
                    raise ProcessingError(
                        "Digitizer pulse integral is not available!")
            else:
                vec = None

            vectors.append(vec)
        return vectors

    def reset(self):
        """Override."""
        for seq in (self._vector1_full, self._vector2_full):
            seq.reset()
Example no. 6
0
class TrXasProcessor(QThreadWorker, _BinMixin):
    """Time-resolved XAS processor.

    The implementation of tr-XAS processor is easier than bin processor
    since it cannot have empty device ID or property. Moreover, it does
    not include VFOM heatmap.

    Absorption ROI-i/ROI-j is defined as -log(sum(ROI-i)/sum(ROI-j)).

    Attributes:
        _device_id1 (str): device ID 1.
        _ppt1 (str): property of device 1.
        _device_id2 (str): device ID 2.
        _ppt2 (str): property of device 2.
        _slow1 (SimpleSequence): store train-resolved data of source 1.
        _slow2 (SimpleSequence): store train-resolved data of source 2.
        _a13 (SimpleSequence): store train-resolved absorption ROI1/ROI3.
        _a23 (SimpleSequence): store train-resolved absorption ROI2/ROI3.
        _a21 (SimpleSequence): store train-resolved absorption ROI2/ROI1.
        _edges1 (numpy.array): edges of bin 1. shape = (_n_bins1 + 1,)
        _counts1 (numpy.array): counts of bin 1. shape = (_n_bins1,)
        _a13_stats (numpy.array): 1D binning of absorption ROI1/ROI3 with
            respect to source 1.
        _a23_stats (numpy.array): 1D binning of absorption ROI2/ROI3 with
            respect to source 1.
        _a21_stats (numpy.array): 1D binning of absorption ROI2/ROI1 with
            respect to source 1.
        _edges2 (numpy.array): edges of bin 2. shape = (_n_bins2 + 1,)
        _a21_heat (numpy.array): 2D binning of absorption ROI2/ROI1.
            shape = (_n_bins2, _n_bins1)
        _a21_heat_count (numpy.array): counts of 2D binning of absorption
            ROI2/ROI1. shape = (_n_bins2, _n_bins1)
        _bin_range1 (tuple): bin 1 range requested.
        _actual_range1 (tuple): actual bin range used in bin 1.
        _n_bins1 (int): number of bins of bin 1.
        _bin_range2 (tuple): bin 2 range requested.
        _actual_range2 (tuple): actual bin range used in bin 2.
        _n_bins2 (int): number of bins of bin 2.
        _bin1d (bool): a flag indicates whether data need to be re-binned
            with respect to source 1.
        _bin2d (bool): a flag indicates whether data need to be re-binned
            with respect to both source 1 and source 2.
        _reset (bool): True for clearing all the existing data.
    """

    # 100 pulses/train * 60 seconds/minute * 60 minutes = 360,000
    _MAX_POINTS = 100 * 60 * 60

    def __init__(self, *args, **kwargs):
        """Initialization."""
        super().__init__(*args, **kwargs)

        # Slow-data sources as (device ID, property) pairs.
        self._device_id1 = ""
        self._ppt1 = ""
        self._device_id2 = ""
        self._ppt2 = ""

        # Train-resolved histories of the slow data and the three
        # absorption signals.  They must always have the same length.
        self._slow1 = SimpleSequence(max_len=self._MAX_POINTS)
        self._slow2 = SimpleSequence(max_len=self._MAX_POINTS)
        self._a13 = SimpleSequence(max_len=self._MAX_POINTS)
        self._a23 = SimpleSequence(max_len=self._MAX_POINTS)
        self._a21 = SimpleSequence(max_len=self._MAX_POINTS)

        # 1D binning results with respect to source 1.
        self._edges1 = None
        self._counts1 = None
        self._a13_stats = None
        self._a23_stats = None
        self._a21_stats = None

        # 2D binning results with respect to sources 1 and 2.
        self._edges2 = None
        self._a21_heat = None
        self._a21_heat_count = None

        # Binning parameters; the "auto" flags mark unbounded (inf)
        # range edges which are then derived from the data.
        self._bin_range1 = self.str2range(_DEFAULT_BIN_RANGE)
        self._actual_range1 = None
        self._auto_range1 = [True, True]
        self._n_bins1 = _DEFAULT_N_BINS
        self._bin_range2 = self.str2range(_DEFAULT_BIN_RANGE)
        self._actual_range2 = None
        self._auto_range2 = [True, True]
        self._n_bins2 = _DEFAULT_N_BINS

        # Flags requesting a full 1D / 2D re-binning.
        self._bin1d = True
        self._bin2d = True

    def onDeviceId1Changed(self, value: str):
        self._device_id1 = value

    def onProperty1Changed(self, value: str):
        self._ppt1 = value

    def onDeviceId2Changed(self, value: str):
        self._device_id2 = value

    def onProperty2Changed(self, value: str):
        self._ppt2 = value

    def onNBins1Changed(self, value: str):
        n_bins = int(value)
        if n_bins != self._n_bins1:
            self._n_bins1 = n_bins
            # Both the 1D and 2D binning depend on the bin-1 parameters.
            self._bin1d = True
            self._bin2d = True

    def onBinRange1Changed(self, value: tuple):
        if value != self._bin_range1:
            self._bin_range1 = value
            self._auto_range1[:] = [math.isinf(v) for v in value]
            self._bin1d = True
            self._bin2d = True

    def onNBins2Changed(self, value: str):
        n_bins = int(value)
        if n_bins != self._n_bins2:
            self._n_bins2 = n_bins
            self._bin2d = True

    def onBinRange2Changed(self, value: tuple):
        if value != self._bin_range2:
            self._bin_range2 = value
            self._auto_range2[:] = [math.isinf(v) for v in value]
            # Fix: keep consistent with onBinRange1Changed - a new bin-2
            # range invalidates the existing 2D binning.
            self._bin2d = True

    def sources(self):
        """Override."""
        return [
            (self._device_id1, self._ppt1, 0),
            (self._device_id2, self._ppt2, 0),
        ]

    @profiler("tr-XAS Processor")
    def process(self, data):
        """Override."""
        processed = data["processed"]

        roi1, roi2, roi3 = None, None, None
        a13, a23, a21, s1, s2 = None, None, None, None, None
        try:
            roi1, roi2, roi3, a13, a23, a21, s1, s2 = \
                self._update_data_point(processed, data['raw'])
        except ProcessingError as e:
            self.log.error(repr(e))

        # Re-bin from scratch whenever the actual bin-1 range changed;
        # otherwise update the existing binning incrementally.
        actual_range1 = self.get_actual_range(
            self._slow1.data(), self._bin_range1, self._auto_range1)
        if actual_range1 != self._actual_range1:
            self._actual_range1 = actual_range1
            self._bin1d = True
            self._bin2d = True

        if self._bin1d:
            self._new_1d_binning()
            self._bin1d = False
        else:
            if a21 is not None:
                self._update_1d_binning(a13, a23, a21, s1)

        actual_range2 = self.get_actual_range(
            self._slow2.data(), self._bin_range2, self._auto_range2)
        if actual_range2 != self._actual_range2:
            self._actual_range2 = actual_range2
            self._bin2d = True

        if self._bin2d:
            self._new_2d_binning()
            self._bin2d = False
        else:
            if a21 is not None:
                # Fix: source 2 maps onto the "energy" axis (_edges2) and
                # source 1 onto the "delay" axis (_edges1), matching the
                # edge arrays searched in _update_2d_binning.
                self._update_2d_binning(a21, s2, s1)

        self.log.info(f"Train {processed.tid} processed")

        return {
            "roi1": roi1,
            "roi2": roi2,
            "roi3": roi3,
            "centers1": self.edges2centers(self._edges1)[0],
            "counts1": self._counts1,
            "centers2": self.edges2centers(self._edges2)[0],
            "a13_stats": self._a13_stats,
            "a23_stats": self._a23_stats,
            "a21_stats": self._a21_stats,
            "a21_heat": self._a21_heat,
            "a21_heat_count": self._a21_heat_count
        }

    def _update_data_point(self, processed, raw):
        """Compute absorptions and append one data point to the histories.

        Raises:
            ProcessingError: if a ROI or a slow-data source is not
                available, or a ROI sum is non-positive.
        """
        roi = processed.roi
        masked = processed.image.masked_mean

        # get three ROIs
        roi1 = roi.geom1.rect(masked)
        if roi1 is None:
            raise ProcessingError("ROI1 is not available!")
        roi2 = roi.geom2.rect(masked)
        if roi2 is None:
            raise ProcessingError("ROI2 is not available!")
        roi3 = roi.geom3.rect(masked)
        if roi3 is None:
            raise ProcessingError("ROI3 is not available!")

        # get sums of the three ROIs
        sum1 = nansum(roi1)
        if sum1 <= 0:
            raise ProcessingError("ROI1 sum <= 0!")
        sum2 = nansum(roi2)
        if sum2 <= 0:
            raise ProcessingError("ROI2 sum <= 0!")
        sum3 = nansum(roi3)
        if sum3 <= 0:
            raise ProcessingError("ROI3 sum <= 0!")

        # Fix: fetch the slow data *before* touching any history so that
        # a missing slow source cannot leave the absorption histories
        # longer than the slow-data histories (all five sequences must
        # keep the same length for binning).
        s1 = self.getPropertyData(raw, self._device_id1, self._ppt1)
        s2 = self.getPropertyData(raw, self._device_id2, self._ppt2)

        # calculate absorptions
        a13 = -np.log(sum1 / sum3)
        a23 = -np.log(sum2 / sum3)
        a21 = -np.log(sum2 / sum1)

        # update historic data
        self._a13.append(a13)
        self._a23.append(a23)
        self._a21.append(a21)
        self._slow1.append(s1)
        self._slow2.append(s2)

        return roi1, roi2, roi3, a13, a23, a21, s1, s2

    def _new_1d_binning(self):
        """Re-bin all three absorptions with respect to source 1."""
        self._a13_stats, _, _ = compute_spectrum_1d(
            self._slow1.data(),
            self._a13.data(),
            n_bins=self._n_bins1,
            bin_range=self._actual_range1,
            edge2center=False,
            nan_to_num=True
        )

        self._a23_stats, _, _ = compute_spectrum_1d(
            self._slow1.data(),
            self._a23.data(),
            n_bins=self._n_bins1,
            bin_range=self._actual_range1,
            edge2center=False,
            nan_to_num=True
        )

        self._a21_stats, edges, counts = compute_spectrum_1d(
            self._slow1.data(),
            self._a21.data(),
            n_bins=self._n_bins1,
            bin_range=self._actual_range1,
            edge2center=False,
            nan_to_num=True
        )
        self._edges1 = edges
        self._counts1 = counts

    def _update_1d_binning(self, a13, a23, a21, delay):
        """Incrementally add one point to the 1D binning (running mean)."""
        iloc_x = self.searchsorted(self._edges1, delay)
        if 0 <= iloc_x < self._n_bins1:
            self._counts1[iloc_x] += 1
            count = self._counts1[iloc_x]
            self._a13_stats[iloc_x] += (a13 - self._a13_stats[iloc_x]) / count
            self._a23_stats[iloc_x] += (a23 - self._a23_stats[iloc_x]) / count
            self._a21_stats[iloc_x] += (a21 - self._a21_stats[iloc_x]) / count

    def _new_2d_binning(self):
        """Re-bin absorption ROI2/ROI1 with respect to sources 1 and 2."""
        # Note: 'stats.binned_statistic_2d' returns the statistic with
        # x and y axes swapped compared to conventional image data, and
        # returns (statistic, x_edge, y_edge, binnumber) - hence the
        # third return value is the source-2 ("energy") edge array.
        self._a21_heat, _, self._edges2, _ = \
            stats.binned_statistic_2d(self._slow1.data(),
                                      self._slow2.data(),
                                      self._a21.data(),
                                      'mean',
                                      [self._n_bins1, self._n_bins2],
                                      [self._actual_range1, self._actual_range2])
        np.nan_to_num(self._a21_heat, copy=False)

        self._a21_heat_count, _, _, _ = \
            stats.binned_statistic_2d(self._slow1.data(),
                                      self._slow2.data(),
                                      self._a21.data(),
                                      'count',
                                      [self._n_bins1, self._n_bins2],
                                      [self._actual_range1, self._actual_range2])
        np.nan_to_num(self._a21_heat_count, copy=False)

    def _update_2d_binning(self, a21, energy, delay):
        """Incrementally add one point to the 2D binning (running mean)."""
        iloc_x = self.searchsorted(self._edges2, energy)
        iloc_y = self.searchsorted(self._edges1, delay)
        if 0 <= iloc_x < self._n_bins2 \
                and 0 <= iloc_y < self._n_bins1:
            self._a21_heat_count[iloc_y, iloc_x] += 1
            self._a21_heat[iloc_y, iloc_x] += \
                (a21 - self._a21_heat[iloc_y, iloc_x]) / \
                self._a21_heat_count[iloc_y, iloc_x]

    def reset(self):
        """Override."""
        self._slow1.reset()
        self._slow2.reset()
        self._a13.reset()
        self._a23.reset()
        self._a21.reset()

        self._edges1 = None
        self._counts1 = None
        self._a13_stats = None
        self._a23_stats = None
        self._a21_stats = None
        self._edges2 = None
        self._a21_heat = None
        self._a21_heat_count = None

        self._bin1d = True
        self._bin2d = True
Example no. 7
0
class XasTimXmcdProcessor(XasTimProcessor):
    """XAS-TIM XMCD processor.

    Attributes:
        _magnet_device_id (str): Magnet device ID.
        _magnet_ppt (str): Magnet property name for current.
        _current_threshold (float): Lower boundary of the magnet current.
            Pulses will be dropped if the absolute current is below the
            threshold.
        _current (SimpleSequence): Store pulse magnet currents.
        _current_scan (SimplePairSequence): A sequence of (train ID, current).
    """
    def __init__(self, *args, **kwargs):
        """Initialization."""
        super().__init__(*args, **kwargs)

        self._magnet_device_id = ""
        self._magnet_ppt = "value"  # amazing property name

        self._current_threshold = _DEFAULT_CURRENT_THRESHOLD

        self._current = SimpleSequence(max_len=_MAX_WINDOW)

        self._current_scan = SimplePairSequence(max_len=_MAX_WINDOW)

    def onMagnetDeviceChanged(self, device: str):
        self._magnet_device_id = device

    def onMagnetThresholdChanged(self, value: str):
        self._current_threshold = float(value)

    def sources(self):
        """Override."""
        srcs = super().sources()
        srcs.append((self._magnet_device_id, self._magnet_ppt, 0))
        return srcs

    @profiler("XAS-TIM-XMCD Processor")
    def process(self, data):
        """Override."""
        # Update the parent's i0/i1/energy histories and fetch the
        # per-train data.
        tid, xgm_intensity, digitizer_apds, energy = \
            self._update_data_history(data)

        # The magnet current is train-resolved; replicate it per pulse
        # so its history stays aligned with the pulse histories.
        current = self.getPropertyData(data['raw'], self._magnet_device_id,
                                       self._magnet_ppt)
        self._current.extend([current] * len(xgm_intensity))
        self._current_scan.append((tid, current))

        # apply filters: keep pulses with sufficient XGM intensity AND
        # sufficient absolute magnet current, at most `_window` of them
        flt = np.logical_and(
            self._i0.data() > self._i0_threshold,
            np.abs(self._current.data()) > self._current_threshold)
        i0 = self._i0.data()[flt][-self._window:]
        i1 = [None] * 4
        for i, _item in enumerate(self._i1):
            if self._digitizer_channels[i]:
                i1[i] = _item.data()[flt][-self._window:]
        energy = self._energy.data()[flt][-self._window:]
        current = self._current.data()[flt][-self._window:]

        # compute spectra, split by magnet-field polarity
        p_flt = current > 0
        n_flt = current < 0
        e_p, e_n = energy[p_flt], energy[n_flt]
        # One [positive, negative] spectrum pair per digitizer channel;
        # disabled channels get an immutable (None, None) placeholder.
        stats = []
        for i, _item in enumerate(i1):
            if self._digitizer_channels[i]:
                mcp_stats_p, _, _ = compute_spectrum_1d(e_p,
                                                        _item[p_flt],
                                                        n_bins=self._n_bins)
                mcp_stats_n, _, _ = compute_spectrum_1d(e_n,
                                                        _item[n_flt],
                                                        n_bins=self._n_bins)
                stats.append([mcp_stats_p, mcp_stats_n])
            else:
                # Do not calculate spectrum which is not requested to display
                stats.append((None, None))

        i0_stats_p, _, _ = compute_spectrum_1d(e_p,
                                               i0[p_flt],
                                               n_bins=self._n_bins)
        i0_stats_n, _, _ = compute_spectrum_1d(e_n,
                                               i0[n_flt],
                                               n_bins=self._n_bins)
        i0_stats, centers, counts = compute_spectrum_1d(energy,
                                                        i0,
                                                        n_bins=self._n_bins)

        # Normalize each channel's spectra by the matching-polarity i0
        # spectrum.  NOTE(review): the minus sign inside the log
        # presumably accounts for the MCP signal being negative —
        # confirm against the instrument documentation.
        for i, (p, n) in enumerate(stats):
            if p is not None:
                if i < 3:
                    stats[i][0] = -np.log(-p / i0_stats_p)
                    stats[i][1] = -np.log(-n / i0_stats_n)
                else:
                    # MCP4 has a different spectrum
                    stats[i][0] = -p / i0_stats_p
                    stats[i][1] = -n / i0_stats_n

        stats.append(i0_stats)

        self.log.info(f"Train {tid} processed")

        return {
            "xgm_intensity": xgm_intensity,
            "digitizer_apds": digitizer_apds,
            "energy_scan": self._energy_scan.data(),
            "current_scan": self._current_scan.data(),
            "correlation_length": self._correlation_window,
            "i0": i0,
            "i1": i1,
            "spectra": (stats, centers, counts),
        }

    def reset(self):
        """Override."""
        super().reset()
        self._current.reset()
        self._current_scan.reset()
    def testSimpleSequence(self):
        """Test the SimpleSequence container.

        Covers appending/extending, the Sequence protocol, reset,
        rollover at maximum length and the from_array constructor.
        """
        MAX_LENGTH = 100

        hist = SimpleSequence(max_len=MAX_LENGTH)
        self.assertEqual(0, len(hist))

        hist.append(3)
        hist.append(4)
        hist.extend([1, 2])
        ax = hist.data()
        np.testing.assert_array_almost_equal([3, 4, 1, 2], ax)

        # test Sequence protocol
        self.assertEqual(3, hist[0])
        self.assertEqual(2, hist[-1])
        with self.assertRaises(IndexError):
            hist[4]
        self.assertEqual(4, len(hist))

        # more test on extend: filling up to max length drops the
        # oldest items (the initial 3 and 4)
        hist.extend([3] * (MAX_LENGTH - 2))
        np.testing.assert_array_almost_equal([1, 2] + [3] * (MAX_LENGTH - 2),
                                             hist.data())
        self.assertEqual(100, len(hist))

        # test reset
        hist.reset()
        np.testing.assert_array_almost_equal([], hist.data())

        # ----------------------------
        # test when max length reached
        # ----------------------------

        overflow = 10
        for i in range(MAX_LENGTH + overflow):
            hist.append(i)
        ax = hist.data()
        self.assertEqual(MAX_LENGTH, len(ax))
        self.assertEqual(overflow, ax[0])
        self.assertEqual(MAX_LENGTH + overflow - 1, ax[-1])

        # ----------------------------
        # test when capacity reached
        # ----------------------------
        for i in range(MAX_LENGTH):
            hist.append(i)
        ax = hist.data()
        self.assertEqual(MAX_LENGTH, len(ax))
        self.assertEqual(0, ax[0])
        self.assertEqual(MAX_LENGTH - 1, ax[-1])

        # ----------------------------
        # test constructing from array
        # ----------------------------
        hist = SimpleSequence.from_array([1, 2, 3])
        self.assertEqual(3, len(hist))
Example no. 9
0
class XasTimProcessor(QThreadWorker):
    """XAS-TIM processor.

    Attributes:
        _xgm_output_channel (str): XGM output channel name.
        _xgm_ppt (str): XGM property name for pulse-resolved intensity.
        _digitizer_output_channel (str): Digitizer output channel name.
        _digitizer_ppts (list): A list of property names for different
            digitizer channels.
        _mono_device_id (str): Soft mono device ID.
        _mono_ppt (str): Soft mono property name for energy.
        _digitizer_channels (list): A list of boolean to indicates the
            required digitizer channel.
        _n_pulses_per_train (int): Number of pulses per train.
        _apd_stride (int): Pulse index stride of the digitizer APD data.
        _i0_threshold (float): Lower boundary of the XGM intensity. Pulses
            will be dropped if the intensity is below the threshold.
        _window (int): Maximum number of pulses used to calculating spectra.
        _correlation_window (int): Maximum number of pulses in correlation
            plots. It includes the pulses which are dropped by the filter.
        _n_bins (int): Number of bins in spectra calculation.
        _i0 (SimpleSequence): Store XGM pulse intensities.
        _i1 (list): A list of SimpleSequence, which stores pulsed apd data
            for each digitizer channel.
        _energy (SimpleSequence): Store pulse energies.
        _energy_scan (SimplePairSequence): A sequence of (train ID, energy).
    """
    def __init__(self, *args, **kwargs):
        """Initialization."""
        super().__init__(*args, **kwargs)

        # Source addresses (see class docstring).
        self._xgm_output_channel = ""
        self._xgm_ppt = "data.intensitySa3TD"
        self._digitizer_output_channel = ""
        self._digitizer_ppts = [
            f"digitizers.channel_1_{ch}.apd.pulseIntegral"
            for ch in _DIGITIZER_CHANNEL_NAMES
        ]
        self._mono_device_id = ""
        self._mono_ppt = "actualEnergy"

        self._digitizer_channels = [False] * 4

        # Processing parameters; updated via the corresponding slots.
        self._n_pulses_per_train = _DEFAULT_N_PULSES_PER_TRAIN
        self._apd_stride = 1
        self._i0_threshold = _DEFAULT_I0_THRESHOLD
        self._window = _MAX_WINDOW
        self._correlation_window = _MAX_CORRELATION_WINDOW
        self._n_bins = _DEFAULT_N_BINS

        # Pulse-resolved data histories.
        self._i0 = SimpleSequence(max_len=_MAX_WINDOW)
        self._i1 = [
            SimpleSequence(max_len=_MAX_WINDOW)
            for _ in _DIGITIZER_CHANNEL_NAMES
        ]
        self._energy = SimpleSequence(max_len=_MAX_WINDOW)

        self._energy_scan = SimplePairSequence(max_len=_MAX_WINDOW)

    def onXgmOutputChannelChanged(self, ch: str):
        self._xgm_output_channel = ch

    def onDigitizerOutputChannelChanged(self, ch: str):
        self._digitizer_output_channel = ch

    def onDigitizerChannelsChanged(self, index: int, value: bool):
        self._digitizer_channels[index] = value
        if value:
            # reset the data history when a new channel is added in order to
            # ensure the same length of data history
            self.reset()

    def onMonoDeviceChanged(self, device: str):
        self._mono_device_id = device

    def onNPulsesPerTrainChanged(self, value: str):
        self._n_pulses_per_train = int(value)

    def onApdStrideChanged(self, value: str):
        self._apd_stride = int(value)

    def onI0ThresholdChanged(self, value: str):
        self._i0_threshold = float(value)

    def onPulseWindowChanged(self, value: str):
        self._window = int(value)

    def onCorrelationWindowChanged(self, value: str):
        self._correlation_window = int(value)

    def onNoBinsChanged(self, value: str):
        self._n_bins = int(value)

    def sources(self):
        """Override."""
        # NOTE(review): the third tuple element presumably flags the
        # source as pulse-resolved (1) or train-resolved (0) -- confirm
        # against the QThreadWorker contract.
        return [(self._xgm_output_channel, self._xgm_ppt, 1),
                *[(self._digitizer_output_channel, ppt, 1)
                  for ppt in self._digitizer_ppts],
                (self._mono_device_id, self._mono_ppt, 0)]

    def _update_data_history(self, data):
        """Fetch, validate and accumulate the per-train data.

        Returns (train ID, sliced XGM intensities, sliced digitizer APD
        data, mono energy) and extends the pulse-resolved histories.

        Raises:
            ProcessingError: if no digitizer channel is selected, or a
                selected source is missing or shorter than the
                configured number of pulses per train.
        """
        data, meta = data["raw"], data["meta"]

        tid = self.getTrainId(meta)

        xgm_intensity = self.getPropertyData(data, self._xgm_output_channel,
                                             self._xgm_ppt)

        digitizer_apds = []
        if sum(self._digitizer_channels) == 0:
            raise ProcessingError(
                "At least one digitizer channel is required!")
        for i, ppt in enumerate(self._digitizer_ppts):
            if self._digitizer_channels[i]:
                apd = self.getPropertyData(data,
                                           self._digitizer_output_channel, ppt)
                if apd is None:
                    raise ProcessingError(
                        f"Digitizer channel {ppt} not found!")
                digitizer_apds.append(apd)
            else:
                # Unselected channels keep a None placeholder so the
                # list always has one entry per channel.
                digitizer_apds.append(None)

        energy = self.getPropertyData(data, self._mono_device_id,
                                      self._mono_ppt)

        # Check and slice XGM intensity.
        pulse_slicer = slice(0, self._n_pulses_per_train)
        if len(xgm_intensity) < self._n_pulses_per_train:
            raise ProcessingError(f"Length of {self._xgm_ppt} is less "
                                  f"than {self._n_pulses_per_train}: "
                                  f"actual {len(xgm_intensity)}")
        xgm_intensity = xgm_intensity[pulse_slicer]

        # Check and slice digitizer APD data.
        for i, (apd,
                ppt) in enumerate(zip(digitizer_apds, self._digitizer_ppts)):
            if self._digitizer_channels[i]:
                # Down-sample by the APD stride before length-checking.
                v = apd[::self._apd_stride]
                if len(v) < self._n_pulses_per_train:
                    raise ProcessingError(
                        f"Length of {ppt} (sliced) is less than "
                        f"{self._n_pulses_per_train}: actual {len(v)}")
                digitizer_apds[i] = v[:self._n_pulses_per_train]

        # update data history (only after all checks passed, so the
        # sequences stay the same length)
        self._i0.extend(xgm_intensity)
        for i, apd in enumerate(digitizer_apds):
            if self._digitizer_channels[i]:
                self._i1[i].extend(apd)
        # The mono energy is train-resolved; replicate it per pulse.
        self._energy.extend([energy] * len(xgm_intensity))

        self._energy_scan.append((tid, energy))

        return tid, xgm_intensity, digitizer_apds, energy

    @profiler("XAS-TIM Processor")
    def process(self, data):
        """Override."""
        tid, xgm_intensity, digitizer_apds, energy = \
            self._update_data_history(data)

        # apply filter: keep pulses with sufficient XGM intensity, at
        # most `_window` of them
        flt = self._i0.data() > self._i0_threshold
        i0 = self._i0.data()[flt][-self._window:]
        i1 = [None] * 4
        for i, _item in enumerate(self._i1):
            if self._digitizer_channels[i]:
                i1[i] = _item.data()[flt][-self._window:]
        energy = self._energy.data()[flt][-self._window:]

        # compute spectra
        stats = []
        for i, item in enumerate(i1):
            if self._digitizer_channels[i]:
                mcp_stats, _, _ = compute_spectrum_1d(energy,
                                                      item,
                                                      n_bins=self._n_bins)
                stats.append(mcp_stats)
            else:
                # Do not calculate spectrum which is not requested to display
                stats.append(None)

        i0_stats, centers, counts = compute_spectrum_1d(energy,
                                                        i0,
                                                        n_bins=self._n_bins)
        # Normalize each channel's spectrum by the i0 spectrum.
        # NOTE(review): the minus sign inside the log presumably accounts
        # for the MCP signal being negative -- confirm against the
        # instrument documentation.
        for i, _item in enumerate(stats):
            if _item is not None:
                if i < 3:
                    stats[i] = -np.log(-_item / i0_stats)
                else:
                    # MCP4 has a different spectrum
                    stats[i] = -_item / i0_stats
        stats.append(i0_stats)

        self.log.info(f"Train {tid} processed")

        return {
            "xgm_intensity": xgm_intensity,
            "digitizer_apds": digitizer_apds,
            "energy_scan": self._energy_scan.data(),
            "correlation_length": self._correlation_window,
            "i0": i0,
            "i1": i1,
            "spectra": (stats, centers, counts),
        }

    def reset(self):
        """Override."""
        self._i0.reset()
        for item in self._i1:
            item.reset()
        self._energy.reset()
        self._energy_scan.reset()