Example #1
def _stitch_event_data(
        item: sc.DataArray, frames: sc.Dataset, dim: str, new_dim: str,
        bins: Union[int, sc.Variable]) -> Union[sc.DataArray, dict]:

    # Interleave per-frame [time_min, time_max] limits into a single edge list
    edges = sc.flatten(sc.transpose(
        sc.concat([frames["time_min"].data, frames["time_max"].data], 'dummy'),
        dims=['frame', 'dummy']),
        to=dim)

    binned = sc.bin(item, edges=[edges])

    for i in range(frames.sizes["frame"]):
        # TODO: temporary fix: operate on .value because the read-only flag is set
        # Only the even-indexed bins contain frame events; the odd-indexed
        # bins are the gaps between frames (masked below).
        correction = frames["time_correction"].data["frame", i]
        binned[dim, i * 2].value.coords[dim] -= correction

    erase = None
    if new_dim != dim:
        binned.bins.coords[new_dim] = binned.bins.coords[dim]
        del binned.bins.coords[dim]
        erase = [dim]

    binned.masks['frame_gaps'] = (
        sc.arange(dim, 2 * frames.sizes["frame"] - 1) % 2).astype(bool)

    new_edges = sc.concat([(frames["time_min"]["frame", 0] -
                            frames["time_correction"]["frame", 0]).data,
                           (frames["time_max"]["frame", -1] -
                            frames["time_correction"]["frame", -1]).data],
                          new_dim)
    return sc.bin(binned, edges=[new_edges], erase=erase)
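
A minimal, self-contained sketch of the interleaved-edge construction above, with two made-up frames (all values are arbitrary):

import scipp as sc

frames = sc.Dataset({
    'time_min': sc.array(dims=['frame'], values=[0.0, 10.0], unit='us'),
    'time_max': sc.array(dims=['frame'], values=[8.0, 18.0], unit='us'),
})
edges = sc.flatten(sc.transpose(
    sc.concat([frames['time_min'].data, frames['time_max'].data], 'dummy'),
    dims=['frame', 'dummy']),
    to='time')
# edges is now [0.0, 8.0, 10.0, 18.0] us: one [min, max] pair per frame,
# so even-indexed bins hold frame events and odd-indexed bins are the
# inter-frame gaps that the 'frame_gaps' mask hides.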
Example #2
    def _bin_events(data: DetectorData):
        if not bin_by_pixel:
            # If loading "raw" data, leave binned by pulse.
            return data.event_data
        if data.detector_ids is None:
            # If detector ids were not found in an associated detector group,
            # we will just have to bin according to whatever ids we have
            # events for (pixels with no recorded events will not get a bin).
            event_id = data.event_data.bins.constituents['data'].coords[
                _detector_dimension]
            data.detector_ids = sc.array(dims=[_detector_dimension],
                                         values=np.unique(event_id.values))

        # Events in the NeXus file are effectively binned by pulse
        # (because they are recorded chronologically)
        # but for reduction it is more useful to bin by detector id
        # Broadcast pulse times to events
        data.event_data.bins.coords['pulse_time'] = sc.bins_like(
            data.event_data, fill_value=data.event_data.coords['pulse_time'])
        # TODO Look into using `erase=[_pulse_dimension]` instead of binning
        # underlying buffer. Must prove that performance can be unaffected.
        da = sc.bin(data.event_data.bins.constituents['data'],
                    groups=[data.detector_ids])
        # Add a single time-of-flight bin
        da = sc.DataArray(data=sc.broadcast(da.data,
                                            dims=da.dims + [_time_of_flight],
                                            shape=da.shape + [1]),
                          coords={_detector_dimension: data.detector_ids})
        if pixel_positions_loaded:
            # TODO: the name 'position' should probably not be hard-coded but
            # moved to a variable that can be changed in a single place.
            da.coords['position'] = data.pixel_positions
        return da
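
The core pattern here is sc.bin(..., groups=[...]), which regroups a flat event table by a discrete coordinate. A minimal sketch with a made-up event table:

import numpy as np
import scipp as sc

events = sc.DataArray(
    data=sc.array(dims=['event'], values=[1.0, 1.0, 1.0, 1.0], unit='counts'),
    coords={'detector_id': sc.array(dims=['event'], values=[0, 2, 0, 1])})
# Derive the group labels from the events themselves, as in the fallback above
detector_ids = sc.array(dims=['detector_id'],
                        values=np.unique(events.coords['detector_id'].values))
by_pixel = sc.bin(events, groups=[detector_ids])
# by_pixel has one bin per detector id, each holding that pixel's events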
Example #3
def _getitem(self, select: ScippIndex) -> sc.DataArray:
    # Note that ._detector_data._load_detector provides a different loading
    # facility for NXdetector but handles only loading of detector_number,
    # as needed for event data loading.
    if self._is_events:
        # If there is a 'detector_number' field it is used to bin events into
        # detector pixels. Note that due to the nature of NXevent_data, which
        # stores events from all pixels in random order, we always have to
        # load the entire bank. Slicing with the provided 'select' is done
        # while binning.
        event_data = self._nxbase[...]
        if self.detector_number is None:
            # Ideally we would prefer to use np.unique, but a quick experiment
            # shows that this can easily be 100x slower, so it is not an
            # option. In practice most files have contiguous event_id values
            # within a bank (NXevent_data).
            id_min = event_data.bins.coords['event_id'].min()
            id_max = event_data.bins.coords['event_id'].max()
            detector_numbers = sc.arange(dim='detector_number',
                                         start=id_min.value,
                                         stop=id_max.value + 1,
                                         dtype=id_min.dtype)
        else:
            detector_numbers = self.detector_number[select]
        event_data.bins.coords['detector_number'] = \
            event_data.bins.coords.pop('event_id')
        # After loading raw NXevent_data it is guaranteed that the event table
        # is contiguous and that there is no masking. We can therefore use the
        # more efficient approach of binning from scratch instead of erasing
        # the 'pulse' binning defined by NXevent_data.
        return sc.bin(event_data.bins.constituents['data'],
                      groups=[detector_numbers])
    return self._nxbase[select]
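
The contiguous-id shortcut above can be tried in isolation; a small sketch with made-up event ids:

import scipp as sc

event_id = sc.array(dims=['event'], values=[3, 5, 4, 3])
id_min = event_id.min()
id_max = event_id.max()
detector_numbers = sc.arange(dim='detector_number',
                             start=id_min.value,
                             stop=id_max.value + 1,
                             dtype=id_min.dtype)
# detector_numbers is [3, 4, 5]: the full (assumed contiguous) id range,
# obtained without the cost of np.unique on a large event table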
Example #4
def test_bins_arithmetic():
    var = sc.Variable(dims=['event'], values=[1.0, 2.0, 3.0, 4.0])
    table = sc.DataArray(var, {'x': var})
    binned = sc.bin(table, [sc.Variable(dims=['x'], values=[1.0, 5.0])])
    hist = sc.DataArray(
        data=sc.Variable(dims=['x'], values=[1.0, 2.0]),
        coords={'x': sc.Variable(dims=['x'], values=[1.0, 3.0, 5.0])})
    binned.bins *= sc.lookup(func=hist, dim='x')
    assert sc.is_equal(
        binned.bins.data.data,
        sc.Variable(dims=['event'], values=[1.0, 2.0, 6.0, 8.0]))
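
Here sc.lookup(func=hist, dim='x') turns the histogram into a step function of the event coordinate 'x': the two events with x in [1, 3) are scaled by 1.0 and the two with x in [3, 5) by 2.0, which gives the expected event values [1.0, 2.0, 6.0, 8.0].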
Example #5
File: plot.py Project: scipp/ess
def _bin_event_data_for_plotting(data, frame, bins_per_frame):
    """
    Bin event data using `bins_per_frame` to make a meaningful plot.
    """
    return sc.bin(data,
                  edges=[
                      sc.linspace(dim='tof',
                                  start=frame["time_min"].value,
                                  stop=frame["time_max"].value,
                                  num=bins_per_frame,
                                  unit=frame['time_min'].unit)
                  ]).bins.sum()
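
The same bin-then-sum pattern in a self-contained form, with a made-up event list and frame limits:

import scipp as sc

data = sc.DataArray(
    data=sc.array(dims=['event'], values=[1.0, 1.0, 1.0], unit='counts'),
    coords={'tof': sc.array(dims=['event'], values=[1.0, 2.5, 4.0],
                            unit='us')})
edges = sc.linspace(dim='tof', start=0.0, stop=5.0, num=6, unit='us')
hist = sc.bin(data, edges=[edges]).bins.sum()
# hist is a dense histogram with 5 'tof' bins, ready for plotting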
Example #6
def _tof_correction(data: sc.DataArray, dim: str = 'tof') -> sc.DataArray:
    """
    A correction for the presence of the chopper with respect to the "true" ToF.
    Also fold the two pulses.
    TODO: generalise mechanism to fold any number of pulses.
    """
    tau = sc.to_unit(
        1 / (2 * data.coords['source_chopper'].value['frequency'].data),
        data.coords[dim].unit)
    chopper_phase = data.coords['source_chopper'].value['phase'].data
    tof_offset = tau * chopper_phase / (180.0 * sc.units.deg)
    # Make 2 bins, one for each pulse
    edges = sc.concat([-tof_offset, tau - tof_offset, 2 * tau - tof_offset],
                      dim)
    data = sc.bin(data, edges=[sc.to_unit(edges, data.coords[dim].unit)])
    # Make one offset for each bin
    offset = sc.concat([tof_offset, tof_offset - tau], dim)
    # Apply the offset on both bins
    data.bins.coords[dim] += offset
    # Rebin to exclude second (empty) pulse range
    return sc.bin(data, edges=[sc.concat([0. * sc.units.us, tau], dim)])
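
The two offsets fold the second pulse onto the first: the first bin spans [-tof_offset, tau - tof_offset) and is shifted by +tof_offset, while the second spans [tau - tof_offset, 2*tau - tof_offset) and is shifted by tof_offset - tau, so both pulses land on [0, tau) and the final rebin keeps everything in a single pulse range.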
Example #7
def _check_lambda_inside_resolution(lam,
                                    dlam_over_lam,
                                    data,
                                    event_mode=False,
                                    check_value=True):
    dlam = 0.5 * dlam_over_lam * lam
    if event_mode:
        sum_in_range = sc.bin(data,
                              edges=[
                                  sc.array(dims=['wavelength'],
                                           values=[(lam - dlam).value,
                                                   (lam + dlam).value],
                                           unit=lam.unit)
                              ]).bins.sum().data['wavelength', 0]
    else:
        sum_in_range = sc.sum(data['wavelength', lam - dlam:lam + dlam]).data
    assert sc.isclose(sum_in_range, 1.0 * sc.units.counts).value is check_value
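
In event mode the range selection is done by binning with a single pair of edges at lam - dlam and lam + dlam, so only the in-range events fall into the bin; .bins.sum() then reduces that bin to a single value, which is compared against the expected total of 1.0 counts.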
Example #8
def make_binned_data_array(ndim=1, variances=False, masks=False):

    dim_list = ['tof', 'x', 'y', 'z', 'Q_x']

    N = 50
    M = 10

    values = 10.0 * np.random.random(N)

    da = sc.DataArray(data=sc.Variable(dims=['position'],
                                       unit=sc.units.counts,
                                       values=values),
                      coords={
                          'position':
                          sc.Variable(
                              dims=['position'],
                              values=['site-{}'.format(i) for i in range(N)])
                      })

    if variances:
        da.variances = values

    bin_list = []
    for i in range(ndim):
        da.coords[dim_list[i]] = sc.Variable(dims=['position'],
                                             unit=sc.units.m,
                                             values=np.random.random(N))
        bin_list.append(
            sc.Variable(dims=[dim_list[i]],
                        unit=sc.units.m,
                        values=np.linspace(0.1, 0.9, M - i)))

    binned = sc.bin(da, bin_list)

    if masks:
        # Make a checkerboard mask, see https://stackoverflow.com/a/51715491
        binned.masks["mask"] = sc.Variable(
            dims=binned.dims,
            values=(np.indices(binned.shape).sum(axis=0) % 2).astype(bool))

    return binned
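
Hypothetical usage of this helper (the expected sizes follow from the M - i edges per dimension):

binned = make_binned_data_array(ndim=2, variances=True, masks=True)
print(binned.sizes)  # {'tof': 9, 'x': 8}: 10 and 9 edges give 9 and 8 bins
dense = binned.bins.sum()  # densify, e.g. for plotting or comparison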
Example #9
def _do_stitching_on_beamline(wavelengths, dim, event_mode=False):
    # Make beamline parameters for 6 frames
    coords = wfm.make_fake_beamline(nframes=6)

    # All neutrons are assumed to be created half-way through the pulse.
    # Compute their arrival time at the detector.
    alpha = sc.to_unit(constants.m_n / constants.h, 's/m/angstrom')
    dz = sc.norm(coords['position'] - coords['source_position'])
    arrival_times = sc.to_unit(alpha * dz * wavelengths,
                               'us') + coords['source_pulse_t_0'] + (
                                   0.5 * coords['source_pulse_length'])
    coords[dim] = arrival_times

    # Make a data array that contains the beamline and the time coordinate
    tmin = sc.min(arrival_times)
    tmax = sc.max(arrival_times)
    dt = 0.1 * (tmax - tmin)

    if event_mode:
        num = 2
    else:
        num = 2001
    time_binning = sc.linspace(dim=dim,
                               start=(tmin - dt).value,
                               stop=(tmax + dt).value,
                               num=num,
                               unit=dt.unit)
    events = sc.DataArray(data=sc.ones(dims=['event'],
                                       shape=arrival_times.shape,
                                       unit=sc.units.counts,
                                       with_variances=True),
                          coords=coords)
    if event_mode:
        da = sc.bin(events, edges=[time_binning])
    else:
        da = sc.histogram(events, bins=time_binning)

    # Find location of frames
    frames = wfm.get_frames(da)

    stitched = wfm.stitch(frames=frames, data=da, dim=dim, bins=2001)

    wav = scn.convert(stitched,
                      origin='tof',
                      target='wavelength',
                      scatter=False)
    if event_mode:
        out = wav
    else:
        out = sc.rebin(wav,
                       dim='wavelength',
                       bins=sc.linspace(dim='wavelength',
                                        start=1.0,
                                        stop=10.0,
                                        num=1001,
                                        unit='angstrom'))

    choppers = {key: da.meta[key].value for key in ch.find_chopper_keys(da)}
    # Distance between WFM choppers
    dz_wfm = sc.norm(choppers["chopper_wfm_2"]["position"].data -
                     choppers["chopper_wfm_1"]["position"].data)
    # Delta_lambda  / lambda
    dlambda_over_lambda = dz_wfm / sc.norm(
        coords['position'] - frames['wfm_chopper_mid_point'].data)

    return out, dlambda_over_lambda
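
The returned dlambda_over_lambda is the resolution estimate used here for WFM: the distance between the two WFM choppers divided by the distance from their mid-point to the detector position.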