Example #1
def load(filename,
         beamline: dict = make_beamline(),
         disable_warnings: bool = True) -> sc.DataArray:
    """
    Loader for a single Amor data file.

    :param beamline: A dict defining the beamline parameters.
    :param disable_warnings: Do not show warnings from file loading if `True`.
        Default is `True`.
    """
    if disable_warnings:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            data = scn.load_nexus(filename)
    else:
        data = scn.load_nexus(filename)

    # Convert tof from nanoseconds to microseconds for convenience
    # TODO: is it safe to assume that the dtype of the binned wrapper coordinate is
    # the same as the dtype of the underlying event coordinate?
    data.bins.coords['tof'] = data.bins.coords['tof'].astype('float64',
                                                             copy=False)
    data.coords['tof'] = data.coords['tof'].astype('float64', copy=False)
    data.bins.coords['tof'] = sc.to_unit(data.bins.coords['tof'],
                                         'us',
                                         copy=False)
    data.coords['tof'] = sc.to_unit(data.coords['tof'], 'us', copy=False)

    # Add beamline parameters
    for key, value in beamline.items():
        data.coords[key] = value

    # Perform tof correction and fold two pulses
    return _tof_correction(data)
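
A note on the pattern above: the cast to float64 happens before the unit conversion because an integer-valued tof could not represent the ns-to-us scaling exactly, and `copy=False` merely allows `to_unit` to return its input unchanged when no conversion is needed. A minimal standalone sketch with made-up values:

import scipp as sc

tof = sc.array(dims=['event'], values=[1.0e4, 2.5e4], unit='ns', dtype='float64')
tof_us = sc.to_unit(tof, 'us', copy=False)  # values scaled by 1e-3, unit set to 'us'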
Example #2
def theta(gravity: sc.Variable, wavelength: sc.Variable, incident_beam: sc.Variable,
          scattered_beam: sc.Variable, sample_rotation: sc.Variable) -> sc.Variable:
    """
    Compute the theta angle, including gravity correction.
    This is similar to the theta calculation in SANS (see
    https://docs.mantidproject.org/nightly/algorithms/Q1D-v2.html#q-unit-conversion),
    but we ignore the horizontal `x` component.
    See the schematic in Fig 5 of doi: 10.1016/j.nima.2016.03.007.
    """
    grav = sc.norm(gravity)
    L2 = sc.norm(scattered_beam)
    y = sc.dot(scattered_beam, gravity) / grav
    y_correction = sc.to_unit(wavelength, _elem_unit(L2), copy=True)
    y_correction *= y_correction
    drop = L2**2
    drop *= grav * (m_n**2 / (2 * h**2))
    # Optimization when handling either the dense or the event coord of binned data:
    # - For the event coord, both operands have same dims, and we can multiply in place
    # - For the dense coord, we need to broadcast using non in-place operation
    if set(drop.dims).issubset(set(y_correction.dims)):
        y_correction *= drop
    else:
        y_correction = y_correction * drop
    y_correction += y
    out = sc.abs(y_correction, out=y_correction)
    out /= L2
    out = sc.asin(out, out=out)
    out -= sc.to_unit(sample_rotation, 'rad')
    return out
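
As a sanity check on the gravity term, the drop computed above is grav * m_n**2 / (2 * h**2) * L2**2 * wavelength**2. A standalone sketch with assumed values (a 4 angstrom neutron and a 5 m scattered beam):

from scipy.constants import g, h, m_n

wavelength = 4.0e-10  # m, i.e. 4 angstrom (assumed)
L2 = 5.0              # m (assumed)
drop = g * m_n**2 / (2 * h**2) * L2**2 * wavelength**2
print(f'{drop * 1e3:.3f} mm')  # ~0.13 mm, small but not negligible at long wavelengths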
Example #3
def _plot_components(scipp_obj, components, positions_var, scene):
    det_center = sc.mean(positions_var)
    # Some scaling to set width according to distance from detector center
    shapes = _instrument_view_shape_types()
    for name, settings in components.items():
        type = settings["type"]
        size = _as_vector(settings["size"])
        component_position = settings["center"]
        color = settings.get("color", "#808080")
        wireframe = settings.get("wireframe", False)
        if type not in shapes:
            supported_shapes = ", ".join(shapes.keys())
            raise ValueError(f"Unknown shape: {type} requested for {name}. "
                             f"Allowed values are: {supported_shapes}")
        component_position = sc.to_unit(component_position, positions_var.unit)
        size = sc.to_unit(size, positions_var.unit)
        _add_to_scene(position=component_position,
                      scene=scene,
                      shape=shapes[type],
                      display_text=name,
                      bounding_box=tuple(size.values),
                      color=color,
                      wireframe=wireframe,
                      det_center=det_center)
    # Reset camera
    camera = _get_camera(scene)
    if camera:
        furthest_distance = _furthest_component(det_center, scipp_obj,
                                                components)
        camera.far = max(camera.far, furthest_distance * 5.0)
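
For reference, a hypothetical `components` mapping in the shape this function consumes; the valid values of "type" come from `_instrument_view_shape_types()`, so "box" below is an assumption:

components = {
    "sample": {
        "type": "box",  # assumed; must be a key of _instrument_view_shape_types()
        "size": sc.vector(value=[0.1, 0.1, 0.1], unit='m'),
        "center": sc.vector(value=[0.0, 0.0, 0.0], unit='m'),
        "color": "#ff0000",   # optional, defaults to "#808080"
        "wireframe": True,    # optional, defaults to False
    }
}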
Example #4
File: utils.py Project: scipp/ess
def time_open(chopper: sc.Dataset, unit: str = "us") -> sc.Variable:
    """
    Get the times when a chopper window is open.

    :param chopper: The Dataset containing the chopper parameters.
    :param unit: Convert to this unit before returning. Default is `'us'`.
    """
    return sc.to_unit(
        (cutout_angles_begin(chopper) + sc.to_unit(chopper["phase"].data, "rad")) /
        angular_frequency(chopper),
        unit,
        copy=False)
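
Worked numbers for the formula above, matching the chopper test in Example #13 (and assuming `angular_frequency` returns 2*pi times the chopper frequency): a 0.5 Hz chopper with zero phase and cutouts beginning at [0, pi/2, pi] rad opens at

import numpy as np

omega = 2.0 * np.pi * 0.5                  # angular frequency, rad/s
begin = np.array([0.0, 0.5, 1.0]) * np.pi  # cutout_angles_begin, rad
t_open = (begin + 0.0) / omega             # -> [0.0, 0.5, 1.0] s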
Example #5
def _load_event_time_zero(group: Group,
                          nexus: LoadFromNexus,
                          index=...) -> sc.Variable:
    time_zero_group = "event_time_zero"

    event_time_zero = nexus.load_dataset(group,
                                         time_zero_group,
                                         dimensions=[_pulse_dimension],
                                         index=index)

    try:
        pulse_times = sc.to_unit(event_time_zero, sc.units.ns, copy=False)
    except sc.UnitError:
        raise BadSource(f"Could not load pulse times: units attribute "
                        f"'{event_time_zero.unit}' in NXEvent at "
                        f"{group.name}/{time_zero_group} is not convertible"
                        f" to nanoseconds.")

    try:
        time_offset = nexus.get_string_attribute(
            nexus.get_dataset_from_group(group, time_zero_group), "offset")
    except MissingAttribute:
        time_offset = "1970-01-01T00:00:00Z"

    # Need to convert the values which were loaded as float64 into int64 to be able
    # to do datetime arithmetic. This needs to be done after conversion to ns to
    # avoid unnecessary loss of accuracy.
    pulse_times = pulse_times.astype(sc.DType.int64, copy=False)
    return pulse_times + sc.scalar(np.datetime64(time_offset),
                                   unit=sc.units.ns,
                                   dtype=sc.DType.datetime64)
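
The final lines rely on scipp's datetime arithmetic: an integer duration with a time unit can be added to a datetime64 scalar. A minimal sketch with made-up pulse times (71428571 ns is roughly one 14 Hz pulse period):

import scipp as sc

pulse_times = sc.array(dims=['pulse'], values=[0, 71428571], unit='ns')  # int64
timestamps = sc.epoch(unit='ns') + pulse_times  # datetime64, offset from 1970-01-01T00:00:00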
Example #6
def test_convert_tof_to_energy_transfer_indirect():
    tof = make_test_data(coords=('tof', 'L1', 'L2'), dataset=True)
    with pytest.raises(RuntimeError):
        scn.convert(tof, origin='tof', target='energy_transfer', scatter=True)
    ef = 25.0 * sc.units.meV
    tof.coords['final_energy'] = ef
    indirect = scn.convert(tof,
                           origin='tof',
                           target='energy_transfer',
                           scatter=True)
    check_tof_conversion_metadata(indirect, 'energy_transfer', sc.units.meV)

    t = tof.coords['tof']
    t0 = sc.to_unit(tof.coords['L2'] * sc.sqrt(m_n / 2 / ef), t.unit)
    assert sc.all(t0 < t).value  # only test physical region here
    ref = sc.to_unit(m_n / 2 * (tof.coords['L1'] / (t - t0))**2,
                     ef.unit).rename_dims({'tof': 'energy_transfer'}) - ef
    assert sc.allclose(indirect.coords['energy_transfer'],
                       ref,
                       rtol=sc.scalar(1e-13))
Example #7
def _tof_correction(data: sc.DataArray, dim: str = 'tof') -> sc.DataArray:
    """
    A correction for the presence of the chopper with respect to the "true" ToF.
    Also fold the two pulses.
    TODO: generalise mechanism to fold any number of pulses.
    """
    tau = sc.to_unit(
        1 / (2 * data.coords['source_chopper'].value['frequency'].data),
        data.coords[dim].unit)
    chopper_phase = data.coords['source_chopper'].value['phase'].data
    tof_offset = tau * chopper_phase / (180.0 * sc.units.deg)
    # Make 2 bins, one for each pulse
    edges = sc.concat([-tof_offset, tau - tof_offset, 2 * tau - tof_offset],
                      dim)
    data = sc.bin(data, edges=[sc.to_unit(edges, data.coords[dim].unit)])
    # Make one offset for each bin
    offset = sc.concat([tof_offset, tof_offset - tau], dim)
    # Apply the offset on both bins
    data.bins.coords[dim] += offset
    # Rebin to exclude second (empty) pulse range
    return sc.bin(data, edges=[sc.concat([0. * sc.units.us, tau], dim)])
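
Worked numbers for the folding above, with assumed values: a 14 Hz source chopper and a 90 deg phase give

tau = 1.0e6 / (2 * 14.0)             # ~35714 us, half the chopper period
tof_offset = tau * 90.0 / 180.0      # ~17857 us
edges = [-tof_offset, tau - tof_offset, 2 * tau - tof_offset]  # ~[-17857, 17857, 53571] us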
Example #8
def test_convert_tof_to_energy_elastic():
    tof = make_test_data(coords=('tof', 'Ltotal'), dataset=True)
    energy = scn.convert(tof, origin='tof', target='energy', scatter=True)
    check_tof_conversion_metadata(energy, 'energy', sc.units.meV)

    tof_in_seconds = sc.to_unit(tof.coords['tof'], 's')
    # e [J] = 1/2 m(n) [kg] (l [m] / tof [s])^2
    joule_to_mev = sc.to_unit(1.0 * sc.Unit('J'), sc.units.meV).value
    neutron_mass = sc.to_unit(m_n, sc.units.kg).value

    # Spectrum 0 is 11 m from source
    for val, t in zip(energy.coords['energy']['spectrum', 0].values,
                      tof_in_seconds.values):
        np.testing.assert_allclose(
            val, joule_to_mev * neutron_mass / 2 * (11 / t)**2, rtol=1e-3)
    # Spectrum 1
    L = 10.0 + math.sqrt(1.0 * 1.0 + 0.1 * 0.1)
    for val, t in zip(energy.coords['energy']['spectrum', 1].values,
                      tof_in_seconds.values):
        np.testing.assert_allclose(
            val, joule_to_mev * 0.5 * neutron_mass * (L / t)**2, rtol=1e-3)
Example #9
def _convert_time_to_datetime64(
        raw_times: sc.Variable,
        group_path: str,
        start: Optional[str] = None,
        scaling_factor: Optional[Union[float, np.float_]] = None) -> sc.Variable:
    """
    The nexus standard allows an arbitrary scaling factor to be inserted
    between the numbers in the `time` series and the unit of time reported
    in the nexus attribute.

    The times are also relative to a given log start time, which might be
    different for each log. If this log start time is not available, the start of the
    unix epoch (1970-01-01T00:00:00Z) is used instead.

    See https://manual.nexusformat.org/classes/base_classes/NXlog.html

    Args:
        raw_times: The raw time data from a nexus file.
        group_path: The path within the nexus file to the log being read.
            Used to generate warnings if loading the log fails.
        start: Optional, the start time of the log in an ISO8601
            string. If not provided, defaults to the beginning of the
            unix epoch (1970-01-01T00:00:00Z).
        scaling_factor: Optional, the scaling factor between the provided
            time series data and the unit of the raw_times Variable. If
            not provided, defaults to 1 (a no-op scaling factor).
    """
    try:
        raw_times_ns = sc.to_unit(raw_times, sc.units.ns, copy=False)
    except sc.UnitError:
        raise BadSource(
            f"The units of time in the entry at "
            f"'{group_path}/time{{units}}' must be convertible to seconds, "
            f"but this cannot be done for '{raw_times.unit}'. Skipping "
            f"loading group at '{group_path}'.")

    try:
        _start_ts = sc.scalar(value=np.datetime64(start
                                                  or "1970-01-01T00:00:00Z"),
                              unit=sc.units.ns,
                              dtype=sc.DType.datetime64)
    except ValueError:
        raise BadSource(
            f"The date string '{start}' in the entry at "
            f"'{group_path}/time@start' failed to parse as an ISO8601 date. "
            f"Skipping loading group at '{group_path}'")

    _scale = sc.scalar(
        value=scaling_factor if scaling_factor is not None else 1,
        unit=sc.units.dimensionless)

    return _start_ts + (raw_times_ns * _scale).astype(sc.DType.int64,
                                                      copy=False)
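
A hypothetical call for a log recorded in seconds relative to an ISO8601 start time:

times = _convert_time_to_datetime64(
    sc.array(dims=['time'], values=[0.0, 1.5, 3.0], unit='s'),
    group_path='/entry/log/temperature',  # hypothetical path, used only in error messages
    start='2021-06-01T10:00:00')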
Example #10
def test_convert_tof_to_energy_transfer_indirect_unphysical():
    tof = make_test_data(coords=('tof', 'L1', 'L2'), dataset=True)
    ef = 25.0 * sc.units.meV
    tof.coords['final_energy'] = ef
    t0 = sc.to_unit(tof.coords['L2'] * sc.sqrt(m_n / ef), sc.units.s)
    coord, is_unphysical = make_unphysical_tof(t0, tof)
    tof.coords['tof'] = coord

    result = scn.convert(tof,
                         origin='tof',
                         target='energy_transfer',
                         scatter=True)
    assert sc.identical(sc.isnan(result.coords['energy_transfer']),
                        is_unphysical)
Example #11
File: utils.py Project: scipp/ess
def cutout_angles_width(chopper: sc.Dataset, unit="rad") -> sc.Variable:
    """
    Get the angular widths of the chopper cutouts.

    :param chopper: The Dataset containing the chopper parameters.
    :param unit: Convert to this unit before returning. Default is `'rad'`.
    """
    if "cutout_angles_width" in chopper:
        out = chopper["cutout_angles_width"].data
    elif all(x in chopper for x in ["cutout_angles_begin", "cutout_angles_end"]):
        out = chopper["cutout_angles_end"].data - chopper["cutout_angles_begin"].data
    else:
        raise KeyError("Chopper does not contain the information required to compute "
                       "the cutout_angles_width.")
    return sc.to_unit(out, unit, copy=False)
Example #12
File: resolution.py Project: scipp/ess
def detector_resolution(spatial_resolution: sc.Variable, pixel_position: sc.Variable,
                        sample_position: sc.Variable) -> sc.Variable:
    """
    Calculate the resolution function due to the spatial resolution of the detector.
    The function returns the standard deviation of detector resolution.

    :param spatial_resolution: Detector spatial resolution.
    :param pixel_position: The position of each pixel in the dimension parallel to the
        beam.
    :param sample_position: The position of the sample in the dimension parallel to the
        beam.
    """
    fwhm = sc.to_unit(sc.atan(spatial_resolution / (pixel_position - sample_position)),
                      "deg")
    return fwhm / (2 * np.sqrt(2 * np.log(2)))
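
The final division converts the FWHM to a standard deviation using the Gaussian relation FWHM = 2*sqrt(2*ln(2))*sigma:

import numpy as np

fwhm_to_sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # ~0.4247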
Example #13
File: chopper_test.py Project: scipp/ess
def test_time_open_closed(params):
    dim = 'frame'
    chopper = ch.make_chopper(
        frequency=sc.scalar(0.5, unit=sc.units.one / sc.units.s),
        phase=sc.scalar(0., unit='rad'),
        position=params['position'],
        cutout_angles_begin=sc.array(dims=[dim],
                                     values=np.pi * np.array([0.0, 0.5, 1.0]),
                                     unit='rad'),
        cutout_angles_end=sc.array(dims=[dim],
                                   values=np.pi * np.array([0.5, 1.0, 1.5]),
                                   unit='rad'),
        kind=params['kind'])

    assert sc.allclose(
        ch.time_open(chopper),
        sc.to_unit(sc.array(dims=[dim], values=[0.0, 0.5, 1.0], unit='s'),
                   'us'))
    assert sc.allclose(
        ch.time_closed(chopper),
        sc.to_unit(sc.array(dims=[dim], values=[0.5, 1.0, 1.5], unit='s'),
                   'us'))

    chopper["phase"] = sc.scalar(2.0 * np.pi / 3.0, unit='rad')
    assert sc.allclose(
        ch.time_open(chopper),
        sc.to_unit(
            sc.array(dims=[dim],
                     values=np.array([0.0, 0.5, 1.0]) + 2.0 / 3.0,
                     unit='s'), 'us'))
    assert sc.allclose(
        ch.time_closed(chopper),
        sc.to_unit(
            sc.array(dims=[dim],
                     values=np.array([0.5, 1.0, 1.5]) + 2.0 / 3.0,
                     unit='s'), 'us'))
Example #14
def test_frames_analytical_short_pulse():
    ds = sc.Dataset(coords=wfm.make_fake_beamline(
        pulse_length=sc.to_unit(sc.scalar(1.86e+03, unit='us'), 's')))
    frames = wfm.get_frames(ds)
    _check_against_reference(ds, frames)
Example #15
def _convert_array_to_metres(array: np.ndarray, unit: str) -> np.ndarray:
    return sc.to_unit(
        sc.Variable(dims=["temporary_variable"],
                    values=array,
                    unit=unit,
                    dtype=np.float64), "m").values
Example #16
def frames_analytical(data: Union[sc.DataArray, sc.Dataset]) -> sc.Dataset:
    """
    Compute analytical frame boundaries and shifts based on chopper
    parameters and detector pixel positions.
    A set of frame boundaries is returned for each pixel.
    The frame shifts are the same for all pixels.
    See Schmakat et al. (2020);
    https://www.sciencedirect.com/science/article/pii/S0168900220308640
    for a description of the procedure.

    TODO: This currently ignores scattering paths and uses only the distance from
    source to pixel.
    For imaging, this is what we want, but for scattering techniques, we should
    use l1 + l2 in the future.
    """

    # Identify the WFM choppers based on their `kind` property
    wfm_choppers = {}
    for name in ch.find_chopper_keys(data):
        chopper = data.meta[name].value
        if chopper["kind"].value == "wfm":
            wfm_choppers[name] = chopper
    if len(wfm_choppers) != 2:
        raise RuntimeError("The number of WFM choppers is expected to be 2, "
                           "found {}".format(len(wfm_choppers)))
    # Find the near and far WFM choppers based on their positions relative to the source
    wfm_chopper_names = list(wfm_choppers.keys())
    if (sc.norm(wfm_choppers[wfm_chopper_names[0]]["position"].data -
                data.meta["source_position"]) <
            sc.norm(wfm_choppers[wfm_chopper_names[1]]["position"].data -
                    data.meta["source_position"])).value:
        near_index = 0
        far_index = 1
    else:
        near_index = 1
        far_index = 0
    near_wfm_chopper = wfm_choppers[wfm_chopper_names[near_index]]
    far_wfm_chopper = wfm_choppers[wfm_chopper_names[far_index]]

    # Compute distances for each detector pixel
    detector_positions = data.meta["position"] - data.meta["source_position"]

    # Container for frames information
    frames = sc.Dataset()

    # Distance between WFM choppers
    dz_wfm = sc.norm(far_wfm_chopper["position"].data -
                     near_wfm_chopper["position"].data)
    # Mid-point between WFM choppers
    z_wfm = 0.5 * (
        near_wfm_chopper["position"].data +
        far_wfm_chopper["position"].data) - data.meta["source_position"]
    # Ratio of WFM chopper distances
    z_ratio_wfm = (sc.norm(far_wfm_chopper["position"].data -
                           data.meta["source_position"]) /
                   sc.norm(near_wfm_chopper["position"].data -
                           data.meta["source_position"]))
    # Distance between detector positions and wfm chopper mid-point
    zdet_minus_zwfm = sc.norm(detector_positions - z_wfm)

    # Neutron mass to Planck constant ratio
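    # (about 252.8 us / (m * angstrom): a 1 angstrom neutron travels 1 m in ~252.8 us)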
    alpha = sc.to_unit(constants.m_n / constants.h, 'us/m/angstrom')

    # Frame time corrections: these are the mid-time point between the WFM choppers,
    # which is the same as the opening edge of the second WFM chopper in the case
    # of optically blind choppers.
    frames["time_correction"] = ch.time_open(far_wfm_chopper)

    # Find delta_t for the min and max wavelengths:
    # dt_lambda_max is equal to the time width of the WFM choppers windows
    dt_lambda_max = ch.time_closed(near_wfm_chopper) - ch.time_open(
        near_wfm_chopper)

    # t_lambda_max is found from the relation between t and delta_t: equation (2) in
    # Schmakat et al. (2020).
    t_lambda_max = (dt_lambda_max / dz_wfm) * zdet_minus_zwfm

    # t_lambda_min is found from the relation between lambda_N and lambda_N+1,
    # equation (3) in Schmakat et al. (2020).
    t_lambda_min = t_lambda_max * z_ratio_wfm - data.meta[
        "source_pulse_length"] * (zdet_minus_zwfm / sc.norm(
            near_wfm_chopper["position"].data - data.meta["source_position"]))

    # dt_lambda_min is found from the relation between t and delta_t: equation (2)
    # in Schmakat et al. (2020), and using the expression for t_lambda_max.
    dt_lambda_min = dt_lambda_max * z_ratio_wfm - data.meta[
        "source_pulse_length"] * dz_wfm / sc.norm(
            near_wfm_chopper["position"].data - data.meta["source_position"])

    # Compute wavelength information
    lambda_min = t_lambda_min / (alpha * zdet_minus_zwfm)
    lambda_max = t_lambda_max / (alpha * zdet_minus_zwfm)
    dlambda_min = dz_wfm * lambda_min / zdet_minus_zwfm
    dlambda_max = dz_wfm * lambda_max / zdet_minus_zwfm

    # Frame edges and resolutions for each pixel.
    # The frames do not stop at t_lambda_min and t_lambda_max, they also include the
    # fuzzy areas (delta_t) at the edges.
    frames["time_min"] = t_lambda_min - (
        0.5 * dt_lambda_min) + frames["time_correction"]
    frames["delta_time_min"] = dt_lambda_min

    frames["time_max"] = t_lambda_max + (
        0.5 * dt_lambda_max) + frames["time_correction"]
    frames["delta_time_max"] = dt_lambda_max
    frames["wavelength_min"] = lambda_min
    frames["wavelength_max"] = lambda_max
    frames["delta_wavelength_min"] = dlambda_min
    frames["delta_wavelength_max"] = dlambda_max

    frames["wfm_chopper_mid_point"] = 0.5 * (
        near_wfm_chopper["position"].data + far_wfm_chopper["position"].data)

    return frames
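
A recap of the relations used above, in the code's notation (following Schmakat et al. (2020)): the flight time of a neutron of wavelength $\lambda$ over a distance $z$ is $t = \alpha \lambda z$ with $\alpha = m_n / h$, so

$$\lambda = \frac{t}{\alpha\,(z_\mathrm{det} - z_\mathrm{wfm})}, \qquad \Delta\lambda = \frac{\Delta z_\mathrm{wfm}\,\lambda}{z_\mathrm{det} - z_\mathrm{wfm}},$$

which is how `lambda_min`/`lambda_max` and `dlambda_min`/`dlambda_max` are computed for each pixel.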
Example #17
def make_fake_beamline(
        chopper_wfm_1_position=sc.vector(value=[0.0, 0.0, 6.775], unit='m'),
        chopper_wfm_2_position=sc.vector(value=[0.0, 0.0, 7.225], unit='m'),
        frequency=sc.scalar(56.0, unit=sc.units.one / sc.units.s),
        lambda_min=sc.scalar(1.0, unit='angstrom'),
        pulse_length=sc.scalar(2.86e-03, unit='s'),
        pulse_t_0=sc.scalar(1.3e-4, unit='s'),
        nframes=2):
    """
    Fake chopper cascade with 2 optically blind WFM choppers.
    Based on mathematical description in Schmakat et al. (2020);
    https://www.sciencedirect.com/science/article/pii/S0168900220308640
    """

    dim = 'frame'
    # Neutron mass to Planck constant ratio
    alpha = sc.to_unit(m_n / h, 's/m/angstrom')
    omega = (2.0 * np.pi * sc.units.rad) * frequency

    cutout_angles_center_wfm_1 = sc.empty(dims=[dim], shape=[nframes], unit='rad')
    cutout_angles_center_wfm_2 = sc.empty_like(cutout_angles_center_wfm_1)
    cutout_angles_width = sc.empty_like(cutout_angles_center_wfm_1)

    for i in range(nframes):
        # Equation (3) in Schmakat et al. (2020)
        lambda_max = (pulse_length +
                      alpha * lambda_min * sc.norm(chopper_wfm_1_position)) / (
                          alpha * sc.norm(chopper_wfm_2_position))
        # Equation (4) in Schmakat et al. (2020)
        theta = omega * (pulse_length + alpha *
                         (lambda_min - lambda_max) * sc.norm(chopper_wfm_1_position))
        # Equation (5) in Schmakat et al. (2020)
        phi_wfm_1 = omega * (
            pulse_t_0 + 0.5 * pulse_length + 0.5 * alpha *
            (lambda_min + lambda_max) * sc.norm(chopper_wfm_1_position))
        # Equation (6) in Schmakat et al. (2020)
        phi_wfm_2 = omega * (pulse_t_0 + 1.5 * pulse_length + 0.5 * alpha * (
            (3.0 * lambda_min) - lambda_max) * sc.norm(chopper_wfm_1_position))

        cutout_angles_width[dim, i] = theta
        cutout_angles_center_wfm_1[dim, i] = phi_wfm_1
        cutout_angles_center_wfm_2[dim, i] = phi_wfm_2

        lambda_min = lambda_max

    return {
        "chopper_wfm_1":
        sc.scalar(
            make_chopper(frequency=frequency,
                         phase=sc.scalar(0.0, unit='deg'),
                         position=chopper_wfm_1_position,
                         cutout_angles_center=cutout_angles_center_wfm_1,
                         cutout_angles_width=cutout_angles_width,
                         kind=sc.scalar('wfm'))),
        "chopper_wfm_2":
        sc.scalar(
            make_chopper(frequency=frequency,
                         phase=sc.scalar(0.0, unit='deg'),
                         position=chopper_wfm_2_position,
                         cutout_angles_center=cutout_angles_center_wfm_2,
                         cutout_angles_width=cutout_angles_width,
                         kind=sc.scalar('wfm'))),
        'position':
        sc.vector(value=[0., 0., 60.], unit='m'),
        "source_pulse_length":
        sc.to_unit(pulse_length, 'us'),
        "source_pulse_t_0":
        sc.to_unit(pulse_t_0, 'us'),
        "source_position":
        sc.vector(value=[0.0, 0.0, 0.0], unit='m')
    }
Example #18
async def _data_stream(
        data_queue: mp.Queue,
        worker_instruction_queue: mp.Queue,
        kafka_broker: str,
        topics: Optional[List[str]],
        interval: sc.Variable,
        event_buffer_size: int,
        slow_metadata_buffer_size: int,
        fast_metadata_buffer_size: int,
        chopper_buffer_size: int,
        run_info_topic: Optional[str] = None,
        start_at: StartTime = StartTime.NOW,
        end_at: StopTime = StopTime.NEVER,
        query_consumer: Optional["KafkaQueryConsumer"] = None,  # noqa: F821
        consumer_type: ConsumerType = ConsumerType.REAL,
        halt_after_n_data_chunks: int = np.iinfo(np.int32).max,  # for tests
        halt_after_n_warnings: int = np.iinfo(np.int32).max,  # for tests
        test_message_queue: Optional[mp.Queue] = None,  # for tests
        timeout: Optional[sc.Variable] = None,  # for tests
) -> Generator[sc.DataArray, None, None]:
    """
    Main implementation of the data stream, extracted into this function so that
    fake consumers can be injected for unit tests.
    """

    # Search backwards to find the last run_start message
    try:
        from ._consumer import (get_run_start_message, KafkaQueryConsumer)
        from ._data_consumption_manager import (data_consumption_manager,
                                                ManagerInstruction, InstructionType)
    except ImportError:
        raise ImportError(_missing_dependency_message)

    if topics is None and run_info_topic is None:
        raise ValueError("At least one of 'topics' and 'run_info_topic'"
                         " must be specified")

    # This is defaulted to None in the function signature
    # to avoid it having to be imported earlier
    if query_consumer is None:
        query_consumer = KafkaQueryConsumer(kafka_broker)

    # stream_info contains information on where to look for data and metadata.
    # The data from the start message is yielded as the first chunk of data.
    #
    # TODO: This should, in principle, not look any different from any other
    # chunk of data, right now it seems it may be different?
    # (see https://github.com/scipp/scippneutron/issues/114)
    #
    # Generic data chunk structure: geometry, metadata, and event data are all
    # optional.
    # - first data chunk will most probably contain no event data
    # - subsequent chunks can contain geometry info, if e.g. some pixels have
    #   moved
    # - metadata (e.g. sample environment) might be empty, if values have not
    #   changed
    stream_info = None
    run_id = ""
    run_title = "-"  # for display in widget
    stop_time_ms = None
    n_data_chunks = 0
    if run_info_topic is not None:
        run_start_info = get_run_start_message(run_info_topic, query_consumer)
        run_id = run_start_info.job_id
        run_title = run_start_info.run_name
        # default value for stop_time in message flatbuffer is 0,
        # it means that field has not been populated
        if end_at == StopTime.END_OF_RUN and run_start_info.stop_time != 0:
            stop_time_ms = run_start_info.stop_time
        if topics is None:
            loaded_data, stream_info = _load_nexus_json(run_start_info.nexus_structure,
                                                        get_start_info=True)
            topics = [stream.topic for stream in stream_info]
        else:
            loaded_data, _ = _load_nexus_json(run_start_info.nexus_structure,
                                              get_start_info=False)
        topics.append(run_info_topic)  # listen for stop run message
        yield loaded_data
        n_data_chunks += 1

    if start_at == StartTime.START_OF_RUN:
        start_time = run_start_info.start_time * sc.Unit("milliseconds")
    else:
        start_time = time.time() * sc.units.s

    # Convert to int and float as easier to pass to mp.Process
    # (sc.Variable would have to be serialised/deserialised)
    start_time_ms = int(sc.to_unit(start_time, "milliseconds").value)
    interval_s = float(sc.to_unit(interval, 's').value)

    # Specify to start the process using the "spawn" method, otherwise
    # on Linux the default is to fork the Python interpreter which
    # is "problematic" in a multithreaded process, this can apparently
    # even cause multiprocessing's own Queue to cause problems when forking.
    # See documentation:
    # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
    # pytorch docs mention Queue problem:
    # https://pytorch.org/docs/stable/notes/multiprocessing.html
    data_collect_process = mp.get_context("spawn").Process(
        target=data_consumption_manager,
        args=(start_time_ms, stop_time_ms, run_id, topics, kafka_broker, consumer_type,
              stream_info, interval_s, event_buffer_size, slow_metadata_buffer_size,
              fast_metadata_buffer_size, chopper_buffer_size, worker_instruction_queue,
              data_queue, test_message_queue))
    try:
        data_stream_widget = DataStreamWidget(start_time_ms=start_time_ms,
                                              stop_time_ms=stop_time_ms,
                                              run_title=run_title)
        data_collect_process.start()

        # When testing, if something goes wrong, the while loop below can
        # become infinite. So we introduce a timeout.
        if timeout is not None:
            start_timeout = time.time()
            timeout_s = float(sc.to_unit(timeout, 's').value)
        n_warnings = 0
        while data_collect_process.is_alive(
        ) and n_data_chunks < halt_after_n_data_chunks and \
                n_warnings < halt_after_n_warnings and \
                not data_stream_widget.stop_requested:
            if timeout is not None and (time.time() - start_timeout) > timeout_s:
                raise TimeoutError("data_stream timed out in test")
            try:
                new_data = data_queue.get_nowait()

                if isinstance(new_data, Warning):
                    # Raise warnings in this process so that they
                    # can be captured in tests
                    warn(new_data)
                    n_warnings += 1
                    continue
                elif isinstance(new_data, StopTimeUpdate):
                    data_stream_widget.set_stop_time(new_data.stop_time_ms)
                    if end_at == StopTime.END_OF_RUN:
                        worker_instruction_queue.put(
                            ManagerInstruction(InstructionType.UPDATE_STOP_TIME,
                                               new_data.stop_time_ms))
                    continue
                n_data_chunks += 1
                yield convert_from_pickleable_dict(new_data)
            except QueueEmpty:
                await asyncio.sleep(0.5 * interval_s)
    finally:
        # Ensure cleanup happens however the loop exits
        worker_instruction_queue.put(ManagerInstruction(InstructionType.STOP_NOW))
        if data_collect_process.is_alive():
            process_halt_timeout_s = 4.
            data_collect_process.join(process_halt_timeout_s)
        if data_collect_process.is_alive():
            data_collect_process.terminate()
        for queue in (data_queue, worker_instruction_queue, test_message_queue):
            _cleanup_queue(queue)
        data_stream_widget.set_stopped()
Example #19
def test_frames_analytical_large_t_0():
    ds = sc.Dataset(coords=wfm.make_fake_beamline(
        pulse_t_0=sc.to_unit(sc.scalar(300., unit='us'), 's')))
    frames = wfm.get_frames(ds)
    _check_against_reference(ds, frames)
Example #20
def _frames_from_slopes(data):
    detector_pos_norm = sc.norm(data.meta["position"])

    # Get the number of WFM frames
    choppers = {
        key: data.meta[key].value
        for key in ch.find_chopper_keys(data)
    }
    nframes = ch.cutout_angles_begin(choppers["chopper_wfm_1"]).sizes["frame"]

    # Now find frame boundaries
    frames = sc.Dataset()
    frames["time_min"] = sc.zeros(dims=["frame"],
                                  shape=[nframes],
                                  unit=sc.units.us)
    frames["time_max"] = sc.zeros_like(frames["time_min"])
    frames["delta_time_min"] = sc.zeros_like(frames["time_min"])
    frames["delta_time_max"] = sc.zeros_like(frames["time_min"])
    frames["wavelength_min"] = sc.zeros(dims=["frame"],
                                        shape=[nframes],
                                        unit=sc.units.angstrom)
    frames["wavelength_max"] = sc.zeros_like(frames["wavelength_min"])
    frames["delta_wavelength_min"] = sc.zeros_like(frames["wavelength_min"])
    frames["delta_wavelength_max"] = sc.zeros_like(frames["wavelength_min"])

    frames["time_correction"] = sc.zeros(dims=["frame"],
                                         shape=[nframes],
                                         unit=sc.units.us)

    near_wfm_chopper = choppers["chopper_wfm_1"]
    far_wfm_chopper = choppers["chopper_wfm_2"]

    # Distance between WFM choppers
    dz_wfm = sc.norm(far_wfm_chopper["position"].data -
                     near_wfm_chopper["position"].data)
    # Mid-point between WFM choppers
    z_wfm = 0.5 * (near_wfm_chopper["position"].data +
                   far_wfm_chopper["position"].data)
    # Distance between detector positions and wfm chopper mid-point
    zdet_minus_zwfm = sc.norm(data.meta["position"] - z_wfm)
    # Neutron mass to Planck constant ratio
    alpha = sc.to_unit(constants.m_n / constants.h, 'us/m/angstrom')

    near_t_open = ch.time_open(near_wfm_chopper)
    near_t_close = ch.time_closed(near_wfm_chopper)
    far_t_open = ch.time_open(far_wfm_chopper)

    for i in range(nframes):
        dt_lambda_max = near_t_close["frame", i] - near_t_open["frame", i]
        slope_lambda_max = dz_wfm / dt_lambda_max
        intercept_lambda_max = sc.norm(
            near_wfm_chopper["position"].data
        ) - slope_lambda_max * near_t_close["frame", i]
        t_lambda_max = (detector_pos_norm -
                        intercept_lambda_max) / slope_lambda_max

        slope_lambda_min = sc.norm(near_wfm_chopper["position"].data) / (
            near_t_close["frame", i] -
            (data.meta["source_pulse_length"] + data.meta["source_pulse_t_0"]))
        intercept_lambda_min = sc.norm(
            far_wfm_chopper["position"].data
        ) - slope_lambda_min * far_t_open["frame", i]
        t_lambda_min = (detector_pos_norm -
                        intercept_lambda_min) / slope_lambda_min

        t_lambda_min_plus_dt = (
            detector_pos_norm -
            (sc.norm(near_wfm_chopper["position"].data) -
             slope_lambda_min * near_t_close["frame", i])) / slope_lambda_min
        dt_lambda_min = t_lambda_min_plus_dt - t_lambda_min

        # Compute wavelength information
        lambda_min = (t_lambda_min + 0.5 * dt_lambda_min -
                      far_t_open["frame", i]) / (alpha * zdet_minus_zwfm)
        lambda_max = (t_lambda_max - 0.5 * dt_lambda_max -
                      far_t_open["frame", i]) / (alpha * zdet_minus_zwfm)
        dlambda_min = dz_wfm * lambda_min / zdet_minus_zwfm
        dlambda_max = dz_wfm * lambda_max / zdet_minus_zwfm

        frames["time_min"]["frame", i] = t_lambda_min
        frames["delta_time_min"]["frame", i] = dt_lambda_min
        frames["time_max"]["frame", i] = t_lambda_max
        frames["delta_time_max"]["frame", i] = dt_lambda_max
        frames["wavelength_min"]["frame", i] = lambda_min
        frames["wavelength_max"]["frame", i] = lambda_max
        frames["delta_wavelength_min"]["frame", i] = dlambda_min
        frames["delta_wavelength_max"]["frame", i] = dlambda_max
        frames["time_correction"]["frame", i] = far_t_open["frame", i]

    frames["wfm_chopper_mid_point"] = z_wfm
    return frames
Example #21
File: stitch_test.py Project: scipp/ess
def _do_stitching_on_beamline(wavelengths, dim, event_mode=False):
    # Make beamline parameters for 6 frames
    coords = wfm.make_fake_beamline(nframes=6)

    # They are all created half-way through the pulse.
    # Compute their arrival time at the detector.
    alpha = sc.to_unit(constants.m_n / constants.h, 's/m/angstrom')
    dz = sc.norm(coords['position'] - coords['source_position'])
    arrival_times = sc.to_unit(alpha * dz * wavelengths,
                               'us') + coords['source_pulse_t_0'] + (
                                   0.5 * coords['source_pulse_length'])
    coords[dim] = arrival_times

    # Make a data array that contains the beamline and the time coordinate
    tmin = sc.min(arrival_times)
    tmax = sc.max(arrival_times)
    dt = 0.1 * (tmax - tmin)

    if event_mode:
        num = 2
    else:
        num = 2001
    time_binning = sc.linspace(dim=dim,
                               start=(tmin - dt).value,
                               stop=(tmax + dt).value,
                               num=num,
                               unit=dt.unit)
    events = sc.DataArray(data=sc.ones(dims=['event'],
                                       shape=arrival_times.shape,
                                       unit=sc.units.counts,
                                       with_variances=True),
                          coords=coords)
    if event_mode:
        da = sc.bin(events, edges=[time_binning])
    else:
        da = sc.histogram(events, bins=time_binning)

    # Find location of frames
    frames = wfm.get_frames(da)

    stitched = wfm.stitch(frames=frames, data=da, dim=dim, bins=2001)

    wav = scn.convert(stitched,
                      origin='tof',
                      target='wavelength',
                      scatter=False)
    if event_mode:
        out = wav
    else:
        out = sc.rebin(wav,
                       dim='wavelength',
                       bins=sc.linspace(dim='wavelength',
                                        start=1.0,
                                        stop=10.0,
                                        num=1001,
                                        unit='angstrom'))

    choppers = {key: da.meta[key].value for key in ch.find_chopper_keys(da)}
    # Distance between WFM choppers
    dz_wfm = sc.norm(choppers["chopper_wfm_2"]["position"].data -
                     choppers["chopper_wfm_1"]["position"].data)
    # Delta_lambda  / lambda
    dlambda_over_lambda = dz_wfm / sc.norm(
        coords['position'] - frames['wfm_chopper_mid_point'].data)

    return out, dlambda_over_lambda
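
With the `make_fake_beamline` defaults from Example #17 (WFM choppers at 6.775 m and 7.225 m along the beam, detector at 60 m), this resolution comes out at roughly:

dz_wfm = 7.225 - 6.775                                         # 0.45 m
dlambda_over_lambda = dz_wfm / (60.0 - 0.5 * (6.775 + 7.225))  # ~0.0085, i.e. ~0.85 %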