Code example #1
File: operations.py  Project: scipp/ess
import numpy as np
import scipp as sc


def groupby2D(data, nx_target, ny_target, x='x', y='y', z='wavelength'):
    """Downsample a 2D detector grid to nx_target x ny_target super-pixels."""
    # Number of original pixels merged into one super-pixel along each axis.
    element_width_x = data.sizes[x] // nx_target
    element_width_y = data.sizes[y] // ny_target

    # Integer label of the target super-pixel for every original pixel.
    xx = sc.Variable(dims=[x],
                     values=np.arange(data.sizes[x]) // element_width_x)
    yy = sc.Variable(dims=[y],
                     values=np.arange(data.sizes[y]) // element_width_y)
    grid = xx + nx_target * yy
    # Flatten the label grid in the same order as the data is flattened below.
    spectrum_mapping = sc.Variable(dims=["spectrum"],
                                   values=np.ravel(grid.values, order='F'))

    reshaped = sc.Dataset()
    for key, val in data.items():
        reshaped[key] = sc.flatten(x=val, dims=[y, x], to='spectrum')

    reshaped.coords["spectrum_mapping"] = spectrum_mapping

    grouped = sc.groupby(reshaped, "spectrum_mapping").sum("spectrum")

    reshaped = sc.Dataset()
    for key, val in grouped.items():
        item = sc.fold(x=val,
                       dim="spectrum_mapping",
                       dims=[y, x],
                       shape=(ny_target, nx_target))
        reshaped[key] = item
    return reshaped
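
A minimal usage sketch on hypothetical synthetic data (assumes a recent scipp, with np and sc imported as in the snippet above):

data = sc.Dataset(
    {'counts': sc.array(dims=['y', 'x'], values=np.random.rand(8, 8))})

# Merge 2x2 pixel blocks: the 8x8 grid becomes 4x4 super-pixels.
coarse = groupby2D(data, nx_target=4, ny_target=4)
print(coarse['counts'].sizes)  # {'y': 4, 'x': 4}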
Code example #2
def reduce(data, q_bins):
    data = sc.neutron.convert(data, 'wavelength', 'Q',
                              out=data)  # TODO no gravity yet
    data = sc.histogram(data, q_bins)
    if 'layer' in data.coords:
        return sc.groupby(data, 'layer').sum('spectrum')
    else:
        return sc.sum(data, 'spectrum')
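
The q_bins argument is a bin-edge Variable in momentum transfer; a hypothetical construction, matching the old scipp API used in the snippet:

import numpy as np
import scipp as sc

q_bins = sc.Variable(['Q'],
                     unit=sc.units.one / sc.units.angstrom,
                     values=np.linspace(0.0, 10.0, num=101))
reduced = reduce(data, q_bins)  # 'data' is a wavelength-space dataset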
Code example #3
def reduce_by_wavelength(data, q_bins, groupby, wavelength_bands):
    slices = contrib.make_slices(
        contrib.midpoints(data.coords['wavelength'], 'wavelength'),
        'wavelength', wavelength_bands)
    data = sc.neutron.convert(data, 'wavelength', 'Q',
                              out=data)  # TODO no gravity yet
    bands = None
    for s in slices:
        band = sc.histogram(data['Q', s], q_bins)
        band = sc.groupby(band, group=groupby).sum('spectrum')
        bands = sc.concatenate(bands, band,
                               'wavelength') if bands is not None else band
    bands.coords['wavelength'] = wavelength_bands
    return bands
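
wavelength_bands is consumed by contrib.make_slices and re-attached as a coordinate on the result; a hypothetical edge Variable splitting 1-5 Angstrom into four bands (same old-scipp style as above):

import numpy as np
import scipp as sc

wavelength_bands = sc.Variable(['wavelength'],
                               unit=sc.units.angstrom,
                               values=np.linspace(1.0, 5.0, num=5))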
Code example #4
def convert_with_calibration(dataset, cal):
    """ Convert from tof to dspacing taking calibration into account  """
    validate_calibration(cal)
    validate_dataset(dataset)
    output = dataset.copy()

    # 1. There may be a grouping of detectors, in which case we need to
    # apply it to the cal information first.
    if "detector_info" in list(output.coords.keys()):
        # 1a. Merge cal with detector-info, which contains information on how
        # `dataset` groups its detectors. At the same time, the coord
        # comparison in `merge` ensures that detector IDs of `dataset` match
        # those of `calibration`.
        detector_info = output.coords["detector_info"].value
        cal = sc.merge(detector_info, cal)

        # Masking and grouping information in the calibration table interferes
        # with `groupby.mean`, dropping.

        for name in ("mask", "group"):
            if name in list(cal.keys()):
                del cal[name]

        # 1b. Translate detector-based calibration information into coordinates
        # of data. We are hard-coding some information here: the existence of
        # "spectra", since we require labels named "spectrum" and a
        # corresponding dimension. Given that this branch is only reached if
        # "detector_info" is present, this should be ok.
        cal = sc.groupby(cal, group="spectrum").mean('detector')

    elif cal["tzero"].dims not in dataset.dims:
        raise ValueError("Calibration depends on dimension " +
                         cal["tzero"].dims +
                         " that is not present in the converted data " +
                         dataset.dims + ". Missing detector information?")

    # 2. Convert to tof if input is in another dimension
    if 'tof' not in output.coords:
        list_of_dims_for_input = list(output.dims)
        list_of_dims_for_input.remove('spectrum')
        # TODO what happens if there are more than 1 dimension left
        dim_to_convert = list_of_dims_for_input[0]
        output = scn.convert(output, dim_to_convert, 'tof', scatter=True)

    # 3. Transform coordinate
    # all values of DIFa are equal to zero: d-spacing = (tof - TZERO) / DIFc
    if np.all(cal['difa'].data.values == 0):
        # dealing with 'bins'
        output.bins.coords['dspacing'] = \
            (output.bins.coords['tof'] - cal["tzero"].data) / cal["difc"].data
        # dealing with other part of dataset
        output.coords['tof'] = \
            (output.coords['tof'] - cal["tzero"].data) / cal["difc"].data

    else:
        # DIFa non-zero: tof = DIFa * d**2 + DIFc * d + TZERO.
        # d-spacing is the positive root of this quadratic.

        # dealing with 'bins'
        output.bins.coords['dspacing'] = 0.5 * (-cal["difc"].data + np.sqrt(
            cal["difc"].data**2 + 4 * cal["difa"].data *
            (output.bins.coords['tof'] - cal["tzero"].data))) / cal["difa"].data

        # dealing with other part of dataset
        output.coords['tof'] = 0.5 * (-cal["difc"].data + np.sqrt(
            cal["difc"].data**2 + 4 * cal["difa"].data *
            (output.coords['tof'] - cal["tzero"].data))) / cal["difa"].data

    del output.bins.constituents['data'].coords['tof']
    output.rename_dims({'tof': 'dspacing'})

    # change units
    output.coords['dspacing'].unit = sc.units.angstrom

    # transpose d-spacing if tof dimension of input dataset has more
    # than 1 dimension
    if len(output.coords['dspacing'].shape) == 2:
        output.coords['dspacing'] = sc.transpose(output.coords['dspacing'],
                                                 dims=['spectrum', 'dspacing'])

    # move `position`, `source_position` and `sample_position`
    # from coordinates to attributes
    if 'sample_position' in output.coords:
        output.attrs['sample_position'] = output.coords['sample_position']
        del output.coords['sample_position']

    if 'source_position' in output.coords:
        output.attrs['source_position'] = output.coords['source_position']
        del output.coords['source_position']

    if 'position' in output.coords:
        output.attrs['position'] = output.coords['position']
        del output.coords['position']

    return output
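
The DIFa != 0 branch inverts tof = DIFa * d**2 + DIFc * d + TZERO by taking the positive root of the quadratic; a quick numpy check of that algebra with hypothetical calibration constants:

import numpy as np

difa, difc, tzero = 0.1, 2000.0, -5.0  # hypothetical calibration constants
d = 1.7                                # some d-spacing value
tof = difa * d**2 + difc * d + tzero
# Positive root of difa*d**2 + difc*d + (tzero - tof) = 0, as used above:
d_back = 0.5 * (-difc + np.sqrt(difc**2 + 4 * difa * (tof - tzero))) / difa
assert np.isclose(d, d_back)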
Code example #5
def process_vanadium_data(vanadium,
                          empty_instr,
                          lambda_binning,
                          calibration=None,
                          **absorp):
    """
    Create corrected vanadium dataset

    Correction applied to Vanadium data only
    1. Subtract empty instrument
    2. Correct absorption
    3. Use calibration for grouping
    4. Focus into groups

    Parameters
    ----------
    vanadium : Vanadium nexus datafile

    empty_instr: Empty instrument nexus file

    lambda_binning: format=(lambda_min, lambda_max, number_of_bins)
                    lambda_min and lambda_max are in Angstroms

    calibration: calibration file
                 Mantid format

    **absorp: dictionary containing information to correct absorption for sample and vanadium.
              Only the inputs related to Vanadium will be selected to calculate the correction.
              see docstrings of powder_reduction for more details
              see help of Mantid's algorithm CylinderAbsorption for details
              https://docs.mantidproject.org/nightly/algorithms/CylinderAbsorption-v1.html

    """
    vana_red = process_event_data(vanadium, lambda_binning)
    ec_red = process_event_data(empty_instr, lambda_binning)

    # vana - EC
    vana_red -= ec_red

    del ec_red

    # Absorption correction applied
    if bool(absorp):
        # The values of number_density, scattering and attenuation are hard-coded since they must
        # correspond to Vanadium. Only radius and height of the Vanadium cylindrical sample
        # shape can be set. The names of these inputs if present have to be renamed to match
        # the requirements of Mantid's algorithm CylinderAbsorption

        #  Create dictionary to calculate absorption correction for Vanadium.
        absorp_vana = {
            key.replace('Vanadium', 'Sample'): value
            for key, value in absorp.items() if 'Vanadium' in key
        }
        absorp_vana['SampleNumberDensity'] = 0.07118
        absorp_vana['ScatteringXSection'] = 5.16
        absorp_vana['AttenuationXSection'] = 4.8756

        correction = absorption_correction(vanadium, lambda_binning,
                                           **absorp_vana)

        # the 3 following lines of code are to place info about source and sample
        # position at the right place in the correction dataArray in order to
        # proceed to the normalization

        del correction.coords['source_position']
        del correction.coords['sample_position']
        del correction.coords['position']

        correction = sc.rebin(
            correction, 'wavelength',
            sc.Variable(['wavelength'],
                        values=vana_red.coords['wavelength'].values,
                        unit=sc.units.angstrom))

        vana_red /= correction

        del correction

    # convert to TOF
    vana_red_tof = sc.neutron.convert(vana_red,
                                      'wavelength',
                                      'tof',
                                      realign='linear')

    del vana_red

    # convert to d-spacing (no calibration applied)
    vana_dspacing = sc.neutron.convert(vana_red_tof,
                                       'tof',
                                       'd-spacing',
                                       realign='linear')

    del vana_red_tof

    # Calibration
    # Load
    input_load_cal = {'InstrumentFilename': 'WISH_Definition.xml'}
    calvana = load_calibration(calibration, mantid_args=input_load_cal)
    # Merge table with detector->spectrum mapping from vanadium
    # (implicitly checking that detectors between vanadium and calibration are the same)
    cal_vana = sc.merge(calvana, vana_dspacing.coords['detector_info'].value)

    # Compute spectrum mask from detector mask
    maskvana = sc.groupby(cal_vana['mask'], group='spectrum').any('detector')

    # Compute spectrum groups from detector groups
    gvana = sc.groupby(cal_vana['group'], group='spectrum')

    groupvana = gvana.min('detector')

    assert sc.identical(groupvana, gvana.max('detector')), \
        "Calibration table has mismatching group for detectors in same spectrum"

    vana_dspacing.coords['group'] = groupvana.data
    vana_dspacing.masks['mask'] = maskvana.data

    # Focus
    focused_vana = sc.groupby(vana_dspacing, group='group').sum('spectrum')

    return focused_vana
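
The key renaming in the absorption step maps the Vanadium-specific inputs onto the Sample* parameter names expected by Mantid's CylinderAbsorption; a standalone check of that comprehension with hypothetical values:

absorp = {'CylinderVanadiumRadius': 0.4,   # hypothetical value
          'CylinderVanadiumHeight': 4.0,   # hypothetical value
          'CylinderSampleRadius': 0.25}    # sample entry, filtered out
absorp_vana = {key.replace('Vanadium', 'Sample'): value
               for key, value in absorp.items() if 'Vanadium' in key}
print(absorp_vana)  # {'CylinderSampleRadius': 0.4, 'CylinderSampleHeight': 4.0}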
Code example #6
def powder_reduction(sample='sample.nxs',
                     calibration=None,
                     vanadium=None,
                     empty_instr=None,
                     lambda_binning=(0.7, 10.35, 5615),
                     **absorp):
    """
    Simple WISH reduction workflow

    Note
    ----

    The sample data were not recorded using the same layout
    of WISH as the Vanadium and empty instrument. That's why:
    - loading calibration for Vanadium used a different IDF
    - the Vanadium correction involved cropping the sample data
      to the first 5 groups (panels)
    ----

    Corrections applied:
    - Vanadium correction
    - Absorption correction
    - Normalization by monitors
    - Conversion considering calibration
    - Masking and grouping detectors into panels

    Parameters
    ----------
    sample: Nexus event file

    calibration: .cal file following Mantid's standards
        The columns correspond to detectors' IDs, offset, selection of detectors
        and groups

    vanadium: Nexus event file

    empty_instr: Nexus event file

    lambda_binning: min, max and number of steps for binning in wavelength
                    min and max are in Angstroms

    **absorp: dictionary containing information to correct absorption for Sample and
              Vanadium.
              At most two entries relate to the correction for Vanadium: the radius
              and height of the cylindrical sample shape. To distinguish them from
              the inputs related to the sample, their keys are
              'CylinderVanadiumRadius' and 'CylinderVanadiumHeight'. The other keys
              of the 'absorp' dictionary follow Mantid's syntax and relate to the
              sample data only.
              see help of Mantid's algorithm CylinderAbsorption for details
              https://docs.mantidproject.org/nightly/algorithms/CylinderAbsorption-v1.html

    Returns
    -------
    Scipp dataset containing reduced data in d-spacing

    Hints
    -----

    To plot the output data, one can histogram in d-spacing and sum according to groups
    using scipp.histogram and sc.sum, respectively.

    """
    # Load counts
    sample_data = sc.neutron.load(sample,
                                  advanced_geometry=True,
                                  load_pulse_times=False,
                                  mantid_args={'LoadMonitors': True})

    # Load calibration
    if calibration is not None:
        input_load_cal = {"InstrumentName": "WISH"}
        cal = load_calibration(calibration, mantid_args=input_load_cal)
        # Merge table with detector->spectrum mapping from sample
        # (implicitly checking that detectors between sample and calibration are the same)
        cal_sample = sc.merge(cal, sample_data.coords['detector_info'].value)
        # Compute spectrum mask from detector mask
        mask = sc.groupby(cal_sample['mask'], group='spectrum').any('detector')

        # Compute spectrum groups from detector groups
        g = sc.groupby(cal_sample['group'], group='spectrum')

        group = g.min('detector')

        assert sc.identical(group, g.max('detector')), \
            "Calibration table has mismatching group for detectors in same spectrum"

        sample_data.coords['group'] = group.data
        sample_data.masks['mask'] = mask.data

    # Correct 4th monitor spectrum
    # There are 5 monitors for WISH. Only one, the fourth one, is selected for
    # correction (like in the real WISH workflow).

    # Select fourth monitor and convert from tof to wavelength
    mon4_lambda = sc.neutron.convert(sample_data.attrs['monitor4'].values,
                                     'tof', 'wavelength')

    # Spline background
    mon4_spline_background = bspline_background(mon4_lambda,
                                                sc.Dim('wavelength'),
                                                smoothing_factor=70)

    # Smooth monitor
    mon4_smooth = smooth_data(mon4_spline_background,
                              dim='wavelength',
                              NPoints=40)
    # Delete intermediate data
    del mon4_lambda, mon4_spline_background

    # Correct data
    # 1. Normalize to monitor
    # Convert to wavelength (counts)
    sample_lambda = sc.neutron.convert(sample_data, 'tof', 'wavelength')

    # Rebin monitors' data
    lambda_min, lambda_max, number_bins = lambda_binning

    edges_lambda = sc.Variable(['wavelength'],
                               unit=sc.units.angstrom,
                               values=np.linspace(lambda_min,
                                                  lambda_max,
                                                  num=number_bins))
    mon_rebin = sc.rebin(mon4_smooth, 'wavelength', edges_lambda)

    # Realign sample data
    sample_lambda.realign({'wavelength': edges_lambda})
    sample_lambda /= mon_rebin

    del mon_rebin, mon4_smooth

    # 2. absorption correction
    if bool(absorp):
        # Copy dictionary of absorption parameters
        absorp_sample = absorp.copy()
        # Remove input related to Vanadium if present in absorp dictionary
        found_vana_info = [
            key for key in absorp_sample.keys() if 'Vanadium' in key
        ]

        for item in found_vana_info:
            absorp_sample.pop(item, None)

        # Calculate absorption correction for sample data
        correction = absorption_correction(sample, lambda_binning,
                                           **absorp_sample)

        # the 3 following lines of code are to place info about source and sample
        # position at the right place in the correction dataArray in order to
        # proceed to the normalization

        del correction.coords['source_position']
        del correction.coords['sample_position']
        del correction.coords['position']

        correction_rebin = sc.rebin(correction, 'wavelength', edges_lambda)

        del correction

        sample_lambda /= correction_rebin

    del sample_data

    sample_tof = sc.neutron.convert(sample_lambda,
                                    'wavelength',
                                    'tof',
                                    realign='linear')

    del sample_lambda

    # 3. Convert to d-spacing taking calibration into account
    # Standard conversion has to be used in all cases until support of
    # convert_with_calibration for realign='linear' is implemented
    sample_dspacing = sc.neutron.convert(sample_tof,
                                         'tof',
                                         'd-spacing',
                                         realign='linear')
    if calibration is not None:
        del cal_sample

    # if calibration is None:
    #     # No calibration data, use standard convert algorithm
    #     sample_dspacing = sc.neutron.convert(sample_tof, 'tof', 'd-spacing', realign='linear')
    #
    # else:
    #     # Calculate dspacing from calibration file
    #     sample_dspacing = sc.neutron.diffraction.convert_with_calibration(sample_tof, cal_sample)
    #     del cal_sample

    # 4. Focus panels
    # Assuming sample is in d-spacing: Focus into groups
    focused = sc.groupby(sample_dspacing, group='group').sum('spectrum')

    del sample_dspacing

    # 5. Vanadium correction (requires Vanadium and Empty instrument)
    if vanadium is not None and empty_instr is not None:
        print("Proceed with reduction of Vanadium data ")

        vana_red_focused = process_vanadium_data(vanadium, empty_instr,
                                                 lambda_binning, calibration,
                                                 **absorp)

        # The following selection of groups depends on the loaded data for
        # Sample, Vanadium and Empty instrument
        focused = focused['group', 0:5].copy()

        # histogram vanadium for normalizing + cleaning 'metadata'
        vana_histo = sc.histogram(vana_red_focused)
        del vana_red_focused
        vana_histo.coords['detector_info'] = focused.coords[
            'detector_info'].copy()
        del vana_histo.coords['source_position']
        del vana_histo.coords['sample_position']

        # normalize by vanadium
        focused /= vana_histo

        del vana_histo

    return focused
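
A hypothetical invocation (file names and cylinder dimensions are made up; the Vanadium keys follow the 'CylinderVanadium*' naming described in the docstring, the Sample keys follow CylinderAbsorption's syntax):

result = powder_reduction(sample='sample.nxs',
                          calibration='wish.cal',
                          vanadium='vanadium.nxs',
                          empty_instr='empty.nxs',
                          lambda_binning=(0.7, 10.35, 5615),
                          CylinderSampleRadius=0.25,
                          CylinderSampleHeight=4.0,
                          CylinderVanadiumRadius=0.4,
                          CylinderVanadiumHeight=4.0)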
Code example #7
def process_vanadium_data(vanadium,
                          empty_instr,
                          lambda_binning,
                          calibration=None,
                          **absorp):
    """
    Create corrected vanadium dataset

    Correction applied to Vanadium data only
    1. Subtract empty instrument
    2. Correct absorption
    3. Use calibration for grouping
    4. Focus into groups

    Parameters
    ----------
    vanadium : Vanadium nexus datafile

    empty_instr: Empty instrument nexus file

    lambda_binning: format=(lambda_min, lambda_max, number_of_bins)
                    lambda_min and lambda_max are in Angstroms

    calibration: calibration file
                 Mantid format

    **absorp: dictionary containing information to correct absorption for
              sample and vanadium
              Only the inputs related to Vanadium will be selected to calculate
              the correction
              see docstrings of powder_reduction for more details
              see help of Mantid's algorithm CylinderAbsorption for details
        https://docs.mantidproject.org/nightly/algorithms/CylinderAbsorption-v1.html

    """
    vana_red = process_event_data(vanadium, lambda_binning)
    ec_red = process_event_data(empty_instr, lambda_binning)

    # remove 'spectrum' from wavelength coordinate and match this coordinate
    # between Vanadium and Empty instrument data
    min_lambda = vana_red.coords['wavelength'].values[:, 0].min()
    max_lambda = vana_red.coords['wavelength'].values[:, 1].max()
    vana_red.coords['wavelength'] = sc.Variable(['wavelength'],
                                                unit=sc.units.angstrom,
                                                values=np.linspace(min_lambda,
                                                                   max_lambda,
                                                                   num=2))

    ec_red.coords['wavelength'] = sc.Variable(['wavelength'],
                                              unit=sc.units.angstrom,
                                              values=np.linspace(min_lambda,
                                                                 max_lambda,
                                                                 num=2))

    # vana - EC
    ec_red.coords['wavelength'] = vana_red.coords['wavelength']
    vana_red.bins.concatenate(-ec_red, out=vana_red)

    del ec_red

    # Absorption correction applied
    if bool(absorp):
        # The values of number_density, scattering and attenuation are
        # hard-coded since they must correspond to Vanadium. Only radius and
        # height of the Vanadium cylindrical sample shape can be set. The
        # names of these inputs if present have to be renamed to match
        # the requirements of Mantid's algorithm CylinderAbsorption

        #  Create dictionary to calculate absorption correction for Vanadium.
        absorp_vana = {
            key.replace('Vanadium', 'Sample'): value
            for key, value in absorp.items() if 'Vanadium' in key
        }
        absorp_vana['SampleNumberDensity'] = 0.07118
        absorp_vana['ScatteringXSection'] = 5.16
        absorp_vana['AttenuationXSection'] = 4.8756

        correction = absorption_correction(vanadium, lambda_binning,
                                           **absorp_vana)

        # the 3 following lines of code are to place info about source and
        # sample position at the right place in the correction dataArray in
        # order to proceed to the normalization

        del correction.coords['source_position']
        del correction.coords['sample_position']
        del correction.coords['position']

        lambda_min, lambda_max, number_bins = lambda_binning

        edges_lambda = sc.Variable(['wavelength'],
                                   unit=sc.units.angstrom,
                                   values=np.linspace(lambda_min,
                                                      lambda_max,
                                                      num=number_bins))

        correction = sc.rebin(correction, 'wavelength', edges_lambda)

        vana_red = vana_red.bins / sc.lookup(func=correction, dim='wavelength')

        del correction

    # Calibration
    # Load
    input_load_cal = {'InstrumentFilename': 'WISH_Definition.xml'}
    calvana = load_calibration(calibration, mantid_args=input_load_cal)
    # Merge table with detector->spectrum mapping from vanadium
    # (implicitly checking that detectors between vanadium and
    # calibration are the same)
    cal_vana = sc.merge(calvana, vana_red.coords['detector_info'].value)

    del calvana

    # Compute spectrum mask from detector mask
    maskvana = sc.groupby(cal_vana['mask'], group='spectrum').any('detector')

    # Compute spectrum groups from detector groups
    gvana = sc.groupby(cal_vana['group'], group='spectrum')

    groupvana = gvana.min('detector')

    assert sc.identical(groupvana, gvana.max('detector')), \
        ("Calibration table has mismatching group "
         "for detectors in same spectrum")

    vana_red.coords['group'] = groupvana.data
    vana_red.masks['mask'] = maskvana.data

    # convert to d-spacing with calibration
    vana_dspacing = convert_with_calibration(vana_red, cal_vana)

    del vana_red, cal_vana

    # Focus
    focused_vana = sc.groupby(vana_dspacing,
                              group='group').bins.concatenate('spectrum')

    del vana_dspacing

    return focused_vana
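
Unlike example #5, this version keeps event (binned) data throughout: the absorption correction divides event weights via sc.lookup, and focusing concatenates event lists instead of summing histograms. A minimal sketch of the lookup-based normalization, assuming a recent scipp with the DataArray.bin method (the event values are hypothetical):

import scipp as sc

# Hypothetical weighted events carrying a wavelength coordinate.
events = sc.DataArray(
    data=sc.ones(dims=['event'], shape=[4], unit='counts'),
    coords={'wavelength': sc.array(dims=['event'],
                                   values=[1.2, 1.8, 2.3, 2.9],
                                   unit='angstrom')})
edges = sc.array(dims=['wavelength'], values=[1.0, 2.0, 3.0], unit='angstrom')
binned = events.bin(wavelength=edges)

# Histogrammed correction defined on the same wavelength binning.
correction = sc.DataArray(
    data=sc.array(dims=['wavelength'], values=[2.0, 4.0]),
    coords={'wavelength': edges})

# Divide each event weight by the correction value of its wavelength bin.
normalized = binned.bins / sc.lookup(func=correction, dim='wavelength')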
Code example #8
def get_detector_properties(ws,
                            source_pos,
                            sample_pos,
                            spectrum_dim,
                            advanced_geometry=False):
    if not advanced_geometry:
        return (get_detector_pos(ws, spectrum_dim), None, None)
    spec_info = ws.spectrumInfo()
    det_info = ws.detectorInfo()
    comp_info = ws.componentInfo()
    nspec = len(spec_info)
    det_rot = np.zeros([nspec, 3, 3])
    det_bbox = np.zeros([nspec, 3])

    if sample_pos is not None and source_pos is not None:
        total_detectors = spec_info.detectorCount()
        act_beam = (sample_pos - source_pos)
        rot = _rot_from_vectors(act_beam, sc.vector(value=[0, 0, 1]))
        inv_rot = _rot_from_vectors(sc.vector(value=[0, 0, 1]), act_beam)

        pos_d = sc.Dataset()
        # Create empty variables to hold position info for all detectors of all spectra
        pos_d["x"] = sc.zeros(dims=["detector"],
                              shape=[total_detectors],
                              unit=sc.units.m)
        pos_d["y"] = sc.zeros_like(pos_d["x"])
        pos_d["z"] = sc.zeros_like(pos_d["x"])
        pos_d.coords[spectrum_dim] = sc.array(dims=["detector"],
                                              values=np.empty(total_detectors))

        spectrum_values = pos_d.coords[spectrum_dim].values

        x_values = pos_d["x"].values
        y_values = pos_d["y"].values
        z_values = pos_d["z"].values

        idx = 0
        for i, spec in enumerate(spec_info):
            if spec.hasDetectors:
                definition = spec_info.getSpectrumDefinition(i)
                n_dets = len(definition)
                quats = []
                bboxes = []
                for j in range(n_dets):
                    det_idx = definition[j][0]
                    p = det_info.position(det_idx)
                    r = det_info.rotation(det_idx)
                    spectrum_values[idx] = i
                    x_values[idx] = p.X()
                    y_values[idx] = p.Y()
                    z_values[idx] = p.Z()
                    idx += 1
                    quats.append(
                        np.array([r.imagI(),
                                  r.imagJ(),
                                  r.imagK(),
                                  r.real()]))
                    if comp_info.hasValidShape(det_idx):
                        s = comp_info.shape(det_idx)
                        bboxes.append(s.getBoundingBox().width())
                det_rot[i, :] = \
                    sc.geometry.rotation_matrix_from_quaternion_coeffs(
                        np.mean(quats, axis=0))
                det_bbox[i, :] = np.sum(bboxes, axis=0)

        rot_pos = rot * sc.geometry.position(pos_d["x"].data, pos_d["y"].data,
                                             pos_d["z"].data)

        _to_spherical(rot_pos, pos_d)

        averaged = sc.groupby(pos_d,
                              spectrum_dim,
                              bins=sc.Variable(dims=[spectrum_dim],
                                               values=np.arange(
                                                   -0.5,
                                                   len(spec_info) + 0.5,
                                                   1.0))).mean("detector")

        sign = averaged["p-sign"].data / sc.abs(averaged["p-sign"].data)
        averaged["p"] = sign * (
            (np.pi * sc.units.rad) - averaged["p-delta"].data)
        averaged["x"] = averaged["r"].data * sc.sin(
            averaged["t"].data) * sc.cos(averaged["p"].data)
        averaged["y"] = averaged["r"].data * sc.sin(
            averaged["t"].data) * sc.sin(averaged["p"].data)
        averaged["z"] = averaged["r"].data * sc.cos(averaged["t"].data)

        pos = sc.geometry.position(averaged["x"].data, averaged["y"].data,
                                   averaged["z"].data)

        return (inv_rot * pos,
                sc.spatial.linear_transforms(dims=[spectrum_dim],
                                             values=det_rot),
                sc.vectors(dims=[spectrum_dim],
                           values=det_bbox,
                           unit=sc.units.m))
    else:
        pos = np.zeros([nspec, 3])

        for i, spec in enumerate(spec_info):
            if spec.hasDetectors:
                definition = spec_info.getSpectrumDefinition(i)
                n_dets = len(definition)
                vec3s = []
                quats = []
                bboxes = []
                for j in range(n_dets):
                    det_idx = definition[j][0]
                    p = det_info.position(det_idx)
                    r = det_info.rotation(det_idx)
                    vec3s.append([p.X(), p.Y(), p.Z()])
                    quats.append(
                        np.array([r.imagI(),
                                  r.imagJ(),
                                  r.imagK(),
                                  r.real()]))
                    if comp_info.hasValidShape(det_idx):
                        s = comp_info.shape(det_idx)
                        bboxes.append(s.getBoundingBox().width())
                pos[i, :] = np.mean(vec3s, axis=0)
                det_rot[i, :] = \
                    sc.geometry.rotation_matrix_from_quaternion_coeffs(
                        np.mean(quats, axis=0))
                det_bbox[i, :] = np.sum(bboxes, axis=0)
            else:
                pos[i, :] = [np.nan, np.nan, np.nan]
                # det_rot holds 3x3 matrices; fill the whole matrix with NaN
                det_rot[i, :, :] = np.nan
                det_bbox[i, :] = [np.nan, np.nan, np.nan]
        return (sc.vectors(dims=[spectrum_dim], values=pos, unit=sc.units.m),
                sc.spatial.linear_transforms(dims=[spectrum_dim],
                                             values=det_rot),
                sc.vectors(
                    dims=[spectrum_dim],
                    values=det_bbox,
                    unit=sc.units.m,
                ))
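
The rotation handling averages per-detector quaternions and converts them to a matrix; a tiny sanity check, assuming the sc.geometry helper used above is available in the installed scipp:

import numpy as np
import scipp as sc

# Identity quaternion (x, y, z, w) = (0, 0, 0, 1) should give the identity matrix.
m = sc.geometry.rotation_matrix_from_quaternion_coeffs(
    np.array([0.0, 0.0, 0.0, 1.0]))
print(m)  # expected to be (close to) np.eye(3)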
Code example #9
def grouping_reducer(*, dim, group):
    # Return a callable that sums over `dim` within groups defined by coord `group`.
    return lambda x: sc.groupby(x, group=group).sum(dim=dim)
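
A minimal usage sketch with hypothetical data (four spectra in two groups), assuming a recent scipp:

import numpy as np
import scipp as sc

da = sc.DataArray(
    data=sc.array(dims=['spectrum'], values=[1.0, 2.0, 3.0, 4.0]),
    coords={'group': sc.array(dims=['spectrum'],
                              values=np.array([0, 0, 1, 1], dtype='int64'))})

reducer = grouping_reducer(dim='spectrum', group='group')
print(reducer(da).values)  # [3. 7.]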