# Example #1
# ==========
#
# For this example, we will only focus on the ``Raw_Data`` dataset, which contains the 6D raw measurement data. First,
# let's access the HDF5 dataset and check whether it is a ``Main`` dataset in the first place:

h5_raw = h5_f['/Measurement_000/Channel_000/Raw_Data']
print(h5_raw)
print('h5_raw is a main dataset? {}'.format(px.hdf_utils.check_if_main(h5_raw)))

########################################################################################################################
# It turns out that this is indeed a Main dataset. Therefore, we can turn it into a PycroDataset without any
# problems.
#
# Creating a PycroDataset
# -----------------------
# All one needs to create a PycroDataset object is a Main dataset. Here is how we can supercharge ``h5_raw``:

pd_raw = px.PycroDataset(h5_raw)
print(pd_raw)

########################################################################################################################
# Notice how easy it was to create a PycroDataset object. Also, note how the PycroDataset is much more informative in
# comparison with the conventional h5py.Dataset object.
#
# PycroDataset = Supercharged(h5py.Dataset)
# =========================================
# Remember that PycroDataset is just an extension of the h5py.Dataset object class. Therefore, both ``h5_raw`` and
# ``pd_raw`` refer to the same object, as the following equality test demonstrates. The difference is that ``pd_raw``
# is also aware of the ``ancillary datasets`` and other information, which makes it a far more powerful object.


print(pd_raw == h5_raw)
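
########################################################################################################################
# As a quick illustration of what this buys us: ``pd_raw`` still behaves exactly like an h5py.Dataset (``shape``,
# ``dtype``, slicing all work as before), while additionally exposing convenience attributes, such as the names of the
# N dimensions, gathered from the ancillary datasets. This is only a minimal sketch of the idea:

# Standard h5py.Dataset properties still work on the PycroDataset:
print(pd_raw.shape, pd_raw.dtype)
# Extra information that only the PycroDataset knows about:
print(pd_raw.n_dim_labels)
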
########################################################################################################################
# Slicing the Main dataset
# ========================
# There is more than one way to extract an N dimensional slice out of the two dimensional Main dataset:
#
# 1. The easiest method - use the PycroDataset class to slice the data directly, as shown below
#
# 2. The easier method - reshape the data to N dimensions and slice the dataset
#
#    * This approach, while easy, may not be suitable for large datasets that do not fit in memory
#
# 3. The hard method - find the spectroscopic and position indices of interest and slice the 2D dataset
#

#########################################################################
# Approach 1 - Using the PycroDataset
# -----------------------------------
# We will use the new PycroDataset class to create an N dimensional slice directly from the two dimensional
# data in the file.
#

# First we convert from an HDF5 Dataset to a PycroDataset
pd_main = px.PycroDataset(h5_main)
print(pd_main.shape)

#########################################################################
# As you can see, the data is still two dimensional.  The PycroDataset has several attributes that will help with
# the slicing.
#

# Let's check the names and sizes of each dimension
print(pd_main.n_dim_labels)
print(pd_main.n_dim_sizes)

#########################################################################
# With this information, we can now get our data slice.
#
slice_dict = dict(X=[2], Y=[3], Field=[0], Cycle=[1])
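
########################################################################################################################
# Each entry in the dictionary above lists the index (or indices) to keep along the named dimension. Assuming the
# PycroDataset class exposes a ``slice()`` method that accepts such a dictionary and returns the N dimensional slice
# along with a success flag (a minimal sketch - check the class documentation for the exact signature in your version
# of pycroscopy):

# NOTE: the two-value return shown here is an assumption about the PycroDataset API
nd_slice, success = pd_main.slice(slice_dict)
print(success)
print(nd_slice.shape)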

########################################################################################################################
# Example #3
# ==========

h5_file = h5py.File(data_file_path, mode='r+')

print('Contents of data file:')
print('----------------------')
px.hdf_utils.print_tree(h5_file)
print('----------------------')

h5_meas_grp = h5_file['Measurement_000']

# Extracting some basic parameters:
num_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')
num_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')

# Getting a reference to the main dataset:
h5_main = px.PycroDataset(h5_meas_grp['Channel_000/Raw_Data'])
px.hdf_utils.write_simple_attrs(h5_main, {
    'quantity': 'Deflection',
    'units': 'V'
})

# Extracting the X axis - vector of frequencies
h5_spec_vals = px.hdf_utils.get_auxiliary_datasets(h5_main,
                                                   'Spectroscopic_Values')[-1]
freq_vec = np.squeeze(h5_spec_vals[()]) * 1E-3  # convert from Hz to kHz

print('Data currently of shape:', h5_main.shape)

x_label = 'Frequency (kHz)'
y_label = 'Amplitude (a.u.)'
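
########################################################################################################################
# As a quick visual check, we could plot the spectrum recorded at a single position using the labels defined above.
# This is only an illustrative sketch: it assumes matplotlib is available and that the raw measurement is
# complex-valued, which is why its magnitude is plotted.

import matplotlib.pyplot as plt  # assumed to be importable in this environment

fig, axis = plt.subplots()
# Magnitude of the response at the first position against the frequency vector extracted above
axis.plot(freq_vec, np.abs(h5_main[0]))
axis.set_xlabel(x_label)
axis.set_ylabel(y_label)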

########################################################################################################################
# Example #4
# ==========
#
# Let's open the file in editable ('r+') mode and look at the contents:

h5_file = h5py.File(h5_path, mode='r+')
print('File contents:\n')
px.hdf_utils.print_tree(h5_file)

########################################################################################################################
# The focus of this example is not on data storage or formatting but rather on demonstrating our new Process class,
# so let's dive straight into the main dataset whose spectra need to be analyzed:

h5_chan_grp = h5_file['Measurement_000/Channel_000']

# Accessing the dataset of interest:
h5_main = px.PycroDataset(h5_chan_grp['Raw_Data'])
print('\nThe main dataset:\n------------------------------------')
print(h5_main)

# Extract some metadata:
num_rows, num_cols = h5_main.pos_dim_sizes
freq_vec = h5_main.get_spec_values('Frequency') * 1E-3

########################################################################################################################
# Use the Process class
# ======================
#
# Instantiation
# -------------
# Note that the instantiation of the new ``PeakFinder`` Process class only requires that we supply the main dataset on
# which the computation will be performed:
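
# A minimal sketch of that instantiation, assuming ``PeakFinder`` was defined earlier in this example as a subclass of
# px.Process (the variable name ``fitter`` is our choice):
fitter = PeakFinder(h5_main)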