Example no. 1
import numpy as np
import pycroscopy as px
import pyUSID as usid


def createHDF5_file(signal, parm_dict, h5_path='', ds_name='FF_Raw'):
	"""
	Generates the HDF5 file given path to a specific file and a parameters dictionary

	Parameters
	----------
	h5_path : string
		Path to desired h5 file.

	signal : str, ndarray
		Path to the data file to be converted or a workspace array

	parm_dict : dict
		Scan parameters

	Returns
	-------
	h5_path: str
		The file path to the created H5 file

	"""

	sg = signal

	if isinstance(signal, str):
		# A file path was passed; load the signal array from disk
		# (load.signal is expected to come from the surrounding ffta package)
		sg = load.signal(signal)

	if not h5_path:  # if no path was passed, auto-generate one from the signal file name
		fname = signal.replace('/', '\\')
		h5_path = fname[:-4] + '.h5'
	else:
		fname = h5_path

	hdf = px.ioHDF5(h5_path)
	usid.hdf_utils.print_tree(hdf.file)

	ff_group = px.MicroDataGroup('FF_Group', parent='/')
	root_group = px.MicroDataGroup('/')

	#    fname = fname.split('\\')[-1][:-4]
	# Record the acquisition shape before wrapping the array in a MicroDataset
	if 'pnts_per_pixel' not in parm_dict.keys():
		parm_dict['pnts_per_avg'] = sg.shape[1]
		parm_dict['pnts_per_pixel'] = 1
		parm_dict['pnts_per_line'] = parm_dict['num_cols']

	sg = px.MicroDataset(ds_name, data=sg, dtype=np.float32, parent=ff_group)

	ff_group.addChildren([sg])
	ff_group.attrs = parm_dict

	# Get reference for writing the data
	h5_refs = hdf.writeData(ff_group, print_log=True)

	hdf.flush()

	return h5_path
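

# A minimal usage sketch (synthetic data; apart from 'num_cols', which the
# function reads above, the exact scan-parameter keys are placeholders):
if __name__ == '__main__':
	example_signal = np.random.rand(128, 16000).astype(np.float32)  # [pixels x points per average]
	example_parms = {'num_rows': 8, 'num_cols': 16}
	print(createHDF5_file(example_signal, example_parms, h5_path='ff_example.h5'))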

import wget
import numpy as np
import pycroscopy as px

##############################################################################
# In this example, we will work with a Band Excitation Piezoresponse Force Microscopy (BE-PFM) imaging dataset
# acquired from advanced atomic force microscopes. In this dataset, a spectrum was collected for each position in a
# two dimensional grid of spatial locations. Thus, this is a three dimensional dataset that has been flattened to a
# two dimensional matrix in accordance with the pycroscopy data format.
#
# Fortunately, statistical analysis, machine learning, spectral unmixing algorithms, etc. all accept data formatted
# in this same [position x spectra] two dimensional matrix layout.
#
# We will begin by downloading the BE-PFM dataset from Github
#
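
##############################################################################
# As a quick illustration of this flattening (a synthetic sketch, independent
# of the measurement data used below): a (rows x cols x spectral points) cube
# collapses into a [position x spectra] matrix with a single reshape.

example_cube = np.random.rand(10, 10, 256)                        # rows x cols x spectral points
example_flat = example_cube.reshape(-1, example_cube.shape[-1])   # [position x spectra]
print(example_flat.shape)                                         # (100, 256)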

data_file_path = 'temp_um.h5'
# download the data file from Github:
url = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/BELine_0004.h5'
_ = wget.download(url, data_file_path, bar=None)

hdf = px.ioHDF5(data_file_path)
h5_file = hdf.file

print('Contents of data file:')
print('----------------------')
px.hdf_utils.print_tree(h5_file)
print('----------------------')

h5_meas_grp = h5_file['Measurement_000']

# Extracting some basic parameters:
num_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')
num_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')

# Getting a reference to the main dataset:
h5_main = h5_meas_grp['Channel_000/Raw_Data']
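
##############################################################################
# As a quick sketch of what this reference gives us: using the grid size read
# above, the flattened [position x spectra] matrix can be viewed as a spatial
# data cube again.
raw_data = h5_main[()]                                 # read the full dataset into memory
data_cube = raw_data.reshape(num_rows, num_cols, -1)   # rows x cols x spectral points
print('Raw data reshaped to:', data_cube.shape)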

##############################################################################
# Next we build a small tree of MicroData objects: a main dataset holding the
# data to be written, a placeholder dataset, and a group to contain the latter.
data1 = np.random.rand(5, 7)

ds_main = px.MicroDataset('Main_Data', data=data1)
ds_empty = px.MicroDataset('Empty_Data', data=np.zeros((7, 5, 3), dtype=np.float32))

data_group = px.MicroDataGroup('Data_Group', parent='/')
root_group = px.MicroDataGroup('/')

data_group.addChildren([ds_empty])
root_group.addChildren([ds_main, data_group])

##############################################################################
# The showTree method allows us to view the data structure before the hdf5 file is
# created.
root_group.showTree()

##############################################################################
# Now that we have created the objects, we can write them to an hdf5 file

# First we specify the path to the file
h5_path = 'microdata_test.h5'

# Then we use the ioHDF5 class to build the file from our objects.
hdf = px.ioHDF5(h5_path)

##############################################################################
# The writeData method builds the hdf5 file using the structure defined by the
# MicroData objects.  It returns a list of references to all h5py objects in the
# new file.
h5_refs = hdf.writeData(root_group, print_log=True)

# We can use these references to get the h5py dataset and group objects
h5_main = px.io.hdf_utils.getH5DsetRefs(['Main_Data'], h5_refs)[0]
h5_empty = px.io.hdf_utils.getH5DsetRefs(['Empty_Data'], h5_refs)[0]

##############################################################################
# Compare the data in our dataset to the original
print(np.allclose(h5_main[()], data1))
Example no. 4
"""
============
Load Dataset
============

Load a dataset from the hdf5 file.  For this example, we will be loading the Raw_Data dataset.

"""

# Code source: pycroscopy
# License: MIT

import h5py
import pycroscopy as px

h5_path = px.uiGetFile(caption='Select .h5 file', filter='HDF5 file (*.h5)')

# Load the dataset with h5py
h5_file1 = h5py.File(h5_path, 'r')

h5_raw1 = h5_file1['Measurement_000/Channel_000/Raw_Data']

# Load the dataset with pycroscopy
hdf2 = px.ioHDF5(h5_path)
h5_file2 = hdf2.file

h5_raw2 = h5_file2['Measurement_000/Channel_000/Raw_Data']
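
##############################################################################
# Both handles should expose the same underlying HDF5 dataset; one way to
# verify this (a minimal check) is to compare shapes and contents directly.
import numpy as np

print(h5_raw1.shape, h5_raw2.shape)
print(np.allclose(h5_raw1[()], h5_raw2[()]))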