Example #1
    def test_build_ind_val_dsets_legal_bare_minimum_spec(self):
        num_cols = 3
        num_rows = 2
        sizes = [num_cols, num_rows]
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']

        descriptor = []
        for length, name, units in zip(sizes, dim_names, dim_units):
            descriptor.append(write_utils.Dimension(name, units, np.arange(length)))

        spec_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols)))

        ds_inds, ds_vals = write_utils.build_ind_val_dsets(descriptor, is_spectral=True)

        self.__validate_aux_virtual_dset_pair(ds_inds, ds_vals, dim_names, dim_units, spec_data,
                                              is_spectral=True)
Example #2
    def test_build_ind_val_dsets_legal_bare_minimum_spec(self):
        num_cols = 3
        num_rows = 2
        sizes = [num_cols, num_rows]
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']

        descriptor = []
        for length, name, units in zip(sizes, dim_names, dim_units):
            descriptor.append(write_utils.Dimension(name, units, np.arange(length)))

        spec_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols)))

        ds_inds, ds_vals = write_utils.build_ind_val_dsets(descriptor, is_spectral=True)

        self.__validate_aux_virtual_dset_pair(ds_inds, ds_vals, dim_names, dim_units, spec_data, is_spectral=True)
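
For reference, the index layout these tests assert can be reproduced with NumPy alone. A minimal sketch, independent of pyUSID, with X as the fastest-varying dimension:

import numpy as np

num_cols, num_rows = 3, 2
spec_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),     # X cycles fastest: 0 1 2 0 1 2
                       np.repeat(np.arange(num_rows), num_cols)))  # Y steps once per X cycle: 0 0 0 1 1 1
print(spec_inds.shape)  # (2, 6): one row per dimension, one column per spectroscopic step
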
Example #3
    def test_build_ind_val_dsets_legal_override_steps_offsets_base_name(self):
        num_cols = 2
        num_rows = 3
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']
        col_step = 0.25
        row_step = 0.05
        col_initial = 1
        row_initial = 0.2

        descriptor = []
        for length, name, units, step, initial in zip([num_cols, num_rows], dim_names, dim_units,
                                                      [col_step, row_step], [col_initial, row_initial]):
            descriptor.append(write_utils.Dimension(name, units, initial + step * np.arange(length)))

        new_base_name = 'Overriden'
        spec_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols)))
        spec_vals = np.vstack((np.tile(np.arange(num_cols), num_rows) * col_step + col_initial,
                               np.repeat(np.arange(num_rows), num_cols) * row_step + row_initial))

        ds_inds, ds_vals = write_utils.build_ind_val_dsets(descriptor, is_spectral=True, base_name=new_base_name)
        self.__validate_aux_virtual_dset_pair(ds_inds, ds_vals, dim_names, dim_units, spec_inds,
                                              vals_matrix=spec_vals, base_name=new_base_name, is_spectral=True)
Example #4
    def test_build_ind_val_dsets_legal_override_steps_offsets_base_name(self):
        num_cols = 2
        num_rows = 3
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']
        col_step = 0.25
        row_step = 0.05
        col_initial = 1
        row_initial = 0.2

        descriptor = []
        for length, name, units, step, initial in zip([num_cols, num_rows], dim_names, dim_units,
                                                      [col_step, row_step], [col_initial, row_initial]):
            descriptor.append(write_utils.Dimension(name, units, initial + step * np.arange(length)))

        new_base_name = 'Overriden'
        spec_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols)))
        spec_vals = np.vstack((np.tile(np.arange(num_cols), num_rows) * col_step + col_initial,
                               np.repeat(np.arange(num_rows), num_cols) * row_step + row_initial))

        ds_inds, ds_vals = write_utils.build_ind_val_dsets(descriptor, is_spectral=True, base_name=new_base_name)
        self.__validate_aux_virtual_dset_pair(ds_inds, ds_vals, dim_names, dim_units, spec_inds,
                                              vals_matrix=spec_vals, base_name=new_base_name, is_spectral=True)
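
The expected values matrix here is just the indices matrix scaled by each dimension's step and shifted by its offset. A minimal sketch of the same arithmetic using broadcasting (the values mirror the test above):

import numpy as np

num_cols, num_rows = 2, 3
spec_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                       np.repeat(np.arange(num_rows), num_cols)))
steps = np.array([[0.25], [0.05]])    # col_step, row_step as a column vector
offsets = np.array([[1.0], [0.2]])    # col_initial, row_initial
spec_vals = offsets + steps * spec_inds  # per row: initial + step * index
print(spec_vals[0])  # [1.   1.25 1.   1.25 1.   1.25]
print(spec_vals[1])  # [0.2  0.2  0.25 0.25 0.3  0.3 ]
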
Example #5
def load_ringdown(data_files, parm_dict, h5_path,
				  verbose=False, loadverbose=True, average=True, mirror=False):
	"""
	Generates the HDF5 file given the list of data_files and the parameters dictionary

	Creates a group RD_Group with a single main dataset written in chunks

	Parameters
	----------
	data_files : list
		List of the \*.ibw files to be individually scanned

	parm_dict : dict
		Scan parameters to be saved as attributes

	h5_path : string
		Path to H5 file on disk

	verbose : bool, optional
		Display outputs of each function or not

	loadverbose : bool, optional
		Whether to print any simple "loading Line X" statements for feedback

	average : bool, optional
		Whether to average the signals at each pixel before writing; saves both time and space

	mirror : bool, optional
		Flips the ibw signal if acquired during a retrace, so the data match the topography pixel-to-pixel

	Returns
	-------
	h5_rd : USIDataset
		The Ringdown main dataset created in the H5 file

	"""
	# e.g. a 16000-point signal, 200 averages per pixel, and 10 pixels per line
	#   (10 MHz sampling of a 1.6 ms signal = 16000 points; 10 * 200 = 2000 signals per line)
	# parm_dict['pnts_per_pixel'] = 200 (# signals at each pixel)
	#           ['pnts_per_avg'] = 16000 (# points per signal, called an "average")
	#           ['pnts_per_line'] = 2000 (# signals in each line)

	num_rows = parm_dict['num_rows']
	num_cols = parm_dict['num_cols']

	# The signals are hard-coded in the AFM software as 800 points long
	# Therefore, we can calculate pnts_per_pixel etc from the first file
	signal = loadibw(data_files[0])['wave']['wData']  # Load data.
	parm_dict['pnts_per_pixel'] = int(signal.shape[0] / (800 * num_cols))
	parm_dict['pnts_per_avg'] = 800  # hard-coded in our AFM software
	parm_dict['total_time'] = 16e-3  # hard-coded in our AFM software

	if 'AMPINVOLS' not in parm_dict:
		parm_dict.update({'AMPINVOLS': 100e-9})

	pnts_per_avg = parm_dict['pnts_per_avg']
	orig_pnts_per_pixel = parm_dict['pnts_per_pixel']
	if average:
		parm_dict['pnts_per_pixel'] = 1
		parm_dict['pnts_per_line'] = num_cols
	pnts_per_pixel = parm_dict['pnts_per_pixel']
	pnts_per_line = parm_dict['pnts_per_line']

	hdf = h5py.File(h5_path, 'a')  # explicit mode: create the file if needed, else open read/write

	try:
		rd_group = hdf.file.create_group('RD_Group')
	except ValueError:  # RD_Group already exists; create an indexed sibling instead
		rd_group = usid.hdf_utils.create_indexed_group(hdf.file['/'], 'RD_Group')

	pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols * pnts_per_pixel)),
				Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]
	ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False, verbose=verbose)

	spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
	ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True)

	for p in parm_dict:
		rd_group.attrs[p] = parm_dict[p]
	rd_group.attrs['pnts_per_line'] = num_cols  # to change number of pnts in a line

	h5_rd = usid.hdf_utils.write_main_dataset(rd_group,  # parent HDF5 group
											  (num_rows * num_cols * pnts_per_pixel, pnts_per_avg),
											  # shape of Main dataset
											  'Ringdown',  # Name of main dataset
											  'Amplitude',  # Physical quantity contained in Main dataset
											  'nm',  # Units for the physical quantity
											  pos_desc,  # Position dimensions
											  spec_desc,  # Spectroscopic dimensions
											  dtype=np.float32,  # data type / precision
											  compression='gzip',
											  main_dset_attrs=parm_dict)

	# Cycles through each data file (one per scan line). This takes a while (~few minutes)
	for num, k in enumerate(data_files):

		if loadverbose:
			fname = k.replace('/', '\\')
			print('####', fname.split('\\')[-1], '####')
			fname = str(num).rjust(4, '0')

		signal = loadibw(k)['wave']['wData']
		signal = np.reshape(signal.T, [num_cols * orig_pnts_per_pixel, pnts_per_avg])

		if average:
			pixels = np.split(signal, num_cols, axis=0)
			signal = np.vstack([np.mean(p, axis=0) for p in pixels])

		signal *= parm_dict['AMPINVOLS']

		if mirror:
			h5_rd[num_cols * pnts_per_pixel * num: num_cols * pnts_per_pixel * (num + 1), :] = np.flipud(signal[:, :])
		else:
			h5_rd[num_cols * pnts_per_pixel * num: num_cols * pnts_per_pixel * (num + 1), :] = signal[:, :]

	if verbose:
		usid.hdf_utils.print_tree(hdf.file, rel_paths=True)

	return h5_rd
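
A minimal usage sketch for load_ringdown; the folder, file list, and parameter values below are hypothetical, and in practice parm_dict comes from the loader's folder-scanning step:

import glob

data_files = sorted(glob.glob('E:/Data/ringdown/*.ibw'))  # hypothetical folder of .ibw files
parm_dict = {'num_rows': 64, 'num_cols': 128,             # example values only
             'FastScanSize': 32e-6, 'SlowScanSize': 8e-6}
h5_rd = load_ringdown(data_files, parm_dict, 'E:/Data/ringdown.h5',
                      average=True, mirror=True)
print(h5_rd.shape)  # (num_rows * num_cols, 800) when average=True
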
Example #6
def add_standard_sets(h5_path,
                      group,
                      fast_x=32e-6,
                      slow_y=8e-6,
                      parm_dict=None,
                      ds='FF_Raw',
                      verbose=False):
    """
	Adds Position_Indices and Position_Value datasets to a folder within the h5_file
	
	Uses the values of fast_x and fast_y to determine the values
	
	Parameters
	----------
	h5_path : h5 File or str 
		Points to a path to process
	
	group : str or h5py Group
		Location to write the data to, either as a string name or an h5py group

	fast_x : float, optional
		Fast-scan (X) extent in meters; overridden by parm_dict['FastScanSize'] when present

	slow_y : float, optional
		Slow-scan (Y) extent in meters; overridden by parm_dict['SlowScanSize'] when present

	parm_dict : dict, optional
		Scan parameters saved as attributes. If empty, they are read from the file via get_utils.get_params
		
	ds : str, optional
		Dataset name to search for within this group and set as h5_main
		
	verbose : bool, optional
		Whether to write to the command line
	"""

    hdf = px.io.HDFwriter(h5_path)

    if not parm_dict:
        parm_dict = get_utils.get_params(h5_path)

    if 'FastScanSize' in parm_dict:
        fast_x = parm_dict['FastScanSize']

    if 'SlowScanSize' in parm_dict:
        slow_y = parm_dict['SlowScanSize']

    try:
        num_rows = parm_dict['num_rows']
        num_cols = parm_dict['num_cols']
        pnts_per_avg = parm_dict['pnts_per_avg']
        dt = 1 / parm_dict['sampling_rate']
    except KeyError:  # fall back to defaults if the scan parameters are missing
        warnings.warn('Improper parameters specified.')
        num_rows = 64
        num_cols = 128
        pnts_per_avg = 1
        dt = 1

    # px.io.VirtualGroup expects a name; accept either a string or an h5py group
    if isinstance(group, str):
        grp = px.io.VirtualGroup(group)
    else:
        grp = px.io.VirtualGroup(group.name)

    pos_desc = [
        Dimension('X', 'm', np.linspace(0, fast_x, num_cols)),
        Dimension('Y', 'm', np.linspace(0, slow_y, num_rows))
    ]
    ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc,
                                                 is_spectral=False,
                                                 verbose=verbose)

    spec_desc = [Dimension('Time', 's', np.linspace(0, pnts_per_avg * dt, pnts_per_avg))]
    ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc,
                                                     is_spectral=True,
                                                     verbose=verbose)

    aux_ds_names = [
        'Position_Indices', 'Position_Values', 'Spectroscopic_Indices',
        'Spectroscopic_Values'
    ]

    grp.add_children([ds_pos_ind, ds_pos_val, ds_spec_inds, ds_spec_vals])

    h5_refs = hdf.write(grp, print_log=verbose)

    h5_main = hdf.file[grp.name]

    if ds:
        h5_main = usid.hdf_utils.find_dataset(hdf.file[grp.name], ds)[0]

    usid.hdf_utils.link_h5_objects_as_attrs(
        h5_main, usid.hdf_utils.get_h5_obj_refs(aux_ds_names, h5_refs))

    hdf.flush()

    return h5_main
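
A minimal usage sketch for add_standard_sets; the file path and group name are placeholders. When parm_dict is left empty, the scan parameters are read from the file:

h5_main = add_standard_sets('E:/Data/FF_data.h5',  # hypothetical H5 file
                            group='FF_Group',      # group containing the FF_Raw dataset
                            ds='FF_Raw',
                            verbose=True)
print(h5_main.shape)
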
Example #7
def load_pixel_averaged_FF(data_files, parm_dict, h5_path,
						   verbose=False, loadverbose=True, mirror=False):
	"""
	Creates a new group FF_Avg where the raw FF files are averaged together

	This function does not process the Raw data, and is more useful when the resulting
	Raw data matrix would be very large (causing memory errors).

	Pixel-wise averages are also more relevant in FF-processing, which makes this
	the more useful starting point.

	This Dataset is (n_pixels*n_rows, n_pnts_per_avg)

	Parameters
	----------
	data_files : list
		List of the \*.ibw files to be individually scanned

	parm_dict : dict
		Scan parameters to be saved as attributes

	h5_path : string
		Path to H5 file on disk

	verbose : bool, optional
		Display outputs of each function or not

	loadverbose : bool, optional
		Whether to print any simple "loading Line X" statements for feedback

	mirror : bool, optional
		Flips each averaged line when writing, so the data match the topography

	Returns
	-------
	h5_avg : Dataset
		The new averaged Dataset

	"""

	hdf = px.io.HDFwriter(h5_path)

	try:
		ff_avg_group = hdf.file.create_group('FF_Group')
	except ValueError:  # FF_Group already exists; create an indexed sibling
		ff_avg_group = usid.hdf_utils.create_indexed_group(hdf.file['/'], 'FF_Group')

	try:
		ff_avg_group = hdf.file[ff_avg_group.name].create_group('FF_Avg')
	except ValueError:  # FF_Avg already exists; create an indexed sibling
		ff_avg_group = usid.hdf_utils.create_indexed_group(ff_avg_group, 'FF_Avg')

	num_rows = parm_dict['num_rows']
	num_cols = parm_dict['num_cols']
	pnts_per_avg = parm_dict['pnts_per_avg']
	pnts_per_line = parm_dict['pnts_per_line']
	pnts_per_pixel = parm_dict['pnts_per_pixel']
	parm_dict['pnts_per_pixel'] = 1  # only 1 average per pixel now
	parm_dict['pnts_per_line'] = num_cols  # equivalent now with averaged data
	n_pix = int(pnts_per_line / pnts_per_pixel)
	dt = 1 / parm_dict['sampling_rate']

	# Set up the position vectors for the data
	pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols)),
				Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]
	ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False)

	spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
	ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True)

	for p in parm_dict:
		ff_avg_group.attrs[p] = parm_dict[p]
	ff_avg_group.attrs['pnts_per_line'] = num_cols  # to change number of pnts in a line
	ff_avg_group.attrs['pnts_per_pixel'] = 1  # to change number of pnts in a pixel

	h5_avg = usid.hdf_utils.write_main_dataset(ff_avg_group,  # parent HDF5 group
											   (num_rows * num_cols, pnts_per_avg),  # shape of Main dataset
											   'FF_Avg',  # Name of main dataset
											   'Deflection',  # Physical quantity contained in Main dataset
											   'V',  # Units for the physical quantity
											   pos_desc,  # Position dimensions
											   spec_desc,  # Spectroscopic dimensions
											   dtype=np.float32,  # data type / precision
											   compression='gzip',
											   main_dset_attrs=parm_dict)

	# Generates a line from each data file, averages it, then saves the data

	for n, k in enumerate(data_files):

		if loadverbose:
			fname = k.replace('/', '\\')
			print('####', fname.split('\\')[-1], '####')
			fname = str(n).rjust(4, '0')

		line_file = load.signal(k)

		_ll = line.Line(line_file, parm_dict, n_pixels=n_pix, pycroscopy=False)
		_ll = _ll.pixel_wise_avg().T

		if mirror:
			h5_avg[n * num_cols:(n + 1) * num_cols, :] = np.flipud(_ll[:, :])
		else:
			h5_avg[n * num_cols:(n + 1) * num_cols, :] = _ll[:, :]

	if verbose:
		usid.hdf_utils.print_tree(hdf.file, rel_paths=True)
		h5_avg = usid.hdf_utils.find_dataset(hdf.file, 'FF_Avg')[0]

		print('H5_avg of size:', h5_avg.shape)

	hdf.flush()

	return h5_avg
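
A minimal usage sketch for load_pixel_averaged_FF, assuming one .ibw file per scan line; the paths are hypothetical and parm_dict holds the scan parameters read during folder scanning (num_rows, num_cols, pnts_per_avg, pnts_per_line, pnts_per_pixel, sampling_rate, total_time, FastScanSize, SlowScanSize):

import glob

data_files = sorted(glob.glob('E:/Data/FF/*.ibw'))  # hypothetical raw line files
h5_avg = load_pixel_averaged_FF(data_files, parm_dict, 'E:/Data/FF.h5', mirror=True)
print(h5_avg.shape)  # (num_rows * num_cols, pnts_per_avg)
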
Example #8
def load_pixel_averaged_from_raw(h5_file, verbose=True, loadverbose=True):
	"""
	Creates a new group FF_Avg where the FF_raw file is averaged together.

	This is more useful as pixel-wise averages are more relevant in FF-processing

	This Dataset is (n_pixels*n_rows, n_pnts_per_avg)

	Parameters
	----------
	h5_file : h5py File or str
		H5 file (or path to one) containing the FF_Raw dataset, typically obtained as
		h5_file = hdf.file with hdf = px.io.HDFwriter(h5_path)

	verbose : bool, optional
		Display outputs of each function or not

	loadverbose : bool, optional
		Whether to print any simple "loading Line X" statements for feedback

	Returns
	-------
	h5_avg : Dataset
		The new averaged Dataset

	"""

	hdf = px.io.HDFwriter(h5_file)
	h5_main = usid.hdf_utils.find_dataset(hdf.file, 'FF_Raw')[0]

	try:
		ff_avg_group = h5_main.parent.create_group('FF_Avg')
	except ValueError:  # FF_Avg already exists; create an indexed sibling
		ff_avg_group = usid.hdf_utils.create_indexed_group(h5_main.parent, 'FF_Avg')

	parm_dict = usid.hdf_utils.get_attributes(h5_main.parent)

	num_rows = parm_dict['num_rows']
	num_cols = parm_dict['num_cols']
	pnts_per_avg = parm_dict['pnts_per_avg']
	pnts_per_line = parm_dict['pnts_per_line']
	pnts_per_pixel = parm_dict['pnts_per_pixel']
	parm_dict['pnts_per_pixel'] = 1  # only 1 average per pixel now
	parm_dict['pnts_per_line'] = num_cols  # equivalent now with averaged data
	n_pix = int(pnts_per_line / pnts_per_pixel)
	dt = 1 / parm_dict['sampling_rate']

	# Set up the position vectors for the data
	pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols)),
				Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]
	ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False)

	spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
	ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True)

	for p in parm_dict:
		ff_avg_group.attrs[p] = parm_dict[p]
	ff_avg_group.attrs['pnts_per_line'] = num_cols  # to change number of pnts in a line
	ff_avg_group.attrs['pnts_per_pixel'] = 1  # to change number of pnts in a pixel

	h5_avg = usid.hdf_utils.write_main_dataset(ff_avg_group,  # parent HDF5 group
											   (num_rows * num_cols, pnts_per_avg),  # shape of Main dataset
											   'FF_Avg',  # Name of main dataset
											   'Deflection',  # Physical quantity contained in Main dataset
											   'V',  # Units for the physical quantity
											   pos_desc,  # Position dimensions
											   spec_desc,  # Spectroscopic dimensions
											   dtype=np.float32,  # data type / precision
											   compression='gzip',
											   main_dset_attrs=parm_dict)

	# Uses get_line to extract each line, averages it, and returns it to the Dataset FF_Avg.
	# We could operate on the dataset array directly; get_line is used for future-proofing
	#  in case we want to add additional operations (such as creating an Image class)
	for i in range(num_rows):

		if loadverbose:
			print('#### Row:', i, '####')

		_ll = get_utils.get_line(h5_main, pnts=pnts_per_line, line_num=i, array_form=False, avg=False)
		_ll = _ll.pixel_wise_avg()

		h5_avg[i * num_cols:(i + 1) * num_cols, :] = _ll[:, :]

	if verbose:
		usid.hdf_utils.print_tree(hdf.file, rel_paths=True)
		h5_avg = usid.hdf_utils.find_dataset(hdf.file, 'FF_Avg')[0]

		print('H5_avg of size:', h5_avg.shape)

	hdf.flush()

	return h5_avg
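
load_pixel_averaged_from_raw only needs a file that already contains FF_Raw, since the scan parameters are read from that dataset's parent group. A minimal sketch with a hypothetical path:

h5_avg = load_pixel_averaged_from_raw('E:/Data/FF.h5')  # file must already contain FF_Raw
print(h5_avg.shape)  # (num_rows * num_cols, pnts_per_avg)
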
Example #9
def load_FF(data_files, parm_dict, h5_path, verbose=False, loadverbose=True,
			average=True, mirror=False):
	"""
	Generates the HDF5 file given path to data_files and parameters dictionary

	Creates a group FF_Group with a single main dataset (FF_Raw or FF_Avg) written in chunks

	Parameters
	----------
	data_files : list
		List of the \*.ibw files to be individually scanned. This is generated
		by load_folder above

	parm_dict : dict
		Scan parameters to be saved as attributes. This is generated
		by load_folder above, or you can pass this explicitly.

	h5_path : string
		Path to H5 file on disk

	verbose : bool, optional
		Display outputs of each function or not

	loadverbose : bool, optional
		Whether to print any simple "loading Line X" statements for feedback

	average : bool, optional
		Whether to average each pixel before saving to H5. This saves both time and space

	mirror : bool, optional
		Mirrors the data when saving. This parameter is used to match the FFtrEFM data
		with the associated topography, as FFtrEFM is acquired during a retrace while
		topography is saved during a forward trace

	Returns
	-------
	h5_path: str
		The filename path to the H5 file created

	"""

	# Prepare data for writing to HDF
	num_rows = parm_dict['num_rows']
	num_cols = parm_dict['num_cols']
	pnts_per_avg = parm_dict['pnts_per_avg']
	name = 'FF_Raw'

	if average:
		parm_dict['pnts_per_pixel'] = 1
		parm_dict['pnts_per_line'] = num_cols
		name = 'FF_Avg'

	pnts_per_pixel = parm_dict['pnts_per_pixel']
	pnts_per_line = parm_dict['pnts_per_line']

	dt = 1 / parm_dict['sampling_rate']
	def_vec = np.arange(0, parm_dict['total_time'], dt)
	if def_vec.shape[0] != parm_dict['pnts_per_avg']:
		def_vec = def_vec[:-1]  # np.arange with a float step can overshoot by one point
		# warnings.warn('Time-per-point calculation error')

	# To do: Fix the labels/attributes on the relevant data sets
	hdf = px.io.HDFwriter(h5_path)
	try:
		ff_group = hdf.file.create_group('FF_Group')
	except ValueError:  # FF_Group already exists; create an indexed sibling
		ff_group = usid.hdf_utils.create_indexed_group(hdf.file['/'], 'FF_Group')

	# Set up the position vectors for the data
	pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols * pnts_per_pixel)),
				Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]

	ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False, verbose=verbose)

	spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
	ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True)

	for p in parm_dict:
		ff_group.attrs[p] = parm_dict[p]
	ff_group.attrs['pnts_per_line'] = num_cols  # to change number of pnts in a line
	# ff_group.attrs['pnts_per_pixel'] = 1 # to change number of pnts in a pixel

	h5_ff = usid.hdf_utils.write_main_dataset(ff_group,  # parent HDF5 group
											  (num_rows * num_cols * pnts_per_pixel, pnts_per_avg),
											  # shape of Main dataset
											  name,  # Name of main dataset
											  'Deflection',  # Physical quantity contained in Main dataset
											  'V',  # Units for the physical quantity
											  pos_desc,  # Position dimensions
											  spec_desc,  # Spectroscopic dimensions
											  dtype=np.float32,  # data type / precision
											  compression='gzip',
											  main_dset_attrs=parm_dict)

	pnts_per_line = parm_dict['pnts_per_line']

	# Cycles through each data file (one per scan line). This takes a while (~few minutes)
	for num, k in enumerate(data_files):

		if loadverbose:
			fname = k.replace('/', '\\')
			print('####', fname.split('\\')[-1], '####')
			fname = str(num).rjust(4, '0')

		line_file = load.signal(k)

		if average:
			_ll = line.Line(line_file, parm_dict, n_pixels=num_cols, pycroscopy=False)
			_ll = _ll.pixel_wise_avg().T
		else:
			_ll = line_file.transpose()

		f = hdf.file[h5_ff.name]

		if mirror:
			f[pnts_per_line * num:pnts_per_line * (num + 1), :] = np.flipud(_ll[:, :])
		else:
			f[pnts_per_line * num:pnts_per_line * (num + 1), :] = _ll[:, :]

	if verbose:
		usid.hdf_utils.print_tree(hdf.file, rel_paths=True)

	hdf.flush()

	return h5_ff
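
A minimal end-to-end sketch for load_FF. The docstring says data_files and parm_dict are generated by load_folder; its exact return signature is assumed here, so treat this as illustrative only:

# Assumed: load_folder scans a folder of .ibw files and returns the file list,
# the scan-parameter dictionary, and a path for the new H5 file.
data_files, parm_dict, h5_path = load_folder('E:/Data/FF_run1')
h5_ff = load_FF(data_files, parm_dict, h5_path, average=True, mirror=True)
print(h5_ff.shape)  # (num_rows * num_cols, pnts_per_avg) when average=True
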