Code Example #1
    def _get_maindatasets(self):
        """
        Gets the maindatasets in the paths provided
        If the dataset path is not provided, gets all the datasets in the file 
        using usid.hdf_utils.get_all_main()
        
        Raises an error if no main datasets are found
        
        Attributes
        ----------
        self._main_datasets: list
            list of all main USID datasets found in the file path
        
        Returns
        -------
        NONE (writes all the maindatasets in the path to attribute self._main_datasets)
        """

        if self._dataset_path is not None:
            self._main_datasets = []

            if isinstance(self._dataset_path, list):
                for dataset in self._dataset_path:
                    if usid.hdf_utils.check_if_main(self._file[dataset]):
                        self._main_datasets.append(
                            usid.USIDataset(self._file[dataset]))
                    else:
                        warnings.warn(
                            '{} is not a main dataset'.format(dataset))

            elif isinstance(self._dataset_path, str):
                if usid.hdf_utils.check_if_main(
                        self._file[self._dataset_path]):
                    self._main_datasets.append(
                        usid.USIDataset(self._file[self._dataset_path]))
                else:
                    warnings.warn('{} is not a main dataset'.format(
                        self._dataset_path))

            else:
                raise TypeError(
                    'Provide a path (str) to the main dataset or a list of paths'
                )

        else:
            self._main_datasets = usid.hdf_utils.get_all_main(self._file)

        if len(self._main_datasets) == 0:
            raise ValueError('There are no main USID datasets in this file')
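A minimal sketch of the path checks this method relies on (the file name and dataset path below are hypothetical):

import h5py
import pyUSID as usid

with h5py.File('measurement.h5', mode='r') as h5_f:  # hypothetical file
    # A single path is validated before being wrapped:
    dset = h5_f['/Measurement_000/Channel_000/Raw_Data']  # hypothetical path
    if usid.hdf_utils.check_if_main(dset):
        main = usid.USIDataset(dset)
    # With no path given, every main dataset in the file is collected:
    all_mains = usid.hdf_utils.get_all_main(h5_f)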
Code Example #2
    def _set_dst_(self, dst_grp):
        if 'Main' not in dst_grp:
            raise ValueError('EC001')
        self.dst_data = usid.USIDataset(dst_grp['Main'])
        self.dst_bar = dst_grp['Spectroscopic_Values'][:]
        self.dst_shape = tuple(self.dst_data.pos_dim_sizes)
        return
Code Example #3
File: svd.py Project: rajgiriUW/ffta
def svd_filter(h5_main, clean_components=None):
    """
    Filters data given the array clean_components
    
    :param h5_main: Dataset to be filtered and reconstructed.
        This must be the same as where SVD was performed
    :type h5_main: h5py
    
    :param clean_components:
        Clean_components has 2 components will filter from start to finish
        Clean_components has 3+ components will use those individual components
    :type clean_components:
    
    :returns:
    :rtype:
    
    """
    if not isinstance(h5_main, usid.USIDataset):
        h5_main = usid.USIDataset(h5_main)

    h5_rb = rebuild_svd(h5_main, components=clean_components)

    parameters = get_utils.get_params(h5_main)

    for key in parameters:
        if key not in h5_rb.attrs:
            h5_rb.attrs[key] = parameters[key]

    return h5_rb
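A hedged usage sketch (the file and dataset paths are hypothetical; rebuild_svd and get_utils come from the surrounding module, and SVD results must already exist for h5_main):

import h5py
import pyUSID as usid

with h5py.File('ff_data.h5', mode='r+') as h5_f:  # hypothetical file
    h5_main = usid.USIDataset(h5_f['FF_Group/FF_Avg'])  # hypothetical path
    h5_rb = svd_filter(h5_main, clean_components=[0, 1, 2, 3, 4])
    print(h5_rb.shape)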
Code Example #4
def setup_movie(h5_ds, size=(10, 6), vscale=[None, None], cmap='inferno'):
    '''
    Sets up the figure, axes, and color bar for a movie of the data.

    :param h5_ds: The instantaneous frequency data. NOT deflection; this is
        for post-processed data
    :type h5_ds: USID dataset

    :param size: Figure size
    :type size: tuple

    :param vscale: [vmin, vmax] color scale; if [None, None], the scale is
        derived from the first line of data around the trigger
    :type vscale: list

    :param cmap: Colormap name
    :type cmap: str

    :returns: tuple (fig, ax, cbar, vmin, vmax)
        WHERE
        matplotlib.figure.Figure fig is the created figure
        matplotlib.axes.Axes ax is the image axes
        matplotlib.colorbar.Colorbar cbar is the attached color bar
        float vmin is the lower color limit
        float vmax is the upper color limit
    '''
    fig, ax = plt.subplots(nrows=1, figsize=size, facecolor='white')

    if not isinstance(h5_ds, usid.USIDataset):
        h5_ds = usid.USIDataset(h5_ds)

    params = sidpy.hdf_utils.get_attributes(h5_ds)
    if 'trigger' not in params:
        params = sidpy.hdf_utils.get_attributes(h5_ds.parent)

    ds = h5_ds.get_n_dim_form()[:, :, 0]

    # set scale based on the first line, pre-trigger to post-trigger
    tdx = params['trigger'] * params['sampling_rate'] / params['pnts_per_avg']
    tdx = int(tdx * len(h5_ds[0, :]))
    # use the provided scale only when both bounds are given
    if None not in vscale:
        [vmin, vmax] = vscale
    else:
        vmin = np.min(h5_ds[0][int(tdx * 0.7):int(tdx * 1.3)])
        vmax = np.max(h5_ds[0][int(tdx * 0.7):int(tdx * 1.3)])

    length = h5_ds.get_pos_values('X')
    height = h5_ds.get_pos_values('Y')

    im0 = ax.imshow(ds,
                    cmap=cmap,
                    origin='lower',
                    extent=[0, length[-1] * 1e6, 0, height[-1] * 1e6],
                    vmin=vmin,
                    vmax=vmax)
    cbar = plt.colorbar(im0,
                        ax=ax,
                        orientation='vertical',
                        fraction=0.023,
                        pad=0.03,
                        use_gridspec=True)

    return fig, ax, cbar, vmin, vmax
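A brief usage sketch (h5_ds here is a hypothetical post-processed USID dataset; the function reads the 'trigger', 'sampling_rate', and 'pnts_per_avg' attributes from it or its parent):

fig, ax, cbar, vmin, vmax = setup_movie(h5_ds, size=(10, 6), cmap='inferno')
cbar.set_label('Frequency (Hz)', rotation=270, labelpad=20)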
Code Example #5
def h5toimg(h5, channel):
    cmap = usid.hdf_utils.get_attr(h5, 'ColorMap ' + str(channel))
    channel = 'Channel_' + str(channel).zfill(3)
    img = usid.USIDataset(h5[channel + '/Raw_Data'])
    height, width = getDim(h5, channel)[:2]
    img = np.flip(np.reshape(img, (height, width)), axis=0)
    # rescale to 0..255: subtract the minimum, then divide by the full range
    img = (img - np.amin(img)) / (np.amax(img) - np.amin(img)) * 255
    return img.astype(np.uint8), ColorMap(cmap)
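A small sketch of saving the returned array as an image (the file name and group layout are hypothetical; getDim and ColorMap come from the surrounding module):

import h5py
from PIL import Image

with h5py.File('scan.h5', mode='r') as h5_f:  # hypothetical file
    grp = h5_f['Measurement_000']  # group holding the Channel_NNN subgroups
    arr, cmap = h5toimg(grp, 0)
    Image.fromarray(arr).save('channel_0.png')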
Code Example #6
    def load_data(self):
        import numpy as np
        import pyUSID as usid

        h5_main = usid.hdf_utils.find_dataset(self.h5_file, 'Raw_Data')[0]
        self.h5_main = usid.USIDataset(h5_main)
        self.real1 = np.real(self.h5_main[:])
        self.imag1 = np.imag(self.h5_main[:])

        h5_main2 = usid.hdf_utils.find_dataset(self.h5_file, 'Raw_Data')[1]
        self.h5_main2 = usid.USIDataset(h5_main2)
        self.real2 = np.real(self.h5_main2[:])
        self.imag2 = np.imag(self.h5_main2[:])

        # h5_main[:] returns a plain numpy array with no .attrs; read the
        # metadata from the dataset's parent group instead
        meas_grp = self.h5_main.parent
        for att in meas_grp.attrs.keys():
            print(att, meas_grp.attrs[att])
Code Example #7
    def plotPixel(self, pix_ind=None):
        if pix_ind is None:
            return None

        # Need x, R, R_sig, V, i_meas, i_recon, i_corrected to graph against .h5_spec_vals[()]
        x = usid.USIDataset(
            self.h5_results_grp["Resistance"]).h5_spec_vals[()][0]
        R = self.h5_results_grp["Resistance"][()][pix_ind, :]
        R_sig = self.h5_results_grp["R_sig"][()][pix_ind, :]
        V = usid.USIDataset(
            self.h5_results_grp["Reconstructed_Current"]).h5_spec_vals[()][0]
        i_meas = self.h5_main[()][pix_ind, ::self.parse_mod]
        i_recon = self.h5_results_grp["Reconstructed_Current"][()][pix_ind, :]
        i_corrected = self.h5_results_grp["Corrected_Current"][()][pix_ind, :]

        return publicGetGraph(self.Ns, pix_ind, self.shift_index,
                              self.split_index, x, R, R_sig, V, i_meas,
                              i_recon, i_corrected)
Code Example #8
    def setUp(self, proc_class=AvgSpecUltraBasic, **proc_kwargs):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)
        self.exp_result = np.expand_dims(np.mean(self.h5_main[()], axis=1),
                                         axis=1)

        self.proc = proc_class(self.h5_main, **proc_kwargs)
Code Example #9
    def test_read_only_file(self):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)

        with self.assertRaises(TypeError):
            _ = AvgSpecUltraBasic(self.h5_main)
        delete_existing_file(data_utils.std_beps_path)
Code Example #10
    def _get_channels(self, fmt="arr"):
        """
        Function that gets the different channels from an Asylum file and returns them 

        TODO: Need to make the assignment of channels more generic, and check for extra channels
        in a better way (in case the user has flattened the topography data or done similar amendments
        to the data)
        """

        topo1 = usid.USIDataset(
            self.file['Measurement_000/Channel_000/Raw_Data'])
        ampl1 = usid.USIDataset(
            self.file['Measurement_000/Channel_001/Raw_Data'])
        phase1 = usid.USIDataset(
            self.file['Measurement_000/Channel_002/Raw_Data'])
        ampl2 = usid.USIDataset(
            self.file['Measurement_000/Channel_003/Raw_Data'])
        phase2 = usid.USIDataset(
            self.file['Measurement_000/Channel_004/Raw_Data'])
        frequency = usid.USIDataset(
            self.file['Measurement_000/Channel_005/Raw_Data'])

        if fmt == 'arr':
            topo1_nd = np.transpose(topo1.get_n_dim_form().squeeze())
            ampl1_nd = np.transpose(ampl1.get_n_dim_form().squeeze())
            ampl2_nd = np.transpose(ampl2.get_n_dim_form().squeeze())
            phase1_nd = np.transpose(phase1.get_n_dim_form().squeeze())
            phase2_nd = np.transpose(phase2.get_n_dim_form().squeeze())
            freq_nd = np.transpose(frequency.get_n_dim_form().squeeze())

            if len(self.file['Measurement_000']) > 10:
                topo2 = usid.USIDataset(
                    self.file['Measurement_000/Channel_006/Raw_Data'])
                topo2_nd = np.transpose(topo2.get_n_dim_form().squeeze())

                return [
                    topo1_nd, ampl1_nd, phase1_nd, ampl2_nd, phase2_nd,
                    topo2_nd
                ]
            return [
                topo1_nd, ampl1_nd, phase1_nd, ampl2_nd, phase2_nd, freq_nd
            ]

        if len(self.file['Measurement_000']) > 10:
            topo2 = usid.USIDataset(
                self.file['Measurement_000/Channel_006/Raw_Data'])
            topo2_nd = np.transpose(topo2.get_n_dim_form().squeeze())

            return [topo1, ampl1, phase1, ampl2, phase2, frequency, topo2]

        return [topo1, ampl1, phase1, ampl2, phase2, frequency]
Code Example #11
def get_main(url, h5_path='temp.h5'):
    if os.path.exists(h5_path):
        os.remove(h5_path)
    _ = wget.download(url, h5_path, bar=None)
    # Open the file in read-only mode
    h5_file = h5py.File(h5_path, mode='r')
    # Get a handle to the raw data
    h5_meas_grp = h5_file['Measurement_000']
    # Accessing the dataset of interest:
    h5_main = usid.USIDataset(h5_meas_grp['Channel_000/Raw_Data'])
    num_rows, num_cols = h5_main.pos_dim_sizes
    return h5_main
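Usage is a one-liner; the URL below is a placeholder:

h5_main = get_main('https://example.com/data.h5')  # placeholder URL
print(h5_main.shape, h5_main.pos_dim_sizes)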
Code Example #12
File: svd.py Project: lindat18/ffta
def test_svd(h5_main, num_components=128, show_plots=True, override=True, verbose=True):
	"""

	Parameters
	----------
	h5_main : h5Py Dataset
		Main dataset to filter
		
	num_components : int, optional
		Number of SVD components. Increasing this lengthens computation
		
	show_plots : bool, optional
		If True, displays the scree plot, abundance maps, and data loops
		
	override : bool, optional
		Force SVD.Compute to reprocess data no matter what
		
	verbose : bool, optional
		Print out component ratio values
		
	Returns
	-------
	h5_svd_group : h5Py Group
		Group containing the h5_svd data
	
	"""

	if not isinstance(h5_main, usid.USIDataset):
		h5_main = usid.USIDataset(h5_main)

	h5_svd = SVD(h5_main, num_components=num_components)

	[num_rows, num_cols] = h5_main.pos_dim_sizes

	# performs SVD
	h5_svd_group = h5_svd.compute(override=override)

	h5_S = h5_svd_group['S']

	if verbose:
		skree_sum = np.zeros(h5_S.shape)
		# cumulative fraction of total variance explained by components 0..i
		for i in range(h5_S.shape[0]):
			skree_sum[i] = np.sum(h5_S[:i + 1]) / np.sum(h5_S)

		print('Need', skree_sum[skree_sum < 0.8].shape[0], 'components for 80%')
		print('Need', skree_sum[skree_sum < 0.9].shape[0], 'components for 90%')
		print('Need', skree_sum[skree_sum < 0.95].shape[0], 'components for 95%')
		print('Need', skree_sum[skree_sum < 0.99].shape[0], 'components for 99%')

	if show_plots:
		plot_svd(h5_svd_group)

	return h5_svd_group
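A hedged sketch of calling this on a main dataset (h5_main is assumed to already be a USID main dataset):

h5_svd_group = test_svd(h5_main, num_components=128, show_plots=False)
print(h5_svd_group['S'][()][:5])  # first few singular values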
Code Example #13
    def test_no_map_func(self):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)

        proc = NoMapFunc(self.h5_main)

        with self.assertRaises(NotImplementedError):
            _ = proc.compute()

        delete_existing_file(data_utils.std_beps_path)
Code Example #14
def getDataFromUSID(fileName, pixelNumber):
    # Open the file
    h5File = h5py.File(fileName, mode='r')

    # Grab the main dataset
    h5Group = h5File["Measurement_000/Channel_000"]
    h5Main = usid.USIDataset(h5Group["Raw_Data"])

    # Get some more information out of this dataset
    samp_rate = usid.hdf_utils.get_attr(h5Group, 'IO_samp_rate_[Hz]')
    ex_freq = usid.hdf_utils.get_attr(h5Group, 'excitation_frequency_[Hz]')

    # Getting ancillary information and other parameters
    h5_spec_vals = h5Main.h5_spec_vals

    # Excitation waveform for a single line / row of data
    excit_wfm = h5_spec_vals[()]

    # We expect each pixel to have a single period of the sinusoidal excitation
    # Calculating the excitation waveform for a single pixel
    pts_per_cycle = int(np.round(samp_rate / ex_freq))
    single_AO = excit_wfm[0, :pts_per_cycle]

    # Get excitation amplitude
    ex_amp = usid.hdf_utils.get_attr(h5Group, 'excitation_amplitude_[V]')

    # Assume that we have already filtered and reshaped the data
    # Now load the filtered and reshaped data
    h5_filt_grp = usid.hdf_utils.find_results_groups(h5Main,
                                                     "FFT_Filtering")[-1]
    h5_filt = h5_filt_grp["Filtered_Data"]
    h5_resh_grp = usid.hdf_utils.find_results_groups(h5_filt, "Reshape")[-1]
    h5_resh = usid.USIDataset(h5_resh_grp["Reshaped_Data"])

    # Just return the voltage and response for now
    return ([float(v) for v in single_AO[::4]],
            [float(v) for v in h5_resh[pixelNumber, ::4]])
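A hypothetical call (the file name and pixel index are placeholders; the file must already contain FFT_Filtering and Reshape results):

voltage, response = getDataFromUSID('line_data.h5', 42)
print(len(voltage), len(response))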
Code Example #15
    def setUp(self,
              proc_class=AvgSpecUltraBasicWGetPrevResults,
              percent_complete=100,
              parms_dict=None,
              status_dset=True,
              status_attr=False,
              verbose=False,
              h5_target_group=None):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)

        # Make some fake results here:
        if any(isinstance(item, (list, tuple)) for item in
               [percent_complete, status_attr, status_dset, parms_dict]):
            self.fake_results_grp = []
            self.h5_results = []
            self.exp_result = []

            for this_per, this_parms, has_status_dset, has_status_attr in zip(
                    percent_complete, parms_dict, status_dset, status_attr):

                ret_vals = self.__create_fake_result(
                    percent_complete=this_per,
                    parms_dict=this_parms,
                    status_dset=has_status_dset,
                    status_attr=has_status_attr,
                    h5_parent_group=h5_target_group,
                    verbose=verbose)
                self.fake_results_grp.append(ret_vals[0])
                self.h5_results.append(ret_vals[1])
                self.exp_result.append(ret_vals[2])
        else:
            ret_vals = self.__create_fake_result(
                percent_complete=percent_complete,
                parms_dict=parms_dict,
                status_dset=status_dset,
                status_attr=status_attr,
                h5_parent_group=h5_target_group,
                verbose=verbose)
            self.fake_results_grp, self.h5_results, self.exp_result = ret_vals

        # instantiate the class that was passed in, rather than hard-coding
        # one particular subclass
        self.proc = proc_class(
            self.h5_main, h5_target_group=h5_target_group, verbose=verbose)
Code Example #16
    def test_read_only_h5_parent_group(self):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)

        results_path = 'sep_results.h5'
        with h5py.File(results_path, mode='w') as file_handle:
            file_handle.create_group("Blah")
        h5_f_new = h5py.File(results_path, mode='r')

        with self.assertRaises(IOError):
            _ = AvgSpecUltraBasic(self.h5_main, h5_target_group=h5_f_new)
        delete_existing_file(data_utils.std_beps_path)
        delete_existing_file(results_path)
Code Example #17
    def test_invalid_parms_dict(self):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)

        class TempProc(usid.Process):

            def __init__(self, h5_main, *args, **kwargs):
                super(TempProc, self).__init__(h5_main, 'Proc',
                                               parms_dict='Parms',
                                               *args, **kwargs)

        with self.assertRaises(TypeError):
            _ = TempProc(self.h5_main)
        delete_existing_file(data_utils.std_beps_path)
Code Example #18
    def get_image(self, grp, spec, tol=0.001, *args, **kwds):
        data = usid.USIDataset(grp['Main'])
        s_labels = data.spec_dim_labels
        if 's_dim' not in kwds:
            label = s_labels[0]
        else:
            s_dim = kwds['s_dim']
            if isinstance(s_dim, str):
                label = s_dim
            else:
                label = s_labels[int(s_dim)]
        bar = data.get_spec_values(label)

        try:
            # use a list (not a tuple) so the upper bound can be adjusted
            bounds_idx = [find_nearest_member(bar, spec - (spec * tol)),
                          find_nearest_member(bar, spec + (spec * tol))]
            if bounds_idx[1] == bounds_idx[0]:
                bounds_idx[1] = bounds_idx[0] + 1
        except Exception:
            grpname = grp.name.split('/')[-1]
            print('No image could be obtained for: %s at %f' % (grpname, spec))
            return None
        hit_stack, result = data.slice(
            {label: slice(bounds_idx[0], bounds_idx[1])})
        hit_mz = bar[bounds_idx[0]:bounds_idx[1]]
        mz_stack = np.empty_like(hit_stack)

        for i, v in enumerate(hit_mz):
            mz_stack[..., i] = v
        if 'norm' in kwds:
            #apply normalization
            norm_mode = kwds['norm']
            norm_mode_options = [key.upper() for key in grp['norm'].keys()]
            if norm_mode is None: norm_factors = np.ones(data.shape[:-1])
            elif norm_mode.upper() in norm_mode_options:
                norm_factors = grp['norm'][norm_mode.upper()][:]
            else:
                raise KeyError(
                    'Selected normalization mode has not been calculated.')
        else:
            norm_factors = np.ones(data.shape[:-1])

        try:
            image = np.trapz(hit_stack, mz_stack, axis=2)
        except IndexError:
            # without a valid axis to integrate over there is no image
            return None
        image = np.multiply(image, 1 / norm_factors.reshape(image.shape))
        return image
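A hypothetical call, assuming grp follows the layout this method expects (a 'Main' dataset plus an optional 'norm' group) and that reader is an instance of the enclosing class:

img = reader.get_image(h5_f['Group_000'], spec=500.5, tol=0.001)  # hypothetical objects
if img is not None:
    print(img.shape)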
Code Example #19
    def test_none_parms_dict(self):
        delete_existing_file(data_utils.std_beps_path)
        data_utils.make_beps_file()
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
        self.h5_main = self.h5_file['Raw_Measurement/source_main']
        self.h5_main = usid.USIDataset(self.h5_main)

        class TempProc(usid.Process):

            def __init__(self, h5_main, *args, **kwargs):
                super(TempProc, self).__init__(h5_main, 'Proc',
                                               parms_dict=None,
                                               *args, **kwargs)

        proc = TempProc(self.h5_main)
        self.assertEqual(proc.parms_dict, dict())
        delete_existing_file(data_utils.std_beps_path)
Code Example #20
def compare_signal_from_usid(file_path,
                             ndata,
                             new_sig,
                             axes_to_spec=[],
                             sig_type=hs.signals.BaseSignal,
                             dataset_path=None,
                             compound_comp_name=None,
                             **kwargs):
    # 1. Validate object type
    assert isinstance(new_sig, sig_type)
    if len(axes_to_spec) > 0:
        new_sig = new_sig.as_signal2D(axes_to_spec)

    # 2. Validate that data has been read in correctly:
    assert np.allclose(new_sig.data, ndata)
    with h5py.File(file_path, mode='r') as h5_f:
        if dataset_path is None:
            h5_main = usid.hdf_utils.get_all_main(h5_f)[0]
        else:
            h5_main = usid.USIDataset(h5_f[dataset_path])
        # 3. Validate that all axes / dimensions have been translated correctly
        if len(axes_to_spec) > 0:
            _compare_axes(new_sig.axes_manager.navigation_axes,
                          h5_main.pos_dim_descriptors, h5_main.get_pos_values,
                          **kwargs)
        else:
            assert new_sig.axes_manager.navigation_dimension == 0
        # 4. Validate that all spectroscopic axes / dimensions have been
        # translated correctly:
        if h5_main.shape[1] == 1:
            _compare_axes(new_sig.axes_manager.signal_axes,
                          h5_main.pos_dim_descriptors, h5_main.get_pos_values,
                          **kwargs)
        else:
            _compare_axes(new_sig.axes_manager.signal_axes,
                          h5_main.spec_dim_descriptors,
                          h5_main.get_spec_values, **kwargs)

        # 5. Validate that metadata has been read in correctly:
        _validate_metadata_from_h5dset(new_sig,
                                       h5_main,
                                       compound_comp_name=compound_comp_name)
Code Example #21
    def test_load_FF(self):
        self.delete_old_h5()
        h5_path, data_files, parm_dict = ffta.load.load_hdf.load_folder(
            folder_path=self.ff_folder)
        h5_path = h5_path.replace('\\', '/')  # travis
        h5_avg = ffta.load.load_hdf.load_FF(data_files, parm_dict, h5_path)

        assert (h5_avg.shape == (1024, 16000))
        usid.USIDataset(h5_avg)

        h5_svd = ffta.analysis.svd.test_svd(h5_avg, show_plots=False)
        h5_rb = ffta.analysis.svd.svd_filter(h5_avg,
                                             clean_components=[0, 1, 2, 3, 4])
        assert (h5_rb.shape == (1024, 16000))
        assert (h5_rb.name ==
                '/FF_Group/FF_Avg-SVD_000/Rebuilt_Data_000/Rebuilt_Data'
                )  # in right spot

        ff = ffta.hdf_utils.process.FFtrEFM(h5_rb, override=True)
        ff.update_parm(roi=0.0007, n_taps=499, fit=True, filter_amplitude=True)
        ff.compute()
        ff.reshape()
        ffta.hdf_utils.process.save_CSV_from_file(ff)

        tfp = h5_rb.file[
            'FF_Group/FF_Avg-SVD_000/Rebuilt_Data_000/Rebuilt_Data-Fast_Free_000/tfp']
        assert (tfp.shape == (8, 128))

        shift = h5_rb.file[
            'FF_Group/FF_Avg-SVD_000/Rebuilt_Data_000/Rebuilt_Data-Fast_Free_000/shift']
        assert (shift.shape == (8, 128))

        inst_freq = h5_rb.file[
            'FF_Group/FF_Avg-SVD_000/Rebuilt_Data_000/Rebuilt_Data-Fast_Free_000/Inst_Freq']
        assert (inst_freq.shape == (1024, 16000))

        h5_avg.file.close()

        self.delete_old_h5()

        return
Code Example #22
File: create_movie.py Project: lindat18/ffta
def setup_movie(h5_ds, size=(10, 6), vscale=[None, None], cmap='inferno'):
    fig, ax = plt.subplots(nrows=1, figsize=size, facecolor='white')

    if not isinstance(h5_ds, usid.USIDataset):
        h5_ds = usid.USIDataset(h5_ds)

    params = usid.hdf_utils.get_attributes(h5_ds)
    if 'trigger' not in params:
        params = usid.hdf_utils.get_attributes(h5_ds.parent)

    ds = h5_ds.get_n_dim_form()[:, :, 0]

    # set scale based on the first line, pre-trigger to post-trigger
    tdx = params['trigger'] * params['sampling_rate'] / params['pnts_per_avg']
    tdx = int(tdx * len(h5_ds[0, :]))
    if None not in vscale:
        [vmin, vmax] = vscale
    else:
        vmin = np.min(h5_ds[0][int(tdx * 0.7):int(tdx * 1.3)])
        vmax = np.max(h5_ds[0][int(tdx * 0.7):int(tdx * 1.3)])

    length = h5_ds.get_pos_values('X')
    height = h5_ds.get_pos_values('Y')

    im0 = ax.imshow(ds,
                    cmap=cmap,
                    origin='lower',
                    extent=[0, length[-1] * 1e6, 0, height[-1] * 1e6],
                    vmin=vmin,
                    vmax=vmax)
    cbar = plt.colorbar(im0,
                        ax=ax,
                        orientation='vertical',
                        fraction=0.023,
                        pad=0.03,
                        use_gridspec=True)

    return fig, ax, cbar, vmin, vmax
Code Example #23
def compare_usid_from_signal(sig,
                             h5_path,
                             empty_pos=False,
                             empty_spec=False,
                             dataset_path=None,
                             **kwargs):
    with h5py.File(h5_path, mode='r') as h5_f:
        # 1. Validate that what has been written is a USID Main dataset
        if dataset_path is None:
            _array_translator_basic_checks(h5_f)
            h5_main = usid.hdf_utils.get_all_main(h5_f)[0]
        else:
            h5_main = usid.USIDataset(h5_f[dataset_path])

        usid_data = h5_main.get_n_dim_form().squeeze()
        # 2. Validate that raw data has been written correctly:
        assert np.allclose(sig.data, usid_data)
        # 3. Validate that axes / dimensions have been translated correctly:
        if empty_pos:
            _assert_empty_dims(sig.axes_manager.navigation_axes,
                               h5_main.pos_dim_labels, h5_main.get_pos_values,
                               **kwargs)
        else:
            _compare_axes(sig.axes_manager.navigation_axes,
                          h5_main.pos_dim_descriptors, h5_main.get_pos_values,
                          **kwargs)
        # 4. Check to make sure that there is only one spectroscopic dimension
        # of size 1
        if empty_spec:
            _assert_empty_dims(sig.axes_manager.signal_axes,
                               h5_main.spec_dim_labels,
                               h5_main.get_spec_values, **kwargs)
        else:
            _compare_axes(sig.axes_manager.signal_axes,
                          h5_main.spec_dim_descriptors,
                          h5_main.get_spec_values, **kwargs)
Code Example #24
File: usid_hdf5.py Project: ssomnath/hyperspy
def _usidataset_to_signal(h5_main, ignore_non_linear_dims=True, lazy=True,
                          **kwds):
    """
    Converts a single specified USIDataset object to one or more Signal objects

    Parameters
    ----------
    h5_main : pyUSID.USIDataset object
        USID Main dataset
    ignore_non_linear_dims : bool, Optional
        If True, all non-linearly varied parameters in the desired dataset
        will be treated as linearly varied parameters and a Signal object
        will be generated.
        Else, such non-linearly varied parameters will result in Exceptions.
    lazy : bool, Optional
        If set to True, data will be read as a Dask array.
        Else, data will be read in as a numpy array

    Returns
    -------
    list of hyperspy.signals.BaseSignal objects
        USIDatasets with compound datatypes are broken down to multiple Signal
        objects.
    """
    h5_main = usid.USIDataset(h5_main)
    # TODO: Cannot handle data without N-dimensional form yet
    # First get dictionary of axes that HyperSpy likes to see. Ignore singular
    # dimensions
    pos_dict = _get_dim_dict(h5_main.pos_dim_labels,
                             sidpy.hdf_utils.get_attr(h5_main.h5_pos_inds,
                                                     'units'),
                             h5_main.get_pos_values,
                             ignore_non_linear_dims=ignore_non_linear_dims)
    spec_dict = _get_dim_dict(h5_main.spec_dim_labels,
                              sidpy.hdf_utils.get_attr(h5_main.h5_spec_inds,
                                                      'units'),
                              h5_main.get_spec_values,
                              ignore_non_linear_dims=ignore_non_linear_dims)

    num_spec_dims = len(spec_dict)
    num_pos_dims = len(pos_dict)
    _logger.info('Dimensions: Positions: {}, Spectroscopic: {}'
                 '.'.format(num_pos_dims, num_spec_dims))

    ret_vals = usid.hdf_utils.reshape_to_n_dims(h5_main, get_labels=True,
                                                lazy=lazy)
    ds_nd, success, dim_labs = ret_vals

    if success is not True:
        raise ValueError('Dataset could not be reshaped!')
    ds_nd = ds_nd.squeeze()
    _logger.info('N-dimensional shape: {}'.format(ds_nd.shape))
    _logger.info('N-dimensional labels: {}'.format(dim_labs))

    # Capturing metadata present in conventional h5USID files:
    group_attrs = dict()
    h5_chan_grp = h5_main.parent
    if isinstance(h5_chan_grp, h5py.Group):
        if 'Channel' in h5_chan_grp.name.split('/')[-1]:
            group_attrs = sidpy.hdf_utils.get_attributes(h5_chan_grp)
            # the Measurement group is the parent of the Channel group
            h5_meas_grp = h5_chan_grp.parent
            if isinstance(h5_meas_grp, h5py.Group):
                if 'Measurement' in h5_meas_grp.name.split('/')[-1]:
                    temp = sidpy.hdf_utils.get_attributes(h5_meas_grp)
                    group_attrs.update(temp)

    """
    Normally, we might have been done but the order of the dimensions may be
    different in N-dim form and
    attributes in ancillary dataset
    """
    num_pos_dims = len(h5_main.pos_dim_labels)
    pos_dim_list = _assemble_dim_list(pos_dict, dim_labs[:num_pos_dims])
    spec_dim_list = _assemble_dim_list(spec_dict, dim_labs[num_pos_dims:])
    dim_list = pos_dim_list + spec_dim_list

    _, is_complex, is_compound, _, _ = sidpy.hdf.dtype_utils.check_dtype(h5_main)

    trunc_func = partial(_convert_to_signal_dict,
                         dim_dict_list=dim_list,
                         h5_path=h5_main.file.filename,
                         h5_dset_path=h5_main.name,
                         name=h5_main.name.split('/')[-1],
                         group_attrs=group_attrs)

    # Extracting the quantity and units of the main dataset
    quant, units = _split_descriptor(h5_main.data_descriptor)

    if is_compound:
        sig = []
        # Iterate over each dimension name:
        for name in ds_nd.dtype.names:
            q_sub, u_sub = _split_descriptor(name)
            sig.append(trunc_func(ds_nd[name], q_sub, u_sub, sig_type=quant))
    else:
        sig = [trunc_func(ds_nd, quant, units)]

    return sig
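A hedged usage sketch (the file name is hypothetical; assumes the helper functions referenced above are available in the same module):

import h5py
import pyUSID as usid

with h5py.File('usid_file.h5', mode='r') as h5_f:  # hypothetical file
    h5_main = usid.hdf_utils.get_all_main(h5_f)[0]
    signals = _usidataset_to_signal(h5_main, lazy=False)
    print(signals[0])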
Code Example #25
File: be_relax_fit.py Project: yig319/pycroscopy
    def __init__(self, h5_main, variables=None, fit_method='Exponential', sens=1, phase_off=0,
                 starts_with='write', **kwargs):
        """
        This instantiation reads and calculates parameters in the data file necessary for reading, writing, analyzing,
        and visualizing the data. It writes these parameters to attributes to be referenced.

        :param h5_main: h5py.Dataset object from pycroscopy.analysis.BESHOfitter
        :param variables: list(string), Default ['Frequency']
            Lists of attributes that h5_main should possess so that it may be analyzed by Model.
        :param fit_method: fit method for the berelaxfit fit; can be 'Exponential', 'Double_Exp', 'Str_Exp' or 'Logistic'
        :param sens: tip sensitivity in pm/V. Default: 1, i.e. the data are not scaled
        :param phase_off: phase offset to apply to the phase data. Default: 0, i.e. the data are not offset
        :param starts_with: 'write' or 'read', depending on whether the first step is a read or write step.
            Default: 'write'

        Note: currently, the BE software does not consistently encode whether spectra start with a read or write step.
        """
        if h5_main is None:
            h5_main = self.h5_main
        super(BERelaxFit, self).__init__(h5_main, variables, **kwargs)
        self.starts_with = starts_with
        self.raw_data = h5_main.parent.parent['Raw_Data']
        self.raw_amp = np.abs(self.raw_data)
        self.raw_phase = np.angle(self.raw_data)
        self.h5_main_usid = usid.USIDataset(h5_main)
        self.raw_amp_reshape = self.raw_amp.reshape(self.h5_main_usid.pos_dim_sizes[0],
                                                    self.h5_main_usid.pos_dim_sizes[1],
                                                    h5_main.parent.parent.parent.attrs['num_steps'],-1)
        self.raw_phase_reshape = self.raw_phase.reshape(self.h5_main_usid.pos_dim_sizes[0],
                                                        self.h5_main_usid.pos_dim_sizes[1],
                                                        h5_main.parent.parent.parent.attrs['num_steps'],-1)
        self.fit_method = fit_method
        self.no_read_steps = self.h5_main.parent.parent.parent.attrs['VS_num_meas_per_read_step']
        self.no_write_steps = self.h5_main.parent.parent.parent.attrs['VS_num_meas_per_write_step']
        self.sensitivity = sens
        self.phase_offset = phase_off
        self.no_time_steps = self.h5_main.parent.parent.parent.attrs['num_steps']
        self.time_elapsed_per_step = self.h5_main.parent.parent.parent.attrs['BE_pulse_duration_[s]']
        self.time_elapsed_per_spectrum = (self.no_read_steps) * self.time_elapsed_per_step
        self.all_dc_offset_values = self.h5_main.h5_spec_vals[1,np.argwhere(self.h5_main.h5_spec_inds[0]==0)]
        self.dc_offset_expand = self.h5_main.h5_spec_vals[1,:]
        #make list of indices of read/write steps
        self.no_rs_spectra = int(len(np.argwhere(self.h5_main.h5_spec_inds[0, :] == 0)) / 2)
        self.read_inds_split = []
        self.write_inds_split = []
        self.all_inds_split = np.array_split(np.arange(0, self.no_time_steps, step=1), self.no_rs_spectra)
        self.write_spectra = []
        if self.starts_with == 'write':
            for i in range(self.no_rs_spectra):
                self.read_inds_split.append(self.all_inds_split[i][self.no_write_steps:])
                self.write_dc_offset_values = self.all_dc_offset_values[::2]

                #if there is only one RS spectrum
                if isinstance(self.write_dc_offset_values, np.float32):
                    self.write_dc_offset_values = [self.write_dc_offset_values]

        if self.starts_with == 'read':
            for i in range(self.no_rs_spectra):
                self.read_inds_split.append(self.all_inds_split[i][:-int(self.no_write_steps)])
                self.write_dc_offset_values = self.h5_main.h5_spec_vals[1,
                                                                        np.argwhere(self.h5_main.h5_spec_vals[
                                                                                        0] == self.no_read_steps)]
                # if there is only one RS spectrum
                if isinstance(self.write_dc_offset_values, np.float32):
                    self.write_dc_offset_values = [self.write_dc_offset_values]

        self.no_read_offset = len(self.all_dc_offset_values) - self.no_rs_spectra
        self.write_inds_split = np.split(np.setxor1d(self.all_inds_split, self.read_inds_split),
                                                       self.no_rs_spectra)
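A hypothetical instantiation (h5_main must come from pycroscopy's BESHOfitter results, as the docstring notes):

fit = BERelaxFit(h5_main, variables=['Frequency'], fit_method='Exponential',
                 sens=1, phase_off=0, starts_with='write')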
Code Example #26
print("#-----------------------------------#")
print("Contents of data file:")
usid.hdf_utils.print_tree(h5_file)
print("#-----------------------------------#")
print("\n")

h5_meas_grp = h5_file["Measurement_000"]

# Extracting some basic parameters

num_rows = usid.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')
num_cols = usid.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')

# Getting a reference to the main dataset

h5_main = usid.USIDataset(h5_meas_grp['Channel_000/Raw_Data'])
usid.hdf_utils.write_simple_attrs(h5_main, {
    'quantity': 'Deflection',
    'units': 'V'
})

# Extracting the X axis - vector of frequencies

h5_spec_vals = usid.hdf_utils.get_auxiliary_datasets(
    h5_main, 'Spectroscopic_Values')[-1]
freq_vec = np.squeeze(h5_spec_vals[()]) * 1E-3  # Hz -> kHz; .value was removed in h5py >= 3

print("#-----------------------------------#")
print("Data currently of shape:", h5_main.shape)
print("#-----------------------------------#")
print("\n")
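A minimal follow-on sketch (an assumption, not part of the original script) plotting the average response against the frequency vector; np.abs guards against complex-valued data:

import numpy as np
import matplotlib.pyplot as plt

mean_resp = np.mean(np.abs(h5_main[()]), axis=0)
plt.plot(freq_vec, mean_resp)
plt.xlabel('Frequency (kHz)')
plt.ylabel('Mean deflection (V)')
plt.show()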
Code Example #27
h5_path = r"C:/Users/Administrator/Dropbox/GIv Bayesian June 2019/pzt_nanocap_6_split_bayesian_compensation_R_correction (Alvin Tan's conflicted copy 2019-06-25).h5"

numPixels = 500
#M = 100
#Ns = int(7e7)

#skip mpi for now
#with h5py.File(h5_path, mode='r+', driver='mpio', comm=MPI.COMM_WORLD) as h5_f:
with h5py.File(h5_path, mode='r+') as h5_f:
    h5_grp = h5_f['Measurement_000/Channel_000']
    f = usid.hdf_utils.get_attr(h5_grp, 'excitation_frequency_[Hz]')
    V0 = usid.hdf_utils.get_attr(h5_grp, 'excitation_amplitude_[V]')

    #original dataset
    h5_resh = usid.USIDataset(h5_grp[
        'Raw_Data-FFT_Filtering_000/Filtered_Data-Reshape_000/Reshaped_Data'])
    pixelInds = np.random.randint(0, h5_resh[()].shape[0], numPixels)
    print("PixelInds is {} with shape {}".format(pixelInds, pixelInds.shape))

    #copy subset to new h5 file
    # use a distinct name so the excitation frequency 'f' above is not shadowed
    h5_out = h5py.File('subsetFile{}.h5'.format(time.time()), 'a')
    subsetGroup = h5_out.create_group("subsetBoi")
    h5_spec_inds, h5_spec_vals = write_ind_val_dsets(
        subsetGroup,
        Dimension("Bias", "V", int(h5_resh.h5_spec_inds.size)),
        is_spectral=True)
    h5_spec_vals[()] = h5_resh.h5_spec_vals[()]
    h5_pos_inds, h5_pos_vals = write_ind_val_dsets(subsetGroup,
                                                   Dimension(
                                                       "Position", "m",
                                                       numPixels),
Code Example #28
File: create_movie.py Project: lindat18/ffta
def create_freq_movie(h5_ds,
                      filename='inst_freq',
                      time_step=50,
                      idx_start=500,
                      idx_stop=100,
                      smooth=None,
                      size=(10, 6),
                      vscale=[None, None],
                      cmap='inferno',
                      interval=60,
                      repeat_delay=100,
                      crop=None):
    '''
    Creates an animation that goes through all the instantaneous frequency data.

    Parameters
    ----------
    h5_ds : USID Dataset
        The instantaneous frequency data. NOT deflection, this is for post-processed data

    filename : str, optional
        The output file name; '.mp4' is appended

    time_step : int, optional
        10 @ 10 MHz = 1 us
        50 @ 10 MHz = 5 us

    idx_start : int
        What index to start at. Typically to avoid the Hilbert Transform edge artifacts, you start a little ahead

    idx_stop : int
        Same as the above, in terms of how many points BEFORE the end to stop

    smooth : int, optional
        Size of a simple boxcar smoothing kernel to apply to the data

    size : tuple, optional
        Figure size

    vscale : list [float, float], optional
        To hard-code the color scale, otherwise these are automatically generated

    cmap : str, optional
        Colormap name

    interval : int, optional
        Delay between animation frames, in milliseconds

    repeat_delay : int, optional
        Delay before the animation repeats, in milliseconds

    crop : int
        Crops the image to a certain line, in case part of the scan is bad
    '''

    if not isinstance(h5_ds, usid.USIDataset):
        h5_ds = usid.USIDataset(h5_ds)

    if None not in vscale:
        fig, ax, cbar, _, _ = setup_movie(h5_ds, size, vscale, cmap=cmap)
        [vmin, vmax] = vscale
    else:
        fig, ax, cbar, vmin, vmax = setup_movie(h5_ds, size, cmap=cmap)

    _orig = np.copy(h5_ds[()])
    length = h5_ds.get_pos_values('X')
    height = h5_ds.get_pos_values('Y')
    if isinstance(crop, int):
        height = height * crop / h5_ds.get_n_dim_form()[:, :, 0].shape[0]

    params = usid.hdf_utils.get_attributes(h5_ds)
    if 'trigger' not in params:
        params = usid.hdf_utils.get_attributes(h5_ds.parent)

    if isinstance(smooth, int):
        kernel = np.ones(smooth) / smooth
        for i in np.arange(h5_ds.shape[0]):
            h5_ds[i, :] = sps.fftconvolve(h5_ds[i, :], kernel, mode='same')

    cbar.set_label('Frequency (Hz)', rotation=270, labelpad=20, fontsize=16)

    tx = h5_ds.get_spec_values('Time')

    # Loop through time segments
    ims = []
    for k, t in zip(np.arange(idx_start,
                              len(tx) - idx_stop, time_step),
                    tx[idx_start:-idx_stop:time_step]):

        _if = h5_ds.get_n_dim_form()[:, :, k]
        if isinstance(crop, int):
            if crop < 0:
                _if = _if[crop:, :]
            else:
                _if = _if[:crop, :]

        htitle = 'at ' + '{0:.4f}'.format(t * 1e3) + ' ms'
        im0 = ax.imshow(_if,
                        cmap=cmap,
                        origin='lower',
                        animated=True,
                        extent=[0, length[-1] * 1e6, 0, height[-1] * 1e6],
                        vmin=vmin,
                        vmax=vmax)

        if t > params['trigger']:
            tl0 = ax.text(length[-1] * 1e6 / 2 - .35,
                          height[-1] * 1e6 + .01,
                          htitle + ' ms, TRIGGER',
                          color='blue',
                          weight='bold',
                          fontsize=16)
        else:
            tl0 = ax.text(length[-1] * 1e6 / 2 - .35,
                          height[-1] * 1e6 + .01,
                          htitle + ' ms, PRE-TRIGGER',
                          color='black',
                          weight='regular',
                          fontsize=14)

        ims.append([im0, tl0])

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=interval,
                                    repeat_delay=repeat_delay)

    try:
        ani.save(filename + '.mp4')
    except TypeError as e:
        print(e)
        print(
            'A "str is not callable" message is often due to not running set_mpeg function'
        )

    # restore data
    for i in np.arange(h5_ds.shape[0]):
        h5_ds[i, :] = _orig[i, :]

    return
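A hypothetical call (h5_if is a placeholder for an instantaneous-frequency main dataset; writing the .mp4 requires a working ffmpeg setup):

create_freq_movie(h5_if, filename='inst_freq', time_step=50, smooth=3)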
Code Example #29
File: create_movie.py Project: lindat18/ffta
def create_cpd_movie(h5_ds,
                     filename='cpd',
                     size=(10, 6),
                     vscale=[None, None],
                     cmap='inferno',
                     smooth=None,
                     interval=60,
                     repeat_delay=100):
    '''
    Creates an animation of the CPD (potential) data over time.

    :param h5_ds: The dataset to animate
    :param filename: The output file name; '.mp4' is appended
    :param size: Figure size
    :param vscale: [vmin, vmax] color scale; if [None, None], generated automatically
    :param cmap: Colormap name
    :param smooth: Size of a simple boxcar smoothing kernel to apply to the data
    :param interval: Delay between animation frames, in milliseconds
    :param repeat_delay: Delay before the animation repeats, in milliseconds
    :return: None
    '''

    if not isinstance(h5_ds, usid.USIDataset):
        h5_ds = usid.USIDataset(h5_ds)

    if None not in vscale:
        fig, ax, cbar, _, _ = setup_movie(h5_ds, size, vscale, cmap=cmap)
        [vmin, vmax] = vscale
    else:
        fig, ax, cbar, vmin, vmax = setup_movie(h5_ds, size, cmap=cmap)

    cbar.set_label('Potential (V)', rotation=270, labelpad=20, fontsize=16)

    _orig = np.copy(h5_ds[()])
    length = h5_ds.get_pos_values('X')
    height = h5_ds.get_pos_values('Y')
    params = usid.hdf_utils.get_attributes(h5_ds)
    if 'trigger' not in params:
        params = usid.hdf_utils.get_attributes(h5_ds.parent)

    if isinstance(smooth, int):
        kernel = np.ones(smooth) / smooth
        for i in np.arange(h5_ds.shape[0]):
            h5_ds[i, :] = sps.fftconvolve(h5_ds[i, :], kernel, mode='same')

    tx = h5_ds.get_spec_values('Time')

    # Loop through time segments
    ims = []
    for k, t in enumerate(tx):

        _if = h5_ds.get_n_dim_form()[:, :, k]
        htitle = 'at ' + '{0:.4f}'.format(t * 1e3) + ' ms'
        im0 = ax.imshow(_if,
                        cmap=cmap,
                        origin='lower',
                        animated=True,
                        extent=[0, length[-1] * 1e6, 0, height[-1] * 1e6],
                        vmin=vmin,
                        vmax=vmax)

        if t > params['trigger']:
            tl0 = ax.text(length[-1] * 1e6 / 2 - .35,
                          height[-1] * 1e6 + .01,
                          htitle + ' ms, TRIGGER',
                          color='blue',
                          weight='bold',
                          fontsize=16)
        else:
            tl0 = ax.text(length[-1] * 1e6 / 2 - .35,
                          height[-1] * 1e6 + .01,
                          htitle + ' ms, PRE-TRIGGER',
                          color='black',
                          weight='regular',
                          fontsize=14)

        ims.append([im0, tl0])

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=interval,
                                    repeat_delay=repeat_delay)

    try:
        ani.save(filename + '.mp4')
    except TypeError:
        print(
            'A "str is not callable" message is often due to not running set_mpeg function'
        )

    # restore data
    for i in np.arange(h5_ds.shape[0]):
        h5_ds[i, :] = _orig[i, :]

    return
Code Example #30
File: plot_usi_dataset.py Project: ericpre/pyUSID
# Access the HDF5 dataset and check if it is a ``Main`` dataset in the first place:

h5_raw = h5_f['/Measurement_000/Channel_000/Raw_Data']
print(h5_raw)
print('h5_raw is a main dataset? {}'.format(
    usid.hdf_utils.check_if_main(h5_raw)))

########################################################################################################################
# It turns out that this is indeed a Main dataset. Therefore, we can turn this into a USIDataset without any
# problems.
#
# Creating a USIDataset
# -----------------------
# All one needs for creating a USIDataset object is a Main dataset. Here is how we can supercharge h5_raw:

pd_raw = usid.USIDataset(h5_raw)
print(pd_raw)

########################################################################################################################
# Notice how easy it was to create a USIDataset object. Also, note how the USIDataset is much more informative in
# comparison with the conventional h5py.Dataset object.
#
# USIDataset = Supercharged(h5py.Dataset)
# =========================================
# Remember that USIDataset is just an extension of the h5py.Dataset object class. Therefore, both ``h5_raw`` and
# ``pd_raw`` refer to the same object, as the following equality test demonstrates. Except that ``pd_raw`` knows about
# the ``ancillary datasets`` and other information, which makes it a far more powerful object for you.

print(pd_raw == h5_raw)

########################################################################################################################
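# As a quick illustration (a short sketch; these are standard USIDataset properties), the supercharged object exposes
# the ancillary information directly:

print(pd_raw.pos_dim_labels)       # names of the position dimensions
print(pd_raw.spec_dim_labels)      # names of the spectroscopic dimensions
data_nd = pd_raw.get_n_dim_form()  # data reshaped to its N-dimensional form
print('N-dimensional shape: {}'.format(data_nd.shape))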