Code example #1
    def test_wrong_type(self):
        file_path = 'link_h5_objects_as_attrs.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_main = h5_f.create_dataset('main', data=np.arange(5))

            with self.assertRaises(TypeError):
                hdf_utils.link_h5_objects_as_attrs(h5_main, np.arange(4))

            with self.assertRaises(TypeError):
                hdf_utils.link_h5_objects_as_attrs(np.arange(4), h5_main)

        os.remove(file_path)
Code example #2
    def test_legal(self):
        file_path = 'link_h5_objects_as_attrs.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:

            h5_main = h5_f.create_dataset('main', data=np.arange(5))
            h5_anc = h5_f.create_dataset('Ancillary', data=np.arange(3))
            h5_group = h5_f.create_group('Results')

            hdf_utils.link_h5_objects_as_attrs(h5_f,
                                               [h5_anc, h5_main, h5_group])
            for exp, name in zip([h5_main, h5_anc, h5_group],
                                 ['main', 'Ancillary', 'Results']):
                self.assertEqual(exp, h5_f[h5_f.attrs[name]])

            # Single object
            hdf_utils.link_h5_objects_as_attrs(h5_main, h5_anc)
            self.assertEqual(h5_f[h5_main.attrs['Ancillary']], h5_anc)

            # Linking to a group:
            hdf_utils.link_h5_objects_as_attrs(h5_group, [h5_anc, h5_main])
            for exp, name in zip([h5_main, h5_anc], ['main', 'Ancillary']):
                self.assertEqual(exp, h5_group[h5_group.attrs[name]])

        os.remove(file_path)
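Code examples #1 and #2 exercise `hdf_utils.link_h5_objects_as_attrs` from a unit-test harness. As a quick reference, here is a minimal standalone sketch of the same round trip; the `pyUSID.io` import path and the scratch file name are assumptions, since the tests above only show the bare `hdf_utils` module name.

    import os

    import h5py
    import numpy as np
    from pyUSID.io import hdf_utils  # assumed import path for link_h5_objects_as_attrs

    file_path = 'link_demo.h5'  # hypothetical scratch file
    if os.path.exists(file_path):
        os.remove(file_path)

    with h5py.File(file_path, mode='w') as h5_f:
        h5_main = h5_f.create_dataset('main', data=np.arange(5))
        h5_anc = h5_f.create_dataset('Ancillary', data=np.arange(3))

        # Stores an HDF5 object reference to h5_anc in h5_main.attrs,
        # keyed by the linked object's name ('Ancillary')
        hdf_utils.link_h5_objects_as_attrs(h5_main, h5_anc)

        # Dereference the stored reference through the file handle
        assert h5_f[h5_main.attrs['Ancillary']] == h5_anc

    os.remove(file_path)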
Code example #3
    def translate(self, parm_path):
        """
        Basic method that translates .mat data files to a single .h5 file
        
        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file. 
            
        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        self.parm_path = path.abspath(parm_path)
        (folder_path, file_name) = path.split(self.parm_path)
        # The folder's own name becomes the base name for the .h5 file
        (upper_dir, base_name) = path.split(folder_path)
        h5_path = path.join(folder_path, base_name + '.h5')

        # Read parameters
        parm_dict = readGmodeParms(parm_path)

        # Add the w^2 specific parameters to this list
        parm_data = loadmat(parm_path, squeeze_me=True, struct_as_record=True)
        freq_sweep_parms = parm_data['freqSweepParms']
        parm_dict['freq_sweep_delay'] = float(
            freq_sweep_parms['delay'].item())
        gen_sig = parm_data['genSig']
        parm_dict['wfm_fix_d_fast'] = np.int32(gen_sig['restrictT'].item())
        freq_array = np.float32(parm_data['freqArray'])

        # prepare and write spectroscopic values
        samp_rate = parm_dict['IO_down_samp_rate_[Hz]']
        num_bins = int(parm_dict['wfm_n_cycles'] * parm_dict['wfm_p_slow'] *
                       samp_rate)

        w_vec = np.arange(-0.5 * samp_rate, 0.5 * samp_rate,
                          np.float32(samp_rate / num_bins))

        # np.meshgrid offers a more elegant construction; see the sketch after this example
        spec_val_mat = np.zeros((len(freq_array) * num_bins, 2),
                                dtype=VALUES_DTYPE)
        spec_val_mat[:, 0] = np.tile(w_vec, len(freq_array))
        spec_val_mat[:, 1] = np.repeat(freq_array, num_bins)

        spec_ind_mat = np.zeros((2, len(freq_array) * num_bins),
                                dtype=np.int32)
        spec_ind_mat[0, :] = np.tile(np.arange(num_bins), len(freq_array))
        spec_ind_mat[1, :] = np.repeat(np.arange(len(freq_array)), num_bins)

        num_rows = parm_dict['grid_num_rows']
        num_cols = parm_dict['grid_num_cols']
        parm_dict['data_type'] = 'GmodeW2'

        num_pix = num_rows * num_cols

        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'W2'

        # Now start creating datasets and populating:
        if path.exists(h5_path):
            remove(h5_path)

        h5_f = h5py.File(h5_path, 'w')
        write_simple_attrs(h5_f, global_parms)

        meas_grp = create_indexed_group(h5_f, 'Measurement')
        chan_grp = create_indexed_group(meas_grp, 'Channel')
        write_simple_attrs(chan_grp, parm_dict)

        pos_dims = [
            Dimension('X', 'nm', num_rows),
            Dimension('Y', 'nm', num_cols)
        ]
        spec_dims = [
            Dimension('Response Bin', 'a.u.', num_bins),
            Dimension('Excitation Frequency', 'Hz', len(freq_array))
        ]

        # Minimize file size to the extent possible.
        # DAQs are rated at 16 bit so float16 should be most appropriate.
        # For some reason, compression is more effective on time series data

        h5_main = write_main_dataset(chan_grp, (num_pix, num_bins),
                                     'Raw_Data',
                                     'Deflection',
                                     'V',
                                     pos_dims,
                                     spec_dims,
                                     chunks=(1, num_bins),
                                     dtype=np.float32)

        # h5py's second positional argument is shape, so the arrays must be
        # passed via the data keyword
        h5_ex_freqs = chan_grp.create_dataset('Excitation_Frequencies',
                                              data=freq_array)
        h5_bin_freq = chan_grp.create_dataset('Bin_Frequencies', data=w_vec)

        # Now doing link_h5_objects_as_attrs:
        link_h5_objects_as_attrs(h5_main, [h5_ex_freqs, h5_bin_freq])

        # Now read the raw data files:
        pos_ind = 0
        for row_ind in range(1, num_rows + 1):
            for col_ind in range(1, num_cols + 1):
                file_path = path.join(
                    folder_path,
                    'fSweep_r' + str(row_ind) + '_c' + str(col_ind) + '.mat')
                print('Working on row {} col {}'.format(row_ind, col_ind))
                if path.exists(file_path):
                    # Load data file
                    pix_data = loadmat(file_path, squeeze_me=True)
                    pix_mat = pix_data['AI_mat']
                    # Take the inverse FFT on 2nd dimension
                    pix_mat = np.fft.ifft(np.fft.ifftshift(pix_mat, axes=1),
                                          axis=1)
                    # Verified with Matlab - no conjugate required here.
                    pix_vec = pix_mat.transpose().reshape(pix_mat.size)
                    h5_main[pos_ind, :] = np.float32(pix_vec)
                    h5_f.flush()  # flush from memory!
                else:
                    print('File not found for: row {} col {}'.format(
                        row_ind, col_ind))
                pos_ind += 1
                if (100.0 * pos_ind / num_pix) % 10 == 0:
                    print('completed translating {} %'.format(
                        int(100 * pos_ind / num_pix)))

        h5_f.close()

        return h5_path
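The comment in the translator above points at np.meshgrid as a cleaner way to build the spectroscopic matrices. A minimal sketch under that assumption, with hypothetical sizes standing in for the real `num_bins`, `w_vec` and `freq_array`, reproducing the tile/repeat layout:

    import numpy as np

    # Hypothetical stand-ins for the translator's num_bins, w_vec and freq_array
    num_bins = 4
    w_vec = np.linspace(-0.5, 0.5, num_bins, dtype=np.float32)
    freq_array = np.float32([10.0, 20.0, 30.0])
    num_freqs = len(freq_array)

    # tile / repeat construction, as in the translator above
    spec_val_ref = np.zeros((num_freqs * num_bins, 2), dtype=np.float32)
    spec_val_ref[:, 0] = np.tile(w_vec, num_freqs)
    spec_val_ref[:, 1] = np.repeat(freq_array, num_bins)

    # np.meshgrid equivalent: with indexing='ij' the frequency axis varies
    # slowest, so raveling in C order reproduces the tile / repeat layout
    freq_grid, w_grid = np.meshgrid(freq_array, w_vec, indexing='ij')
    spec_val_mat = np.stack([w_grid.ravel(), freq_grid.ravel()], axis=1)
    assert np.array_equal(spec_val_mat, spec_val_ref)

    # The same trick yields the index matrix
    f_inds, bin_inds = np.meshgrid(np.arange(num_freqs), np.arange(num_bins),
                                   indexing='ij')
    spec_ind_mat = np.stack([bin_inds.ravel(), f_inds.ravel()]).astype(np.int32)
    assert np.array_equal(spec_ind_mat[0], np.tile(np.arange(num_bins), num_freqs))
    assert np.array_equal(spec_ind_mat[1], np.repeat(np.arange(num_freqs), num_bins))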
Code example #4
File: sporc.py, Project: pycroscopy/pycroscopy
    def translate(self, parm_path):
        """
        Basic method that translates .mat data files to a single .h5 file
        
        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file. 
            
        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        parm_path = path.abspath(parm_path)
        (folder_path, file_name) = path.split(parm_path)
        # The folder's own name becomes the base name for the .h5 file
        (upper_dir, base_name) = path.split(folder_path)
        h5_path = path.join(folder_path, base_name + '.h5')

        # Read parameters
        print('reading parameter files')
        parm_dict, excit_wfm, spec_ind_mat = self.__readparms(parm_path)
        parm_dict['data_type'] = 'SPORC'

        num_rows = parm_dict['grid_num_rows']
        num_cols = parm_dict['grid_num_cols']
        num_pix = num_rows * num_cols

        # new data format
        spec_ind_mat = np.transpose(VALUES_DTYPE(spec_ind_mat))

        # Now start creating datasets and populating:
        pos_desc = [Dimension('Y', 'm', np.arange(num_rows)), Dimension('X', 'm', np.arange(num_cols))]
        ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False)

        spec_ind_labels = ['x index', 'y index', 'loop index', 'repetition index', 'slope index']
        spec_ind_dict = dict()
        for col_ind, col_name in enumerate(spec_ind_labels):
            spec_ind_dict[col_name] = (slice(col_ind, col_ind + 1), slice(None))
        ds_spec_inds = VirtualDataset('Spectroscopic_Indices', INDICES_DTYPE(spec_ind_mat))
        ds_spec_inds.attrs['labels'] = spec_ind_dict
        ds_spec_vals = VirtualDataset('Spectroscopic_Values', spec_ind_mat)
        ds_spec_vals.attrs['labels'] = spec_ind_dict
        ds_spec_vals.attrs['units'] = ['V', 'V', '', '', '']

        ds_excit_wfm = VirtualDataset('Excitation_Waveform', np.float32(excit_wfm))

        ds_raw_data = VirtualDataset('Raw_Data', data=[],
                                     maxshape=(num_pix, len(excit_wfm)),
                                     dtype=np.float16, chunking=(1, len(excit_wfm)),
                                     compression='gzip')

        # technically should change the date, etc.

        chan_grp = VirtualGroup('Channel_000')
        chan_grp.attrs = parm_dict
        chan_grp.add_children([ds_pos_ind, ds_pos_val, ds_spec_inds, ds_spec_vals,
                               ds_excit_wfm, ds_raw_data])

        global_parms = generate_dummy_main_parms()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        # assuming that the experiment was completed:        
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'SPORC'

        meas_grp = VirtualGroup('Measurement_000')
        meas_grp.add_children([chan_grp])
        spm_data = VirtualGroup('')
        spm_data.attrs = global_parms
        spm_data.add_children([meas_grp])

        if path.exists(h5_path):
            remove(h5_path)

        # Write everything except for the main data.
        hdf = HDFwriter(h5_path)

        h5_refs = hdf.write(spm_data)

        h5_main = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]

        # Now doing link_h5_objects_as_attrs:
        aux_ds_names = ['Excitation_Waveform', 'Position_Indices', 'Position_Values',
                        'Spectroscopic_Indices', 'Spectroscopic_Values']
        link_h5_objects_as_attrs(h5_main, get_h5_obj_refs(aux_ds_names, h5_refs))

        print('reading raw data now...')

        # Now read the raw data files:
        pos_ind = 0
        for row_ind in range(1, num_rows + 1):
            for col_ind in range(1, num_cols + 1):
                file_path = path.join(folder_path, 'result_r' + str(row_ind) + '_c' + str(col_ind) + '.mat')
                # print('Working on row {} col {}'.format(row_ind,col_ind))
                if path.exists(file_path):
                    # Load data file
                    pix_data = loadmat(file_path, squeeze_me=True)
                    # Take the inverse FFT on 1st dimension
                    pix_vec = np.fft.ifft(np.fft.ifftshift(pix_data['data']))
                    # Verified with Matlab - no conjugate required here.
                    h5_main[pos_ind, :] = np.float16(np.real(pix_vec))
                    hdf.flush()  # flush from memory!
                else:
                    print('File for row {} col {} not found'.format(row_ind, col_ind))
                pos_ind += 1
                if (100.0 * pos_ind / num_pix) % 10 == 0:
                    print('Finished reading {} % of data'.format(int(100 * pos_ind / num_pix)))

        hdf.close()

        return h5_path
Code example #5
File: sporc.py, Project: yig319/pycroscopy
    def translate(self, parm_path):
        """
        Basic method that translates .mat data files to a single .h5 file
        
        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file. 
            
        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        parm_path = path.abspath(parm_path)
        (folder_path, file_name) = path.split(parm_path)
        # The folder's own name becomes the base name for the .h5 file
        (upper_dir, base_name) = path.split(folder_path)
        h5_path = path.join(folder_path, base_name + '.h5')

        # Read parameters
        print('reading parameter files')
        parm_dict, excit_wfm, spec_ind_mat = self.__readparms(parm_path)
        parm_dict['data_type'] = 'SPORC'

        num_rows = parm_dict['grid_num_rows']
        num_cols = parm_dict['grid_num_cols']
        num_pix = num_rows * num_cols

        # new data format
        spec_ind_mat = np.transpose(VALUES_DTYPE(spec_ind_mat))

        # Now start creating datasets and populating:
        pos_desc = [
            Dimension('Y', 'm', np.arange(num_rows)),
            Dimension('X', 'm', np.arange(num_cols))
        ]
        ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc,
                                                     is_spectral=False)

        spec_ind_labels = [
            'x index', 'y index', 'loop index', 'repetition index',
            'slope index'
        ]
        spec_ind_dict = dict()
        for col_ind, col_name in enumerate(spec_ind_labels):
            spec_ind_dict[col_name] = (slice(col_ind,
                                             col_ind + 1), slice(None))
        ds_spec_inds = VirtualDataset('Spectroscopic_Indices',
                                      INDICES_DTYPE(spec_ind_mat))
        ds_spec_inds.attrs['labels'] = spec_ind_dict
        ds_spec_vals = VirtualDataset('Spectroscopic_Values', spec_ind_mat)
        ds_spec_vals.attrs['labels'] = spec_ind_dict
        ds_spec_vals.attrs['units'] = ['V', 'V', '', '', '']

        ds_excit_wfm = VirtualDataset('Excitation_Waveform',
                                      np.float32(excit_wfm))

        ds_raw_data = VirtualDataset('Raw_Data',
                                     data=[],
                                     maxshape=(num_pix, len(excit_wfm)),
                                     dtype=np.float16,
                                     chunking=(1, len(excit_wfm)),
                                     compression='gzip')

        # technically should change the date, etc.

        chan_grp = VirtualGroup('Channel_000')
        chan_grp.attrs = parm_dict
        chan_grp.add_children([
            ds_pos_ind, ds_pos_val, ds_spec_inds, ds_spec_vals, ds_excit_wfm,
            ds_raw_data
        ])

        global_parms = generate_dummy_main_parms()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'SPORC'

        meas_grp = VirtualGroup('Measurement_000')
        meas_grp.add_children([chan_grp])
        spm_data = VirtualGroup('')
        spm_data.attrs = global_parms
        spm_data.add_children([meas_grp])

        if path.exists(h5_path):
            remove(h5_path)

        # Write everything except for the main data.
        hdf = HDFwriter(h5_path)

        h5_refs = hdf.write(spm_data)

        h5_main = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]

        # Now doing link_h5_objects_as_attrs:
        aux_ds_names = [
            'Excitation_Waveform', 'Position_Indices', 'Position_Values',
            'Spectroscopic_Indices', 'Spectroscopic_Values'
        ]
        link_h5_objects_as_attrs(h5_main,
                                 get_h5_obj_refs(aux_ds_names, h5_refs))

        print('reading raw data now...')

        # Now read the raw data files:
        pos_ind = 0
        for row_ind in range(1, num_rows + 1):
            for col_ind in range(1, num_cols + 1):
                file_path = path.join(
                    folder_path,
                    'result_r' + str(row_ind) + '_c' + str(col_ind) + '.mat')
                # print('Working on row {} col {}'.format(row_ind,col_ind))
                if path.exists(file_path):
                    # Load data file
                    pix_data = loadmat(file_path, squeeze_me=True)
                    # Take the inverse FFT on 1st dimension
                    pix_vec = np.fft.ifft(np.fft.ifftshift(pix_data['data']))
                    # Verified with Matlab - no conjugate required here.
                    h5_main[pos_ind, :] = np.float16(np.real(pix_vec))
                    hdf.flush()  # flush from memory!
                else:
                    print('File for row {} col {} not found'.format(
                        row_ind, col_ind))
                pos_ind += 1
                if (100.0 * pos_ind / num_pix) % 10 == 0:
                    print('Finished reading {} % of data'.format(
                        int(100 * pos_ind / num_pix)))

        hdf.close()

        return h5_path
Code example #6
File: beps_ndf.py, Project: pycroscopy/pycroscopy
    def __close_meas_group(self, h5_refs, show_plots, save_plots, do_histogram):
        """
        Performs following operations : 
            * Updates the number of pixels attribute in the measurement group
            * Writes Noise floor axis labels as region references
            * Writes position values and indices along with region references
            * Links all ancilliary datasets to the main data set
            * Writes the spatiall averaged plot data
        
        Parameters
        ----------
        h5_refs : list of HDF references
            References to the written datasets
        show_plots : Boolean 
            Whether or not to show plots
        save_plots : Boolean
            Whether or not to save the generated plots
        do_histogram : Boolean
            Whether or not to generate and save 2D histograms of the raw data
            
        Returns
        -------
        None

        """
        # Update the number of pixels in the attributes
        meas_grp = self.ds_main.parent
        meas_grp.attrs['num_pix'] = self.ds_pixel_index

        # Write position specific datasets now that the dataset is complete
        pos_slice_dict = dict()
        for spat_ind, spat_dim in enumerate(self.pos_labels):
            pos_slice_dict[spat_dim] = (slice(None), slice(spat_ind, spat_ind + 1))

        pix_slice = slice(self.ds_pixel_start_indx,
                          self.ds_pixel_start_indx + self.ds_pixel_index)
        ds_pos_ind = VirtualDataset('Position_Indices',
                                    self.pos_mat[pix_slice, :],
                                    dtype=INDICES_DTYPE)

        ds_pos_ind.attrs['labels'] = pos_slice_dict
        ds_pos_ind.attrs['units'] = self.pos_units

        self.pos_vals_list = np.array(self.pos_vals_list)
        # Ensuring that the X and Y values vary from 0 to N instead of -0.5 N to + 0.5 N
        for col_ind in range(2):
            min_val = np.min(self.pos_vals_list[:, col_ind])
            self.pos_vals_list[:, col_ind] -= min_val
            self.pos_vals_list[:, col_ind] *= 1E+6  # convert to microns

        if np.max(self.pos_vals_list[:, 2]) > 1E-3:
            # Setpoint spectroscopy
            if 'Z' in self.pos_labels:
                dim_ind = self.pos_labels.index('Z')
                # TODO: Find a way to correct the labels
                # self.pos_labels[dim_ind] = 'Setpoint'
                self.pos_units[dim_ind] = 'defl V'
        else:
            # Z spectroscopy
            self.pos_vals_list[:, 2] *= 1E+6  # convert to microns

        pos_val_mat = VALUES_DTYPE(self.pos_mat[pix_slice, :])

        for col_ind, targ_dim_name in enumerate(['X', 'Y', 'Z']):
            if targ_dim_name in self.pos_labels:
                dim_ind = self.pos_labels.index(targ_dim_name)
                # Replace indices with the x, y, z values from the pixels
                pos_val_mat[:, dim_ind] = self.pos_vals_list[:, col_ind]

        ds_pos_val = VirtualDataset('Position_Values', pos_val_mat)
        ds_pos_val.attrs['labels'] = pos_slice_dict
        ds_pos_val.attrs['units'] = self.pos_units

        meas_grp = VirtualGroup(meas_grp.name, '/')
        meas_grp.add_children([ds_pos_ind, ds_pos_val])

        h5_refs += self.hdf.write(meas_grp)

        # Do all the reference linking:
        aux_ds_names = ['Excitation_Waveform', 'Position_Indices', 'Position_Values', 'UDVS_Indices',
                        'Spectroscopic_Indices', 'Bin_Step', 'Bin_Indices', 'Bin_Wfm_Type',
                        'Bin_Frequencies', 'Bin_FFT', 'UDVS', 'UDVS_Labels', 'Noise_Floor', 'Spectroscopic_Values']
        link_h5_objects_as_attrs(self.ds_main, get_h5_obj_refs(aux_ds_names, h5_refs))

        # While we have all the references and mean data, write the plot groups as well:
        generatePlotGroups(USIDataset(self.ds_main), self.mean_resp,
                           self.folder_path, self.basename,
                           self.max_resp, self.min_resp,
                           max_mem_mb=self.max_ram,
                           spec_label=self.spec_label,
                           show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self.debug)

        # Now that everything about this dataset is complete:
        self.dset_index += 1
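The label dictionaries built in this method follow the legacy pycroscopy region-reference convention: each dimension label maps to a (row slice, column slice) pair into the indices matrix. A minimal standalone sketch with a hypothetical 2x2 grid shows what those pairs select:

    import numpy as np

    # Hypothetical 2x2 grid: four pixels, position columns ordered (X, Y)
    pos_labels = ['X', 'Y']
    pos_mat = np.array([[0, 0],
                        [1, 0],
                        [0, 1],
                        [1, 1]], dtype=np.uint32)

    # Same construction as in __close_meas_group above
    pos_slice_dict = dict()
    for spat_ind, spat_dim in enumerate(pos_labels):
        pos_slice_dict[spat_dim] = (slice(None), slice(spat_ind, spat_ind + 1))

    # Each label's slice pair picks out one column of the matrix
    print(pos_mat[pos_slice_dict['Y']].ravel())  # [0 0 1 1]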
Code example #7
    def translate(self, parm_path):
        """
        The main function that translates the provided file into a .h5 file

        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file.

        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        parm_path = path.abspath(parm_path)
        parm_dict, excit_wfm = self._read_parms(parm_path)

        self._parse_file_path(parm_path)

        num_dat_files = len(self.file_list)

        with open(self.file_list[0], 'rb') as f:
            spectrogram_size, count_vals = self._parse_spectrogram_size(f)
        print("spectrogram size:", spectrogram_size)
        num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']
        print('Number of pixels: ', num_pixels)
        print('Count Values: ', count_vals)
        if (num_pixels + 1) != count_vals:
            # Abort rather than translate mismatched data
            raise ValueError(
                'Data size does not match number of pixels expected. Cannot continue')

        # Now start creating datasets and populating:

        bias_dim = Dimension('Bias', 'V', excit_wfm)
        ds_spec_inds, ds_spec_vals = build_ind_val_dsets(bias_dim,
                                                         is_spectral=True,
                                                         verbose=False)

        ds_spec_vals.data = np.atleast_2d(
            excit_wfm)  # The data generated above varies linearly. Override.

        pos_desc = [
            Dimension('X', 'a.u.', np.arange(parm_dict['grid_num_cols'])),
            Dimension('Y', 'a.u.', np.arange(parm_dict['grid_num_rows']))
        ]

        ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc,
                                                     is_spectral=False,
                                                     verbose=False)

        ds_raw_data = VirtualDataset('Raw_Data',
                                     data=[],
                                     maxshape=(ds_pos_ind.shape[0],
                                               spectrogram_size - 5),
                                     dtype=np.complex64,
                                     chunking=(1, spectrogram_size - 5),
                                     compression='gzip')
        ds_raw_data.attrs['quantity'] = ['Complex']

        aux_ds_names = [
            'Position_Indices', 'Position_Values', 'Spectroscopic_Indices',
            'Spectroscopic_Values'
        ]

        # Divide by 2: each channel has a real and an imaginary file
        num_ai_chans = num_dat_files // 2

        # technically should change the date, etc.
        spm_data = VirtualGroup('')
        global_parms = generate_dummy_main_parms()
        global_parms['data_type'] = 'trKPFM'
        global_parms['translator'] = 'trKPFM'
        spm_data.attrs = global_parms
        meas_grp = VirtualGroup('Measurement_000')
        meas_grp.attrs = parm_dict
        spm_data.add_children([meas_grp])

        hdf = HDFwriter(self.h5_path)
        # spm_data.showTree()
        hdf.write(spm_data, print_log=False)

        self.raw_datasets = list()

        for chan_index in range(num_ai_chans):
            chan_grp = VirtualGroup('Channel_{:03d}'.format(chan_index),
                                    '/Measurement_000/')

            if chan_index == 0:
                chan_grp.attrs = {'Harmonic': 1}
            else:
                chan_grp.attrs = {'Harmonic': 2}

            chan_grp.add_children([
                ds_pos_ind, ds_pos_val, ds_spec_inds, ds_spec_vals, ds_raw_data
            ])
            h5_refs = hdf.write(chan_grp, print_log=False)
            h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]
            link_h5_objects_as_attrs(h5_raw,
                                     get_h5_obj_refs(aux_ds_names, h5_refs))
            self.raw_datasets.append(h5_raw)

        # Now that the N channels have been made, populate them with the actual data....
        self._read_data(parm_dict, parm_path, spectrogram_size)

        hdf.close()
        return self.h5_path
Code example #8
    def translate(self, parm_path):
        """
        Basic method that translates .mat data files to a single .h5 file
        
        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file. 
            
        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        self.parm_path = path.abspath(parm_path)
        (folder_path, file_name) = path.split(self.parm_path)
        # The folder's own name becomes the base name for the .h5 file
        (upper_dir, base_name) = path.split(folder_path)
        h5_path = path.join(folder_path, base_name + '.h5')

        # Read parameters
        parm_dict = readGmodeParms(parm_path)

        # Add the GVS specific parameters to this list
        parm_data = loadmat(parm_path, squeeze_me=True, struct_as_record=True)
        # freq_array is used below to build the spectroscopic matrices and
        # dimensions, so its assignment must stay live
        freq_array = np.float32(parm_data['freqArray'])

        # prepare and write spectroscopic values
        samp_rate = parm_dict['IO_down_samp_rate_[Hz]']
        num_bins = int(parm_dict['wfm_n_cycles'] * parm_dict['wfm_p_slow'] * samp_rate)

        w_vec = np.arange(-0.5 * samp_rate, 0.5 * samp_rate, np.float32(samp_rate / num_bins))

        # np.meshgrid offers a more elegant construction; see the sketch after code example #3
        spec_val_mat = np.zeros((len(freq_array) * num_bins, 2), dtype=VALUES_DTYPE)
        spec_val_mat[:, 0] = np.tile(w_vec, len(freq_array))
        spec_val_mat[:, 1] = np.repeat(freq_array, num_bins)

        spec_ind_mat = np.zeros((2, len(freq_array) * num_bins), dtype=np.int32)
        spec_ind_mat[0, :] = np.tile(np.arange(num_bins), len(freq_array))
        spec_ind_mat[1, :] = np.repeat(np.arange(len(freq_array)), num_bins)

        num_rows = parm_dict['grid_num_rows']
        num_cols = parm_dict['grid_num_cols']
        parm_dict['data_type'] = 'GVS'

        num_pix = num_rows * num_cols

        global_parms = generate_dummy_main_parms()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'GVS'

        # Now start creating datasets and populating:
        if path.exists(h5_path):
            remove(h5_path)

        h5_f = h5py.File(h5_path, 'w')
        write_simple_attrs(h5_f, global_parms)

        meas_grp = create_indexed_group(h5_f, 'Measurement')
        chan_grp = create_indexed_group(meas_grp, 'Channel')
        write_simple_attrs(chan_grp, parm_dict)


        pos_dims = [Dimension('X', 'nm', num_rows),
                    Dimension('Y', 'nm', num_cols)]
        spec_dims = [Dimension('Response Bin', 'a.u.', num_bins),
                     Dimension('Excitation Frequency', 'Hz', len(freq_array))]

        # Minimize file size to the extent possible.
        # DAQs are rated at 16 bit so float16 should be most appropriate.
        # For some reason, compression is more effective on time series data

        h5_main = write_main_dataset(chan_grp, (num_pix, num_bins), 'Raw_Data',
                                     'Deflection', 'V',
                                     pos_dims, spec_dims,
                                     chunks=(1, num_bins), dtype=np.float32)

        # h5py's second positional argument is shape; pass the arrays as data
        h5_ex_freqs = chan_grp.create_dataset('Excitation_Frequencies', data=freq_array)
        h5_bin_freq = chan_grp.create_dataset('Bin_Frequencies', data=w_vec)

        # Now doing link_h5_objects_as_attrs:
        link_h5_objects_as_attrs(h5_main, [h5_ex_freqs, h5_bin_freq])

        # Now read the raw data files:
        pos_ind = 0
        for row_ind in range(1, num_rows + 1):
            for col_ind in range(1, num_cols + 1):
                file_path = path.join(folder_path, 'fSweep_r' + str(row_ind) + '_c' + str(col_ind) + '.mat')
                print('Working on row {} col {}'.format(row_ind, col_ind))
                if path.exists(file_path):
                    # Load data file
                    pix_data = loadmat(file_path, squeeze_me=True)
                    pix_mat = pix_data['AI_mat']
                    # Take the inverse FFT on 2nd dimension
                    pix_mat = np.fft.ifft(np.fft.ifftshift(pix_mat, axes=1), axis=1)
                    # Verified with Matlab - no conjugate required here.
                    pix_vec = pix_mat.transpose().reshape(pix_mat.size)
                    h5_main[pos_ind, :] = np.float32(pix_vec)
                    h5_f.flush()  # flush from memory!
                else:
                    print('File not found for: row {} col {}'.format(row_ind, col_ind))
                pos_ind += 1
                if (100.0 * pos_ind / num_pix) % 10 == 0:
                    print('completed translating {} %'.format(int(100 * pos_ind / num_pix)))

        h5_f.close()

        return h5_path