Example #1
    def _create_fit_datasets(self):
        """
        Creates the HDF5 fit dataset. pycroscopy requires that the h5 group, guess dataset,
        corresponding spectroscopic and position datasets be created and populated at this point.
        This function will create the HDF5 dataset for the fit and link it to same ancillary datasets as the guess.
        The fit dataset will NOT be populated here but will instead be populated using the __setData function
        """

        if self.h5_guess is None:
            warn('Need to guess before fitting!')
            return

        if self.step_start_inds is None:
            h5_spec_inds = self.h5_main.h5_spec_inds
            self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]

        if self.num_udvs_steps is None:
            self.num_udvs_steps = len(self.step_start_inds)

        if self.freq_vec is None:
            self._get_frequency_vector()

        h5_sho_grp = self.h5_guess.parent
        write_simple_attrs(h5_sho_grp, {'SHO_fit_method': "pycroscopy BESHO"})

        # Create the fit dataset as an empty dataset of the same size and dtype as the guess.
        # Also automatically links in the ancillary datasets.
        self.h5_fit = USIDataset(create_empty_dataset(self.h5_guess, dtype=sho32, dset_name='Fit'))

        # These attributes are necessary for comparing against new runs to avoid re-computation
        # and for resuming a partial computation
        write_simple_attrs(self.h5_fit, self._parms_dict)
        write_simple_attrs(self.h5_fit, {'SHO_fit_method': "pycroscopy BESHO", 'last_pixel': 0})

        self.h5_fit.file.flush()
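
The last_pixel attribute written above is what lets pycroscopy detect and resume a partially completed computation. A minimal sketch of that checkpoint pattern in plain h5py; the file name, group name, and shapes are invented for illustration:

import h5py
import numpy as np

# Invented file, group, and shapes, purely for illustration
with h5py.File('checkpoint_demo.h5', 'w') as h5_f:
    h5_fit = h5_f.create_group('SHO_Fit_000').create_dataset(
        'Fit', shape=(1024, 1), dtype=np.float32)
    h5_fit.attrs['last_pixel'] = 0  # nothing computed yet

    # ... compute the first 256 positions, then checkpoint:
    h5_fit[:256] = np.random.rand(256, 1)
    h5_fit.attrs['last_pixel'] = 256
    h5_fit.file.flush()

# A later run reads the attribute and resumes where the previous one stopped
with h5py.File('checkpoint_demo.h5', 'r+') as h5_f:
    h5_fit = h5_f['SHO_Fit_000/Fit']
    start = int(h5_fit.attrs['last_pixel'])
    h5_fit[start:] = np.random.rand(h5_fit.shape[0] - start, 1)
    h5_fit.attrs['last_pixel'] = h5_fit.shape[0]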
Example #2
    def _create_fit_datasets(self):
        """
        Creates the HDF5 fit dataset. pycroscopy requires that the h5 group, guess dataset,
        corresponding spectroscopic and position datasets be created and populated at this point.
        This function will create the HDF5 dataset for the fit and link it to same ancillary datasets as the guess.
        The fit dataset will NOT be populated here but will instead be populated using the __setData function
        """

        if self._h5_guess is None or self.h5_results_grp is None:
            warn('Need to guess before fitting!')
            return

        """
        Once the guess is complete, the last_pixel attribute will be set to complete for the group.
        Once the fit is initiated, during the creation of the status dataset, this last_pixel
        attribute will be used and it wil make the fit look like it was already complete. Which is not the case.
        This is a problem of doing two processes within the same group. 
        Until all legacy is removed, we will simply reset the last_pixel attribute.
        """
        self.h5_results_grp.attrs['last_pixel'] = 0

        write_simple_attrs(self.h5_results_grp, self.parms_dict)

        # Create the fit dataset as an empty dataset of the same size and dtype
        # as the guess.
        # Also automatically links in the ancillary datasets.
        self._h5_fit = USIDataset(create_empty_dataset(self._h5_guess,
                                                       dtype=sho32,
                                                       dset_name='Fit'))

        self._h5_fit.file.flush()
        
        if self.verbose and self.mpi_rank == 0:
            print('Finished creating Fit dataset')
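
The reset described in the comment above amounts to a single attribute write; a sketch in plain h5py, where the file and group paths are hypothetical:

import h5py

with h5py.File('results.h5', 'r+') as h5_f:
    # Hypothetical path to a group holding both Guess and Fit
    h5_grp = h5_f['Measurement_000/Channel_000/Raw_Data-SHO_Fit_000']
    # The completed guess left last_pixel at the final position count;
    # reset it so the fit's status bookkeeping starts from scratch.
    h5_grp.attrs['last_pixel'] = 0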
Example #3
    def extract_loop_parameters(h5_loop_fit, nuc_threshold=0.03):
        """
        Method to extract a set of physical loop parameters from a dataset of fit parameters

        Parameters
        ----------
        h5_loop_fit : h5py.Dataset
            Dataset of loop fit parameters
        nuc_threshold : float
            Nucleation threshold to use when calculating the physical parameters

        Returns
        -------
        h5_loop_parameters : h5py.Dataset
            Dataset of physical parameters
        """
        dset_name = h5_loop_fit.name.split('/')[-1] + '_Loop_Parameters'
        h5_loop_parameters = create_empty_dataset(h5_loop_fit, dtype=switching32,
                                                  dset_name=dset_name,
                                                  new_attrs={'nuc_threshold': nuc_threshold})

        loop_coef_vec = flatten_compound_to_real(np.reshape(h5_loop_fit, [-1, 1]))
        switching_coef_vec = calc_switching_coef_vec(loop_coef_vec, nuc_threshold)

        h5_loop_parameters[:, :] = switching_coef_vec.reshape(h5_loop_fit.shape)

        h5_loop_fit.file.flush()

        return h5_loop_parameters
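
A hypothetical usage sketch for extract_loop_parameters; the file path and the internal path to the loop fit dataset are assumptions and must match your own file layout:

import h5py

# All paths here are assumptions; adjust to your own file layout
with h5py.File('be_loop_data.h5', 'r+') as h5_f:
    h5_loop_fit = h5_f['Measurement_000/Channel_000/Raw_Data-Loop_Fit_000/Fit']
    h5_parms = extract_loop_parameters(h5_loop_fit, nuc_threshold=0.03)
    print(h5_parms.name, h5_parms.dtype.names)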
Example #4
    def _create_guess_datasets(self):
        """
        Creates the HDF5 Guess dataset and links it to the ancillary datasets.
        """
        self.h5_guess = create_empty_dataset(self.h5_loop_metrics, loop_fit32, 'Guess')
        write_simple_attrs(self._h5_group, {'guess method': 'pycroscopy statistical'})

        # These attributes are necessary for comparing against new runs to avoid re-computation
        # and for resuming a partial computation
        write_simple_attrs(self.h5_guess, self._parms_dict)
        write_simple_attrs(self.h5_guess, {'Loop_fit_method': "pycroscopy statistical", 'last_pixel': 0})

        self.h5_main.file.flush()
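
write_simple_attrs is, at its core, a loop over the attrs of an HDF5 object. A rough stand-in in plain h5py to make the bookkeeping explicit; this is a simplified sketch, not pyUSID's actual implementation, which also validates and sanitizes values:

def write_simple_attrs_sketch(h5_obj, attrs):
    """Rough stand-in for pyUSID's write_simple_attrs (simplified)."""
    for key, val in attrs.items():
        h5_obj.attrs[key] = val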
Example #5
    def _create_results_datasets(self):
        self.h5_results_grp = hdf_utils.create_results_group(
            self.h5_main, self.process_name)
        assert isinstance(self.h5_results_grp, h5py.Group)
        if self.verbose:
            print('Results group created.')
        self.results = hdf_utils.create_empty_dataset(
            self.h5_main,
            self.h5_main.dtype,
            'Filtered_Data',
            h5_group=self.h5_results_grp)
        # self.results = hdf_utils.write_main_dataset(
        #     self.h5_results_grp, (self.h5_main.shape[0], 1), "Results",
        #     "Results", "Units", None, usid.io.write_utils.Dimension('arb', '', [1]),
        #     h5_pos_inds=self.h5_main.h5_pos_inds,
        #     h5_pos_vals=self.h5_main.h5_pos_vals, dtype=np.float32)
        if self.verbose:
            print('Empty main dataset for results written')
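
create_empty_dataset mirrors the shape and dtype of a source dataset. A minimal h5py sketch of the same idea; the real pyUSID helper additionally copies attributes and links the ancillary datasets:

import h5py

def empty_like_sketch(h5_source, dset_name, h5_group=None, dtype=None):
    """Create an empty dataset shaped like h5_source (simplified sketch)."""
    h5_group = h5_group if h5_group is not None else h5_source.parent
    new_dtype = dtype if dtype is not None else h5_source.dtype
    return h5_group.create_dataset(dset_name, shape=h5_source.shape,
                                   dtype=new_dtype)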
Example #6
    def _create_fit_datasets(self):
        """
        Creates the HDF5 Fit dataset and links it to the ancillary datasets.
        """

        if self.h5_guess is None:
            warn('Need to guess before fitting!')
            return

        self.h5_fit = create_empty_dataset(self.h5_guess, loop_fit32, 'Fit')
        write_simple_attrs(self._h5_group, {'fit method': 'pycroscopy functional'})

        # These attributes are necessary for comparing against new runs to avoid re-computation
        # and for resuming a partial computation
        write_simple_attrs(self.h5_fit, self._parms_dict)
        write_simple_attrs(self.h5_fit, {'Loop_fit_method': "pycroscopy functional", 'last_pixel': 0})
Example #7
    def _create_projection_datasets(self):
        """
        Set up the Loop_Fit group and the loop projection datasets

        """
        # First grab the spectroscopic indices and values and position indices
        self._sho_spec_inds = self.h5_main.h5_spec_inds
        self._sho_spec_vals = self.h5_main.h5_spec_vals
        self._sho_pos_inds = self.h5_main.h5_pos_inds

        fit_dim_ind = self.h5_main.spec_dim_labels.index(self._fit_dim_name)

        self._fit_spec_index = fit_dim_ind
        self._fit_offset_index = 1 + fit_dim_ind

        # Calculate the number of loops per position
        cycle_start_inds = np.argwhere(self._sho_spec_inds[fit_dim_ind, :] == 0).flatten()
        tot_cycles = cycle_start_inds.size

        # Make the results group
        self._h5_group = create_results_group(self.h5_main, 'Loop_Fit')
        write_simple_attrs(self._h5_group, {'projection_method': 'pycroscopy BE loop model'})

        # Write datasets
        self.h5_projected_loops = create_empty_dataset(self.h5_main, np.float32, 'Projected_Loops',
                                                       h5_group=self._h5_group)

        h5_loop_met_spec_inds, h5_loop_met_spec_vals = write_reduced_spec_dsets(self._h5_group, self._sho_spec_inds,
                                                                                self._sho_spec_vals, self._fit_dim_name,
                                                                                basename='Loop_Metrics')

        self.h5_loop_metrics = write_main_dataset(self._h5_group, (self.h5_main.shape[0], tot_cycles), 'Loop_Metrics',
                                                  'Metrics', 'compound', None, None, dtype=loop_metrics32,
                                                  h5_pos_inds=self.h5_main.h5_pos_inds,
                                                  h5_pos_vals=self.h5_main.h5_pos_vals,
                                                  h5_spec_inds=h5_loop_met_spec_inds,
                                                  h5_spec_vals=h5_loop_met_spec_vals)

        # Copy region reference:
        copy_region_refs(self.h5_main, self.h5_projected_loops)
        copy_region_refs(self.h5_main, self.h5_loop_metrics)

        self.h5_main.file.flush()
        self._met_spec_inds = self.h5_loop_metrics.h5_spec_inds

        return
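
The cycle-counting step relies on the spectroscopic index of the fit dimension resetting to 0 at the start of every loop. A toy numpy example with invented values:

import numpy as np

# One spectroscopic-index row for the DC-offset dimension:
# two cycles of four voltage steps each
spec_inds_row = np.array([0, 1, 2, 3, 0, 1, 2, 3])

cycle_start_inds = np.argwhere(spec_inds_row == 0).flatten()
tot_cycles = cycle_start_inds.size
print(cycle_start_inds, tot_cycles)  # [0 4] 2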
Example #8
    def _create_fit_datasets(self):
        """
        Creates the HDF5 fit dataset. pycroscopy requires that the h5 group, guess dataset,
        corresponding spectroscopic and position datasets be created and populated at this point.
        This function will create the HDF5 dataset for the fit and link it to same ancillary datasets as the guess.
        The fit dataset will NOT be populated here but will instead be populated using the __setData function
        """

        if self.h5_guess is None:
            warn('Need to guess before fitting!')
            return

        if self.step_start_inds is None:
            h5_spec_inds = self.h5_main.h5_spec_inds
            self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]

        if self.num_udvs_steps is None:
            self.num_udvs_steps = len(self.step_start_inds)

        if self.freq_vec is None:
            self._get_frequency_vector()

        h5_sho_grp = self.h5_guess.parent
        write_simple_attrs(h5_sho_grp, {'SHO_fit_method': "pycroscopy BESHO"})

        # Create the fit dataset as an empty dataset of the same size and dtype as the guess.
        # Also automatically links in the ancillary datasets.
        self.h5_fit = USIDataset(
            create_empty_dataset(self.h5_guess, dtype=sho32, dset_name='Fit'))

        # These attributes are necessary for comparing against new runs to avoid re-computation
        # and for resuming a partial computation
        write_simple_attrs(self.h5_fit, self._parms_dict)
        write_simple_attrs(self.h5_fit, {
            'SHO_fit_method': "pycroscopy BESHO",
            'last_pixel': 0
        })

        self.h5_fit.file.flush()
Example #9
    def _create_results_datasets(self):
        """
        Creates all the datasets necessary for holding all parameters + data.
        """

        self.h5_results_grp = create_results_group(self.h5_main, self.process_name)

        self.parms_dict.update({'last_pixel': 0, 'algorithm': 'pycroscopy_SignalFilter'})

        write_simple_attrs(self.h5_results_grp, self.parms_dict)

        assert isinstance(self.h5_results_grp, h5py.Group)

        if isinstance(self.composite_filter, np.ndarray):
            h5_comp_filt = self.h5_results_grp.create_dataset('Composite_Filter',
                                                              data=np.float32(self.composite_filter))

            if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Finished creating the Composite_Filter dataset'.format(self.mpi_rank))

        # First create the position datasets if the new indices are smaller...
        if self.num_effective_pix != self.h5_main.shape[0]:
            # TODO: Do this part correctly. See past solution:
            """
            # need to make new position datasets by taking every n'th index / value:
                new_pos_vals = np.atleast_2d(h5_pos_vals[slice(0, None, self.num_effective_pix), :])
                pos_descriptor = []
                for name, units, leng in zip(h5_pos_inds.attrs['labels'], h5_pos_inds.attrs['units'],
                                             [int(np.unique(h5_pos_inds[:, dim_ind]).size / self.num_effective_pix)
                                              for dim_ind in range(h5_pos_inds.shape[1])]):
                    pos_descriptor.append(Dimension(name, units, np.arange(leng)))
                ds_pos_inds, ds_pos_vals = build_ind_val_dsets(pos_descriptor, is_spectral=False, verbose=self.verbose)
                h5_pos_vals.data = np.atleast_2d(new_pos_vals)  # The data generated above varies linearly. Override.

            """
            h5_pos_inds_new, h5_pos_vals_new = write_ind_val_dsets(self.h5_results_grp,
                                                                   Dimension('pixel', 'a.u.', self.num_effective_pix),
                                                                   is_spectral=False, verbose=self.verbose and self.mpi_rank==0)
            if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Created the new position ancillary dataset'.format(self.mpi_rank))

        else:
            h5_pos_inds_new = self.h5_main.h5_pos_inds
            h5_pos_vals_new = self.h5_main.h5_pos_vals

            if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Reusing source datasets position datasets'.format(self.mpi_rank))

        if self.noise_threshold is not None:
            self.h5_noise_floors = write_main_dataset(self.h5_results_grp, (self.num_effective_pix, 1), 'Noise_Floors',
                                                      'Noise', 'a.u.', None, Dimension('arb', '', [1]),
                                                      dtype=np.float32, aux_spec_prefix='Noise_Spec_',
                                                      h5_pos_inds=h5_pos_inds_new, h5_pos_vals=h5_pos_vals_new,
                                                      verbose=self.verbose and self.mpi_rank == 0)
            if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Finished creating the Noise_Floors dataset'.format(self.mpi_rank))

        if self.write_filtered:
            # Filtered data is identical to Main_Data in every way - just a duplicate
            self.h5_filtered = create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',
                                                    h5_group=self.h5_results_grp)
            if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Finished creating the Filtered dataset'.format(self.mpi_rank))

        self.hot_inds = None

        if self.write_condensed:
            self.hot_inds = np.where(self.composite_filter > 0)[0]
            self.hot_inds = np.uint(self.hot_inds[int(0.5 * len(self.hot_inds)):])  # only need to keep half the data
            condensed_spec = Dimension('hot_frequencies', '', int(0.5 * len(self.hot_inds)))
            self.h5_condensed = write_main_dataset(self.h5_results_grp, (self.num_effective_pix, len(self.hot_inds)),
                                                   'Condensed_Data', 'Complex', 'a. u.', None, condensed_spec,
                                                   h5_pos_inds=h5_pos_inds_new, h5_pos_vals=h5_pos_vals_new,
                                                   dtype=np.complex128, verbose=self.verbose and self.mpi_rank == 0)
            if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Finished creating the Condensed dataset'.format(self.mpi_rank))

        if self.mpi_size > 1:
            self.mpi_comm.Barrier()
        self.h5_main.file.flush()
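
The "hot" index selection keeps only half of the non-zero filter bins, since the spectrum of real-valued data is symmetric and half suffices. A toy numpy example:

import numpy as np

# Toy band-pass filter over 16 frequency bins
composite_filter = np.zeros(16)
composite_filter[6:10] = 1.0

hot_inds = np.where(composite_filter > 0)[0]    # array([6, 7, 8, 9])
hot_inds = hot_inds[int(0.5 * len(hot_inds)):]  # keep the second half: [8 9]
print(hot_inds)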
Example #10
    def _create_results_datasets(self):
        """
        Creates the HDF5 datasets and groups to hold the results
        """
        # create all h5 datasets here:
        num_pos = self.h5_main.shape[0]

        if self.verbose and self.mpi_rank == 0:
            print('Now creating the datasets')

        self.h5_results_grp = create_results_group(self.h5_main,
                                                   self.process_name)

        write_simple_attrs(self.h5_results_grp, {
            'algorithm_author': 'Kody J. Law',
            'last_pixel': 0
        })
        write_simple_attrs(self.h5_results_grp, self.parms_dict)

        if self.verbose and self.mpi_rank == 0:
            print('created group: {} with attributes:'.format(
                self.h5_results_grp.name))
            print(get_attributes(self.h5_results_grp))

        # One of those rare instances when the result is exactly the same as the source
        self.h5_i_corrected = create_empty_dataset(
            self.h5_main,
            np.float32,
            'Corrected_Current',
            h5_group=self.h5_results_grp)

        if self.verbose and self.mpi_rank == 0:
            print('Created I Corrected')
            # print_tree(self.h5_results_grp)

        # For some reason, we cannot specify chunks or compression!
        # The resistance dataset requires the creation of a new spectroscopic dimension
        self.h5_resistance = write_main_dataset(
            self.h5_results_grp,
            (num_pos, self.num_x_steps),
            'Resistance',
            'Resistance',
            'GOhms',
            None,
            Dimension('Bias', 'V', self.num_x_steps),
            dtype=np.float32,  # chunks=(1, self.num_x_steps), #compression='gzip',
            h5_pos_inds=self.h5_main.h5_pos_inds,
            h5_pos_vals=self.h5_main.h5_pos_vals)

        if self.verbose and self.mpi_rank == 0:
            print('Created Resistance')
            # print_tree(self.h5_results_grp)

        assert isinstance(self.h5_resistance,
                          USIDataset)  # only here for PyCharm
        self.h5_new_spec_vals = self.h5_resistance.h5_spec_vals

        # The variance is identical to the resistance dataset
        self.h5_variance = create_empty_dataset(self.h5_resistance, np.float32,
                                                'R_variance')

        if self.verbose and self.mpi_rank == 0:
            print('Created Variance')
            # print_tree(self.h5_results_grp)

        # The capacitance dataset requires new spectroscopic dimensions as well
        self.h5_cap = write_main_dataset(
            self.h5_results_grp,
            (num_pos, 1),
            'Capacitance',
            'Capacitance',
            'pF',
            None,
            Dimension('Direction', '', [1]),
            h5_pos_inds=self.h5_main.h5_pos_inds,
            h5_pos_vals=self.h5_main.h5_pos_vals,
            dtype=cap_dtype,  #compression='gzip',
            aux_spec_prefix='Cap_Spec_')

        if self.verbose and self.mpi_rank == 0:
            print('Created Capacitance')
            # print_tree(self.h5_results_grp)
            print('Done creating all results datasets!')

        if self.mpi_size > 1:
            self.mpi_comm.Barrier()
        self.h5_main.file.flush()
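
A small, self-contained sketch of writing an empty USID main dataset like Resistance above, assuming the pyUSID version these examples target; the group name, dimension names, and sizes are invented:

import h5py
import numpy as np
from pyUSID.io import hdf_utils
from pyUSID.io.write_utils import Dimension

num_pos, num_x_steps = 4, 8  # invented sizes
with h5py.File('write_main_sketch.h5', 'w') as h5_f:
    h5_grp = h5_f.create_group('Results_000')
    # Passing a shape tuple plus dtype (instead of data) creates an empty
    # main dataset; the position / spectroscopic ancillary datasets are
    # built from the Dimension descriptors and linked automatically.
    h5_res = hdf_utils.write_main_dataset(h5_grp, (num_pos, num_x_steps),
                                          'Resistance', 'Resistance', 'GOhms',
                                          Dimension('Pixel', 'a.u.', num_pos),
                                          Dimension('Bias', 'V', num_x_steps),
                                          dtype=np.float32)
    print(h5_res.shape)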
Example #11
    def _create_results_datasets(self):
        """
        Set up the Loop_Fit group and the loop projection datasets

        """
        # First grab the spectroscopic indices and values and position indices
        # TODO: Avoid unnecessary namespace pollution
        # self._sho_spec_inds = self.h5_main.h5_spec_inds
        # self._sho_spec_vals = self.h5_main.h5_spec_vals
        # self._sho_pos_inds = self.h5_main.h5_pos_inds

        # Which row in the spec datasets is DC offset?
        self._fit_spec_index = self.h5_main.spec_dim_labels.index(
            self._fit_dim_name)

        # TODO: Unknown usage of this variable; wasteful either way
        # self._fit_offset_index = 1 + self._fit_spec_index

        # Calculate the number of loops per position
        cycle_start_inds = np.argwhere(
            self.h5_main.h5_spec_inds[self._fit_spec_index, :] == 0).flatten()
        tot_cycles = cycle_start_inds.size
        if self.verbose:
            print('Found {} cycles starting at indices: {}'.format(
                tot_cycles, cycle_start_inds))

        # Make the results group
        self.h5_results_grp = create_results_group(self.h5_main,
                                                   self.process_name)
        write_simple_attrs(self.h5_results_grp, self.parms_dict)

        # Write datasets
        self.h5_projected_loops = create_empty_dataset(
            self.h5_main,
            np.float32,
            'Projected_Loops',
            h5_group=self.h5_results_grp)

        h5_loop_met_spec_inds, h5_loop_met_spec_vals = write_reduced_anc_dsets(
            self.h5_results_grp,
            self.h5_main.h5_spec_inds,
            self.h5_main.h5_spec_vals,
            self._fit_dim_name,
            basename='Loop_Metrics',
            verbose=self.verbose)

        self.h5_loop_metrics = write_main_dataset(
            self.h5_results_grp, (self.h5_main.shape[0], tot_cycles),
            'Loop_Metrics',
            'Metrics',
            'compound',
            None,
            None,
            dtype=loop_metrics32,
            h5_pos_inds=self.h5_main.h5_pos_inds,
            h5_pos_vals=self.h5_main.h5_pos_vals,
            h5_spec_inds=h5_loop_met_spec_inds,
            h5_spec_vals=h5_loop_met_spec_vals)

        # Copy region reference:
        # copy_region_refs(self.h5_main, self.h5_projected_loops)
        # copy_region_refs(self.h5_main, self.h5_loop_metrics)

        self.h5_main.file.flush()
        self._met_spec_inds = self.h5_loop_metrics.h5_spec_inds

        if self.verbose and self.mpi_rank == 0:
            print('Finished creating the results datasets')
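
write_reduced_anc_dsets builds the Loop_Metrics ancillary datasets by dropping the fit (DC offset) dimension, leaving one spectroscopic entry per cycle. Conceptually, in a toy numpy sketch:

import numpy as np

# Two spectroscopic-index rows: DC offset (fast) and cycle number (slow)
spec_inds = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
                      [0, 0, 0, 0, 1, 1, 1, 1]])

# Dropping the DC-offset dimension keeps one column per cycle
keep = spec_inds[0] == 0
reduced = spec_inds[1:, keep]
print(reduced)  # [[0 1]] -> Loop_Metrics spans 2 spectroscopic steps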
Example #12
    def _create_results_datasets(self):
        """
		Creates all the datasets necessary for holding all parameters + data.
		"""

        self.h5_results_grp = create_results_group(self.h5_main,
                                                   self.process_name)

        self.params_dict.update({
            'last_pixel': 0,
            'algorithm': 'pycroscopy_AdaptiveBayesianInference'
        })

        # Write in our full_V and num_pixels as attributes to this new group
        write_simple_attrs(self.h5_results_grp, self.params_dict)

        assert isinstance(self.h5_results_grp, h5py.Group)

        # If we ended up downsampling the data, create new spectroscopic datasets (i.e. smaller full_V's).
        # By convention, we convert the full_V back to a sine wave.
        if self.parse_mod != 1:
            h5_spec_inds_new, h5_spec_vals_new = write_ind_val_dsets(
                self.h5_results_grp,
                Dimension("Bias", "V", self.full_V.size),
                is_spectral=True)
            h5_spec_vals_new[()] = get_unshifted_response(
                self.full_V, self.shift_index)
        else:
            h5_spec_inds_new = self.h5_main.h5_spec_inds
            h5_spec_vals_new = self.h5_main.h5_spec_vals

        # Also make some new spectroscopic datasets for R and R_sig
        h5_spec_inds_R, h5_spec_vals_R = write_ind_val_dsets(
            self.h5_results_grp,
            Dimension("Bias", "V", 2 * self.M),
            is_spectral=True,
            base_name="Spectroscopic_R")
        h5_spec_vals_R[()] = np.concatenate((self.x, self.x)).T

        # Initialize our datasets
        # Note by convention, the spectroscopic values are stored as a sine wave
        # so i_recon and i_corrected are shifted at the end of bayesian_utils.process_pixel
        # accordingly.
        self.h5_R = write_main_dataset(self.h5_results_grp,
                                       (self.h5_main.shape[0], 2 * self.M),
                                       "Resistance",
                                       "Resistance",
                                       "GOhms",
                                       None,
                                       None,
                                       dtype=np.float64,
                                       h5_pos_inds=self.h5_main.h5_pos_inds,
                                       h5_pos_vals=self.h5_main.h5_pos_vals,
                                       h5_spec_inds=h5_spec_inds_R,
                                       h5_spec_vals=h5_spec_vals_R)

        assert isinstance(self.h5_R, usid.USIDataset)  # Quick sanity check
        self.h5_R_sig = create_empty_dataset(self.h5_R, np.float64, "R_sig")

        self.h5_capacitance = write_main_dataset(
            self.h5_results_grp, (self.h5_main.shape[0], 2),
            "Capacitance",
            "Capacitance",
            "pF",
            None,
            Dimension("Direction", "", 2),
            h5_pos_inds=self.h5_main.h5_pos_inds,
            h5_pos_vals=self.h5_main.h5_pos_vals,
            dtype=np.float64,
            aux_spec_prefix="Cap_Spec_")

        # Not sure what units this should be, so tentatively storing it as nanoamps
        self.h5_i_recon = write_main_dataset(
            self.h5_results_grp, (self.h5_main.shape[0], self.full_V.size),
            "Reconstructed_Current",
            "Current",
            "nA",
            None,
            None,
            dtype=np.float64,
            h5_pos_inds=self.h5_main.h5_pos_inds,
            h5_pos_vals=self.h5_main.h5_pos_vals,
            h5_spec_inds=h5_spec_inds_new,
            h5_spec_vals=h5_spec_vals_new)
        self.h5_i_corrected = create_empty_dataset(self.h5_i_recon, np.float64,
                                                   "Corrected_Current")
        '''
        # Initialize our datasets
        # Note, each pixel of the datasets will hold the forward and reverse sweeps concatenated together.
        # R and R_sig are plotted against [x, -x], and i_recon and i_corrected are plotted against full_V.
        self.h5_R = h5_results_grp.create_dataset("R", shape=(self.h5_main.shape[0], 2*self.M), dtype=np.float)
        self.h5_R_sig = h5_results_grp.create_dataset("R_sig", shape=(self.h5_main.shape[0], 2*self.M), dtype=np.float)
        self.h5_capacitance = h5_results_grp.create_dataset("capacitance", shape=(self.h5_main.shape[0], 2), dtype=np.float)
        self.h5_i_recon = h5_results_grp.create_dataset("i_recon", shape=(self.h5_main.shape[0], self.full_V.size), dtype=np.float)
        self.h5_i_corrected = h5_results_grp.create_dataset("i_corrected", shape=(self.h5_main.shape[0], self.full_V.size), dtype=np.float)
        '''

        if self.verbose: print("results datasets set up")
        self.h5_main.file.flush()
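
A self-contained sketch of write_ind_val_dsets as used above, again assuming pyUSID; it returns the indices and values datasets, and the auto-generated values can then be overwritten, as the example does with the sine-wave bias:

import h5py
import numpy as np
from pyUSID.io.hdf_utils import write_ind_val_dsets
from pyUSID.io.write_utils import Dimension

with h5py.File('anc_sketch.h5', 'w') as h5_f:
    h5_grp = h5_f.create_group('Results_000')
    h5_inds, h5_vals = write_ind_val_dsets(h5_grp,
                                           Dimension('Bias', 'V', 16),
                                           is_spectral=True)
    # Override the auto-generated linear ramp with a sine wave, mirroring
    # what the example does via get_unshifted_response
    h5_vals[()] = np.sin(np.linspace(0, 2 * np.pi, 16)).reshape(h5_vals.shape)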
Example #13
    def _create_results_datasets(self):
        """
        Creates the HDF5 datasets and groups to hold the results
        """
        # create all h5 datasets here:
        num_pos = self.h5_main.shape[0]

        if self.verbose and self.mpi_rank == 0:
            print('Now creating the datasets')

        self.h5_results_grp = create_results_group(self.h5_main, self.process_name)

        write_simple_attrs(self.h5_results_grp, {'algorithm_author': 'Kody J. Law', 'last_pixel': 0})
        write_simple_attrs(self.h5_results_grp, self.parms_dict)

        if self.verbose and self.mpi_rank == 0:
            print('created group: {} with attributes:'.format(self.h5_results_grp.name))
            print(get_attributes(self.h5_results_grp))

        # One of those rare instances when the result is exactly the same as the source
        self.h5_i_corrected = create_empty_dataset(self.h5_main, np.float32, 'Corrected_Current', h5_group=self.h5_results_grp)

        if self.verbose and self.mpi_rank == 0:
            print('Created I Corrected')
            # print_tree(self.h5_results_grp)

        # For some reason, we cannot specify chunks or compression!
        # The resistance dataset requires the creation of a new spectroscopic dimension
        self.h5_resistance = write_main_dataset(self.h5_results_grp, (num_pos, self.num_x_steps), 'Resistance', 'Resistance',
                                                'GOhms', None, Dimension('Bias', 'V', self.num_x_steps),
                                                dtype=np.float32, # chunks=(1, self.num_x_steps), #compression='gzip',
                                                h5_pos_inds=self.h5_main.h5_pos_inds,
                                                h5_pos_vals=self.h5_main.h5_pos_vals)

        if self.verbose and self.mpi_rank == 0:
            print('Created Resistance')
            # print_tree(self.h5_results_grp)

        assert isinstance(self.h5_resistance, USIDataset)  # only here for PyCharm
        self.h5_new_spec_vals = self.h5_resistance.h5_spec_vals

        # The variance is identical to the resistance dataset
        self.h5_variance = create_empty_dataset(self.h5_resistance, np.float32, 'R_variance')

        if self.verbose and self.mpi_rank == 0:
            print('Created Variance')
            # print_tree(self.h5_results_grp)

        # The capacitance dataset requires new spectroscopic dimensions as well
        self.h5_cap = write_main_dataset(self.h5_results_grp, (num_pos, 1), 'Capacitance', 'Capacitance', 'pF', None,
                                         Dimension('Direction', '', [1]),  h5_pos_inds=self.h5_main.h5_pos_inds,
                                         h5_pos_vals=self.h5_main.h5_pos_vals, dtype=cap_dtype, #compression='gzip',
                                         aux_spec_prefix='Cap_Spec_')

        if self.verbose and self.mpi_rank == 0:
            print('Created Capacitance')
            # print_tree(self.h5_results_grp)
            print('Done creating all results datasets!')

        if self.mpi_size > 1:
            self.mpi_comm.Barrier()
        self.h5_main.file.flush()
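
cap_dtype above is a compound numpy dtype defined elsewhere in pycroscopy; compound records are how results such as Capacitance and the SHO and loop fits are stored. A generic sketch of defining and using one, with invented field names:

import numpy as np

# Hypothetical two-field compound dtype in the spirit of cap_dtype
cap_dtype_sketch = np.dtype([('Forward', np.float32),
                             ('Reverse', np.float32)])

arr = np.zeros(3, dtype=cap_dtype_sketch)
arr['Forward'] = [1.0, 2.0, 3.0]
print(arr.dtype.names, arr['Forward'])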