Example 1
    def test_inequality(self):
        name = 'Bias'
        units = 'V'
        values = [0, 1, 2, 3]

        left = write_utils.Dimension(name, units, values)
        right = write_utils.Dimension(name, units, [0, 1, 2, 4])
        self.assertFalse(left == right)

        left = write_utils.Dimension(name, units, [0, 1, 2])
        right = write_utils.Dimension(name, units, values)
        self.assertFalse(left == right)

        left = write_utils.Dimension('name', units, values)
        right = write_utils.Dimension(name, units, values)
        self.assertFalse(left == right)

        left = write_utils.Dimension(name, 'units', values)
        right = write_utils.Dimension(name, units, values)
        self.assertFalse(left == right)

        left = write_utils.Dimension(name, units, values,
                                     mode=write_utils.DimType.DEPENDENT)
        right = write_utils.Dimension(name, units, values)
        self.assertFalse(left == right)
Example 2
    def test_not_numpy_or_dask_array_main(self):
        translator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', {'This is not a dataset': True}, 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3))
Example 3
    def test_equality(self):
        name = 'Bias'
        units = 'V'

        dim_1 = write_utils.Dimension(name, units, [0, 1, 2, 3, 4])
        dim_2 = write_utils.Dimension(name, units, np.arange(5, dtype=np.float32))
        self.assertEqual(dim_1, dim_2)
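
Note: as test_values_as_length (Example 16) also suggests, the values argument apparently accepts either an explicit sequence or an integer length that is expanded to the equivalent of np.arange(length). A minimal sketch under that assumption, not taken from the test suite (the import path is assumed from the test context):

    import numpy as np
    from pyUSID.io import write_utils  # assumed import path

    # An explicit array and a plain integer length appear to yield equivalent descriptors
    dim_a = write_utils.Dimension('Bias', 'V', np.arange(5, dtype=np.float32))
    dim_b = write_utils.Dimension('Bias', 'V', 5)
    assert dim_a == dim_b  # mirrors the equality semantics exercised above
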
Example 4
    def test_prod_sizes_mismatch(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        main_data = np.random.rand(15, 14)
        main_data_name = 'Test_Main'
        quantity = 'Current'
        dset_units = 'nA'

        pos_sizes = [5, 15]  # too many steps in the Y direction
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for length, name, units in zip(pos_sizes, pos_names, pos_units):
            pos_dims.append(
                write_utils.Dimension(name, units, np.arange(length)))

        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for length, name, units in zip(spec_sizes, spec_names, spec_units):
            spec_dims.append(
                write_utils.Dimension(name, units, np.arange(length)))

        with h5py.File(file_path, mode='w') as h5_f:
            with self.assertRaises(ValueError):
                _ = hdf_utils.write_main_dataset(h5_f, main_data,
                                                 main_data_name, quantity,
                                                 dset_units, pos_dims,
                                                 spec_dims)
        os.remove(file_path)
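
The ValueError above appears to come from a mismatch between the main dataset's shape and the product of the ancillary dimension sizes: main_data has 15 rows, but the position dimensions describe 5 * 15 = 75 positions, while the spectroscopic side (7 * 2 = 14) matches the 14 columns. A minimal, self-contained sketch of that consistency check (illustrative only):

    import numpy as np

    main_shape = (15, 14)                         # shape of main_data above
    pos_sizes, spec_sizes = [5, 15], [7, 2]
    assert np.prod(spec_sizes) == main_shape[1]   # 7 * 2 == 14, consistent
    assert np.prod(pos_sizes) != main_shape[0]    # 5 * 15 == 75 != 15, hence the expected ValueError
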
Example 5
    def test_objects(self):
        translator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(
                file_path, 'Blah', np.random.rand(5, 13), 'quant', 'unit',
                write_utils.Dimension('Dim_1', 'au', 5),
                ['blah', write_utils.Dimension('Dim_2', 'au', 4)])
Example 6
    def test_object_single(self):
        translator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(5, 13), 'quant', 'unit',
                                     'my_string_Dimension',
                                     [write_utils.Dimension('Spec_Dim', 'au', 3),
                                      write_utils.Dimension('Dim_2', 'au', 4)])
Example 7
    def test_position(self):
        translator = ArrayTranslator()
        with self.assertRaises(ValueError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(15, 3), 'quant', 'unit',
                                     [write_utils.Dimension('Dim_1', 'au', 5),
                                      write_utils.Dimension('Dim_2', 'au', 4)],
                                     write_utils.Dimension('Spec_Dim', 'au', 3))
Example 8
    def test_empty_name(self):
        translator = ArrayTranslator()
        with self.assertRaises(ValueError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(5, 3), 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3),
                                     extra_dsets={' ': [1, 2, 3]})
Example 9
    def test_not_arrays(self):
        translator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(5, 3), 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3),
                                     extra_dsets={'Blah_other': 'I am not an array'})
Example 10
    def test_reserved_names(self):
        translator = ArrayTranslator()
        with self.assertRaises(KeyError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(5, 3), 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3),
                                     extra_dsets={'Spectroscopic_Indices': np.arange(4),
                                                  'Blah_other': np.arange(15)})
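
The KeyError above is apparently triggered because an extra_dsets key collides with one of the standard USID dataset names that the translator writes itself (the five names checked in Examples 24 and 25: 'Raw_Data', 'Position_Indices', 'Position_Values', 'Spectroscopic_Indices', 'Spectroscopic_Values'). A minimal sketch of an extra_dsets argument that avoids the collision, using hypothetical key names:

    import numpy as np

    # Keys are arbitrary labels, but per this test they should not reuse the reserved names
    extra_dsets = {'Bias_Waveform': np.random.rand(7),    # hypothetical name
                   'Current_Offsets': np.arange(15)}      # hypothetical name
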
Example 11
    def test_main_dset_1D(self):
        translator = ArrayTranslator()
        with self.assertRaises(ValueError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.arange(4), 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3))

        with self.assertRaises(ValueError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', da.from_array(np.arange(4), chunks=(4)), 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3))
Example 12
    def test_aux_dset_descriptor_illegal(self):

        with self.assertRaises(TypeError):
            _ = write_utils.Dimension('Name', 14, np.arange(4))

        with self.assertRaises(TypeError):
            _ = write_utils.Dimension(14, 'nm', np.arange(4))

        with self.assertRaises(ValueError):
            _ = write_utils.Dimension('Name', 'unit', 0)

        with self.assertRaises(TypeError):
            _ = write_utils.Dimension('Name', 'unit', 'invalid')
Example 13
    def test_illegal_instantiation(self):

        with self.assertRaises(TypeError):
            _ = write_utils.Dimension('Name', 14, np.arange(4))

        with self.assertRaises(TypeError):
            _ = write_utils.Dimension(14, 'nm', np.arange(4))

        with self.assertRaises(ValueError):
            _ = write_utils.Dimension('Name', 'unit', 0)

        with self.assertRaises(TypeError):
            _ = write_utils.Dimension('Name', 'unit', 'invalid')
Example 14
    def test_values_as_array(self):
        name = 'Bias'
        units = 'V'
        values = np.random.rand(5)

        descriptor = write_utils.Dimension(name, units, values)
        for expected, actual in zip([name, units, values],
                                    [descriptor.name, descriptor.units, descriptor.values]):
            self.assertTrue(np.all([x == y for x, y in zip(expected, actual)]))
Example 15
    def test_repr(self):
        name = 'Bias'
        units = 'V'
        values = np.arange(5)

        descriptor = write_utils.Dimension(name, units, len(values))
        actual = '{}'.format(descriptor)
        expected = '{} ({}) mode:{} : {}'.format(name, units, descriptor.mode,
                                                 values)
        self.assertEqual(actual, expected)
Example 16
    def test_values_as_length(self):
        name = 'Bias'
        units = 'V'
        values = np.arange(5)

        descriptor = write_utils.Dimension(name, units, len(values))
        for expected, actual in zip([name, units],
                                    [descriptor.name, descriptor.units]):
            self.assertTrue(np.all([x == y for x, y in zip(expected, actual)]))
        self.assertTrue(np.allclose(values, descriptor.values))
Example 17
    def test_repr(self):
        name = 'Bias'
        quantity = 'generic'
        units = 'V'
        values = np.arange(5, dtype=float)

        descriptor = write_utils.Dimension(name, units, len(values))
        print(type(descriptor))
        actual = '{}'.format(descriptor)
        expected = '{}: {} ({}) mode:{} : {}'.format(name, quantity, units, descriptor.mode, values)
        self.assertEqual(actual, expected)
Example 18
    def test_inequality(self):
        name = 'Bias'
        units = 'V'

        self.assertNotEqual(write_utils.Dimension(name, units, [0, 1, 2, 3]),
                            write_utils.Dimension(name, units, [0, 1, 2, 4]))

        self.assertNotEqual(write_utils.Dimension('fdfd', units, [0, 1, 2, 3]),
                            write_utils.Dimension(name, units, [0, 1, 2, 3]))

        self.assertNotEqual(write_utils.Dimension(name, 'fdfd', [0, 1, 2, 3]),
                            write_utils.Dimension(name, units, [0, 1, 2, 3]))

        self.assertNotEqual(
            write_utils.Dimension(name,
                                  units, [0, 1, 2, 3],
                                  mode=write_utils.DimType.DEPENDENT),
            write_utils.Dimension(name,
                                  units, [0, 1, 2, 3],
                                  mode=write_utils.DimType.INCOMPLETE))

        self.assertNotEqual(write_utils.Dimension(name, units, [0, 1, 2]),
                            write_utils.Dimension(name, units, [0, 1, 2, 3]))
Example 19
    def test_not_strings(self):
        translator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 1.2345, np.random.rand(5, 3), 'quant', 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3))

        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(5, 3), {'quant': 1}, 'unit',
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3))

        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = translator.translate(file_path, 'Blah', np.random.rand(5, 3), 'quant', ['unit'],
                                     write_utils.Dimension('Position_Dim', 'au', 5),
                                     write_utils.Dimension('Spec_Dim', 'au', 3))
Example 20
    def test_existing_both_aux(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        main_data = np.random.rand(15, 14)
        main_data_name = 'Test_Main'
        quantity = 'Current'
        dset_units = 'nA'

        pos_sizes = [5, 3]
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for length, name, units in zip(pos_sizes, pos_names, pos_units):
            pos_dims.append(
                write_utils.Dimension(name, units, np.arange(length)))
        pos_data = np.vstack((np.tile(np.arange(5),
                                      3), np.repeat(np.arange(3), 5))).T

        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for length, name, units in zip(spec_sizes, spec_names, spec_units):
            spec_dims.append(
                write_utils.Dimension(name, units, np.arange(length)))
        spec_data = np.vstack((np.tile(np.arange(7),
                                       2), np.repeat(np.arange(2), 7)))

        with h5py.File(file_path, mode='w') as h5_f:
            h5_spec_inds, h5_spec_vals = hdf_utils.write_ind_val_dsets(
                h5_f, spec_dims, is_spectral=True)
            h5_pos_inds, h5_pos_vals = hdf_utils.write_ind_val_dsets(
                h5_f, pos_dims, is_spectral=False)

            usid_main = hdf_utils.write_main_dataset(h5_f,
                                                     main_data,
                                                     main_data_name,
                                                     quantity,
                                                     dset_units,
                                                     None,
                                                     None,
                                                     h5_spec_inds=h5_spec_inds,
                                                     h5_spec_vals=h5_spec_vals,
                                                     h5_pos_vals=h5_pos_vals,
                                                     h5_pos_inds=h5_pos_inds,
                                                     main_dset_attrs=None)

            data_utils.validate_aux_dset_pair(self,
                                              h5_f,
                                              h5_pos_inds,
                                              h5_pos_vals,
                                              pos_names,
                                              pos_units,
                                              pos_data,
                                              h5_main=usid_main,
                                              is_spectral=False)

            data_utils.validate_aux_dset_pair(self,
                                              h5_f,
                                              h5_spec_inds,
                                              h5_spec_vals,
                                              spec_names,
                                              spec_units,
                                              spec_data,
                                              h5_main=usid_main,
                                              is_spectral=True)
        os.remove(file_path)
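
The pos_data and spec_data arrays built in this example are the expected contents of the ancillary index/value matrices: as the construction implies, the first-listed dimension (X with 5 steps, Bias with 7 steps) varies fastest and the later one slowest. A minimal, self-contained sketch of the position case:

    import numpy as np

    # Expected Position ancillary matrix: X (5 steps) cycles fastest, Y (3 steps) slowest
    pos_data = np.vstack((np.tile(np.arange(5), 3),
                          np.repeat(np.arange(3), 5))).T
    assert pos_data.shape == (15, 2)  # one row per position, one column per dimension
    # pos_data[:6] -> [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [0, 1]]
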
Example 21
    def test_empty(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        main_data = (15, 14)
        main_data_name = 'Test_Main'
        quantity = 'Current'
        dset_units = 'nA'

        pos_sizes = [5, 3]
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']

        pos_dims = []
        for length, name, units in zip(pos_sizes, pos_names, pos_units):
            pos_dims.append(
                write_utils.Dimension(name, units, np.arange(length)))
        pos_data = np.vstack((np.tile(np.arange(5),
                                      3), np.repeat(np.arange(3), 5))).T

        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for length, name, units in zip(spec_sizes, spec_names, spec_units):
            spec_dims.append(
                write_utils.Dimension(name, units, np.arange(length)))
        spec_data = np.vstack((np.tile(np.arange(7),
                                       2), np.repeat(np.arange(2), 7)))

        with h5py.File(file_path, mode='w') as h5_f:
            usid_main = hdf_utils.write_main_dataset(h5_f,
                                                     main_data,
                                                     main_data_name,
                                                     quantity,
                                                     dset_units,
                                                     pos_dims,
                                                     spec_dims,
                                                     dtype=np.float16,
                                                     main_dset_attrs=None)
            self.assertIsInstance(usid_main, USIDataset)
            self.assertEqual(usid_main.name.split('/')[-1], main_data_name)
            self.assertEqual(usid_main.parent, h5_f)
            self.assertEqual(main_data, usid_main.shape)

            data_utils.validate_aux_dset_pair(self,
                                              h5_f,
                                              usid_main.h5_pos_inds,
                                              usid_main.h5_pos_vals,
                                              pos_names,
                                              pos_units,
                                              pos_data,
                                              h5_main=usid_main,
                                              is_spectral=False)

            data_utils.validate_aux_dset_pair(self,
                                              h5_f,
                                              usid_main.h5_spec_inds,
                                              usid_main.h5_spec_vals,
                                              spec_names,
                                              spec_units,
                                              spec_data,
                                              h5_main=usid_main,
                                              is_spectral=True)
        os.remove(file_path)
Example 22
    def test_default_mode(self):
        dim = write_utils.Dimension('Name', 'units', 1)
        self.assertEqual(dim.mode, write_utils.DimType.DEFAULT)
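
For the non-default case, the mode keyword takes a member of the DimType enum, as exercised in Examples 1 and 18; passing a plain string such as 'Incomplete' raises TypeError (Example 23). A minimal sketch under those assumptions:

    from pyUSID.io import write_utils  # assumed import path, as above

    dim = write_utils.Dimension('Bias', 'V', 5, mode=write_utils.DimType.DEPENDENT)
    assert dim.mode == write_utils.DimType.DEPENDENT
    # write_utils.Dimension('Bias', 'V', 5, mode='Incomplete') would raise TypeError
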
Example 23
    def test_invalid_mode(self):
        with self.assertRaises(TypeError):
            _ = write_utils.Dimension('Name', 'units', 5, mode='Incomplete')
Example 24
    def test_legal_translation(self):
        data_name = 'TestDataType'
        attrs = {
            'att_1': 'string_val',
            'att_2': 1.2345,
            'att_3': [1, 2, 3, 4],
            'att_4': ['str_1', 'str_2', 'str_3']
        }

        extra_dsets = {'dset_1': np.random.rand(5), 'dset_2': np.arange(25)}

        file_path = 'test_numpy_translator.h5'
        self.__delete_existing_file(file_path)
        main_data = np.random.rand(15, 14)
        main_data_name = 'Test_Main'
        quantity = 'Current'
        units = 'nA'

        pos_sizes = [5, 3]
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for name, unit, length in zip(pos_names, pos_units, pos_sizes):
            pos_dims.append(
                write_utils.Dimension(name, unit, np.arange(length)))
        pos_data = np.vstack((np.tile(np.arange(5),
                                      3), np.repeat(np.arange(3), 5))).T

        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for name, unit, length in zip(spec_names, spec_units, spec_sizes):
            spec_dims.append(
                write_utils.Dimension(name, unit, np.arange(length)))

        spec_data = np.vstack((np.tile(np.arange(7),
                                       2), np.repeat(np.arange(2), 7)))

        translator = NumpyTranslator()
        _ = translator.translate(file_path,
                                 data_name,
                                 main_data,
                                 quantity,
                                 units,
                                 pos_dims,
                                 spec_dims,
                                 parm_dict=attrs,
                                 extra_dsets=extra_dsets)

        with h5py.File(file_path, mode='r') as h5_f:
            # we are not interested in most of the attributes under root besides two:
            self.assertEqual(data_name, hdf_utils.get_attr(h5_f, 'data_type'))
            self.assertEqual('NumpyTranslator',
                             hdf_utils.get_attr(h5_f, 'translator'))

            # First level should have absolutely nothing besides one group
            self.assertEqual(len(h5_f.items()), 1)
            self.assertTrue('Measurement_000' in h5_f.keys())
            h5_meas_grp = h5_f['Measurement_000']
            self.assertIsInstance(h5_meas_grp, h5py.Group)

            # check the attributes under this group
            self.assertEqual(len(h5_meas_grp.attrs), len(attrs))
            for key, expected_val in attrs.items():
                self.assertTrue(
                    np.all(
                        hdf_utils.get_attr(h5_meas_grp, key) == expected_val))

            # Again, this group should only have one group - Channel_000
            self.assertEqual(len(h5_meas_grp.items()), 1)
            self.assertTrue('Channel_000' in h5_meas_grp.keys())
            h5_chan_grp = h5_meas_grp['Channel_000']
            self.assertIsInstance(h5_chan_grp, h5py.Group)

            # This channel group is not expected to have any attributes but it will contain the main dataset
            self.assertEqual(len(h5_chan_grp.items()), 5 + len(extra_dsets))
            for dset_name in [
                    'Raw_Data', 'Position_Indices', 'Position_Values',
                    'Spectroscopic_Indices', 'Spectroscopic_Values'
            ]:
                self.assertTrue(dset_name in h5_chan_grp.keys())
                h5_dset = h5_chan_grp[dset_name]
                self.assertIsInstance(h5_dset, h5py.Dataset)

            pycro_main = USIDataset(h5_chan_grp['Raw_Data'])

            self.assertIsInstance(pycro_main, USIDataset)
            self.assertEqual(pycro_main.name.split('/')[-1], 'Raw_Data')
            self.assertEqual(pycro_main.parent, h5_chan_grp)
            self.assertTrue(np.allclose(main_data, pycro_main[()]))

            self.__validate_aux_dset_pair(h5_chan_grp,
                                          pycro_main.h5_pos_inds,
                                          pycro_main.h5_pos_vals,
                                          pos_names,
                                          pos_units,
                                          pos_data,
                                          h5_main=pycro_main,
                                          is_spectral=False)

            self.__validate_aux_dset_pair(h5_chan_grp,
                                          pycro_main.h5_spec_inds,
                                          pycro_main.h5_spec_vals,
                                          spec_names,
                                          spec_units,
                                          spec_data,
                                          h5_main=pycro_main,
                                          is_spectral=True)

            # Now validate each of the extra datasets:
            for key, val in extra_dsets.items():
                self.assertTrue(key in h5_chan_grp.keys())
                h5_dset = h5_chan_grp[key]
                self.assertIsInstance(h5_dset, h5py.Dataset)
                self.assertTrue(np.allclose(val, h5_dset[()]))

        os.remove(file_path)
Example 25
    def base_translation_tester(self,
                                main_dset_as_dask=False,
                                extra_dsets_type='numpy',
                                use_parm_dict=True):
        data_name = 'My_Awesome_Measurement'

        if use_parm_dict:
            attrs = {
                'att_1': 'string_val',
                'att_2': 1.2345,
                'att_3': [1, 2, 3, 4],
                'att_4': ['str_1', 'str_2', 'str_3']
            }
        else:
            attrs = None

        extra_dsets = {}
        if extra_dsets_type is not None:
            ref_dsets = {'dset_1': np.random.rand(5), 'dset_2': np.arange(25)}
            if extra_dsets_type == 'numpy':
                extra_dsets = ref_dsets
            elif extra_dsets_type == 'dask':
                for key, val in ref_dsets.items():
                    extra_dsets.update(
                        {key: da.from_array(val, chunks=val.shape)})
            else:
                extra_dsets_type = None

        delete_existing_file(file_path)

        main_data = np.random.rand(15, 14)
        if main_dset_as_dask:
            main_data = da.from_array(main_data, chunks=main_data.shape)
        quantity = 'Current'
        units = 'nA'

        pos_sizes = [5, 3]
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for name, unit, length in zip(pos_names, pos_units, pos_sizes):
            pos_dims.append(
                write_utils.Dimension(name, unit, np.arange(length)))
        pos_data = np.vstack((np.tile(np.arange(5),
                                      3), np.repeat(np.arange(3), 5))).T

        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for name, unit, length in zip(spec_names, spec_units, spec_sizes):
            spec_dims.append(
                write_utils.Dimension(name, unit, np.arange(length)))

        spec_data = np.vstack((np.tile(np.arange(7),
                                       2), np.repeat(np.arange(2), 7)))

        translator = ArrayTranslator()
        _ = translator.translate(file_path,
                                 data_name,
                                 main_data,
                                 quantity,
                                 units,
                                 pos_dims,
                                 spec_dims,
                                 parm_dict=attrs,
                                 extra_dsets=extra_dsets)

        with h5py.File(file_path, mode='r') as h5_f:
            # we are not interested in most of the attributes under root besides two:
            self.assertEqual(data_name, hdf_utils.get_attr(h5_f, 'data_type'))
            # self.assertEqual('NumpyTranslator', hdf_utils.get_attr(h5_f, 'translator'))

            # First level should have absolutely nothing besides one group
            self.assertEqual(len(h5_f.items()), 1)
            self.assertTrue('Measurement_000' in h5_f.keys())
            h5_meas_grp = h5_f['Measurement_000']
            self.assertIsInstance(h5_meas_grp, h5py.Group)

            # check the attributes under this group
            # self.assertEqual(len(h5_meas_grp.attrs), len(attrs))
            if use_parm_dict:
                for key, expected_val in attrs.items():
                    self.assertTrue(
                        np.all(
                            hdf_utils.get_attr(h5_meas_grp, key) ==
                            expected_val))

            # Again, this group should only have one group - Channel_000
            self.assertEqual(len(h5_meas_grp.items()), 1)
            self.assertTrue('Channel_000' in h5_meas_grp.keys())
            h5_chan_grp = h5_meas_grp['Channel_000']
            self.assertIsInstance(h5_chan_grp, h5py.Group)

            # This channel group is not expected to have any (custom) attributes but it will contain the main dataset
            self.assertEqual(len(h5_chan_grp.items()), 5 + len(extra_dsets))
            for dset_name in [
                    'Raw_Data', 'Position_Indices', 'Position_Values',
                    'Spectroscopic_Indices', 'Spectroscopic_Values'
            ]:
                self.assertTrue(dset_name in h5_chan_grp.keys())
                h5_dset = h5_chan_grp[dset_name]
                self.assertIsInstance(h5_dset, h5py.Dataset)

            usid_main = USIDataset(h5_chan_grp['Raw_Data'])

            self.assertIsInstance(usid_main, USIDataset)
            self.assertEqual(usid_main.name.split('/')[-1], 'Raw_Data')
            self.assertEqual(usid_main.parent, h5_chan_grp)
            self.assertTrue(np.allclose(main_data, usid_main[()]))

            validate_aux_dset_pair(self,
                                   h5_chan_grp,
                                   usid_main.h5_pos_inds,
                                   usid_main.h5_pos_vals,
                                   pos_names,
                                   pos_units,
                                   pos_data,
                                   h5_main=usid_main,
                                   is_spectral=False)

            validate_aux_dset_pair(self,
                                   h5_chan_grp,
                                   usid_main.h5_spec_inds,
                                   usid_main.h5_spec_vals,
                                   spec_names,
                                   spec_units,
                                   spec_data,
                                   h5_main=usid_main,
                                   is_spectral=True)

            # Now validate each of the extra datasets:
            if extra_dsets_type is not None:
                for key, val in extra_dsets.items():
                    self.assertTrue(key in h5_chan_grp.keys())
                    h5_dset = h5_chan_grp[key]
                    self.assertIsInstance(h5_dset, h5py.Dataset)
                    if extra_dsets_type == 'dask':
                        val = val.compute()
                    self.assertTrue(np.allclose(val, h5_dset[()]))

        os.remove(file_path)