Code Example #1
File: test_nsdfwriter.py  Project: neurord/nsdf
 def test_append_data(self):
     """Try appending data to existing NaN-padded nonuniform dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath, mode='a',
                              dialect=nsdf.dialect.NANPADDED)
     source_ds = writer.mapping[nsdf.NONUNIFORM][self.popname]
     self.assertTrue(nsdf.match_datasets(self.sources, source_ds))
     rate = 100.0
     new_dlen = np.random.poisson(lam=rate, size=len(self.sources))
     for ii, uid in enumerate(self.sources):
         data = np.cumsum(np.random.exponential(scale=1.0/rate,
                                                size=new_dlen[ii]))
         time = np.random.uniform(0, 1, size=new_dlen[ii])
         self.data_object.put_data(uid, (data, time))
     writer.add_nonuniform_nan(source_ds, self.data_object)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         data_path = '/data/{}/{}/{}'.format(nsdf.NONUNIFORM,
                                             self.popname,
                                             self.data_object.name)
         dataset = fd[data_path]
         time_ds = dataset.dims[1]['time']
         for ii, uid in enumerate(self.sources):
             orig_data, orig_time = self.data_object.get_data(uid)
             file_data = dataset[ii,
                                 self.dlen[ii]:self.dlen[ii]+new_dlen[ii]]
             nptest.assert_allclose(orig_data, file_data)
             nptest.assert_allclose(dataset[ii, self.dlen[ii] +
                                            new_dlen[ii]:], np.nan)
             file_time = time_ds[ii,
                                 self.dlen[ii]:self.dlen[ii]+new_dlen[ii]]
             nptest.assert_allclose(orig_time, file_time)
             nptest.assert_allclose(time_ds[ii, self.dlen[ii] +
                                            new_dlen[ii]:], np.nan)
     os.remove(self.filepath)
Code Example #2
File: test_nsdfwriter.py  Project: nsdf/nsdf
 def test_append_data(self):
     """Try appending data to existing NaN-padded nonuniform dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='a',
                              dialect=nsdf.dialect.NANPADDED)
     source_ds = writer.mapping[nsdf.NONUNIFORM][self.popname]
     self.assertTrue(nsdf.match_datasets(self.sources, source_ds))
     rate = 100.0
     new_dlen = np.random.poisson(lam=rate, size=len(self.sources))
     for ii, uid in enumerate(self.sources):
         data = np.cumsum(
             np.random.exponential(scale=1.0 / rate, size=new_dlen[ii]))
         time = np.random.uniform(0, 1, size=new_dlen[ii])
         self.data_object.put_data(uid, (data, time))
     writer.add_nonuniform_nan(source_ds, self.data_object)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         data_path = '/data/{}/{}/{}'.format(nsdf.NONUNIFORM, self.popname,
                                             self.data_object.name)
         dataset = fd[data_path]
         time_ds = dataset.dims[1]['time']
         for ii, uid in enumerate(self.sources):
             orig_data, orig_time = self.data_object.get_data(uid)
             file_data = dataset[ii,
                                 self.dlen[ii]:self.dlen[ii] + new_dlen[ii]]
             nptest.assert_allclose(orig_data, file_data)
             nptest.assert_allclose(dataset[ii, self.dlen[ii] + new_dlen[ii]:],
                                    np.nan)
             file_time = time_ds[ii,
                                 self.dlen[ii]:self.dlen[ii] + new_dlen[ii]]
             nptest.assert_allclose(orig_time, file_time)
             nptest.assert_allclose(
                 time_ds[ii, self.dlen[ii] + new_dlen[ii]:], np.nan)
     os.remove(self.filepath)
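
Both copies of test_append_data check the same behavior: the file is reopened in append mode with the NANPADDED dialect, a fresh batch of samples per source goes through add_nonuniform_nan, and every row of the 2D dataset must then contain the old samples, the new samples, and NaN padding, with the sampling times attached as a dimension scale labelled 'time' on axis 1. The sketch below shows how such a file could be read back with plain h5py and numpy outside the test harness; it assumes nsdf.NONUNIFORM expands to the path component 'nonuniform' (consistent with the '/map/uniform/pop0' path in the later examples), and the function name and arguments are hypothetical.

import h5py as h5
import numpy as np

def read_nonuniform_nan(filepath, popname, dataname):
    """Read a NaN-padded nonuniform dataset back, dropping the padding.

    Returns {row_index: (data, time)} with the NaN tail stripped.
    The path layout mirrors the tests above; 'nonuniform' is assumed
    to be the string behind nsdf.NONUNIFORM.
    """
    result = {}
    with h5.File(filepath, 'r') as fd:
        dataset = fd['/data/nonuniform/{}/{}'.format(popname, dataname)]
        time_ds = dataset.dims[1]['time']  # dimension scale attached by the writer
        for ii in range(dataset.shape[0]):
            row = dataset[ii, :]
            valid = ~np.isnan(row)         # NaN marks unused (padded) columns
            result[ii] = (row[valid], time_ds[ii, :][valid])
    return result
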
Code Example #3
File: test_nsdfwriter.py  Project: nsdf/nsdf
 def test_source_ds(self):
     with h5.File(self.filepath, 'r') as fd:
         source_ds_name = '/map/{}/{}'.format(nsdf.NONUNIFORM, self.popname)
         source_ds = fd[source_ds_name]
         self.assertTrue(
             nsdf.match_datasets(source_ds, self.data_object.get_sources()))
     os.remove(self.filepath)
Code Example #4
File: test_nsdfwriter.py  Project: neurord/nsdf
 def test_source_ds(self):
     with h5.File(self.filepath, 'r') as fd:
         source_ds_name = '/map/{}/{}'.format(nsdf.NONUNIFORM,
                                              self.popname)
         source_ds = fd[source_ds_name]
         self.assertTrue(nsdf.match_datasets(source_ds,
                                             self.data_object.get_sources()))
     os.remove(self.filepath)
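
Code Examples #3 and #4 make the same assertion: the '/map/nonuniform/<population>' dataset written by NSDFWriter must list exactly the sources reported by the data object, which is what nsdf.match_datasets verifies. For readers without the library at hand, a rough hand-rolled equivalent using only h5py might look like the sketch below (order-insensitive, bytes decoded to str); this is an approximation, not the library's implementation.

import h5py as h5

def sources_match(filepath, map_path, expected_sources):
    """Approximate nsdf.match_datasets for a simple 1D source dataset:
    compare the stored ids against an expected iterable, ignoring order."""
    with h5.File(filepath, 'r') as fd:
        stored = [s.decode() if isinstance(s, bytes) else str(s)
                  for s in fd[map_path][:]]
    return set(stored) == set(str(s) for s in expected_sources)

# Example usage (names hypothetical):
# sources_match('test.h5', '/map/nonuniform/pop0', data_object.get_sources())
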
Code Example #5
File: test_nsdfwriter.py  Project: nsdf/nsdf
 def test_source_ds(self):
     with h5.File(self.filepath, 'r') as fd:
         try:
             source_ds_path = 'map/{}/{}'.format(nsdf.EVENT, self.popname)
             source_ds = fd[source_ds_path]
         except KeyError:
             self.fail('{} does not exist after'
                       ' adding event data sources'.format(source_ds_path))
         self.assertTrue(nsdf.match_datasets(source_ds, self.sources))
     os.remove(self.filepath)
Code Example #6
File: test_nsdfwriter.py  Project: neurord/nsdf
 def test_source_ds(self):
     with h5.File(self.filepath, 'r') as fd:
         try:
             source_ds_path = 'map/{}/{}'.format(nsdf.EVENT, self.popname)
             source_ds = fd[source_ds_path]
         except KeyError:
             self.fail('{} does not exist after'
                       ' adding event data sources'.format(source_ds_path))
         self.assertTrue(nsdf.match_datasets(source_ds,
                                             self.sources))
     os.remove(self.filepath)
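
Code Examples #5 and #6 repeat the pattern for event data; only the map path changes to 'map/<event>/<population>'. Because h5py groups support the 'in' operator with slash-separated paths, the try/except KeyError block can be replaced by assertIn. The variant below is only a sketch of that alternative idiom, reusing the same fixture attributes as the test above; it is not code from the project.

def test_source_ds(self):
    source_ds_path = 'map/{}/{}'.format(nsdf.EVENT, self.popname)
    with h5.File(self.filepath, 'r') as fd:
        # 'path in fd' checks the whole link path, so no KeyError handling is needed
        self.assertIn(source_ds_path, fd,
                      '{} does not exist after adding event data'
                      ' sources'.format(source_ds_path))
        self.assertTrue(nsdf.match_datasets(fd[source_ds_path], self.sources))
    os.remove(self.filepath)
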
Code Example #7
File: test_nsdfwriter.py  Project: neurord/nsdf
    def test_source_ds(self):
        """Add the soma (gc_0) all the granule cells in olfactory bulb model
        as data sources for nonuniformly sampled data.

        """
        with h5.File(self.filepath, 'r') as fd:
            try:
                source_ds_path = '/map/{}/{}/{}'.format(
                    nsdf.NONUNIFORM,
                    self.popname, self.data_object.name)
                source_ds = fd[source_ds_path]
            except KeyError:
                self.fail('{} not created.'.format(source_ds_path))
            self.assertTrue(nsdf.match_datasets(source_ds['source'],
                                                self.data_object.get_sources()))
        os.remove(self.filepath)
Code Example #8
File: test_nsdfwriter.py  Project: nsdf/nsdf
    def test_source_ds(self):
        """Add the soma (gc_0) all the granule cells in olfactory bulb model
        as data sources for nonuniformly sampled data.

        """
        with h5.File(self.filepath, 'r') as fd:
            try:
                source_ds_path = '/map/{}/{}/{}'.format(
                    nsdf.NONUNIFORM, self.popname, self.data_object.name)
                source_ds = fd[source_ds_path]
            except KeyError:
                self.fail('{} not created.'.format(source_ds_path))
            self.assertTrue(
                nsdf.match_datasets(source_ds['source'],
                                    self.data_object.get_sources()))
        os.remove(self.filepath)
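
Code Examples #7 and #8 differ from the earlier test_source_ds variants in two ways: the map entry lives one level deeper, at '/map/nonuniform/<population>/<dataset name>', and it is indexed by field name via source_ds['source'], so the mapping here is a compound dataset with at least a 'source' column. Reading that column with plain h5py is sketched below; the file name, population, and dataset name are placeholders.

import h5py as h5

with h5.File('test.h5', 'r') as fd:                   # placeholder file name
    source_ds = fd['/map/nonuniform/pop0/data_name']  # path shape from the test; names hypothetical
    # Indexing a compound dataset by field name returns only that column
    source_ids = [s.decode() if isinstance(s, bytes) else str(s)
                  for s in source_ds['source']]
    print(source_ids)
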
Code Example #9
File: test_nsdfwriter.py  Project: neurord/nsdf
    def test_add_uniform_ds(self):
        """Add the soma (gc_0) all the granule cells in olfactory bulb model
        as data sources for uniformly sampled data.

        """
        with h5.File(self.filepath, 'r') as fd:
            try:
                uniform_group = fd['map']['uniform']
            except KeyError:
                self.fail('/map/uniform group does not exist after'
                          ' adding uniform data sources')
            try:
                uniform_ds = fd['/map/uniform/pop0']
            except KeyError:
                self.fail('pop0 not created.')
            self.assertTrue(nsdf.match_datasets(uniform_ds, self.granule_somata))
        os.remove(self.filepath)
Code Example #10
File: test_nsdfwriter.py  Project: nsdf/nsdf
    def test_add_uniform_ds(self):
        """Add the soma (gc_0) all the granule cells in olfactory bulb model
        as data sources for uniformly sampled data.

        """
        with h5.File(self.filepath, 'r') as fd:
            try:
                uniform_group = fd['map']['uniform']
            except KeyError:
                self.fail('/map/uniform group does not exist after'
                          ' adding uniform data sources')
            try:
                uniform_ds = fd['/map/uniform/pop0']
            except KeyError:
                self.fail('pop0 not created.')
            self.assertTrue(
                nsdf.match_datasets(uniform_ds, self.granule_somata))
        os.remove(self.filepath)
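
When one of these existence checks fails, it helps to see what the writer actually created under /map before guessing at path strings. h5py's Group.visit walks a subtree and calls the supplied function with each member's relative path, so a short exploratory snippet is usually enough (the file name is a placeholder):

import h5py as h5

with h5.File('test.h5', 'r') as fd:   # placeholder file name
    # Prints every group/dataset path below /map, e.g. 'uniform' and 'uniform/pop0'
    fd['/map'].visit(print)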