Example #1
    def test_write_block(self):
        """ Test that writing to a user block does not destroy the file """
        name = self.mktemp()

        f = File(name, 'w', userblock_size=512)
        f.create_group("Foobar")
        f.close()

        pyfile = open(name, 'r+b')
        try:
            pyfile.write(b'X'*512)
        finally:
            pyfile.close()

        f = h5py.File(name, 'r')
        try:
            assert "Foobar" in f
        finally:
            f.close()

        pyfile = open(name, 'rb')
        try:
            self.assertEqual(pyfile.read(512), b'X'*512)
        finally:
            pyfile.close()
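As a side note, the user block size chosen at creation can be read back from any later open of the file; a minimal sketch (hypothetical file name):

import h5py

f = h5py.File('ub.h5', 'w', userblock_size=512)
f.close()
f = h5py.File('ub.h5', 'r')
assert f.userblock_size == 512  # HDF5 data begins after this 512-byte prefix
f.close()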
Example #2
class BaseDataset(TestCase):

    """
    data is a 3-dimensional dataset with dimensions [z, y, x]

    The z dimension is labeled. It does not have any attached scales.
    The y dimension is not labeled. It has one attached scale.
    The x dimension is labeled. It has two attached scales.

    data2 is a 3-dimensional dataset with no associated dimension scales.
    """

    def setUp(self):
        self.f = File(self.mktemp(), 'w')
        self.f['data'] = np.ones((4, 3, 2), 'f')
        self.f['data2'] = np.ones((4, 3, 2), 'f')
        self.f['x1'] = np.ones((2), 'f')
        h5py.h5ds.set_scale(self.f['x1'].id)
        h5py.h5ds.attach_scale(self.f['data'].id, self.f['x1'].id, 2)
        self.f['x2'] = np.ones((2), 'f')
        h5py.h5ds.set_scale(self.f['x2'].id, b'x2 name')
        h5py.h5ds.attach_scale(self.f['data'].id, self.f['x2'].id, 2)
        self.f['y1'] = np.ones((3), 'f')
        h5py.h5ds.set_scale(self.f['y1'].id, b'y1 name')
        h5py.h5ds.attach_scale(self.f['data'].id, self.f['y1'].id, 1)
        self.f['z1'] = np.ones((4), 'f')

        h5py.h5ds.set_label(self.f['data'].id, 0, b'z')
        h5py.h5ds.set_label(self.f['data'].id, 2, b'x')

    def tearDown(self):
        if self.f:
            self.f.close()
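The low-level h5py.h5ds calls above have high-level counterparts on Dataset.dims; a minimal sketch of the same idea using that API (assumes h5py 2.10+ for make_scale; hypothetical file name):

import h5py
import numpy as np

f = h5py.File('scales.h5', 'w')
f['data'] = np.ones((4, 3, 2), 'f')
f['x1'] = np.ones(2, 'f')
f['x1'].make_scale('x1 name')              # high-level set_scale
f['data'].dims[2].attach_scale(f['x1'])    # high-level attach_scale
f['data'].dims[2].label = 'x'              # high-level set_label
assert f['data'].dims[2].label == 'x'
assert len(f['data'].dims[2]) == 1         # one scale attached to the x axis
f.close()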
Example #3
 def test_blocksize(self):
     """ Core driver supports variable block size """
     fname = self.mktemp()
     fid = File(fname, 'w', driver='core', block_size=1024,
                backing_store=False)
     self.assertTrue(fid)
     fid.close()
Example #4
    def test_issue_212(self):
        """ Issue 212

        Fails with:

        AttributeError: 'SharedConfig' object has no attribute 'lapl'
        """
        def closer(x):
            def w():
                try:
                    if x:
                        x.close()
                except IOError:
                    pass
            return w
        orig_name = self.mktemp()
        new_name = self.mktemp()
        f = File(orig_name, 'w')
        self.addCleanup(closer(f))
        f.create_group('a')
        f.close()

        g = File(new_name, 'w')
        self.addCleanup(closer(g))
        g['link'] = ExternalLink(orig_name, '/')  # note root group
        g.close()

        h = File(new_name, 'r')
        self.addCleanup(closer(h))
        self.assertIsInstance(h['link']['a'], Group)
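For context, an ExternalLink is stored as a (filename, path) pair and resolved only when accessed; a minimal sketch with hypothetical file names:

import h5py

with h5py.File('target.h5', 'w') as t:
    t.create_group('grp')
with h5py.File('linker.h5', 'w') as s:
    s['lnk'] = h5py.ExternalLink('target.h5', '/grp')  # stored, not resolved yet
with h5py.File('linker.h5', 'r') as s:
    grp = s['lnk']  # resolution happens here; target.h5 is opened behind the scenes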
Example #5
class BasH5(object):
    """
    This class is deprecated.

    .. deprecated:: 0.3.0
        Use `BasH5Reader` instead.

    """
    def __init__(self, filename, readType='Raw'):
        self._h5f = File(filename, 'r')
        self.rgnTable = RegionTable(self._h5f)
        self.baseCallsDG = None        
        if readType == 'Raw':
            self.baseCallsDG = BaseCallsDataGroup(self._h5f, '/PulseData/BaseCalls')
        elif readType == 'CCS':
            self.baseCallsDG = CCSBaseCallsDataGroup(self._h5f, '/PulseData/ConsensusBaseCalls') 
            self.rbaseCallsDG = BaseCallsDataGroup(self._h5f, '/PulseData/BaseCalls')


    def __del__(self):
        self._h5f.close()
                
    def getZMWs(self):
        for hn in self.baseCallsDG.holeNumber:
            yield hn            

    def getSequencingZMWs(self):
        for hn in self.getZMWs():
            if self.baseCallsDG.getStatusStringForZMW(hn) == 'SEQUENCING' and self.baseCallsDG.getBaseCallLenForZMW(hn):
                yield hn
Example #6
def test_hdf5_writer_data(tmpdir):

    filename = tmpdir.join('test1.hdf5').strpath

    data = Data(x=np.arange(6).reshape(2, 3),
                y=(np.arange(6) * 2).reshape(2, 3))

    hdf5_writer(filename, data)

    from h5py import File

    f = File(filename)
    assert len(f) == 2
    np.testing.assert_equal(f['x'].value, data['x'])
    np.testing.assert_equal(f['y'].value, data['y'])
    f.close()

    # Only write out some components

    filename = tmpdir.join('test2.hdf5').strpath

    hdf5_writer(filename, data, components=[data.id['x']])

    f = File(filename)
    assert len(f) == 1
    np.testing.assert_equal(f['x'].value, data['x'])
    f.close()
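Note that Dataset.value, used here and in a few later examples, was deprecated and then removed in h5py 3.0; a minimal sketch of the modern spelling (hypothetical file name):

from h5py import File
import numpy as np

with File('vals.h5', 'w') as f:
    f['x'] = np.arange(6).reshape(2, 3)
with File('vals.h5', 'r') as f:
    arr = f['x'][()]   # modern replacement for f['x'].value
np.testing.assert_equal(arr, np.arange(6).reshape(2, 3))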
Example #7
    def __init__(self, fname):
        from h5py import File
        from numpy import array, log10
        import json

        h5f = File(fname, "r")
        log = sorted(json.loads(h5f["measure"].value).values(),
                 key=lambda e: e["Status"]["Iteration"])
        runargs = json.loads(h5f["runargs"].value)
        self.N       = runargs["N"]
        self.zeta    = runargs.get("zeta", 1.0)
        self.time    = array([entry["Status" ]["CurrentTime"] for entry in log])
        self.mean_T  = array([entry["mean_T" ] for entry in log])
        self.max_T   = array([entry["max_T"  ] for entry in log])
        self.mean_Ms = array([entry["mean_Ms"] for entry in log])
        self.max_Ms  = array([entry["max_Ms" ] for entry in log])
        self.mean_Ma = array([entry["mean_Ma"] for entry in log])
        self.min_Ma  = array([entry["min_Ma" ] for entry in log])
        self.kin     = array([entry["energies"]["kinetic" ] for entry in log])
        self.tie     = array([entry["energies"]["internal"] for entry in log])
        self.mag     = array([entry["energies"]["magnetic"] for entry in log])
        self.tot     = array([entry["energies"]["total"   ] for entry in log])
        self.mean_gamma = array([entry["mean_velocity"][0] for entry in log])
        self.max_gamma = array([entry["max_lorentz_factor"] for entry in log])
        self.runargs = runargs
        h5f.close()
Example #8
def orderByQmodulus(filename,outfile=None):
  """ Sassena does not enforce any ordering of the structure factors.
  Here we order them by increasing modulus of the Q-vectors. """
  from h5py import File
  import numpy
  f=File(filename,'r')
  overwrite=False
  if not outfile:
    outfile=tempfile() # temporary output file
    overwrite=True
  g=File(outfile,'w')
  ds_q = numpy.array(f["qvectors"]) # shape==(nvectors,3)
  moduli=numpy.square(ds_q).sum(axis=1) # moduli-squared of the Q-vectors
  rank=numpy.argsort(moduli) # rank from smallest to greatest
  for dset in ('qvectors', 'fqt', 'fq', 'fq0', 'fq2'):
    if dset in f.keys():
      # outfile is always set by this point and f is open read-only,
      # so the sorted data always goes to g
      x=numpy.array(f[dset])
      g[dset]=x[rank]
  for key,val in f.attrs.items(): g.attrs[key]=val
  g.close()
  f.close()
  if overwrite:
    os.system('/bin/mv %s %s'%(outfile,filename))
  return None
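A hedged usage sketch (hypothetical file names): omitting outfile sorts the input in place via a temporary file, while passing outfile leaves the input untouched:

orderByQmodulus('fqt.h5')                        # reorder fqt.h5 in place
orderByQmodulus('fqt.h5', outfile='sorted.h5')   # write a sorted copy instead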
Example #9
class TestSimpleSlicing(TestCase):

    """
        Feature: Simple NumPy-style slices (start:stop:step) are supported.
    """

    def setUp(self):
        self.f = File(self.mktemp(), 'w')
        self.arr = np.arange(10)
        self.dset = self.f.create_dataset('x', data=self.arr)

    def tearDown(self):
        if self.f:
            self.f.close()

    def test_negative_stop(self):
        """ Negative stop indexes work as they do in NumPy """
        self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2])

    def test_write(self):
        """Assigning to a 1D slice of a 2D dataset
        """
        dset = self.f.create_dataset('x2', (10, 2))

        x = np.zeros((10, 1))
        dset[:, 0] = x[:, 0]
        with self.assertRaises(TypeError):
            dset[:, 1] = x
Example #10
 def testRetrieveData(self):
     try:
         from h5py import File
     except ImportError:
         print('module h5py not found')
         return
     device_database = PropertyTree()
     device_database.put_string('type', 'SeriesRC')
     device_database.put_double('series_resistance', R)
     device_database.put_double('capacitance', C)
     device = EnergyStorageDevice(device_database, MPI.COMM_WORLD)
     eis_database  = setup_expertiment()
     eis_database.put_int('steps_per_decade', 1)
     eis_database.put_int('steps_per_cycle', 64)
     eis_database.put_int('cycles', 2)
     eis_database.put_int('ignore_cycles', 1)
     fout = File('trash.hdf5', 'w')
     spectrum_data = measure_impedance_spectrum(device, eis_database, fout)
     fout.close()
     fin = File('trash.hdf5', 'r')
     retrieved_data = retrieve_impedance_spectrum(fin)
     fin.close()
     print(spectrum_data['impedance']-retrieved_data['impedance'])
     print(retrieved_data)
     self.assertEqual(linalg.norm(spectrum_data['frequency'] -
                                  retrieved_data['frequency'], inf), 0.0)
     # not sure why we don't get equality for the impedance
     self.assertLess(linalg.norm(spectrum_data['impedance'] -
                                 retrieved_data['impedance'], inf), 1e-10)
Example #11
def dump( histogram, filename = None, pathinfile = '/', 
          mode = 'c', fs = None, compression = 'lzf'):
    '''dump(histogram, filename, pathinfile, mode) -> save a histogram into an hdf file.

    histogram:
      The histogram to be written
    filename:
      The hdf filename in which the histogram will be saved
    pathinfile:
      The path inside the hdf file where the histogram is located.
    mode:
      The mode to be used to write to the hdf file.
      'c': create a new hdf file. If an hdf file of the same name exists, this command will fail.
      'w': write to an existing hdf file. If pathinfile already exists in the hdf file, this command will fail.
    fs:
      An already-open h5py File to render into; if None, filename is opened here and closed afterwards.
    compression:
      The compression level. If it is 0, no compression is applied.
      The valid values are integers from 0 to 9 (inclusive); the default is 'lzf'.
    '''
    from Renderer import Renderer
    #g = graphFromHDF5File( filename, pathinfile, fs = fs )
    pathinfile = pathinfile.split( '/' )
    p = pathinfile + [histogram.name()]
    p = '/'.join( p )
    if not p.startswith('/'): 
        p = '/' + p
    writeCodes = {'c':'w','w':'a'}
    if fs is None:
        from h5py import File
        fs = File(filename, writeCodes[mode])
        Renderer(fs, compression).render(histogram)
        fs.close()
    else:
        Renderer(fs, compression).render(histogram)
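A hedged usage sketch; h and h2 are placeholder histogram objects exposing the .name() method that dump() relies on above:

dump(h, 'hists.h5', pathinfile='/', mode='c')    # create a new file
dump(h2, 'hists.h5', pathinfile='/', mode='w')   # append into the existing file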
Example #12
class BaseDataset(TestCase):
    def setUp(self):
        self.f = File(self.mktemp(), 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
Example #13
 def test_iter_zero(self):
     """ Iteration works properly for the case with no group members """
     hfile = File(self.mktemp(), 'w')
     try:
         lst = [x for x in hfile]
         self.assertEqual(lst, [])
     finally:
         hfile.close()
Example #14
 def test_core(self):
     """ Core driver is supported (no backing store) """
     fname = self.mktemp()
     fid = File(fname, 'w', driver='core', backing_store=False)
     self.assertTrue(fid)
     self.assertEqual(fid.driver, 'core')
     fid.close()
     self.assertFalse(os.path.exists(fname))
Example #15
 def test_create_exclusive(self):
     """ Mode 'w-' opens file in exclusive mode """
     fname = self.mktemp()
     fid = File(fname, 'w-')
     self.assertTrue(fid)
     fid.close()
     with self.assertRaises(IOError):
         File(fname, 'w-')
Example #16
 def test_mode(self):
     """ Retrieved File objects have a meaningful mode attribute """
     hfile = File(self.mktemp(),'w')
     try:
         grp = hfile.create_group('foo')
         self.assertEqual(grp.file.mode, hfile.mode)
     finally:
         hfile.close()
Example #17
def hasVersion(filename):
  """Check filename as sassena version"""
  from h5py import File
  f = File(filename,'r')
  value=False
  if 'sassena_version' in f.attrs.keys(): value=True
  f.close()
  return value
Example #18
def ReadFiniteRadiusWaveform(n, filename, WaveformName, ChMass, InitialAdmEnergy, YLMRegex, LModes, DataType, Ws) :
    """
    This is just a worker function defined for ReadFiniteRadiusData,
    below, reading a single waveform from an h5 file of many
    waveforms.  You probably don't need to call this directly.

    """
    from scipy.integrate import cumtrapz as integrate
    from numpy import setdiff1d, empty, delete, sqrt, log, array
    from h5py import File
    import GWFrames
    try :
        f = File(filename, 'r')
    except IOError :
        print("ReadFiniteRadiusWaveform could not open the file '{0}'".format(filename))
        raise
    try :
        W = f[WaveformName]
        NTimes_Input = W['AverageLapse.dat'].shape[0]
        T = W['AverageLapse.dat'][:,0]
        Indices = MonotonicIndices(T)
        T = T[Indices]
        Radii = array(W['ArealRadius.dat'])[Indices,1]
        AverageLapse = array(W['AverageLapse.dat'])[Indices,1]
        CoordRadius = W['CoordRadius.dat'][0,1]
        YLMdata = [DataSet for DataSet in list(W) for m in [YLMRegex.search(DataSet)] if (m and int(m.group('L')) in LModes)]
        YLMdata = sorted(YLMdata, key=lambda DataSet : [int(YLMRegex.search(DataSet).group('L')), int(YLMRegex.search(DataSet).group('M'))])
        LM = sorted([[int(m.group('L')), int(m.group('M'))] for DataSet in YLMdata for m in [YLMRegex.search(DataSet)] if m])
        NModes = len(LM)
        # Lapse is given by 1/sqrt(-g^{00}), where g is the full 4-metric
        T[1:] = integrate(AverageLapse/sqrt(((-2.0*InitialAdmEnergy)/Radii) + 1.0), T) + T[0]
        T -= (Radii + (2.0*InitialAdmEnergy)*log((Radii/(2.0*InitialAdmEnergy))-1.0))
        Ws[n].SetTime(T/ChMass)
        # WRONG!!!: # Radii /= ChMass
        NTimes = Ws[n].NTimes()
        # Ws[n].SetFrame is not done, because we assume the inertial frame
        Ws[n].SetFrameType(GWFrames.Inertial) # Assumption! (but this should be safe)
        Ws[n].SetDataType(DataType)
        Ws[n].SetRIsScaledOut(True) # Assumption! (but it should be safe)
        Ws[n].SetMIsScaledOut(True) # We have made this true
        Ws[n].SetLM(LM)
        Data = empty((NModes, NTimes), dtype='complex')
        if(DataType == GWFrames.h) :
            UnitScaleFactor = 1.0 / ChMass
        elif(DataType == GWFrames.hdot) :
            UnitScaleFactor = 1.0
        elif(DataType == GWFrames.Psi4) :
            UnitScaleFactor = ChMass
        else :
            raise ValueError('DataType "{0}" is unknown.'.format(DataType))
        RadiusRatio = Radii / CoordRadius
        for m,DataSet in enumerate(YLMdata) :
            modedata = array(W[DataSet])
            Data[m,:] = (modedata[Indices,1] + 1j*modedata[Indices,2]) * RadiusRatio * UnitScaleFactor
        Ws[n].SetData(Data)
    finally :
        f.close()
    return Radii/ChMass
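For context, the two time corrections applied above amount to (in G = c = 1 units, with M the initial ADM energy, \bar{\alpha} the average lapse, and r the areal radius) integrating the lapse against the Schwarzschild redshift factor and subtracting the tortoise coordinate, so each finite-radius waveform is put on an approximate retarded-time axis:

r_* = r + 2M \ln\!\left(\frac{r}{2M} - 1\right), \qquad
t_{\mathrm{ret}} \approx \int \frac{\bar{\alpha}\, dt}{\sqrt{1 - 2M/r}} \; - \; r_*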
Example #19
 def test_filename(self):
     """ .filename behaves properly for string data """
     fname = self.mktemp()
     fid = File(fname, 'w')
     try:
         self.assertEqual(fid.filename, fname)
         self.assertIsInstance(fid.filename, six.text_type)
     finally:
         fid.close()
Example #20
 def test_property(self):
     """ File object can be retrieved from subgroup """
     fname = self.mktemp()
     hfile = File(fname, 'w')
     try:
         hfile2 = hfile['/'].file
         self.assertEqual(hfile, hfile2)
     finally:
         hfile.close()
Example #21
    def test_close_multiple_mpio_driver(self):
        """ MPIO driver and options """
        from mpi4py import MPI

        fname = self.mktemp()
        f = File(fname, 'w', driver='mpio', comm=MPI.COMM_WORLD)
        f.create_group("test")
        f.close()
        f.close()
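The mpio driver requires h5py built against a parallel HDF5 and is normally launched under MPI; a minimal sketch (hypothetical file and script names):

# run with e.g. `mpiexec -n 4 python demo.py`
from mpi4py import MPI
import h5py

comm = MPI.COMM_WORLD
with h5py.File('parallel.h5', 'w', driver='mpio', comm=comm) as f:
    dset = f.create_dataset('ranks', (comm.size,), dtype='i')  # collective call
    dset[comm.rank] = comm.rank                                # independent write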
Example #22
 def test_backing(self):
     """ Core driver saves to file when backing store used """
     fname = self.mktemp()
     fid = File(fname, 'w', driver='core', backing_store=True)
     fid.create_group('foo')
     fid.close()
     fid = File(fname, 'r')
     assert 'foo' in fid
     fid.close()
Example #23
    def h5_writer(data, h5_path):
        from h5py import File
        from os import remove
        from os.path import exists

        if exists(h5_path):
            remove(h5_path)

        f = File(h5_path, 'w')
        f.create_dataset('default', data=data, compression='gzip', chunks=True, shuffle=True)
        f.close()
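Reading the file back decompresses transparently, and the filter settings are visible as dataset properties; a minimal sketch assuming a hypothetical path written by h5_writer above:

from h5py import File

f = File('out.h5', 'r')
dset = f['default']
assert dset.compression == 'gzip'
assert dset.shuffle is True
data = dset[()]          # decompression happens transparently on read
f.close()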
Example #24
 def test_create(self):
     """ Mode 'w' opens file in overwrite mode """
     fname = self.mktemp()
     fid = File(fname, 'w')
     self.assertTrue(fid)
     fid.create_group('foo')
     fid.close()
     fid = File(fname, 'w')
     self.assertNotIn('foo', fid)
     fid.close()
Example #25
def weightsToHDF(w, name):
    f=File(name+".h5","w")
    
    weights=f.create_group("Weights")
    
    for i in range(len(w[:-1])):
        weights.create_dataset("Hidden "+str(i+1),data=w[i])
    weights.create_dataset("Output",data=w[-1])
    
    f.close()
Example #26
 def test_unicode(self):
     """ Unicode filenames can be used, and retrieved properly via .filename
     """
     fname = self.mktemp(prefix = six.unichr(0x201a))
     fid = File(fname, 'w')
     try:
         self.assertEqual(fid.filename, fname)
         self.assertIsInstance(fid.filename, six.text_type)
     finally:
         fid.close()
Example #27
def hdf5_writer(filename, data, components=None):
    """
    Write a dataset or a subset to an HDF5 file.

    Parameters
    ----------
    data : `~glue.core.data.Data` or `~glue.core.subset.Subset`
        The data or subset to export
    components : `list` or `None`
        The components to export. Set this to `None` to export all components.
    """

    if isinstance(data, Subset):
        mask = data.to_mask()
        data = data.data
    else:
        mask = None

    from h5py import File

    f = File(filename, 'w')

    for cid in data.visible_components:

        if components is not None and cid not in components:
            continue

        comp = data.get_component(cid)
        if comp.categorical:
            if comp.labels.dtype.kind == 'U':
                values = np.char.encode(comp.labels, encoding='ascii', errors='replace')
            else:
                values = comp.labels.copy()
        else:
            values = comp.data.copy()

        if mask is not None:
            if values.ndim == 1:
                values = values[mask]
            else:
                if values.dtype.kind == 'f':
                    values[~mask] = np.nan
                elif values.dtype.kind == 'i':
                    values[~mask] = 0
                elif values.dtype.kind == 'S':
                    values[~mask] = ''
                else:
                    warnings.warn("Unknown data type in HDF5 export: {0}".format(values.dtype))
                    continue

        print(values)

        f.create_dataset(cid.label, data=values)

    f.close()
Example #28
    def test_write_only(self):
        """ User block only allowed for write """
        name = self.mktemp()
        f = File(name, 'w')
        f.close()

        with self.assertRaises(ValueError):
            f = h5py.File(name, 'r', userblock_size=512)

        with self.assertRaises(ValueError):
            f = h5py.File(name, 'r+', userblock_size=512)
Example #29
 def test_readonly(self):
     """ Mode 'r' opens file in readonly mode """
     fname = self.mktemp()
     fid = File(fname, 'w')
     fid.close()
     self.assertFalse(fid)
     fid = File(fname, 'r')
     self.assertTrue(fid)
     with self.assertRaises(ValueError):
         fid.create_group('foo')
     fid.close()
Example #30
 def test_readwrite(self):
     """ Mode 'r+' opens existing file in readwrite mode """
     fname = self.mktemp()
     fid = File(fname, 'w')
     fid.create_group('foo')
     fid.close()
     fid = File(fname, 'r+')
     assert 'foo' in fid
     fid.create_group('bar')
     assert 'bar' in fid
     fid.close()
Example #31
 def test_file_mode_generalizes(self):
     fname = self.mktemp()
     fid = File(fname, 'w', libver='latest')
     g = fid.create_group('foo')
     # fid and group member file attribute should have the same mode
     assert fid.mode == g.file.mode == 'r+'
     fid.swmr_mode = True
     # fid and group member file attribute should still be 'r+'
     # even though file intent has changed
     assert fid.mode == g.file.mode == 'r+'
     fid.close()
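For context on swmr_mode above, the single-writer/multiple-readers pattern looks roughly like this (a sketch with a hypothetical file name; writer and reader would normally live in separate processes):

import h5py

# writer
f = h5py.File('live.h5', 'w', libver='latest')
dset = f.create_dataset('t', (1,), maxshape=(None,), dtype='f8')
f.swmr_mode = True            # from here on, no new objects may be created
dset.resize((2,))
dset[1] = 1.0
dset.flush()                  # publish the new data to readers

# reader (may run concurrently)
r = h5py.File('live.h5', 'r', libver='latest', swmr=True)
t = r['t']
t.refresh()                   # pick up everything flushed so far
r.close()
f.close()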
Example #35
    def test_sec2(self):
        """ Sec2 driver is supported on posix """
        fid = File(self.mktemp(), 'w', driver='sec2')
        self.assertTrue(fid)
        self.assertEqual(fid.driver, 'sec2')
        fid.close()

        # Testing creation with append flag
        fid = File(self.mktemp(), 'a', driver='sec2')
        self.assertTrue(fid)
        self.assertEqual(fid.driver, 'sec2')
        fid.close()
Example #36
 def test_write(self, get_test_data):
     file_path = DATASET_PATH + "test_file.hdf5"
     writer = HDF5Writer(file_path)
     writer.write("ones", (3, 2), pt.ones(3, 2), "0.01")
     writer.write("twos", (3, 2), pt.ones(3, 2) * 2, "0.01")
     writer.write("threes", (3, 2), pt.ones(3, 2) * 3, "0.03")
     del writer
     hdf5_file = File(file_path, mode="a")
     assert os.path.isfile(file_path)
     assert list(hdf5_file["variable"].keys()) == ["0.01", "0.03"]
     hdf5_file.close()
     os.remove(file_path)
Example #37
 def test_readonly(self):
     """ Core driver can be used to open existing files """
     fname = self.mktemp()
     fid = File(fname, 'w')
     fid.create_group('foo')
     fid.close()
     fid = File(fname, 'r', driver='core')
     self.assertTrue(fid)
     assert 'foo' in fid
     with self.assertRaises(ValueError):
         fid.create_group('bar')
     fid.close()
Example #38
 def test_backing(self):
     """ Core driver saves to file when backing store used """
     fname = self.mktemp()
     fid = File(fname, 'w', driver='core', backing_store=True)
     fid.create_group('foo')
     fid.close()
     fid = File(fname, 'r')
     assert 'foo' in fid
     fid.close()
     # keywords for other drivers are invalid when using the default driver
     with self.assertRaises(TypeError):
         File(fname, 'w', backing_store=True)
Example #39
def write_esh5_orbitals(cell,
                        name,
                        kpts=numpy.zeros((1, 3), dtype=numpy.float64)):
    """Writes periodic AO basis to hdf5 file.  

    Parameters
    ----------
    cell: PySCF gto.Cell object
      PySCF cell object which contains information of the system, including 
      AO basis set, FFT mesh, unit cell information, etc.
    name: string 
      Name of hdf5 file.
    kpts: array. Default: numpy.zeros((1, 3))
      K-point array of dimension (nkpts, 3)
    dtype: datatype. Default: numpy.float64
      Datatype of orbitals in file.   

    """
    def to_qmcpack_complex(array):
        shape = array.shape
        return array.view(numpy.float64).reshape(shape + (2, ))

    nao = cell.nao_nr()

    fh5 = File(name, 'w')
    coords = cell.gen_uniform_grids(cell.mesh)

    kpts = numpy.asarray(kpts)
    nkpts = len(kpts)
    norbs = numpy.zeros((nkpts, ), dtype=int)
    norbs[:] = nao

    grp = fh5.create_group("OrbsG")
    dset = grp.create_dataset("reciprocal_vectors",
                              data=cell.reciprocal_vectors())
    dset = grp.create_dataset("number_of_kpoints", data=len(kpts))
    dset = grp.create_dataset("kpoints", data=kpts)
    dset = grp.create_dataset("number_of_orbitals", data=norbs)
    dset = grp.create_dataset("fft_grid", data=cell.mesh)
    dset = grp.create_dataset("grid_type", data=int(0))
    nnr = cell.mesh[0] * cell.mesh[1] * cell.mesh[2]
    # loop over kpoints later
    for (ik, k) in enumerate(kpts):
        ao = numint.KNumInt().eval_ao(cell, coords, k)[0]
        fac = numpy.exp(-1j * numpy.dot(coords, k))
        for i in range(norbs[ik]):
            aoi = fac * numpy.asarray(ao[:, i].T, order='C')
            aoi_G = tools.fft(aoi, cell.mesh)
            aoi_G = aoi_G.reshape(cell.mesh).transpose(2, 1, 0).reshape(nnr)
            dset = grp.create_dataset('kp' + str(ik) + '_b' + str(i),
                                      data=to_qmcpack_complex(aoi_G))
    fh5.close()
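A quick standalone check of the complex-to-float-pair view used by to_qmcpack_complex above (a sketch, not part of the original module):

import numpy

a = numpy.array([1 + 2j, 3 + 4j], dtype=numpy.complex128)
pairs = a.view(numpy.float64).reshape(a.shape + (2,))   # (real, imag) pairs
assert (pairs == [[1.0, 2.0], [3.0, 4.0]]).all()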
Example #40
 def analyzeExperiment(self, experimentResults):
     # write to data file
     super(Origin, self).toHDF5(experimentResults[self.settings])
     # and to settings file
     f = None
     try:
         f = File('settings.hdf5', 'a')
         super(Origin, self).toHDF5(f['settings/experiment'])
         f.flush()  # write changes
     except Exception:
         logger.exception('Uncaught Exception in origin.postExperiment.')
     finally:
         if f is not None:  # File() itself may have raised
             f.close()  # close the file
     return 0
Example #41
    def __init__(self, split):
        print('==> initializing 2D {} data.'.format(split))
        annot = {}
        tags = ['imgname', 'part', 'center', 'scale']
        f = File(h5_path, 'r')
        for tag in tags:
            annot[tag] = np.asarray(f[tag]).copy()
        f.close()

        print('Loaded 2D {} {} samples'.format(split, len(annot['scale'])))

        self.split = split
        self.annot = annot
Example #42
 def check_progress_in_file(self):
     try:
         file = File(self.filePath, 'r')
         last_time = file['Source timer']
         last_time = float(np.array(last_time))
         state = None
         file.close()
          print('\tProgress restored')
     except Exception:
         last_time = None
         state = None
     finally:
         return last_time, state
Example #43
    def __init__(self, opt, split):
        print('util2==> initializing 3D {} data.'.format(split))
        annot = {}
        f_dir = os.path.join(
            '/data/ai/xxy/mask_rcnn/maskrcnn_3d/data/annotSampleTest.h5')
        f = File(f_dir, 'r')
        tags = [
            'action', 'bbox', 'camera', 'id', 'joint_2d', 'joint_3d_mono',
            'subaction', 'subject', 'istrain'
        ]
        # tags=["category_id", "num_keypoints", "is_crowd",   "keypoints",     "keypoints3D", "bbox","image_id"  ]
        if split == 'train':
            label = f['istrain'][:].tolist()
            ids = [i for i, x in enumerate(label) if x == 1][0:100000]

        else:
            label = f['istrain'][:].tolist()
            ids = [i for i, x in enumerate(label) if x == 0]

        for tag in tags:
            annot[tag] = np.asarray(f[tag]).copy()
        f.close()

        # ids = np.arange(annot['image_id'].shape[0])
        for tag in tags:
            annot[tag] = annot[tag][ids]

        self.num_joints = 16
        self.num_eval_joints = 16
        self.acc_idxs = [0, 1, 2, 3, 4, 5, 10, 11, 14, 15]
        self.mean_bone_length = 4296.99233013
        self.edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [10, 11],
                      [11, 12], [12, 8], [8, 13], [13, 14], [14, 15], [6, 8],
                      [8, 9]]
        self.edges_3d = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5],
                         [10, 11], [11, 12], [12, 8], [8, 13], [13, 14],
                         [14, 15], [6, 8], [8, 9]]
        self.shuffle_ref = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14],
                            [12, 13]]
        self.mean = np.array([0.485, 0.456, 0.406],
                             np.float32).reshape(1, 1, 3)
        self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
        self.aspect_ratio = 1.0 * opt.input_w / opt.input_h  #### 1
        self.root = 7
        self.split = split
        self.opt = opt
        self.annot = annot
        self.nSamples = len(self.annot['id'])
        # self.idxs = np.arange(self.nSamples) if split == 'train' else np.arange(0, self.nSamples, 1 if opt.full_test else 10)
        self.idxs = np.arange(self.nSamples)
        print('Loaded 3D {} {} samples'.format(split, len(self.annot['id'])))
Example #44
    def store_h5py(self, x_train, y_train, x_val, y_val, x_test, y_test):
        """ HDF5 storage. Has the pros of much faster I/O and compressed size than
         SQL storage and the dis of memory vs the solid storage."""
        try:
            from h5py import File
            hdf = File(self.config.data.HDFS_INTERNAL_DATA_FILENAME, "w")
        except IOError as e:
            TextProcessing.logToFile.error(
                "The internal file failed to open for write in <TextProcessing/store_h5py"
            )
            TextProcessing.logToFile.error(e)
        else:
            try:
                group_data = hdf.create_group("dataset")

                group_train = group_data.create_group("train")
                group_val = group_data.create_group("val")
                group_test = group_data.create_group("test")

                group_train.create_dataset("x_trainset",
                                           data=x_train,
                                           compression="gzip")
                group_train.create_dataset("y_trainset",
                                           data=y_train,
                                           compression="gzip")

                group_val.create_dataset("x_valset",
                                         data=x_val,
                                         compression="gzip")
                group_val.create_dataset("y_valset",
                                         data=y_val,
                                         compression="gzip")

                group_test.create_dataset("x_testset",
                                          data=x_test,
                                          compression="gzip")
                group_test.create_dataset("y_testset",
                                          data=y_test,
                                          compression="gzip")
            except IOError as e:
                TextProcessing.logToFile.error(
                    "Failed to store in hdfs in <TextProcessing/store_h5py")
                TextProcessing.logToFile.error(e)
            else:
                hdf.close()
                TextProcessing.logToFile.logger.info(
                    "Successful creation of data file with in-house text processing."
                )
                TextProcessing.logToStream.logger.info(
                    "Successful creation of data file with in-house text processing."
                )
Example #45
 def test_write(self, get_test_data):
     file_path = get_test_data.test_path + "test_file.hdf5"
     writer = HDF5Writer(file_path)
     writer.write("ones", (3, 2), pt.ones(3, 2), "0.01")
     writer.write("twos", (3, 2), pt.ones(3, 2) * 2, "0.01")
     writer.write("threes", (3, 2), pt.ones(3, 2) * 3, "0.03")
     del writer
     hdf5_file = File(file_path,
                      mode="a",
                      driver="mpio",
                      comm=MPI.COMM_WORLD)
     assert os.path.isfile(file_path)
     assert list(hdf5_file["variable"].keys()) == ["0.01", "0.03"]
     hdf5_file.close()
Example #46
    def __init__(self, opt, split, returnMeta=False):
        print(f"==> initializing 2D {split} data.")
        annot = {}
        tags = ['imgname', 'part', 'center', 'scale']
        f = File('{}/mpii/annot/{}.h5'.format(ref.dataDir, split), 'r')
        for tag in tags:
            annot[tag] = np.asarray(f[tag]).copy()
        f.close()
        print(f"Loaded 2D {split} {len(annot['scale'])} samples.")

        self.split = split
        self.opt = opt
        self.annot = annot
        self.returnMeta = returnMeta
Example #47
  def __init__(self, opt, split):
    print('==> initializing 2D {} data.'.format(split))
    annot = {}
    tags = ['imgname','part','center','scale']
    f = File('{}/mpii/annot/{}.h5'.format(ref.dataDir, split), 'r')
    for tag in tags:
      annot[tag] = np.asarray(f[tag]).copy()
    f.close()

    print('Loaded 2D {} {} samples'.format(split, len(annot['scale'])))
    
    self.split = split
    self.opt = opt
    self.annot = annot
Example #48
class SortedQueryTest(AbstractQueryMixin, TestCase):

    path = 'SortedQueryTest.h5'

    def getDataset(self):
        self.f = File(self.path, 'w')
        self.input = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        self.d = self.f.create_dataset('dset', data=self.input)
        return SortedArray(self.d)

    def tearDown(self):
        self.f.close()
        if os.path.exists(self.path):
            os.remove(self.path)
Example #49
    def test_core(self):
        """ Core driver is supported (no backing store) """
        fname = self.mktemp()
        fid = File(fname, 'w', driver='core', backing_store=False)
        self.assertTrue(fid)
        self.assertEqual(fid.driver, 'core')
        fid.close()
        self.assertFalse(os.path.exists(fname))

        # Testing creation with append flag
        fid = File(self.mktemp(), 'a', driver='core')
        self.assertTrue(fid)
        self.assertEqual(fid.driver, 'core')
        fid.close()
Example #50
    def test_match_existing(self):
        """ User block size must match that of file when opening for append """
        name = self.mktemp()
        f = File(name, 'w', userblock_size=512)
        f.close()

        with self.assertRaises(ValueError):
            f = File(name, 'a', userblock_size=1024)

        f = File(name, 'a', userblock_size=512)
        try:
            self.assertEqual(f.userblock_size, 512)
        finally:
            f.close()
Example #51
class BaseMapping(BaseGroup):
    """
        Base class for mapping tests
    """
    def setUp(self):
        self.f = File(self.mktemp(), 'w')
        self.groups = ('a', 'b', 'c', 'd')
        for x in self.groups:
            self.f.create_group(x)
        self.f['x'] = h5py.SoftLink('/mongoose')
        self.groups = self.groups + ('x', )

    def tearDown(self):
        if self.f:
            self.f.close()
Example #52
    def __init__(self, split):
        print('==> Initializing MPI_INF %s data' % (split))

        annot = {}
        tags = ['idx', 'pose2d', 'pose3d', 'bbox', 'cam_f', 'cam_c', 'subject', 'sequence', 'video']
        f = File('%s/inf/inf_%s.h5' % (conf.data_dir, split), 'r')
        for tag in tags:
            annot[tag] = np.asarray(f[tag]).copy()
        f.close()

        self.split = split
        self.annot = annot
        self.num_samples = self.annot['pose2d'].shape[0]

        print('Load %d MPI_INF %s samples' % (self.num_samples, self.split))
Example #53
def main():
    index_ids, index_vectors = jl.load('data/index.bin')
    test_ids, test_vectors = jl.load('data/test.bin')
    file = File('data/distances.h5', 'r')
    dataset = file['result']

    result = jl.Parallel(n_jobs=-1, backend='threading')(
        jl.delayed(process)(i, v, id_, index_ids, dataset)
        for i, (id_, v) in tqdm(enumerate(zip(test_ids, test_vectors)),
                                desc='test',
                                total=len(test_ids)))

    file.close()
    result = pd.DataFrame(result)
    result.to_csv('result/retrieval.csv', index=False)
Example #54
 def pp_keys(self):
     data_file = File(self.data_path + self.log, 'r', libver='latest', swmr=True)
     try:
          pp_key_vals = [key for key in data_file.keys()
                         if key not in ('tpts', 'p_tot')]
         data_file.close()
         return pp_key_vals
     except:
         self.exit_handler()
         raise Exception('ERROR')
Example #55
 def load_namespaces(cls, namespace_catalog, path, namespaces=None):
     '''
     Load cached namespaces from a file.
     '''
     f = File(path, 'r')
     spec_group = f[f.attrs[SPEC_LOC_ATTR]]
     if namespaces is None:
         namespaces = list(spec_group.keys())
     for ns in namespaces:
         ns_group = spec_group[ns]
         latest_version = list(ns_group.keys())[-1]
         ns_group = ns_group[latest_version]
         reader = H5SpecReader(ns_group)
         namespace_catalog.load_namespaces('namespace', reader=reader)
     f.close()
Example #56
    def __init__(self):
        print('==> Initializing MPII data')
        annot = {}
        tags = ['imgname', 'part', 'center', 'scale']
        f1 = File('%s/mpii/annot/%s.h5' % (conf.data_dir, 'train'), 'r')
        f2 = File('%s/mpii/annot/%s.h5' % (conf.data_dir, 'val'), 'r')
        for tag in tags:
            annot[tag] = np.concatenate((np.asarray(f1[tag]).copy(), np.asarray(f2[tag]).copy()), axis=0)
        f1.close()
        f2.close()

        self.annot = annot
        self.num_samples = len(self.annot['scale'])
        
        print('Load %d MPII samples' % (len(annot['scale'])))
Example #57
    def new_model(self, filename):
        def add_group(group, si):
            attrs = group.attrs.items()
            attr_si = QStandardItem('attrs')
            attr_si.setEditable(False)
            for i in range(len(attrs)):
                attr_child = QStandardItem(attrs[i][0])
                attr_child.setEditable(False)
                val = QStandardItem(str(attrs[i][1]))
                val.setEditable(False)
                attr_child.setChild(0, val)
                attr_si.setChild(i, attr_child)
            si.setChild(0, attr_si)

            groups = group.items()
            # Add 1 b/c attributes is 0
            for i in range(len(groups)):
                gname = groups[i][0]
                ## Add leading zeros for nicer sorting...
                #if gname.startswith('trajectory'):
                #	gname = "trajectory %06d"%(int(gname.split(' ')[-1]))
                newthing = QStandardItem(gname)
                newthing.setEditable(False)
                try:
                    add_group(groups[i][1], newthing)
                    si.setChild(i + 1, newthing)
                except:
                    try:
                        val = QStandardItem("Dataset - shape: " +
                                            str(groups[i][1].value.shape))
                        val.setData(groups[i][1].ref)
                        val.setEditable(False)
                        newthing.setChild(0, val)
                        si.setChild(i + 1, newthing)
                    except:
                        print(groups[i][0])

        self.model = QStandardItemModel(self.viewer)
        try:
            f = File(filename, 'r')
            dataset = QStandardItem(f.filename)
            dataset.setEditable(False)
            add_group(f, dataset)
            f.close()
            self.model.appendRow(dataset)

        except:
            pass
Example #58
class TestSimpleSlicing(TestCase):
    """
        Feature: Simple NumPy-style slices (start:stop:step) are supported.
    """
    def setUp(self):
        self.f = File(self.mktemp(), 'w')
        self.arr = np.arange(10)
        self.dset = self.f.create_dataset('x', data=self.arr)

    def tearDown(self):
        if self.f:
            self.f.close()

    def test_negative_stop(self):
        """ Negative stop indexes work as they do in NumPy """
        self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2])
Example #59
    def __init__(self, protocol, split, dense=False, scale=False, noise=0, std_train=0, std_test=0, noise_path=None):
        print('==> Initializing H36M %s data' % (split))
        annot = {}
        tags = ['idx', 'pose2d', 'pose3d', 'bbox', 'cam_f', 'cam_c', 'cam_R', 'cam_T',
                'subject', 'action', 'subaction', 'camera']
        if split == 'train':
            #f = File('%s/h36m/protocol2/h36m17_protocol%d.h5' % (conf.data_dir,protocol), 'r')
            f = File('%s/data/h36m17_new.h5' % (conf.data_dir), 'r')
        elif split == 'test' or split == 'val': 
            f = File('%s/data/h36m17_protocol2_4.h5' % (conf.data_dir), 'r')
        for tag in tags:
            annot[tag] = np.asarray(f[tag]).copy()
        f.close()
        if dense == False:
            idxs = np.mod(annot['idx'], 50) == 1
            idxs = np.arange(annot['idx'].shape[0])[idxs]
            for tag in tags:
                annot[tag] = annot[tag][idxs]

        idxs = np.full(annot['idx'].shape[0], False)
        subject = subject_list[protocol-1][1-int(split=='train' or split=='test_train')]
        for i in range(len(subject)):
            idxs = idxs + (annot['subject']==subject[i])
        idxs = np.arange(annot['idx'].shape[0])[idxs]
        for tag in tags:
            annot[tag] = annot[tag][idxs]

        self.protocol = protocol
        self.split = split
        self.dense = dense
        self.scale = scale
        self.noise = noise
        self.std_train = std_train
        self.std_test = std_test
        self.noise_path = noise_path
        self.annot = annot
        self.num_samples = len(self.annot['idx'])

        # image size
        self.width = 256
        self.height = 256

        # load error statistics
        self.load_error_stat()

        print('Load %d H36M %s samples' % (self.num_samples, self.split))
Example #60
    def test_readonly_delete_exception(self):
        """ Deleting object in readonly file raises KeyError """
        # Note: it is impossible to restore the old behavior (ValueError)
        # without breaking the above test (non-existing objects)
        fname = self.mktemp()
        hfile = File(fname, 'w')
        try:
            hfile.create_group('foo')
        finally:
            hfile.close()

        hfile = File(fname, 'r')
        try:
            with self.assertRaises(KeyError):
                del hfile['foo']
        finally:
            hfile.close()