Example #1
import datetime
import os

import h5py
import hdfdict


def test_dict_to_hdf_with_datetime():
    d = {
        'e': [datetime.datetime.now() for _ in range(5)],
        'f': datetime.datetime.utcnow(),
        'g': [('Hello', 5), (6, 'No HDF but json'), {
            'foo': True
        }]
    }
    fname = 'test_hdfdict.h5'
    if os.path.isfile(fname):
        os.unlink(fname)
    hf = h5py.File(fname, 'w')  # explicit write mode; newer h5py defaults to read-only
    hdfdict.dump(d, hf)
    res = hdfdict.load(hf)

    def equaldt(a, b):
        d = a - b
        return abs(d.total_seconds()) < 1e-3  # abs() so the argument order does not matter

    assert all([equaldt(a, b) for (a, b) in zip(d['e'], res['e'])])
    assert equaldt(d['f'], res['f'])
    assert d['g'][0][0] == 'Hello'
    assert d['g'][1][0] == 6
    assert d.keys() == res.keys()
    hf.close()

    if os.path.isfile(fname):
        os.unlink(fname)
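A minimal round-trip sketch of the dump/load API this test exercises; the file name is illustrative:

import hdfdict

data = {'values': [1, 2, 3], 'label': 'demo'}
hdfdict.dump(data, 'roundtrip_demo.h5')  # dump accepts a filename or an open h5py.File
restored = hdfdict.load('roundtrip_demo.h5', lazy=False)
print(dict(restored))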
Example #2
def test_dict_to_hdf():
    # `d` and `fname` are module-level test fixtures; `assume` presumably comes from hypothesis.
    if os.path.isfile(fname):
        os.unlink(fname)
    hdfdict.dump(d, fname)
    for lazy in [True, False]:
        res = hdfdict.load(fname, lazy=lazy)
        assume(np.all(d['a'] == res['a']))
        assume(np.all(d['b'] == res['b']))
        assume(np.all(d['c'] == res['c']))
        assume(tuple(d.keys()) == tuple(res.keys()))
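The lazy flag controls whether values are read immediately; with lazy=True they stay HDF5-backed until accessed, and Example #3 below materializes them with unlazy(). A small sketch, reusing the illustrative file from the sketch above:

import hdfdict

lazy_res = hdfdict.load('roundtrip_demo.h5', lazy=True)
print(tuple(lazy_res.keys()))  # keys are listed without reading the datasets
lazy_res.unlazy()              # force every value into memory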
Example #3
import datetime
import os

import h5py
import hdfdict
from hypothesis import assume  # `assume` below presumably comes from hypothesis


def test_dict_to_hdf_with_datetime():
    d = {
        'e': [datetime.datetime.now() for _ in range(5)],
        'f': datetime.datetime.utcnow(),
        'g': [('Hello', 5), (6, 'No HDF but json'), {
            'foo': True
        }],
        'h': {
            'test2': datetime.datetime.now(),
            'again': ['a', 1],
            (1, 2): (3, 4)
        }
    }
    fname = 'test_hdfdict.h5'
    if os.path.isfile(fname):
        os.unlink(fname)
    hf = h5py.File(fname, 'w')  # explicit write mode; newer h5py defaults to read-only
    hdfdict.dump(d, hf)
    res = hdfdict.load(hf, lazy=False)  # eager load; result is replaced below
    res = hdfdict.load(hf)              # lazy load (the default)
    res.unlazy()  # all lazy objects are rolled out (read into memory)

    def equaldt(a, b):
        d = a - b
        return abs(d.total_seconds()) < 1e-3  # abs() so the argument order does not matter

    assume(all([equaldt(a, b) for (a, b) in zip(d['e'], res['e'])]))
    assume(equaldt(d['f'], res['f']))
    assume(d['g'][0][0] == 'Hello')
    assume(d['g'][1][0] == 6)
    assume(d.keys() == res.keys())
    assume(isinstance(res['h']['test2'], datetime.datetime))
    assume(res['h']['again'][0] == 'a')
    assume(res['h']['again'][1] == 1)
    hf.close()

    if os.path.isfile(fname):
        os.unlink(fname)
Example #4
def load(group):
    """
    Reads and converts an HDF5 group into a dictionary.

    Parameters
    ----------
    group : h5py.Group
        previously dumped group

    Returns
    -------
    dict
    """
    contents = hdfdict.load(group, lazy=False)
    return recursively_read_dict_contents(contents)
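recursively_read_dict_contents is defined elsewhere in that project. A hedged sketch of what such a post-processing pass typically does, walking nested dicts and decoding byte strings; the implementation here is an assumption, not the project's actual helper:

def recursively_read_dict_contents(contents):
    # Hypothetical sketch: recurse into sub-dicts and decode HDF5 byte strings.
    result = {}
    for key, value in contents.items():
        if isinstance(value, dict):
            result[key] = recursively_read_dict_contents(value)
        elif isinstance(value, bytes):
            result[key] = value.decode('utf8')
        else:
            result[key] = value
    return result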
Example #5
 def load_model(self, fname):
     """
     Load Model
     Loads the parameters and the architecture of the saved models
     :param fname: Directory from where the model saved be opened
     """
     print("Model loading....")
     model_dict = dict(hdfdict.load(fname))
     params_dict = model_dict["Parameters"]
     arch_dict = model_dict["Architecture"]
     self.reset()
     # Rebuild each layer from its stored architecture entry;
     # byte-string fields are decoded back to str before use.
     for key in arch_dict:
         self.add(arch_dict[key][0].decode('utf8'), int(arch_dict[key][1]), int(arch_dict[key][2]),
                  arch_dict[key][3].decode('utf8'), int(arch_dict[key][4]))
     for dee, layer in enumerate(self.layer_names, start=1):
         layer.weights = params_dict["W" + str(dee)]
         layer.bias = params_dict["b" + str(dee)]
     print("Model loaded!")
Example #6
def load_data(feature=None, dataset=None):
    features_dir = '../../../../../features'
    feature_path = os.path.join(features_dir, feature, dataset)
    hdf5_files = os.listdir(feature_path)
    # the 'chromagram' feature is stored under the key 'chroma' in the HDF5 files
    feature = 'chroma' if feature == 'chromagram' else feature
    feature_data = {
        'genre': [],
        'label': [],
        feature: [],
    }
    for f in hdf5_files:
        data = dict(hdfdict.load(os.path.join(feature_path, f)))
        feature_data['genre'] += data['genre'].tolist()
        feature_data['label'] += data['label'].tolist()
        feature_data[feature] += data[feature].tolist()

    feature_data['genre'] = np.array(feature_data['genre'])
    feature_data['label'] = np.array(feature_data['label'])
    feature_data[feature] = np.array(feature_data[feature])

    return feature_data
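A hypothetical call for the loader above; the feature and dataset names are illustrative, not from the source:

features = load_data(feature='chromagram', dataset='train')
print(features['chroma'].shape, features['label'].shape)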
Example #7
                              map_location=device)

    new_state_dict = OrderedDict()
    for key, val in c_checkpoint['model_b'].items():
        new_key = key[7:]  # strip the 7-character 'module.' prefix added by nn.DataParallel
        new_state_dict[new_key] = val
    speaker_encoder.load_state_dict(new_state_dict)

    #=======================Load in vocoder==================================

    vocoder = MelGan(device)
    try:
        with open(Config.dir_paths["melgan_config_path"]) as f:
            melgan_config = yaml.load(f, Loader=yaml.Loader)

        melgan_stats = hdfdict.load(Config.dir_paths["melgan_stats_path"])
    except Exception as err:
        log.error(f"Unable to load in melgan config or stats ({err})")
        exit(1)  # exit with a non-zero status on failure

    target_embedding = np.load(
        args.target_embedding_path)  #load in target embedding
    target_embedding = torch.from_numpy(
        target_embedding[np.newaxis, :]).to(device)

    source_embedding = np.load(args.source_embedding_path)
    source_embedding = torch.from_numpy(
        source_embedding[np.newaxis, :]).to(device)

    #========================Main loop start===========================
    log.info("Started live_convert.py")
Example #8
def main():

    # Get the unit cell volume from the structure file given as the first argument
    if os.path.isfile(os.sys.argv[1]):
        cp = []
        with open(os.sys.argv[1], 'r') as fh:
            dummy = fh.readline()        # header line (unused)
            alat = float(fh.readline())  # lattice constant
            for _ in range(3):           # three cell vectors
                g = fh.readline().split()
                cp.append([float(g[0]), float(g[1]), float(g[2])])
        factor = 1
        cp = np.array(cp) * alat  # scale the cell vectors by the lattice constant (once)
        V = abs(np.dot(cp[0], np.cross(cp[1], cp[2]))) * 1e-30  # cell volume in m^3
    else:
        print('No unitcell found')
        quit()

    nx = int(os.sys.argv[2])
    ny = int(os.sys.argv[3])
    nz = int(os.sys.argv[4])
    T = int(os.sys.argv[5])

    tail = str(nx) + str(ny) + str(nz) + '.hdf5'

    # KAPPA -------------------------
    # f = dd.io.load('kappa-m' + tail)
    f = hdfdict.load('kappa-m' + tail)

    mode_kappa = f['mode_kappa']
    # weight = f['weight'][:]
    g = f['gamma'][:]
    gg = np.pi * g[0] * 1e12  # NOTE: there should be a factor 4 here according to the docs.
    (nq, nb) = np.shape(g[0])
    nm = nq * nb
    alpha = V * nq
    v = np.array(f['group_velocity']) * 1e2  # m/s
    w = np.array(f['frequency']) * 1e12      # 1/s
    q = 1.60218e-19     # C   (elementary charge)
    kb = 1.380649e-23   # J/K (Boltzmann constant)
    h = 6.62607015e-34  # J*s (Planck constant)
    eta = w * h / T / kb / 2
    C = kb * np.power(eta, 2) * np.power(np.sinh(eta), -2)  # J/K (mode heat capacity)


    f = gg.reshape(nm)
    I = np.where(f > 0.0)
    tau = np.zeros(nm)
    tau[I] = 1 / f[I]  # lifetimes tau = 1/linewidth; zero-linewidth modes stay at 0
    w = w.reshape(nm)
    v = np.array([v[:, :, 0].reshape(nb * nq),
                  v[:, :, 1].reshape(nb * nq),
                  v[:, :, 2].reshape(nb * nq)])
    v = v.T
    C = C.reshape(nm)
    ftol = 1e-30
    index = (np.logical_and(C > ftol, f > ftol)).nonzero()[0]
    exclude = (np.logical_or(C <= ftol, f <= ftol)).nonzero()[0]

    C = C[index]
    v = v[index]
    w = w[index]
    tau = tau[index]
    sigma = np.einsum('i,ij->ij', C, v)
    # RTA conductivity: kappa_ij = sum_l C_l * v_li * v_lj * tau_l / alpha
    kappa = np.einsum('li,lj,l,l->ij', v, v, tau, C) / alpha
    print('KAPPA (RTA):')
    print(kappa)

    # ---------------------------------

    # FULL MATRIX ----------------------------------
    fname = 'unitary-m' + tail
    # f = dd.io.load(fname)
    f = hdfdict.load(fname)
    Q = f['unitary_matrix'][0, 0]

    # f = dd.io.load('coleigs-m' + tail)
    f = hdfdict.load('coleigs-m' + tail)
    D = f['collision_eigenvalues'][0]
    Dm = np.diag(D)
    Q = Q.reshape(nm, nm)

    # A = np.matmul(Q.T, np.matmul(Dm, Q)) * 1e12 * np.pi
    # Rebuild the collision matrix from its eigendecomposition: A = Q^T diag(D) Q
    QT = Q.T
    A = np.matmul(QT, (D * QT).T)
    A = np.delete(A, exclude, 0)
    A = np.delete(A, exclude, 1)

    # a = np.einsum('ij,i->j', A, np.sqrt(C))
    # print(sum(a))
    # print(sum(np.absolute(a)))
    # show()

    W = np.einsum('ij,i,j->ij', A, np.sqrt(C), np.sqrt(C)) * 1e12 * np.pi
    # W = np.einsum('ij,i,j->ij', A, 1/np.sqrt(C), 1/np.sqrt(C)) * 1e12 * np.pi

    print('Start inversion...')
    # Full (beyond-RTA) conductivity via the pseudo-inverse of the scattering matrix
    kappa = np.einsum('li,lk,kj->ij', sigma, pinvh(W), sigma) / alpha
    print('KAPPA (FULL):')
    print(kappa)

    data = {'W': W, 'v': v, 'C': C, 'kappa': kappa, 'alpha': np.array([alpha])}
    # np.savez_compressed('full.npz', data)
    # Saving data
    save_data('full', data)
    # hdfdict.dump(data, 'full.h5')

    data = {'C': C / alpha, 'tau': tau, 'v': v, 'kappa': kappa}
    saveCompressed('rta.npz', **data)
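The RTA einsum in this example is just the mode sum kappa_ij = sum_l C_l * v_li * v_lj * tau_l / alpha. A toy self-check with random data, independent of the script's arrays:

import numpy as np

n = 8
v = np.random.rand(n, 3)   # mode group velocities
tau = np.random.rand(n)    # mode lifetimes
C = np.random.rand(n)      # mode heat capacities
alpha = 2.0

kappa_einsum = np.einsum('li,lj,l,l->ij', v, v, tau, C) / alpha
kappa_loop = sum(C[l] * tau[l] * np.outer(v[l], v[l]) for l in range(n)) / alpha
assert np.allclose(kappa_einsum, kappa_loop)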
Example #9
        # tail of the print_attrs callback that f.visititems() registers below
        df = pd.concat([pd.DataFrame(df[c].tolist()).add_prefix(c) for c in df.columns], axis=1)
        
        dfd[k] = df
        
        l = len(obj[0])

        # print(l)
        # print(dfd.keys())

        print(name, "Adding col to dfd index", k)
        # print(dfd.keys())
    #for key, val in obj.attrs.items():
    #    print("    %s: %s" % (key, val),type(val))

f = h5py.File('walking5.h5', 'r')  # read the h5 file
f.visititems(print_attrs)  # visititems visits every object in this group and its subgroups;
                           # here each visited object is a Dataset instance

for name in f:
    print(name)
res = hdfdict.load("walking5.h5")
print(res.keys())

writer = pd.ExcelWriter('hd5excelout18.xlsx', engine='xlsxwriter')
for k, df in dfd.items():
    print(k, len(df))  # print the key and the number of rows for each dataframe
    # strip the numeric suffixes from columns that otherwise share the same name
    df.columns = df.columns.str.replace(r'\d+', '', regex=True)
    df.to_excel(writer, sheet_name='sheet_len_' + k, index=False)
writer.close()  # writes the workbook; .save() was removed in pandas 2.0
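A more direct sketch of the same export, assuming each top-level entry loads as a 1- or 2-D array; file and sheet names are illustrative:

import hdfdict
import pandas as pd

res = dict(hdfdict.load('walking5.h5', lazy=False))
with pd.ExcelWriter('walking5.xlsx', engine='xlsxwriter') as writer:
    for key, value in res.items():
        # Excel sheet names are capped at 31 characters
        pd.DataFrame(value).to_excel(writer, sheet_name=str(key)[:31], index=False)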
Example #10
 def load(cls, filepath):
     # Rebuild the instance by passing every stored field as a keyword argument.
     return cls(**_hdfdict.load(filepath, lazy=False))
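A hypothetical usage of this classmethod pattern; MyModel and its fields are illustrative, not from the source:

import hdfdict as _hdfdict

class MyModel:
    def __init__(self, weights, bias):
        self.weights = weights
        self.bias = bias

    @classmethod
    def load(cls, filepath):
        return cls(**_hdfdict.load(filepath, lazy=False))

model = MyModel.load('model.h5')  # file keys must match the __init__ keyword names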