Example #1
    def store_train_h5(self, path):
        if os.path.exists(path):
            os.remove(path)

        dpack = pyt.datapacker(path)

        for j, (i, X, F, E, S, P) in enumerate(
                zip(self.kid, self.xyz, self.frc, self.Eqm, self.spc,
                    self.prt)):
            xyz = X[i]
            frc = F[i]
            eng = E[i]
            spc = S
            nme = P

            # Prepare and store the training data set
            if xyz.size != 0:
                dpack.store_data(nme + '/mol' + str(j),
                                 coordinates=xyz,
                                 forces=frc,
                                 energies=eng,
                                 species=spc)

        # Cleanup the disk
        dpack.cleanup()
Example #2
    def generate_h5(self, path):
        # open an HDF5 for compressed storage.
        # Note that if the path exists, it will open whatever is there.
        dpack = pyt.datapacker(path)

        d = self.ldtdir + self.datdir + '/data/'
        files = [f for f in os.listdir(d) if ".dat" in f]
        files.sort()
        Nf = len(files)
        Nd = 0
        for n, f in enumerate(files):
            #print(d + f)
            L = file_len(d + f)

            if L >= 4:
                #print(d + f)

                data = hdt.readncdatall(d + f)

                if 'energies' in data:
                    Ne = data['energies'].size
                    Nd += Ne

                    f = f.rsplit("-", 1)

                    fn = f[0] + "/mol" + f[1].split(".")[0]

                    dpack.store_data(fn, **data)
        dpack.cleanup()
Example #3
    def store_data(self, filename):
        if os.path.exists(filename):
            os.remove(filename)

        dpack = ant.datapacker(filename)
        for k in self.tdata.keys():
            dpack.store_data(k, **(self.tdata[k]))
        dpack.cleanup()
Example #4
    def __init__(self, hdf5files, saef, output, storecac, storetest, Naev):
        self.xyz = []
        self.frc = []
        self.Eqm = []
        self.spc = []
        self.idx = []
        self.gid = []
        self.prt = []

        self.Naev = Naev

        self.kid = []  # list to track data kept

        self.nt = []  # total conformers
        self.nc = []  # total kept

        self.of = open(output, 'w')

        self.tf = 0

        for f in hdf5files:
            # Construct the data loader class
            adl = pyt.anidataloader(f)
            print('Loading file:', f)

            # Declare test cache
            if os.path.exists(storetest):
                os.remove(storetest)

            dpack = pyt.datapacker(storetest)

            for i, data in enumerate(adl):

                xyz = data['coordinates']
                frc = data['forces']
                eng = data['energies']
                spc = data['species']
                nme = data['path']

                # Toss out high forces
                Mv = np.max(np.linalg.norm(frc, axis=2), axis=1)
                index = np.where(Mv > 1.75)[0]
                indexk = np.where(Mv <= 1.75)[0]

                # Remove the high-force conformers
                xyz = xyz[indexk]
                frc = frc[indexk]
                eng = eng[indexk]

                idx = np.random.uniform(0.0, 1.0, eng.size)
                tr_idx = np.asarray(np.where(idx < 0.99))[0]
                te_idx = np.asarray(np.where(idx >= 0.99))[0]
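                # Random ~99%/1% split: idx < 0.99 goes to training,
                # the remainder is held out for the test set.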

                #print(tr_idx)
                if tr_idx.size > 0:
                    self.prt.append(nme)

                    self.xyz.append(
                        np.ndarray.astype(xyz[tr_idx], dtype=np.float32))
                    self.frc.append(
                        np.ndarray.astype(frc[tr_idx], dtype=np.float32))
                    self.Eqm.append(
                        np.ndarray.astype(eng[tr_idx], dtype=np.float64))
                    self.spc.append(spc)

                    Nd = eng[tr_idx].size
                    #print(Nd)

                    self.idx.append(np.arange(Nd))
                    self.kid.append(np.array([], dtype=int))
                    self.gid.append(np.array([], dtype=int))

                    self.tf = self.tf + Nd

                    self.nt.append(Nd)
                    self.nc.append(0)

                # Prepare and store the test data set
                if xyz[te_idx].size != 0:
                    #t_xyz = xyz[te_idx].reshape(te_idx.size, xyz[te_idx].shape[1] * xyz[te_idx].shape[2])
                    dpack.store_data(nme + '/mol' + str(i),
                                     coordinates=xyz[te_idx],
                                     forces=frc[te_idx],
                                     energies=np.array(eng[te_idx]),
                                     species=spc)

            # Clean up
            adl.cleanup()

            # Clean up
            dpack.cleanup()

        self.nt = np.array(self.nt)
        self.nc = np.array(self.nc)

        self.ts = 0
        self.vs = 0

        self.Nbad = self.tf

        self.saef = saef
        self.storecac = storecac
Example #5
    #    ds.append(np.array(tidx).size)
    #    tmol += 1

    #print('total data:',tdat)

    #ddif = tdat - mtdat

    #ds = np.array(ds)
    #cnt = np.ones(ddif,dtype=np.int32)
    #rmtot = np.zeros(tmol,dtype=np.int32)
    #for i in range(tmol):
    #    rmtot[i] = cnt[]

    #print(rmtot)

    dp = pyt.datapacker(h5dir)
    #print('Diff data:',ddif,'tmol:',tmol)
    tmol = 0
    gcount = 0
    bcount = 0
    for i, f in enumerate(files):
        X = []
        E = []
        tmol += 1
        for e in ends:
            data = hdn.readncdat(dtdir + f + e + '.dat')
            X.append(np.array(data[0], dtype=np.float32))
            E.append(np.array(data[2], dtype=np.float64))
            S = data[1]

        X = np.concatenate(X)
Example #6
    if os.path.exists(store_dir + str(i) + '/testset/testset.h5'):
        os.remove(store_dir + str(i) + '/testset/testset.h5')

    if not os.path.exists(store_dir + str(i) + '/testset'):
        os.mkdir(store_dir + str(i) + '/testset')

cachet = [
    cg('_train', saef, store_dir + str(r) + '/', forcet, chargt, False)
    for r in range(N)
]
cachev = [
    cg('_valid', saef, store_dir + str(r) + '/', forcet, chargt, False)
    for r in range(N)
]
testh5 = [
    pyt.datapacker(store_dir + str(r) + '/testset/testset.h5')
    for r in range(N)
]

Nd = np.zeros(N, dtype=np.int32)
Nbf = 0
for f, fn in enumerate(h5files):
    print('Processing file(' + str(f + 1) + ' of ' + str(len(h5files)) + '):',
          fn)
    adl = pyt.anidataloader(fn)

    To = adl.size()
    Ndc = 0
    Fmt = []
    Emt = []
    for c, data in enumerate(adl):
Example #7
'''
hdf5file = '/home/jujuman/Research/ANI-DATASET/ani_data_c01test.h5'
storecac = '/home/jujuman/Research/GDB-11-wB97X-6-31gd/cache01_2/'
saef   = "/home/jujuman/Research/GDB-11-wB97X-6-31gd/sae_6-31gd.dat"
path = "/home/jujuman/Research/GDB-11-wB97X-6-31gd/cache01_2/testset/c01-testset.h5"
'''

# Construct the data loader class
adl = pya.anidataloader(hdf5file)

# Declare data cache
cachet = cg('_train', saef, storecac)
cachev = cg('_valid', saef, storecac)

# Declare test cache
dpack = pyt.datapacker(path)

# Load morse parameters
popt = np.load('mp_ani_params_test.npz')['param']

# Loop over data in set
for data in adl.getnextdata():
    loc = data['parent'] + "/" + data['child']
    print(loc)

    xyz = data['coordinates']
    eng = data['energies']
    spc = data['species']

    # Compute Morse Potential
    sdat = [
Example #8
    def build_strided_training_cache(self,
                                     Nblocks,
                                     Nvalid,
                                     Ntest,
                                     build_test=True,
                                     build_valid=False,
                                     forces=True,
                                     grad=False,
                                     Fkey='forces',
                                     forces_unit=1.0,
                                     Ekey='energies',
                                     energy_unit=1.0,
                                     Eax0sum=False,
                                     rmhighe=True):
        if not os.path.isfile(self.netdict['saefile']):
            self.sae_linear_fitting(Ekey=Ekey,
                                    energy_unit=energy_unit,
                                    Eax0sum=Eax0sum)
        h5d = self.h5dir
        store_dir = self.train_root + "cache-data-"
        N = self.Nn
        Ntrain = Nblocks - Nvalid - Ntest
        if Nblocks % N != 0:
            raise ValueError(
                'Error: number of networks must evenly divide number of blocks.'
            )
        Nstride = Nblocks / N
        for i in range(N):
            if not os.path.exists(store_dir + str(i)):
                os.mkdir(store_dir + str(i))
            if build_test:
                if os.path.exists(store_dir + str(i) + '/../testset/testset' +
                                  str(i) + '.h5'):
                    os.remove(store_dir + str(i) + '/../testset/testset' +
                              str(i) + '.h5')
                if not os.path.exists(store_dir + str(i) + '/../testset'):
                    os.mkdir(store_dir + str(i) + '/../testset')
        cachet = [
            cg('_train', self.netdict['saefile'], store_dir + str(r) + '/',
               False) for r in range(N)
        ]
        cachev = [
            cg('_valid', self.netdict['saefile'], store_dir + str(r) + '/',
               False) for r in range(N)
        ]

        if build_test:
            testh5 = [
                pyt.datapacker(store_dir + str(r) + '/../testset/testset' +
                               str(r) + '.h5') for r in range(N)
            ]

        if build_valid:
            valdh5 = [
                pyt.datapacker(store_dir + str(r) + '/../testset/valdset' +
                               str(r) + '.h5') for r in range(N)
            ]

        if rmhighe:
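            # First pass: collect per-conformer energy deviations from the SAE
            # baseline (scaled by 1/sqrt(Natoms)) to estimate the mean and
            # standard deviation used below for outlier removal.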
            dE = []
            for f in self.h5file:
                adl = pyt.anidataloader(h5d + f)
                for data in adl:
                    S = data['species']
                    E = data['energies']
                    X = data['coordinates']

                    Esae = hdt.compute_sae(self.netdict['saefile'], S)

                    dE.append((E - Esae) / np.sqrt(len(S)))

            dE = np.concatenate(dE)
            cidx = np.where(np.abs(dE) < 15.0)
            std = np.abs(dE[cidx]).std()
            men = np.mean(dE[cidx])

            print(men, std, men + std)
            idx = np.intersect1d(
                np.where(dE >= -np.abs(15 * std + men))[0],
                np.where(dE <= np.abs(11 * std + men))[0])
            cnt = idx.size
            print('DATADIST: ', dE.size, cnt, (dE.size - cnt),
                  100.0 * ((dE.size - cnt) / dE.size))

        E = []
        data_count = np.zeros((N, 3), dtype=np.int32)
        for f in self.h5file:
            print('Reading data file:', h5d + f)
            adl = pyt.anidataloader(h5d + f)
            for data in adl:
                #print(data['path'],data['energies'].size)

                S = data['species']

                if data[Ekey].size > 0 and (set(S).issubset(
                        self.netdict['atomtyp'])):

                    X = np.array(data['coordinates'],
                                 order='C',
                                 dtype=np.float32)

                    #print(np.array(data[Ekey].shape),np.sum(np.array(data[Ekey], order='C', dtype=np.float64),axis=1).shape,data[Fkey].shape)

                    if Eax0sum:
                        E = energy_unit * np.sum(np.array(
                            data[Ekey], order='C', dtype=np.float64),
                                                 axis=1)
                    else:
                        E = energy_unit * np.array(
                            data[Ekey], order='C', dtype=np.float64)

                    if forces and not grad:
                        F = forces_unit * np.array(
                            data[Fkey], order='C', dtype=np.float32)
                    elif forces and grad:
                        F = -forces_unit * np.array(
                            data[Fkey], order='C', dtype=np.float32)
                    else:
                        F = 0.0 * X

                    if rmhighe:
                        Esae = hdt.compute_sae(self.netdict['saefile'], S)

                        ind_dE = (E - Esae) / np.sqrt(len(S))

                        hidx = np.union1d(
                            np.where(ind_dE < -(15.0 * std + men))[0],
                            np.where(ind_dE > (11.0 * std + men))[0])
                        lidx = np.intersect1d(
                            np.where(ind_dE >= -(15.0 * std + men))[0],
                            np.where(ind_dE <= (11.0 * std + men))[0])

                        if hidx.size > 0:
                            print(
                                '  -(' + f + ':' + data['path'] +
                                ')High energies detected:\n    ',
                                (E[hidx] - Esae) / np.sqrt(len(S)))

                        X = X[lidx]
                        E = E[lidx]
                        F = F[lidx]

                    # Build random split index
                    ridx = np.random.randint(0, Nblocks, size=E.size)
                    Didx = [
                        np.argsort(ridx)[np.where(ridx == i)]
                        for i in range(Nblocks)
                    ]
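                    # Didx[b] holds the conformer indices assigned to block b.
                    # Each network nid draws Ntrain consecutive blocks starting
                    # at offset nid*Nstride for training, the next Nvalid for
                    # validation and the next Ntest for its test split.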

                    # Build training cache
                    for nid, cache in enumerate(cachet):
                        set_idx = np.concatenate([
                            Didx[((bid + nid * int(Nstride)) % Nblocks)]
                            for bid in range(Ntrain)
                        ])
                        if set_idx.size != 0:
                            data_count[nid, 0] += set_idx.size
                            cache.insertdata(X[set_idx], F[set_idx],
                                             E[set_idx], list(S))

                    # for nid,cache in enumerate(cachev):
                    #     set_idx = np.concatenate([Didx[((1+bid+nid*int(Nstride)) % Nblocks)] for bid in range(Ntrain)])
                    #     if set_idx.size != 0:
                    #         data_count[nid,0]+=set_idx.size
                    #         cache.insertdata(X[set_idx], F[set_idx], E[set_idx], list(S))

                    for nid, cache in enumerate(cachev):
                        set_idx = np.concatenate([
                            Didx[(Ntrain + bid + nid * int(Nstride)) % Nblocks]
                            for bid in range(Nvalid)
                        ])
                        if set_idx.size != 0:
                            data_count[nid, 1] += set_idx.size
                            cache.insertdata(X[set_idx], F[set_idx],
                                             E[set_idx], list(S))
                            if build_valid:
                                valdh5[nid].store_data(f + data['path'],
                                                       coordinates=X[set_idx],
                                                       forces=F[set_idx],
                                                       energies=E[set_idx],
                                                       species=list(S))

                    if build_test:
                        for nid, th5 in enumerate(testh5):
                            set_idx = np.concatenate([
                                Didx[(Ntrain + Nvalid + bid +
                                      nid * int(Nstride)) % Nblocks]
                                for bid in range(Ntest)
                            ])
                            if set_idx.size != 0:
                                data_count[nid, 2] += set_idx.size
                                th5.store_data(f + data['path'],
                                               coordinates=X[set_idx],
                                               forces=F[set_idx],
                                               energies=E[set_idx],
                                               species=list(S))

        # Save train and valid meta file and cleanup testh5
        for t, v in zip(cachet, cachev):
            t.makemetadata()
            v.makemetadata()

        if build_test:
            for th in testh5:
                th.cleanup()

        if build_valid:
            for vh in valdh5:
                vh.cleanup()

        print(' Train ', ' Valid ', ' Test ')
        print(data_count)
        print('Training set built.')
Example #9
 def build_training_cache(self, forces=True):
     store_dir = self.train_root + "cache-data-"
     N = self.Nn
     for i in range(N):
         if not os.path.exists(store_dir + str(i)):
             os.mkdir(store_dir + str(i))
         if os.path.exists(store_dir + str(i) + '/../testset/testset' +
                           str(i) + '.h5'):
             os.remove(store_dir + str(i) + '/../testset/testset' + str(i) +
                       '.h5')
         if not os.path.exists(store_dir + str(i) + '/../testset'):
             os.mkdir(store_dir + str(i) + '/../testset')
     cachet = [
         cg('_train', self.netdict['saefile'], store_dir + str(r) + '/',
            False) for r in range(N)
     ]
     cachev = [
         cg('_valid', self.netdict['saefile'], store_dir + str(r) + '/',
            False) for r in range(N)
     ]
     testh5 = [
         pyt.datapacker(store_dir + str(r) + '/../testset/testset' +
                        str(r) + '.h5') for r in range(N)
     ]
     Nd = np.zeros(N, dtype=np.int32)
     Nbf = 0
     for f, fn in enumerate(self.h5file):
         print(
             'Processing file(' + str(f + 1) + ' of ' +
             str(len(self.h5file)) + '):', fn)
         adl = pyt.anidataloader(self.h5dir + fn)
         To = adl.size()
         Ndc = 0
         Fmt = []
         Emt = []
         for c, data in enumerate(adl):
             Pn = data['path'] + '_' + str(f).zfill(6) + '_' + str(c).zfill(
                 6)
             # Extract the data
             X = data['coordinates']
             E = data['energies']
             S = data['species']
             # use 0.0 forces if the key doesn't exist
             if forces:
                 F = data['forces']
             else:
                 F = 0.0 * X
             Fmt.append(np.max(np.linalg.norm(F, axis=2), axis=1))
             Emt.append(E)
             Mv = np.max(np.linalg.norm(F, axis=2), axis=1)
             index = np.where(Mv > 10.5)[0]
             indexk = np.where(Mv <= 10.5)[0]
             Nbf += index.size
             # Remove high-force conformers
             X = X[indexk]
             F = F[indexk]
             E = E[indexk]
             Esae = hdt.compute_sae(self.netdict['saefile'], S)
             hidx = np.where(np.abs(E - Esae) > 10.0)
             lidx = np.where(np.abs(E - Esae) <= 10.0)
             if hidx[0].size > 0:
                 print(
                     '  -(' + str(c).zfill(3) +
                     ')High energies detected:\n    ', E[hidx])
             X = X[lidx]
             E = E[lidx]
             F = F[lidx]
             Ndc += E.size
             if (set(S).issubset(self.netdict['atomtyp'])):
                 # Random mask
                 R = np.random.uniform(0.0, 1.0, E.shape[0])
                 idx = np.array([interval(r, N) for r in R])
                 # Build random split lists
                 split = []
                 for j in range(N):
                     split.append([i for i, s in enumerate(idx) if s == j])
                     nd = len([i for i, s in enumerate(idx) if s == j])
                     Nd[j] = Nd[j] + nd
                 # Store data
                 for i, t, v, te in zip(range(N), cachet, cachev, testh5):
                     ## Store training data
                     X_t = np.array(np.concatenate(
                         [X[s] for j, s in enumerate(split) if j != i]),
                                    order='C',
                                    dtype=np.float32)
                     F_t = np.array(np.concatenate(
                         [F[s] for j, s in enumerate(split) if j != i]),
                                    order='C',
                                    dtype=np.float32)
                     E_t = np.array(np.concatenate(
                         [E[s] for j, s in enumerate(split) if j != i]),
                                    order='C',
                                    dtype=np.float64)
                     if E_t.shape[0] != 0:
                         t.insertdata(X_t, F_t, E_t, list(S))
                     ## Store Validation
                     if np.array(split[i]).size > 0:
                         X_v = np.array(X[split[i]],
                                        order='C',
                                        dtype=np.float32)
                         F_v = np.array(F[split[i]],
                                        order='C',
                                        dtype=np.float32)
                         E_v = np.array(E[split[i]],
                                        order='C',
                                        dtype=np.float64)
                         if E_v.shape[0] != 0:
                             v.insertdata(X_v, F_v, E_v, list(S))
     # Print some stats
     print('Data count:', Nd)
     print('Data split:', 100.0 * Nd / np.sum(Nd), '%')
     # Save train and valid meta file and cleanup testh5
     for t, v, th in zip(cachet, cachev, testh5):
         t.makemetadata()
         v.makemetadata()
         th.cleanup()
Example #10
N = 5

for i in range(N):
    if not os.path.exists(store_dir + str(i)):
        os.mkdir(store_dir + str(i))

if os.path.exists(wkdir + 'testset.h5'):
    os.remove(wkdir + 'testset.h5')

cachet = [
    cg('_train', saef, store_dir + str(r) + '/', False) for r in range(N)
]
cachev = [
    cg('_valid', saef, store_dir + str(r) + '/', False) for r in range(N)
]
testh5 = pyt.datapacker(wkdir + 'testset.h5')

Nd = np.zeros(N, dtype=np.int32)
Nbf = 0
for f, fn in enumerate(h5files):
    print('Processing file(' + str(f + 1) + ' of ' + str(len(h5files)) + '):',
          fn[1])
    adl = pyt.anidataloader(fn)

    To = adl.size()
    Ndc = 0
    Fmt = []
    Emt = []
    for c, data in enumerate(adl):
        #if c == 2 or c == 2 or c == 2:
        # Get test store name
Example #11
# Construct pyNeuroChem classes
print('Constructing CV network list...')
ncl =  [pync.conformers(cnstfile, saefile, wkdir + 'cv_train_' + str(l) + '/networks/', 0, False) for l in range(4)]
print('Complete.')

store_xyz = '/home/jujuman/Research/DataReductionMethods/models/cv/bad_xyz/'

svpath = '/home/jujuman/Scratch/Research/DataReductionMethods/models/ani_red_cnl_c08f.h5'
h5file = '/home/jujuman/Scratch/Research/DataReductionMethods/models/train_c08f/ani_red_c08f.h5'

# Remove file if exists
if os.path.exists(svpath):
    os.remove(svpath)

#open an HDF5 for compressed storage.
dpack = pyt.datapacker(svpath)

# Declare loader
adl = pyt.anidataloader(h5file)

Nd = 0
Nb = 0
for data in adl:
    # Extract the data
    Ea = data['energies']
    S = data['species']
    X = data['coordinates'].reshape(Ea.shape[0], len(S),3)

    El = []
    for nc in ncl:
        nc.setConformers(confs=X, types=list(S))
Example #12
N = 5

for i in range(N):
    if not os.path.exists(store_dir + str(i)):
        os.mkdir(store_dir + str(i))

    if os.path.exists(store_dir + str(i) + '/../testset/testset'+str(i)+'.h5'):
        os.remove(store_dir + str(i) + '/../testset/testset'+str(i)+'.h5')

    if not os.path.exists(store_dir + str(i) + '/../testset'):
        os.mkdir(store_dir + str(i) + '/../testset')

cachet = [cg('_train', saef, store_dir + str(r) + '/',False) for r in range(N)]
cachev = [cg('_valid', saef, store_dir + str(r) + '/',False) for r in range(N)]
testh5 = [pyt.datapacker(store_dir + str(r) + '/../testset/testset'+str(r)+'.h5') for r in range(N)]

Nd = np.zeros(N,dtype=np.int32)
Nbf = 0
for f,fn in enumerate(h5files):
    print('Processing file('+ str(f+1) +' of '+ str(len(h5files)) +'):', fn)
    adl = pyt.anidataloader(fn)

    To = adl.size()
    Ndc = 0
    Fmt = []
    Emt = []
    for c, data in enumerate(adl):
        #if c == 2 or c == 2 or c == 2:
        # Get test store name
        #Pn = fn.split('/')[-1].split('.')[0] + data['path']
Example #13
    if os.path.exists(store_dir + str(i) + '/../testset/testset' + str(i) +
                      '.h5'):
        os.remove(store_dir + str(i) + '/../testset/testset' + str(i) + '.h5')

    if not os.path.exists(store_dir + str(i) + '/../testset'):
        os.mkdir(store_dir + str(i) + '/../testset')

cachet = [
    cg('_train', saef, store_dir + str(r) + '/', False) for r in range(N)
]
cachev = [
    cg('_valid', saef, store_dir + str(r) + '/', False) for r in range(N)
]
testh5 = [
    pyt.datapacker(store_dir + str(r) + '/../testset/testset' + str(r) + '.h5')
    for r in range(N)
]

Nd = np.zeros(N, dtype=np.int32)
Nbf = 0
for f, fn in enumerate(h5files):
    print('Processing file(' + str(f + 1) + ' of ' + str(len(h5files)) + '):',
          fn)
    adl = pyt.anidataloader(fn)

    To = adl.size()
    Ndc = 0
    Fmt = []
    Emt = []
    for c, data in enumerate(adl):
Example #14
    def build_strided_training_cache(self,
                                     Nblocks,
                                     Nvalid,
                                     Ntest,
                                     build_test=True,
                                     forces=True,
                                     grad=False,
                                     Fkey='forces',
                                     forces_unit=1.0,
                                     Ekey='energies',
                                     energy_unit=1.0,
                                     Eax0sum=False):
        if not os.path.isfile(self.netdict['saefile']):
            self.sae_linear_fitting(Ekey=Ekey,
                                    energy_unit=energy_unit,
                                    Eax0sum=Eax0sum)

        h5d = self.h5dir

        store_dir = self.train_root + "cache-data-"
        N = self.Nn
        Ntrain = Nblocks - Nvalid - Ntest

        if Nblocks % N != 0:
            raise ValueError(
                'Error: number of networks must evenly divide number of blocks.'
            )

        Nstride = Nblocks / N

        for i in range(N):
            if not os.path.exists(store_dir + str(i)):
                os.mkdir(store_dir + str(i))

            if build_test:
                if os.path.exists(store_dir + str(i) + '/../testset/testset' +
                                  str(i) + '.h5'):
                    os.remove(store_dir + str(i) + '/../testset/testset' +
                              str(i) + '.h5')

                if not os.path.exists(store_dir + str(i) + '/../testset'):
                    os.mkdir(store_dir + str(i) + '/../testset')

        cachet = [
            cg('_train', self.netdict['saefile'], store_dir + str(r) + '/',
               False) for r in range(N)
        ]
        cachev = [
            cg('_valid', self.netdict['saefile'], store_dir + str(r) + '/',
               False) for r in range(N)
        ]

        if build_test:
            testh5 = [
                pyt.datapacker(store_dir + str(r) + '/../testset/testset' +
                               str(r) + '.h5') for r in range(N)
            ]

        E = []
        data_count = np.zeros((N, 3), dtype=np.int32)
        for f in self.h5file:
            adl = pyt.anidataloader(h5d + f)
            for data in adl:
                #print(data['path'],data['energies'].size)

                S = data['species']

                if data[Ekey].size > 0 and (set(S).issubset(
                        self.netdict['atomtyp'])):

                    X = np.array(data['coordinates'],
                                 order='C',
                                 dtype=np.float32)

                    if Eax0sum:
                        E = energy_unit * np.sum(np.array(
                            data[Ekey], order='C', dtype=np.float64),
                                                 axis=1)
                    else:
                        E = energy_unit * np.array(
                            data[Ekey], order='C', dtype=np.float64)

                    if forces and not grad:
                        F = forces_unit * np.array(
                            data[Fkey], order='C', dtype=np.float32)
                    elif forces and grad:
                        F = -forces_unit * np.array(
                            data[Fkey], order='C', dtype=np.float32)
                    else:
                        F = 0.0 * X

                    # Build random split index
                    ridx = np.random.randint(0, Nblocks, size=E.size)
                    Didx = [
                        np.argsort(ridx)[np.where(ridx == i)]
                        for i in range(Nblocks)
                    ]

                    # Build training cache
                    for nid, cache in enumerate(cachet):
                        set_idx = np.concatenate([
                            Didx[((bid + nid * int(Nstride)) % Nblocks)]
                            for bid in range(Ntrain)
                        ])
                        if set_idx.size != 0:
                            data_count[nid, 0] += set_idx.size
                            cache.insertdata(X[set_idx], F[set_idx],
                                             E[set_idx], list(S))

                    for nid, cache in enumerate(cachev):
                        set_idx = np.concatenate([
                            Didx[(Ntrain + bid + nid * int(Nstride)) % Nblocks]
                            for bid in range(Nvalid)
                        ])
                        if set_idx.size != 0:
                            data_count[nid, 1] += set_idx.size
                            cache.insertdata(X[set_idx], F[set_idx],
                                             E[set_idx], list(S))

                    if build_test:
                        for nid, th5 in enumerate(testh5):
                            set_idx = np.concatenate([
                                Didx[(Ntrain + Nvalid + bid +
                                      nid * int(Nstride)) % Nblocks]
                                for bid in range(Ntest)
                            ])
                            if set_idx.size != 0:
                                data_count[nid, 2] += set_idx.size
                                th5.store_data(f + data['path'],
                                               coordinates=X[set_idx],
                                               forces=F[set_idx],
                                               energies=E[set_idx],
                                               species=list(S))

        # Save train and valid meta file and cleanup testh5
        for t, v in zip(cachet, cachev):
            t.makemetadata()
            v.makemetadata()

        if build_test:
            for th in testh5:
                th.cleanup()

        print(' Train ', ' Valid ', ' Test ')
        print(data_count)
        print('Training set built.')
Example #15
    def __init__(self, hdf5files, saef, storecac, storetest):
        self.xyz = []
        self.Eqm = []
        self.spc = []
        self.idx = []
        self.prt = []

        self.kid = [] # list to track data kept

        self.nt = [] # total conformers
        self.nc = [] # total kept

        self.tf = 0

        for f in hdf5files:
            # Construct the data loader class
            adl = pyt.anidataloader(f)

            # Declare test cache
            if os.path.exists(storetest):
                os.remove(storetest)

            dpack = pyt.datapacker(storetest)

            for i, data in enumerate(adl):
                xyz = np.array_split(data['coordinates'], 10)
                eng = np.array_split(data['energies'], 10)
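                # Split each molecule's conformers into 10 chunks:
                # chunks 0-8 go to training, chunk 9 is stored as test data below.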
                spc = data['species']
                nme = data['parent']

                self.prt.append(nme)

                self.xyz.append( np.concatenate(xyz[0:9]) )
                self.Eqm.append( np.concatenate(eng[0:9]) )
                self.spc.append(spc)

                Nd = np.concatenate(eng[0:9]).shape[0]

                self.idx.append( np.arange(Nd) )
                self.kid.append( np.array([], dtype=int) )

                self.tf = self.tf + Nd

                self.nt.append(Nd)
                self.nc.append(0)

                # Prepare and store the test data set
                if xyz[9].size != 0:
                    t_xyz = xyz[9].reshape(xyz[9].shape[0], xyz[9].shape[1] * xyz[9].shape[2])
                    dpack.store_data(nme + '/mol' + str(i), coordinates=t_xyz, energies=np.array(eng[9]), species=spc)

            # Clean up
            adl.cleanup()

            # Clean up
            dpack.cleanup()

        self.nt = np.array(self.nt)
        self.nc = np.array(self.nc)

        self.ts = 0
        self.vs = 0

        self.Nbad = self.tf

        self.saef = saef
        self.storecac = storecac
Example #16
import numpy as np
import hdnntools as gt
import pyanitools as pyt
import os

lfile = '/home/jujuman/DataTesting/gdb9-2500-div-dim.h5'
sfile = '/home/jujuman/DataTesting/gdb9-2500-div-dim_35.h5'

if os.path.exists(sfile):
    os.remove(sfile)

adl = pyt.anidataloader(lfile)
dpk = pyt.datapacker(sfile)

for i,x in enumerate(adl):
    print(i)
    xyz = np.asarray(x['coordinates'],dtype=np.float32)
    erg = x['energies']
    spc = x['species']

    dpk.store_data('/gdb-09-DIV/mol'+str(i), coordinates=xyz.reshape(erg.shape[0],len(spc)*3), energies=erg, species=spc)

adl.cleanup()
dpk.cleanup()
Example #17
import numpy as np
import pyanitools as pyt
import hdnntools as hdt


def check_for_outsider(okayl, chckl):
    for i in chckl:
        if i not in okayl:
            return False
    return True


dst = "/home/jujuman/Research/ANI-DATASET/h5data/gdb9-2500-bad_new.h5"
src = "/home/jujuman/Research/ANI-DATASET/GDB-09-Data/gdb9-2500-bad.h5"

#open an HDF5 for compressed storage.
#Note that if the path exists, it will open whatever is there.
dpack = pyt.datapacker(dst)
aload = pyt.anidataloader(src)

at = [
    'H',
    'C',
    'N',
    'O',
    #'F',
    #'S',
]

for id, data in enumerate(aload.get_roman_data()):

    xyz = np.asarray(data['coordinates'], dtype=np.float32)
    erg = np.asarray(data['energies'], dtype=np.float64)
Example #18
import pyanitools as pyt
#import pyaniasetools as aat
import numpy as np
import hdnntools as hdt
import os

#import matplotlib.pyplot as plt

file_old = '/home/jsmith48/scratch/auto_al/h5files/ANI-AL-0707.0000.0408.h5'
file_new = '/home/jsmith48/scratch/auto_al/h5files_fix/ANI-AL-0707.0000.0408.h5'

print('Working on file:', file_old)
adl = pyt.anidataloader(file_old)

# Data storage
dpack = pyt.datapacker(file_new, mode='w')

for i, data in enumerate(adl):
    #if i == 20:
    #    break
    X = data['coordinates']
    S = data['species']
    Edft = data['energies']
    path = data['path']
    del data['path']
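    # Drop the non-array 'path' entry, presumably so the remaining fields can
    # later be repacked into the new file (e.g. via store_data(path, **data)).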

    #Eani, Fani = anicv.compute_energy_conformations(X=np.array(X,dtype=np.float32),S=S)

    Esae = hdt.compute_sae(
        '/home/jsmith48/scratch/auto_al/modelCNOSFCl/sae_wb97x-631gd.dat', S)