Code Example #1
 def _molecular_dynamics(self, resume=None):
     """Performs a molecular dynamics simulation, until mdmin is
     exceeded. If resuming, the file number (md%05i) is expected."""
     self._log('msg', 'Molecular dynamics: md%05i' % self._counter)
     mincount = 0
     energies, oldpositions = [], []
     thermalized = False
     if resume:
         self._log('msg', 'Resuming MD from md%05i.traj' % resume)
         if os.path.getsize('md%05i.traj' % resume) == 0:
             self._log(
                 'msg', 'md%05i.traj is empty. Resuming from '
                 'qn%05i.traj.' % (resume, resume - 1))
             atoms = io.read('qn%05i.traj' % (resume - 1), index=-1)
         else:
             images = io.Trajectory('md%05i.traj' % resume, 'r')
             for atoms in images:
                 energies.append(atoms.get_potential_energy())
                 oldpositions.append(atoms.positions.copy())
                 passedmin = self._passedminimum(energies)
                 if passedmin:
                     mincount += 1
             self._atoms.set_momenta(atoms.get_momenta())
             thermalized = True
         self._atoms.positions = atoms.get_positions()
         self._log('msg',
                   'Starting MD with %i existing energies.' % len(energies))
     if not thermalized:
         MaxwellBoltzmannDistribution(self._atoms,
                                      temp=self._temperature * units.kB,
                                      force_temp=True)
     traj = io.Trajectory('md%05i.traj' % self._counter, 'a', self._atoms)
     self._constrain()
     dyn = NPT(self._atoms,
               timestep=self._timestep * units.fs,
               temperature=self._temperature * units.kB,
               externalstress=self._externalstress,
               ttime=self._ttime * units.fs,
               pfactor=self._pfactor * units.fs**2)
     #        dyn = NPTber(self._atoms, timestep=self._timestep * units.fs, temperature=self._temperature, fixcm=True, pressure=self._pressure, taut=self._taut * units.fs, taup=self._taup * units.fs, compressibility=self._compressibility)
     log = MDLogger(dyn,
                    self._atoms,
                    'md%05i.log' % self._counter,
                    header=True,
                    stress=False,
                    peratom=False)
     dyn.attach(log, interval=1)
     dyn.attach(traj, interval=1)
     while mincount < self._mdmin:
         #            self._constrain()
         dyn.run(1)
         #            del self._atoms.constraints
         energies.append(self._atoms.get_potential_energy())
         passedmin = self._passedminimum(energies)
         if passedmin:
             mincount += 1
         oldpositions.append(self._atoms.positions.copy())
     # Reset atoms to minimum point.
     self._atoms.positions = oldpositions[passedmin[0]]
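
All of the snippets on this page revolve around the same small `ase.io.Trajectory` API. As a point of reference, here is a minimal self-contained sketch of the write/read pattern they rely on (the filename and the EMT test system are illustrative only, not taken from any of the projects):

from ase import io
from ase.build import bulk
from ase.calculators.emt import EMT

atoms = bulk('Cu', 'fcc', a=3.6, cubic=True)
atoms.set_calculator(EMT())

# Write a few images ('w' truncates, 'a' appends).
traj = io.Trajectory('example.traj', 'w')
for step in range(3):
    atoms.rattle(stdev=0.02, seed=step)
    atoms.get_potential_energy()  # attach an energy so it is stored with the image
    traj.write(atoms)
traj.close()

# Read back: a Trajectory supports len(), indexing and iteration.
traj = io.Trajectory('example.traj', 'r')
print(len(traj), traj[0].get_potential_energy())
for image in traj:
    print(image.get_potential_energy())
traj.close()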
Code Example #2
File: analysis.py  Project: MTD-group/amlt
def read_evaluation_data(
    filename,
    MLIP_cache_dir='MLIP_cache',
    struct_types=['known', 'polymorphD3', 'random'],
    dyn_types=['md', 'relax', 'sp', 'ce', 'dimer'],
    use_forces=True,
    remove_force_drift=True,
    bad_data_traj_list=[],
):

    with open(filename, 'r') as fid:
        lines = fid.readlines()

    image_pairs = []

    for line in lines:
        if '#' not in line:

            sline = line.split()
            struct_type = sline[-3]
            dyn_type = sline[-2]

            if dyn_type in dyn_types and struct_type in struct_types:
                data_file = sline[-1]
                im_index = int(sline[-4])
                if data_file in bad_data_traj_list:
                    print('%s[%i] in bad_data_traj_list, skipping...' %
                          (data_file, im_index))
                else:

                    dirlist = data_file.split('/')
                    cache_path = MLIP_cache_dir + '/' + dirlist[
                        -4] + '/' + dirlist[-3] + '/' + dirlist[-2] + '/'
                    cache_file = cache_path + '%i.traj' % im_index

                    data_traj = io.Trajectory(data_file, 'r')
                    cache_traj = io.Trajectory(cache_file, 'r')

                    data_im = data_traj[im_index]
                    cache_im = cache_traj[0]

                    if use_forces:
                        if remove_force_drift:
                            cancel_net_force(cache_im)
                            cancel_net_force(data_im)

                    image_pairs.append((cache_im, data_im, cache_path))

                    data_traj.close()
                    cache_traj.close()

    return image_pairs
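
A hedged usage sketch for read_evaluation_data above; the index filename is hypothetical, and it assumes the cached and reference trajectories store energies. Each returned tuple pairs an MLIP-cached image with its reference image:

# Hypothetical index file whose columns end with image_index, struct_type, dyn_type, data_file.
image_pairs = read_evaluation_data('evaluation_list.txt',
                                   MLIP_cache_dir='MLIP_cache',
                                   struct_types=['known', 'random'],
                                   dyn_types=['md', 'relax'])

for cache_im, data_im, cache_path in image_pairs:
    # Per-atom energy difference between the MLIP prediction and the reference data.
    de = (cache_im.get_potential_energy() - data_im.get_potential_energy()) / len(data_im)
    print(cache_path, de)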
Code Example #3
File: minimahopping.py  Project: HaoLiSky/TSASE
    def _molecular_dynamics(self, N, resume=None):
        """Performs a molecular dynamics simulation, until mdmin is
        exceeded. If resuming, the file number (md%05i) is expected."""
        self._log('msg', 'Molecular dynamics: md%05i' % self._counter)
        mincount = 0
        energies, oldpositions = [], []
        thermalized = False
        if resume:
            self._log('msg', 'Resuming MD from md%05i.traj' % resume)
	    if os.path.getsize('md.traj') == 0:
                self._log('msg', 'md%05i.traj is empty. Resuming from '
                          'qn%05i.traj.' % (resume, resume - 1))
                atoms = io.read('qn.traj', index=-1)
            else:
                images = io.Trajectory('md.traj', 'r')
                for atoms in images:
                    energies.append(atoms.get_potential_energy())
                    oldpositions.append(atoms.positions.copy())
                    passedmin = self._passedminimum(energies)
                    if passedmin:
                        mincount += 1
                self._atoms.set_momenta(atoms.get_momenta())
                thermalized = True
            self._atoms.positions = atoms.get_positions()
            self._log('msg', 'Starting MD with %i existing energies.' %
                      len(energies))
        if not thermalized:
            self.MaxwellBoltzmannDistribution(self._atoms,
                                              N,
                                              temp=self._temperature * units.kB,
                                              force_temp=True)
        if (self._counter > 1):
            os.remove('md.log')
            os.remove('md.traj')
        traj = io.Trajectory('md.traj', 'a', self._atoms)
        dyn = VelocityVerlet(self._atoms, dt=self._timestep * units.fs)
        log = MDLogger(dyn, self._atoms, 'md.log',
                       header=True, stress=False, peratom=False)
        dyn.attach(log, interval=1)
        dyn.attach(traj, interval=1)
        while mincount < self._mdmin:
            dyn.run(1)
            energies.append(self._atoms.get_potential_energy())
            passedmin = self._passedminimum(energies)
            if passedmin:
                mincount += 1
            oldpositions.append(self._atoms.positions.copy())
        # Reset atoms to minimum point.
        self._atoms.positions = oldpositions[passedmin[0]]
Code Example #4
File: minima_basin2.py  Project: HaoLiSky/TSASE
 def _molecular_dynamics(self, step, N):
     """Performs a molecular dynamics simulation, until mdmin is
     exceeded. If resuming, the file number (md%05i) is expected."""
     mincount = 0
     energies, oldpositions = [], []
     thermalized = False
     if not thermalized:
         self.MaxwellBoltzmannDistribution(N,
                                      temp=self.temperature * kB,
                                      force_temp=True)
     traj = io.Trajectory('md.traj', 'a', self.atoms)
     dyn = VelocityVerlet(self.atoms, dt=self.timestep * units.fs)
     log = MDLogger(dyn, self.atoms, 'md.log',
                    header=True, stress=False, peratom=False)
     dyn.attach(log, interval=1)
     dyn.attach(traj, interval=1)
     os.remove('md.log')
     os.remove('md.traj')
     while mincount < self.mdmin:
         dyn.run(1)
         energies.append(self.atoms.get_potential_energy())
         passedmin = self.passedminimum(energies)
         if passedmin:
             mincount += 1
         oldpositions.append(self.atoms.positions.copy())
     # Reset atoms to minimum point.
     self.atoms.positions = oldpositions[passedmin[0]]
Code Example #5
def makePDFs(trajs, n_images, output):
    """Creates PDFs of each band in the trajectory files, assuming n_images
    images per band. Then concatenates them all into a single pdf called
    <output>.pdf and deletes the source files."""
    if n_images == -1:
        n_images = guess_nimages(ase.io.read(trajs[0], index=':'))
    count = 0
    filenames = []
    tmpd = mkdtemp()  # temporary directory
    for file in trajs:
        print('Analyzing: %s' % file)
        traj = io.Trajectory(file, 'r')
        assert (len(traj) % n_images) == 0
        steps = len(traj) // n_images
        for step in range(steps):
            filenames.append(os.path.join(tmpd, 'neb_plot%04i.pdf' % count))
            indices = range(step * n_images, step * n_images + n_images)
            print('%i/%i  %s: %s' % (step + 1, steps, filenames[-1], indices))
            images = [traj[index] for index in indices]
            fig = get_NEB_plot(images)
            fig.savefig(filenames[-1])
            pyplot.close(fig)  # garbage collection
            count += 1

    command = 'pdftk ' + ('%s ' * len(filenames)) + 'cat output %s.pdf'
    command = command % tuple(filenames + [output])
    print('Combining pdfs with pdftk to %s.' % output)
    os.system(command)

    shutil.rmtree(tmpd)
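
A hedged usage sketch for the helper above; the trajectory names and band size are illustrative, and it assumes pdftk is on the PATH and that the surrounding module provides get_NEB_plot and guess_nimages:

# Hypothetical call: three NEB runs, each band containing 7 images,
# concatenated into neb_summary.pdf by pdftk.
makePDFs(['neb_run1.traj', 'neb_run2.traj', 'neb_run3.traj'],
         n_images=7, output='neb_summary')

# With n_images=-1 the helper tries to guess the band size from the
# first trajectory via guess_nimages().
makePDFs(['neb_run1.traj'], n_images=-1, output='neb_run1_summary')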
Code Example #6
def read_energy_radius_traj(file_name):

        traj = io.Trajectory(os.path.abspath(file_name),'r')
        #data = [ (im.get_volume()/len(im), im.get_potential_energy(force_consistent = True)/len(im)) for im in traj]
        data = [ (np.linalg.norm(im[0].position - im[1].position),
                im.get_potential_energy()/len(im)) for im in traj]
        traj.close()
        data = np.asarray(data).T
        smap = np.argsort(data[0])

        return np.array([data[0][smap],data[1][smap]] )
Code Example #7
def read_energy_volume_traj(file_name):
    '''Returns [V/atom,E/atom] sorted by volume/atom from traj file'''
    from ase import io
    traj = io.Trajectory(os.path.abspath(file_name), 'r')
    #data = [ (im.get_volume()/len(im), im.get_potential_energy(force_consistent = True)/len(im)) for im in traj]
    data = [(im.get_volume() / len(im), im.get_potential_energy() / len(im))
            for im in traj]
    traj.close()
    data = np.asarray(data).T
    smap = np.argsort(data[0])

    return np.array([data[0][smap], data[1][smap]])
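
A short usage sketch for read_energy_volume_traj; the eos.traj filename is a hypothetical equation-of-state scan:

import numpy as np
from matplotlib import pyplot

# Rows come back as (volume per atom, energy per atom), sorted by volume.
volumes, energies = read_energy_volume_traj('eos.traj')

fig, ax = pyplot.subplots()
ax.plot(volumes, energies, 'o-')
ax.set_xlabel('Volume per atom (A^3)')
ax.set_ylabel('Energy per atom (eV)')
fig.savefig('eos.png')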
Code Example #8
def hash_images(images, Gs, log=None, ordered=False):
    """ Converts input images -- which may be a list, a trajectory file, or
    a database -- into a dictionary indexed by their hashes.

    Returns this dictionary. If ordered is True, returns an OrderedDict. When
    duplicate images are encountered (based on encountering an identical hash),
    a warning is written to the logfile. The number of duplicates of each image
    can be accessed by examining dict_images.metadata['duplicates'], where
    dict_images is the returned dictionary.
    """
    if log is None:
        log = Logger(None)
    if images is None:
        return
    elif hasattr(images, "keys"):
        log(" %i unique images after hashing." % len(images))
        return images  # Apparently already hashed.
    else:
        # Need to be hashed, and possibly read from file.
        if isinstance(images, str):
            log("Attempting to read images from file %s." % images)
            extension = os.path.splitext(images)[1]
            from ase import io

            if extension == ".traj":
                images = io.Trajectory(images, "r")
            elif extension == ".db":
                images = [
                    row.toatoms() for row in connect(images, "db").select(None)
                ]

        # images converted to dictionary form; key is hash of image.
        log("Hashing images...", tic="hash")
        dict_images = MetaDict()
        dict_images.metadata["duplicates"] = {}
        dup = dict_images.metadata["duplicates"]
        if ordered is True:
            from collections import OrderedDict

            dict_images = OrderedDict()
        for image in images:
            hash = get_hash(image, Gs)
            if hash in dict_images.keys():
                log("Warning: Duplicate image (based on identical hash)."
                    " Was this expected? Hash: %s" % hash)
                if hash in dup.keys():
                    dup[hash] += 1
                else:
                    dup[hash] = 2
            dict_images[hash] = image
        log(" %i unique images after hashing." % len(dict_images))
        log("...hashing completed.", toc="hash")
        return dict_images
Code Example #9
 def _read_minima(self):
     """Reads in the list of minima from the minima file."""
     exists = os.path.exists(self._minima_traj)
     if exists:
         empty = os.path.getsize(self._minima_traj) == 0
         if not empty:
             traj = io.Trajectory(self._minima_traj, 'r')
             self._minima = [atoms for atoms in traj]
         else:
             self._minima = []
         return True
     else:
         self._minima = []
         return False
Code Example #10
File: completeexample.py  Project: aglgit/amp
def test():

    # Generate atomic system to create test data.
    atoms = fcc110('Cu', (2, 2, 2), vacuum=7.)
    adsorbate = Atoms([
        Atom('H', atoms[7].position + (0., 0., 2.)),
        Atom('H', atoms[7].position + (0., 0., 5.))
    ])
    atoms.extend(adsorbate)
    atoms.set_constraint(FixAtoms(indices=[0, 2]))
    calc = EMT()  # cheap calculator
    atoms.set_calculator(calc)

    # Run some molecular dynamics to generate data.
    trajectory = io.Trajectory('data.traj', 'w', atoms=atoms)
    MaxwellBoltzmannDistribution(atoms, temp=300. * units.kB)
    dynamics = VelocityVerlet(atoms, dt=1. * units.fs)
    dynamics.attach(trajectory)
    for step in range(50):
        dynamics.run(5)
    trajectory.close()

    # Train the calculator.
    train_images, test_images = randomize_images('data.traj')

    calc = Amp(descriptor=Behler(), regression=NeuralNetwork())
    calc.train(train_images, energy_goal=0.001, force_goal=None)

    # Plot and test the predictions.
    import matplotlib
    matplotlib.use('Agg')
    from matplotlib import pyplot

    fig, ax = pyplot.subplots()

    for image in train_images:
        actual_energy = image.get_potential_energy()
        predicted_energy = calc.get_potential_energy(image)
        ax.plot(actual_energy, predicted_energy, 'b.')

    for image in test_images:
        actual_energy = image.get_potential_energy()
        predicted_energy = calc.get_potential_energy(image)
        ax.plot(actual_energy, predicted_energy, 'r.')

    ax.set_xlabel('Actual energy, eV')
    ax.set_ylabel('Amp energy, eV')

    fig.savefig('parityplot.png')
Code Example #11
File: minimahopping.py  Project: HaoLiSky/TSASE
 def _plot_qn(self, index, line):
     """Plots a dashed vertical line for the optimization."""
     if line[1] == 'performing MD':
         return
     file = os.path.join(self._rundirectory, 'qn%05i.traj' % index)
     if os.path.getsize(file) == 0:
         return
     traj = io.Trajectory(file, 'r')
     energies = [traj[0].get_potential_energy(),
                 traj[-1].get_potential_energy()]
     if index > 0:
         file = os.path.join(self._rundirectory, 'md%05i.traj' % index)
         atoms = io.read(file, index=-3)
         energies[0] = atoms.get_potential_energy()
     self._ax.plot([index + 0.25] * 2, energies, ':k')
Code Example #12
 def _plot_md(self, step, line):
     """Adds a curved plot of molecular dynamics trajectory."""
     if step == 0:
         return
     energies = [self._data[step - 1][0]]
     file = os.path.join(self._rundirectory, 'md%05i.traj' % step)
     traj = io.Trajectory(file, 'r')
     for atoms in traj:
         energies.append(atoms.get_potential_energy())
     xi = step - 1 + .5
     if len(energies) > 2:
         xf = xi + (step + 0.25 - xi) * len(energies) / (len(energies) - 2.)
     else:
         xf = step
     if xf > (step + .75):
         xf = step
     self._ax.plot(np.linspace(xi, xf, num=len(energies)), energies, '-k')
Code Example #13
File: nvtb_md.py  Project: LtRainZhang/amp-python
def generate_data(count, filename='cmd.traj'):
    """Generates test or training data with a simple MD simulation."""
    if os.path.exists(filename):
        return
    traj = io.Trajectory(filename, 'w')
    atoms = io.read('2.xyz')
    atoms.set_calculator(Amp.load('sio2.amp'))
    atoms.get_potential_energy()
    traj.write(atoms)
    mb(atoms, 300. * units.kB)
    #    dyn = vv(atoms, dt=1. * units.fs, logfile='cmd.log')
    dyn = NVTB(atoms,
               timestep=0.5 * units.fs,
               temperature=300,
               taut=2.0 * units.fs,
               logfile='cmd.log')
    for step in range(count - 1):
        dyn.run(1)
        traj.write(atoms)
Code Example #14
def randomize_images(images, fraction=0.8):
    """Randomly assigns 'fraction' of the images to a training set and (1
    - 'fraction') to a test set. Returns two lists of ASE images.

    Parameters
    ----------
    images : list or str
        List of ASE atoms objects in ASE format. This can also be the path to
        an ASE trajectory (.traj) or database (.db) file.
    fraction : float
        Fraction of the images assigned to the training set.

    Returns
    -------
    train_images, test_images : list
        Lists of train and test images.
    """
    file_opened = False
    if type(images) == str:
        extension = os.path.splitext(images)[1]
        if extension == '.traj':
            images = aseio.Trajectory(images, 'r')
        elif extension == '.db':
            images = aseio.read(images)
        file_opened = True

    trainingsize = int(fraction * len(images))
    testsize = len(images) - trainingsize
    testindices = []
    while len(testindices) < testsize:
        next = np.random.randint(len(images))
        if next not in testindices:
            testindices.append(next)
    testindices.sort()
    trainindices = [
        index for index in range(len(images)) if index not in testindices
    ]
    train_images = [images[index] for index in trainindices]
    test_images = [images[index] for index in testindices]
    if file_opened:
        images.close()
    return train_images, test_images
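
A minimal usage sketch; 'data.traj' stands in for any trajectory file (for instance the one written by the MD loop in Code Example #10), and a plain list of Atoms objects works the same way:

# Split 80 % of the images into a training set and the rest into a test set.
train_images, test_images = randomize_images('data.traj', fraction=0.8)
print(len(train_images), 'training images,', len(test_images), 'test images')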
Code Example #15
def main(argv):
    tr = io.Trajectory(argv)
    return tr
Code Example #16
 def _record_minimum(self):
     """Adds the current atoms configuration to the minima list."""
     traj = io.Trajectory(self._minima_traj, 'a')
     traj.write(self._atoms)
     self._read_minima()
     self._log('msg', 'Recorded minima #%i.' % (len(self._minima) - 1))
Code Example #17
mpirun = spawn.find_executable('mpirun')
vasp = '/home/mmm0007/vasp/vasp.5.4.1/bin/vasp_std'

vasp_client = VaspClient(client_id=0,
                         npj=96,
                         ppn=1,
                         exe=vasp,
                         mpirun=mpirun,
                         parmode='mpi',
                         ibrion=13,
                         nsw=1000000,
                         npar=4,
                         **vasp_args)

if not args.no_relax:
    traj = io.Trajectory('relaxation.traj', 'a', gam_cell)
    qm_pot = SocketCalculator(vasp_client)
    gam_cell.set_calculator(qm_pot)
    opt = PreconLBFGS(gam_cell)
    opt.attach(traj.write, interval=1)
    opt.run(fmax=args.fmax)
    traj.close()
    qm_pot.shutdown()

#remove defect atom with symbol
del gam_cell[[atom.symbol == args.symbol for atom in gam_cell]]
defect_cell = gam_cell.copy()
defect_cell.write('no_impurity.xyz')

#Need accurate forces
vasp_args['ediff'] = 1e-5
Code Example #18
    while iframe >= 2000 and iframe % 2 == 0:
        print(nAtoms, file=xyz)
        print(
            'Lattice="%5.3f 0.0 0.0 %5.3f %5.3f 0.0 0.0 0.0 %5.3f" Properties=species:S:1:pos:R:3 energy=%15.8f pbc="T T T"'
            % (ax, bx, by, cz, float(out[iframe])),
            file=xyz)
        for i in range(nAtoms):
            tmp = lines[i + 1].split()
            u = float(tmp[0])
            v = float(tmp[1])
            w = float(tmp[2])
            frac_coor = np.array([[u], [v], [w]])
            cart_coor = (lattice_vector.T).dot(frac_coor)
            if i < num_ele1:
                print('%-4s %15.8f %15.8f %15.8f' %
                      (ele1, cart_coor[0], cart_coor[1], cart_coor[2]),
                      file=xyz)
            else:
                print('%-4s %15.8f %15.8f %15.8f' %
                      (ele2, cart_coor[0], cart_coor[1], cart_coor[2]),
                      file=xyz)
        break
xyz.close()

aseTrj = io.Trajectory(dirName + aseTrajFileName, 'w')
exyz = io.read(dirName + exyzFileName, index=':')
for atoms in exyz:
    atoms.set_pbc((1, 1, 1))
    aseTrj.write(atoms)
Code Example #19
File: super_cell.py  Project: MTD-group/amlt
        return atoms
    else:
        if verbose:
            print("Stucture expanded to %ix%ix%i" % tuple(cells_needed) +
                  " super cell to fit rcut = %f" % rcut)
        return super_cell(
            atoms,
            cells_needed,
            use_initial_magnetic_moments=use_initial_magnetic_moments)


if __name__ == "__main__":

    from ase import io

    traj = io.Trajectory('test_structures/AFM_Cr.traj', 'r')
    Cr_atoms = traj[0]
    traj.close()

    ZrO2_atoms = io.read('test_structures/ZrO2.OUTCAR')

    #tlins_atoms = io.read('test_structures/TlInS2_mp-632539_primitive.cif')

    tests = [Cr_atoms, ZrO2_atoms]

    for atoms in tests:
        super_cell(atoms, [1, 1, 2])

        #calc = atoms.get_calculator()
        #print(calc)
        #if calc != None:
Code Example #20
    """
    Imports a pdb
    """
    filen = open(file_name, 'r')
    orig = np.identity(3)
    trans = np.zeros(3)

    for line in filen.readlines():
        if line.startswith('ATOM') or line.startswith('HETATM'):
            try:
                # Only dealing with H, C, O and N so single letter for symbol
                symbol = line[13].strip().lower().capitalize()
                words = line[30:55].split()
                position = np.array(
                    [float(words[0]),
                     float(words[1]),
                     float(words[2])])
                position = np.dot(orig, position) + trans
                atoms.append(Atom(symbol, position))
            except:
                pass
    return atoms


traj = io.Trajectory(filename='./common/kr_geoms.traj', mode='w')

for i in range(1, 71):
    atoms = Atoms()
    atoms = import_pdb(atoms, '../common/{}-as.pdb'.format(i))
    traj.write(atoms=atoms)
Code Example #21
from ase import io
from ase.neb import NEB
from ase.optimize import MDMin, BFGS
from ase.calculators.dftb import Dftb 
import copy

calc = Dftb(label='neb_path1',
            Hamiltonian_MaxAngularMomentum_='',
            Hamiltonian_MaxAngularMomentum_O='"p"',
            Hamiltonian_MaxAngularMomentum_H='"s"',
            Hamiltonian_MaxAngularMomentum_C='"p"',
            Hamiltonian_MaxAngularMomentum_N='"p"')

all = io.Trajectory('../common/kr_geoms.traj')
initial = all[0]
final = all[6]
#initial.set_calculator(calc=calc)
#final.set_calculator(calc=calc)

#print(len(all))
#print(initial.get_positions())
#print(final.get_positions())
#images = [all[0]]
#for i in range(1,7):
#    images += [all[i]]

images = [initial]
images += [copy.deepcopy(initial) for i in range(3)]
images += [final]

neb = NEB(images, k=0.01)
Code Example #22
File: discon.py  Project: JaniceLC/aseplayground
args = parser.parse_args()
from os import listdir


def list_of_files(dir_name, suffix):
    return [f for f in listdir(dir_name) if f.endswith('.' + suffix)]


args = parser.parse_args()
ftraj = list_of_files(args.lm, 'traj')
minima = []
for i in range(len(ftraj)):
    empty = os.path.getsize(args.lm + "/" + ftraj[i]) == 0
    if not empty:
        traj = io.Trajectory(args.lm + "/" + ftraj[i], 'r')
        mini = [atoms for atoms in traj]
        mini = mini[-1:]  # keep only the final image of this trajectory
    else:
        print('no minima found in ', ftraj[i])
        mini = []
    minima = minima + mini

nminima = len(minima)

# load minima

nminima = len(minima)
print('# minima: ', nminima)
sep = math.floor(nminima / args.nmin)
minima = minima[-1::-sep]
Code Example #23
File: utilities.py  Project: ssrokyz/clone-of-0.6.1
def hash_images(images, log=None, ordered=False, list_seq=False):
    """ Converts input images -- which may be a list, a trajectory file, or
    a database -- into a dictionary indexed by their hashes.

    Returns this dictionary. If ordered is True, returns an OrderedDict. When
    duplicate images are encountered (based on encountering an identical hash),
    a warning is written to the logfile. The number of duplicates of each image
    can be accessed by examining dict_images.metadata['duplicates'], where
    dict_images is the returned dictionary.
    """
    if log is None:
        log = Logger(None)
    if images is None:
        return
    ############# images already have keys
    elif hasattr(images, 'keys'):
        log(' %i unique images after hashing.' % len(images))
        return images  # Apparently already hashed.
    ############ else
    else:
        ########## Need to be hashed, and possibly read from file.
        if isinstance(images, str):
            log('Attempting to read images from file %s.' % images)
            extension = os.path.splitext(images)[1]
            from ase import io
            if extension == '.traj':
                images = io.Trajectory(images, 'r')
            elif extension == '.db':
                images = [
                    row.toatoms() for row in connect(images, 'db').select(None)
                ]

        ####### images converted to dictionary form;
        #########    key is hash of image.
        ####### i.e. hash is keys for images
        log('Hashing images...', tic='hash')
        dict_images = MetaDict()
        dict_images.metadata['duplicates'] = {}
        dup = dict_images.metadata['duplicates']
        if ordered is True:
            from collections import OrderedDict
            dict_images = OrderedDict()
        if list_seq:
            seq_list = []

        for image in images:
            ########### get_hash make hash for an atom object
            hash = get_hash(image)
            if list_seq:
                seq_list.append(hash)
            if hash in dict_images.keys():
                log('Warning: Duplicate image (based on identical hash).'
                    ' Was this expected? Hash: %s' % hash)
                if hash in dup.keys():
                    dup[hash] += 1
                else:
                    dup[hash] = 2
            dict_images[hash] = image
        log(' %i unique images after hashing.' % len(dict_images))
        log('...hashing completed.', toc='hash')
        if list_seq:
            return dict_images, seq_list
        else:
            return dict_images
Code Example #24
File: utils.py  Project: MTD-group/amlt
def get_traj_file_list(basedirs=[''],
                       traj_skip=1,
                       traj_skip_offset=0,
                       traj_offset=0,
                       struct_types=['random', 'known', 'polymorphD3'],
                       dyn_types=['md', 'relax', 'sp', 'ce', 'dimer'],
                       return_image_and_atom_counts=False):

    from ase import io
    from glob import glob
    import time
    import os
    import numpy as np

    def composition_str(sorted_elements, counts):
        comp_format = '( '
        for el, cnt in zip(sorted_elements, counts):
            comp_format = comp_format + el + '_%i ' % cnt
        comp_format = comp_format + ')'

        return comp_format

    #def remove_force_drift(atoms):
    #    forces = atoms.calc.results['forces']
    #    drift = np.sum(forces,  axis = 0)/len(atoms)
    #    atoms.calc.results['forces'] = forces-drift
    #    #return drift

    element_set = set()

    traj_file_list = []
    image_and_atom_counts = []
    total = 0
    total_atoms = 0
    time1 = time.time()
    for basedir in basedirs:
        for struct_type in struct_types:
            for dyn_type in dyn_types:
                top_direct = os.path.abspath(
                    basedir) + ('/%s_%s/') % (struct_type, dyn_type)
                #print(top_direct)
                if os.path.isdir(top_direct):
                    print(top_direct)
                    sub_total = 0
                    sub_dirs = sorted(glob(top_direct + '*/'))
                    #file_list.sort()
                    sub_dirs.sort(key=lambda x: len(x))
                    for sub_dir in sub_dirs:
                        name = sub_dir.split('/')[-2]
                        if name.isdigit():

                            if int(name) >= traj_offset and int(
                                    name) % traj_skip == traj_skip_offset:
                                traj = io.Trajectory(filename=sub_dir +
                                                     'images.traj',
                                                     mode='r')  #trying a read
                                traj_file_list.append(sub_dir + 'images.traj')

                                if True:  #we'll need a conditional here related to un needed parsing for making this general
                                    print(sub_dir.ljust(22), end='')
                                    # for printing the compositions
                                    symbols = traj[0].get_chemical_symbols()
                                    element_set.update(symbols)
                                    sorted_elements = sorted(list(element_set))
                                    comp = [
                                        symbols.count(el)
                                        for el in sorted_elements
                                    ]
                                    comp_format = composition_str(
                                        sorted_elements, comp)

                                    image_and_atom_counts.append(
                                        (len(traj), len(traj[0])))

                                    subsub_total = len(traj)
                                    total_atoms += len(traj) * len(traj[0])
                                    print(  (' atoms %i'%len(traj[0])).ljust(12) + \
                                            ('images found %i '%subsub_total).ljust(24) + \
                                            comp_format )

                                sub_total += len(traj)
                                traj.close()
                    # subtotal for this struct+dyn_type
                    print('sub_total: %i \n' % sub_total)
                    total += sub_total

    print('Total Number of Images:', total)
    print('Total Atoms: %i' % total_atoms)
    print('Time for file parsing is: {:.3f} sec.'.format(time.time() - time1))

    if return_image_and_atom_counts:
        return traj_file_list, np.array(image_and_atom_counts)
    else:
        return traj_file_list
Code Example #25
File: utils.py  Project: MTD-group/amlt
def get_image_list(basedirs=[''],
                   image_skip=2,
                   image_skip_offset=0,
                   traj_skip=1,
                   traj_skip_offset=0,
                   image_offset=0,
                   traj_offset=0,
                   struct_types=['random', 'known', 'polymorphD3'],
                   dyn_types=['md', 'relax', 'sp', 'ce', 'dimer'],
                   max_energy_per_atom=None,
                   max_force_on_atom=None,
                   max_energy_deviation_per_atom=None,
                   remove_force_drift_in_training_data=True,
                   return_file_paths=False):

    from ase import io
    from glob import glob
    import time
    import os
    import numpy as np

    def composition_str(sorted_elements, counts):
        comp_format = '( '
        for el, cnt in zip(sorted_elements, counts):
            comp_format = comp_format + el + '_%i ' % cnt
        comp_format = comp_format + ')'

        return comp_format

    def remove_force_drift(atoms):
        forces = atoms.calc.results['forces']
        drift = np.sum(forces, axis=0) / len(atoms)
        atoms.calc.results['forces'] = forces - drift
        #return drift

    element_set = set()

    image_list = []
    file_path_list = []
    total = 0
    time1 = time.time()
    for basedir in basedirs:
        for struct_type in struct_types:
            for dyn_type in dyn_types:
                top_direct = os.path.abspath(
                    basedir) + ('/%s_%s/') % (struct_type, dyn_type)
                #if dyn_type == 'ce':

                #print(top_direct)
                if os.path.isdir(top_direct):
                    print(top_direct)
                    sub_total = 0
                    sub_dirs = sorted(glob(top_direct + '*/'))
                    #file_list.sort()
                    sub_dirs.sort(key=lambda x: len(x))
                    for sub_dir in sub_dirs:
                        name = sub_dir.split('/')[-2]
                        if name.isdigit():

                            if int(name) >= traj_offset and int(
                                    name) % traj_skip == traj_skip_offset:
                                traj = io.Trajectory(filename=sub_dir +
                                                     'images.traj',
                                                     mode='r')
                                subsub_total = 0

                                if dyn_type == 'ce':
                                    ce_data = np.loadtxt(sub_dir + 'ce.log',
                                                         skiprows=2).T
                                    ce_endev = ce_data[-1]

                                print(sub_dir.ljust(22), end='')
                                # for printing the compositions
                                symbols = traj[0].get_chemical_symbols()
                                element_set.update(symbols)
                                sorted_elements = sorted(list(element_set))
                                comp = [
                                    symbols.count(el) for el in sorted_elements
                                ]
                                comp_format = composition_str(
                                    sorted_elements, comp)

                                for image_index in range(len(traj)):
                                    image = traj[image_index]

                                    #if image.get_potential_energy()/len(image) <= max_energy_per_atom:

                                    training_image = False

                                    if struct_type == 'known':
                                        training_image = True
                                    #if (int(name)%trajskip) == trajskip_offset:
                                    if image_index >= image_offset and (
                                        (image_index) %
                                            image_skip) == image_skip_offset:
                                        training_image = True

                                    if training_image:
                                        ## now we test for unreasonably high energies/forces
                                        ## if thought we'd include the structure
                                        if max_energy_per_atom is not None:
                                            energy_per_atom = image.get_potential_energy(
                                            ) / len(image)
                                            if energy_per_atom > max_energy_per_atom:
                                                training_image = False
                                                print('image', image_index,
                                                      'Energy too high:',
                                                      energy_per_atom)

                                        if max_force_on_atom is not None:
                                            max_force = np.linalg.norm(
                                                image.get_forces(),
                                                axis=1).max()
                                            if max_force > max_force_on_atom:
                                                training_image = False
                                                print('image', image_index,
                                                      'Max Force too high:',
                                                      max_force)
                                        if dyn_type == 'ce' and max_energy_deviation_per_atom is not None:
                                            if abs(
                                                    ce_endev[image_index]
                                            ) > max_energy_deviation_per_atom:
                                                training_image = False
                                                print(
                                                    'image', image_index,
                                                    'CE energy deviation too high:',
                                                    ce_endev[image_index])
                                    #if (int(name) in bad_polymorphs) and struct_type == 'polymorphD3' :
                                    #    training_image = False

                                    if training_image:
                                        #print ('adding image no', image_index)
                                        if remove_force_drift_in_training_data:
                                            remove_force_drift(image)
                                        image_list.append(image)
                                        file_path_list.append([
                                            image_index, struct_type, dyn_type,
                                            sub_dir + 'images.traj'
                                        ])
                                        subsub_total += 1
                                        sub_total += 1
                                        total += 1

                                print( (' atoms %i'%len(traj[0])).ljust(12) +\
                                        ('images loaded %i/%i '%(subsub_total, len(traj))).ljust(24) +comp_format )

                                traj.close()
                    # subtotal for this struct+dyn_type
                    print('sub_total: %i \n' % sub_total)

    sum_total_atoms = 0
    for image in image_list:
        sum_total_atoms += len(image)

    print('Total Number of Images:', total)
    print('Total Atoms: %i' % sum_total_atoms)
    print('Time for file parsing is: {:.3f} sec.'.format(time.time() - time1))

    if return_file_paths:
        return image_list, file_path_list
    else:
        return image_list
Code Example #26
    },  #convergence parameters
    dipole={'status':
            True},  #dipole correction to account for periodicity in z
    output={
        'avoidio': False,
        'removewf': True,
        'wf_collect': False
    },
    spinpol=False,
    parflags='-npool 2',
    outdir='calcdir')  #output directory for Quantum Espresso files

#calc.calculation_required = lambda x, y: True
slab.set_calculator(calc)
slab.get_potential_energy()
traj = io.Trajectory('opt.traj', 'w')
traj.write(slab)
#qn=QuasiNewton(slab,trajectory='opt.traj',logfile='opt.log')
#qn.run(fmax=0.03)
calc.save_flev_chg('efchg.tgz')
potential = calc.extract_total_potential()

potential_file = open('potential.pickle', 'w')
pickle.dump(potential, potential_file)
potential_file.close()

posin = io.read('opt.traj')
p = posin.copy()
p.calc = calc
p.calc.load_flev_chg('efchg.tgz')
dos = calc.calc_pdos(nscf=True,
Code Example #27
image_list = []
total = 0
time1 = time.time()
for struct_type in struct_types:
    for dyn_type in dyn_types:
        top_direct = ('%s_%s/') % (struct_type, dyn_type)
        if path.isdir(top_direct):
            print(top_direct)
            sub_total = 0
            sub_dirs = sorted(glob(top_direct + '*/'))
            for sub_dir in sub_dirs:
                name = sub_dir.split('/')[-2]
                if name.isdigit():

                    traj = io.Trajectory(filename=sub_dir + 'images.traj',
                                         mode='r')

                    image_index = -1
                    image = traj[image_index]

                    verbose = False

                    #                    im = super_cell_if_needed(image, cut_off_radius, verbose=verbose)
                    im = image

                    print(
                        sub_dir.ljust(22) +
                        ('atoms %i' % len(traj[0])).ljust(12) +
                        'images loaded %i/%i' % (1, len(traj)))
                    DFT_e = image.get_potential_energy() / len(image)
                    NN_e = MLIP.get_potential_energy(im) / len(im)
Code Example #28
def plot_error(load,
               images,
               plot_forces=True,
               plotfile='errorplot.pdf',
               color='b.',
               overwrite=False):
    """
    Makes a plot of deviations in per atom energies and forces versus real
    energies and forces.

    :param load: Path for loading an existing parameters of Amp calculator.
    :type load: str
    :param images: List of ASE atoms objects with positions, symbols, energies,
                   and forces in ASE format. This is the training set of data.
                   This can also be the path to an ASE trajectory (.traj) or
                   database (.db) file. Energies can be obtained from any
                   reference, e.g. DFT calculations.
    :type images: list or str
    :param plot_forces: Determines whether or not forces should be plotted as
                        well.
    :type plot_forces: bool
    :param plotfile: File for plots.
    :type plotfile: Object
    :param color: Plot color.
    :type color: str
    :param overwrite: If a plot or a script containing values already exists,
                      overwrite it.
    :type overwrite: bool
    """
    base_filename = os.path.splitext(plotfile)[0]
    energyscript = os.path.join('energy-' + base_filename + '.json')

    if (not overwrite) and os.path.exists(plotfile):
        raise IOError('File exists: %s.\nIf you want to overwrite,'
                      ' set overwrite=True or manually delete.' % plotfile)

    if plot_forces is not None:
        forcescript = os.path.join('force-' + base_filename + '.json')

    from . import Amp
    from utilities import hash_image
    from matplotlib import rc
    # activate latex text rendering
    rc('text', usetex=True)

    calc = Amp(load=load)

    if isinstance(images, str):
        extension = os.path.splitext(images)[1]
        if extension == '.traj':
            images = io.Trajectory(images, 'r')
        elif extension == '.db':
            images = io.read(images)

    # Images is converted to dictionary form; key is hash of image.
    dict_images = {}
    for image in images:
        hash = hash_image(image)
        dict_images[hash] = image
    images = dict_images.copy()
    del dict_images
    hashs = sorted(images.keys())
    no_of_images = len(hashs)

    energy_data = {}
    # Reading energy script
    try:
        fp = paropen(energyscript, 'rb')
        data = json.load(fp)
    except IOError:
        pass
    else:
        for hash in data.keys():
            energy_data[hash] = data[hash]

    # calculating errors for images if json is not found
    if len(energy_data.keys()) == 0:
        count = 0
        while count < no_of_images:
            hash = hashs[count]
            atoms = images[hash]
            no_of_atoms = len(atoms)
            act_energy = atoms.get_potential_energy(apply_constraint=False)
            amp_energy = calc.get_potential_energy(atoms)
            energy_error = abs(amp_energy - act_energy) / no_of_atoms
            act_energy_per_atom = act_energy / no_of_atoms
            energy_data[hash] = [act_energy_per_atom, energy_error]
            count += 1
        # saving energy script
        try:
            json.dump(energy_data, energyscript)
            energyscript.flush()
            return
        except AttributeError:
            with paropen(energyscript, 'wb') as outfile:
                json.dump(energy_data, outfile)

    # calculating energy per atom rmse
    energy_square_error = 0.
    count = 0
    while count < no_of_images:
        hash = hashs[count]
        energy_square_error += energy_data[hash][1]**2.
        count += 1
    del hash

    energy_per_atom_rmse = np.sqrt(energy_square_error / no_of_images)

    min_act_energy = min([energy_data[hash][0] for hash in hashs])
    max_act_energy = max([energy_data[hash][0] for hash in hashs])

    if plot_forces is None:
        fig = pyplot.figure(figsize=(5., 5.))
        ax = fig.add_subplot(111)
    else:
        fig = pyplot.figure(figsize=(5., 10.))
        ax = fig.add_subplot(211)

    # energy plot
    count = 0
    while count < no_of_images:
        hash = hashs[count]
        ax.plot(energy_data[hash][0], energy_data[hash][1], color)
        count += 1
    # draw horizontal line for rmse
    ax.plot(
        [min_act_energy, max_act_energy],
        [energy_per_atom_rmse, energy_per_atom_rmse],
        color='black',
        linestyle='dashed',
        lw=1,
    )
    ax.text(max_act_energy,
            energy_per_atom_rmse,
            'energy rmse = %6.5f' % energy_per_atom_rmse,
            ha='right',
            va='bottom',
            color='black')

    ax.set_xlabel(r"\textit{ab initio} energy (eV) per atom")
    ax.set_ylabel(
        r"$|$\textit{ab initio} energy - \textit{Amp} energy$|$ / number of atoms"
    )
    ax.set_title("Energies")

    if plot_forces:

        force_data = {}
        # Reading force script
        try:
            fp = paropen(forcescript, 'rb')
            data = json.load(fp)
        except IOError:
            pass
        else:
            hashs = data.keys()
            no_of_images = len(hashs)
            count0 = 0
            while count0 < no_of_images:
                hash = hashs[count0]
                force_data[hash] = {}
                indices = data[hash].keys()
                len_of_indices = len(indices)
                count1 = 0
                while count1 < len_of_indices:
                    index = indices[count1]
                    force_data[hash][int(index)] = {}
                    ks = data[hash][index].keys()
                    len_of_ks = len(ks)
                    count2 = 0
                    while count2 < len_of_ks:
                        k = ks[count2]
                        force_data[hash][int(index)][int(k)] = \
                            data[hash][index][k]
                        count2 += 1
                    count1 += 1
                count0 += 1

        # calculating errors for images if json is not found
        if len(force_data.keys()) == 0:
            count = 0
            while count < no_of_images:
                hash = hashs[count]
                atoms = images[hash]
                no_of_atoms = len(atoms)
                force_data[hash] = {}
                act_force = atoms.get_forces(apply_constraint=False)
                atoms.set_calculator(calc)
                amp_force = calc.get_forces(atoms)
                index = 0
                while index < no_of_atoms:
                    force_data[hash][index] = {}
                    k = 0
                    while k < 3:
                        force_data[hash][index][k] = \
                            [act_force[index][k],
                             abs(amp_force[index][k] - act_force[index][k])]
                        k += 1
                    index += 1
                count += 1

            # saving force script
            try:
                json.dump(force_data, forcescript)
                forcescript.flush()
                return
            except AttributeError:
                with paropen(forcescript, 'wb') as outfile:
                    json.dump(force_data, outfile)

        # calculating force rmse
        force_square_error = 0.
        count = 0
        while count < no_of_images:
            hash = hashs[count]
            atoms = images[hash]
            no_of_atoms = len(atoms)
            index = 0
            while index < no_of_atoms:
                k = 0
                while k < 3:
                    force_square_error += \
                        ((1.0 / 3.0) * force_data[hash][index][k][1] ** 2.) / \
                        no_of_atoms
                    k += 1
                index += 1
            count += 1
        del hash, index, k

        force_rmse = np.sqrt(force_square_error / no_of_images)

        min_act_force = min([
            force_data[hash][index][k][0] for hash in hashs
            for index in range(len(images[hash])) for k in range(3)
        ])
        max_act_force = max([
            force_data[hash][index][k][0] for hash in hashs
            for index in range(len(images[hash])) for k in range(3)
        ])

        ##############################################################
        # force plot
        ax = fig.add_subplot(212)

        count = 0
        while count < no_of_images:
            hash = hashs[count]
            atoms = images[hash]
            no_of_atoms = len(atoms)
            index = 0
            while index < no_of_atoms:
                k = 0
                while k < 3:
                    ax.plot(force_data[hash][index][k][0],
                            force_data[hash][index][k][1], color)
                    k += 1
                index += 1
            count += 1
        # draw horizontal line for rmse
        ax.plot(
            [min_act_force, max_act_force],
            [force_rmse, force_rmse],
            color='black',
            linestyle='dashed',
            lw=1,
        )
        ax.text(
            max_act_force,
            force_rmse,
            'force rmse = %5.4f' % force_rmse,
            ha='right',
            va='bottom',
            color='black',
        )

        ax.set_xlabel(r"\textit{ab initio} force, eV/\AA")
        ax.set_ylabel(r"$|$\textit{ab initio} force - \textit{Amp} force$|$")
        ax.set_title(r"Forces")

        ##############################################################

    fig.savefig(plotfile)
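
A hedged usage sketch for plot_error; both filenames below are hypothetical placeholders for a trained Amp parameter file and a reference trajectory:

# Compare a trained Amp calculator against reference data and write an error plot.
plot_error(load='trained-amp-parameters.json',
           images='reference_data.traj',
           plot_forces=True,
           plotfile='errorplot.pdf',
           overwrite=True)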
Code Example #29
total = 0
#time1 = time.time()
for struct_type in struct_types:
    for dyn_type in dyn_types:
        top_direct = ('%s_%s/') % (struct_type, dyn_type)
        if os.path.isdir(top_direct):
            print(top_direct)
            sub_total = 0
            sub_dirs = sorted(glob(top_direct + '*/'))
            for sub_dir in sub_dirs:
                name = sub_dir.split('/')[-2]
                if name.isdigit():
                    #print()
                    traj = io.Trajectory(filename=sub_dir +
                                         'images_supercell.traj',
                                         mode='r')
                    subsub_total = 0
                    #for image in traj:
                    for image_index in range(len(traj)):
                        image = traj[image_index]
                        if int(name) % 2 == 0:
                            #if image_index >= 0 and image_index%1 == 0:
                            train_images.append(image)
                        elif struct_type == 'known':
                            train_images.append(image)
                        else:
                            test_images.append(image)
                        subsub_total += 1

                    print(
Code Example #30
File: Na2_relax_excited.py  Project: thonmaker/gpaw
width = 0.01  # Fermi width
nbands = 6  # bands in GS calculation
nconv = 4  # bands in GS calculation to converge
R = 2.99  # starting distance
iex = 1  # excited state index
d = 0.01  # step for numerical force evaluation
exc = 'LDA'  # xc for the linear response TDDFT kernel

s = Cluster([Atom('Na'), Atom('Na', [0, 0, R])])
s.minimal_box(box, h=h)

c = GPAW(h=h,
         nbands=nbands,
         eigensolver='cg',
         occupations=FermiDirac(width=width),
         setups={'Na': '1'},
         convergence={'bands': nconv})
c.calculate(s)
lr = LrTDDFT(c, xc=exc, eps=0.1, jend=nconv - 1)

ex = ExcitedState(lr, iex, d=d)
s.set_calculator(ex)

ftraj = 'relax_ex' + str(iex)
ftraj += '_box' + str(box) + '_h' + str(h)
ftraj += '_d' + str(d) + '.traj'
traj = io.Trajectory(ftraj, 'w', s)
dyn = optimize.FIRE(s)
dyn.attach(traj.write)
dyn.run(fmax=0.05)