Exemple #1
0
def write_xyz(filename, obj, name='pwtools_dummy_mol_name'):
    """Write VMD-style [VMD] XYZ file.

    length: Angstrom

    Parameters
    ----------
    filename : target file name
    obj : Trajectory or Structure
    name : str, optional
        Molecule name.

    References
    ----------
    [VMD] http://www.ks.uiuc.edu/Research/vmd/plugins/molfile/xyzplugin.html
    """
    traj = crys.struct2traj(obj)
    # Collect one "natoms / comment / coords" record per step and join once,
    # avoiding repeated string concatenation.
    records = []
    for step_idx in range(traj.nstep):
        header = "%i\n%s\n" % (traj.natoms, "%s.%i" % (name, step_idx + 1))
        body = pwscf.atpos_str_fast(traj.symbols, traj.coords[step_idx, ...])
        records.append(header + body)
    common.file_write(filename, "".join(records))
Exemple #2
0
    def write_input(self, atoms, properties=None, system_changes=None):
        """Write the pw.x input file for `atoms`.

        Converts the ASE Atoms object to a pwtools Structure, fills in cell,
        k-points, pseudopotential list and atomic positions, then renders the
        input template to ``self.infile``.  Existing in/out files are backed
        up first when ``self.backup`` is set.
        """
        FileIOCalculator.write_input(self, atoms, properties, system_changes)
        struct = crys.atoms2struct(atoms)
        self.cell = common.str_arr(struct.cell)
        self.kpoints = pwscf.kpoints_str_pwin(kpts2mp(atoms, self.kpts))
        # BUG FIX: a single pseudo suffix is a str in Python 3, not bytes
        # (`bytes` here is a botched 2to3 of the old `types.StringType`
        # check); with the bytes test a str suffix fell into the per-type
        # branch below and tripped the length assert.
        if isinstance(self.pp, str):
            # one common suffix for all species: 'pbe-...' -> 'Si.pbe-...'
            pseudos = [
                "%s.%s" % (sym, self.pp) for sym in struct.symbols_unique
            ]
        else:
            # explicit list: one pseudopotential file name per atom type,
            # matched to the species by filename prefix
            assert len(self.pp) == struct.ntypat
            pseudos = []
            for sym in struct.symbols_unique:
                for ppi in self.pp:
                    if ppi.startswith(sym):
                        pseudos.append(ppi)
                        break
        assert len(pseudos) == struct.ntypat
        self.atspec = pwscf.atspec_str(symbols=struct.symbols_unique,
                                       masses=struct.mass_unique,
                                       pseudos=pseudos)
        self.atpos = pwscf.atpos_str(symbols=struct.symbols,
                                     coords=struct.coords_frac)
        self.natoms = struct.natoms
        self.ntyp = struct.ntypat

        if self.backup:
            for fn in [self.infile, self.outfile]:
                if os.path.exists(fn):
                    common.backup(fn)
        common.file_write(self.infile, self.fill_infile_templ())
Exemple #3
0
def write_xyz(filename, obj, name='pwtools_dummy_mol_name'):
    """Write VMD-style [VMD] XYZ file.

    length: Angstrom

    Parameters
    ----------
    filename : target file name
    obj : Trajectory or Structure
    name : str, optional
        Molecule name.

    References
    ----------
    [VMD] http://www.ks.uiuc.edu/Research/vmd/plugins/molfile/xyzplugin.html
    """
    traj = crys.struct2traj(obj)
    # One XYZ frame per trajectory step: atom count, comment line, coords.
    frames = []
    for step in range(traj.nstep):
        frames.append("%i\n%s.%i\n%s" % (
            traj.natoms,
            name,
            step + 1,
            pwscf.atpos_str_fast(traj.symbols, traj.coords[step, ...]),
        ))
    common.file_write(filename, "".join(frames))
Exemple #4
0
    def _fit(self):
        # volume[Bohr^3] etot[Ha] for eos.x
        volume = self.volume*(Ang**3.0 / Bohr**3.0)
        energy = self.energy*(eV / Ha)
        data = np.array([volume, energy]).T
        infn_txt =\
        """
%s
%i
%i
%f,  %f,  %i
%i
%s
        """%(self.name, 
             self.natoms, 
             self.etype, 
             volume[0], volume[-1], self.npoints,
             len(volume), 
             common.str_arr(data))
        common.file_write(self.infn, infn_txt)
        out = common.backtick('cd %s && %s' %(self.dir, self.app_basename))
        if self.verbose:
            print out
            print(open(os.path.join(self.dir,'PARAM.OUT')).read())
        # Remove normalization on natoms. See .../eos/output.f90:
        # fitev: [volume [Bohr^3] / natoms, energy [Ha] / natoms]
        # fitpv: [volume [Bohr^3] / natoms, pressure [GPa]]
        fitev = np.loadtxt(os.path.join(self.dir,'EVPAI.OUT')) * self.natoms
        # convert energy back to [Ang^3, eV]
        fitev[:,0] *= (Bohr**3 / Ang**3)
        fitev[:,1] *= (Ha / eV)
        self.ev = fitev
        fitpv = np.loadtxt(os.path.join(self.dir,'PVPAI.OUT'))
        fitpv[:,0] *= (self.natoms * Bohr**3 / Ang**3)
        self.pv = fitpv
Exemple #5
0
 def write_input(self, atoms, properties=None, system_changes=None):
     """Write the pw.x input file for `atoms`.

     Fills cell, k-points, pseudopotential list and atomic positions from
     the ASE Atoms object, backs up existing in/out files when
     ``self.backup`` is set, and renders the input template.
     """
     FileIOCalculator.write_input(self, atoms, properties,
                                  system_changes)
     struct = crys.atoms2struct(atoms)
     self.cell = common.str_arr(struct.cell)
     self.kpoints = pwscf.kpoints_str_pwin(kpts2mp(atoms, self.kpts))
     # BUG FIX: ``types.StringType`` was removed in Python 3; a plain
     # ``str`` check keeps the "single pseudo suffix" branch working.
     if isinstance(self.pp, str):
         pseudos = ["%s.%s" %(sym, self.pp) for sym in struct.symbols_unique]
     else:
         # explicit list: one pseudopotential file per atom type, matched
         # to the species by filename prefix
         assert len(self.pp) == struct.ntypat
         pseudos = []
         for sym in struct.symbols_unique:
             for ppi in self.pp:
                 if ppi.startswith(sym):
                     pseudos.append(ppi)
                     break
     assert len(pseudos) == struct.ntypat
     self.atspec = pwscf.atspec_str(symbols=struct.symbols_unique,
                                    masses=struct.mass_unique,
                                    pseudos=pseudos)
     self.atpos = pwscf.atpos_str(symbols=struct.symbols,
                                  coords=struct.coords_frac)
     self.natoms = struct.natoms
     self.ntyp = struct.ntypat

     if self.backup:
         for fn in [self.infile, self.outfile]:
             if os.path.exists(fn):
                 common.backup(fn)
     common.file_write(self.infile, self.fill_infile_templ())
Exemple #6
0
    def _fit(self):
        # volume[Bohr^3] etot[Ha] for eos.x
        volume = self.volume * (Ang**3.0 / Bohr**3.0)
        energy = self.energy * (eV / Ha)
        data = np.array([volume, energy]).T
        infn_txt =\
        """
%s
%i
%i
%f,  %f,  %i
%i
%s
        """%(self.name,
             self.natoms,
             self.etype,
             volume[0], volume[-1], self.npoints,
             len(volume),
             common.str_arr(data))
        common.file_write(self.infn, infn_txt)
        out = common.backtick('cd %s && %s' % (self.dir, self.app_basename))
        if self.verbose:
            print(out)
            print((open(os.path.join(self.dir, 'PARAM.OUT')).read()))
        # Remove normalization on natoms. See .../eos/output.f90:
        # fitev: [volume [Bohr^3] / natoms, energy [Ha] / natoms]
        # fitpv: [volume [Bohr^3] / natoms, pressure [GPa]]
        fitev = np.loadtxt(os.path.join(self.dir, 'EVPAI.OUT')) * self.natoms
        # convert energy back to [Ang^3, eV]
        fitev[:, 0] *= (Bohr**3 / Ang**3)
        fitev[:, 1] *= (Ha / eV)
        self.ev = fitev
        fitpv = np.loadtxt(os.path.join(self.dir, 'PVPAI.OUT'))
        fitpv[:, 0] *= (self.natoms * Bohr**3 / Ang**3)
        self.pv = fitpv
Exemple #7
0
def test_is_seq():
    """is_seq(): list/tuple/ndarray are sequences; strings and files are not."""
    fn = os.path.join(testdir, 'is_seq_test_file')
    file_write(fn, 'lala')
    # FIX: open via context manager so the handle is closed even when an
    # assert fails (the old code only closed fd on full success).
    with open(fn, 'r') as fd:
        for xx in ([1, 2, 3], (1, 2, 3), np.array([1, 2, 3])):
            print(type(xx))
            assert is_seq(xx) is True
        for xx in ('aaa', u'aaa', fd):
            print(type(xx))
            assert is_seq(xx) is False
Exemple #8
0
def test_is_seq():
    """is_seq(): list/tuple/ndarray -> True; str and open file objects -> False."""
    fn = os.path.join(testdir, 'is_seq_test_file')
    file_write(fn, 'lala')
    # FIX: a context manager guarantees the file is closed even if one of
    # the asserts below fails; the original leaked fd in that case.
    with open(fn, 'r') as fd:
        for xx in ([1, 2, 3], (1, 2, 3), np.array([1, 2, 3])):
            print(type(xx))
            assert is_seq(xx) is True
        for xx in ('aaa', fd):
            print(type(xx))
            assert is_seq(xx) is False
Exemple #9
0
 def write_input(self, atoms, properties=None, system_changes=None):
     """Write LAMMPS input + structure files; back up old run files first."""
     FileIOCalculator.write_input(self, atoms, properties,
                                  system_changes)
     if self.backup:
         run_files = (self.infile, self.outfile, self.dumpfile,
                      self.structfile, self.logfile)
         for fn in run_files:
             if os.path.exists(fn):
                 common.backup(fn)
     common.file_write(self.infile, self.fill_infile_templ())
     # symbols file lives next to the structure file
     symbols_file = os.path.basename(self.structfile) + '.symbols'
     io.write_lammps(self.structfile,
                     crys.atoms2struct(atoms),
                     symbolsbasename=symbols_file)
Exemple #10
0
def test_placeholders():
    """FileTemplate: placeholder substitution via key lists, SQL entries,
    a custom placeholder function, and directly-passed template text."""
    templ_dir = pj(testdir, 'calc.templ')
    templ_fn = pj(templ_dir, 'foo.in')
    tgt_dir = pj(testdir, 'calc')
    tgt_fn = pj(tgt_dir, 'foo.in')
    for directory in (templ_dir, tgt_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    file_write(templ_fn, "XXXFOO XXXBAR XXXBAZ")

    # restrict substitution to an explicit key list
    templ = FileTemplate(basename='foo.in', keys=['foo', 'bar'],
                         templ_dir=templ_dir)
    templ.write({'foo': 1, 'bar': 'lala', 'baz': 3}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala XXXBAZ"

    # default: substitute every rule
    templ = FileTemplate(basename='foo.in', templ_dir=templ_dir)
    templ.write({'foo': 1, 'bar': 'lala', 'baz': 3}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala 3"

    # SQLEntry-valued rules
    sql_rules = {'foo': sql.SQLEntry(sqltype='integer', sqlval=1),
                 'bar': sql.SQLEntry(sqltype='text', sqlval='lala'),
                 'baz': sql.SQLEntry(sqltype='integer', sqlval=3)}
    templ.writesql(sql_rules, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala 3"

    # custom placeholder syntax @key@
    file_write(templ_fn, "@foo@ @bar@")
    templ = FileTemplate(basename='foo.in', templ_dir=templ_dir,
                         func=lambda key: "@%s@" % key)
    templ.write({'foo': 1, 'bar': 'lala'}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala"

    # template text passed directly instead of read from templ_dir
    templ = FileTemplate(basename='foo.in', txt="XXXFOO XXXBAR XXXBAZ")
    templ.write({'foo': 1, 'bar': 'lala', 'baz': 3}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala 3"
Exemple #11
0
def write_wien_sgroup(filename, struct, **kwds):
    """
    Write `struct` to input file for WIEN2K's ``sgroup`` symmetry analysis
    tool.

    Parameters
    ----------
    filename : str
        name of the output file
    struct : Structure
    **kwds : see wien_sgroup_input()
    """
    common.file_write(filename, wien_sgroup_input(struct, **kwds))
Exemple #12
0
 def write_input(self, atoms, properties=None, system_changes=None):
     """Write LAMMPS input and structure files for `atoms`.

     Backs up previous in/out/dump/struct/log files when ``self.backup``
     is set, then renders the input template and the structure file.
     """
     FileIOCalculator.write_input(self, atoms, properties, system_changes)
     if self.backup:
         for old in (self.infile, self.outfile, self.dumpfile,
                     self.structfile, self.logfile):
             if os.path.exists(old):
                 common.backup(old)
     common.file_write(self.infile, self.fill_infile_templ())
     struct = crys.atoms2struct(atoms)
     io.write_lammps(
         self.structfile, struct,
         symbolsbasename=os.path.basename(self.structfile) + '.symbols')
Exemple #13
0
def write_wien_sgroup(filename, struct, **kwds):
    """Render `struct` as input for WIEN2K's ``sgroup`` symmetry analysis
    tool and write it to `filename`.

    Parameters
    ----------
    filename : str
        name of the output file
    struct : Structure
    **kwds : see wien_sgroup_input()
    """
    sgroup_txt = wien_sgroup_input(struct, **kwds)
    common.file_write(filename, sgroup_txt)
Exemple #14
0
def write_cif(filename, struct):
    """Quick-n-dirty CIF writer based on PyCifRW.

    length: Angstrom

    Parameters
    ----------
    filename : str
        name of output .cif file
    struct : Structure, length units Angstrom assumed
    """
    ffmt = "%.16e"

    def fmt_col(arr):
        # one formatted string per fractional coordinate
        return [ffmt % x for x in arr]

    cf = pycifrw_CifFile.CifFile()
    block = pycifrw_CifFile.CifBlock()

    cell_keys = ('_cell_length_a', '_cell_length_b', '_cell_length_c',
                 '_cell_angle_alpha', '_cell_angle_beta', '_cell_angle_gamma')
    for idx, key in enumerate(cell_keys):
        block[key] = frepr(struct.cryst_const[idx], ffmt=ffmt)
    block['_symmetry_space_group_name_H-M'] = 'P 1'
    block['_symmetry_Int_Tables_number'] = 1
    # assigning a list produces a "loop_"
    block['_symmetry_equiv_pos_as_xyz'] = ['x,y,z']

    # _atom_site_label: plain symbols double as labels here; a fancier
    # writer could number atoms per species (Si1, Si2, ..., Al1, Al2, ...).
    data_names = ['_atom_site_label',
                  '_atom_site_fract_x',
                  '_atom_site_fract_y',
                  '_atom_site_fract_z',
                  '_atom_site_type_symbol']
    data = [struct.symbols,
            fmt_col(struct.coords_frac[:, 0]),
            fmt_col(struct.coords_frac[:, 1]),
            fmt_col(struct.coords_frac[:, 2]),
            struct.symbols]
    # "loop_" with multiple columns
    block.AddCifItem([[data_names], [data]])
    cf['pwtools'] = block
    # maxoutlength = 2048 is default for cif 1.1 standard (which is default
    # in pycifrw 3.x). Reset default wraplength=80 b/c ASE's cif reader
    # cannot handle wrapped lines.
    common.file_write(filename, cf.WriteOut(wraplength=2048))
Exemple #15
0
def write_cif(filename, struct):
    """Minimal CIF writer built on PyCifRW.

    length: Angstrom

    Parameters
    ----------
    filename : str
        name of output .cif file
    struct : Structure, length units Angstrom assumed
    """
    ffmt = "%.16e"
    cf = pycifrw_CifFile.CifFile()
    block = pycifrw_CifFile.CifBlock()

    a, b, c, alpha, beta, gamma = struct.cryst_const
    block['_cell_length_a'] = frepr(a, ffmt=ffmt)
    block['_cell_length_b'] = frepr(b, ffmt=ffmt)
    block['_cell_length_c'] = frepr(c, ffmt=ffmt)
    block['_cell_angle_alpha'] = frepr(alpha, ffmt=ffmt)
    block['_cell_angle_beta'] = frepr(beta, ffmt=ffmt)
    block['_cell_angle_gamma'] = frepr(gamma, ffmt=ffmt)
    block['_symmetry_space_group_name_H-M'] = 'P 1'
    block['_symmetry_Int_Tables_number'] = 1
    # a list value renders as a CIF "loop_"
    block['_symmetry_equiv_pos_as_xyz'] = ['x,y,z']

    # We reuse the plain symbols as _atom_site_label; numbering atoms per
    # species (Si1, Si2, ...) would also be legal but is not needed here.
    data_names = ['_atom_site_label',
                  '_atom_site_fract_x',
                  '_atom_site_fract_y',
                  '_atom_site_fract_z',
                  '_atom_site_type_symbol']
    frac_cols = [[ffmt % x for x in struct.coords_frac[:, axis]]
                 for axis in range(3)]
    data = [struct.symbols] + frac_cols + [struct.symbols]
    # "loop_" with multiple columns
    block.AddCifItem([[data_names], [data]])
    cf['pwtools'] = block
    # maxoutlength = 2048 is the cif 1.1 default (pycifrw 3.x default).
    # Reset the wraplength=80 default since ASE's cif reader cannot handle
    # wrapped lines.
    common.file_write(filename, cf.WriteOut(wraplength=2048))
Exemple #16
0
def test_placeholders():
    """FileTemplate substitution: key subsets, SQL entries, custom
    placeholder function, and raw template text."""
    templ_dir = pj(testdir, "calc.templ")
    templ_fn = pj(templ_dir, "foo.in")
    tgt_dir = pj(testdir, "calc")
    tgt_fn = pj(tgt_dir, "foo.in")
    for dr in (templ_dir, tgt_dir):
        if not os.path.exists(dr):
            os.makedirs(dr)

    file_write(templ_fn, "XXXFOO XXXBAR XXXBAZ")

    # only the listed keys get substituted
    templ = FileTemplate(basename="foo.in", keys=["foo", "bar"],
                         templ_dir=templ_dir)
    templ.write({"foo": 1, "bar": "lala", "baz": 3}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala XXXBAZ"

    # without keys, all rules are applied
    templ = FileTemplate(basename="foo.in", templ_dir=templ_dir)
    templ.write({"foo": 1, "bar": "lala", "baz": 3}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala 3"

    # rules given as SQLEntry objects
    templ.writesql(
        {
            "foo": sql.SQLEntry(sqltype="integer", sqlval=1),
            "bar": sql.SQLEntry(sqltype="text", sqlval="lala"),
            "baz": sql.SQLEntry(sqltype="integer", sqlval=3),
        },
        calc_dir=tgt_dir,
    )
    assert file_read(tgt_fn).strip() == "1 lala 3"

    # custom placeholder pattern @key@
    file_write(templ_fn, "@foo@ @bar@")
    templ = FileTemplate(basename="foo.in", templ_dir=templ_dir,
                         func=lambda key: "@%s@" % key)
    templ.write({"foo": 1, "bar": "lala"}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala"

    # template text passed in directly
    templ = FileTemplate(basename="foo.in", txt="XXXFOO XXXBAR XXXBAZ")
    templ.write({"foo": 1, "bar": "lala", "baz": 3}, calc_dir=tgt_dir)
    assert file_read(tgt_fn).strip() == "1 lala 3"
Exemple #17
0
def write_axsf(filename, obj):
    """Write animated XSF file for a Structure (only 1 step) or Trajectory.

    Note that forces are converted eV / Ang -> Ha / Ang.

    length: Angstrom
    forces: Ha / Angstrom

    Parameters
    ----------
    filename : target file name
    obj : Structure or Trajectory

    References
    ----------
    [XSF] http://www.xcrysden.org/doc/XSF.html
    """
    # The XSF spec [XSF] is fuzzy about PRIMCOORD (fractional vs cartesian
    # Angstrom); only cartesian Angstrom displays correctly in xcrysden, so
    # that is what we write. The only time-consuming part is
    # atpos_str_fast(), which stringifies every float of `ccf` --
    # unavoidable in pure Python.
    traj = crys.struct2traj(obj)
    # ccf = cartesian coords (+ forces -> 6 columns, when available)
    if traj.is_set_attr('forces'):
        ccf = np.concatenate((traj.coords, traj.forces*eV/Ha), axis=-1)
    else:
        ccf = traj.coords
    chunks = ["ANIMSTEPS %i\nCRYSTAL" % traj.nstep]
    for step in range(traj.nstep):
        chunks.append("\nPRIMVEC %i\n%s"
                      % (step + 1, common.str_arr(traj.cell[step, ...])))
        chunks.append("\nPRIMCOORD %i\n%i 1\n%s"
                      % (step + 1, traj.natoms,
                         pwscf.atpos_str_fast(traj.symbols, ccf[step, ...])))
    common.file_write(filename, "".join(chunks))
Exemple #18
0
def write_axsf(filename, obj):
    """Write an animated XSF file [XSF] for a Structure or Trajectory.

    Forces, when present, are converted eV / Ang -> Ha / Ang.

    length: Angstrom
    forces: Ha / Angstrom

    Parameters
    ----------
    filename : target file name
    obj : Structure or Trajectory

    References
    ----------
    [XSF] http://www.xcrysden.org/doc/XSF.html
    """
    # PRIMCOORD is written as cartesian Angstrom -- the only interpretation
    # xcrysden displays correctly, the XSF spec being vague on this point.
    # The costly step is atpos_str_fast(), which formats every float of
    # `ccf`; no faster pure-Python alternative exists.
    traj = crys.struct2traj(obj)
    has_forces = traj.is_set_attr('forces')
    # ccf = cartesian coords, plus forces as columns 4-6 when available
    ccf = (np.concatenate((traj.coords, traj.forces * eV / Ha), axis=-1)
           if has_forces else traj.coords)
    pieces = ["ANIMSTEPS %i\nCRYSTAL" % traj.nstep]
    for istep in range(traj.nstep):
        cell_txt = common.str_arr(traj.cell[istep, ...])
        atoms_txt = pwscf.atpos_str_fast(traj.symbols, ccf[istep, ...])
        pieces.append("\nPRIMVEC %i\n%s" % (istep + 1, cell_txt))
        pieces.append("\nPRIMCOORD %i\n%i 1\n%s"
                      % (istep + 1, traj.natoms, atoms_txt))
    common.file_write(filename, "".join(pieces))
Exemple #19
0
def test_pw_vc_relax_out():
    """Parse a pw.x vc-relax run and verify cell shape + unit detection."""
    filename = 'files/pw.vc_relax_cell_unit.out'
    common.system('gunzip %s.gz' % filename)
    pp = PwMDOutputFile(filename=filename)
    pp.parse()
    common.system('gzip %s' % filename)
    assert_attrs_not_none(pp, none_attrs=['coords', 'ekin', 'temperature',
                                          'timestep'])
    traj = pp.get_traj()
    assert_attrs_not_none(traj, none_attrs=['ekin', 'temperature',
                                            'timestep', 'velocity', 'time'])
    assert pp.cell_unit == 'alat'
    assert pp.cell.shape == (6, 3, 3)
    # every later cell must differ from the first one (the cell relaxes)
    for step in range(1, pp.cell.shape[0]):
        assert crys.rms(pp.cell[step, ...] - pp.cell[0, ...]) > 0.0

    # _get_block_header_unit() feeds get_cell_unit(); probe header variants.
    header_to_unit = {
        'FOO': None,
        'FOO alat': 'alat',
        'FOO (alat)': 'alat',
        'FOO {alat}': 'alat',
        'FOO (alat=1.23)': 'alat',
        'FOO (alat=  1.23)': 'alat',
    }
    fn = pj(testdir, 'test_block_header_unit.txt')
    for header, unit in header_to_unit.items():
        common.file_write(fn, header)
        pp.filename = fn
        assert pp._get_block_header_unit('FOO') == unit
def test_pw_vc_relax_out():
    """Parse a pw.x vc-relax output and check cell shape / unit handling."""
    filename = 'files/pw.vc_relax_cell_unit.out'
    common.system('gunzip %s.gz' %filename)
    pp = PwMDOutputFile(filename=filename)
    pp.parse()
    common.system('gzip %s' %filename)
    none_attrs = ['coords',
                  'ekin',
                  'temperature',
                  'timestep',
                  ]
    assert_attrs_not_none(pp, none_attrs=none_attrs)
    traj = pp.get_traj()
    none_attrs = [\
        'ekin',
        'temperature',
        'timestep',
        'velocity',
        'time',
        ]
    assert_attrs_not_none(traj, none_attrs=none_attrs)
    assert pp.cell_unit == 'alat'
    assert pp.cell.shape == (6,3,3)
    for idx in range(1, pp.cell.shape[0]):
        assert crys.rms(pp.cell[idx,...] - pp.cell[0,...]) > 0.0

    # Test _get_block_header_unit, which is used in get_cell_unit().
    dct = \
        {'FOO': None,
         'FOO alat': 'alat',
         'FOO (alat)': 'alat',
         'FOO {alat}': 'alat',
         'FOO (alat=1.23)': 'alat',
         'FOO (alat=  1.23)': 'alat',
         }

    # BUG FIX: dict.iteritems() was removed in Python 3 -> use items().
    for txt, val in dct.items():
        fn = pj(testdir, 'test_block_header_unit.txt')
        common.file_write(fn, txt)
        pp.filename = fn
        assert pp._get_block_header_unit('FOO') == val
Exemple #21
0
def write_lammps(filename, struct, symbolsbasename='lmp.struct.symbols'):
    """Write Structure object to lammps format. That file can be read in a
    lammps input file by ``read_data``. Also writes the atom symbols, one per
    line, to `symbolsbasename` in the same directory as `filename`.

    Parameters
    ----------
    filename : str
        name of file to write
    symbolsbasename : str, optional
        file for atom symbols
    struct : Structure

    References
    ----------
    ase.calculators.lammpsrun (ASE 3.8).
    """
    symbols_fn = os.path.join(os.path.dirname(filename), symbolsbasename)
    common.file_write(symbols_fn, '\n'.join(struct.symbols))
    common.file_write(filename, lammps.struct_str(struct))
Exemple #22
0
def write_lammps(filename, struct, symbolsbasename='lmp.struct.symbols'):
    """Write a Structure in lammps format, readable via ``read_data`` in a
    lammps input file. A sidecar file `symbolsbasename` with one atom symbol
    per line is written next to `filename`.

    Parameters
    ----------
    filename : str
        name of file to write
    symbolsbasename : str, optional
        file for atom symbols
    struct : Structure

    References
    ----------
    ase.calculators.lammpsrun (ASE 3.8).
    """
    target_dir = os.path.dirname(filename)
    symbols_path = os.path.join(target_dir, symbolsbasename)
    common.file_write(symbols_path, '\n'.join(struct.symbols))
    common.file_write(filename, lammps.struct_str(struct))
Exemple #23
0
def default_repl_keys():
    """Return a dict of default keys for replacement in a
    :class:`ParameterStudy`. Each key is processed by :class:`FileTemplate`,
    such that e.g. the key 'foo' becomes the placeholder 'XXXFOO'.

    Each of these placeholders can be used in any parameter study in any
    file template, independent of `params_lst` to :class:`ParameterStudy`.

    Notes
    -----
    Calling this writes dummy files to a temp dir, reads the resulting
    database and deletes everything again.
    """
    # HACK: ParameterStudy.write_input() is the only way to learn the
    # replacement keys, so run a throw-away study and inspect its database.
    import tempfile
    calc_root = tempfile.mkdtemp()
    jobfn_templ = pj(calc_root, 'foo.job')
    common.file_write(jobfn_templ, 'dummy job template, go away!')
    machine = Machine(hostname='foo',
                      template=FileTemplate(basename='foo.job',
                                            templ_dir=calc_root))
    print("writing test files to: %s, will be deleted" % calc_root)
    study = ParameterStudy(machines=machine,
                           params_lst=[[SQLEntry(key='dummy', sqlval=1)]],
                           calc_root=calc_root)
    study.write_input()
    db = SQLiteDB(study.dbfn, table='calc')
    db_keys = [str(row[0]) for row in db.get_header()]
    # 'dummy' comes from our fake params, 'hostname' from Machine itself
    for special in ('dummy', 'hostname'):
        db_keys.remove(special)
    db.finish()
    ret = {'ParameterStudy': db_keys,
           'Machine': list(machine.get_sql_record().keys())}
    shutil.rmtree(calc_root)
    return ret
Exemple #24
0
def default_repl_keys():
    """Return a dict of default keys for replacement in a
    :class:`ParameterStudy`. Each key will in practice be processed by
    :class:`FileTemplate`, such that e.g. the key 'foo' becomes the
    placeholder 'XXXFOO'.

    Each of these placeholders can be used in any parameter study in any
    file template, independent of `params_lst` to :class:`ParameterStudy`.

    Notes
    -----
    If this function is called, dummy files are written to a temp dir, the
    database is read, and the files are deleted.
    """
    # HACK!!! Due to the way ParameterStudy.write_input() is coded, we really
    # need to set up a dummy ParameterStudy and read out the database to get
    # the replacement keys :-)
    import tempfile
    calc_root = tempfile.mkdtemp()
    jobfn_templ = pj(calc_root, 'foo.job')
    common.file_write(jobfn_templ, 'dummy job template, go away!')
    m = Machine(hostname='foo',
                template=FileTemplate(basename='foo.job',
                                      templ_dir=calc_root))
    print("writing test files to: %s, will be deleted" % calc_root)
    params_lst = [[SQLEntry(key='dummy', sqlval=1)]]
    study = ParameterStudy(machines=m, params_lst=params_lst,
                           calc_root=calc_root)
    study.write_input()
    db = SQLiteDB(study.dbfn, table='calc')
    db_keys = [str(x[0]) for x in db.get_header()]
    for kk in ['dummy', 'hostname']:
        db_keys.pop(db_keys.index(kk))
    db.finish()
    # BUG FIX: dict.keys() returns a view in Python 3; materialize it so the
    # caller gets a plain list, matching the 'ParameterStudy' value.
    ret = {'ParameterStudy': db_keys,
           'Machine': list(m.get_sql_record().keys())}
    shutil.rmtree(calc_root)
    return ret
Exemple #25
0
def test_backup():
    """backup(): files, dirs, symlinks, custom prefix, missing source."""
    def check_three(target):
        # each call produces the next numbered backup copy
        for suffix in ('.0', '.1', '.2'):
            backup(target)
            assert os.path.exists(target + suffix)

    # file
    target = tempfile.mktemp(prefix='testfile', dir=testdir)
    file_write(target, 'foo')
    check_three(target)

    # dir
    target = tempfile.mktemp(prefix='testdir', dir=testdir)
    create_full_dir(target)
    check_three(target)

    # link to file: backup copies the target, not the link
    filename = tempfile.mktemp(prefix='testfile', dir=testdir)
    linkname = tempfile.mktemp(prefix='testlink', dir=testdir)
    file_write(filename, 'foo')
    os.symlink(filename, linkname)
    backup(linkname)
    assert os.path.exists(linkname + '.0')
    assert os.path.isfile(linkname + '.0')
    assert file_read(linkname + '.0') == file_read(filename)

    # link to dir: the whole tree is copied
    dirname = tempfile.mktemp(prefix='testdir', dir=testdir)
    linkname = tempfile.mktemp(prefix='testlink', dir=testdir)
    create_full_dir(dirname)
    os.symlink(dirname, linkname)
    backup(linkname)
    assert os.path.exists(linkname + '.0')
    assert os.path.isdir(linkname + '.0')
    for entry in ('a', 'b', 'c'):
        assert file_read(pj(dirname, entry)) == \
               file_read(pj(linkname + '.0', entry))

    # custom prefix instead of '.'
    target = tempfile.mktemp(prefix='testfile', dir=testdir)
    file_write(target, 'foo')
    backup(target, prefix="-bak")
    assert os.path.exists(target + '-bak0')

    # nonexisting src, should silently pass
    missing = str(uuid.uuid4())
    assert not os.path.exists(missing)
    backup(missing)
Exemple #26
0
def test_backup():
    """backup(): numbered copies for files, dirs and symlink targets."""
    # plain file: each backup() call creates the next numbered copy
    fname = tempfile.mktemp(prefix='testfile', dir=testdir)
    file_write(fname, 'foo')
    for idx in (0, 1, 2):
        backup(fname)
        assert os.path.exists('%s.%i' % (fname, idx))

    # directory
    dname = tempfile.mktemp(prefix='testdir', dir=testdir)
    create_full_dir(dname)
    for idx in (0, 1, 2):
        backup(dname)
        assert os.path.exists('%s.%i' % (dname, idx))

    # symlink to a file: backup copies the target, not the link
    src = tempfile.mktemp(prefix='testfile', dir=testdir)
    lnk = tempfile.mktemp(prefix='testlink', dir=testdir)
    file_write(src, 'foo')
    os.symlink(src, lnk)
    backup(lnk)
    assert os.path.exists(lnk + '.0')
    assert os.path.isfile(lnk + '.0')
    assert file_read(lnk + '.0') == file_read(src)

    # symlink to a dir: the whole tree is copied
    srcdir = tempfile.mktemp(prefix='testdir', dir=testdir)
    lnk = tempfile.mktemp(prefix='testlink', dir=testdir)
    create_full_dir(srcdir)
    os.symlink(srcdir, lnk)
    backup(lnk)
    assert os.path.exists(lnk + '.0')
    assert os.path.isdir(lnk + '.0')
    for member in ['a', 'b', 'c']:
        assert file_read(pj(srcdir, member)) == \
               file_read(pj(lnk + '.0', member))

    # custom suffix prefix instead of '.'
    fname = tempfile.mktemp(prefix='testfile', dir=testdir)
    file_write(fname, 'foo')
    backup(fname, prefix="-bak")
    assert os.path.exists(fname + '-bak0')
Exemple #27
0
def create_full_dir(dn):
    """Make dir *dn* and populate it with three dummy files a, b, c."""
    os.makedirs(dn)
    for basename in 'abc':
        file_write(pj(dn, basename), 'foo')
XXXKS
"""
# Prepare and run QE's matdyn.x along the k-path, then parse and plot the
# dispersion. Assumes st, ks_path, templ_txt, sp_points, sp_points_frac,
# sp_symbols are defined earlier in the script.
matdyn_in_fn = 'matdyn.disp.in'
matdyn_freq_fn = 'matdyn.freq.disp'
# One "amass(i)=<mass>" namelist line per atom type (1-based index), as
# matdyn.x expects.
mass_str = '\n'.join("amass(%i)=%e" %(ii+1,m) for ii,m in \
                      enumerate(st.mass_unique))
# Placeholder -> value map for the input template: number of k-points,
# the k-points themselves, the masses and the frequency output file name.
rules = {'XXXNKS': ks_path.shape[0],
         'XXXKS': common.str_arr(ks_path),
         'XXXMASS': mass_str,
         'XXXFNFREQ': matdyn_freq_fn,
         }
txt = common.template_replace(templ_txt,
                              rules,
                              conv=True,
                              mode='txt')
common.file_write(matdyn_in_fn, txt)
# q2r.fc is kept gzipped on disk; uncompress for matdyn.x, re-compress after.
common.system("gunzip q2r.fc.gz; matdyn.x < %s; gzip q2r.fc" %matdyn_in_fn)

# parse matdyn output and plot

# define special points path, used in plot_dis() to plot lines at special
# points and make x-labels
sp = kpath.SpecialPointsPath(ks=sp_points, ks_frac=sp_points_frac,
                             symbols=sp_symbols)

# QE 4.x, 5.x
ks, freqs = pwscf.read_matdyn_freq(matdyn_freq_fn)
fig,ax = kpath.plot_dis(kpath.get_path_norm(ks_path), freqs, sp, marker='', ls='-', color='k') 

# QE 5.x
##d = np.loadtxt(matdyn_freq_fn + '.gp')
Exemple #29
0
# Select the MD ensemble from the command line: 'nvt' or 'npt'.
assert len(sys.argv) == 2, "need one input arg: nvt or npt"
ensemble = sys.argv[1]
if ensemble == 'npt':
    ens_txt = npt_txt
elif ensemble == 'nvt':
    ens_txt = nvt_txt
else:
    raise Exception("only nvt / npt allowed")

# Two-atom AlN cell, replicated to a 3x3x3 supercell and written as the
# LAMMPS structure file.
st = crys.Structure(coords_frac=np.array([[0.0]*3, [.5]*3]),
                    cryst_const=np.array([2.85]*3 + [60]*3),
                    symbols=['Al','N'])
io.write_lammps('lmp.struct', crys.scell(st, (3,3,3)))

# Fill the ensemble-specific part into the input template, then run lammps.
common.file_write('lmp.in', lmp_in_templ.format(ensemble=ens_txt))
common.system("mpirun -np 2 lammps < lmp.in", wait=True)

# Parse the trajectory from the text log and from the binary dcd file.
trtxt_orig = io.read_lammps_md_txt('log.lammps')
trdcd = io.read_lammps_md_dcd('log.lammps')

# One figure per trajectory attribute.
plots = mpl.prepare_plots(['coords', 'coords_frac', 'velocity',
                           'cryst_const', 'cell'])
for name, pl in plots.items():
    trtxt = trtxt_orig.copy()
    print(name)
    xtxt = getattr(trtxt, name)
/
XXXNKS
XXXKS
"""
# Write the matdyn.x input by filling the placeholders in templ_txt, run
# matdyn.x (q2r.fc is kept gzipped on disk), then read the frequencies.
matdyn_in_fn = 'matdyn.disp.in'
matdyn_freq_fn = 'matdyn.freq.disp'
# "amass(i)=<mass>" namelist lines, one per atom type (1-based index).
mass_str = '\n'.join("amass(%i)=%e" %(ii+1,m) for ii,m in \
                      enumerate(st.mass_unique))
# Placeholder -> value map for the input template.
rules = {
    'XXXNKS': ks_path.shape[0],
    'XXXKS': common.str_arr(ks_path),
    'XXXMASS': mass_str,
    'XXXFNFREQ': matdyn_freq_fn,
}
txt = common.template_replace(templ_txt, rules, conv=True, mode='txt')
common.file_write(matdyn_in_fn, txt)
common.system("gunzip q2r.fc.gz; matdyn.x < %s; gzip q2r.fc" % matdyn_in_fn)

# parse matdyn output and plot

# define special points path, used in plot_dis() to plot lines at special
# points and make x-labels
sp = kpath.SpecialPointsPath(ks=sp_points,
                             ks_frac=sp_points_frac,
                             symbols=sp_symbols)

# QE 4.x, 5.x
ks, freqs = pwscf.read_matdyn_freq(matdyn_freq_fn)
fig, ax = kpath.plot_dis(kpath.get_path_norm(ks_path),
                         freqs,
                         sp,
Exemple #31
0
 def write(self, dct, calc_dir='calc', mode='dct'):
     """Write file self.filename (e.g. calc/0/pw.in) by replacing
     placeholders in the template (e.g. calc.templ/pw.in).

     Parameters
     ----------
     dct : dict
         key-value pairs, dct.keys() are converted to placeholders with
         self.func()
     calc_dir : str
         the dir where to write the target file to
     mode : str, {'dct', 'sql'}
         | mode='dct': replacement values are dct[<key>]
         | mode='sql': replacement values are dct[<key>].fileval and every
         |     dct[<key>] is an SQLEntry instance
     """
     assert mode in ['dct', 'sql'], ("Wrong 'mode' kwarg, use 'dct' "
                                     "or 'sql'")
     # copy_only : bypass reading the file and passing the text thru the
     # replacement machinery and getting the text back, unchanged. While
     # this works, it is slower and useless.
     if self.keys == []:
         _keys = None
         txt = None
         copy_only = True
     else:
         if self.keys is None:
             # Py3 fix: dict.iterkeys() is gone, use keys(). Take all keys
             # from dct; missing placeholders are then expected, so don't
             # warn about them.
             _keys = dct.keys()
             warn_not_found = False
         else:
             _keys = self.keys
             warn_not_found = True
         if self.txt is None:
             txt = common.file_read(self.filename)
         else:
             txt = self.txt
         copy_only = False

     tgt = pj(calc_dir, self.basename)
     verbose("write: %s" % tgt)
     if copy_only:
         # BUGFIX: the message had one %s placeholder but two arguments,
         # which raises TypeError at runtime.
         verbose("write: ignoring input, just copying file %s to %s"
                 % (self.filename, tgt))
         shutil.copy(self.filename, tgt)
     else:
         rules = {}
         for key in _keys:
             if mode == 'dct':
                 rules[self.func(key)] = dct[key]
             elif mode == 'sql':
                 # dct = sql_record, a list of SQLEntry's
                 rules[self.func(key)] = dct[key].fileval
             else:
                 # Unreachable due to the assert above; kept as a guard.
                 # Py3 fix: StandardError no longer exists.
                 raise Exception("unknown mode: '%s'" % mode)
         new_txt = common.template_replace(txt,
                                           rules,
                                           mode='txt',
                                           conv=True,
                                           warn_not_found=warn_not_found,
                                           warn_mult_found=False,
                                           disp=False)
         common.file_write(tgt, new_txt)
Exemple #32
0
 def write_input(self, mode='a', backup=True, sleep=0, excl=True):
     """
     Create calculation dir(s) for each parameter set and write input files
     based on ``templates``. Write sqlite database storing all relevant
     parameters. Write (bash) shell script to start all calculations (run
     locally or submit batch job file, depending on ``machine.subcmd``).

     Parameters
     ----------
     mode : str, optional
         Fine tune how to write input files (based on ``templates``) to calc
         dirs calc_foo/0/, calc_foo/1/, ... . Note that this doesn't change
         the base dir calc_foo at all, only the subdirs for each calc.
         {'a', 'w'}

         | 'a': Append mode (default). If a previous database is found, then
         |     subsequent calculations are numbered based on the last 'idx'.
         |     calc_foo/0 # old
         |     calc_foo/1 # old
         |     calc_foo/2 # new
         |     calc_foo/3 # new
         | 'w': Write mode. The target dirs are purged and overwritten. Also,
         |     the database (self.dbfn) is overwritten. Use this to
         |     iteratively tune your inputs, NOT for working on already
         |     present results!
         |     calc_foo/0 # new
         |     calc_foo/1 # new
     backup : bool, optional
         Before writing anything, do a backup of self.calc_dir if it already
         exists.
     sleep : int, optional
         For the script to start (submit) all jobs: time in seconds for the
         shell sleep(1) command.
     excl : bool
         If in append mode, a file <calc_root>/excl_push with all indices of
         calculations from old revisions is written. Can be used with
         ``rsync --exclude-from=excl_push`` when pushing appended new
         calculations to a cluster.
     """
     assert mode in ['a', 'w'], "Unknown mode: '%s'" % mode
     # Handle a pre-existing database: back it up and, in write mode,
     # start from scratch.
     if os.path.exists(self.dbfn):
         if backup:
             common.backup(self.dbfn)
         if mode == 'w':
             os.remove(self.dbfn)
     have_new_db = not os.path.exists(self.dbfn)
     common.makedirs(self.calc_root)
     # This call creates a file ``self.dbfn`` if it doesn't exist.
     sqldb = SQLiteDB(self.dbfn, table=self.db_table)
     # max_idx: counter for calc dir numbering; revision: how many times
     # write_input() already appended to this study.
     revision = 0
     if have_new_db:
         max_idx = -1
     else:
         if mode == 'a':
             if sqldb.has_column('idx'):
                 max_idx = sqldb.execute(
                     "select max(idx) from %s" % self.db_table).fetchone()[0]
             else:
                 raise Exception("database '%s': table '%s' has no "
                                 "column 'idx', don't know how to number "
                                 "calcs" % (self.dbfn, self.db_table))
             if sqldb.has_column('revision'):
                 revision = int(sqldb.get_single(
                     "select max(revision) from %s" % self.db_table)) + 1
         elif mode == 'w':
             max_idx = -1
     sql_records = []
     hostnames = []
     # One calc dir tree per target machine: <calc_root>/<prefix>_<hostname>/.
     for imach, machine in enumerate(self.machines):
         hostnames.append(machine.hostname)
         calc_dir = pj(self.calc_root,
                       self.calc_dir_prefix + '_%s' % machine.hostname)
         if os.path.exists(calc_dir):
             if backup:
                 common.backup(calc_dir)
             if mode == 'w':
                 common.system("rm -r %s" % calc_dir, wait=True)
         run_txt = "here=$(pwd)\n"
         for _idx, params in enumerate(self.params_lst):
             params = common.flatten(params)
             idx = max_idx + _idx + 1
             calc_subdir = pj(calc_dir, str(idx))
             # Bookkeeping entries stored along with every parameter set.
             extra_dct = {'revision': revision,
                          'study_name': self.study_name,
                          'idx': idx,
                          'calc_name': self.study_name + "_run%i" % idx,
                          }
             extra_params = [SQLEntry(key=key, sqlval=val)
                             for key, val in extra_dct.items()]
             # templates[:] to copy b/c they may be modified in Calculation
             calc = Calculation(machine=machine,
                                templates=self.templates[:],
                                params=params + extra_params,
                                calc_dir=calc_subdir,
                                )
             if mode == 'w' and os.path.exists(calc_subdir):
                 shutil.rmtree(calc_subdir)
             calc.write_input()
             run_txt += ("cd %i && %s %s && cd $here && sleep %i\n"
                         % (idx, machine.subcmd,
                            machine.get_jobfile_basename(), sleep))
             # sql records are machine-independent -> collect them only once
             if imach == 0:
                 sql_records.append(calc.get_sql_record())
         common.file_write(pj(calc_dir, 'run.sh'), run_txt)
     for record in sql_records:
         record['hostname'] = SQLEntry(sqlval=','.join(hostnames))
     # for incomplete parameters: collect header parts from all records and
     # make a set = unique entries
     raw_header = [(key, entry.sqltype.upper())
                   for record in sql_records
                   for key, entry in record.items()]
     header = list(set(raw_header))
     if have_new_db:
         sqldb.create_table(header)
     else:
         # Append mode on an old db: add any columns it doesn't have yet.
         for record in sql_records:
             for key, entry in record.items():
                 if not sqldb.has_column(key):
                     sqldb.add_column(key, entry.sqltype.upper())
     for record in sql_records:
         # Parameterized insert; iterate the keys once so column names and
         # values stay aligned.
         keys = list(record.keys())
         cmd = "insert into %s (%s) values (%s)" \
             % (self.db_table,
                ",".join(keys),
                ",".join(['?'] * len(keys)))
         sqldb.execute(cmd, tuple(record[key].sqlval for key in keys))
     if excl and revision > 0 and sqldb.has_column('revision'):
         # BUGFIX: the table name was hard-coded as 'calc' here while every
         # other query uses self.db_table.
         old_idx_lst = [str(x) for x, in
                        sqldb.execute("select idx from %s where revision < ?"
                                      % self.db_table, (revision,))]
         common.file_write(pj(self.calc_root, 'excl_push'),
                           '\n'.join(old_idx_lst))
     sqldb.finish()
pair_coeff * * ../AlN.tersoff Al N

### IO
dump dump_txt all custom 1 lmp.out.dump id type xu yu zu fx fy fz &
    vx vy vz xsu ysu zsu 
dump_modify dump_txt sort id 
dump dump_dcd all dcd 1 lmp.out.dcd
dump_modify dump_dcd sort id unwrap yes
thermo_style custom step temp vol cella cellb cellc cellalpha cellbeta cellgamma &
                    ke pe etotal &
                    press pxx pyy pzz pxy pxz pyz cpu press
thermo_modify flush yes
thermo 1

fix 1 all box/relax tri 0.0
minimize 1e-8 1e-8 5000 10000 
"""

# Two-atom AlN cell used by all three runs.
st = crys.Structure(coords_frac=np.array([[0.0]*3, [.5]*3]),
                    cryst_const=np.array([2.85]*3 + [60]*3),
                    symbols=['Al','N'])

# Fresh run dirs: the relaxation uses the small cell, the MD runs a 2x2x2
# supercell.
for dr in ['md-nvt', 'md-npt', 'vc-relax']:
    common.system("rm -rfv {dr}; mkdir -v {dr}".format(dr=dr))
io.write_lammps('vc-relax/lmp.struct', st)
io.write_lammps('md-nvt/lmp.struct', crys.scell(st,(2,2,2)))
io.write_lammps('md-npt/lmp.struct', crys.scell(st,(2,2,2)))

# One input file per run dir. Py3 fix: dict.iteritems() was removed, use
# items().
for dr,txt in lmp_in.items():
    common.file_write('%s/lmp.in' %dr, txt)
Exemple #34
0
 def write_input(self, mode='a', backup=True, sleep=0, excl=True):
     """
     Create calculation dir(s) for each parameter set and write input files
     based on ``templates``. Write sqlite database storing all relevant
     parameters. Write (bash) shell script to start all calculations (run
     locally or submit batch job file, depending on ``machine.subcmd``).

     Parameters
     ----------
     mode : str, optional
         Fine tune how to write input files (based on ``templates``) to calc
         dirs calc_foo/0/, calc_foo/1/, ... . Note that this doesn't change
         the base dir calc_foo at all, only the subdirs for each calc.
         {'a', 'w'}

         | 'a': Append mode (default). If a previous database is found, then
         |     subsequent calculations are numbered based on the last 'idx'.
         |     calc_foo/0 # old
         |     calc_foo/1 # old
         |     calc_foo/2 # new
         |     calc_foo/3 # new
         | 'w': Write mode. The target dirs are purged and overwritten. Also,
         |     the database (self.dbfn) is overwritten. Use this to
         |     iteratively tune your inputs, NOT for working on already
         |     present results!
         |     calc_foo/0 # new
         |     calc_foo/1 # new
     backup : bool, optional
         Before writing anything, do a backup of self.calc_dir if it already
         exists.
     sleep : int, optional
         For the script to start (submit) all jobs: time in seconds for the
         shell sleep(1) command.
     excl : bool
         If in append mode, a file <calc_root>/excl_push with all indices of
         calculations from old revisions is written. Can be used with
         ``rsync --exclude-from=excl_push`` when pushing appended new
         calculations to a cluster.
     """
     assert mode in ['a', 'w'], "Unknown mode: '%s'" % mode
     # Handle a pre-existing database: back it up and, in write mode,
     # start from scratch.
     if os.path.exists(self.dbfn):
         if backup:
             common.backup(self.dbfn)
         if mode == 'w':
             os.remove(self.dbfn)
     have_new_db = not os.path.exists(self.dbfn)
     common.makedirs(self.calc_root)
     # This call creates a file ``self.dbfn`` if it doesn't exist.
     sqldb = SQLiteDB(self.dbfn, table=self.db_table)
     # max_idx: counter for calc dir numbering; revision: how many times
     # write_input() already appended to this study.
     revision = 0
     if have_new_db:
         max_idx = -1
     else:
         if mode == 'a':
             if sqldb.has_column('idx'):
                 max_idx = sqldb.execute(
                     "select max(idx) from %s" % self.db_table).fetchone()[0]
             else:
                 # Py3 fix: StandardError no longer exists.
                 raise Exception("database '%s': table '%s' has no "
                                 "column 'idx', don't know how to number "
                                 "calcs" % (self.dbfn, self.db_table))
             if sqldb.has_column('revision'):
                 revision = int(sqldb.get_single(
                     "select max(revision) from %s" % self.db_table)) + 1
         elif mode == 'w':
             max_idx = -1
     sql_records = []
     hostnames = []
     # One calc dir tree per target machine: <calc_root>/<prefix>_<hostname>/.
     for imach, machine in enumerate(self.machines):
         hostnames.append(machine.hostname)
         calc_dir = pj(self.calc_root,
                       self.calc_dir_prefix + '_%s' % machine.hostname)
         if os.path.exists(calc_dir):
             if backup:
                 common.backup(calc_dir)
             if mode == 'w':
                 common.system("rm -r %s" % calc_dir, wait=True)
         run_txt = "here=$(pwd)\n"
         for _idx, params in enumerate(self.params_lst):
             params = common.flatten(params)
             idx = max_idx + _idx + 1
             calc_subdir = pj(calc_dir, str(idx))
             # Bookkeeping entries stored along with every parameter set.
             extra_dct = {'revision': revision,
                          'study_name': self.study_name,
                          'idx': idx,
                          'calc_name': self.study_name + "_run%i" % idx,
                          }
             # Py3 fix: dict.iteritems() -> items().
             extra_params = [SQLEntry(key=key, sqlval=val)
                             for key, val in extra_dct.items()]
             # templates[:] to copy b/c they may be modified in Calculation
             calc = Calculation(machine=machine,
                                templates=self.templates[:],
                                params=params + extra_params,
                                calc_dir=calc_subdir,
                                )
             if mode == 'w' and os.path.exists(calc_subdir):
                 shutil.rmtree(calc_subdir)
             calc.write_input()
             run_txt += ("cd %i && %s %s && cd $here && sleep %i\n"
                         % (idx, machine.subcmd,
                            machine.get_jobfile_basename(), sleep))
             # sql records are machine-independent -> collect them only once
             if imach == 0:
                 sql_records.append(calc.get_sql_record())
         common.file_write(pj(calc_dir, 'run.sh'), run_txt)
     for record in sql_records:
         record['hostname'] = SQLEntry(sqlval=','.join(hostnames))
     # for incomplete parameters: collect header parts from all records and
     # make a set = unique entries
     raw_header = [(key, entry.sqltype.upper())
                   for record in sql_records
                   for key, entry in record.items()]
     header = list(set(raw_header))
     if have_new_db:
         sqldb.create_table(header)
     else:
         # Append mode on an old db: add any columns it doesn't have yet.
         for record in sql_records:
             for key, entry in record.items():
                 if not sqldb.has_column(key):
                     sqldb.add_column(key, entry.sqltype.upper())
     for record in sql_records:
         # Parameterized insert; iterate the keys once so column names and
         # values stay aligned. Py3 fix: itervalues() -> explicit lookup.
         keys = list(record.keys())
         cmd = "insert into %s (%s) values (%s)" \
             % (self.db_table,
                ",".join(keys),
                ",".join(['?'] * len(keys)))
         sqldb.execute(cmd, tuple(record[key].sqlval for key in keys))
     if excl and revision > 0 and sqldb.has_column('revision'):
         # BUGFIX: the table name was hard-coded as 'calc' here while every
         # other query uses self.db_table.
         old_idx_lst = [str(x) for x, in
                        sqldb.execute("select idx from %s where revision < ?"
                                      % self.db_table, (revision,))]
         common.file_write(pj(self.calc_root, 'excl_push'),
                           '\n'.join(old_idx_lst))
     sqldb.finish()
Exemple #35
0
    def write(self, dct, calc_dir='calc', mode='dct'):
        """Write file self.filename (e.g. calc/0/pw.in) by replacing
        placeholders in the template (e.g. calc.templ/pw.in).

        Parameters
        ----------
        dct : dict
            key-value pairs, dct.keys() are converted to placeholders with
            self.func()
        calc_dir : str
            the dir where to write the target file to
        mode : str, {'dct', 'sql'}
            | mode='dct': replacement values are dct[<key>]
            | mode='sql': replacement values are dct[<key>].fileval and every
            |     dct[<key>] is an SQLEntry instance
        """
        assert mode in ['dct', 'sql'], ("Wrong 'mode' kwarg, use 'dct' "
                                        "or 'sql'")
        # copy_only : bypass reading the file and passing the text thru the
        # replacement machinery and getting the text back, unchanged. While
        # this works, it is slower and useless.
        if self.keys == []:
            _keys = None
            txt = None
            copy_only = True
        else:
            if self.keys is None:
                # All keys from dct; missing placeholders are then expected,
                # so don't warn about them.
                _keys = dct.keys()
                warn_not_found = False
            else:
                _keys = self.keys
                warn_not_found = True
            if self.txt is None:
                txt = common.file_read(self.filename)
            else:
                txt = self.txt
            copy_only = False

        tgt = pj(calc_dir, self.basename)
        verbose("write: %s" % tgt)
        if copy_only:
            # BUGFIX: the message had one %s placeholder but two arguments,
            # which raises TypeError at runtime.
            verbose("write: ignoring input, just copying file %s to %s" %
                    (self.filename, tgt))
            shutil.copy(self.filename, tgt)
        else:
            rules = {}
            for key in _keys:
                if mode == 'dct':
                    rules[self.func(key)] = dct[key]
                elif mode == 'sql':
                    # dct = sql_record, a list of SQLEntry's
                    rules[self.func(key)] = dct[key].fileval
                else:
                    # Unreachable due to the assert above; kept as a guard
                    # with a meaningful message.
                    raise Exception("unknown mode: '%s'" % mode)
            new_txt = common.template_replace(txt,
                                              rules,
                                              mode='txt',
                                              conv=True,
                                              warn_not_found=warn_not_found,
                                              warn_mult_found=False,
                                              disp=False)
            common.file_write(tgt, new_txt)
Exemple #36
0
# Select the MD ensemble from the command line: 'nvt' or 'npt'.
assert len(sys.argv) == 2, "need one input arg: nvt or npt"
if sys.argv[1] == 'npt':
    ens_txt = npt_txt
elif sys.argv[1] == 'nvt':
    ens_txt = nvt_txt
else:
    # Py3 fix: StandardError no longer exists.
    raise Exception("only nvt / npt allowed")

# create structure file
st = crys.Structure(coords_frac=np.array([[0.0]*3, [.5]*3]),
                    cryst_const=np.array([2.85]*3 + [60]*3),
                    symbols=['Al','N'])
io.write_lammps('lmp.struct', crys.scell(st,(3,3,3)))

# write lmp.in for nvt or npt
common.file_write('lmp.in', lmp_in_templ.format(ensemble=ens_txt))

# run lammps
common.system("mpirun -np 2 lammps < lmp.in", wait=True)

# read trajectory
trtxt_orig = io.read_lammps_md_txt('log.lammps')
trdcd = io.read_lammps_md_dcd('log.lammps')

# plotting. Py3 fixes: iteritems() -> items(), print statement -> print().
plots = mpl.prepare_plots(['coords', 'coords_frac', 'velocity',
                           'cryst_const', 'cell'])
for name, pl in plots.items():
    trtxt = trtxt_orig.copy()
    print(name)
    xtxt = getattr(trtxt, name)
Exemple #37
0
def create_full_dir(dn):
    """Create directory *dn* containing dummy files 'a', 'b' and 'c'."""
    os.makedirs(dn)
    for fn in ('a', 'b', 'c'):
        file_write(pj(dn, fn), 'foo')
Exemple #38
0
# that, only column 1 (time step) and some energy value from column 2 is used.
# Optional 1d spectrum via the external fourier.x tool. Only columns 0
# (step index) and 4 (signal) of the 7-column input are non-zero, matching
# the CPMD-style layout described in the (truncated) comment above.
if use_fourier:
    fourier_in_data = np.zeros((arr.shape[0],7))
    fourier_in_data[:,0] = np.arange(arr.shape[0])
    fourier_in_data[:,4] = arr
    fourier_in_data_fn = pj(fourier_dir, 'fourier_in_data_1d.txt')
    fourier_out_data_fn = pj(fourier_dir, 'fourier_out_data_1d.txt')
    fourier_in_fn = pj(fourier_dir, 'fourier_1d.in')
    fourier_out_fn = pj(fourier_dir, 'fourier_1d.log')
    # stdin for fourier.x: in-file, out-file, then three numbers and a flag.
    # NOTE(review): field meanings assumed from the values passed (time
    # step, 0, max frequency in 1/cm) -- confirm against the fourier.x
    # README.
    fourier_in_txt = '%s\n%s\n%e\n%e\n%e\n%i' %(fourier_in_data_fn,
                                                fourier_out_data_fn,
                                                dt/constants.th,
                                                0,
                                                fmax*fmax_extend_fac/(constants.c0*100),
                                                1)
    common.file_write(fourier_in_fn, fourier_in_txt)
    # In order to make picky gfortrans happy, we need to use savetxt(...,
    # fmt="%g") such that the first column is an integer (1 instead of
    # 1.0000e+00). 
    np.savetxt(fourier_in_data_fn, fourier_in_data, fmt='%g')
    common.system("%s < %s > %s" %(fourier_exe, fourier_in_fn, fourier_out_fn))
    fourier_out_data = np.loadtxt(fourier_out_data_fn)
    f3 = fourier_out_data[:,0]*(constants.c0*100) # 1/cm -> Hz
    y3n = num.norm_int(fourier_out_data[:,1], f3)

# Frequency axis + normalized spectrum for y1 and y2 via cut_norm().
f1, y1n = cut_norm(y1, dt)
f2, y2n = cut_norm(y2, dt)

figs = []
axs = []
Exemple #39
0
# For the 1d case, we write the time trace in a format suggested in the CPMD
# manual and the fourier.x README file: awk '{ print $1, 0.0, 0.0, 0.0, 0.0,
# 0.0, $2; }' ENERGIES > ekinc.dat where ENERGIES is a CPMD output file. From
# that, only column 1 (time step) and some energy value from column 2 is used.
if use_fourier:
    # 7-column input: col 0 = step index, col 4 = the 1d signal `arr`,
    # everything else zero (see the awk recipe above).
    fourier_in_data = np.zeros((arr.shape[0], 7))
    fourier_in_data[:, 0] = np.arange(arr.shape[0])
    fourier_in_data[:, 4] = arr
    fourier_in_data_fn = pj(fourier_dir, 'fourier_in_data_1d.txt')
    fourier_out_data_fn = pj(fourier_dir, 'fourier_out_data_1d.txt')
    fourier_in_fn = pj(fourier_dir, 'fourier_1d.in')
    fourier_out_fn = pj(fourier_dir, 'fourier_1d.log')
    # stdin for fourier.x: in-file, out-file, then three numbers and a flag.
    # NOTE(review): field meanings assumed from the values passed -- confirm
    # against the fourier.x README.
    fourier_in_txt = '%s\n%s\n%e\n%e\n%e\n%i' % (
        fourier_in_data_fn, fourier_out_data_fn, dt / constants.th, 0,
        fmax * fmax_extend_fac / (constants.c0 * 100), 1)
    common.file_write(fourier_in_fn, fourier_in_txt)
    # In order to make picky gfortrans happy, we need to use savetxt(...,
    # fmt="%g") such that the first column is an integer (1 instead of
    # 1.0000e+00).
    np.savetxt(fourier_in_data_fn, fourier_in_data, fmt='%g')
    common.system("%s < %s > %s" %
                  (fourier_exe, fourier_in_fn, fourier_out_fn))
    fourier_out_data = np.loadtxt(fourier_out_data_fn)
    f3 = fourier_out_data[:, 0] * (constants.c0 * 100)  # 1/cm -> Hz
    y3n = num.norm_int(fourier_out_data[:, 1], f3)

# Frequency axis + normalized spectrum for y1 and y2 via cut_norm().
f1, y1n = cut_norm(y1, dt)
f2, y2n = cut_norm(y2, dt)

figs = []
axs = []