Example #1
    def charge_transfer_from_file(self):
        """
        Returns tuple of dictionaries in order of potential sites
        ({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
        """
        self.cht = []
        for i in range(1, len(self.pot_dict) + 1):

            if len(str(i)) == 1:
                with zopen("{}0{}.dat".format(self.ldos_filename, i), "r") \
                        as fobject:
                    f = fobject.readlines()
                    tot = float(f[1].split()[4])
                    s = float(f[3].split()[2])
                    p = float(f[4].split()[2])
                    d = float(f[5].split()[2])
                    f1 = float(f[6].split()[2])
                self.cht.append({"s": s, "p": p, "d": d, "f": f1, "tot": tot})
            else:
                with zopen(self.ldos_filename + str(i) + ".dat", "r") as fid:
                    f = fid.readlines()
                    tot = float(f[1].split()[4])
                    s = float(f[3].split()[2])
                    p = float(f[4].split()[2])
                    d = float(f[5].split()[2])
                    f1 = float(f[6].split()[2])
                self.cht.append({"s": s, "p": p, "d": d, "f": f1, "tot": tot})

        return tuple(self.cht)
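
Every snippet in this collection goes through zopen, a small wrapper that dispatches to plain open, gzip, or bz2 based on the file extension, so the same read/write code handles feff.inp, feff.inp.gz and feff.inp.bz2 alike. A minimal sketch of the pattern, assuming the monty.io implementation of zopen (the pymatgen versions these snippets come from shipped an equivalent helper in their own io utilities):

from monty.io import zopen

# The ".gz" suffix selects the gzip opener; a bare "feff.inp" would fall back
# to the builtin open. The filename is a placeholder.
with zopen("feff.inp.gz", "r") as f:
    lines = f.readlines()
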
Example #2
    def charge_transfer_from_file(filename1, filename2):
        """
        Args:
            filename1:
                name of feff.inp file for run

            filename2:
                ldos filename prefix for the run; files are assumed to be
                numbered consecutively, i.e., ldos01.dat, ldos02.dat, ...

        Returns:
            OrderedDict keyed by potential site index; each value maps the
            site's element symbol to {"s": 0.078, "p": 0.154, "d": 0.0,
            "f": 0.0, "tot": 0.232}-style orbital charges.
        """
        cht = OrderedDict()
        pot_string = FeffPot.pot_string_from_file(filename1)
        dicts = FeffPot.pot_dict_from_string(pot_string)
        pot_dict = dicts[1]

        for i in range(0, len(dicts[0]) + 1):
            if len(str(i)) == 1:
                with zopen("{}0{}.dat".format(filename2, i), "r") \
                        as fobject:
                    f = fobject.readlines()
                    s = float(f[3].split()[2])
                    p = float(f[4].split()[2])
                    d = float(f[5].split()[2])
                    f1 = float(f[6].split()[2])
                    tot = float(f[1].split()[4])
                    cht[str(i)] = {
                        pot_dict[i]: {
                            's': s,
                            'p': p,
                            'd': d,
                            'f': f1,
                            'tot': tot
                        }
                    }
            else:
                with zopen(filename2 + str(i) + ".dat", "r") as fid:
                    f = fid.readlines()
                    s = float(f[3].split()[2])
                    p = float(f[4].split()[2])
                    d = float(f[5].split()[2])
                    f1 = float(f[6].split()[2])
                    tot = float(f[1].split()[4])
                    cht[str(i)] = {
                        pot_dict[i]: {
                            's': s,
                            'p': p,
                            'd': d,
                            'f': f1,
                            'tot': tot
                        }
                    }

        return cht
Example #3
def gzip_directory(path):
    """
    Gzips all files in a directory.

    Args:
        path:
            Path to directory.
    """
    for f in os.listdir(path):
        if not f.endswith("gz"):
            # Join with the directory so this works even when path is not
            # the current working directory.
            full_f = os.path.join(path, f)
            with zopen(full_f, 'rb') as f_in, \
                    zopen('{}.gz'.format(full_f), 'wb') as f_out:
                f_out.writelines(f_in)
            os.remove(full_f)
Example #4
def write_structure(structure, filename):
    """
    Write a structure to a file based on file extension. For example, anything
    ending in a "cif" is assumed to be a Crystallographic Information Format
    file. Supported formats include CIF, POSCAR, CSSR and pymatgen's JSON
    serialized structures.

    Args:
        structure:
            Structure to write
        filename:
            A filename to write to.
    """
    fname = os.path.basename(filename)
    if fnmatch(fname, "*.cif*"):
        writer = CifWriter(structure)
    elif fnmatch(fname, "POSCAR*") or fnmatch(fname, "CONTCAR*"):
        writer = Poscar(structure)
    elif fnmatch(fname.lower(), "*.cssr*"):
        writer = Cssr(structure)
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        with zopen(filename, "w") as f:
            json.dump(structure, f, cls=PMGJSONEncoder)
            return
    else:
        raise ValueError("Unrecognized file extension!")

    writer.write_file(filename)
Example #5
def read_structure(filename):
    """
    Reads a structure based on file extension. For example, anything ending in
    a "cif" is assumed to be a Crystallographic Information Format file.
    Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
    vasprun.xml, CSSR and pymatgen's JSON serialized structures.

    Args:
        filename:
            A filename to read from.

    Returns:
        A Structure object.
    """
    fname = os.path.basename(filename)
    if fnmatch(fname.lower(), "*.cif*"):
        parser = CifParser(filename)
        return parser.get_structures(True)[0]
    elif fnmatch(fname, "POSCAR*") or fnmatch(fname, "CONTCAR*"):
        return Poscar.from_file(filename, False).structure
    elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
        return Chgcar.from_file(filename).structure
    elif fnmatch(fname, "vasprun*.xml*"):
        return Vasprun(filename).final_structure
    elif fnmatch(fname.lower(), "*.cssr*"):
        cssr = Cssr.from_file(filename)
        return cssr.structure
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        with zopen(filename) as f:
            s = json.load(f, cls=PMGJSONDecoder)
            if type(s) != Structure:
                raise IOError("File does not contain a valid serialized "
                              "structure")
            return s
    raise ValueError("Unrecognized file extension!")
Example #6
def write_mol(mol, filename):
    """
    Write a molecule to a file based on file extension. For example, anything
    ending in a "xyz" is assumed to be a XYZ file. Supported formats include
    xyz, Gaussian input (gjf|g03|g09|com|inp), and pymatgen's JSON serialized
    molecules.

    Args:
        mol:
            Molecule to write
        filename:
            A filename to write to.
    """
    fname = os.path.basename(filename)
    if fnmatch(fname.lower(), "*.xyz*"):
        return XYZ(mol).write_file(filename)
    elif any([fnmatch(fname.lower(), "*.{}*".format(r))
              for r in ["gjf", "g03", "g09", "com", "inp"]]):
        return GaussianInput(mol).write_file(filename)
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        with zopen(filename, "w") as f:
            return json.dump(mol, f, cls=PMGJSONEncoder)
    else:
        m = re.search("\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
                      filename.lower())
        if m:
            return BabelMolAdaptor(mol).write_file(filename, m.group(1))

    raise ValueError("Unrecognized file extension!")
Example #7
    def header_string_from_file(filename):
        """
        Reads the Header string from a file.

        Args:
            filename:
                File name containing the Header data.

        Returns:
            Header string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            feff_header_str = []
            ln = 0
            try:
                feffpmg = f[0].find("pymatgen")
            except IndexError:
                feffpmg = 0

            if feffpmg > 0:
                nsites = int(f[7].split()[2])
                for line in f:
                    ln += 1
                    if ln <= nsites + 8:
                        feff_header_str.append(line)
            else:
                end = 0
                for line in f:
                    if (line[0] == "*" or line[0] == "T") and end == 0:
                        feff_header_str.append(line.replace("\r", ""))
                    else:
                        end = 1

        return ''.join(feff_header_str)
Example #8
    def atoms_string_from_file(filename):
        """
        Reads atomic shells from a file such as feff.inp or an ATOMS file.
        The lines are arranged as follows:

        x y z   ipot    Atom Symbol   Distance   Number

        with distance being the shell radius and ipot an integer identifying
        the potential used.

        Args:
            filename:
                file name containing atomic coord data.

        Returns:
            Atoms string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            coords = 0
            atoms_str = []

            for line in f:
                if coords == 0:
                    find_atoms = line.find("ATOMS")
                    if find_atoms >= 0:
                        coords = 1
                if coords == 1:
                    atoms_str.append(line.replace("\r", ""))

        return FeffAtoms.from_string(''.join(atoms_str))
Example #9
    def atoms_string_from_file(filename):
        """
        Reads atomic shells from a file such as feff.inp or an ATOMS file.
        The lines are arranged as follows:

        x y z   ipot    Atom Symbol   Distance   Number

        with distance being the shell radius and ipot an integer identifying
        the potential used.

        Args:
            filename:
                file name containing atomic coord data.

        Returns:
            Atoms string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            coords = 0
            atoms_str = []

            for line in f:
                if coords == 0:
                    find_atoms = line.find("ATOMS")
                    if find_atoms >= 0:
                        coords = 1
                if coords == 1:
                    atoms_str.append(line.replace("\r", ""))

        return ''.join(atoms_str)
Example #10
def write_structure(structure, filename):
    """
    Write a structure to a file based on file extension. For example, anything
    ending in a "cif" is assumed to be a Crystallographic Information Format
    file. Supported formats include CIF, POSCAR, CSSR and pymatgen's JSON
    serialized structures.

    Args:
        structure:
            Structure to write
        filename:
            A filename to write to.
    """
    lower_filename = os.path.basename(filename).lower()
    if re.search("\.cif", lower_filename):
        writer = CifWriter(structure)
    elif lower_filename.startswith("poscar") \
            or lower_filename.startswith("contcar"):
        writer = Poscar(structure)
    elif re.search("\.cssr", lower_filename):
        writer = Cssr(structure)
    elif re.search("\.[mj]son", lower_filename):
        with zopen(lower_filename, "w") as f:
            json.dump(structure, f, cls=PMGJSONEncoder)
            return
    else:
        raise ValueError("Unrecognized file extension!")

    writer.write_file(filename)
Example #11
    def write_file(self, filename):
        """
        Writes XYZ to file.

        Args:
            filename:
                File name of output file.
        """
        with zopen(filename, "w") as f:
            f.write(self.__str__())
Example #12
    def write_to_json_file(self, filename):
        """
        Writes the mson representation to a file.

        Args:
            filename:
                filename to write to. It is recommended that the file extension
                be ".mson".
        """
        with zopen(filename, "wb") as f:
            json.dump(self, f, cls=PMGJSONEncoder)
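
Reading such a file back is the mirror image: open it with zopen and decode with PMGJSONDecoder, as the read_structure and read_mol helpers elsewhere in this collection do. A hedged sketch, where entry stands in for any object exposing write_to_json_file and the filename is a placeholder:

# Hypothetical write/read cycle.
entry.write_to_json_file("entry.mson.gz")
with zopen("entry.mson.gz") as f:
    entry_copy = json.load(f, cls=PMGJSONDecoder)
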
Example #13
File: queen.py, Project: sikisis/pymatgen
    def save_data(self, filename):
        """
        Save the assimilated data to a file.

        Args:
            filename:
                filename to save the assimilated data to. Note that if the
                filename ends with gz or bz2, the relevant gzip or bz2
                compression will be applied.
        """
        with zopen(filename, "w") as f:
            json.dump(list(self._data), f, cls=PMGJSONEncoder)
Example #14
File: hive.py, Project: bkappes/pymatgen
def _get_transformation_history(path):
    """
    Checks for a transformations.json* file and returns the history.
    """
    trans_json = glob.glob(os.path.join(path, "transformations.json*"))
    if trans_json:
        try:
            with zopen(trans_json[0]) as f:
                return json.load(f)["history"]
        except:
            return None
    return None
Example #15
    def __init__(self, filename):
        self.filename = filename

        with zopen(filename) as f:
            data = f.read()

        chunks = re.split("NWChem Input Module", data)
        if re.search("CITATION", chunks[-1]):
            chunks.pop()
        preamble = chunks.pop(0)
        self.job_info = self._parse_preamble(preamble)
        self.data = map(self._parse_job, chunks)
Example #16
    def from_file(filename):
        """
        Creates GaussianInput from a file.

        Args:
            filename:
                Gaussian input filename

        Returns:
            GaussianInput object
        """
        with zopen(filename, "r") as f:
            return GaussianInput.from_string(f.read())
Example #17
    def from_file(filename):
        """
        Creates XYZ object from a file.

        Args:
            filename:
                XYZ filename

        Returns:
            XYZ object
        """
        with zopen(filename) as f:
            return ZeoVoronoiXYZ.from_string(f.read())
Example #18
    def from_file(filename):
        """
        Reads a CSSR file to a Cssr object.

        Args:
            filename:
                Filename to read from.

        Returns:
            Cssr object.
        """
        with zopen(filename, "r") as f:
            return Cssr.from_string(f.read())
Example #19
    def from_file(filename):
        """
        Reads a CSSR file to a ZeoCssr object.

        Args:
            filename:
                Filename to read from.

        Returns:
            ZeoCssr object.
        """
        with zopen(filename, "r") as f:
            return ZeoCssr.from_string(f.read())
Example #20
    def from_file(cls, filename):
        """
        Read an NwInput from a file. Currently tested to work with
        files generated from this class itself.

        Args:
            filename:
                Filename to parse.

        Returns:
            NwInput object
        """
        with zopen(filename) as f:
            return cls.from_string(f.read())
Example #21
    def __init__(self, filename, occupancy_tolerance=1.):
        """
        Args:
            filename:
                Cif filename. bzipped or gzipped cifs are fine too.
            occupancy_tolerance:
                If total occupancy of a site is between 1 and
                occupancy_tolerance, the occupancies will be scaled down to 1.
        """
        self._occupancy_tolerance = occupancy_tolerance
        if isinstance(filename, basestring):
            with zopen(filename, "r") as f:
                self._cif = CifFile.ReadCif(f)
        else:
            self._cif = CifFile.ReadCif(filename)
Example #22
def pmg_dump(obj, filename, **kwargs):
    """
    Dump an object to a json file using PMGJSONEncoder. Note that these
    objects can be lists, dicts or otherwise nested pymatgen objects that
    support the to_dict and from_dict MSONAble protocol.

    Args:
        obj:
            Object to dump.
        filename:
            Filename of file to open. Can be gzipped or bzipped.
        \*\*kwargs:
            Any of the keyword arguments supported by the json.dump method.
    """
    return json.dump(obj, zopen(filename, "w"), cls=PMGJSONEncoder, **kwargs)
Example #23
def pmg_load(filename, **kwargs):
    """
    Loads a json file and deserialize it with PMGJSONDecoder.

    Args:
        filename:
            Filename of file to open. Can be gzipped or bzipped.
        **kwargs:
            Any of the keyword arguments supported by the json.load method.

    Returns:
        Deserialized pymatgen object. Note that these objects can be lists,
        dicts or otherwise nested pymatgen objects that support the to_dict
        and from_dict MSONAble protocol.
    """
    return json.load(zopen(filename), cls=PMGJSONDecoder, **kwargs)
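
pmg_dump and pmg_load are meant to be used as a pair, with compression coming for free from zopen. A small sketch, with a placeholder filename and structure standing in for any MSONable object:

# Hypothetical dump/load cycle.
pmg_dump(structure, "structure.json.bz2")
restored = pmg_load("structure.json.bz2")
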
Example #24
File: cifio.py, Project: bkappes/pymatgen
    def __init__(self, filename, occupancy_tolerance=1.):
        """
        Args:
            filename:
                Cif filename. bzipped or gzipped cifs are fine too.
            occupancy_tolerance:
                If total occupancy of a site is between 1 and
                occupancy_tolerance, the occupancies will be scaled down to 1.
        """
        self._occupancy_tolerance = occupancy_tolerance
        if isinstance(filename, basestring):
            with zopen(filename, "r") as f:
                # We use this round-about way to clean up the CIF first.
                stream = cStringIO.StringIO(_clean_cif(f.read()))
                self._cif = CifFile.ReadCif(stream)
        else:
            self._cif = CifFile.ReadCif(filename)
Example #25
    def __init__(self, filename):
        """
        Args:
            filename:
                Name of file containing PROCAR.
        """
        #create and return data object containing the information of a PROCAR
        self.name = ""
        self.data = {}
        self.headers = None
        myocc = 0
        with zopen(filename, "r") as f:
            #lines = list(clean_lines(f.readlines())) #TTM do not clean lines
            lines = list(f.readlines())
            self.name = lines[0]
            kpointexpr = re.compile("^\s*k-point\s+(\d+).*weight = ([0-9\.]+)")
            ionexpr = re.compile("^ion.*")
            expr = re.compile("^\s*([0-9]+)\s+")
            dataexpr = re.compile("[\.0-9]+")
            weight = 0

            for l in lines:
                if l.find("occ.") > -1:
                    myocc = float(l.split()[7])
                if kpointexpr.match(l):
                    m = kpointexpr.match(l)
                    currentKpoint = int(m.group(1))
                    weight = float(m.group(2))
                    if currentKpoint == 1:
                        self.data = dict()
                elif ionexpr.match(l) and self.headers is None:
                    self.headers = l.split()
                    self.headers.pop(0)
                elif expr.match(l):
                    linedata = dataexpr.findall(l)
                    linefloatdata = map(float, linedata)
                    index = int(linefloatdata.pop(0))
                    if index in self.data:
                        if myocc == 1:
                            self.data[index] += np.array(
                                linefloatdata) * weight
                    else:
                        if myocc == 1:
                            self.data[index] = np.array(linefloatdata) * weight
                        else:
                            self.data[index] = 0.0
Example #26
    def header_string_from_file(filename):
        """
        Reads the Header string from either a HEADER file or a feff.inp file.
        Also handles feff.inp files that were not generated by pymatgen.

        Args:
            filename:
                File name containing the Header data.

        Returns:
            Header string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            feff_header_str = []
            ln = 0

            #Checks to see if generated by pymatgen

            try:
                feffpmg = f[0].find("pymatgen")
            except IndexError:
                feffpmg = 0

            #Reads pymatgen generated header or feff.inp file

            if feffpmg > 0:
                nsites = int(f[8].split()[2])
                for line in f:
                    ln += 1
                    if ln <= nsites + 9:
                        feff_header_str.append(line)
            else:

                # Reads the header from a feff.inp file from an unknown
                # source

                end = 0
                for line in f:
                    if (line[0] == "*" or line[0] == "T") and end == 0:
                        feff_header_str.append(line.replace("\r", ""))
                    else:
                        end = 1

        return ''.join(feff_header_str)
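
The returned header string feeds straight into Header.from_string, which is how the FeffLdos.from_file example further below recovers the structure. A brief sketch with a placeholder path:

# Hypothetical usage of the reader above.
header_str = Header.header_string_from_file("feff.inp")
header = Header.from_string(header_str)
structure = header.struct
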
Example #27
    def from_file(filename, check_for_POTCAR=True):
        """
        Reads a Poscar from a file.

        The code will try its best to determine the elements in the POSCAR in
        the following order:
        1. If check_for_POTCAR is True, the code will try to check if a POTCAR
        is in the same directory as the POSCAR and use elements from that by
        default. (This is the VASP default sequence of priority).
        2. If the input file is Vasp5-like and contains element symbols in the
        6th line, the code will use that if check_for_POTCAR is False or there
        is no POTCAR found.
        3. Failing (2), the code will check if a symbol is provided at the end
        of each coordinate.

        If all else fails, the code will just assign the first n elements in
        increasing atomic number, where n is the number of species, to the
        Poscar. For example, H, He, Li, ....  This will ensure at least a
        unique element is assigned to each site and any analysis that does not
        require specific elemental properties should work fine.

        Args:
            filename:
                File name containing Poscar data.
            check_for_POTCAR:
                Whether to check if a POTCAR is present in the same directory
                as the POSCAR. Defaults to True.

        Returns:
            Poscar object.
        """
        dirname = os.path.dirname(os.path.abspath(filename))
        names = None
        if check_for_POTCAR:
            for f in os.listdir(dirname):
                if f == "POTCAR":
                    try:
                        potcar = Potcar.from_file(os.path.join(dirname, f))
                        names = [sym.split("_")[0] for sym in potcar.symbols]
                    except:
                        names = None
        with zopen(filename, "r") as f:
            return Poscar.from_string(f.read(), names)
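
A minimal call pattern for the reader above; the path is a placeholder, and .structure is the same attribute the read_structure helper earlier relies on:

# Hypothetical usage; zopen lets this read a gzipped POSCAR too.
poscar = Poscar.from_file("POSCAR.gz", check_for_POTCAR=False)
structure = poscar.structure
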
Example #28
def read_mol(filename):
    """
    Reads a molecule based on file extension. For example, anything ending in
    a "xyz" is assumed to be a XYZ file. Supported formats include xyz,
    Gaussian input (gjf|g03|g09|com|inp), Gaussian output (out|lis|log), and
    pymatgen's JSON serialized molecules. With openbabel installed, many more
    extensions are supported.

    Args:
        filename:
            A filename to read from.

    Returns:
        A Molecule object.
    """
    fname = os.path.basename(filename)
    if fnmatch(fname.lower(), "*.xyz*"):
        return XYZ.from_file(filename).molecule
    elif any([
            fnmatch(fname.lower(), "*.{}*".format(r))
            for r in ["gjf", "g03", "g09", "com", "inp"]
    ]):
        return GaussianInput.from_file(filename).molecule
    elif any([
            fnmatch(fname.lower(), "*.{}*".format(r))
            for r in ["out", "lis", "log"]
    ]):
        return GaussianOutput(filename).final_structure
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        with zopen(filename) as f:
            s = json.load(f, cls=PMGJSONDecoder)
            if type(s) != Molecule:
                raise IOError("File does not contain a valid serialized "
                              "molecule")
            return s
    else:
        m = re.search("\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
                      filename.lower())
        if m:
            return BabelMolAdaptor.from_file(filename, m.group(1)).pymatgen_mol

    raise ValueError("Unrecognized file extension!")
Example #29
    def from_file(filename):
        """
        Reads an Incar object from a file.

        Args:
            filename:
                Filename of the INCAR file to read.

        Returns:
            Incar object
        """
        with zopen(filename, "r") as f:
            lines = list(clean_lines(f.readlines()))
        params = {}
        for line in lines:
            m = re.match("(\w+)\s*=\s*(.*)", line)
            if m:
                key = m.group(1).strip()
                val = m.group(2).strip()
                val = Incar.proc_val(key, val)
                params[key] = val
        return Incar(params)
Example #30
    def from_file(filename):
        with zopen(filename, "r") as reader:
            fdata = reader.read()
        potcar = Potcar()
        potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)",
                                    re.S).findall(fdata)
        functionals = []
        for p in potcar_strings:
            single = PotcarSingle(p)
            potcar.append(single)
            functionals.append(single.lexch)
        if len(set(functionals)) != 1:
            raise ValueError("File contains incompatible functionals!")
        else:
            # All LEXCH values agree at this point, so checking the first
            # entry is sufficient.
            if functionals[0] == "PE":
                functional = "PBE"
            elif functionals[0] == "91":
                functional = "PW91"
            else:
                functional = "LDA"
            potcar.functional = functional
        return potcar
Example #31
def read_structure(filename):
    """
    Reads a structure based on file extension. For example, anything ending in
    a "cif" is assumed to be a Crystallographic Information Format file.
    Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
    vasprun.xml, CSSR and pymatgen's JSON serialized structures.

    Args:
        filename:
            A filename to read from.

    Returns:
        A Structure object.
    """
    lower_filename = os.path.basename(filename).lower()
    if re.search("\.cif", lower_filename):
        parser = CifParser(filename)
        return parser.get_structures(True)[0]
    elif lower_filename.startswith("poscar") \
            or lower_filename.startswith("contcar"):
        return Poscar.from_file(filename, False).structure
    elif lower_filename.startswith("chgcar") \
            or lower_filename.startswith("locpot"):
        return Chgcar.from_file(filename).structure
    elif re.search("vasprun", lower_filename) \
            and re.search("xml", lower_filename):
        return Vasprun(filename).final_structure
    elif re.search("\.cssr", lower_filename):
        cssr = Cssr.from_file(filename)
        return cssr.structure
    elif re.search("\.[mj]son", lower_filename):
        with zopen(filename) as f:
            s = json.load(f, cls=PMGJSONDecoder)
            if type(s) != Structure:
                raise IOError("File does not contain a valid serialized "
                              "structure")
            return s

    raise ValueError("Unrecognized file extension!")
Example #32
 def __init__(self, filename1="feff.inp", filename2="ldos"):
     """
     Args:
         filename1:
             input file of run to obtain structure
         filename2:
             output ldos file of run to obtain dos info, etc.
     """
     self._input_filename = filename1
     self._ldos_filename = filename2
     self.header_str = Header.header_string_from_file(filename1)
     self.pot_string = FeffPot.pot_string_from_file(filename1)
     self._dicts = FeffPot.pot_dict_from_string(self.pot_string)
     self._pot_dict = self._dicts[0]
     self._pot_dict_reverse = self._dicts[1]
     with zopen(self.ldos_filename + "00.dat", "r") as fobject:
         f = fobject.readlines()
     self._efermi = float(f[0].split()[4])
     self._dos_ener = []
     self.ldos = self.ldos_from_file()
     for i in range(0, len(self.ldos[1])):
         self._dos_ener.append(self.ldos[1][i][0])
Example #33
    def from_file(filename="feff.inp"):
        """
        Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.

        Args:
            filename:
                Filename for either PARAMETER or feff.inp file

        Returns:
            Feff_tag object
        """
        with zopen(filename, "r") as f:
            lines = list(clean_lines(f.readlines()))
        params = {}
        for line in lines:
            m = re.match("([A-Z]+\d*\d*)\s*(.*)", line)
            if m:
                key = m.group(1).strip()
                val = m.group(2).strip()
                val = FeffTags.proc_val(key, val)
                if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
                    params[key] = val
        return FeffTags(params)
Example #34
    def pot_string_from_file(filename):
        """
        Reads Potential parameters from a feff.inp or FEFFPOT file.
        The lines are arranged as follows:

          ipot   Z   element   lmax1   lmax2   stoichiometry   spinph

        Args:
            filename:
                File name containing potential data.

        Returns:
            FEFFPOT string.
        """
        with zopen(filename, "r") as f_object:
            f = f_object.readlines()
            ln = -1
            pot_str = ["POTENTIALS\n"]
            pot_tag = -1
            pot_data = 0
            pot_data_over = 1

            for line in f:
                if pot_data_over == 1:
                    ln += 1
                    if pot_tag == -1:
                        pot_tag = line.find("POTENTIALS")
                        ln = 0
                    if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
                        try:
                            if int(line.split()[0]) == pot_data:
                                pot_data += 1
                                pot_str.append(line.replace("\r", ""))
                        except (ValueError, IndexError):
                            if pot_data > 0:
                                pot_data_over = 0
        return ''.join(pot_str)
Example #35
    def from_file(filename1='feff.inp', filename2='ldos'):
        """"
        Creates FeffLdos object from raw Feff ldos files by
        by assuming they are numbered consequetively, i.e. ldos01.dat
        ldos02.dat...

        Args:
            filename1:
                input file of run to obtain structure
            filename2:
                output ldos file of run to obtain dos info, etc.
        """
        ldos_filename = filename2
        header_str = Header.header_string_from_file(filename1)
        header = Header.from_string(header_str)
        structure = header.struct
        nsites = structure.num_sites
        pot_string = FeffPot.pot_string_from_file(filename1)
        dicts = FeffPot.pot_dict_from_string(pot_string)
        pot_dict = dicts[0]

        with zopen(ldos_filename + "00.dat", "r") as fobject:
            f = fobject.readlines()
        efermi = float(f[0].split()[4])

        dos_energies = []
        ldos = {}

        for i in range(1, len(pot_dict) + 1):
            if len(str(i)) == 1:
                ldos[i] = np.loadtxt("{}0{}.dat".format(ldos_filename, i))
            else:
                ldos[i] = np.loadtxt("{}{}.dat".format(ldos_filename, i))

        for i in range(0, len(ldos[1])):
            dos_energies.append(ldos[1][i][0])

        all_pdos = []
        vorb = {
            "s": Orbital.s,
            "p": Orbital.py,
            "d": Orbital.dxy,
            "f": Orbital.f0
        }
        forb = {"s": 0, "p": 1, "d": 2, "f": 3}

        dlength = len(ldos[1])

        for i in range(nsites):
            pot_index = pot_dict[structure.species[i].symbol]
            all_pdos.append(defaultdict(dict))
            for k, v in vorb.items():
                density = [
                    ldos[pot_index][j][forb[k] + 1] for j in range(dlength)
                ]
                updos = density
                downdos = None
                if downdos:
                    all_pdos[-1][v] = {Spin.up: updos, Spin.down: downdos}
                else:
                    all_pdos[-1][v] = {Spin.up: updos}

        pdos = all_pdos
        vorb2 = {0: Orbital.s, 1: Orbital.py, 2: Orbital.dxy, 3: Orbital.f0}
        pdoss = {
            structure[i]: {v: pdos[i][v]
                           for v in vorb2.values()}
            for i in range(len(pdos))
        }

        forb = {"s": 0, "p": 1, "d": 2, "f": 3}

        tdos = [0] * dlength
        for i in range(nsites):
            pot_index = pot_dict[structure.species[i].symbol]
            for v in forb.values():
                density = [ldos[pot_index][j][v + 1] for j in range(dlength)]
                for j in range(dlength):
                    tdos[j] = tdos[j] + density[j]
        tdos = {Spin.up: tdos}

        dos = Dos(efermi, dos_energies, tdos)
        complete_dos = CompleteDos(structure, dos, pdoss)
        charge_transfer = FeffLdos.charge_transfer_from_file(
            filename1, filename2)
        return FeffLdos(complete_dos, charge_transfer)
Example #36
    def post_process(cls, dir_name, d):
        """
        Simple post-processing for various files other than the vasprun.xml.
        Called by generate_task_doc. Modify this if your runs have other
        kinds of processing requirements.

        Args:
            dir_name:
                The dir_name.
            d:
                Current doc generated.
        """
        logger.info("Post-processing dir:{}".format(dir_name))

        fullpath = os.path.abspath(dir_name)

        # VASP input generated by pymatgen's alchemy has a
        # transformations.json file that keeps track of the origin of a
        # particular structure. This is extremely useful for tracing back a
        # result. If such a file is found, it is inserted into the task doc
        # as d["transformations"]
        transformations = {}
        filenames = glob.glob(os.path.join(fullpath, "transformations.json*"))
        if len(filenames) >= 1:
            with zopen(filenames[0], "rb") as f:
                transformations = json.load(f)
                try:
                    m = re.match("(\d+)-ICSD",
                                 transformations["history"][0]["source"])
                    if m:
                        d["icsd_id"] = int(m.group(1))
                except ValueError:
                    pass
        else:
            logger.warning("Transformations file does not exist.")

        other_parameters = transformations.get("other_parameters")
        new_tags = None
        if other_parameters:
            # We don't want to leave tags or authors in the
            # transformations file because they'd be copied into
            # every structure generated after this one.
            new_tags = other_parameters.pop("tags", None)
            new_author = other_parameters.pop("author", None)
            if new_author:
                d["author"] = new_author
            if not other_parameters:  # if dict is now empty remove it
                transformations.pop("other_parameters")

        d["transformations"] = transformations

        # Calculations done using custodian has a custodian.json,
        # which tracks the jobs performed and any errors detected and fixed.
        # This is useful for tracking what has actually been done to get a
        # result. If such a file is found, it is inserted into the task doc
        # as d["custodian"]
        filenames = glob.glob(os.path.join(fullpath, "custodian.json*"))
        if len(filenames) >= 1:
            with zopen(filenames[0], "rb") as f:
                d["custodian"] = json.load(f)

        # Parse OUTCAR for additional information and run stats that are
        # generally not in vasprun.xml.
        try:
            run_stats = {}
            for filename in glob.glob(os.path.join(fullpath, "OUTCAR*")):
                outcar = Outcar(filename)
                i = 1 if re.search("relax2", filename) else 0
                taskname = "relax2" if re.search("relax2",
                                                 filename) else "relax1"
                d["calculations"][i]["output"]["outcar"] = outcar.to_dict
                run_stats[taskname] = outcar.run_stats
        except:
            logger.error("Bad OUTCAR for {}.".format(fullpath))

        try:
            overall_run_stats = {}
            for key in [
                    "Total CPU time used (sec)", "User time (sec)",
                    "System time (sec)", "Elapsed time (sec)"
            ]:
                overall_run_stats[key] = sum(
                    [v[key] for v in run_stats.values()])
            run_stats["overall"] = overall_run_stats
        except:
            logger.error("Bad run stats for {}.".format(fullpath))

        d["run_stats"] = run_stats

        #Convert to full uri path.
        d["dir_name"] = get_uri(dir_name)

        if new_tags:
            d["tags"] = new_tags

        logger.info("Post-processed " + fullpath)
Example #37
    def _parse(self, filename):

        start_patt = re.compile(" \(Enter \S+l101\.exe\)")
        route_patt = re.compile(" #[pPnNtT]*.*")
        charge_mul_patt = re.compile("Charge\s+=\s*([-\\d]+)\s+"
                                     "Multiplicity\s+=\s*(\d+)")
        num_basis_func_patt = re.compile("([0-9]+)\s+basis functions")
        pcm_patt = re.compile("Polarizable Continuum Model")
        stat_type_patt = re.compile("imaginary frequencies")
        scf_patt = re.compile("E\(.*\)\s*=\s*([-\.\d]+)\s+")
        mp2_patt = re.compile("EUMP2\s*=\s*(.*)")
        oniom_patt = re.compile("ONIOM:\s+extrapolated energy\s*=\s*(.*)")
        termination_patt = re.compile("(Normal|Error) termination of Gaussian")
        std_orientation_patt = re.compile("Standard orientation")
        end_patt = re.compile("--+")
        orbital_patt = re.compile("Alpha\s*\S+\s*eigenvalues --(.*)")
        thermo_patt = re.compile("(Zero-point|Thermal) correction(.*)="
                                 "\s+([\d\.-]+)")

        self.properly_terminated = False
        self.is_pcm = False
        self.stationary_type = "Minimum"
        self.structures = []
        self.corrections = {}
        self.energies = []
        self.pcm = None

        coord_txt = []
        read_coord = 0
        orbitals_txt = []
        parse_stage = 0
        num_basis_found = False
        terminated = False

        with zopen(filename) as f:
            for line in f:
                if parse_stage == 0:
                    if start_patt.search(line):
                        parse_stage = 1
                    elif route_patt.search(line):
                        self.route = {}
                        for tok in line.split():
                            sub_tok = tok.strip().split("=")
                            key = sub_tok[0].upper()
                            self.route[key] = sub_tok[1].upper() \
                                if len(sub_tok) > 1 else ""
                            m = re.match("(\w+)/([^/]+)", key)
                            if m:
                                self.functional = m.group(1)
                                self.basis_set = m.group(2)
                elif parse_stage == 1:
                    if charge_mul_patt.search(line):
                        m = charge_mul_patt.search(line)
                        self.charge = int(m.group(1))
                        self.spin_mult = int(m.group(2))
                        parse_stage = 2
                elif parse_stage == 2:

                    if self.is_pcm:
                        self._check_pcm(line)

                    if "FREQ" in self.route and thermo_patt.search(line):
                        m = thermo_patt.search(line)
                        if m.group(1) == "Zero-point":
                            self.corrections["Zero-point"] = float(m.group(3))
                        else:
                            key = m.group(2).strip(" to ")
                            self.corrections[key] = float(m.group(3))

                    if read_coord:
                        if not end_patt.search(line):
                            coord_txt.append(line)
                        else:
                            read_coord = (read_coord + 1) % 4
                            if not read_coord:
                                sp = []
                                coords = []
                                for l in coord_txt[2:]:
                                    toks = l.split()
                                    sp.append(Element.from_Z(int(toks[1])))
                                    coords.append(map(float, toks[3:6]))
                                self.structures.append(Molecule(sp, coords))
                    elif termination_patt.search(line):
                        m = termination_patt.search(line)
                        if m.group(1) == "Normal":
                            self.properly_terminated = True
                        terminated = True
                    elif (not num_basis_found) and \
                            num_basis_func_patt.search(line):
                        m = num_basis_func_patt.search(line)
                        self.num_basis_func = int(m.group(1))
                        num_basis_found = True
                    elif (not self.is_pcm) and pcm_patt.search(line):
                        self.is_pcm = True
                        self.pcm = {}
                    elif "FREQ" in self.route and "OPT" in self.route and \
                            stat_type_patt.search(line):
                        self.stationary_type = "Saddle"
                    elif mp2_patt.search(line):
                        m = mp2_patt.search(line)
                        self.energies.append(
                            float(m.group(1).replace("D", "E")))
                    elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
                        self.energies.append(float(m.group(1)))
                    elif scf_patt.search(line):
                        m = scf_patt.search(line)
                        self.energies.append(float(m.group(1)))
                    elif std_orientation_patt.search(line):
                        coord_txt = []
                        read_coord = 1
                    elif orbital_patt.search(line):
                        orbitals_txt.append(line)
        if not terminated:
            raise IOError("Bad Gaussian output file.")
Example #38
File: queen.py, Project: sikisis/pymatgen
    def load_data(self, filename):
        """
        Load assimilated data from a file.
        """
        with zopen(filename, "r") as f:
            self._data = json.load(f, cls=PMGJSONDecoder)
Example #39
    def write_file(self, filename):
        with zopen(filename, "w") as f:
            f.write(self.__str__())