예제 #1
0
    def _from_file(self):
        """Read the file and build a rudimentary dictionary of entries.

        Returns
        -------
        dict
            The entries parsed from the file content by ``_from_list``.

        """

        file_lines = utils.read_from_file(self._file_path, self._file_handler)
        return self._from_list(file_lines)
예제 #2
0
파일: doscar.py 프로젝트: espenfl/parsevasp
    def _from_file(self):
        """Read the file and store its parsed entries on this instance.

        The content is read as utf8 and handed to ``_from_list``, which
        populates this instance's data dictionary. Nothing is returned.

        """

        content = utils.read_from_file(self._file_path, self._file_handler, encoding='utf8')
        self._from_list(content)
예제 #3
0
    def _from_file(self):
        """Create rudimentary dictionary of entries from a
        file.

        Returns
        -------
        The raw content read from the file (utf8 encoding).

        """

        return utils.read_from_file(self._file_path,
                                    self._file_handler,
                                    encoding='utf8')
예제 #4
0
    def _from_file(self):
        """Read the file and build a rudimentary dictionary of entries.

        The file is read with utf8 encoding and the resulting content is
        parsed by ``_from_list``.

        Returns
        -------
        dict
            The entries parsed from the file content.

        """

        content = utils.read_from_file(self._file_path,
                                       self._file_handler,
                                       encoding='utf8')
        return self._from_list(content)
예제 #5
0
파일: chgcar.py 프로젝트: espenfl/parsevasp
    def _from_file(self):
        """
        Load CHGCAR into NumPy arrays.

        Parses the header (comment, scaling factor, lattice vectors), the
        FFT grid dimensions and then one to four density datasets, storing
        them in ``self._data`` under ``'total'`` and (for spin-polarized
        runs) ``'magnetization'``. Densities are divided by the cell volume.

        This method is presently not optimized to use as little memory as possible.

        """
        content = utils.read_from_file(self._file_path,
                                       self._file_handler,
                                       lines=False)
        # Extract header: everything before the first blank line.
        header, content = content.split('\n\n', 1)
        header = header.split('\n')
        # header[0] is the comment line.
        scaling = float(header[1])
        lattice_vectors = np.zeros((3, 3))
        for i in range(3):
            # Read and scale lattice vectors
            lattice_vectors[i] = scaling * np.array(
                [float(item) for item in header[i + 2].split()])
        # Cell volume, used below to scale the densities.
        # NOTE(review): the original guarded this with ``if volume:`` after
        # ``volume = 1.0`` — an always-true branch, removed as dead code.
        volume = np.dot(lattice_vectors[0],
                        np.cross(lattice_vectors[1], lattice_vectors[2]))
        # First line of content should now be NGXF, NGYF, NGZF
        ngf_string, content = content.split('\n', 1)
        ngf = [int(item) for item in ngf_string.split()]
        # Need to reverse as CHGCAR is x fastest, while we want
        # to comply with z fastest (C order).
        ngf.reverse()
        # The grid-size line separates the datasets; check how many we have.
        datasets = content.split(ngf_string)
        num_datasets = len(datasets)

        def parse_dataset(dataset):
            # Parse one density dataset: drop any trailing augmentation
            # occupancies, read the floats, reshape to the grid and scale
            # by the cell volume.
            values = np.fromstring(
                dataset.split('augmentation occupancies')[0],
                dtype=float,
                sep=' ')
            return values.reshape(ngf) / volume

        # First dataset is always there
        self._data['total'] = parse_dataset(datasets[0])
        if num_datasets == 2:
            # Collinear spin
            self._data['magnetization'] = parse_dataset(datasets[1])
        elif num_datasets == 4:
            # Non-collinear spin
            self._data['magnetization'] = {}
            self._data['magnetization']['x'] = parse_dataset(datasets[1])
            self._data['magnetization']['y'] = parse_dataset(datasets[2])
            self._data['magnetization']['z'] = parse_dataset(datasets[3])