Example #1
    def get_data_nmtuple(self, itemp, estep, spins=None):
        nkpt = self.ebands.nkpt
        spins = range(self.ebands.nsppol) if spins is None else spins

        emesh, emin, emax = self.get_emesh_eminmax(estep)
        nene = len(emesh)
        #print("nkpt", nkpt, "nene", nene)
        data = np.zeros((nkpt, nene))

        # aw: [nwr, ntemp, max_nbcalc, nkcalc, nsppol] array
        for spin in spins:
            for ik in range(nkpt):
                for band in range(self.ebands.nband_sk[spin, ik]):
                    w = self.aw_meshes[spin, ik, band]
                    aw = self.aw[spin, ik, band, itemp]
                    data[ik] += UnivariateSpline(w,
                                                 aw,
                                                 k=self.k,
                                                 s=self.s,
                                                 ext=self.ext)(emesh)

        return dict2namedtuple(data=data,
                               emesh=emesh,
                               emin=emin,
                               emax=emax,
                               spins=spins,
                               nkpt=nkpt)
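
The accumulation loop above evaluates a scipy UnivariateSpline of each spectral function on a common energy mesh. A minimal, self-contained sketch of that call pattern; the mesh, weights and spline settings (k=3, s=0, ext="zeros") are illustrative stand-ins for self.k, self.s and self.ext:

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    # Original (irregular) frequency mesh and spectral weights for one band.
    w = np.linspace(-5.0, 5.0, 40)
    aw = np.exp(-w ** 2)

    # Common, denser energy mesh on which all bands are accumulated.
    emesh = np.linspace(-6.0, 6.0, 200)

    # k: spline degree, s: smoothing factor, ext="zeros": return 0 outside the data range.
    spline = UnivariateSpline(w, aw, k=3, s=0, ext="zeros")
    data_k = spline(emesh)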
Example #2
    def get_spectral_functions(self, step=0.01, width=0.02):
        """
        Args:
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.

        Return:
            mesh, sfw, int_sfw
        """
        # Compute linear mesh.
        epad = 3.0 * width
        e_min = self.uf_eigens.min() - epad
        e_max = self.uf_eigens.max() + epad
        nw = int(1 + (e_max - e_min) / step)
        mesh, step = np.linspace(e_min, e_max, num=nw, endpoint=True, retstep=True)

        sfw = np.zeros((self.nss, self.uf_nkpt, nw))
        for spin in range(self.nss):
            for ik in range(self.uf_nkpt):
                for band in range(self.nband):
                    e = self.uf_eigens[spin, ik, band]
                    sfw[spin, ik] += self.uf_weights[spin, ik, band] * gaussian(mesh, width, center=e)

        from scipy.integrate import cumtrapz
        int_sfw = cumtrapz(sfw, x=mesh, initial=0.0)

        return dict2namedtuple(mesh=mesh, sfw=sfw, int_sfw=int_sfw)
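
The spectral weights are obtained by broadening each eigenvalue with a normalized gaussian and then integrating cumulatively along the mesh. A self-contained sketch of the same pattern using only numpy and scipy; the gaussian helper below is a plausible stand-in for the gaussian function imported by the original module, and the eigenvalues are invented:

    import numpy as np
    from scipy.integrate import cumulative_trapezoid  # named cumtrapz in older scipy

    def gaussian(mesh, width, center=0.0):
        """Normalized gaussian with standard deviation `width`, centered at `center`."""
        return np.exp(-0.5 * ((mesh - center) / width) ** 2) / (width * np.sqrt(2.0 * np.pi))

    step, width = 0.01, 0.02
    eigens = np.array([-1.0, 0.30, 0.35, 2.1])             # fake eigenvalues in eV
    e_min, e_max = eigens.min() - 3 * width, eigens.max() + 3 * width
    nw = int(1 + (e_max - e_min) / step)
    mesh = np.linspace(e_min, e_max, num=nw)

    sfw = sum(gaussian(mesh, width, center=e) for e in eigens)
    int_sfw = cumulative_trapezoid(sfw, x=mesh, initial=0.0)  # ~ number of states below E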
Example #3
    def find_jobs_torun(self, max_njobs):
        """
        Find entries whose results have not yet been calculated.

        Args:
            max_njobs: Maximum number of jobs to return.
        """
        jobs, got = [], 0
        for struct_type, formula, data in self.iter_struct_formula_data():
            if got == max_njobs: break
            if data in ("scheduled", "failed"): continue
            if data is None:
                symbols = list(set(species_from_formula(formula)))
                pseudos = self.table.pseudos_with_symbols(symbols)

                job = dict2namedtuple(formula=formula,
                                      struct_type=struct_type,
                                      pseudos=pseudos)
                self[struct_type][formula] = "scheduled"
                jobs.append(job)
                got += 1

        # Update the database.
        if jobs: self.json_write()
        return jobs
Example #4
    def get_spectral_functions(self, step=0.01, width=0.02):
        """
        Args:
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.

        Return:
            mesh, sfw, int_sfw
        """
        # Compute linear mesh.
        epad = 3.0 * width
        e_min = self.uf_eigens.min() - epad
        e_max = self.uf_eigens.max() + epad
        nw = int(1 + (e_max - e_min) / step)
        mesh, step = np.linspace(e_min,
                                 e_max,
                                 num=nw,
                                 endpoint=True,
                                 retstep=True)

        sfw = np.zeros((self.nss, self.uf_nkpt, nw))
        for spin in range(self.nss):
            for ik in range(self.uf_nkpt):
                for band in range(self.nband):
                    e = self.uf_eigens[spin, ik, band]
                    sfw[spin,
                        ik] += self.uf_weights[spin, ik, band] * gaussian(
                            mesh, width, center=e)

        from scipy.integrate import cumtrapz
        int_sfw = cumtrapz(sfw, x=mesh, initial=0.0)

        return dict2namedtuple(mesh=mesh, sfw=sfw, int_sfw=int_sfw)
Example #5
    def get_thermodynamic_properties(self, tstart=0, tstop=800, num=100):
        """
        Generates all the thermodynamic properties corresponding to all the volumes using the phonon DOS.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes for all the volumes:

                tmesh: numpy array with the list of temperatures. Shape (num).
                cv: constant-volume specific heat, in eV/K. Shape (nvols, num).
                free_energy: free energy, in eV. Shape (nvols, num).
                entropy: entropy, in eV/K. Shape (nvols, num).
                zpe: zero point energy in eV. Shape (nvols).
        """
        tmesh = np.linspace(tstart, tstop, num)
        cv = self._get_thermodynamic_prop("cv", tstart, tstop, num)
        free_energy = self._get_thermodynamic_prop("free_energy", tstart, tstop, num)
        entropy = self._get_thermodynamic_prop("entropy", tstart, tstop, num)
        zpe  = np.zeros(self.nvols)

        for i, dos in zip(self.ind_doses, self.doses):
            zpe[i] = dos.zero_point_energy

        dos_vols = self.volumes[self.ind_doses]
        missing_vols = self.volumes[self._ind_energy_only]

        fit_params = np.polyfit(dos_vols, zpe[self.ind_doses], self.fit_degree)
        zpe[self._ind_energy_only] = np.poly1d(fit_params)(missing_vols)

        return dict2namedtuple(tmesh=tmesh, cv=cv, free_energy=free_energy, entropy=entropy,
                               zpe=zpe)
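
The last two statements fill in the zero-point energy at the volumes that lack a phonon DOS by fitting a polynomial to the known values with np.polyfit and evaluating it with np.poly1d. A standalone sketch of that step with invented volumes and ZPE values:

    import numpy as np

    volumes = np.array([18.0, 19.0, 20.0, 21.0, 22.0])    # all volumes (made-up data)
    ind_doses = [0, 2, 4]                                  # volumes that have a phonon DOS
    ind_energy_only = [1, 3]                               # volumes with energies only

    zpe = np.zeros(len(volumes))
    zpe[ind_doses] = [0.112, 0.108, 0.104]                 # known zero-point energies (eV)

    # Fit ZPE(V) on the known volumes and evaluate the polynomial on the missing ones.
    fit_params = np.polyfit(volumes[ind_doses], zpe[ind_doses], 2)
    zpe[ind_energy_only] = np.poly1d(fit_params)(volumes[ind_energy_only])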
Example #6
    def find_last_timden_file(self):
        """
        ABINIT produces lots of out_TIM?_DEN files, one for each step, and we need to find the last
        one in order to prepare the restart or to connect other tasks to the structural relaxation.

        This function finds all the TIM?_DEN files in self and returns a namedtuple (path, step)
        where `path` is the path of the last TIM?_DEN file and step is the iteration number.
        Returns None if the directory does not contain TIM?_DEN files.
        """
        regex = re.compile(r"out_TIM(\d+)_DEN(\.nc)?$")

        timden_paths = [
            f for f in self.list_filepaths()
            if regex.match(os.path.basename(f))
        ]
        if not timden_paths: return None

        # Build list of (step, path) tuples.
        stepfile_list = []
        for path in timden_paths:
            name = os.path.basename(path)
            match = regex.match(name)
            step, ncext = match.groups()
            stepfile_list.append((int(step), path))

        # DSU sort.
        last = sorted(stepfile_list, key=lambda t: t[0])[-1]
        return dict2namedtuple(step=last[0], path=last[1])
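
The decorate-sort-undecorate idiom used here can be exercised in isolation: extract the step index with the regex, pair it with the path and sort on the integer. A minimal sketch with hypothetical file names:

    import os
    import re
    from collections import namedtuple

    regex = re.compile(r"out_TIM(\d+)_DEN(\.nc)?$")
    paths = ["/tmp/run/out_TIM2_DEN", "/tmp/run/out_TIM10_DEN.nc", "/tmp/run/out_TIM1_DEN"]

    step_path = []
    for path in paths:
        match = regex.match(os.path.basename(path))
        if match:
            step_path.append((int(match.group(1)), path))

    # DSU sort on the integer step, then take the last one.
    last = sorted(step_path, key=lambda t: t[0])[-1]
    Result = namedtuple("Result", "step path")
    print(Result(step=last[0], path=last[1]))   # step=10, path='/tmp/run/out_TIM10_DEN.nc'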
Example #7
    def get_thermodynamic_properties(self, tstart=0, tstop=800, num=100):
        """
        Generates all the thermodynamic properties corresponding to all the volumes using the phonon DOS.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes for all the volumes:

                tmesh: numpy array with the list of temperatures. Shape (num).
                cv: constant-volume specific heat, in eV/K. Shape (nvols, num).
                free_energy: free energy, in eV. Shape (nvols, num).
                entropy: entropy, in eV/K. Shape (nvols, num).
                zpe: zero point energy in eV. Shape (nvols).
        """
        tmesh = np.linspace(tstart, tstop, num)
        cv = np.zeros((self.nvols, num))
        free_energy = np.zeros((self.nvols, num))
        entropy = np.zeros((self.nvols, num))
        internal_energy = np.zeros((self.nvols, num))
        zpe  = np.zeros(self.nvols)

        for i, d in enumerate(self.doses):
            cv[i] = d.get_cv(tstart, tstop, num).values
            free_energy[i] = d.get_free_energy(tstart, tstop, num).values
            entropy[i] = d.get_entropy(tstart, tstop, num).values
            zpe[i] = d.zero_point_energy

        return dict2namedtuple(tmesh=tmesh, cv=cv, free_energy=free_energy, entropy=entropy,
                               zpe=zpe)
Example #8
    def find_last_timden_file(self):
        """
        ABINIT produces lots of out_TIM?_DEN files, one for each step, and we need to find the last
        one in order to prepare the restart or to connect other tasks to the structural relaxation.

        This function finds all the TIM?_DEN files in self and returns a namedtuple (path, step)
        where `path` is the path of the last TIM?_DEN file and step is the iteration number.
        Returns None if the directory does not contain TIM?_DEN files.
        """
        regex = re.compile(r"out_TIM(\d+)_DEN(\.nc)?$")

        timden_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
        if not timden_paths: return None

        # Build list of (step, path) tuples.
        stepfile_list = []
        for path in timden_paths:
            name = os.path.basename(path)
            match = regex.match(name)
            step, ncext = match.groups()
            stepfile_list.append((int(step), path))

        # DSU sort.
        last = sorted(stepfile_list, key=lambda t: t[0])[-1]
        return dict2namedtuple(step=last[0], path=last[1])
Example #9
    def find_1den_files(self):
        """
        Abinit adds the idir-ipert index at the end of the 1DEN file and this breaks the extension,
        e.g. out_DEN1. This method scans the files in the directories and returns a list of namedtuples.
        Each namedtuple gives the `path` of the 1DEN file and the `pertcase` index.
        """
        regex = re.compile(r"out_DEN(\d+)(\.nc)?$")
        den_paths = [
            f for f in self.list_filepaths()
            if regex.match(os.path.basename(f))
        ]
        if not den_paths: return None

        # Build list of (pertcase, path) tuples.
        pertfile_list = []
        for path in den_paths:
            name = os.path.basename(path)
            match = regex.match(name)
            pertcase, ncext = match.groups()
            pertfile_list.append((int(pertcase), path))

        # DSU sort.
        pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
        return [
            dict2namedtuple(pertcase=item[0], path=item[1])
            for item in pertfile_list
        ]
Example #10
    def get_doses(self, method="gaussian", step=0.1, width=0.2):
        """
        Compute the electronic DOS on a linear mesh.

        Args:
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.

        Returns: namedtuple with edos (|ElectronDos| object), vdos and vdos_spin.
        """
        self.kpoints.check_weights()
        edos = self.ddks[0].ebands.get_edos(method=method, step=step, width=width)
        mesh = edos[0].mesh
        nw = len(mesh)
        values = np.zeros((self.nsppol, nw))
        vmod_skb = self.vskb
        if method == "gaussian":
            for spin in range(self.nsppol):
                for k, kpoint in enumerate(self.kpoints):
                    wk = kpoint.weight
                    for band in range(self.nband):
                        e = self.eigens[spin, k, band]
                        values[spin] += wk * vmod_skb[spin, k, band] * gaussian(mesh, width, center=e)
        else:
            raise NotImplementedError("Method %s is not supported" % method)

        vdos_spin = [Function1D(mesh, values[spin]) for spin in range(self.nsppol)]
        vdos = 2 * Function1D(mesh, values[0]) if self.nsppol == 1 else vdos_spin[0] + vdos_spin[1]

        return dict2namedtuple(edos=edos, vdos=vdos, vdos_spin=vdos_spin)
Example #11
    def get_msq_tmesh(self, tmesh, iatom_list=None, what_list=("displ", "vel")):
        """
        Compute the mean square displacement for each atom in `iatom_list` as a function of T,
        in Cartesian coordinates and atomic units.

        Args:
            tmesh: array-like with temperatures in Kelvin.
            iatom_list: List of atom sites to compute. None if all atoms are wanted.
            what_list: "displ" for displacement, "vel" for velocity tensor.

        Return:
            namedtuple with (tmesh=tmesh, displ=msq_d, vel=msq_v)

            msq_d = np.empty((natom, 3, 3, nt))
        """
        tmesh = np.array(tmesh)
        nt = len(tmesh)

        # Frequency mesh starts at iomin to avoid 1/0 and to ignore possible negative frequencies.
        for iomin, w in enumerate(self.wmesh):
            if w > 1e-12: break
        else:
            raise ValueError("Cannot find index such that wmesh[i] > 1e-12 !!!")
        wvals = self.wmesh[iomin:]
        nw = len(wvals)

        # We will compute: Ucart(T, k, ij) = 1/M_k \int dw (n(w) + 1/2) g_ij(w) / w for the k-atom in a.u.
        # Calculate Bose-Einstein occupation factors only once for each T (instead of for each atom).
        npht = np.zeros((nt, nw))
        for it, temp in enumerate(tmesh):
            npht[it] = abu.occ_be(wvals, temp * abu.kb_HaK) + 0.5

        natom = len(self.structure)
        msq_d = np.empty((natom, 3, 3, nt))
        msq_v = np.empty((natom, 3, 3, nt))
        what_list = list_strings(what_list)

        # Perform frequency integration to get tensor(T)
        from scipy.integrate import simps
        if iatom_list is not None: iatom_list = set(iatom_list)
        for iatom in range(natom):
            if iatom_list is not None and iatom not in iatom_list: continue
            symbol = self.structure[iatom].specie.symbol
            for it in range(nt):
                fn = self.values[iatom, :, :, iomin:] * npht[it]
                if "displ" in what_list:
                    # Mean square displacement for each atom as a function of T (bohr^2).
                    ys = fn / wvals
                    fact = 1.0 / (self.amu_symbol[symbol] * abu.amu_emass)
                    msq_d[iatom, :, :, it] = simps(ys, x=wvals) * fact * abu.Bohr_Ang ** 2
                if "vel" in what_list:
                    # Mean square velocity for each atom as a function of T (bohr^2/atomic time unit^2).
                    ys = fn * wvals
                    fact = 1.0 / (self.amu_symbol[symbol] * abu.amu_emass)
                    msq_v[iatom, :, :, it] = simps(ys, x=wvals) * fact # * abu.velocity_at_to_si ** 2

        return dict2namedtuple(tmesh=tmesh, displ=msq_d, vel=msq_v)
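
The frequency integration above boils down to a Bose-Einstein occupation factor and a Simpson quadrature over the frequency mesh. A self-contained sketch of that single step; the generalized DOS is made up, and abu.occ_be and abu.kb_HaK are mimicked explicitly rather than imported:

    import numpy as np
    from scipy.integrate import simpson   # named simps in older scipy

    kb_HaK = 3.1668115634556076e-06       # Boltzmann constant in Ha/K

    def occ_be(w, kt):
        """Bose-Einstein occupation n(w) for frequencies w and temperature kT (same units)."""
        return 1.0 / (np.exp(w / kt) - 1.0)

    wvals = np.linspace(1e-4, 1.5e-2, 200)      # phonon frequencies in Ha (fake mesh)
    gw = np.exp(-((wvals - 5e-3) / 2e-3) ** 2)  # fake generalized DOS g(w)

    temp = 300.0                                # Kelvin
    n_plus_half = occ_be(wvals, temp * kb_HaK) + 0.5

    # Integrand of <u^2>(T): (n(w) + 1/2) g(w) / w  (the 1/M mass factor is omitted here).
    msq = simpson(n_plus_half * gw / wvals, x=wvals)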
Example #12
    def get_eos_fits_dataframe(self, eos_names="murnaghan"):
        """
        Fit energy as function of volume to get the equation of state,
        equilibrium volume, bulk modulus and its derivative with respect to pressure.

        Args:
            eos_names: String or list of strings with EOS names.
                For the list of available models, see pymatgen.analysis.eos.

        Return:
            (fits, dataframe) namedtuple.
                fits is a list of ``EOSFit object``
                dataframe is a |pandas-DataFrame| with the final results.
        """
        # Read volumes and energies from the GSR files.
        energies, volumes = [], []
        for label, gsr in self.items():
            energies.append(float(gsr.energy))
            volumes.append(float(gsr.structure.volume))

        # Order data by volumes if needed.
        if np.any(np.diff(volumes) < 0):
            ves = sorted(zip(volumes, energies), key=lambda t: t[0])
            volumes = [t[0] for t in ves]
            energies = [t[1] for t in ves]

        # Note that eos.fit expects lengths in Angstrom, and energies in eV.
        # I'm also monkey-patching the plot method.
        from pymatgen.analysis.eos import EOS
        if eos_names == "all":
            # Use all the available models.
            eos_names = [
                n for n in EOS.MODELS
                if n not in ("deltafactor", "numerical_eos")
            ]
        else:
            eos_names = list_strings(eos_names)

        fits, index, rows = [], [], []
        for eos_name in eos_names:
            try:
                fit = EOS(eos_name=eos_name).fit(volumes, energies)
            except Exception as exc:
                cprint("EOS %s raised exception:\n%s" % (eos_name, str(exc)))
                continue

            # Replace plot with plot_ax method
            fit.plot = fit.plot_ax
            fits.append(fit)
            index.append(eos_name)
            rows.append(
                OrderedDict([(aname, getattr(fit, aname))
                             for aname in ("v0", "e0", "b0_GPa", "b1")]))

        dataframe = pd.DataFrame(
            rows, index=index, columns=list(rows[0].keys()) if rows else None)
        return dict2namedtuple(fits=fits, dataframe=dataframe)
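
The actual fitting is delegated to pymatgen's EOS class, and the loop only collects the v0, e0, b0_GPa and b1 attributes of each fit. A small usage sketch with synthetic E(V) data (requires pymatgen; the parabolic energies are invented for illustration):

    import numpy as np
    from pymatgen.analysis.eos import EOS

    volumes = np.linspace(18.0, 24.0, 13)                 # Ang^3 (synthetic)
    energies = -10.0 + 0.05 * (volumes - 21.0) ** 2       # eV (synthetic, minimum at V0 = 21)

    fit = EOS(eos_name="murnaghan").fit(volumes, energies)
    print(fit.v0, fit.e0, fit.b0_GPa, fit.b1)             # equilibrium volume, energy, bulk modulus, B'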
Example #13
    def eval_line(self, point1, point2, num=200, cartesian=False, kpoint=None):
        """
        Interpolate values along a line.

        Args:
            point1: First point of the line. Accepts 3d vector or integer.
                The vector is in reduced coordinates unless `cartesian == True`.
                If integer, the first point of the line is given by the i-th site of the structure
                e.g. `point1=0, point2=1` gives the line passing through the first two atoms.
            point2: Second point of the line. Same API as `point1`.
            num: Number of points sampled along the line.
            cartesian: By default, `point1` and `point2` are interpreted as points in fractional
                coordinates (if not integers). Use True to pass points in cartesian coordinates.
            kpoint: k-point in reduced coordinates. If not None, the phase-factor e^{ikr} is included.

        Return: named tuple with
            site1, site2: None if the points do not represent atomic sites.
            points: Points in fractional coords.
            dist: the distance of points along the line in Ang.
            values: numpy array of shape [ndt, num] with interpolated values.
        """
        site1 = None
        if duck.is_intlike(point1):
            if point1 >= len(self.structure):
                raise ValueError("point1: %s > natom: %s" %
                                 (point1, len(self.structure)))
            site1 = self.structure[point1]
            point1 = site1.coords if cartesian else site1.frac_coords

        site2 = None
        if duck.is_intlike(point2):
            if point2 >= len(self.structure):
                raise ValueError("point2: %s > natom: %s" %
                                 (point2, len(self.structure)))
            site2 = self.structure[point2]
            point2 = site2.coords if cartesian else site2.frac_coords

        point1 = np.reshape(point1, (3, ))
        point2 = np.reshape(point2, (3, ))
        if cartesian:
            red_from_cart = self.structure.lattice.inv_matrix.T
            point1 = np.dot(red_from_cart, point1)
            point2 = np.dot(red_from_cart, point2)

        p21 = point2 - point1
        line_points = np.reshape(
            [alpha * p21 for alpha in np.linspace(0, 1, num=num)], (-1, 3))
        dist = self.structure.lattice.norm(line_points)
        line_points += point1

        return dict2namedtuple(site1=site1,
                               site2=site2,
                               points=line_points,
                               dist=dist,
                               values=self.eval_points(line_points,
                                                       kpoint=kpoint))
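
When cartesian is True, the two points are converted to fractional coordinates by multiplying with the transposed inverse of the lattice matrix. A plain-numpy sketch of that conversion with a made-up orthorhombic lattice, where the rows of the matrix are the lattice vectors (the convention behind lattice.inv_matrix):

    import numpy as np

    lattice = np.array([[4.0, 0.0, 0.0],
                        [0.0, 5.0, 0.0],
                        [0.0, 0.0, 6.0]])      # rows = lattice vectors (Ang)

    red_from_cart = np.linalg.inv(lattice).T   # plays the role of lattice.inv_matrix.T
    cart_point = np.array([2.0, 2.5, 3.0])

    frac_point = np.dot(red_from_cart, cart_point)
    print(frac_point)                          # [0.5 0.5 0.5]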
Example #14
    def check_update(self):
        """
        Check the consistency between the pseudopotential table and the database, and upgrade the database.
        This usually happens when new pseudopotentials have been added to the dojo directory
        (very likely) or when pseudos have been removed (unlikely!)

        Returns: namedtuple with the following attributes.
            nrec_removed
            nrec_added
        """
        nrec_removed, nrec_added = 0, 0
        missing = defaultdict(list)

        for formula, species in self.gbrv_formula_and_species:
            # Get **all** the possible combinations for these species.
            comb_list = self.dojo_pptable.all_combinations_for_elements(
                set(species))

            # Check consistency between records and pseudos!
            # This is gonna be slow if we have several possibilities!
            records = self[formula]
            recidx_found = []
            for pseudos in comb_list:
                for i, rec in enumerate(records):
                    if rec.matches_pseudos(pseudos):
                        recidx_found.append(i)
                        break

                else:
                    missing[formula].append(pseudos)

            # Remove stale records (if any)
            num_found = len(recidx_found)
            if num_found != len(records):
                num_stale = len(records) - num_found
                print("Found %s stale records" % num_stale)
                nrec_removed += num_stale
                self[formula] = [records[i] for i in recidx_found]

        if missing:
            for formula, pplist in missing.items():
                for pseudos in pplist:
                    nrec_added += 1
                    self[formula].append(
                        GbrvRecord(self.struct_type, formula, pseudos,
                                   self.dojo_pptable))

        if missing or nrec_removed:
            print("Updating database.")
            self.json_write()

        return dict2namedtuple(nrec_removed=nrec_removed,
                               nrec_added=nrec_added)
Example #15
    def get_eos_fits_dataframe(self, eos_names="murnaghan"):
        """
        Fit energy as function of volume to get the equation of state,
        equilibrium volume, bulk modulus and its derivative with respect to pressure.

        Args:
            eos_names: String or list of strings with EOS names.
                For the list of available models, see pymatgen.analysis.eos.

        Return:
            (fits, dataframe) namedtuple.
                fits is a list of ``EOSFit object``
                dataframe is a |pandas-DataFrame| with the final results.
        """
        # Read volumes and energies from the GSR files.
        energies, volumes = [], []
        for label, gsr in self.items():
            energies.append(float(gsr.energy))
            volumes.append(float(gsr.structure.volume))

        # Order data by volumes if needed.
        if np.any(np.diff(volumes) < 0):
            ves = sorted(zip(volumes, energies), key=lambda t: t[0])
            volumes = [t[0] for t in ves]
            energies = [t[1] for t in ves]

        # Note that eos.fit expects lengths in Angstrom, and energies in eV.
        # I'm also monkey-patching the plot method.
        from pymatgen.analysis.eos import EOS
        if eos_names == "all":
            # Use all the available models.
            eos_names = [n for n in EOS.MODELS if n not in ("deltafactor", "numerical_eos")]
        else:
            eos_names = list_strings(eos_names)

        fits, index, rows = [], [], []
        for eos_name in eos_names:
            try:
                fit = EOS(eos_name=eos_name).fit(volumes, energies)
            except Exception as exc:
                cprint("EOS %s raised exception:\n%s" % (eos_name, str(exc)))
                continue

            # Replace plot with plot_ax method
            fit.plot = fit.plot_ax
            fits.append(fit)
            index.append(eos_name)
            rows.append(OrderedDict([(aname, getattr(fit, aname)) for aname in
                ("v0", "e0", "b0_GPa", "b1")]))

        dataframe = pd.DataFrame(rows, index=index, columns=list(rows[0].keys()) if rows else None)
        return dict2namedtuple(fits=fits, dataframe=dataframe)
Example #16
    def symeq(self, k1_frac_coords, k2_frac_coords, atol=None):
        """
        Test whether two k-points in fractional coordinates are symmetry equivalent
        i.e. if there is a symmetry operation TO (including time-reversal T, if present)
        such that::

            TO(k1) = k2 + G0

        Return: namedtuple with::

            isym: The index of the symmetry operation such that TO(k1) = k2 + G0
                Set to -1 if k1 and k2 are not related by symmetry.
            op: Symmetry operation.
            g0: numpy vector.
        """
        for isym, sym in enumerate(self):
            sk_coords = sym.rotate_k(k1_frac_coords, wrap_tows=False)
            if issamek(sk_coords, k2_frac_coords, atol=atol):
                g0 = sym.rotate_k(k1_frac_coords) - k2_frac_coords
                return dict2namedtuple(isym=isym, op=self[isym], g0=g0)

        return dict2namedtuple(isym=-1, op=None, g0=None)
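
Two k-points are symmetry equivalent when a rotated k1 differs from k2 by a reciprocal lattice vector G0, i.e. when the difference has integer components. A minimal numpy sketch of that test, with issamek and the symmetry objects of the original module replaced by explicit code (the rotation matrix is an arbitrary example):

    import numpy as np

    def issamek(k1, k2, atol=1e-8):
        """True if k1 and k2 differ by a reciprocal lattice vector (integer components)."""
        diff = np.asarray(k1) - np.asarray(k2)
        return np.allclose(diff, np.rint(diff), atol=atol)

    # A 90-degree rotation in the ab plane, expressed as an integer matrix in reduced coordinates.
    rot = np.array([[0, -1, 0],
                    [1,  0, 0],
                    [0,  0, 1]])

    k1 = np.array([0.25, 0.0, 0.0])
    k2 = np.array([0.0, 1.25, 0.0])

    sk = rot @ k1                              # rotated k1 = [0, 0.25, 0]
    print(issamek(sk, k2))                     # True: sk - k2 = [0, -1, 0] is a G vector
    print(np.rint(sk - k2).astype(int))        # the G0 vector connecting them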
Example #17
    def symeq(self, k1_frac_coords, k2_frac_coords, atol=None):
        """
        Test whether two k-points in fractional coordinates are symmetry equivalent
        i.e. if there is a symmetry operation TO (including time-reversal T, if present)
        such that::

            TO(k1) = k2 + G0

        Return: namedtuple with::

            isym: The index of the symmetry operation such that TO(k1) = k2 + G0
                Set to -1 if k1 and k2 are not related by symmetry.
            op: Symmetry operation.
            g0: numpy vector.
        """
        for isym, sym in enumerate(self):
            sk_coords = sym.rotate_k(k1_frac_coords, wrap_tows=False)
            if issamek(sk_coords, k2_frac_coords, atol=atol):
                g0 = sym.rotate_k(k1_frac_coords) - k2_frac_coords
                return dict2namedtuple(isym=isym, op=self[isym], g0=g0)

        return dict2namedtuple(isym=-1, op=None, g0=None)
Example #18
    def fit_to_frequency(self,
                         fit_function=None,
                         units="eV",
                         min_fit_eta=None,
                         max_fit_eta=None):
        """
        Uses the energies and the displacements to calculate the phonon frequency corresponding to the quadratic
        term of the fit.
        The fit is performed with scipy.optimize.curve_fit based on the function given in input and can also be
        limited to a subset of the displacement values.


        Args:
            fit_function: a function that will be used to fit the data. The first parameter should be the coefficient
                of the quadratic term. If None a simple quadratic fit will be used.
            units: units of the output frequency. Possible values in ("eV", "meV", "Ha", "cm-1", "THz").
                Case-insensitive.
            min_fit_eta: if not None represents minimum value allowed for the (signed) eta to be used in the fit.
            max_fit_eta: if not None represents maximum value allowed for the (signed) eta to be used in the fit.

        Returns:
            A namedtuple with 'freq': the values of the frequency extracted from the fit,
            'fit_params': the parameters obtained from the fit, 'cov': the estimated covariance of fit_params
            (see scipy.optimize.curve_fit documentation for more details).
        """

        if self.energies is None:
            raise ValueError("The energies are required to calculate the fit")

        if min_fit_eta is None:
            min_fit_eta = self.etas.min()
        if max_fit_eta is None:
            max_fit_eta = self.etas.max()

        indices = np.where((min_fit_eta <= self.etas)
                           & (self.etas <= max_fit_eta))

        if fit_function is None:
            fit_function = quadratic_fit_function

        etas = self.etas[indices]
        energies = np.array(self.energies)[indices]

        params, cov = optimize.curve_fit(fit_function, etas, energies)

        # frequency in eV.
        freq = self._quad_coeff_to_freq(params[0])

        return dict2namedtuple(freq=freq * phfactor_ev2units(units),
                               fit_params=params,
                               cov=cov)
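
Stripped of the bookkeeping, the fit is a single scipy.optimize.curve_fit call with a quadratic model whose leading coefficient is later converted to a frequency. A self-contained sketch of the fitting step; quadratic_fit_function is reproduced here as a plausible stand-in, and the displacement/energy data are invented:

    import numpy as np
    from scipy import optimize

    def quadratic_fit_function(eta, a, b):
        """Simple quadratic model: E(eta) = a * eta**2 + b."""
        return a * eta ** 2 + b

    etas = np.linspace(-0.1, 0.1, 11)                      # displacements (fake data)
    energies = 12.0 * etas ** 2 + 0.5                      # energies (fake data, a=12, b=0.5)

    params, cov = optimize.curve_fit(quadratic_fit_function, etas, energies)
    print(params[0])                                        # ~12.0: coefficient of the quadratic term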
Example #19
    def _get_atomview(self, view, select_symbols=None, verbose=0):
        """
        Helper function used to select (inequivalent||all) atoms depending on view.
        Uses spglib to find inequivalent sites.

        Args:
            view: "inequivalent" to show only inequivalent atoms. "all" for all sites.
            select_symbols: String or list of strings with chemical symbols.
                Used to select only atoms of this type.

        Return named tuple with:

                * iatom_list: list of site index.
                * wyckoffs: Wyckoff letters
                * site_labels: Labels for each site in `iatom_list` e.g. Si2a
        """
        natom = len(self.structure)
        if natom == 1: verbose = False
        if verbose:
            print(
                "Calling spglib to find inequivalent sites. Magnetic symmetries (if any) are not taken into account."
            )

        ea = self.structure.spget_equivalent_atoms(printout=verbose > 0)

        # Define iatom_list depending on view
        if view == "all":
            iatom_list = np.arange(natom)
        elif view == "inequivalent":
            iatom_list = ea.irred_pos
        else:
            raise ValueError("Wrong value for view: %s" % str(view))

        # Filter by element symbol.
        if select_symbols is not None:
            select_symbols = set(list_strings(select_symbols))
            iatom_list = [
                i for i in iatom_list
                if self.structure[i].specie.symbol in select_symbols
            ]
            iatom_list = np.array(iatom_list, dtype=int)

        # Slice full arrays.
        wyckoffs = ea.wyckoffs[iatom_list]
        wyck_labels = ea.wyck_labels[iatom_list]
        site_labels = ea.site_labels[iatom_list]

        return dict2namedtuple(iatom_list=iatom_list,
                               wyckoffs=wyckoffs,
                               wyck_labels=wyck_labels,
                               site_labels=site_labels)
Example #20
    def check_update(self):
        """
        Check the consistency between the pseudopotential table and the database, and upgrade the database.
        This usually happens when new pseudopotentials have been added to the dojo directory
        (very likely) or when pseudos have been removed (unlikely!)

        Returns: namedtuple with the following attributes.
            nrec_removed
            nrec_added
        """
        nrec_removed, nrec_added = 0, 0
        missing = defaultdict(list)

        for formula, species in self.gbrv_formula_and_species:
            # Get **all** the possible combinations for these species.
            comb_list = self.dojo_pptable.all_combinations_for_elements(set(species))

            # Check consistency between records and pseudos!
            # This is gonna be slow if we have several possibilities!
            records = self[formula]
            recidx_found = []
            for pseudos in comb_list:
                for i, rec in enumerate(records):
                    if rec.matches_pseudos(pseudos):
                        recidx_found.append(i)
                        break

                else:
                    missing[formula].append(pseudos)

            # Remove stale records (if any)
            num_found = len(recidx_found)
            if num_found != len(records):
                num_stale = len(records) - num_found
                print("Found %s stale records" % num_stale)
                nrec_removed += num_stale
                self[formula] = [records[i] for i in recidx_found]

        if missing:
            for formula, pplist in missing.items():
                for pseudos in pplist:
                    nrec_added += 1
                    self[formula].append(GbrvRecord(self.struct_type, formula, pseudos, self.dojo_pptable))

        if missing or nrec_removed:
            print("Updating database.")
            self.json_write()

        return dict2namedtuple(nrec_removed=nrec_removed, nrec_added=nrec_added)
Example #21
    def eval_line(self, point1, point2, num=200, cartesian=False, kpoint=None):
        """
        Interpolate values along a line.

        Args:
            point1: First point of the line. Accepts 3d vector or integer.
                The vector is in reduced coordinates unless `cartesian == True`.
                If integer, the first point of the line is given by the i-th site of the structure
                e.g. `point1=0, point2=1` gives the line passing through the first two atoms.
            point2: Second point of the line. Same API as `point1`.
            num: Number of points sampled along the line.
            cartesian: By default, `point1` and `point2` are interpreted as points in fractional
                coordinates (if not integers). Use True to pass points in cartesian coordinates.
            kpoint: k-point in reduced coordinates. If not None, the phase-factor e^{ikr} is included.

        Return: named tuple with
            site1, site2: None if the points do not represent atomic sites.
            points: Points in fractional coords.
            dist: the distance of points along the line in Ang.
            values: numpy array of shape [ndt, num] with interpolated values.
        """
        site1 = None
        if duck.is_intlike(point1):
            if point1 >= len(self.structure):
                raise ValueError("point1: %s > natom: %s" % (point1, len(self.structure)))
            site1 = self.structure[point1]
            point1 = site1.coords if cartesian else site1.frac_coords

        site2 = None
        if duck.is_intlike(point2):
            if point2 >= len(self.structure):
                raise ValueError("point2: %s > natom: %s" % (point2, len(self.structure)))
            site2 = self.structure[point2]
            point2 = site2.coords if cartesian else site2.frac_coords

        point1 = np.reshape(point1, (3,))
        point2 = np.reshape(point2, (3,))
        if cartesian:
            red_from_cart = self.structure.lattice.inv_matrix.T
            point1 = np.dot(red_from_cart, point1)
            point2 = np.dot(red_from_cart, point2)

        p21 = point2 - point1
        line_points = np.reshape([alpha * p21 for alpha in np.linspace(0, 1, num=num)], (-1, 3))
        dist = self.structure.lattice.norm(line_points)
        line_points += point1

        return dict2namedtuple(site1=site1, site2=site2, points=line_points, dist=dist,
                               values=self.eval_points(line_points, kpoint=kpoint))
Example #22
    def validate(self):
        """
        Run ABINIT in dry mode to validate the input file.

        Return:
            `namedtuple` with the following attributes:

                retcode: Return code. 0 if OK.
                log_file:  log file of the Abinit run, use log_file.read() to access its content.
                stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content.

        Raises:
            `RuntimeError` if executable is not in $PATH.
        """
        task = AbinitTask.temp_shell_task(inp=self)
        retcode = task.start_and_wait(autoparal=False, exec_args=["--dry-run"])
        return dict2namedtuple(retcode=retcode, log_file=task.log_file, stderr_file=task.stderr_file)
Example #23
    def validate(self):
        """
        Run ABINIT in dry mode to validate the input file.

        Return:
            `namedtuple` with the following attributes:

                retcode: Return code. 0 if OK.
                log_file:  log file of the Abinit run, use log_file.read() to access its content.
                stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content.

        Raises:
            `RuntimeError` if executable is not in $PATH.
        """
        task = AbinitTask.temp_shell_task(inp=self) 
        retcode = task.start_and_wait(autoparal=False, exec_args=["--dry-run"])
        return dict2namedtuple(retcode=retcode, log_file=task.log_file, stderr_file=task.stderr_file)
Example #24
    def get_data_nmtuple(self, itemp, estep, spins=None):
        nkpt = self.ebands.nkpt
        spins = range(self.ebands.nsppol) if spins is None else spins

        emesh, emin, emax = self.get_emesh_eminmax(estep)
        nene = len(emesh)
        #print("nkpt", nkpt, "nene", nene)
        data = np.zeros((nkpt, nene))

        # aw: [nwr, ntemp, max_nbcalc, nkcalc, nsppol] array
        for spin in spins:
            for ik in range(nkpt):
                for band in range(self.ebands.nband_sk[spin, ik]):
                    w = self.aw_meshes[spin, ik, band]
                    aw = self.aw[spin, ik, band, itemp]
                    data[ik] += UnivariateSpline(w, aw, k=self.k, s=self.s, ext=self.ext)(emesh)

        return dict2namedtuple(data=data, emesh=emesh, emin=emin, emax=emax, spins=spins, nkpt=nkpt)
Example #25
    def fit_energies(self, tstart=0, tstop=800, num=100):
        """
        Performs a fit of the energies as a function of the volume at different temperatures.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes::

                tot_en: numpy array with shape (nvols, num) with the energies used for the fit
                fits: list of subclasses of pymatgen.analysis.eos.EOSBase, depending on the type of
                    eos chosen. Contains the fit for the energies at the different temperatures.
                min_en: numpy array with the minimum energies for the list of temperatures
                min_vol: numpy array with the minimum volumes for the list of temperatures
                temp: numpy array with the temperatures considered

        """

        tmesh = np.linspace(tstart, tstop, num)

        # array with phonon energies and shape (n_vol, n_temp)
        ph_energies = self.get_vib_free_energies(tstart, tstop, num)

        tot_en = self.energies[np.newaxis, :].T + ph_energies + self.volumes[
            np.newaxis, :].T * self.pressure / abu.eVA3_GPa

        # list of fits objects, one for each temperature
        fits = [self.eos.fit(self.volumes, e) for e in tot_en.T]

        # list of minimum volumes and energies, one for each temperature
        min_volumes = np.array([fit.v0 for fit in fits])
        min_energies = np.array([fit.e0 for fit in fits])

        return dict2namedtuple(tot_en=tot_en,
                               fits=fits,
                               min_en=min_energies,
                               min_vol=min_volumes,
                               temp=tmesh)
Example #26
    def get_thermodynamic_properties(self, tstart=0, tstop=800, num=100):
        """
        Generates the thermodynamic properties corresponding to all the structures, either from the phonon
        frequencies or from a fit of the known values.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes for all the volumes:

                tmesh: numpy array with the list of temperatures. Shape (num).
                cv: constant-volume specific heat, in eV/K. Shape (nvols, num).
                free_energy: free energy, in eV. Shape (nvols, num).
                entropy: entropy, in eV/K. Shape (nvols, num).
                zpe: zero point energy in eV. Shape (nvols).
        """

        w = self.fitted_frequencies

        tmesh = np.linspace(tstart, tstop, num)
        weights = self.grun.doses['qpoints'].weights

        free_energy = np.zeros((self.nvols, num))
        cv = np.zeros((self.nvols, num))
        entropy = np.zeros((self.nvols, num))
        zpe = np.zeros(self.nvols)

        for i in range(self.nvols):
            free_energy[i] = [get_free_energy(w[i], weights, t) for t in tmesh]
            cv[i] = [get_cv(w[i], weights, t) for t in tmesh]
            entropy[i] = [get_entropy(w[i], weights, t) for t in tmesh]
            zpe[i] = get_zero_point_energy(w[i], weights)

        return dict2namedtuple(tmesh=tmesh,
                               cv=cv,
                               free_energy=free_energy,
                               entropy=entropy,
                               zpe=zpe)
Example #27
    def find_1den_files(self):
        """
        Abinit adds the idir-ipert index at the end of the 1DEN file and this breaks the extension,
        e.g. out_DEN1. This method scans the files in the directories and returns a list of namedtuples.
        Each namedtuple gives the `path` of the 1DEN file and the `pertcase` index.
        """
        regex = re.compile(r"out_DEN(\d+)(\.nc)?$")
        den_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
        if not den_paths: return None

        # Build list of (pertcase, path) tuples.
        pertfile_list = []
        for path in den_paths:
            name = os.path.basename(path)
            match = regex.match(name)
            pertcase, ncext = match.groups()
            pertfile_list.append((int(pertcase), path))

        # DSU sort.
        pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
        return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list]
Example #28
    def get_thermodynamic_properties(self, tstart=0, tstop=800, num=100):
        """
        Generates all the thermodynamic properties corresponding to all the volumes using the phonon DOS.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes for all the volumes:

                tmesh: numpy array with the list of temperatures. Shape (num).
                cv: constant-volume specific heat, in eV/K. Shape (nvols, num).
                free_energy: free energy, in eV. Shape (nvols, num).
                entropy: entropy, in eV/K. Shape (nvols, num).
                zpe: zero point energy in eV. Shape (nvols).
        """
        tmesh = np.linspace(tstart, tstop, num)
        cv = self._get_thermodynamic_prop("cv", tstart, tstop, num)
        free_energy = self._get_thermodynamic_prop("free_energy", tstart,
                                                   tstop, num)
        entropy = self._get_thermodynamic_prop("entropy", tstart, tstop, num)
        zpe = np.zeros(self.nvols)

        for i, dos in zip(self.ind_doses, self.doses):
            zpe[i] = dos.zero_point_energy

        dos_vols = self.volumes[self.ind_doses]
        missing_vols = self.volumes[self._ind_energy_only]

        fit_params = np.polyfit(dos_vols, zpe[self.ind_doses], self.fit_degree)
        zpe[self._ind_energy_only] = np.poly1d(fit_params)(missing_vols)

        return dict2namedtuple(tmesh=tmesh,
                               cv=cv,
                               free_energy=free_energy,
                               entropy=entropy,
                               zpe=zpe)
Example #29
    def get_thermodynamic_properties(self, tstart=0, tstop=800, num=100):
        """
        Generates the thermodynamic properties corresponding to all the structures, either from the phonon
        frequencies or from a fit of the known values.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes for all the volumes:

                tmesh: numpy array with the list of temperatures. Shape (num).
                cv: constant-volume specific heat, in eV/K. Shape (nvols, num).
                free_energy: free energy, in eV. Shape (nvols, num).
                entropy: entropy, in eV/K. Shape (nvols, num).
                zpe: zero point energy in eV. Shape (nvols).
        """

        w = self.fitted_frequencies

        tmesh = np.linspace(tstart, tstop, num)
        weights = self.grun.doses['qpoints'].weights

        free_energy = np.zeros((self.nvols, num))
        cv = np.zeros((self.nvols, num))
        entropy = np.zeros((self.nvols, num))
        zpe = np.zeros(self.nvols)

        for i in range(self.nvols):
            free_energy[i] = [get_free_energy(w[i], weights, t) for t in tmesh]
            cv[i] = [get_cv(w[i], weights, t) for t in tmesh]
            entropy[i] = [get_entropy(w[i], weights, t) for t in tmesh]
            zpe[i] = get_zero_point_energy(w[i], weights)

        return dict2namedtuple(tmesh=tmesh, cv=cv, free_energy=free_energy, entropy=entropy,
                               zpe=zpe)
Example #30
    def find_jobs_torun(self, max_njobs):
        """
        Find entries whose results have not yet been calculated.

        Args:
            max_njobs: Maximum number of jobs to return.
        """
        jobs, got = [], 0
        for struct_type, formula, data in self.iter_struct_formula_data():
            if got == max_njobs: break
            if data in ("scheduled", "failed"): continue
            if data is None:
                symbols = list(set(species_from_formula(formula)))
                pseudos = self.table.pseudos_with_symbols(symbols)

                job = dict2namedtuple(formula=formula, struct_type=struct_type, pseudos=pseudos)
                self[struct_type][formula] = "scheduled"
                jobs.append(job)
                got += 1

        # Update the database.
        if jobs: self.json_write()
        return jobs
Example #31
    def fit_energies(self, tstart=0, tstop=800, num=100):
        """
        Performs a fit of the energies as a function of the volume at different temperatures.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes::

                tot_en: numpy array with shape (nvols, num) with the energies used for the fit
                fits: list of subclasses of pymatgen.analysis.eos.EOSBase, depending on the type of
                    eos chosen. Contains the fit for the energies at the different temperatures.
                min_en: numpy array with the minimum energies for the list of temperatures
                min_vol: numpy array with the minimum volumes for the list of temperatures
                temp: numpy array with the temperatures considered

        """

        tmesh = np.linspace(tstart, tstop, num)

        # array with phonon energies and shape (n_vol, n_temp)
        ph_energies = self.get_vib_free_energies(tstart, tstop, num)

        tot_en = self.energies[np.newaxis, :].T + ph_energies + self.volumes[np.newaxis, :].T * self.pressure / abu.eVA3_GPa

        # list of fits objects, one for each temperature
        fits = [self.eos.fit(self.volumes, e) for e in tot_en.T]

        # list of minimum volumes and energies, one for each temperature
        min_volumes = np.array([fit.v0 for fit in fits])
        min_energies = np.array([fit.e0 for fit in fits])

        return dict2namedtuple(tot_en=tot_en, fits=fits, min_en=min_energies, min_vol=min_volumes, temp=tmesh)
Example #32
    def get_thermodynamic_properties(self, tstart=0, tstop=800, num=100):
        """
        Generates all the thermodynamic properties corresponding to all the volumes using the phonon DOS.

        Args:
            tstart: The starting value (in Kelvin) of the temperature mesh.
            tstop: The end value (in Kelvin) of the mesh.
            num: int, optional Number of samples to generate. Default is 100.

        Returns:
            `namedtuple` with the following attributes for all the volumes:

                tmesh: numpy array with the list of temperatures. Shape (num).
                cv: constant-volume specific heat, in eV/K. Shape (nvols, num).
                free_energy: free energy, in eV. Shape (nvols, num).
                entropy: entropy, in eV/K. Shape (nvols, num).
                zpe: zero point energy in eV. Shape (nvols).
        """
        tmesh = np.linspace(tstart, tstop, num)
        cv = np.zeros((self.nvols, num))
        free_energy = np.zeros((self.nvols, num))
        entropy = np.zeros((self.nvols, num))
        internal_energy = np.zeros((self.nvols, num))
        zpe = np.zeros(self.nvols)

        for i, d in enumerate(self.doses):
            cv[i] = d.get_cv(tstart, tstop, num).values
            free_energy[i] = d.get_free_energy(tstart, tstop, num).values
            entropy[i] = d.get_entropy(tstart, tstop, num).values
            zpe[i] = d.zero_point_energy

        return dict2namedtuple(tmesh=tmesh,
                               cv=cv,
                               free_energy=free_energy,
                               entropy=entropy,
                               zpe=zpe)
Example #33
def finite_diff(arr, h, order=1, acc=4, index=None):
    """
    Compute the derivative of order `order` by finite difference.
    For each point in arr, the function tries to use central differences
    and falls back to forward/backward approximations for points that are close to the extrema.
    Note that high accuracy levels can fail and raise `ValueError` if not enough points are available in `arr`.

    Args:
        arr: Input array with y-values.
        h: Spacing along x
        order: Derivative order
        acc: accuracy level.
        index: If not None, gives the index of the single element in arr where the derivative is wanted.
            In this case a namedtuple with the derivative, the number of points used and the mode is returned

    Return:
        numpy array or (value, npts, mode) if index is not None .
    """
    arr = np.asarray(arr)

    if np.iscomplexobj(arr):
        raise ValueError("Derivatives of complex functions are not supported!")

    # Retrieve weights.
    try:
        centr_ws = central_fdiff_weights[order][acc]
    except KeyError:
        raise ValueError("Centeral diff weights for order: %s, and accuracy: %s are missing!" % (order, acc))

    npsum = np.sum
    ders = np.empty(arr.shape)
    n = len(arr)
    cpad = len(centr_ws) // 2

    for i in range(n):
        if index is not None and i != index: continue
        start = i - cpad
        stop = i + cpad + 1

        if start >= 0 and stop <= n:
            # Can do central difference.
            ders[i] = npsum(centr_ws * arr[start:stop])
            npts = len(centr_ws)
            mode = "central"

        elif start < 0:
            # Try forward.
            forw_ws = forward_fdiff_weights[order][acc]
            stop = i + len(forw_ws)
            if stop > n:
                raise ValueError(
                        ("\n\tDon't have enough points for index: %s in array of lenght: %s\n" +
                         "\tto compute forward finite difference with order: %s, and acc: %s (num_weights: %s)\n" +
                         "\tDecrease acc or increase the number of sampling points.") % (i, n, order, acc, len(forw_ws)))
            ders[i] = npsum(forw_ws * arr[i:stop])
            npts = len(forw_ws)
            mode = "forward"

        elif stop > n:
            # Try backward.
            back_ws = backward_fdiff_weights[order][acc]
            start = i - len(back_ws) + 1
            if start < 0:
                raise ValueError(
                    ("\n\tDon't have enough points for index: %s in array of length: %s\n" +
                    "\tto compute backward finite difference with order: %s, and acc: %s (num_weights: %s)\n" +
                    "\tDecrease acc or increase the number of sampling points.") % (i, n, order, acc, len(back_ws)))
            ders[i] = npsum(back_ws * arr[start:i+1])
            npts = len(back_ws)
            mode = "backward"

    if index is None:
        return ders / (h ** order)
    else:
        return dict2namedtuple(value=ders[index] / (h ** order), npts=npts, mode=mode)
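
For the most common case (first derivative, central differences) the acc=4 weights are [1/12, -2/3, 0, 2/3, -1/12]. A standalone sketch that applies this stencil to sin(x) and checks it against cos(x); it only handles interior points, whereas the function above also falls back to one-sided stencils near the array boundaries:

    import numpy as np

    h = 0.01
    x = np.arange(0.0, 2.0 * np.pi, h)
    arr = np.sin(x)

    centr_ws = np.array([1.0 / 12, -2.0 / 3, 0.0, 2.0 / 3, -1.0 / 12])
    cpad = len(centr_ws) // 2

    ders = np.empty_like(arr)
    for i in range(cpad, len(arr) - cpad):
        ders[i] = np.sum(centr_ws * arr[i - cpad:i + cpad + 1]) / h

    interior = slice(cpad, len(arr) - cpad)
    print(np.max(np.abs(ders[interior] - np.cos(x[interior]))))  # truncation error is O(h**4), ~1e-9 here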
Example #34
    def anacompare_phdos(self, nqsmalls, asr=2, chneut=1, dipdip=1, dos_method="tetra", ngqpt=None,
                         num_cpus=None, stream=sys.stdout):
        """
        Args:
            nqsmalls: List of integers defining the q-mesh for the DOS. Each integer gives
                the number of divisions used to sample the smallest reciprocal lattice vector.
            asr, chneut, dipdip: Anaddb input variables. See the official documentation.
            dos_method: Technique for DOS computation. Possible choices: "tetra", "gaussian" or "gaussian:0.001 eV".
                In the latter case, the value 0.001 eV is used as gaussian broadening.
            ngqpt: Number of divisions for the q-mesh in the DDB file. Auto-detected if None (default).
            num_cpus: Number of CPUs (threads) used to parallelize the calculation of the DOSes. Autodetected if None.
            stream: File-like object used for printing.

        Return:
            `namedtuple` with the following attributes:

                phdoses: List of :class:`PhononDos` objects
                plotter: :class:`PhononDosPlotter` object. Use plotter.plot() to visualize the results.
        """
        num_cpus = get_ncpus() if num_cpus is None else num_cpus
        if num_cpus <= 0: num_cpus = 1
        num_cpus = min(num_cpus, len(nqsmalls))

        # TODO: anaget_phdos
        def do_work(nqsmall):
            _, phdos_file = self.anaget_phbst_and_phdos_files(
                nqsmall=nqsmall, ndivsm=1, asr=asr, chneut=chneut, dipdip=dipdip, dos_method=dos_method, ngqpt=ngqpt)
            return phdos_file.phdos                                                                                          

        if num_cpus == 1:
            # Sequential version
            phdoses = [do_work(nqs) for nqs in nqsmalls]

        else:
            # Threads
            print("Computing %d phonon DOS with %d threads" % (len(nqsmalls), num_cpus) )
            phdoses = [None] * len(nqsmalls)

            def worker():
                while True:
                    nqsm, phdos_index = q.get()
                    phdos = do_work(nqsm)
                    phdoses[phdos_index] = phdos
                    q.task_done()

            from threading import Thread
            try:
                from Queue import Queue # py2k
            except ImportError:
                from queue import Queue # py3k

            q = Queue()
            for i in range(num_cpus):
                t = Thread(target=worker)
                t.daemon = True
                t.start()

            for i, nqsmall in enumerate(nqsmalls):
                q.put((nqsmall, i))

            # block until all tasks are done
            q.join()       
    
        # Compute relative difference wrt last phonon DOS. Be careful because the DOSes may be defined 
        # on different frequency meshes ==> spline on the mesh of the last DOS. 
        last_mesh, converged = phdoses[-1].mesh, False
        for i, phdos in enumerate(phdoses[:-1]):
            splined_dos = phdos.spline_on_mesh(last_mesh)
            abs_diff = (splined_dos - phdoses[-1]).abs()
            print(" Delta(Phdos[%d] - Phdos[%d]) / Phdos[%d]: %f" % 
                (i, len(phdoses)-1, len(phdoses)-1, abs_diff.integral().values[-1]), file=stream)

        # Fill the plotter.
        plotter = PhononDosPlotter()
        for nqsmall, phdos in zip(nqsmalls, phdoses):
            plotter.add_phdos(label="nqsmall %d" % nqsmall, phdos=phdos)

        return dict2namedtuple(phdoses=phdoses, plotter=plotter)
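
The threaded branch is the standard queue.Queue worker-pool pattern: daemon threads pull (argument, slot) pairs, write each result into a preallocated list and call task_done() so that q.join() can block until everything has been processed. A stripped-down, self-contained version of the same pattern:

    from queue import Queue
    from threading import Thread

    def do_work(n):
        return n * n                       # placeholder for the expensive DOS calculation

    inputs = [2, 4, 8, 16]
    results = [None] * len(inputs)
    num_threads = 2

    q = Queue()

    def worker():
        while True:
            value, index = q.get()
            results[index] = do_work(value)
            q.task_done()

    for _ in range(num_threads):
        t = Thread(target=worker)
        t.daemon = True
        t.start()

    for i, value in enumerate(inputs):
        q.put((value, i))

    q.join()                               # block until all tasks are done
    print(results)                         # [4, 16, 64, 256]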
Example #35
    def submit(self, **kwargs):
        """
        Submit a job script that will run the schedulers with `abirun.py`.

        Args:
            verbose: Verbosity level
            dry_run: Don't submit the script if dry_run. Default: False

        Returns:
            namedtuple with attributes:
                retcode: Return code as returned by the submission script.
                qjob: :class:`QueueJob` object.
                num_flows_inbatch: Number of flows executed by the batch script

            If no submission is performed (e.g. empty list of flows or dry_run), a plain
            integer return code is returned instead.
        """
        verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False)

        if not self.flows:
            print("Cannot submit an empty list of flows!")
            return 0

        if hasattr(self, "qjob"):
            # This usually happens when we have loaded the object from pickle
            # and we have already submitted the batch script to the queue.
            # At this point we need to understand if the previous batch job
            # is still running before trying to submit it again. There are three cases:
            #
            # 1) The batch script has completed within the time limit and therefore
            #    the pid_file has been removed by the script. In this case, we
            #    should not try to submit it again.

            # 2) The batch script has been killed due to timelimit (other reasons are possible
            #    but we neglect them). In this case the pid_file exists but there's no job with
            #    this pid running and we can resubmit it again.

            # 3) The batch script is still running.
            print("BatchLauncher has qjob %s" % self.qjob)

            if not self.batch_pid_file.exists:
                print(
                    "It seems that the batch script reached the end. Wont' try to submit it again"
                )
                return 0

            msg = (
                "Here I have to understand if qjob is in the queue."
                " but I need an abstract API that can retrieve info from the queue id"
            )
            raise RuntimeError(msg)

            # TODO: Tentative API
            if self.qjob.in_status("Running|Queued"):
                print("Job is still running. Cannot submit")
            else:
                del self.qjob

        script, num_flows_inbatch = self._get_script_nflows()

        if num_flows_inbatch == 0:
            print(
                "All flows have reached all_ok! Batch script won't be submitted"
            )
            return 0

        if verbose:
            print("*** submission script ***")
            print(script)

        # Write the script.
        self.script_file.write(script)
        self.script_file.chmod(0o740)

        # Build the flows.
        for flow in self.flows:
            flow.build_and_pickle_dump()

        # Submit the task and save the queue id.
        if dry_run: return -1

        print("Will submit %s flows in batch script" % len(self.flows))
        self.qjob, process = self.qadapter.submit_to_queue(
            self.script_file.path)

        # Save the queue id in the pid file
        # The file will be removed by the job script if execution is completed.
        self.batch_pidfile.write(str(self.qjob.qid))

        self.pickle_dump()
        process.wait()

        return dict2namedtuple(retcode=process.returncode,
                               qjob=self.qjob,
                               num_flows_inbatch=num_flows_inbatch)
Example #36
    def submit(self, **kwargs):
        """
        Submit a job script that will run the schedulers with `abirun.py`.

        Args:
            verbose: Verbosity level
            dry_run: Don't submit the script if dry_run. Default: False

        Returns:
            namedtuple with attributes:
                retcode: Return code as returned by the submission script.
                qjob: :class:`QueueJob` object.
                num_flows_inbatch: Number of flows executed by the batch script

            If no submission is performed (e.g. empty list of flows or dry_run), a plain
            integer return code is returned instead.
        """
        verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False)

        if not self.flows:
            print("Cannot submit an empty list of flows!")
            return 0

        if hasattr(self, "qjob"):
            # This usually happens when we have loaded the object from pickle
            # and we have already submitted the batch script to the queue.
            # At this point we need to understand if the previous batch job
            # is still running before trying to submit it again. There are three cases:
            #
            # 1) The batch script has completed within the time limit and therefore
            #    the pid_file has been removed by the script. In this case, we
            #    should not try to submit it again.

            # 2) The batch script has been killed due to timelimit (other reasons are possible
            #    but we neglect them). In this case the pid_file exists but there's no job with
            #    this pid running and we can resubmit it again.

            # 3) The batch script is still running.
            print("BatchLauncher has qjob %s" % self.qjob)

            if not self.batch_pid_file.exists:
                print("It seems that the batch script reached the end. Wont' try to submit it again")
                return 0

            msg = ("Here I have to understand if qjob is in the queue."
                   " but I need an abstract API that can retrieve info from the queue id")
            raise RuntimeError(msg)

            # TODO: Tentative API
            if self.qjob.in_status("Running|Queued"):
                print("Job is still running. Cannot submit")
            else:
                del self.qjob

        script, num_flows_inbatch = self._get_script_nflows()

        if num_flows_inbatch == 0:
            print("All flows have reached all_ok! Batch script won't be submitted")
            return 0

        if verbose:
            print("*** submission script ***")
            print(script)

        # Write the script.
        self.script_file.write(script)
        self.script_file.chmod(0o740)

        # Build the flows.
        for flow in self.flows:
            flow.build_and_pickle_dump()

        # Submit the task and save the queue id.
        if dry_run: return -1

        print("Will submit %s flows in batch script" % len(self.flows))
        self.qjob, process = self.qadapter.submit_to_queue(self.script_file.path)

        # Save the queue id in the pid file
        # The file will be removed by the job script if execution is completed.
        self.batch_pidfile.write(str(self.qjob.qid))

        self.pickle_dump()
        process.wait()

        return dict2namedtuple(retcode=process.returncode, qjob=self.qjob,
                               num_flows_inbatch=num_flows_inbatch)