Example #1
def get_wf_bulk_modulus(structure,
                        deformations,
                        vasp_input_set=None,
                        vasp_cmd="vasp",
                        db_file=None,
                        user_kpoints_settings=None,
                        eos="vinet",
                        tag=None,
                        user_incar_settings=None):
    """
    Returns the workflow that computes the bulk modulus by fitting to the given equation of state.

    Args:
        structure (Structure): input structure.
        deformations (list): list of deformation matrices (list of lists).
        vasp_input_set (VaspInputSet): for the static deformation calculations
        vasp_cmd (str): vasp command to run.
        db_file (str): path to the db file.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        eos (str): equation of state used for fitting the energies and the volumes.
            supported equations of state: "quadratic", "murnaghan", "birch", "birch_murnaghan",
            "pourier_tarantola", "vinet", "deltafactor". See pymatgen.analysis.eos.py
        tag (str): something unique to identify the tasks in this workflow. If None a random uuid
            will be assigned.
        user_incar_settings (dict): user INCAR settings to override the input set defaults.

    Returns:
        Workflow
    """

    tag = tag or "bulk_modulus group: >>{}<<".format(str(uuid4()))

    deformations = [Deformation(defo_mat) for defo_mat in deformations]

    vis_static = vasp_input_set or MPStaticSet(
        structure=structure,
        force_gamma=True,
        lepsilon=False,
        user_kpoints_settings=user_kpoints_settings,
        user_incar_settings=user_incar_settings)

    wf_bulk_modulus = get_wf_deformations(structure,
                                          deformations,
                                          name="bulk_modulus deformation",
                                          vasp_input_set=vis_static,
                                          vasp_cmd=vasp_cmd,
                                          db_file=db_file,
                                          tag=tag)

    fw_analysis = Firework(FitEOSToDb(tag=tag, db_file=db_file, eos=eos),
                           name="fit equation of state")

    wf_bulk_modulus.append_wf(Workflow.from_Firework(fw_analysis),
                              wf_bulk_modulus.leaf_fw_ids)

    wf_bulk_modulus.name = "{}:{}".format(
        structure.composition.reduced_formula, "Bulk modulus")

    return wf_bulk_modulus
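A minimal usage sketch for get_wf_bulk_modulus. Assumptions not taken from the example above: the atomate import path atomate.vasp.workflows.base.bulk_modulus, an fcc Cu test cell, an isotropic deformation grid, and env_chk-style ">>vasp_cmd<<" / ">>db_file<<" placeholders.
import numpy as np
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path for the function defined above
from atomate.vasp.workflows.base.bulk_modulus import get_wf_bulk_modulus

cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
               [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
# isotropic deformations spanning roughly -5% to +5% volume change (lists of lists)
deformations = [((1 + eps) ** (1 / 3) * np.eye(3)).tolist()
                for eps in np.linspace(-0.05, 0.05, 7)]
wf = get_wf_bulk_modulus(cu, deformations, eos="birch_murnaghan",
                         vasp_cmd=">>vasp_cmd<<", db_file=">>db_file<<")
LaunchPad.auto_load().add_wf(wf)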
Example #2
def get_wf_thermal_expansion(structure, deformations, vasp_input_set=None, vasp_cmd="vasp",
                             db_file=None, user_kpoints_settings=None, t_step=10, t_min=0,
                             t_max=1000, mesh=(20, 20, 20), eos="vinet", pressure=0.0,
                             copy_vasp_outputs=False,
                             tag=None):
    """
    Returns quasi-harmonic thermal expansion workflow.
    Note: phonopy package is required for the final analysis step.

    Args:
        structure (Structure): input structure.
        deformations (list): list of deformation matrices (list of lists).
        vasp_input_set (VaspInputSet)
        vasp_cmd (str): vasp command to run.
        db_file (str): path to the db file.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        t_step (float): temperature step (in K)
        t_min (float): min temperature (in K)
        t_max (float): max temperature (in K)
        mesh (list/tuple): reciprocal space density
        eos (str): equation of state used for fitting the energies and the volumes.
            options supported by phonopy: "vinet", "murnaghan", "birch_murnaghan".
            Note: pymatgen supports more options than phonopy. see pymatgen.analysis.eos.py
        copy_vasp_outputs (bool): whether or not to copy the outputs from the previous calc
            (usually structure optimization) before the deformations are performed.
        pressure (float): in GPa
        tag (str): something unique to identify the tasks in this workflow. If None a random uuid
            will be assigned.

    Returns:
        Workflow
    """
    try:
        from phonopy import Phonopy
    except ImportError:
        logger.warning("'phonopy' package NOT installed. Required for the final analysis step.")

    tag = tag or "thermal_expansion group: >>{}<<".format(str(uuid4()))

    deformations = [Deformation(defo_mat) for defo_mat in deformations]

    vis_static = vasp_input_set or MPStaticSet(structure, force_gamma=True, lepsilon=True,
                                               user_kpoints_settings=user_kpoints_settings)
    wf_alpha = get_wf_deformations(structure, deformations, name="thermal_expansion deformation",
                                   vasp_cmd=vasp_cmd, db_file=db_file, tag=tag,
                                   copy_vasp_outputs=copy_vasp_outputs,
                                   vasp_input_set=vis_static)

    fw_analysis = Firework(ThermalExpansionCoeffToDb(tag=tag, db_file=db_file, t_step=t_step,
                                                     t_min=t_min, t_max=t_max, mesh=mesh, eos=eos,
                                                     pressure=pressure),
                           name="Thermal expansion")

    wf_alpha.append_wf(Workflow.from_Firework(fw_analysis), wf_alpha.leaf_fw_ids)

    wf_alpha.name = "{}:{}".format(structure.composition.reduced_formula, "thermal expansion")

    return wf_alpha
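A usage sketch for get_wf_thermal_expansion under similar assumptions (import path atomate.vasp.workflows.base.thermal_expansion, fcc Al primitive cell, illustrative deformation grid); as the docstring notes, the final analysis step also needs phonopy installed.
import numpy as np
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.vasp.workflows.base.thermal_expansion import get_wf_thermal_expansion

al = Structure(Lattice([[0.0, 2.025, 2.025], [2.025, 0.0, 2.025], [2.025, 2.025, 0.0]]),
               ["Al"], [[0, 0, 0]])
deformations = [((1 + eps) ** (1 / 3) * np.eye(3)).tolist()
                for eps in np.linspace(-0.06, 0.06, 9)]
wf = get_wf_thermal_expansion(al, deformations, t_min=0, t_max=800, t_step=10,
                              mesh=(20, 20, 20), eos="vinet",
                              vasp_cmd=">>vasp_cmd<<", db_file=">>db_file<<")
LaunchPad.auto_load().add_wf(wf)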
Example #3
def append_fw_wf(orig_wf, fw_wf):
    """
    Add the given firework or workflow to the end of the provided workflow. If there are multiple
    leaf nodes the newly added firework/workflow will depend on all of them.

    Args:
        orig_wf (Workflow): The original workflow object.
        fw_wf (Firework/Workflow): The firework or workflow object to be appended to orig_wf.
    """
    new_wf = fw_wf
    if isinstance(fw_wf, Firework):
        new_wf = Workflow.from_Firework(new_wf)
    orig_wf.append_wf(new_wf, orig_wf.leaf_fw_ids)
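A self-contained sketch of append_fw_wf using simple ScriptTask fireworks as stand-ins; the atomate.utils.utils import path is an assumption.
from fireworks import Firework, ScriptTask, Workflow

# assumed import path for the helper defined above
from atomate.utils.utils import append_fw_wf

wf = Workflow([Firework(ScriptTask.from_str('echo "step 1"'), name="step 1"),
               Firework(ScriptTask.from_str('echo "step 2"'), name="step 2")])
analysis_fw = Firework(ScriptTask.from_str('echo "analysis"'), name="analysis")
append_fw_wf(wf, analysis_fw)  # "analysis" is now a child of both leaf fireworks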
Example #4
def get_wf_exafs_paths(absorbing_atom, structure, paths, degeneracies=None, edge="K", radius=10.0,
                       feff_input_set="pymatgen.io.feff.sets.MPEXAFSSet", feff_cmd="feff",
                       db_file=None, metadata=None, user_tag_settings=None, use_primitive=False,
                       labels=None, filepad_file=None):
    """
    Returns FEFF EXAFS spectroscopy workflow that generates the scattering amplitudes for the given
    list of scattering paths. The scattering amplitude output files (feffNNNN.dat files) are
    inserted into filepad (see fireworks.utilities.filepad.py) on completion.

    Args:
        absorbing_atom (str/int): absorbing atom symbol or site index. If the symbol is given,
             then the returned workflow will have fireworks for each absorbing site with the
             same symbol.
        structure (Structure): input structure
        paths (list): list of paths. path = list of site indices.
        degeneracies (list): list of path degeneracies.
        edge (str): absorption edge. Example: K, L1, L2, L3
        radius (float): cluster radius in angstroms. Ignored for K space calculations
        feff_input_set (str or FeffDictSet subclass): The input set for setting parameters. If a string,
                then the entire path to the class must be provided
                e.g. "pymatgen.io.feff.sets.MPEXAFSSet"
        feff_cmd (str): path to the feff binary
        db_file (str):  path to the db file.
        metadata (dict): meta data
        user_tag_settings (dict): override feff default tag settings
        use_primitive (bool): convert the structure to primitive form. This helps to
            reduce the number of fireworks in the workflow if the absorbing atom is
            specified by its atomic symbol.
        labels ([str]): list of labels for the scattering amplitudes file contents inserted into
            filepad. Useful for fetching the data from filepad later.
        filepad_file (str): path to filepad connection settings file.

    Returns:
        Workflow
    """
    labels = labels or []
    wflow = get_wf_xas(absorbing_atom, structure, feff_input_set, edge, radius, feff_cmd,
                       db_file, metadata, user_tag_settings, use_primitive)
    paths_fw = EXAFSPathsFW(absorbing_atom, structure, paths, degeneracies=degeneracies, edge=edge,
                            radius=radius, name="EXAFS Paths", feff_input_set=feff_input_set,
                            feff_cmd=feff_cmd, labels=labels, filepad_file=filepad_file)
    # append the scattering paths firework to the regular EXAFS workflow.
    paths_wf = Workflow.from_Firework(paths_fw)
    wflow.append_wf(paths_wf, wflow.leaf_fw_ids)
    return wflow
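A usage sketch for get_wf_exafs_paths; the fcc Cu cell, the two scattering paths, and the atomate.feff.workflows.exafs import path are illustrative assumptions.
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.feff.workflows.exafs import get_wf_exafs_paths

cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
               [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
paths = [[0, 1], [0, 2, 1]]  # each path is a list of site indices
wf = get_wf_exafs_paths("Cu", cu, paths, edge="K", radius=10.0,
                        feff_cmd=">>feff_cmd<<", db_file=">>db_file<<")
LaunchPad.auto_load().add_wf(wf)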
Example #5
def get_wf_bulk_modulus(structure, deformations, vasp_input_set=None, vasp_cmd="vasp", db_file=None,
                        user_kpoints_settings=None, eos="vinet", tag=None, user_incar_settings=None):
    """
    Returns the workflow that computes the bulk modulus by fitting to the given equation of state.

    Args:
        structure (Structure): input structure.
        deformations (list): list of deformation matrices (list of lists).
        vasp_input_set (VaspInputSet): for the static deformation calculations
        vasp_cmd (str): vasp command to run.
        db_file (str): path to the db file.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        eos (str): equation of state used for fitting the energies and the volumes.
            supported equations of state: "quadratic", "murnaghan", "birch", "birch_murnaghan",
            "pourier_tarantola", "vinet", "deltafactor". See pymatgen.analysis.eos.py
        tag (str): something unique to identify the tasks in this workflow. If None a random uuid
            will be assigned.
        user_incar_settings (dict): user INCAR settings to override the input set defaults.

    Returns:
        Workflow
    """

    tag = tag or "bulk_modulus group: >>{}<<".format(str(uuid4()))

    deformations = [Deformation(defo_mat) for defo_mat in deformations]

    vis_static = vasp_input_set or MPStaticSet(structure=structure, force_gamma=True, lepsilon=False,
                                               user_kpoints_settings=user_kpoints_settings,
                                               user_incar_settings=user_incar_settings)

    wf_bulk_modulus = get_wf_deformations(structure, deformations, name="bulk_modulus deformation",
                                          vasp_input_set=vis_static, vasp_cmd=vasp_cmd,
                                          db_file=db_file, tag=tag)

    fw_analysis = Firework(FitEOSToDb(tag=tag, db_file=db_file, eos=eos), name="fit equation of state")

    wf_bulk_modulus.append_wf(Workflow.from_Firework(fw_analysis), wf_bulk_modulus.leaf_fw_ids)

    wf_bulk_modulus.name = "{}:{}".format(structure.composition.reduced_formula, "Bulk modulus")

    return wf_bulk_modulus
Example #6
def get_wf_magnetic_deformation(structure, c=None, vis=None):
    """
    Minimal workflow to obtain magnetic deformation proxy, as
    defined by Bocarsly et al. 2017, doi: 10.1021/acs.chemmater.6b04729

    Args:
        structure: input structure, must be structure with magnetic
    elements, such that pymatgen will initialize ferromagnetic input by
    default -- see MPRelaxSet.yaml for list of default elements
        c: Workflow config dict, in the same format
    as in presets/core.py and elsewhere in atomate
        vis: A VaspInputSet to use for the first FW

    Returns: Workflow
    """

    if not structure.is_ordered:
        raise ValueError(
            "Please obtain an ordered approximation of the input structure."
        )

    structure = structure.get_primitive_structure(use_site_props=True)

    # using a uuid for book-keeping,
    # in a similar way to other workflows
    uuid = str(uuid4())

    c_defaults = {"vasp_cmd": VASP_CMD, "db_file": DB_FILE}
    if c:
        c.update(c_defaults)
    else:
        c = c_defaults

    wf = get_wf(structure, "magnetic_deformation.yaml", common_params=c, vis=vis)

    fw_analysis = Firework(
        MagneticDeformationToDB(
            db_file=DB_FILE, wf_uuid=uuid, to_db=c.get("to_db", True)
        ),
        name="MagneticDeformationToDB",
    )

    wf.append_wf(Workflow.from_Firework(fw_analysis), wf.leaf_fw_ids)

    wf = add_common_powerups(wf, c)

    if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
        wf = add_wf_metadata(wf, structure)

    wf = add_additional_fields_to_taskdocs(
        wf,
        {
            "wf_meta": {
                "wf_uuid": uuid,
                "wf_name": "magnetic_deformation",
                "wf_version": __magnetic_deformation_wf_version__,
            }
        },
    )

    return wf
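A usage sketch for get_wf_magnetic_deformation with a bcc Fe cell (which pymatgen initializes ferromagnetically by default); the import path atomate.vasp.workflows.base.magnetism is an assumption. Since c_defaults overrides the vasp_cmd/db_file keys of c, the sketch simply omits c and relies on the VASP_CMD/DB_FILE configuration defaults.
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.vasp.workflows.base.magnetism import get_wf_magnetic_deformation

fe = Structure(Lattice.cubic(2.87), ["Fe"] * 2, [[0, 0, 0], [0.5, 0.5, 0.5]])
wf = get_wf_magnetic_deformation(fe)  # vasp_cmd/db_file fall back to VASP_CMD/DB_FILE
LaunchPad.auto_load().add_wf(wf)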
Example #7
def get_wf_gibbs_free_energy(structure, deformations, vasp_input_set=None, vasp_cmd="vasp",
                             db_file=None, user_kpoints_settings=None, t_step=10, t_min=0,
                             t_max=1000, mesh=(20, 20, 20), eos="vinet", qha_type="debye_model",
                             pressure=0.0, poisson=0.25, anharmonic_contribution=False,
                             metadata=None, tag=None):
    """
    Returns quasi-harmonic gibbs free energy workflow.
    Note: phonopy package is required for the final analysis step if qha_type="phonopy"

    Args:
        structure (Structure): input structure.
        deformations (list): list of deformation matrices (list of lists).
        vasp_input_set (VaspInputSet)
        vasp_cmd (str): vasp command to run.
        db_file (str): path to the db file.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        t_step (float): temperature step (in K)
        t_min (float): min temperature (in K)
        t_max (float): max temperature (in K)
        mesh (list/tuple): reciprocal space density
        eos (str): equation of state used for fitting the energies and the volumes.
            options supported by phonopy: "vinet", "murnaghan", "birch_murnaghan".
            Note: pymatgen supports more options than phonopy. see pymatgen.analysis.eos.py
        qha_type (str): quasi-harmonic approximation type: "debye_model" or "phonopy",
            default is "debye_model"
        pressure (float): in GPa
        poisson (float): poisson ratio
        anharmonic_contribution (bool): consider anharmonic contributions to
            Gibbs energy from the Debye model. Defaults to False.
        metadata (dict): meta data
        tag (str): something unique to identify the tasks in this workflow. If None a random uuid
            will be assigned.

    Returns:
        Workflow
    """

    tag = tag or "gibbs group: >>{}<<".format(str(uuid4()))

    deformations = [Deformation(defo_mat) for defo_mat in deformations]

    # static input set for the transmuter fireworks
    vis_static = vasp_input_set
    if vis_static is None:
        lepsilon = False
        if qha_type not in ["debye_model"]:
            lepsilon = True
            try:
                from phonopy import Phonopy
            except ImportError:
                raise RuntimeError("'phonopy' package is NOT installed but is required for the final "
                                   "analysis step; you can alternatively switch to the qha_type to "
                                   "'debye_model' which does not require 'phonopy'.")
        vis_static = MPStaticSet(structure, force_gamma=True, lepsilon=lepsilon,
                                 user_kpoints_settings=user_kpoints_settings)

    wf_gibbs = get_wf_deformations(structure, deformations, name="gibbs deformation",
                                   vasp_cmd=vasp_cmd, db_file=db_file, tag=tag, metadata=metadata,
                                   vasp_input_set=vis_static)

    fw_analysis = Firework(GibbsAnalysisToDb(tag=tag, db_file=db_file, t_step=t_step, t_min=t_min,
                                             t_max=t_max, mesh=mesh, eos=eos, qha_type=qha_type,
                                             pressure=pressure, poisson=poisson, metadata=metadata,
                                             anharmonic_contribution=anharmonic_contribution,),
                           name="Gibbs Free Energy")

    wf_gibbs.append_wf(Workflow.from_Firework(fw_analysis), wf_gibbs.leaf_fw_ids)

    wf_gibbs.name = "{}:{}".format(structure.composition.reduced_formula, "gibbs free energy")

    return wf_gibbs
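A usage sketch for get_wf_gibbs_free_energy keeping the default Debye-model branch, which avoids the phonopy requirement; the import path atomate.vasp.workflows.base.gibbs, the fcc Al cell, the deformation grid, and the Poisson ratio are illustrative assumptions.
import numpy as np
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.vasp.workflows.base.gibbs import get_wf_gibbs_free_energy

al = Structure(Lattice.cubic(4.05), ["Al"] * 4,
               [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
deformations = [((1 + eps) ** (1 / 3) * np.eye(3)).tolist()
                for eps in np.linspace(-0.05, 0.05, 7)]
wf = get_wf_gibbs_free_energy(al, deformations, qha_type="debye_model", eos="vinet",
                              t_min=0, t_max=900, t_step=10, poisson=0.33,
                              vasp_cmd=">>vasp_cmd<<", db_file=">>db_file<<")
LaunchPad.auto_load().add_wf(wf)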
Example #8
def get_wf_magnetic_deformation(structure, c=None, vis=None):
    """
    Minimal workflow to obtain magnetic deformation proxy, as
    defined by Bocarsly et al. 2017, doi: 10.1021/acs.chemmater.6b04729

    Args:
        structure: input structure, must be structure with magnetic
    elements, such that pymatgen will initialize ferromagnetic input by
    default -- see MPRelaxSet.yaml for list of default elements
        c: Workflow config dict, in the same format
    as in presets/core.py and elsewhere in atomate
        vis: A VaspInputSet to use for the first FW

    Returns: Workflow
    """

    if not structure.is_ordered:
        raise ValueError(
            "Please obtain an ordered approximation of the input structure.")

    structure = structure.get_primitive_structure(use_site_props=True)

    # using a uuid for book-keeping,
    # in a similar way to other workflows
    uuid = str(uuid4())

    c_defaults = {"vasp_cmd": VASP_CMD, "db_file": DB_FILE}
    if c:
        c.update(c_defaults)
    else:
        c = c_defaults

    wf = get_wf(structure,
                "magnetic_deformation.yaml",
                common_params=c,
                vis=vis)

    fw_analysis = Firework(
        MagneticDeformationToDb(db_file=DB_FILE,
                                wf_uuid=uuid,
                                to_db=c.get("to_db", True)),
        name="MagneticDeformationToDb",
    )

    wf.append_wf(Workflow.from_Firework(fw_analysis), wf.leaf_fw_ids)

    wf = add_common_powerups(wf, c)

    if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
        wf = add_wf_metadata(wf, structure)

    wf = add_additional_fields_to_taskdocs(
        wf,
        {
            "wf_meta": {
                "wf_uuid": uuid,
                "wf_name": "magnetic_deformation",
                "wf_version": __magnetic_deformation_wf_version__,
            }
        },
    )

    return wf
Example #9
def get_wf_elastic_constant(structure,
                            metadata,
                            strain_states=None,
                            stencils=None,
                            db_file=None,
                            conventional=False,
                            order=2,
                            vasp_input_set=None,
                            analysis=True,
                            sym_reduce=False,
                            tag='elastic',
                            copy_vasp_outputs=False,
                            **kwargs):
    """
    Returns a workflow to calculate elastic constants.

    Firework 1 : write vasp input set for structural relaxation,
                 run vasp,
                 pass run location,
                 database insertion.

    Fireworks 2 through N+1 (one per deformation): static runs on the deformed structures

    last Firework : Analyze Stress/Strain data and fit the elastic tensor

    Args:
        structure (Structure): input structure to be optimized and run.
        strain_states (list of Voigt-notation strains): list of ratios of nonzero elements
            of Voigt-notation strain, e. g. [(1, 0, 0, 0, 0, 0), (0, 1, 0, 0, 0, 0), etc.].
        stencils (list of floats, or list of list of floats): values of strain to multiply
            by for each strain state, i. e. stencil for the perturbation along the strain
            state direction, e. g. [-0.01, -0.005, 0.005, 0.01].  If a list of lists,
            stencils must correspond to each strain state provided.
        db_file (str): path to file containing the database credentials.
        conventional (bool): flag to convert input structure to conventional structure,
            defaults to False.
        order (int): order of the tensor expansion to be determined.  Defaults to 2 and
            currently supports up to 3.
        vasp_input_set (VaspInputSet): vasp input set to be used.  Defaults to static
            set with ionic relaxation parameters set.  Take care if replacing this,
            default ensures that ionic relaxation is done and that stress is calculated
            for each vasp run.
        analysis (bool): flag to indicate whether analysis task should be added
            and stresses and strains passed to that task
        sym_reduce (bool): Whether or not to apply symmetry reductions
        tag (str): label used to identify the tasks in this workflow. Defaults to 'elastic'.
        copy_vasp_outputs (bool): whether or not to copy previous vasp outputs.
        kwargs (keyword arguments): additional kwargs to be passed to get_wf_deformations

    Returns:
        Workflow
    """
    # Convert to conventional if specified
    if conventional:
        structure = SpacegroupAnalyzer(
            structure).get_conventional_standard_structure()

    uis_elastic = {
        "IBRION": 2,
        "NSW": 99,
        "ISIF": 2,
        "ISTART": 1,
        "PREC": "High"
    }
    vis = vasp_input_set or MPStaticSet(structure,
                                        user_incar_settings=uis_elastic)

    strains = []
    if strain_states is None:
        strain_states = get_default_strain_states(order)
    if stencils is None:
        stencils = [np.linspace(-0.01, 0.01, 5 +
                                (order - 2) * 2)] * len(strain_states)
    if np.array(stencils).ndim == 1:
        stencils = [stencils] * len(strain_states)
    for state, stencil in zip(strain_states, stencils):
        strains.extend(
            [Strain.from_voigt(s * np.array(state)) for s in stencil])

    # Remove zero strains
    strains = [strain for strain in strains if not (abs(strain) < 1e-10).all()]
    # Adding the zero strains for the purpose of calculating at finite pressure or thermal expansion
    _strains = [Strain.from_deformation([[1, 0, 0], [0, 1, 0], [0, 0, 1]])]
    strains.extend(_strains)
    """
    """
    vstrains = [strain.voigt for strain in strains]
    if np.linalg.matrix_rank(vstrains) < 6:
        # TODO: check for sufficiency of input for nth order
        raise ValueError(
            "Strain list is insufficient to fit an elastic tensor")

    deformations = [s.get_deformation_matrix() for s in strains]
    """
    print(strains)
    print(deformations)
    """

    if sym_reduce:
        # Note this casts deformations to a TensorMapping
        # with unique deformations as keys to symmops
        deformations = symmetry_reduce(deformations, structure)

    wf_elastic = get_wf_deformations(structure,
                                     deformations,
                                     tag=tag,
                                     db_file=db_file,
                                     vasp_input_set=vis,
                                     copy_vasp_outputs=copy_vasp_outputs,
                                     **kwargs)
    if analysis:
        defo_fws_and_tasks = get_fws_and_tasks(
            wf_elastic,
            fw_name_constraint="deformation",
            task_name_constraint="Transmuted")
        for idx_fw, idx_t in defo_fws_and_tasks:
            defo = \
            wf_elastic.fws[idx_fw].tasks[idx_t]['transformation_params'][0][
                'deformation']
            pass_dict = {
                'strain': Deformation(defo).green_lagrange_strain.tolist(),
                'stress': '>>output.ionic_steps.-1.stress',
                'deformation_matrix': defo
            }
            if sym_reduce:
                pass_dict.update({'symmops': deformations[defo]})

            mod_spec_key = "deformation_tasks->{}".format(idx_fw)
            pass_task = pass_vasp_result(pass_dict=pass_dict,
                                         mod_spec_key=mod_spec_key)
            wf_elastic.fws[idx_fw].tasks.append(pass_task)

        fw_analysis = Firework(ElasticTensorToDb(structure=structure,
                                                 db_file=db_file,
                                                 order=order,
                                                 fw_spec_field='tags',
                                                 metadata=metadata,
                                                 vasp_input_set=vis),
                               name="Analyze Elastic Data",
                               spec={"_allow_fizzled_parents": True})
        wf_elastic.append_wf(Workflow.from_Firework(fw_analysis),
                             wf_elastic.leaf_fw_ids)

    wf_elastic.name = "{}:{}".format(structure.composition.reduced_formula,
                                     "elastic constants")

    return wf_elastic
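A usage sketch for this variant, which takes a required metadata dict as its second argument; the atomate.vasp.workflows.base.elastic import path and the diamond-Si cell are assumptions, and vasp_cmd is forwarded to get_wf_deformations through **kwargs.
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.vasp.workflows.base.elastic import get_wf_elastic_constant

si = Structure.from_spacegroup("Fd-3m", Lattice.cubic(5.43), ["Si"], [[0, 0, 0]])
wf = get_wf_elastic_constant(si, metadata={"project": "demo"}, order=2,
                             conventional=True, sym_reduce=True,
                             db_file=">>db_file<<", vasp_cmd=">>vasp_cmd<<")
LaunchPad.auto_load().add_wf(wf)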
Example #10
def get_wf_elastic_constant(structure, strain_states=None, stencils=None,
                            db_file=None,
                            conventional=False, order=2, vasp_input_set=None,
                            analysis=True,
                            sym_reduce=False, tag='elastic',
                            copy_vasp_outputs=False, **kwargs):
    """
    Returns a workflow to calculate elastic constants.

    Firework 1 : write vasp input set for structural relaxation,
                 run vasp,
                 pass run location,
                 database insertion.

    Fireworks 2 through N+1 (one per deformation): static runs on the deformed structures

    last Firework : Analyze Stress/Strain data and fit the elastic tensor

    Args:
        structure (Structure): input structure to be optimized and run.
        strain_states (list of Voigt-notation strains): list of ratios of nonzero elements
            of Voigt-notation strain, e. g. [(1, 0, 0, 0, 0, 0), (0, 1, 0, 0, 0, 0), etc.].
        stencils (list of floats, or list of list of floats): values of strain to multiply
            by for each strain state, i. e. stencil for the perturbation along the strain
            state direction, e. g. [-0.01, -0.005, 0.005, 0.01].  If a list of lists,
            stencils must correspond to each strain state provided.
        db_file (str): path to file containing the database credentials.
        conventional (bool): flag to convert input structure to conventional structure,
            defaults to False.
        order (int): order of the tensor expansion to be determined.  Defaults to 2 and
            currently supports up to 3.
        vasp_input_set (VaspInputSet): vasp input set to be used.  Defaults to static
            set with ionic relaxation parameters set.  Take care if replacing this,
            default ensures that ionic relaxation is done and that stress is calculated
            for each vasp run.
        analysis (bool): flag to indicate whether analysis task should be added
            and stresses and strains passed to that task
        sym_reduce (bool): Whether or not to apply symmetry reductions
        tag (str): label used to identify the tasks in this workflow. Defaults to 'elastic'.
        copy_vasp_outputs (bool): whether or not to copy previous vasp outputs.
        kwargs (keyword arguments): additional kwargs to be passed to get_wf_deformations

    Returns:
        Workflow
    """
    # Convert to conventional if specified
    if conventional:
        structure = SpacegroupAnalyzer(
            structure).get_conventional_standard_structure()

    uis_elastic = {"IBRION": 2, "NSW": 99, "ISIF": 2, "ISTART": 1,
                   "PREC": "High"}
    vis = vasp_input_set or MPStaticSet(structure,
                                        user_incar_settings=uis_elastic)
    strains = []
    if strain_states is None:
        strain_states = get_default_strain_states(order)
    if stencils is None:
        stencils = [np.linspace(-0.01, 0.01, 5 + (order - 2) * 2)] * len(
            strain_states)
    if np.array(stencils).ndim == 1:
        stencils = [stencils] * len(strain_states)
    for state, stencil in zip(strain_states, stencils):
        strains.extend(
            [Strain.from_voigt(s * np.array(state)) for s in stencil])

    # Remove zero strains
    strains = [strain for strain in strains if not (abs(strain) < 1e-10).all()]
    vstrains = [strain.voigt for strain in strains]
    if np.linalg.matrix_rank(vstrains) < 6:
        # TODO: check for sufficiency of input for nth order
        raise ValueError("Strain list is insufficient to fit an elastic tensor")

    deformations = [s.get_deformation_matrix() for s in strains]

    if sym_reduce:
        # Note this casts deformations to a TensorMapping
        # with unique deformations as keys to symmops
        deformations = symmetry_reduce(deformations, structure)

    wf_elastic = get_wf_deformations(structure, deformations, tag=tag,
                                     db_file=db_file,
                                     vasp_input_set=vis,
                                     copy_vasp_outputs=copy_vasp_outputs,
                                     **kwargs)
    if analysis:
        defo_fws_and_tasks = get_fws_and_tasks(wf_elastic,
                                               fw_name_constraint="deformation",
                                               task_name_constraint="Transmuted")
        for idx_fw, idx_t in defo_fws_and_tasks:
            defo = \
            wf_elastic.fws[idx_fw].tasks[idx_t]['transformation_params'][0][
                'deformation']
            pass_dict = {
                'strain': Deformation(defo).green_lagrange_strain.tolist(),
                'stress': '>>output.ionic_steps.-1.stress',
                'deformation_matrix': defo}
            if sym_reduce:
                pass_dict.update({'symmops': deformations[defo]})

            mod_spec_key = "deformation_tasks->{}".format(idx_fw)
            pass_task = pass_vasp_result(pass_dict=pass_dict,
                                         mod_spec_key=mod_spec_key)
            wf_elastic.fws[idx_fw].tasks.append(pass_task)

        fw_analysis = Firework(
            ElasticTensorToDb(structure=structure, db_file=db_file,
                              order=order, fw_spec_field='tags'),
            name="Analyze Elastic Data", spec={"_allow_fizzled_parents": True})
        wf_elastic.append_wf(Workflow.from_Firework(fw_analysis),
                             wf_elastic.leaf_fw_ids)

    wf_elastic.name = "{}:{}".format(structure.composition.reduced_formula,
                                     "elastic constants")

    return wf_elastic
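A sketch of the same workflow with explicit strain_states and stencils instead of the defaults; the six unit Voigt strain states keep the strain list at full rank, so the rank check above passes. The values, the fcc Cu cell, and the import path are illustrative.
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.vasp.workflows.base.elastic import get_wf_elastic_constant

cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
               [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
strain_states = [(1, 0, 0, 0, 0, 0), (0, 1, 0, 0, 0, 0), (0, 0, 1, 0, 0, 0),
                 (0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 1)]
stencils = [[-0.01, -0.005, 0.005, 0.01]] * len(strain_states)  # one stencil per state
wf = get_wf_elastic_constant(cu, strain_states=strain_states, stencils=stencils,
                             db_file=">>db_file<<", vasp_cmd=">>vasp_cmd<<")
LaunchPad.auto_load().add_wf(wf)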
Example #11
def get_wf_hubbard_hund_linresp(structure,
                                user_incar_settings=None,
                                relax_nonmagnetic=True,
                                spin_polarized=True,
                                applied_potential_range=(-0.2, 0.2),
                                num_evals=9,
                                site_indices_perturb=None,
                                species_perturb=None,
                                find_nearest_sites=True,
                                parallel_scheme=0,
                                ediff_tight=None,
                                c=None):
    """
    Compute Hubbard U (and Hund J) on-site interaction values using GGA+U
    linear response method proposed by Cococcioni et al.
    (DOI: 10.1103/PhysRevB.71.035105)
    and the spin-polarized response formalism developed by Linscott et al.
    (DOI: 10.1103/PhysRevB.98.235157)

    This workflow relies on the constrained on-site potential functional implemented in VASP, 
    with a helpful tutorial found here: 
    https://www.vasp.at/wiki/index.php/Calculate_U_for_LSDA%2BU

    Args:
        structure:
        user_incar_settings: user INCAR settings
        relax_nonmagnetic: Restart magnetic SCF runs from 
    non-magnetic calculation, using WAVECAR
        spin_polarized: Perform spin-dependent perturbations
        applied_potential_range: Bounds of applied potential
        num_evals: Number of perturbation evaluations
        site_indices_perturb: (must specify if species_perturb=None) 
    List of site indices within
    Structure indicating perturbation sites; 
        species_perturb: (must specify if site_indices_perturb=None) 
    List of names of species (string)
    of sites to perturb; First site of that species
    is selected in the structure
        find_nearest_sites: If set to true and species_perturb != None, 
    the closest sites (by the Structure distance matrix) will be selected 
    in the response analysis to account for inter-site screening effects
        parallel_scheme: 0 - (default) self-consistent (SCF)
    runs use WAVECAR from non-self consistent (NSCF) run
    at same applied potential; 1 - SCF runs use WAVECAR
    from ground-state (V=0) run. 
    While reusing the WAVECAR from NSCF run in SCF run may be more 
    efficient (parallel_scheme: 0), the user may also choose to 
    remove the dependency between NSCF and SCF runs 
    (parallel_scheme: 1)
        ediff_tight: Final energy convergence tolerance, 
    if restarting from a previous run
    (if not specified, will default to pymatgen default EDIFF)
        c: Workflow config dict, in the same format
    as in presets/core.py and elsewhere in atomate

    Returns: Workflow
    """

    if not structure.is_ordered:
        raise ValueError(
            "Please obtain an ordered approximation of the input structure.")

    if not site_indices_perturb:
        site_indices_perturb = []

    if species_perturb:

        if find_nearest_sites:
            site_indices_perturb = find_closest_sites(structure,
                                                      species_perturb)
        else:
            for specie_u in species_perturb:
                found_specie = False
                for s in range(len(structure)):
                    site = structure[s]
                    if (Element(str(site.specie)) == Element(specie_u)) \
                       and (s not in site_indices_perturb):
                        found_specie = True
                        break
                if not found_specie:
                    raise ValueError("Could not find specie(s) in structure.")
                site_indices_perturb.append(s)

    elif not site_indices_perturb:
        logger.warning("Sites for computing U value are not specified. "
                       "Computing U for first site in structure. ")

    site_indices_perturb = list(tuple(site_indices_perturb))
    num_perturb = len(site_indices_perturb)

    sites_perturb = []
    for site_index_perturb in site_indices_perturb:
        site = structure[site_index_perturb]
        sites_perturb.append(site)

    structure.remove_sites(indices=site_indices_perturb)

    for site in sites_perturb:
        structure.insert(i=0,
                         species=site.specie,
                         coords=site.frac_coords,
                         properties=site.properties)

    # using a uuid for book-keeping,
    # in a similar way to other workflows
    uuid = str(uuid4())

    c_defaults = {"vasp_cmd": VASP_CMD, "db_file": DB_FILE}
    if c:
        c.update(c_defaults)
    else:
        c = c_defaults

    # Calculate groundstate

    # set user_incar_settings
    if not user_incar_settings:
        user_incar_settings = {}

    # setup VASP input sets
    uis_gs, uis_ldau, val_dict, vis_ldau = init_linresp_input_sets(
        user_incar_settings, structure, num_perturb)

    fws = []
    index_fw_gs = [0]

    ediff_default = vis_ldau.incar['EDIFF']
    if not ediff_tight:
        ediff_tight = 0.1 * ediff_default

    append_linresp_ground_state_fws(fws, structure, num_perturb, index_fw_gs,
                                    uis_gs, relax_nonmagnetic, ediff_default,
                                    ediff_tight)

    # generate list of applied on-site potentials in linear response
    applied_potential_value_list = []
    for counter_perturb in range(num_perturb):
        applied_potential_values = np.linspace(applied_potential_range[0],
                                               applied_potential_range[1],
                                               num_evals)
        applied_potential_values = np.around(applied_potential_values,
                                             decimals=9)

        if 0.0 in applied_potential_values:
            applied_potential_values = list(applied_potential_values)
            applied_potential_values.pop(applied_potential_values.index(0.0))
            applied_potential_values = np.array(applied_potential_values)

        applied_potential_value_list.append(applied_potential_values.copy())

    for counter_perturb in range(num_perturb):

        applied_potential_values = applied_potential_value_list[
            counter_perturb]

        for v in applied_potential_values:

            append_linresp_perturb_fws(v, fws, structure, counter_perturb,
                                       num_perturb, index_fw_gs, uis_ldau,
                                       val_dict, spin_polarized,
                                       relax_nonmagnetic, ediff_default,
                                       ediff_tight, parallel_scheme)

    wf = Workflow(fws)

    fw_analysis = Firework(
        HubbardHundLinRespToDb(num_perturb=num_perturb,
                               spin_polarized=spin_polarized,
                               relax_nonmagnetic=relax_nonmagnetic,
                               db_file=DB_FILE,
                               wf_uuid=uuid),
        name="HubbardHundLinRespToDb",
    )

    wf.append_wf(Workflow.from_Firework(fw_analysis), wf.leaf_fw_ids)

    wf = add_common_powerups(wf, c)

    if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
        wf = add_wf_metadata(wf, structure)

    wf = add_additional_fields_to_taskdocs(
        wf,
        {
            "wf_meta": {
                "wf_uuid": uuid,
                "wf_name": "hubbard_hund_linresp",
                "wf_version": __hubbard_hund_linresp_wf_version__,
            }
        },
    )

    return wf
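A usage sketch for get_wf_hubbard_hund_linresp on a rocksalt NiO primitive cell, perturbing the Ni site; the import path (assumed here to be atomate.vasp.workflows.base.hubbard_hund_linresp) and the cell parameters are illustrative.
from fireworks import LaunchPad
from pymatgen.core import Lattice, Structure

# assumed import path
from atomate.vasp.workflows.base.hubbard_hund_linresp import get_wf_hubbard_hund_linresp

nio = Structure(Lattice([[0.0, 2.09, 2.09], [2.09, 0.0, 2.09], [2.09, 2.09, 0.0]]),
                ["Ni", "O"], [[0, 0, 0], [0.5, 0.5, 0.5]])
wf = get_wf_hubbard_hund_linresp(nio, species_perturb=["Ni"], spin_polarized=True,
                                 applied_potential_range=(-0.2, 0.2), num_evals=9)
LaunchPad.auto_load().add_wf(wf)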
Example #12
0
from fireworks import LaunchPad, Firework, Workflow
from fireworks.core.rocket_launcher import launch_rocket
from fireworks.examples.custom_firetasks.hello_world.hello_world_task import HelloTask

if __name__ == "__main__":
    # initialize the database
    lp = LaunchPad()  # you might need to modify the connection settings here
    # lp.reset()  # uncomment this line and set the appropriate parameters if you want to reset the database

    # create the workflow and store it in the database
    my_fw = Firework([HelloTask()])
    my_wflow = Workflow.from_Firework(my_fw)
    lp.add_wf(my_wflow)

    # run the workflow
    launch_rocket(lp)
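A variant of the same script (illustrative): for workflows with more than one Firework, rapidfire from fireworks.core.rocket_launcher keeps launching rockets until no jobs are ready, instead of launching a single one.
from fireworks import Firework, LaunchPad, Workflow
from fireworks.core.rocket_launcher import rapidfire
from fireworks.examples.custom_firetasks.hello_world.hello_world_task import HelloTask

lp = LaunchPad()  # adjust connection settings as needed
fw1 = Firework([HelloTask()], name="hello 1")
fw2 = Firework([HelloTask()], name="hello 2")
lp.add_wf(Workflow([fw1, fw2], {fw1: [fw2]}))  # fw2 runs after fw1
rapidfire(lp)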