Example #1
    def workflow(self, model):
        """
        @brief    Create a FireWorks Workflow object performing initialisation.
        @details
                  The workflow runs the exact simulations for the new data
                  points and appends a parameter-fitting workflow; if no new
                  points are required, the substitute models (if any) are
                  initialised instead.

        @param model surrogate model object.

        @return Workflow object
        """
        ## Call the newPoints method to receive a list of dictionaries, each
        #  representing one data point.
        p = self.newPoints(model)
        if len(p):
            wf = model.exactTasks(p)
            wf.append_wf(
                model.parameterFittingStrategy().workflow(model),
                wf.leaf_fw_ids
            )
            return wf

        elif not len(p) and len(model.substituteModels):
            wf = Workflow([])
            for sm in model.substituteModels:
                wf.append_wf(
                    sm.initialisationStrategy().workflow(sm),
                    []
                )
            return wf

        else:
            return Workflow([])
Example #2
 def __init__(self, *args, **kwargs):
     '''
     :param args:       (VaspFirework objects) objects to create the Workflow
                        from. There is no limit on the number of objects;
                        they are entered as comma-separated positional
                        arguments passed to the class.
     :param deps_dict:  (dict) specifies the dependencies of the VaspFirework
                        objects given. If no dependency is given, FireWorks
                        are assumed to be sequentially dependent.
     :param name:       (str) name given to the Workflow
     '''
     self.fws = []
     self.name = kwargs.get('name', 'Sequential WF')
     self.deps_dict = kwargs.get('deps_dict', {})
     self.dependency = {}
     if self.deps_dict:
         for i in self.deps_dict.keys():
             fw_deps = []
             for j in self.deps_dict[i]:
                 fw_deps.append(j.Firework)                    
             self.dependency[i.Firework]=fw_deps
     self.deps = True if self.dependency else False
     for id, fw_task in enumerate(args):
         self.fws.append(fw_task.Firework)
         if not self.deps and id != 0:
             self.dependency[self.fws[id-1]]=[fw_task.Firework]
     self.wf = Workflow(self.fws, self.dependency, name=self.name)
     # Try to establish connection with Launchpad
     try:
         self.LaunchPad=LaunchPad.from_file(os.path.join(os.environ["HOME"], ".fireworks", "my_launchpad.yaml"))
     except Exception:
         self.LaunchPad = None
Example #3
def use_custodian(original_wf, fw_name_constraint=None, custodian_params=None):
    """
    Replaces all tasks whose names contain "RunVasp" (e.g. RunVaspDirect)
    with RunVaspCustodian. Thus, this powerup adds error correction to VASP
    runs where it is not already present and/or modifies the correction behavior.

    Args:
        original_wf (Workflow): original workflow
        fw_name_constraint (str): Only apply changes to FWs where fw_name contains this substring.
            For example, use custodian only for certain runs, or set job_type to
            "double_relaxation_run" only for structure optimization run, or set different
            handler_group for different runs.
        custodian_params (dict): A dict of parameters for RunVaspCustodian. e.g., use it to set
            a "scratch_dir" or "handler_group".
    """

    custodian_params = custodian_params if custodian_params else {}
    wf_dict = original_wf.to_dict()
    vasp_fws_and_tasks = get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="RunVasp")

    for idx_fw, idx_t in vasp_fws_and_tasks:
        if "vasp_cmd" not in custodian_params:
            custodian_params["vasp_cmd"] = \
                wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["vasp_cmd"]

        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t] = \
            RunVaspCustodian(**custodian_params).to_dict()

    return Workflow.from_dict(wf_dict)
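
A minimal usage sketch for this powerup. The import path and my_wf (an existing VASP workflow) are assumptions for illustration:

from atomate.vasp.powerups import use_custodian  # assumed import path

# Wrap only the structure-optimization fireworks; the custodian_params
# dict is forwarded to RunVaspCustodian.
my_wf = use_custodian(my_wf,
                      fw_name_constraint="structure optimization",
                      custodian_params={"scratch_dir": ">>scratch_dir<<",
                                        "handler_group": "default"})
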
Example #4
def add_tags(original_wf, tags_list):
    """
    Adds tags to all Fireworks in the Workflow, to the WF metadata, and to the
    additional_fields of the VaspDrone so that they can be tracked later
    (e.g. all fireworks and VASP tasks related to a research project).

    Args:
        original_wf (Workflow)
        tags_list (list): list of tags (strings) to add
    """
    wf_dict = original_wf.to_dict()

    # WF metadata
    if "tags" in wf_dict["metadata"]:
        wf_dict["metadata"]["tags"].extend(tags_list)
    else:
        wf_dict["metadata"]["tags"] = tags_list

    # FW metadata
    for idx_fw in range(len(original_wf.fws)):
        if "tags" in wf_dict["fws"][idx_fw]["spec"]:
            wf_dict["fws"][idx_fw]["spec"]["tags"].extend(tags_list)
        else:
            wf_dict["fws"][idx_fw]["spec"]["tags"] = tags_list

    # Drone
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, task_name_constraint="VaspToDbTask"):
        if "tags" in wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["additional_fields"]:
            wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["additional_fields"]["tags"].extend(tags_list)
        else:
            wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["additional_fields"]["tags"] = tags_list

    return Workflow.from_dict(wf_dict)
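
A usage sketch; the import path and my_wf are assumptions. The tags end up in the WF metadata, in every FW spec, and in the additional_fields of each VaspToDbTask:

from atomate.vasp.powerups import add_tags  # assumed import path

my_wf = add_tags(my_wf, ["solar-project", "screening-batch-2"])
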
Example #5
    def workflow(self, model):
        """
        """
        # Get initial data
        points = self.newPoints(model)

        # Save initial data in database
        model.updateFitDataFromFwSpec(points)
        model.updateMinMax()
        model.save()

        wf = Workflow([], name='initialising to dataset')
        wf.append_wf(model.parameterFittingStrategy().workflow(model),
                     wf.leaf_fw_ids)

        return wf
Example #6
def use_scratch_dir(original_wf, scratch_dir):
    """
    For all RunVaspCustodian tasks, add the desired scratch dir.

    :param original_wf: original workflow
    :param scratch_dir: the scratch dir to use. Supports env_chk.
    """
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, task_name_constraint="RunVaspCustodian"):
        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["scratch_dir"] = scratch_dir
    return Workflow.from_dict(wf_dict)
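
A usage sketch; passing the env_chk string ">>scratch_dir<<" lets each FWorker resolve its own scratch directory from its env settings (the import path and my_wf are assumptions):

from atomate.vasp.powerups import use_scratch_dir  # assumed import path

my_wf = use_scratch_dir(my_wf, ">>scratch_dir<<")
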
Example #7
 def add_fw(self, fw_task, deps=None):
     '''
     Append a Firework to the workflow, optionally with explicit
     dependencies. If deps is not given, the new Firework is chained
     after the most recently added one.
     '''
     self.fws.append(fw_task.Firework)
     if deps:
         for i in deps.keys():
             fw_deps = [j.Firework for j in deps[i]]
             self.dependency[i.Firework] = fw_deps
     else:
         # no dependencies given: chain after the previous Firework
         prev_id = len(self.fws) - 2
         self.dependency[self.fws[prev_id]] = [fw_task.Firework]
     self.wf = Workflow(self.fws, self.dependency, name=self.name)
Example #8
def add_additional_fields_to_taskdocs(original_wf, update_dict=None):
    """
    For all VaspToDbTasks in a given workflow, add information
    to "additional_fields" to be placed in the task doc.

    Args:
        original_wf (Workflow)
        update_dict (dict): dictionary of fields to add to additional_fields
    """
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, task_name_constraint="VaspToDbTask"):
        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["additional_fields"].update(update_dict)
    return Workflow.from_dict(wf_dict)
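
A usage sketch; the import path and my_wf are assumptions:

from atomate.vasp.powerups import add_additional_fields_to_taskdocs  # assumed path

# Every task doc produced by a VaspToDbTask will carry these extra fields.
my_wf = add_additional_fields_to_taskdocs(my_wf, {"project": "my-project",
                                                  "submitter": "jdoe"})
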
Example #9
def add_trackers(original_wf):
    """
    Every FireWork that runs VASP also tracks the OUTCAR and OSZICAR using FWS Trackers.

    Args:
        original_wf (Workflow)

    """
    tracker1 = Tracker('OUTCAR', nlines=25, allow_zipped=True)
    tracker2 = Tracker('OSZICAR', nlines=25, allow_zipped=True)
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, task_name_constraint="RunVasp"):
        if "_trackers" in wf_dict["fws"][idx_fw]["spec"]:
            wf_dict["fws"][idx_fw]["spec"]["_trackers"].extend([tracker1, tracker2])
        else:
            wf_dict["fws"][idx_fw]["spec"]["_trackers"] = [tracker1, tracker2]
    return Workflow.from_dict(wf_dict)
Example #10
def remove_custodian(original_wf, fw_name_constraint=None):
    """
    Replaces all tasks whose names contain "RunVasp" (e.g. RunVaspCustodian)
    with RunVaspDirect.

    Args:
        original_wf (Workflow): original workflow
        fw_name_constraint (str): Only apply changes to FWs where fw_name
            contains this substring.
    """
    wf_dict = original_wf.to_dict()
    vasp_fws_and_tasks = get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="RunVasp")
    for idx_fw, idx_t in vasp_fws_and_tasks:
        vasp_cmd = wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["vasp_cmd"]
        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t] = RunVaspDirect(vasp_cmd=vasp_cmd).to_dict()
    return Workflow.from_dict(wf_dict)
Example #11
def add_modify_incar(original_wf, modify_incar_params=None, fw_name_constraint=None):
    """
    Adds a ModifyIncar task just before every FireWork that runs VASP. For example, this allows
    you to modify the INCAR based on the Worker (using env_chk) or with hard-coded changes.

    Args:
        original_wf (Workflow)
        modify_incar_params (dict) - dict of parameters for ModifyIncar.
        fw_name_constraint (str) - Only apply changes to FWs where fw_name contains this substring.

    """
    modify_incar_params = modify_incar_params or {"incar_update": ">>incar_update<<"}
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="RunVasp"):
        wf_dict["fws"][idx_fw]["spec"]["_tasks"].insert(idx_t, ModifyIncar(**modify_incar_params).to_dict())
    return Workflow.from_dict(wf_dict)
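
A usage sketch; called with no modify_incar_params, the powerup inserts ModifyIncar with the env_chk default ">>incar_update<<", so each FWorker supplies its own INCAR updates. The import path and my_wf are assumptions:

from atomate.vasp.powerups import add_modify_incar  # assumed import path

# Hard-coded INCAR changes, applied only to FWs whose name contains "static".
my_wf = add_modify_incar(my_wf,
                         modify_incar_params={"incar_update": {"ENCUT": 520}},
                         fw_name_constraint="static")
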
Example #12
def add_small_gap_multiply(original_wf, gap_cutoff, density_multiplier, fw_name_constraint=None):
    """
    In all FWs with specified name constraints, add a 'small_gap_multiply' parameter that
    multiplies the k-mesh density of compounds with gap < gap_cutoff by density_multiplier.
    Note that this powerup only works on FireWorks with the appropriate WriteVasp* tasks that
    accept the small_gap_multiply argument.

    :param original_wf: original workflow
    :param gap_cutoff: band gap cutoff below which the k-mesh density is multiplied
    :param density_multiplier: factor by which to multiply the k-mesh density
    :param fw_name_constraint: only apply changes to FWs whose name contains this substring
    """
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="WriteVasp"):
        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["small_gap_multiply"] = [gap_cutoff, density_multiplier]
    return Workflow.from_dict(wf_dict)
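
A usage sketch; the import path, my_wf, and the cutoff value are assumptions:

from atomate.vasp.powerups import add_small_gap_multiply  # assumed import path

# Double the k-mesh density for small-gap compounds, but only in the
# nscf fireworks (the cutoff is in whatever units the WriteVasp* task
# expects, typically eV).
my_wf = add_small_gap_multiply(my_wf, gap_cutoff=0.5, density_multiplier=2.0,
                               fw_name_constraint="nscf")
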
Example #13
def get_wf_exafs_paths(absorbing_atom, structure, paths, degeneracies=None, edge="K", radius=10.0,
                       feff_input_set="pymatgen.io.feff.sets.MPEXAFSSet", feff_cmd="feff",
                       db_file=None, metadata=None, user_tag_settings=None, use_primitive=False,
                       labels=None, filepad_file=None):
    """
    Returns a FEFF EXAFS spectroscopy workflow that generates the scattering amplitudes for the
    given list of scattering paths. The scattering amplitude output files (feffNNNN.dat files) are
    inserted into filepad (see fireworks.utilities.filepad.py) on completion.

    Args:
        absorbing_atom (str/int): absorbing atom symbol or site index. If the symbol is given,
             then the returned workflow will have fireworks for each absorbing site with the
             same symbol.
        structure (Structure): input structure
        paths (list): list of paths. path = list of site indices.
        degeneracies (list): list of path degeneracies.
        edge (str): absorption edge. Example: K, L1, L2, L3
        radius (float): cluster radius in angstroms. Ignored for K space calculations
        feff_input_set (str or FeffDictSet subclass): The inputset for setting params. If string
                then the entire path to the class must be provided
                e.g. "pymatgen.io.feff.sets.MPEXAFSSet"
        feff_cmd (str): path to the feff binary
        db_file (str):  path to the db file.
        metadata (dict): meta data
        user_tag_settings (dict): override feff default tag settings
        use_primitive (bool): convert the structure to primitive form. This helps to
            reduce the number of fireworks in the workflow if the absorbing atom is
            specified by its atomic symbol.
        labels ([str]): list of labels for the scattering amplitudes file contents inserted into
            filepad. Useful for fetching the data from filepad later.
        filepad_file (str): path to filepad connection settings file.

    Returns:
        Workflow
    """
    labels = labels or []
    wflow = get_wf_xas(absorbing_atom, structure, feff_input_set, edge, radius, feff_cmd,
                       db_file, metadata, user_tag_settings, use_primitive)
    paths_fw = EXAFSPathsFW(absorbing_atom, structure, paths, degeneracies=degeneracies, edge=edge,
                            radius=radius, name="EXAFS Paths", feff_input_set=feff_input_set,
                            feff_cmd=feff_cmd, labels=labels, filepad_file=filepad_file)
    # append the scattering paths firework to the regular EXAFS workflow.
    paths_wf = Workflow.from_Firework(paths_fw)
    wflow.append_wf(paths_wf, wflow.leaf_fw_ids)
    return wflow
Example #14
def tag_fws(original_wf, tag, fw_name_constraint=None):
    """
    Tags the VASP FWorker(s) of a Workflow; e.g. it can be used to run large-memory jobs on a separate queue

    Args:
        original_wf (Workflow):
        tag (string): user-defined tag to be added under fw.spec._fworker (e.g. "large memory", "big", etc.)
        fw_name_constraint (string): name of the fireworks to be modified (all if None is passed)

    Returns:
        modified workflow with tagged Fworkers
    """
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="RunVasp"):
        wf_dict["fws"][idx_fw]["spec"]["_fworker"] = tag

    return Workflow.from_dict(wf_dict)
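
A usage sketch; since the tag is written to spec["_fworker"], a firework tagged this way is only picked up by a FWorker whose name matches the tag. The import path and my_wf are assumptions:

from atomate.vasp.powerups import tag_fws  # assumed import path

# Route the memory-hungry nscf runs to a dedicated worker/queue.
my_wf = tag_fws(my_wf, "large_memory", fw_name_constraint="nscf")
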
Example #15
def add_namefile(original_wf, use_slug=True):
    """
    Every FireWork begins by writing an empty file with the name
    "FW--<fw.name>". This makes it easy to figure out what jobs are in what
    launcher directories, e.g. "ls -l launch*/FW--*" from within a "block" dir.

    Args:
        original_wf (Workflow)
        use_slug (bool): whether to replace whitespace-type chars with a slug
    """
    wf_dict = original_wf.to_dict()
    for idx, fw in enumerate(wf_dict["fws"]):
        fname = "FW--{}".format(fw["name"])
        if use_slug:
            fname = get_slug(fname)
        wf_dict["fws"][idx]["spec"]["_tasks"].insert(0, FileWriteTask(
            files_to_write=[{"filename": fname, "contents": ""}]).to_dict())
    return Workflow.from_dict(wf_dict)
Example #16
def modify_to_soc(original_wf, nbands, structure=None, modify_incar_params=None, fw_name_constraint=None):
    """
    Takes a regular workflow and transforms the VASP fireworks specified by
    fw_name_constraint into non-collinear calculations that take spin-orbit coupling into account.

    Args:
        original_wf (Workflow)
        nbands (int): number of bands selected by the user (for now)
        structure (Structure)
        modify_incar_params ({}): a dictionary containing the settings for modifying the INCAR (e.g. {"ICHARG": 11})
        fw_name_constraint (string): name of the fireworks to be modified (all if None is passed)

    Returns:
        modified Workflow with SOC
    """

    wf_dict = original_wf.to_dict()
    if structure is None:
        try:
            sid = get_fws_and_tasks(original_wf, fw_name_constraint="structure optimization",
                                    task_name_constraint="RunVasp")[0][0]
            structure = Structure.from_dict(wf_dict["fws"][sid]["spec"]["_tasks"][1]["vasp_input_set"]["structure"])
        except Exception:
            raise ValueError("For this workflow, the structure must be provided as an input")
    magmom = ""
    for i in structure:
        magmom += "0 0 0.6 "
    # TODO: add saxis as an input parameter with default being (0 0 1)
    modify_incar_params = modify_incar_params or {"incar_update": {"LSORBIT": "T", "NBANDS": nbands, "MAGMOM": magmom,
                                                    "ISPIN": 1, "LMAXMIX": 4, "ISYM": 0}}

    for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="RunVasp"):
        if "nscf" in wf_dict["fws"][idx_fw]["name"]:
            wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t]["vasp_cmd"] = ">>vasp_ncl<<"
            wf_dict["fws"][idx_fw]["spec"]["_tasks"].insert(idx_t, ModifyIncar(**modify_incar_params).to_dict())

        wf_dict["fws"][idx_fw]["name"] += " soc"

    for idx_fw, idx_t in get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                           task_name_constraint="RunBoltztrap"):
        wf_dict["fws"][idx_fw]["name"] += " soc"

    return Workflow.from_dict(wf_dict)
Example #17
def add_trackers(original_wf, tracked_files=None, nlines=25):
    """
    Every FireWork that runs VASP also tracks the OUTCAR, OSZICAR, etc., using FWS Trackers.

    Args:
        original_wf (Workflow)
        tracked_files (list) : list of files to be tracked
        nlines (int): number of lines at the end of files to be tracked
    """
    if tracked_files is None:
        tracked_files = ["OUTCAR", "OSZICAR"]
    trackers = [Tracker(f, nlines=nlines, allow_zipped=True) for f in tracked_files]
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf, task_name_constraint="RunVasp"):
        if "_trackers" in wf_dict["fws"][idx_fw]["spec"]:
            wf_dict["fws"][idx_fw]["spec"]["_trackers"].extend(trackers)
        else:
            wf_dict["fws"][idx_fw]["spec"]["_trackers"] = trackers
    return Workflow.from_dict(wf_dict)
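
A usage sketch; the import path and my_wf are assumptions. The tracked file tails can later be inspected with FireWorks' "lpad track_fws" command:

from atomate.vasp.powerups import add_trackers  # assumed import path

my_wf = add_trackers(my_wf, tracked_files=["OUTCAR", "OSZICAR", "vasp.out"],
                     nlines=50)
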
Example #18
def use_fake_vasp(original_wf):
    """
    Replaces all tasks whose names contain "RunVasp" (e.g. RunVaspDirect)
    with RunVaspFake. Thus, we do not actually run VASP but copy
    pre-determined inputs and outputs.

    Args:
        original_wf (Workflow)
    """
    wf_dict = original_wf.to_dict()
    for idx_fw, fw in enumerate(original_wf.fws):
        for job_type in fake_dirs.keys():
            if job_type in fw.name:
                for idx_t, t in enumerate(fw.tasks):
                    if "RunVasp" in str(t):
                        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t] = \
                            RunVaspFake(fake_dir=fake_dirs[job_type]).to_dict()

    return Workflow.from_dict(wf_dict)
Example #19
def get_wf_bulk_modulus(structure, deformations, vasp_input_set=None, vasp_cmd="vasp", db_file=None,
                        user_kpoints_settings=None, eos="vinet", tag=None, user_incar_settings=None):
    """
    Returns the workflow that computes the bulk modulus by fitting to the given equation of state.

    Args:
        structure (Structure): input structure.
        deformations (list): list of deformation matrices (list of lists).
        vasp_input_set (VaspInputSet): for the static deformation calculations
        vasp_cmd (str): vasp command to run.
        db_file (str): path to the db file.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        eos (str): equation of state used for fitting the energies and the volumes.
            Supported equations of state: "quadratic", "murnaghan", "birch", "birch_murnaghan",
            "pourier_tarantola", "vinet", "deltafactor". See pymatgen.analysis.eos.py.
        tag (str): something unique to identify the tasks in this workflow. If None a random uuid
            will be assigned.
        user_incar_settings (dict): INCAR settings to override the input set defaults.

    Returns:
        Workflow
    """

    tag = tag or "bulk_modulus group: >>{}<<".format(str(uuid4()))

    deformations = [Deformation(defo_mat) for defo_mat in deformations]

    vis_static = vasp_input_set or MPStaticSet(structure=structure, force_gamma=True, lepsilon=False,
                                               user_kpoints_settings=user_kpoints_settings,
                                               user_incar_settings=user_incar_settings)

    wf_bulk_modulus = get_wf_deformations(structure, deformations, name="bulk_modulus deformation",
                                          vasp_input_set=vis_static, vasp_cmd=vasp_cmd,
                                          db_file=db_file, tag=tag)

    fw_analysis = Firework(FitEOSToDb(tag=tag, db_file=db_file, eos=eos), name="fit equation of state")

    wf_bulk_modulus.append_wf(Workflow.from_Firework(fw_analysis), wf_bulk_modulus.leaf_fw_ids)

    wf_bulk_modulus.name = "{}:{}".format(structure.composition.reduced_formula, "Bulk modulus")

    return wf_bulk_modulus
Example #20
def use_fake_vasp(original_wf, ref_dirs, params_to_check=None):
    """
    Replaces all tasks whose names contain "RunVasp" (e.g. RunVaspDirect)
    with RunVaspFake. Thus, we do not actually run VASP but copy
    pre-determined inputs and outputs.

    Args:
        original_wf (Workflow)
        ref_dirs (dict): key=firework name, value=path to the reference vasp calculation directory
        params_to_check (list): optional list of incar parameters to check.
    """
    if not params_to_check:
        params_to_check = ["ISPIN", "ENCUT", "ISMEAR", "SIGMA", "IBRION", "LORBIT", "NBANDS", "LMAXMIX"]
    wf_dict = original_wf.to_dict()
    for idx_fw, fw in enumerate(original_wf.fws):
        for job_type in ref_dirs.keys():
            if job_type in fw.name:
                for idx_t, t in enumerate(fw.tasks):
                    if "RunVasp" in str(t):
                        wf_dict["fws"][idx_fw]["spec"]["_tasks"][idx_t] = \
                            RunVaspFake(ref_dir=ref_dirs[job_type], params_to_check=params_to_check).to_dict()
    return Workflow.from_dict(wf_dict)
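
A usage sketch, typical in integration tests; the import path, my_wf, and the reference paths are assumptions:

from atomate.vasp.powerups import use_fake_vasp  # assumed import path

# Map firework-name substrings to directories holding reference
# inputs/outputs; no real VASP is executed.
ref_dirs = {"structure optimization": "/path/to/refs/relax",
            "static": "/path/to/refs/static"}
my_wf = use_fake_vasp(my_wf, ref_dirs, params_to_check=["ENCUT", "ISMEAR"])
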
Example #21
from fireworks import Firework, Workflow, FWorker, LaunchPad, ScriptTask
from fireworks.core.rocket_launcher import rapidfire

if __name__ == "__main__":
    # set up the LaunchPad and reset it
    launchpad = LaunchPad()
    # launchpad.reset('', require_password=False)

    # define four individual FireWorks used in the Workflow
    task1 = ScriptTask.from_str('echo "Ingrid is the CEO."')
    task2 = ScriptTask.from_str('echo "Jill is a manager."')
    task3 = ScriptTask.from_str('echo "Jack is a manager."')
    task4 = ScriptTask.from_str('echo "Kip is an intern."')

    fw1 = Firework(task1)
    fw2 = Firework(task2)
    fw3 = Firework(task3)
    fw4 = Firework(task4)

    # assemble Workflow from FireWorks and their connections by id
    workflow = Workflow([fw1, fw2, fw3, fw4], {
        fw1: [fw2, fw3],
        fw2: [fw4],
        fw3: [fw4]
    })

    # store workflow and launch it locally
    launchpad.add_wf(workflow)
    rapidfire(launchpad, FWorker())
Example #22
    def setUp(self):
        # define the individual FireWorks used in the Workflow
        # Parent Firework
        fw_p = Firework(ScriptTask.from_str(
            'echo "Cronus is the ruler of titans"', {'store_stdout': True}),
                        name="parent",
                        fw_id=1)
        # Sibling fireworks
        #fw_s1 = Firework(ScriptTask.from_str(
        #    'echo "Zeus is son of Cronus"',
        #    {'store_stdout':True}), name="sib1", fw_id=2, parents=fw_p)
        # Timed firework
        fw_s1 = Firework(PyTask(func='time.sleep', args=[5]),
                         name="sib1",
                         fw_id=2,
                         parents=fw_p)
        fw_s2 = Firework(ScriptTask.from_str(
            'echo "Poisedon is brother of Zeus"', {'store_stdout': True}),
                         name="sib2",
                         fw_id=3,
                         parents=fw_p)
        fw_s3 = Firework(ScriptTask.from_str('echo "Hades is brother of Zeus"',
                                             {'store_stdout': True}),
                         name="sib3",
                         fw_id=4,
                         parents=fw_p)
        fw_s4 = Firework(ScriptTask.from_str(
            'echo "Demeter is sister & wife of Zeus"', {'store_stdout': True}),
                         name="sib4",
                         fw_id=5,
                         parents=fw_p)
        fw_s5 = Firework(ScriptTask.from_str(
            'echo "Lapetus is son of Oceanus"', {'store_stdout': True}),
                         name="cousin1",
                         fw_id=6)
        # Children fireworks
        fw_c1 = Firework(ScriptTask.from_str('echo "Ares is son of Zeus"',
                                             {'store_stdout': True}),
                         name="c1",
                         fw_id=7,
                         parents=fw_s1)
        fw_c2 = Firework(ScriptTask.from_str(
            'echo "Persephone is daughter of Zeus & Demeter and wife of Hades"',
            {'store_stdout': True}),
                         name="c2",
                         fw_id=8,
                         parents=[fw_s1, fw_s4])
        fw_c3 = Firework(ScriptTask.from_str(
            'echo "Makaria is daughter of Hades & Persephone"',
            {'store_stdout': True}),
                         name="c3",
                         fw_id=9,
                         parents=[fw_s3, fw_c2])
        fw_c4 = Firework(ScriptTask.from_str(
            'echo "Dione is descendant of Lapetus"', {'store_stdout': True}),
                         name="c4",
                         fw_id=10,
                         parents=fw_s5)
        fw_c5 = Firework(ScriptTask.from_str(
            'echo "Aphrodite is son of of Zeus and Dione"',
            {'store_stdout': True}),
                         name="c5",
                         fw_id=11,
                         parents=[fw_s1, fw_c4])
        fw_c6 = Firework(ScriptTask.from_str(
            'echo "Atlas is son of of Lapetus"', {'store_stdout': True}),
                         name="c6",
                         fw_id=12,
                         parents=fw_s5)
        fw_c7 = Firework(ScriptTask.from_str(
            'echo "Maia is daughter of Atlas"', {'store_stdout': True}),
                         name="c7",
                         fw_id=13,
                         parents=fw_c6)
        fw_c8 = Firework(ScriptTask.from_str(
            'echo "Hermes is daughter of Maia and Zeus"',
            {'store_stdout': True}),
                         name="c8",
                         fw_id=14,
                         parents=[fw_s1, fw_c7])

        # assemble Workflow from FireWorks and their connections by id
        workflow = Workflow([
            fw_p, fw_s1, fw_s2, fw_s3, fw_s4, fw_s5, fw_c1, fw_c2, fw_c3,
            fw_c4, fw_c5, fw_c6, fw_c7, fw_c8
        ])
        self.lp.add_wf(workflow)

        # Give names to fw_ids
        self.zeus_fw_id = 2
        self.zeus_child_fw_ids = set([7, 8, 9, 11, 14])
        self.lapetus_desc_fw_ids = set([6, 10, 12, 13])
        self.zeus_sib_fw_ids = set([3, 4, 5])
        self.par_fw_id = 1
        self.all_ids = self.zeus_child_fw_ids | self.lapetus_desc_fw_ids | \
                       self.zeus_sib_fw_ids | set([self.zeus_fw_id]) | \
                       set([self.par_fw_id])

        self.old_wd = os.getcwd()
Example #23
File: core.py, Project: FilipchukB/P1
def get_wf_xas(absorbing_atom,
               structure,
               feff_input_set="pymatgen.io.feff.sets.MPXANESSet",
               edge="K",
               radius=10.0,
               feff_cmd="feff",
               db_file=None,
               metadata=None,
               user_tag_settings=None,
               use_primitive=False):
    """
    Returns FEFF XANES/EXAFS spectroscopy workflow.

    Args:
        absorbing_atom (str/int): absorbing atom symbol or site index. If the symbol is given,
             then the returned workflow will have fireworks for each absorbing site with the
             same symbol.
        structure (Structure): input structure
        feff_input_set (str or FeffDictSet subclass): The inputset for setting params. If string
                then either the entire path to the class or spectrum type must be provided
                e.g. "pymatgen.io.feff.sets.MPXANESSet" or "XANES"
        edge (str): absorption edge. Example: K, L1, L2, L3
        radius (float): cluster radius in angstroms. Ignored for K space calculations
        feff_cmd (str): path to the feff binary
        db_file (str):  path to the db file.
        metadata (dict): meta data
        user_tag_settings (dict): override feff default tag settings
        use_primitive (bool): convert the structure to primitive form. This helps to
            reduce the number of fireworks in the workflow if the absorbing atom is
            specified by its atomic symbol.

    Returns:
        Workflow
    """
    if use_primitive:
        structure = structure.get_primitive_structure()

    # get the absorbing atom site index/indices
    ab_atom_indices = get_absorbing_atom_indices(structure, absorbing_atom)

    override_default_feff_params = {"user_tag_settings": user_tag_settings}

    spectrum_type = get_feff_input_set_obj(feff_input_set, ab_atom_indices[0],
                                           structure).__class__.__name__[2:-3]

    # add firework for each absorbing atom site index
    fws = []
    for ab_idx in ab_atom_indices:
        fw_metadata = dict(metadata) if metadata else {}
        fw_metadata["absorbing_atom_index"] = ab_idx
        fw_name = "{}-{}-{}".format(spectrum_type, edge, ab_idx)
        fws.append(
            XASFW(ab_idx,
                  structure,
                  edge=edge,
                  radius=radius,
                  feff_input_set=feff_input_set,
                  feff_cmd=feff_cmd,
                  db_file=db_file,
                  metadata=fw_metadata,
                  name=fw_name,
                  override_default_feff_params=override_default_feff_params))

    wf_metadata = dict(metadata) if metadata else {}
    wf_metadata["absorbing_atom_indices"] = list(ab_atom_indices)
    wfname = "{}:{}:{} edge".format(structure.composition.reduced_formula,
                                    "{} spectroscopy".format(spectrum_type),
                                    edge)

    return Workflow(fws, name=wfname, metadata=wf_metadata)
Example #24
File: core.py, Project: FilipchukB/P1
def get_wf_exafs_paths(absorbing_atom,
                       structure,
                       paths,
                       degeneracies=None,
                       edge="K",
                       radius=10.0,
                       feff_input_set="pymatgen.io.feff.sets.MPEXAFSSet",
                       feff_cmd="feff",
                       db_file=None,
                       metadata=None,
                       user_tag_settings=None,
                       use_primitive=False,
                       labels=None,
                       filepad_file=None):
    """
    Returns a FEFF EXAFS spectroscopy workflow that generates the scattering amplitudes for the
    given list of scattering paths. The scattering amplitude output files (feffNNNN.dat files) are
    inserted into filepad (see fireworks.utilities.filepad.py) on completion.

    Args:
        absorbing_atom (str/int): absorbing atom symbol or site index. If the symbol is given,
             then the returned workflow will have fireworks for each absorbing site with the
             same symbol.
        structure (Structure): input structure
        paths (list): list of paths. path = list of site indices.
        degeneracies (list): list of path degeneracies.
        edge (str): absorption edge. Example: K, L1, L2, L3
        radius (float): cluster radius in angstroms. Ignored for K space calculations
        feff_input_set (str or FeffDictSet subclass): The inputset for setting params. If string
                then the entire path to the class must be provided
                e.g. "pymatgen.io.feff.sets.MPEXAFSSet"
        feff_cmd (str): path to the feff binary
        db_file (str):  path to the db file.
        metadata (dict): meta data
        user_tag_settings (dict): override feff default tag settings
        use_primitive (bool): convert the structure to primitive form. This helps to
            reduce the number of fireworks in the workflow if the absorbing atom is
            specified by its atomic symbol.
        labels ([str]): list of labels for the scattering amplitudes file contents inserted into
            filepad. Useful for fetching the data from filepad later.
        filepad_file (str): path to filepad connection settings file.

    Returns:
        Workflow
    """
    labels = labels or []
    wflow = get_wf_xas(absorbing_atom, structure, feff_input_set, edge, radius,
                       feff_cmd, db_file, metadata, user_tag_settings,
                       use_primitive)
    paths_fw = EXAFSPathsFW(absorbing_atom,
                            structure,
                            paths,
                            degeneracies=degeneracies,
                            edge=edge,
                            radius=radius,
                            name="EXAFS Paths",
                            feff_input_set=feff_input_set,
                            feff_cmd=feff_cmd,
                            labels=labels,
                            filepad_file=filepad_file)
    # append the scattering paths firework to the regular EXAFS workflow.
    paths_wf = Workflow.from_Firework(paths_fw)
    wflow.append_wf(paths_wf, wflow.leaf_fw_ids)
    return wflow
Example #25
                      name=name,
                      username=username,
                      password=password)

db = connect('hydrogenated-prototypes-mamunm.db')

for d in db.select():
    atoms = d.toatoms()
    atoms.info = d.data.parameters
    search_keys = d.key_value_pairs

    encoding = atoms_to_encode(atoms)

    t0 = PyTask(func='catkit.flow.fwio.encode_to_atoms', args=[encoding])

    t1 = PyTask(func='catkit.flow.fwespresso.get_potential_energy',
                stored_data_varname='trajectory')

    firework = Firework(
        [t0, t1],
        spec={
            'tags': search_keys,
            '_priority': 3,
            'connectivity': d.data.connectivity,
            'surface_atoms': d.data.surface_atoms
        },
        name='hydrogenated-species-1')

    workflow = Workflow([firework])
    launchpad.add_wf(workflow)
Example #26
def get_wf_magnetic_deformation(structure, c=None, vis=None):
    """
    Minimal workflow to obtain magnetic deformation proxy, as
    defined by Bocarsly et al. 2017, doi: 10.1021/acs.chemmater.6b04729

    Args:
        structure: input structure; must be a structure with magnetic
            elements, such that pymatgen will initialize ferromagnetic input
            by default -- see MPRelaxSet.yaml for the list of default elements
        c: Workflow config dict, in the same format as in presets/core.py
            and elsewhere in atomate
        vis: A VaspInputSet to use for the first FW

    Returns: Workflow
    """

    if not structure.is_ordered:
        raise ValueError(
            "Please obtain an ordered approximation of the input structure.")

    structure = structure.get_primitive_structure(use_site_props=True)

    # using a uuid for book-keeping,
    # in a similar way to other workflows
    uuid = str(uuid4())

    c_defaults = {"vasp_cmd": VASP_CMD, "db_file": DB_FILE}
    if c:
        c.update(c_defaults)
    else:
        c = c_defaults

    wf = get_wf(structure,
                "magnetic_deformation.yaml",
                common_params=c,
                vis=vis)

    fw_analysis = Firework(
        MagneticDeformationToDB(db_file=DB_FILE,
                                wf_uuid=uuid,
                                to_db=c.get("to_db", True)),
        name="MagneticDeformationToDB",
    )

    wf.append_wf(Workflow.from_Firework(fw_analysis), wf.leaf_fw_ids)

    wf = add_common_powerups(wf, c)

    if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
        wf = add_wf_metadata(wf, structure)

    wf = add_additional_fields_to_taskdocs(
        wf,
        {
            "wf_meta": {
                "wf_uuid": uuid,
                "wf_name": "magnetic_deformation",
                "wf_version": __magnetic_deformation_wf_version__,
            }
        },
    )

    return wf
Example #27
def md_relax_wf(mol, name, **kwargs):
    fws, links_dict = md_relax_fws(mol, name, **kwargs)
    return Workflow(fws, links_dict, name)
Example #28
File: wflows.py, Project: imkimhy/dfttk
def get_wf_gibbs(structure, num_deformations=7, deformation_fraction=(-0.05, 0.1),
                 phonon=False, phonon_supercell_matrix=None,
                 t_min=5, t_max=2000, t_step=5,
                 vasp_cmd=None, db_file=None, metadata=None, name='EV_QHA'):
    """
    E-V curve (energy-volume) workflow.

    Parameters
    ----------
    structure: pymatgen.Structure
    num_deformations: int
    deformation_fraction: float
        Can be a float (a single value) or a 2-tuple of (min, max) deformation fractions.
        Default is (-0.05, 0.1) leading to volumes of 0.95-1.10. A single value gives plus/minus
        by default.
    phonon : bool
        Whether to do a phonon calculation. Defaults to False, meaning the Debye model.
    phonon_supercell_matrix : list
        3x3 array of the supercell matrix, e.g. [[2,0,0],[0,2,0],[0,0,2]]. Must be specified if phonon is specified.
    t_min : float
        Minimum temperature
    t_step : float
        Temperature step size
    t_max : float
        Maximum temperature (inclusive)
    vasp_cmd : str
        Command to run VASP. If None (the default) is passed, the command will be looked up in the FWorker.
    db_file : str
        Points to the database JSON file. If None (the default) is passed, the path will be looked up in the FWorker.
    name : str
        Name of the workflow
    metadata : dict
        Metadata to include
    """
    vasp_cmd = vasp_cmd or VASP_CMD
    db_file = db_file or DB_FILE

    metadata = metadata or {}
    tag = metadata.get('tag', '{}'.format(str(uuid4())))
    if 'tag' not in metadata.keys():
        metadata['tag'] = tag

    if isinstance(deformation_fraction, (list, tuple)):
        deformations = np.linspace(1+deformation_fraction[0], 1+deformation_fraction[1], num_deformations)
    else:
        deformations = np.linspace(1-deformation_fraction, 1+deformation_fraction, num_deformations)

    # follow a scheme of
    # 1. Full relax + symmetry check
    # 2. If symmetry check fails, detour to 1. Volume relax, 2. inflection detection
    # 3. Inflection detection
    # 4. Static EV
    # 5. Phonon EV
    fws = []
    static_calcs = []
    phonon_calcs = []
    # for each FW, we set the structure to the original structure to verify to ourselves that the
    # volume deformed structure is set by input set.

    # Full relax
    vis = RelaxSet(structure)
    full_relax_fw = OptimizeFW(structure, symmetry_tolerance=0.05, job_type='normal', name='Full relax', prev_calc_loc=False, vasp_input_set=vis, vasp_cmd=vasp_cmd, db_file=db_file, metadata=metadata, spec={'_preserve_fworker': True})
    fws.append(full_relax_fw)

    for i, deformation in enumerate(deformations):
        vis = StaticSet(structure)
        static = StaticFW(structure, scale_lattice=deformation, name='structure_{}-static'.format(i), vasp_input_set=vis, vasp_cmd=vasp_cmd, db_file=db_file, metadata=metadata, parents=full_relax_fw)
        fws.append(static)
        static_calcs.append(static)

        if phonon:
            vis = ForceConstantsSet(structure)
            phonon_fw = PhononFW(structure, phonon_supercell_matrix, t_min=t_min, t_max=t_max, t_step=t_step,
                     name='structure_{}-phonon'.format(i), vasp_input_set=vis,
                     vasp_cmd=vasp_cmd, db_file=db_file, metadata=metadata,
                     prev_calc_loc=True, parents=static)
            fws.append(phonon_fw)
            phonon_calcs.append(static)
            phonon_calcs.append(phonon_fw)

    # always do a Debye after the static calculations. That way we can set up a phonon calculation, do a Debye fitting, then do the phonon if we want.
    debye_fw = Firework(QHAAnalysis(phonon=False, t_min=t_min, t_max=t_max, t_step=t_step, db_file=db_file, tag=tag, metadata=metadata), parents=static_calcs, name="{}-qha_analysis-Debye".format(structure.composition.reduced_formula))
    fws.append(debye_fw)
    if phonon:
        # do a Debye run before the phonon, so they can be done in stages.
        phonon_fw = Firework(QHAAnalysis(phonon=True, t_min=t_min, t_max=t_max, t_step=t_step, db_file=db_file, tag=tag, metadata=metadata), parents=phonon_calcs, name="{}-qha_analysis-phonon".format(structure.composition.reduced_formula))
        fws.append(phonon_fw)

    wfname = "{}:{}".format(structure.composition.reduced_formula, name)

    return Workflow(fws, name=wfname, metadata=metadata)
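
A usage sketch; the POSCAR path is a placeholder, and the pymatgen import path follows recent pymatgen versions:

from pymatgen.core import Structure

struct = Structure.from_file("POSCAR")  # placeholder input file
wf = get_wf_gibbs(struct,
                  num_deformations=7,
                  deformation_fraction=(-0.05, 0.1),
                  phonon=True,
                  phonon_supercell_matrix=[[2, 0, 0], [0, 2, 0], [0, 0, 2]],
                  t_max=1500,
                  metadata={'tag': 'my-gibbs-run'})
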
Example #29
 def _copy_wf(self, wf):
     return Workflow.from_dict(wf.to_dict())
def get_ase_wflows(
    structures,
    parameters={},
    calculator='VASP',
    to_db=True,
    db_file=None,
    optimizer=None,
    fmax=None,
    identifiers=None,
    calculator_module='ase.calculators.{}',
):
    """
    A function to generate an arbitrary number of DFT calculations in a single workflow.
    This is designed to be very simple, no stringing workflows together, just throw in
    the structures and parameters and go. You may pass in a list of structures or a single
    structure. Likewise, you may pass in a single dictionary of parameters or a list
    corresponding to the list of structures. If a list of structures is passed in with only
    a single parameter dict, it is assumed that you want to use the same parameters for all
    the calculations.

    inputs:
        structures (ASE Atoms object/list): a single ASE atoms object or a list of atoms
                        objects for the structure(s) you'd like to calculate
        parameters (dict/list): a dictionary or a list of dictionaries containing the input
                        arguments for the ASE calculators you're going to run. The list
                        of dictionaries must correspond to the list of structures. If
                        only one dictionary is provided, the same dictionary is used for
                        all structures.
        calculator (str): the name of the calculator to be imported
        to_db (bool): If True, the firework will attempt to connect to a mongodb
                        and store the results there from the calculator.todict
                        function and the atoms object.
        identifiers (str/list/dict): a tag to be placed in the mongodb entry to
                        identify this run from others; can be any type that can go
                        into a mongodb.
        db_file (str): the path to a mongodb json file containing information on
                         how to access the database. Details on this file (and an
                         example) are in the example directory.
        optimizer (str): if this variable is left as None no optimization is performed.
                         Otherwise, input a string containing the ASE optimizer you'd 
                         like to use.
                         (https://wiki.fysik.dtu.dk/ase/ase/optimize.html)
        calculator_module (str): the location of the calculator you want to use.
                         The default should be 'ase.calculators'. Otherwise
                         something like 'espresso.espresso' might be what you want


    returns:
        workflow (Workflow): a single Workflow containing all of the fireworks
    """
    fws = []

    # initialize the default location of calculators if none is given
    if calculator_module == 'ase.calculators.{}':
        calculator_module = calculator_module.format(calculator.lower())

    # check inputs
    if type(structures) != list:
        structures = [structures]
    if type(
            parameters
    ) != list:  # If no list of parameters is given, use the same for all
        parameters = [parameters] * len(structures)
    if type(identifiers) != list:
        identifiers = [identifiers] * len(structures)
    if len(parameters) != len(structures):
        raise Exception(
            'The number of parameter dictionaries did not match the number of structures'
        )

    # build the workflow from individual fireworks
    if optimizer is not None:
        if fmax is None:
            fmax = 0.05
        # for ASE optimization runs
        for struct, param, identifier in zip(structures, parameters,
                                             identifiers):
            name = struct.get_chemical_formula()
            fws.append(
                ASE_Optimize_FW(
                    atoms_dict(struct),
                    param,
                    calculator=calculator,
                    to_db=to_db,
                    fmax=fmax,
                    db_file=db_file,
                    identifier=identifier,
                    calculator_module=calculator_module,
                ))
    else:
        # for simple runs
        if fmax is not None:
            # emit a real warning (requires "import warnings" at the module top)
            warnings.warn(
                'fmax was set, but an optimizer was not chosen, thus no '
                'optimization will be performed. To run an optimization, '
                'pass in the optimizer argument'
            )
        for struct, param, identifier in zip(structures, parameters,
                                             identifiers):
            fws.append(
                ASE_Run_FW(
                    atoms=atoms_dict(struct),
                    parameters=param,
                    calculator=calculator,
                    to_db=to_db,
                    db_file=db_file,
                    identifier=identifier,
                    calculator_module=calculator_module,
                ))

    return Workflow(fws, name="{} calculations wf, e.g.,".format(len(fws)))
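A usage sketch; the calculator parameters shown are illustrative values for the ASE VASP calculator, and the function returns a single Workflow:

from ase.build import bulk

atoms = bulk('Cu', 'fcc', a=3.6)
wf = get_ase_wflows(atoms,
                    parameters={'encut': 400, 'kpts': (8, 8, 8)},
                    calculator='VASP',
                    optimizer='BFGS',  # run an ASE relaxation
                    fmax=0.03,
                    identifiers='cu-bulk-test')
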
Example #32
    def get_unfinished_jobs(self, sp_params, name_pre="single_point", dirs=None, max_cores=24):
        """
        Look for jobs where optimization and frequency calculations have
        successfully completed, but single-point has not. Then, for these cases,
        construct a workflow which will only run the sp job.

        :param sp_params: dict containing input parameters for single-point job
        :param name_pre: str representing prefix for all jobs.
        :param dirs: list of subdirectories to check for unfinished jobs.
            Default None, meaning that all subdirectories will be checked.
        :param max_cores: (int) Maximum number of cores to
            parallelize over. Defaults to 24.
        :return: Workflow containing the single-point fireworks
        """

        if not self.subdirs:
            raise RuntimeError("Cannot run get_unfinished_jobs(); "
                               "need reaction components to be isolated in "
                               "different subdirectories.")

        fws = []

        all_dirs = [d for d in listdir(self.base_dir)
                    if isdir(join(self.base_dir, d))]

        molecules_cleared = []

        appropriate_dirs = all_dirs

        if dirs is not None:
            appropriate_dirs = [d for d in appropriate_dirs if d in dirs]

        for d in appropriate_dirs:
            path = join(self.base_dir, d)
            file_map = associate_qchem_to_mol(self.base_dir, d)

            for key, values in file_map.items():
                mol_id = extract_id(key)

                if mol_id in molecules_cleared:
                    continue

                freq_complete = False
                sp_complete = False

                in_files = values["in"]
                out_files = values["out"]

                # Check if this molecule has finished freq, sp
                # If there is no sp output file, or if the sp output file did
                # not complete, then we may proceed
                for out_file in out_files:
                    if "freq" in out_file:
                        freq_out = QCOutput(join(path, out_file))

                        if freq_out.data.get("completion", []):
                            freq_complete = True
                    elif "sp" in out_file:
                        sp_out = QCOutput(join(path, out_file))

                        if sp_out.data.get("completion", []):
                            sp_complete = True

                if freq_complete and not sp_complete:
                    # Check if there is already an sp input file
                    freq_in_file = None

                    for in_file in in_files:
                        if "freq" in in_file:
                            freq_in_file = in_file

                    if freq_in_file is None:
                        # We could parse output files to get previous input
                        # information, but we should try to keep all input
                        # files in the same directory
                        continue
                    else:
                        infile = join(path, key.replace(".mol", "") + ".in")
                        outfile = join(path, key.replace(".mol", "") + ".out")
                        qclogfile = join(path, key.replace(".mol", "") + ".qclog")

                        freq_in_file = QCInput.from_file(join(path,
                                                              freq_in_file))
                        mol = freq_in_file.molecule

                        fw = SinglePointFW(molecule=mol,
                                           name="{}: {}/{}".format(name_pre, d, mol_id),
                                           qchem_cmd="qchem -slurm",
                                           multimode="openmp",
                                           input_file=infile,
                                           output_file=outfile,
                                           qclog_file=qclogfile,
                                           max_cores=max_cores,
                                           sp_params=sp_params)

                        fws.append(fw)
                        molecules_cleared.append(mol_id)

        return Workflow(fws)
Example #33
    def get_reaction_set_workflow(self, name_pre="opt_freq_sp", max_cores=64,
                                  qchem_input_params=None,
                                  sp_params=None):
        """Generates a Fireworks Workflow to find the structures and energies of
        the reactants and products of a single reaction.

        Note: as written now, this function will only work if self.subdirs is
        True; that is, only if each reaction is in a separate subdirectory.
        Later additions could allow for some other means of specifying the
        separate reactions within a single directory.

        :param name_pre: str indicating the prefix which should be used for all
        Firework names
        :param max_cores: int specifying number of processes/threads that can
        be used for this workflow.
        :param qchem_input_params: dict
        :param sp_params: For OptFreqSPFW, single-point calculations can be
        treated differently from Opt and Freq. In this case, another dict
        for sp must be used.

        :return: Workflow
        """

        if not self.subdirs:
            raise RuntimeError("Cannot run get_reaction_set_workflow(); "
                               "need reaction components to be isolated in "
                               "different subdirectories.")

        fws = []

        dirs = [d for d in listdir(self.base_dir) if isdir(join(self.base_dir, d))]

        # Only set up a workflow if it is worthwhile (the reaction actually
        # proceeds as written, and all atoms add up)
        appropriate_dirs = self.check_appropriate_dirs(dirs)

        if self.db is not None:
            all_fws = self.db.collection.find()

            # Keep track of which molecules have already been run as jobs before
            molecules_registered = [extract_id(fw["task_label"])
                                    for fw in all_fws]
        else:
            molecules_registered = []

        for d in appropriate_dirs:
            path = join(self.base_dir, d)
            files = [f for f in listdir(path) if isfile(join(path, f)) and f.endswith(".mol")]
            rcts = [f for f in files if f.startswith(self.reactant_pre)]
            pros = [f for f in files if f.startswith(self.product_pre)]

            for i, rct in enumerate(rcts):
                mol_id = rct[:-len(".mol")].split("_")[-1]

                if mol_id in molecules_registered:
                    continue
                else:
                    molecules_registered.append(mol_id)

                mol = get_molecule(join(self.base_dir, d, rct))

                infile = join(path, self.reactant_pre + str(i) + ".in")
                outfile = join(path, self.reactant_pre + str(i) + ".out")

                fw = OptFreqSPFW(molecule=mol,
                                 name="{}: {}/{}".format(name_pre, d, rct),
                                 qchem_cmd="qchem -slurm",
                                 input_file=infile,
                                 output_file=outfile,
                                 qclog_file=join(path, self.reactant_pre + str(i) + ".qclog"),
                                 max_cores=max_cores,
                                 qchem_input_params=qchem_input_params,
                                 sp_params=sp_params,
                                 db_file=self.db_file)

                fws.append(fw)

            for i, pro in enumerate(pros):
                mol_id = pro[:-len(".mol")].split("_")[-1]

                if mol_id in molecules_registered:
                    continue
                else:
                    molecules_registered.append(mol_id)

                mol = get_molecule(join(self.base_dir, d, pro))

                infile = join(path, self.product_pre + str(i) + ".in")
                outfile = join(path, self.product_pre + str(i) + ".out")

                fw = OptFreqSPFW(molecule=mol,
                                 name="{}: {}/{}".format(name_pre, d, pro),
                                 qchem_cmd="qchem -slurm",
                                 input_file=infile,
                                 output_file=outfile,
                                 qclog_file=join(path, self.product_pre + str(i) + ".qclog"),
                                 max_cores=max_cores,
                                 qchem_input_params=qchem_input_params,
                                 sp_params=sp_params,
                                 db_file=self.db_file)

                fws.append(fw)

        return Workflow(fws)
Example #34
File: nosier.py, Project: adiv2/tf2jan
def processfile(runfile):
    """Function to process testopia run yaml file and create workflows, add them 
    to run in fireworks"""
    with open(runfile, 'r') as f:
        run_details = yaml.safe_load(f)
    testcases = run_details['test_run']['cases']
    print('testcases:\n')
    print(testcases)
    testcasetype = type(testcases)
    print(testcasetype)
    run_id = int(run_details['test_run']['run_id'])
    print(run_id)
    environment_id = int(run_details['test_run']['environment_id'])
    print(environment_id)
    tcms = Testopia.from_config('/var/dt/tf/etc/testopia.cfg')
    environment_details = tcms.environment_get(environment_id)
    print(environment_details)
    rundetailsfromtcms = tcms.testrun_get(run_id)
    product_version = rundetailsfromtcms['product_version']
    build_id = rundetailsfromtcms['build_id']
    buildinfo = tcms.build_get(build_id)
    print(buildinfo)
    build_name = buildinfo['name']
    print("build name: " + build_name)
    print("product_version " + product_version)
    environment_name = environment_details['name']
    print(environment_name)
    environment_file = '/var/dt/tf/etc/environments/' + environment_name + '.py'
    environment_filepyc = environment_file + 'c'
    if os.path.isfile(environment_filepyc):
        print "environment pyc file is present, deleting it"
        os.remove(environment_filepyc)
    else:
        print "No cached environment pyc file found"
    print environment_file
    testsonfire = []
    fwsequence = {}
    fwkey = ''
    fwvalue = ''
    for testcase in testcases.keys():
        case_id = int(testcase)
        testcase_name = run_details['test_run']['cases'][testcase]['summary']
        argsf = [
            run_id, case_id, build_id, environment_id, environment_name,
            environment_file, testcase_name, product_version, build_name
        ]
        fw_test = Firework(PyTask(func='HookFW.runCase', args=argsf))
        print "argsf are:"
        print argsf
        testsonfire.append(fw_test)
        if fwvalue:
            fwsequence[fwvalue] = fw_test
            fwvalue = fw_test
        else:
            fwvalue = fw_test

    # To be run as the last Firework in the workflow, compiling logs for the entire set of test cases

    rebotcmd = ("cd /var/dt/tf/logs/" + str(run_id) +
                '; rebot -N "DTTF" -R */*.xml; ln -s report.html index.html; echo ok ')
    fw_test = Firework(ScriptTask.from_str(rebotcmd))
    testsonfire.append(fw_test)
    if previous_fw:
        fwsequence[previous_fw] = fw_test
    print("tests on fire:")
    print(testsonfire)
    print("test sequence:")
    print(fwsequence)
    workflow = Workflow(testsonfire, fwsequence)
    launchpad = LaunchPad()
    launchpad.add_wf(workflow)
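A minimal invocation sketch for the loader above; the run-file path is hypothetical and must point at a YAML file following the test_run/cases layout parsed by processfile:

if __name__ == "__main__":
    # hypothetical Testopia run file with test_run: {run_id, environment_id, cases: {...}}
    processfile("/var/dt/tf/etc/runs/example_run.yaml")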
Example #35
def get_wf_torsion_potential(molecule,
                             atom_indexes,
                             angles,
                             rem,
                             name="torsion_potential",
                             qchem_cmd=">>qchem_cmd<<",
                             multimode=">>multimode<<",
                             max_cores=">>max_cores<<",
                             db_file=None,
                             **kwargs):
    """
    Returns a workflow to compute the torsion potential for a molecule.

    Firework 1 : write QChem input for an optimization,
                 run Qchem,
                 parse output and insert into db,
                 pass relaxed molecule to fw_spec and on to fw2,

    Firework 2 : rotate molecule torsion to a particular angle,
                 write QChem input for an optimization,
                 run Qchem,
                 parse output and insert into db

    last Firework : add analysis code at some point

    Args:
            molecule (Molecule): Input molecule (needs to be a pymatgen molecule object)
            atom_indexes (list of ints): list of atom indexes in the torsion angle to be rotated (i.e. [6, 8, 9, 10])
            angles (list of floats): list of all the torsion angles to run
            rem (list of two rem dictionaries): a list with two rem dictionaries, one for the first optimization and
            one for the second constrained optimization
            name (str): Name for the workflow.
            qchem_cmd (str): Command to run QChem. Defaults to qchem.
            multimode (str): Parallelization scheme, either openmp or mpi.
            input_file (str): Name of the QChem input file. Defaults to mol.qin.
            output_file (str): Name of the QChem output file. Defaults to mol.qout.
            max_cores (int): Maximum number of cores to parallelize over. Defaults to 32.
            qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
                                       For example, if you want to change the DFT_rung, you should
                                       provide: {"DFT_rung": ...}. Defaults to None.
            db_file (str): Path to file specifying db credentials to place output parsing.
            **kwargs: Other kwargs that are passed to Firework.__init__.

    Returns: Workflow
    """
    fws = []

    # Optimize the starting molecule fw1
    fw1 = OptimizeFW(molecule=molecule,
                     name="initial_opt",
                     qchem_cmd=qchem_cmd,
                     multimode=multimode,
                     max_cores=max_cores,
                     db_file=db_file,
                     **kwargs)
    for idx_t, t in enumerate(fw1.tasks):
        if "WriteInputFromIOSet" in str(t):
            fw1.tasks[idx_t] = WriteCustomInput(molecule=molecule, rem=rem[0])
    fws.append(fw1)

    # Loop to generate all the different rotated molecule optimizations
    for angle in angles:
        rot_opt_fw = OptimizeFW(name=("opt_" + str(int(angle))),
                                qchem_cmd=qchem_cmd,
                                multimode=multimode,
                                max_cores=max_cores,
                                db_file=db_file,
                                parents=fw1,
                                **kwargs)
        rot_task = RotateTorsion(atom_indexes=atom_indexes, angle=angle)
        rot_opt_fw.tasks.insert(0, rot_task)
        # define opt section
        opt_line = "tors {a} {b} {c} {d} {ang}".format(a=atom_indexes[0],
                                                       b=atom_indexes[1],
                                                       c=atom_indexes[2],
                                                       d=atom_indexes[3],
                                                       ang=angle)
        opt = {"CONSTRAINT": [opt_line]}
        for idx_t, t in enumerate(rot_opt_fw.tasks):
            if "WriteInputFromIOSet" in str(t):
                rot_opt_fw.tasks[idx_t] = WriteCustomInput(rem=rem[1], opt=opt)
        fws.append(rot_opt_fw)

    wfname = "{}:{}".format(molecule.composition.reduced_formula, name)

    return Workflow(fws, name=wfname)
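A usage sketch for the torsion workflow above, assuming a molecule file on disk; the file name and the two rem dictionaries are hypothetical placeholders (the CONSTRAINT section for each angle is added by the workflow itself):

from pymatgen.core import Molecule

mol = Molecule.from_file("butane.xyz")  # hypothetical input geometry
rem = [
    {"job_type": "opt", "method": "wb97x-d", "basis": "6-31g*"},  # unconstrained opt
    {"job_type": "opt", "method": "wb97x-d", "basis": "6-31g*"},  # constrained opts
]
wf = get_wf_torsion_potential(mol, [0, 1, 2, 3], [0.0, 60.0, 120.0, 180.0],
                              rem, db_file=">>db_file<<")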
Example #36
def get_wf_slab(slab,
                include_bulk_opt=False,
                adsorbates=None,
                ads_structures_params=None,
                vasp_cmd="vasp",
                db_file=None,
                add_molecules_in_box=False):
    """
    Gets a workflow corresponding to a slab calculation along with optional
    adsorbate calcs and precursor oriented unit cell optimization

    Args:
        slab (Slab or Structure): slab to calculate
        include_bulk_opt (bool): whether to include bulk optimization,
            this flag sets the slab fireworks to be TransmuterFWs based
            on bulk optimization of oriented unit cells
        adsorbates ([Molecule]): list of molecules to place as adsorbates
        ads_structures_params (dict): parameters to be supplied as
            kwargs to AdsorbateSiteFinder.generate_adsorption_structures
        add_molecules_in_box (boolean): flag to add calculation of
            adsorbate molecule energies to the workflow
        db_file (string): path to database file
        vasp_cmd (string): vasp command

    Returns:
        Workflow
    """
    fws, parents = [], []

    if adsorbates is None:
        adsorbates = []

    if ads_structures_params is None:
        ads_structures_params = {}

    # Add bulk opt firework if specified
    if include_bulk_opt:
        oriented_bulk = slab.oriented_unit_cell
        vis = MPSurfaceSet(oriented_bulk, bulk=True)
        fws.append(
            OptimizeFW(structure=oriented_bulk,
                       vasp_input_set=vis,
                       vasp_cmd=vasp_cmd,
                       db_file=db_file))
        parents = fws[-1]

    name = slab.composition.reduced_formula
    if getattr(slab, "miller_index", None):
        name += "_{}".format(slab.miller_index)
    # Create slab fw and add it to list of fws
    slab_fw = get_slab_fw(slab,
                          include_bulk_opt,
                          db_file=db_file,
                          vasp_cmd=vasp_cmd,
                          parents=parents,
                          name="{} slab optimization".format(name))
    fws.append(slab_fw)

    for adsorbate in adsorbates:
        ads_slabs = AdsorbateSiteFinder(slab).generate_adsorption_structures(
            adsorbate, **ads_structures_params)
        for n, ads_slab in enumerate(ads_slabs):
            # Create adsorbate fw
            ads_name = "{}-{} adsorbate optimization {}".format(
                adsorbate.composition.formula, name, n)
            adsorbate_fw = get_slab_fw(ads_slab,
                                       include_bulk_opt,
                                       db_file=db_file,
                                       vasp_cmd=vasp_cmd,
                                       parents=parents,
                                       name=ads_name)
            fws.append(adsorbate_fw)

    if isinstance(slab, Slab):
        name = "{}_{} slab workflow".format(
            slab.composition.reduced_composition, slab.miller_index)
    else:
        name = "{} slab workflow".format(slab.composition.reduced_composition)

    wf = Workflow(fws, name=name)

    # Add optional molecules workflow
    if add_molecules_in_box:
        molecule_wf = get_wf_molecules(adsorbates,
                                       db_file=db_file,
                                       vasp_cmd=vasp_cmd)
        wf.append_wf(molecule_wf)

    return wf
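A usage sketch, assuming a bulk structure file and pymatgen's SlabGenerator; the file name and the >>...<< env_chk placeholders are illustrative:

from pymatgen.core import Structure
from pymatgen.core.surface import SlabGenerator
from fireworks import LaunchPad

bulk = Structure.from_file("POSCAR")  # hypothetical bulk structure
slab = SlabGenerator(bulk, miller_index=(1, 1, 1),
                     min_slab_size=10, min_vacuum_size=10).get_slab()
wf = get_wf_slab(slab, include_bulk_opt=True,
                 vasp_cmd=">>vasp_cmd<<", db_file=">>db_file<<")
LaunchPad.auto_load().add_wf(wf)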
Example #37
File: raman.py Project: FilipchukB/P1
def get_wf_raman_spectra(structure,
                         modes=None,
                         step_size=0.005,
                         vasp_cmd="vasp",
                         db_file=None):
    """
    Raman susceptibility tensor workflow:
        Calculation of phonon normal modes followed by the computation of dielectric tensor for
        structures displaced along the normal modes. Finally the dielectric tensors corresponding
        to each mode are used to compute the Raman susceptibility tensor using finite difference
        (central difference scheme).

    Args:
        structure (Structure): Input structure.
        modes (tuple/list): list of modes for which the Raman spectra need to be calculated.
            The default is to use all the 3N modes.
        step_size (float): site displacement along the normal mode in Angstroms. Used to compute
            the finite-difference (central difference scheme) first derivative of the dielectric
            constant along the normal modes.
        vasp_cmd (str): vasp command to run.
        db_file (str): path to file containing the database credentials.

    Returns:
        Workflow
    """
    modes = modes or range(3 * len(structure))
    vis = MPRelaxSet(structure, force_gamma=True)
    # displacements in + and - direction along the normal mode so that the central difference scheme
    # can be used for the evaluation of Raman tensor (derivative of epsilon wrt displacement)
    displacements = [-step_size, step_size]

    fws = []

    # Structure optimization
    fw_opt = OptimizeFW(structure=structure,
                        vasp_input_set=vis,
                        ediffg=None,
                        vasp_cmd=vasp_cmd,
                        db_file=db_file)
    fws.append(fw_opt)

    # Static run: compute the normal modes and pass
    fw_leps = DFPTFW(structure=structure,
                     vasp_cmd=vasp_cmd,
                     db_file=db_file,
                     parents=fw_opt,
                     pass_nm_results=True)

    fws.append(fw_leps)

    # Static runs to compute epsilon for each mode and displacement along that mode.
    fws_nm_disp = []
    for mode in modes:
        for disp in displacements:
            fws_nm_disp.append(
                RamanFW(mode,
                        disp,
                        structure=structure,
                        parents=fw_leps,
                        vasp_cmd=vasp_cmd,
                        db_file=db_file))
    fws.extend(fws_nm_disp)

    # Compute the Raman susceptibility tensor
    fw_analysis = Firework(RamanTensorToDb(db_file=db_file),
                           parents=fws[:],
                           name="{}-{}".format(
                               structure.composition.reduced_formula,
                               "raman analysis"))
    fws.append(fw_analysis)

    wfname = "{}:{}".format(structure.composition.reduced_formula,
                            "raman spectra")
    return Workflow(fws, name=wfname)
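A usage sketch; the structure file is a placeholder, and restricting modes keeps the example small (by default all 3N modes are computed):

from pymatgen.core import Structure

structure = Structure.from_file("POSCAR")  # hypothetical relaxed structure
wf = get_wf_raman_spectra(structure, modes=(0, 1), step_size=0.005,
                          vasp_cmd=">>vasp_cmd<<", db_file=">>db_file<<")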
Example #38
def end_point_wf(mp_id, pair_index, image_num, moving_cation, col):
    """

    :param col: collection name
    :param mp_id: mp_id in the db_scripts
    :param pair_index: pair_index in the db_scripts
    :param image_num: index number of the image on the path from the db_scripts
    :param moving_cation: pymatgen.Element object, represeting the moving cation, e.g. Element('Mg')
    :return:
    """
    MyDB.db_access().connect()
    collection = MyDB.db_access().collection(col)

    doc = collection.find_one({'mp-id': mp_id, 'pair_index': pair_index})

    # Calculation that is already successful
    if 'MEP_energy' in doc.keys() and ("image_{}".format(image_num)
                                       in doc['MEP_energy']):
        if doc['MEP_energy']['image_{}'.format(
                image_num)]['status'] == 'success':
            return
        else:
            # Calculation that has halted halfway due to errors
            if 'CONTCAR_struct' in doc['MEP_energy']['image_{}'.format(
                    image_num)].keys():
                struct = Structure.from_dict(doc["MEP_energy"][
                    "image_{}".format(image_num)]["CONTCAR_struct"])
            # Calculation that has not been run before
            else:
                struct = Structure.from_dict(doc['gamma_structure'])
                cation_site = PeriodicSite.from_dict(doc['path'][image_num])
                struct.insert(0,
                              cation_site.specie,
                              cation_site.frac_coords,
                              properties=doc['path'][image_num]['properties'])
    else:
        struct = Structure.from_dict(doc['gamma_structure'])
        cation_site = PeriodicSite.from_dict(doc['path'][image_num])
        struct.insert(0,
                      cation_site.specie,
                      cation_site.frac_coords,
                      properties=doc['path'][image_num]['properties'])

    task1 = WritePointRunInput(structure=struct.as_dict(),
                               moving_cation=moving_cation.as_dict())
    task2 = PointCustodianRun(handlers='all')
    task3 = PointRunAnalyze()

    fw = Firework(
        [task1, task2, task3],
        spec={
            "mp_id": mp_id,
            "pair_index": pair_index,
            "image_num": image_num,
            "collection": col,
            "_queueadapter": {
                'nnodes': 128,
                'walltime': '10:00:00',
                'queue': 'Q.JCESR',
                'job_name': "{}_{}".format(doc["pretty_formula"], image_num)
            }
        })

    wf_list = [fw]
    wf_depend = {}
    wf = Workflow(wf_list, wf_depend)

    MyDB.db_access().close()
    return wf
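A usage sketch; the mp-id, pair index, image number, and collection name below are hypothetical and must match documents already present in the database queried above:

from pymatgen.core.periodic_table import Element

wf = end_point_wf(mp_id="mp-1234", pair_index=0, image_num=3,
                  moving_cation=Element("Mg"), col="migration_paths")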
Example #39
def get_wf_elastic_constant(structure,
                            strain_states=None,
                            stencils=None,
                            db_file=None,
                            conventional=False,
                            order=2,
                            vasp_input_set=None,
                            analysis=True,
                            sym_reduce=False,
                            tag='elastic',
                            copy_vasp_outputs=False,
                            **kwargs):
    """
    Returns a workflow to calculate elastic constants.

    Firework 1 : write vasp input set for structural relaxation,
                 run vasp,
                 pass run location,
                 database insertion.

    Firework 2 - number of total deformations: Static runs on the deformed structures

    last Firework : Analyze Stress/Strain data and fit the elastic tensor

    Args:
        structure (Structure): input structure to be optimized and run.
        strain_states (list of Voigt-notation strains): list of ratios of nonzero elements
            of Voigt-notation strain, e.g. [(1, 0, 0, 0, 0, 0), (0, 1, 0, 0, 0, 0), etc.].
        stencils (list of floats, or list of list of floats): values of strain to multiply
            by for each strain state, i.e. the stencil for the perturbation along the strain
            state direction, e.g. [-0.01, -0.005, 0.005, 0.01]. If a list of lists,
            stencils must correspond to each strain state provided.
        db_file (str): path to file containing the database credentials.
        conventional (bool): flag to convert input structure to conventional structure,
            defaults to False.
        order (int): order of the tensor expansion to be determined.  Defaults to 2 and
            currently supports up to 3.
        vasp_input_set (VaspInputSet): vasp input set to be used.  Defaults to static
            set with ionic relaxation parameters set.  Take care if replacing this,
            default ensures that ionic relaxation is done and that stress is calculated
            for each vasp run.
        analysis (bool): flag to indicate whether analysis task should be added
            and stresses and strains passed to that task
        sym_reduce (bool): Whether or not to apply symmetry reductions
        tag (str): a label used to identify this workflow's fireworks; passed through
            to get_wf_deformations.
        copy_vasp_outputs (bool): whether or not to copy previous vasp outputs.
        kwargs (keyword arguments): additional kwargs to be passed to get_wf_deformations

    Returns:
        Workflow
    """
    # Convert to conventional if specified
    if conventional:
        structure = SpacegroupAnalyzer(
            structure).get_conventional_standard_structure()

    uis_elastic = {
        "IBRION": 2,
        "NSW": 99,
        "ISIF": 2,
        "ISTART": 1,
        "PREC": "High"
    }
    vis = vasp_input_set or MPStaticSet(structure,
                                        user_incar_settings=uis_elastic)
    strains = []
    if strain_states is None:
        strain_states = get_default_strain_states(order)
    if stencils is None:
        stencils = [np.linspace(-0.01, 0.01, 5 +
                                (order - 2) * 2)] * len(strain_states)
    if np.array(stencils).ndim == 1:
        stencils = [stencils] * len(strain_states)
    for state, stencil in zip(strain_states, stencils):
        strains.extend(
            [Strain.from_voigt(s * np.array(state)) for s in stencil])

    # Remove zero strains
    strains = [strain for strain in strains if not (abs(strain) < 1e-10).all()]
    vstrains = [strain.voigt for strain in strains]
    if np.linalg.matrix_rank(vstrains) < 6:
        # TODO: check for sufficiency of input for nth order
        raise ValueError(
            "Strain list is insufficient to fit an elastic tensor")

    deformations = [s.deformation_matrix for s in strains]

    if sym_reduce:
        deformations = symmetry_reduce(deformations, structure)

    wf_elastic = get_wf_deformations(structure,
                                     deformations,
                                     tag=tag,
                                     db_file=db_file,
                                     vasp_input_set=vis,
                                     copy_vasp_outputs=copy_vasp_outputs,
                                     **kwargs)
    if analysis:
        defo_fws_and_tasks = get_fws_and_tasks(
            wf_elastic,
            fw_name_constraint="deformation",
            task_name_constraint="Transmuted")
        for idx_fw, idx_t in defo_fws_and_tasks:
            defo = wf_elastic.fws[idx_fw].tasks[idx_t][
                'transformation_params'][0]['deformation']
            pass_dict = {
                'strain': Deformation(defo).green_lagrange_strain.tolist(),
                'stress': '>>output.ionic_steps.-1.stress',
                'deformation_matrix': defo
            }
            if sym_reduce:
                pass_dict.update(
                    {'symmops': get_tkd_value(deformations, defo)})

            mod_spec_key = "deformation_tasks->{}".format(idx_fw)
            pass_task = pass_vasp_result(pass_dict=pass_dict,
                                         mod_spec_key=mod_spec_key)
            wf_elastic.fws[idx_fw].tasks.append(pass_task)

        fw_analysis = Firework(ElasticTensorToDb(structure=structure,
                                                 db_file=db_file,
                                                 order=order,
                                                 fw_spec_field='tags'),
                               name="Analyze Elastic Data",
                               spec={"_allow_fizzled_parents": True})
        wf_elastic.append_wf(Workflow.from_Firework(fw_analysis),
                             wf_elastic.leaf_fw_ids)

    wf_elastic.name = "{}:{}".format(structure.composition.reduced_formula,
                                     "elastic constants")

    return wf_elastic
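A usage sketch with the defaults (second-order tensor, auto-generated strain states and stencils); the structure file is a placeholder:

from pymatgen.core import Structure

structure = Structure.from_file("POSCAR")  # hypothetical input structure
wf = get_wf_elastic_constant(structure, conventional=True, order=2,
                             db_file=">>db_file<<")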
Example #40
    def get_wf(self,
               scan=False,
               perform_bader=True,
               num_orderings_hard_limit=16,
               c=None):
        """
        Retrieve the FireWorks workflow.

        Args:
            scan: if True, use the SCAN functional instead of GGA+U, since
        the SCAN functional has shown to have improved performance for
        magnetic systems in some cases
            perform_bader: if True, make sure the "bader" binary is in your
        path, will use Bader analysis to calculate atom-projected magnetic
        moments
            num_orderings_hard_limit: will make sure total number of magnetic
        orderings does not exceed this number even if there are extra orderings
        of equivalent symmetry
            c: additional config dict (as used elsewhere in atomate)

        Returns: FireWorks Workflow

        """

        c_defaults = {"VASP_CMD": VASP_CMD, "DB_FILE": DB_FILE}
        c = c or {}
        for k, v in c_defaults.items():
            if k not in c:
                c[k] = v

        fws = []
        analysis_parents = []

        # trim the total number of orderings (useful in a high-throughput context);
        # this is somewhat coarse, so it is better to reduce the num_orderings kwarg
        # and/or change enumeration strategies
        ordered_structures = self.ordered_structures
        ordered_structure_origins = self.ordered_structure_origins

        def _add_metadata(structure):
            """
            For book-keeping, store useful metadata with the Structure
            object for later database ingestion including workflow
            version and a UUID for easier querying of all tasks generated
            from the workflow.

            Args:
                structure: Structure

            Returns: TransformedStructure
            """
            # this could be further improved by storing full transformation
            # history, but would require an improved transformation pipeline
            return TransformedStructure(
                structure, other_parameters={"wf_meta": self.wf_meta})

        ordered_structures = [
            _add_metadata(struct) for struct in ordered_structures
        ]

        if (num_orderings_hard_limit
                and len(self.ordered_structures) > num_orderings_hard_limit):
            ordered_structures = self.ordered_structures[
                0:num_orderings_hard_limit]
            ordered_structure_origins = self.ordered_structure_origins[
                0:num_orderings_hard_limit]
            logger.warning("Number of ordered structures exceeds hard limit, "
                           "removing last {} structures.".format(
                               len(self.ordered_structures) -
                               len(ordered_structures)))
            # always make sure input structure is included
            if self.input_index and self.input_index > num_orderings_hard_limit:
                ordered_structures.append(
                    self.ordered_structures[self.input_index])
                ordered_structure_origins.append(
                    self.ordered_structure_origins[self.input_index])

        # default incar settings
        user_incar_settings = {"ISYM": 0, "LASPH": True, "EDIFFG": -0.05}
        if scan:
            # currently, using SCAN relaxation as a static calculation also
            # since it is typically high quality enough, but want to make
            # sure we are also writing the AECCAR* files
            user_incar_settings.update({"LAECHG": True})
        user_incar_settings.update(c.get("user_incar_settings", {}))
        c["user_incar_settings"] = user_incar_settings

        for idx, ordered_structure in enumerate(ordered_structures):

            analyzer = CollinearMagneticStructureAnalyzer(ordered_structure)

            name = " ordering {} {} -".format(idx, analyzer.ordering.value)

            if not scan:

                vis = MPRelaxSet(ordered_structure,
                                 user_incar_settings=user_incar_settings)

                # relax
                fws.append(
                    OptimizeFW(
                        ordered_structure,
                        vasp_input_set=vis,
                        vasp_cmd=c["VASP_CMD"],
                        db_file=c["DB_FILE"],
                        max_force_threshold=0.05,
                        half_kpts_first_relax=False,
                        name=name + " optimize",
                    ))

                # static
                fws.append(
                    StaticFW(
                        ordered_structure,
                        vasp_cmd=c["VASP_CMD"],
                        db_file=c["DB_FILE"],
                        name=name + " static",
                        prev_calc_loc=True,
                        parents=fws[-1],
                        vasptodb_kwargs={
                            'parse_chgcar': True,
                            'parse_aeccar': True
                        },
                        # so a failed optimize doesn't crash the workflow
                        allow_fizzled_parents=True,
                    ))

            else:

                # wf_scan_opt is just a single FireWork so can append it directly
                scan_fws = wf_scan_opt(ordered_structure, c=c).fws
                # change name for consistency with non-SCAN
                new_name = scan_fws[0].name.replace("structure optimization",
                                                    name + " optimize")
                scan_fws[0].name = new_name
                scan_fws[0].tasks[-1]["additional_fields"][
                    "task_label"] = new_name
                fws += scan_fws

            analysis_parents.append(fws[-1])

        fw_analysis = Firework(
            MagneticOrderingsToDB(
                db_file=c["DB_FILE"],
                wf_uuid=self.uuid,
                parent_structure=self.sanitized_structure,
                origins=ordered_structure_origins,
                input_index=self.input_index,
                perform_bader=perform_bader,
                scan=scan,
            ),
            name="Magnetic Orderings Analysis",
            parents=analysis_parents,
            spec={"_allow_fizzled_parents": True},
        )
        fws.append(fw_analysis)

        formula = self.sanitized_structure.composition.reduced_formula
        wf_name = "{} - magnetic orderings".format(formula)
        if scan:
            wf_name += " - SCAN"
        wf = Workflow(fws, name=wf_name)

        wf = add_additional_fields_to_taskdocs(wf, {"wf_meta": self.wf_meta})

        tag = "magnetic_orderings group: >>{}<<".format(self.uuid)
        wf = add_tags(wf, [tag, ordered_structure_origins])

        return wf
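Since get_wf above is a method, a caller needs the enclosing builder object that supplies ordered_structures, uuid, and so on; the MagneticOrderingsWF name below is an assumption about that enclosing class:

from pymatgen.core import Structure

structure = Structure.from_file("POSCAR")  # hypothetical magnetic structure
wf_builder = MagneticOrderingsWF(structure)  # assumed enclosing class
wf = wf_builder.get_wf(scan=False, perform_bader=True,
                       num_orderings_hard_limit=16)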
Example #41
    def __init__(self, *args, **kwargs):
        Workflow.__init__(self, *args, **kwargs)
Example #42
    def run_task(self, fw_spec):
        from mpmorph.fireworks import powerups
        from mpmorph.fireworks.core import MDFW

        # Load Structure from Poscar
        _poscar = Poscar.from_file("CONTCAR.gz")
        structure = _poscar.structure

        # Get convergence parameters from spec
        converge_params = self["converge_params"]
        avg_fraction = converge_params.get("avg_fraction", 0.5)
        convergence_vars = dict(converge_params["converge_type"])
        if "ionic" not in convergence_vars.keys():
            convergence_vars["ionic"] = 0.0005
        rescale_params = self.get("rescale_params", {})

        # Load Data from OUTCAR
        search_keys = [
            'external', 'kinetic energy EKIN', '% ion-electron', 'ETOTAL'
        ]
        key_map = {
            'density': 'external',
            'kinetic energy': 'kinetic energy EKIN',
            'ionic': '% ion-electron',
            'total energy': 'ETOTAL'
        }
        outcar_data = md_data.get_MD_data("./OUTCAR.gz",
                                          search_keys=search_keys)

        # Check for convergence
        converged = {}
        _index = search_keys.index(key_map["density"])
        _data = np.transpose(outcar_data)[_index].copy()
        pressure = np.mean(_data[int(avg_fraction * (len(_data) - 1)):])
        if "density" in convergence_vars.keys():
            if np.abs(pressure) >= convergence_vars["density"]:
                converged["density"] = False
            else:
                converged["density"] = True

        if "kinetic energy" in convergence_vars.keys():
            _index = search_keys.index(key_map["kinetic energy"])
            energy = np.transpose(outcar_data)[_index].copy()
            norm_energy = (energy / structure.num_sites) / np.mean(
                energy / structure.num_sites) - 1
            if np.abs(np.mean(norm_energy[-500:]) - np.mean(norm_energy)
                      ) > convergence_vars["kinetic energy"]:
                converged["kinetic energy"] = False
            else:
                converged["kinetic energy"] = True

        _index = search_keys.index(key_map["ionic"])
        energy = np.transpose(outcar_data)[_index].copy()
        norm_energies = energy / structure.num_sites
        mu, std = stats.norm.fit(norm_energies)
        mu1, std1 = stats.norm.fit(norm_energies[0:int(len(norm_energies) /
                                                       2)])
        mu2, std2 = stats.norm.fit(norm_energies[int(len(norm_energies) / 2):])
        if np.abs((mu2 - mu1) / mu) < convergence_vars["ionic"]:
            converged["ionic"] = True
        else:
            converged["ionic"] = False

        # Spawn Additional Fireworks
        if not all(converged.values()):
            density_spawn_count = converge_params["density_spawn_count"]
            energy_spawn_count = converge_params["energy_spawn_count"]
            max_rescales = converge_params["max_rescales"]
            max_energy_runs = 3  # Set max energy convergence runs to default of 3

            run_specs = self["run_specs"]
            md_params = self["md_params"]
            optional_params = self.get("optional_fw_params", {})

            tag_id = self.get("tag_id", "")

            if density_spawn_count >= max_rescales:
                return FWAction(defuse_children=True)
            elif energy_spawn_count >= max_energy_runs:
                # Too many energy rescales... Just continue with the production runs
                return FWAction(stored_data={
                    'pressure': pressure,
                    'energy': mu,
                    'density_calculated': True
                })
            elif not converged.get("density", True):
                rescale_args = {
                    "initial_pressure": pressure * 1000,
                    "initial_temperature": 1,
                    "beta": 0.0000005
                }
                rescale_args = recursive_update(rescale_args, rescale_params)

                # Spawn fw
                fw = MDFW(
                    structure,
                    name=f'density_run_{density_spawn_count + 1}-{tag_id}',
                    previous_structure=False,
                    **run_specs,
                    **md_params,
                    **optional_params)
                converge_params["density_spawn_count"] += 1
                _spawner_args = {
                    "converge_params": converge_params,
                    "rescale_params": rescale_params,
                    "run_specs": run_specs,
                    "md_params": md_params,
                    "optional_fw_params": optional_params,
                    "tag_id": tag_id
                }
                fw = powerups.add_rescale_volume(fw, **rescale_args)
                fw = powerups.add_pass_pv(fw)
                fw = powerups.add_converge_task(fw, **_spawner_args)
                wf = Workflow([fw])
                return FWAction(detours=wf,
                                stored_data={
                                    'pressure': pressure,
                                    'energy': mu
                                })
            else:
                fw = MDFW(structure,
                          name=f'energy_run_{energy_spawn_count + 1}-{tag_id}',
                          previous_structure=False,
                          **run_specs,
                          **md_params,
                          **optional_params)
                converge_params["energy_spawn_count"] += 1
                _spawner_args = {
                    "converge_params": converge_params,
                    "rescale_params": rescale_params,
                    "run_specs": run_specs,
                    "md_params": md_params,
                    "optional_fw_params": optional_params,
                    "tag_id": tag_id
                }
                fw = powerups.add_pass_pv(fw)
                fw = powerups.add_converge_task(fw, **_spawner_args)
                wf = Workflow([fw])
                return FWAction(detours=wf,
                                stored_data={
                                    'pressure': pressure,
                                    'energy': mu
                                })
        else:
            return FWAction(stored_data={
                'pressure': pressure,
                'energy': mu,
                'density_calculated': True
            })
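run_task above pulls its knobs from the task dict; a hypothetical shape for those entries, consistent with the lookups in the code (converge_type is turned into a dict of thresholds, and the spawn counters start at zero), might look like:

# hypothetical spec consumed by the convergence task above
converge_params = {
    "converge_type": [("density", 5.0), ("kinetic energy", 0.01)],
    "avg_fraction": 0.5,
    "max_rescales": 6,
    "density_spawn_count": 0,
    "energy_spawn_count": 0,
}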
Example #44
from fireworks import LaunchPad, Firework, Workflow
from fireworks.core.rocket_launcher import launch_rocket
from fireworks.examples.custom_firetasks.hello_world.hello_world_task import HelloTask

if __name__ == "__main__":
    # initialize the database
    lp = LaunchPad()  # you might need to modify the connection settings here
    # lp.reset()  # uncomment this line and set the appropriate parameters if you want to reset the database

    # create the workflow and store it in the database
    my_fw = Firework([HelloTask()])
    my_wflow = Workflow.from_Firework(my_fw)
    lp.add_wf(my_wflow)

    # run the workflow
    launch_rocket(lp)
Example #45
class VaspWorkflow():
    """
    A VaspWorkflow encapsulates multiple VaspFirework objects into a Single Workflow.  
    If the kwarg "dependency" is not set, it will create a Sequential Workflow where the next 
    Firework in the Workflow will not start before the currently running Firework in the 
    Workflow completes.

    Parameters:
        -args (obj):        List of VaspFirework objects
        -deps_dict {dict}:  Specifies the dependency of the VaspInputInterface objects given.
                            If no dependency is given, Fireworks are assumed to be
                            sequentially dependent.
        -name (str):        Name to be given to the Workflow


    Example:
        VaspWorkflow(FW1, FW2, FW3, FW4, deps_dict={FW1: [FW2, FW3], FW2: [FW4], FW3: [FW4]}, name='Example WF')
        
        This will create a Workflow containing the 4 given VaspFirework objects
        with a Workflow name of 'Example WF' and the given dependencies.
        Dependency Dictionary Explanation:
            In the above example, FW2 and FW3 will not start before FW1 is complete.
            Likewise, FW4 depends on the completion of FW2 and FW3 before starting.
    """

    def __init__(self, *args, **kwargs):
        '''
        :param args:       (VaspFirework objects) objects to create the Workflow from.  No limit
                           on the number of VaspInputInterface objects to be given.  Entered as
                           comma-separated objects passed to the class.
        :param deps_dict:  (dict) specifies the dependency of the VaspInputInterface objects given.
                           If no dependency is given, Fireworks are assumed to be
                           sequentially dependent.
        :param name:       (str) Name given to the Workflow
        '''
        self.fws = []
        self.name = kwargs.get('name', 'Sequential WF')
        self.deps_dict = kwargs.get('deps_dict', {})
        self.dependency = {}
        if self.deps_dict:
            for i in self.deps_dict.keys():
                fw_deps = []
                for j in self.deps_dict[i]:
                    fw_deps.append(j.Firework)                    
                self.dependency[i.Firework]=fw_deps
        self.deps = True if self.dependency else False
        for idx, fw_task in enumerate(args):
            self.fws.append(fw_task.Firework)
            if not self.deps and idx != 0:
                self.dependency[self.fws[idx - 1]] = [fw_task.Firework]
        self.wf = Workflow(self.fws, self.dependency, name=self.name)
        # Try to establish a connection with the LaunchPad
        try:
            self.LaunchPad = LaunchPad.from_file(
                os.path.join(os.environ["HOME"], ".fireworks", "my_launchpad.yaml"))
        except Exception:
            self.LaunchPad = None


    def add_fw(self, fw_task, deps=None):
        self.fws.append(fw_task.Firework)
        if deps:
            for i in deps.keys():
                fw_deps = []
                for j in deps[i]:
                    fw_deps.append(j.Firework)
                self.dependency[i.Firework]=fw_deps
        else:
            idx = len(self.fws) - 2
            self.dependency[self.fws[idx]] = [fw_task.Firework]
        self.wf=Workflow(self.fws, self.dependency, name=self.name)


    def to_file(self,filename):
        self.wf.to_file(filename)


    def add_wf_to_launchpad(self):
        if self.LaunchPad:
            self.LaunchPad.add_wf(self.wf)
        else:
            print("No connection to LaunchPad. \n"
                "Use 'to_file(<filename>)' to write a yaml file\n"
                "to manually add Workflow to LaunchPad later.\n")
Example #46
File: core.py Project: FilipchukB/P1
def get_wf_eels(absorbing_atom,
                structure=None,
                feff_input_set="pymatgen.io.feff.sets.MPELNESSet",
                edge="K",
                radius=10.,
                beam_energy=100,
                beam_direction=None,
                collection_angle=1,
                convergence_angle=1,
                user_eels_settings=None,
                user_tag_settings=None,
                feff_cmd="feff",
                db_file=None,
                metadata=None,
                use_primitive=False):
    """
    Returns FEFF ELNES/EXELFS spectroscopy workflow.

    Args:
        absorbing_atom (str): absorbing atom symbol
        structure (Structure): input structure
        feff_input_set (str or FeffDictSet subclass): the input set for setting the parameters.
            If a string, then either the entire path to the class or the spectrum type must be
            provided, e.g. "pymatgen.io.feff.sets.MPELNESSet" or "ELNES"
        edge (str): absorption edge. K, L1, L2, L3
        radius (float): cluster radius in angstroms. Ignored for reciprocal space calculations
        beam_energy (float): the incident beam energy in keV
        beam_direction (list): incident beam direction. Default is none ==> the spectrum will be
            averaged over all directions.
        collection_angle (float): collection angle in mrad
        convergence_angle (float): convergence angle in mrad
        user_eels_settings (dict): override default eels settings.
        user_tag_settings (dict): override other general feff default tag settings.
        feff_cmd (str): path to the feff binary
        db_file (str):  path to the db file.
        metadata (dict): meta data
        use_primitive (bool): convert the structure to its primitive form. This helps to
            reduce the number of fireworks in the workflow if the absorbing atom is
            specified by its atomic symbol.

    Returns:
        Workflow
    """
    if use_primitive:
        structure = structure.get_primitive_structure()

    # get the absorbing atom site index/indices
    ab_atom_indices = get_absorbing_atom_indices(structure, absorbing_atom)

    override_default_feff_params = {"user_tag_settings": user_tag_settings}

    spectrum_type = get_feff_input_set_obj(feff_input_set, ab_atom_indices[0],
                                           structure).__class__.__name__[2:-3]

    # add firework for each absorbing atom site index
    fws = []
    for ab_idx in ab_atom_indices:
        fw_metadata = dict(metadata) if metadata else {}
        fw_metadata["absorbing_atom_index"] = ab_idx
        fw_name = "{}-{}-{}".format(spectrum_type, edge, ab_idx)
        fws.append(
            EELSFW(ab_idx,
                   structure,
                   feff_input_set=feff_input_set,
                   edge=edge,
                   radius=radius,
                   beam_energy=beam_energy,
                   beam_direction=beam_direction,
                   collection_angle=collection_angle,
                   convergence_angle=convergence_angle,
                   user_eels_settings=user_eels_settings,
                   feff_cmd=feff_cmd,
                   db_file=db_file,
                   metadata=fw_metadata,
                   name=fw_name,
                   override_default_feff_params=override_default_feff_params))

    wfname = "{}:{}:{} edge".format(structure.composition.reduced_formula,
                                    "{} spectroscopy".format(spectrum_type),
                                    edge)
    wf_metadata = dict(metadata) if metadata else {}
    wf_metadata["absorbing_atom_indices"] = list(ab_atom_indices)

    return Workflow(fws, name=wfname, metadata=wf_metadata)
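A usage sketch for an oxygen K-edge ELNES run; the structure file and beam settings are illustrative:

from pymatgen.core import Structure

structure = Structure.from_file("POSCAR")  # hypothetical input structure
wf = get_wf_eels("O", structure=structure, edge="K", beam_energy=200,
                 feff_cmd=">>feff_cmd<<", db_file=">>db_file<<")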
Example #47
def get_wf_from_spec_dict(structure, wfspec):
    """
    Load a WF from a structure and a spec dict. This allows simple
    custom workflows to be constructed quickly via a YAML file.

    Args:
        structure (Structure): An input structure object.
        wfspec (dict): A dict specifying workflow. A sample of the dict in
            YAML format for the usual MP workflow is given as follows:

            ```
            fireworks:
            - fw: matmethods.vasp.fireworks.core.OptimizeFW
            - fw: matmethods.vasp.fireworks.core.StaticFW
              params:
                parents: 0
            - fw: matmethods.vasp.fireworks.core.NonSCFUniformFW
              params:
                parents: 1
            - fw: matmethods.vasp.fireworks.core.NonSCFLineFW
              params:
                parents: 1
            common_params:
              db_file: db.json
              $vasp_cmd: $HOME/opt/vasp
            name: bandstructure
            ```

            The `fireworks` key is a list of Fireworks; it is expected that
            all such Fireworks have "structure" as the first argument and
            other optional arguments following that. Each Firework is specified
            via "fw": <explicit path>.

            You can pass arguments into the constructor using the special
            keyword `params`, which is a dict. Any param starting with a $ will
            be expanded using environment variables. If multiple fireworks share
            the same `params`, you can use `common_params` to specify a common
            set of arguments that are passed to all fireworks. Local params
            take precedence over global params.

            Another special keyword is `parents`, which provides
            the *indices* of the parents of that particular Firework in the
            list. This allows you to link the Fireworks into a logical
            workflow.

            Finally, `name` is used to set the Workflow name
            (structure formula + name) which can be helpful in record keeping.

    Returns:
        Workflow
    """

    dec = MontyDecoder()

    def process_params(d):
        decoded = {}
        for k, v in d.items():
            if k.startswith("$"):
                if isinstance(v, list):
                    v = [os.path.expandvars(i) for i in v]
                elif isinstance(v, dict):
                    v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()}
                else:
                    v = os.path.expandvars(v)
            decoded[k.strip("$")] = dec.process_decoded(v)
        return decoded

    fws = []
    common_params = process_params(wfspec.get("common_params", {}))
    for d in wfspec["fireworks"]:
        modname, classname = d["fw"].rsplit(".", 1)
        cls_ = load_class(modname, classname)
        params = process_params(d.get("params", {}))
        for k in common_params:
            if k not in params:  # common params don't override local params
                params[k] = common_params[k]
        if "parents" in params:
            if isinstance(params["parents"], int):
                params["parents"] = fws[params["parents"]]
            else:
                params["parents"] = [fws[parent_idx] for parent_idx in params["parents"]]
        fws.append(cls_(structure, **params))

    wfname = "{}:{}".format(structure.composition.reduced_formula, wfspec["name"]) if \
        wfspec.get("name") else structure.composition.reduced_formula
    return Workflow(fws, name=wfname)
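A usage sketch tying the YAML spec described in the docstring to a call; the spec file name is hypothetical and must follow the fireworks/common_params/name layout shown above:

import yaml
from pymatgen.core import Structure

structure = Structure.from_file("POSCAR")  # hypothetical input structure
with open("bandstructure.yaml") as f:      # hypothetical spec file
    wfspec = yaml.safe_load(f)
wf = get_wf_from_spec_dict(structure, wfspec)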
Example #48
    def get_modified_molecule_workflow(self, directory, reactant, index,
                                       func_group, qchem_input_params,
                                       sp_params, bond_order=1, do_rct=True,
                                       new_dir=None):
        """
        Modify a reactant molecule, mimic that change in the product, and then
        create a workflow with the modified molecules (and any other molecules
        not already in the database).

        Note: this function will check if a substitution is "allowed"; that is,


        :param directory: Subdirectory where the reaction files are.
        :param reactant: File name of the reactant to be modified. It MUST be
            a reactant, and cannot be the product molecule.
        :param index: Index (in the reactant molecule) where the functional
            group is to be substituted.
        :param func_group: Either a string representing a functional group (from
            pymatgen.structure.core.FunctionalGroups), or a Molecule with a
            dummy atom X.
        :param qchem_input_params: Dict of parameters for the Q-Chem input sets
            used in the optimization and frequency calculations.
        :param sp_params: Dict of parameters for the final single-point
            calculation.
        :param bond_order: Order of the bond between the functional group and
            the base molecule. Default 1, for a single bond.
        :param do_rct: If True (default), calculate both the modified reactant and
            the modified product; if False, only calculate for the product.
        :param new_dir: Name of a new directory in which to store the modified
            molecules. Default is None.
        :return: Workflow
        """

        base_path = join(self.base_dir, directory)
        mol_files = [f for f in listdir(base_path) if isfile(join(base_path, f)) and
                     f.endswith(".mol")]
        # For this workflow, assume a single product
        rct_file = [f for f in mol_files if f == reactant][0]
        pro_file = [f for f in mol_files if f.startswith(self.product_pre)][0]

        # Set up - strategy to extract bond orders
        # Node match for isomorphism check
        strat = OpenBabelNN()
        nm = iso.categorical_node_match("specie", "C")

        # Set up molecule graphs, including node attributes
        rct_mg = MoleculeGraph.with_local_env_strategy(get_molecule(join(base_path, rct_file)),
                                                       strat,
                                                       reorder=False,
                                                       extend_structure=False)
        rct_mg.set_node_attributes()
        rct_graph = rct_mg.graph.to_undirected()

        pro_mg = MoleculeGraph.with_local_env_strategy(get_molecule(join(base_path, pro_file)),
                                                       strat,
                                                       reorder=False,
                                                       extend_structure=False)
        pro_mg.set_node_attributes()
        pro_graph = pro_mg.graph.to_undirected()

        # To determine the subgraph of pro_mg that is derived from the reactant
        matcher = iso.GraphMatcher(pro_graph, rct_graph,
                                   node_match=nm)

        if not matcher.subgraph_is_isomorphic():
            raise RuntimeError("Cannot find reactant molecule within product "
                               "molecule.")
        else:
            # keep one subgraph isomorphism (the last one found)
            for mm in matcher.subgraph_isomorphisms_iter():
                mapping = mm

        # Reverse the mapping so it goes from reactant indices to product indices
        mapping = {v: k for k, v in mapping.items()}

        new_path = None
        if new_dir is not None:
            try:
                os.mkdir(join(self.base_dir, new_dir))
            except FileExistsError:
                print("New directory {} already exists in {}".format(new_dir, self.base_dir))

            new_path = join(self.base_dir, new_dir)

        rct_mg.substitute_group(index, func_group, OpenBabelNN,
                                bond_order=bond_order,
                                extend_structure=False)
        pro_mg.substitute_group(mapping[index], func_group, OpenBabelNN,
                                bond_order=bond_order,
                                extend_structure=False)

        rct_name = rct_file.replace(".mol", "{}{}".format(func_group, index))
        pro_name = pro_file.replace(".mol", "{}{}".format(func_group, index))

        if new_path is None:
            new_path = base_path

        rct_mg.molecule.to(fmt="mol", filename=join(new_path, rct_name + ".mol"))
        pro_mg.molecule.to(fmt="mol", filename=join(new_path, pro_name + ".mol"))

        for mol_file in mol_files:
            if mol_file != pro_file and mol_file != rct_file:
                shutil.copyfile(join(base_path, mol_file), join(new_path, mol_file))

        fws = []

        fws.append(OptFreqSPFW(molecule=pro_mg.molecule,
                               name="Modification: {}/{}".format(new_path, pro_name),
                               qchem_cmd="qchem -slurm",
                               input_file=join(new_path, pro_name + ".in"),
                               output_file=join(new_path, pro_name + ".out"),
                               qclog_file=join(new_path, pro_name + ".qclog"),
                               max_cores=24,
                               qchem_input_params=qchem_input_params,
                               sp_params=sp_params,
                               db_file=self.db_file))

        if do_rct:
            fws.append(OptFreqSPFW(molecule=rct_mg.molecule,
                                   name="Modification: {}/{}".format(new_path,
                                                                     rct_name),
                                   qchem_cmd="qchem -slurm",
                                   input_file=join(new_path, rct_name + ".in"),
                                   output_file=join(new_path,
                                                    rct_name + ".out"),
                                   qclog_file=join(new_path,
                                                   rct_name + ".qclog"),
                                   max_cores=24,
                                   qchem_input_params=qchem_input_params,
                                   sp_params=sp_params,
                                   db_file=self.db_file))

        return Workflow(fws)
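A usage sketch; builder stands for the (unshown) instance that carries base_dir, product_pre, and db_file, and the directory, file name, atom index, and functional-group key are all hypothetical:

wf = builder.get_modified_molecule_workflow(
    "reaction_042",          # hypothetical reaction subdirectory
    "rct_1_abc123.mol",      # hypothetical reactant file in that directory
    3,                       # atom index where the substitution is made
    "methyl",                # hypothetical key into pymatgen's FunctionalGroups
    qchem_input_params={},
    sp_params={},
    new_dir="reaction_042_methyl_3")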
Example #49
    def add_wflow(self, params, name):
        # create an atoms object and encode it
        atoms = read(self.poscar_file)
        if self.mode == 'perturbations':
            # substitute the dummy species at the target position before encoding
            ch_symbols = atoms.get_chemical_symbols()
            atom_ucalc = ch_symbols[self.dummy_position]
            ch_symbols[self.dummy_position] = self.dummy_atom
            atoms.set_chemical_symbols(ch_symbols)
        encode = atoms_to_encode(atoms)

        # here we will collect all fireworks of our workflow
        fireworks = []

        if self.mode == 'relax':
            relax_firetask = VaspCalculationTask(
                calc_params=params,
                encode=encode,
                magmoms=self.magmoms,
            )
            relax_firework = Firework([relax_firetask],
                                      name='relax',
                                      spec={'_pass_job_info': True},
                                      fw_id=0)
            fireworks.append([relax_firework])

            energy_params = {}
            for key, value in params.items():
                if key not in [
                        'ediffg', 'ibrion', 'isif', 'nsw', 'potim', 'ismear',
                        'sigma'
                ]:
                    energy_params[key] = value
            energy_params['ismear'] = -5
            energy_params['sigma'] = 0.05
            energy_params['nelm'] = 200

            # calculate energy
            if self.magmoms.any():
                sp_firetask = VaspCalculationTask(
                    calc_params=energy_params,
                    magmoms='previous',
                )
            else:
                sp_firetask = VaspCalculationTask(
                    calc_params=energy_params,
                    magmoms=self.magmoms,
                )
        else:
            sp_firetask = VaspCalculationTask(
                calc_params=params,
                encode=encode,
                magmoms=self.magmoms,
            )

        sp_firework = Firework(
            [sp_firetask],
            name='singlepoint',
            spec={'_pass_job_info': True},
            fw_id=1,
        )
        fireworks.append([sp_firework])

        if self.mode == 'perturbations':
            next_id = 2
            nsc_fireworks = []
            sc_fireworks = []
            out_fireworks = []
            for perturbation in self.pert_values:
                nsc_firetask = VaspCalculationTask(
                    calc_params=params,
                    encode=encode,
                    magmoms=self.magmoms,
                    pert_step='NSC',
                    pert_value=perturbation,
                    dummy_atom=self.dummy_atom,
                    atom_ucalc=atom_ucalc,
                )

                nsc_firework = Firework(
                    [nsc_firetask],
                    name='nsc',
                    spec={'_pass_job_info': True},
                    fw_id=next_id,
                )

                nsc_fireworks.append(nsc_firework)
                next_id += 1

                sc_firetask = VaspCalculationTask(
                    calc_params=params,
                    encode=encode,
                    magmoms=self.magmoms,
                    pert_step='SC',
                    pert_value=perturbation,
                    dummy_atom=self.dummy_atom,
                    atom_ucalc=atom_ucalc,
                )

                sc_firework = Firework(
                    [sc_firetask],
                    name='sc',
                    spec={'_pass_job_info': True},
                    fw_id=next_id,
                )

                sc_fireworks.append(sc_firework)
                next_id += 1

                out_firetask = WriteChargesTask(
                    filename='charges.txt',
                    pert_value=perturbation,
                    dummy_atom=self.dummy_atom,
                )
                out_firework = Firework(
                    [out_firetask],
                    name='write_charges',
                    spec={
                        '_queueadapter': {
                            'ntasks': 1,
                            'walltime': '00:30:00'
                        }
                    },
                    fw_id=next_id,
                )

                out_fireworks.append(out_firework)
                next_id += 1

            fireworks.append(nsc_fireworks)
            fireworks.append(sc_fireworks)
            fireworks.append(out_fireworks)
        else:
            # write output
            output_firetask = WriteOutputTask(
                system=name,
                filename=(f"{atoms.get_chemical_formula(mode='hill', empirical=True)}"
                          f"_{self.mode}.txt"),
                initial_magmoms=self.magmoms,
                read_enthalpy=False,
                energy_convergence=self.energy_convergence,
            )
            output_firework = Firework(
                [output_firetask],
                name='write_output',
                spec={'_queueadapter': {
                    'ntasks': 1,
                    'walltime': '00:30:00'
                }},
                fw_id=2,
            )
            fireworks.append([output_firework])

        # package the fireworks into a workflow and submit to the launchpad
        flat_fireworks = [fw for sublist in fireworks for fw in sublist]

        # link consecutive levels: fan out from a single firework, fan in
        # to a single firework, or pair fireworks one-to-one
        links_dict = {}
        for i, level in enumerate(fireworks[:-1]):
            next_level = fireworks[i + 1]
            if len(level) == 1:
                links_dict[level[0].fw_id] = [
                    item.fw_id for item in next_level
                ]
            elif len(next_level) == 1:
                for fw in level:
                    links_dict[fw.fw_id] = [next_level[0].fw_id]
            else:
                for j, fw in enumerate(level):
                    links_dict[fw.fw_id] = [next_level[j].fw_id]

        workflow = Workflow(flat_fireworks, name=name, links_dict=links_dict)
        launchpad.add_wf(workflow)
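A minimal sketch (not part of the example above) of the same level-to-level linking pattern, with FireWorks' built-in ScriptTask standing in for the VASP firetasks; the names, commands, and fw_ids are illustrative assumptions.

from fireworks import Firework, ScriptTask, Workflow

# three single-member levels, mirroring relax -> singlepoint -> write_output
relax = Firework([ScriptTask.from_str('echo relax')], name='relax', fw_id=-1)
single = Firework([ScriptTask.from_str('echo singlepoint')], name='singlepoint', fw_id=-2)
output = Firework([ScriptTask.from_str('echo output')], name='write_output', fw_id=-3)

# each level points at every member of the next; all levels here have one member
links_dict = {-1: [-2], -2: [-3]}
wf = Workflow([relax, single, output], links_dict=links_dict, name='linking demo')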
Example #50
def get_wf_elastic_constant(structure, strain_states=None, stencils=None,
                            db_file=None,
                            conventional=False, order=2, vasp_input_set=None,
                            analysis=True,
                            sym_reduce=False, tag='elastic',
                            copy_vasp_outputs=False, **kwargs):
    """
    Returns a workflow to calculate elastic constants.

    Firework 1 : write vasp input set for structural relaxation,
                 run vasp,
                 pass run location,
                 database insertion.

    Firework 2 - number of total deformations: Static runs on the deformed structures

    last Firework : Analyze Stress/Strain data and fit the elastic tensor

    Args:
        structure (Structure): input structure to be optimized and run.
        strain_states (list of Voigt-notation strains): list of ratios of nonzero elements
            of Voigt-notation strain, e.g. [(1, 0, 0, 0, 0, 0), (0, 1, 0, 0, 0, 0), etc.].
        stencils (list of floats, or list of list of floats): values of strain to multiply
            by for each strain state, i.e. stencil for the perturbation along the strain
            state direction, e.g. [-0.01, -0.005, 0.005, 0.01].  If a list of lists,
            stencils must correspond to each strain state provided.
        db_file (str): path to file containing the database credentials.
        conventional (bool): flag to convert input structure to conventional structure,
            defaults to False.
        order (int): order of the tensor expansion to be determined.  Defaults to 2 and
            currently supports up to 3.
        vasp_input_set (VaspInputSet): vasp input set to be used.  Defaults to static
            set with ionic relaxation parameters set.  Take care if replacing this,
            default ensures that ionic relaxation is done and that stress is calculated
            for each vasp run.
        analysis (bool): flag to indicate whether analysis task should be added
            and stresses and strains passed to that task
        sym_reduce (bool): Whether or not to apply symmetry reductions
        tag (str): unique label appended to the Firework names so that the tagged
            runs can be queried later during analysis.
        copy_vasp_outputs (bool): whether or not to copy previous vasp outputs.
        kwargs (keyword arguments): additional kwargs to be passed to get_wf_deformations

    Returns:
        Workflow
    """
    # Convert to conventional if specified
    if conventional:
        structure = SpacegroupAnalyzer(
            structure).get_conventional_standard_structure()

    uis_elastic = {"IBRION": 2, "NSW": 99, "ISIF": 2, "ISTART": 1,
                   "PREC": "High"}
    vis = vasp_input_set or MPStaticSet(structure,
                                        user_incar_settings=uis_elastic)
    strains = []
    if strain_states is None:
        strain_states = get_default_strain_states(order)
    if stencils is None:
        stencils = [np.linspace(-0.01, 0.01, 5 + (order - 2) * 2)] * len(
            strain_states)
    if np.array(stencils).ndim == 1:
        stencils = [stencils] * len(strain_states)
    for state, stencil in zip(strain_states, stencils):
        strains.extend(
            [Strain.from_voigt(s * np.array(state)) for s in stencil])

    # Remove zero strains
    strains = [strain for strain in strains if not (abs(strain) < 1e-10).all()]
    vstrains = [strain.voigt for strain in strains]
    if np.linalg.matrix_rank(vstrains) < 6:
        # TODO: check for sufficiency of input for nth order
        raise ValueError("Strain list is insufficient to fit an elastic tensor")

    deformations = [s.get_deformation_matrix() for s in strains]

    if sym_reduce:
        # Note this casts deformations to a TensorMapping
        # with unique deformations as keys to symmops
        deformations = symmetry_reduce(deformations, structure)

    wf_elastic = get_wf_deformations(structure, deformations, tag=tag,
                                     db_file=db_file,
                                     vasp_input_set=vis,
                                     copy_vasp_outputs=copy_vasp_outputs,
                                     **kwargs)
    if analysis:
        defo_fws_and_tasks = get_fws_and_tasks(wf_elastic,
                                               fw_name_constraint="deformation",
                                               task_name_constraint="Transmuted")
        for idx_fw, idx_t in defo_fws_and_tasks:
            defo = wf_elastic.fws[idx_fw].tasks[idx_t][
                'transformation_params'][0]['deformation']
            pass_dict = {
                'strain': Deformation(defo).green_lagrange_strain.tolist(),
                'stress': '>>output.ionic_steps.-1.stress',
                'deformation_matrix': defo}
            if sym_reduce:
                pass_dict.update({'symmops': deformations[defo]})

            mod_spec_key = "deformation_tasks->{}".format(idx_fw)
            pass_task = pass_vasp_result(pass_dict=pass_dict,
                                         mod_spec_key=mod_spec_key)
            wf_elastic.fws[idx_fw].tasks.append(pass_task)

        fw_analysis = Firework(
            ElasticTensorToDb(structure=structure, db_file=db_file,
                              order=order, fw_spec_field='tags'),
            name="Analyze Elastic Data", spec={"_allow_fizzled_parents": True})
        wf_elastic.append_wf(Workflow.from_Firework(fw_analysis),
                             wf_elastic.leaf_fw_ids)

    wf_elastic.name = "{}:{}".format(structure.composition.reduced_formula,
                                     "elastic constants")

    return wf_elastic
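A hedged usage sketch for the function above: the simple-cubic Cu cell is an illustrative placeholder, and ">>db_file<<" relies on atomate's env_chk substitution.

from pymatgen.core import Lattice, Structure

# illustrative structure; replace with a real relaxed cell
structure = Structure(Lattice.cubic(3.6), ["Cu"], [[0.0, 0.0, 0.0]])

# default second-order strain states and stencils, with symmetry reduction
wf = get_wf_elastic_constant(structure, conventional=True, sym_reduce=True,
                             db_file=">>db_file<<")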
Example #51
def get_wf_gibbs_free_energy(structure, deformations, vasp_input_set=None, vasp_cmd="vasp",
                             db_file=None, user_kpoints_settings=None, t_step=10, t_min=0,
                             t_max=1000, mesh=(20, 20, 20), eos="vinet", qha_type="debye_model",
                             pressure=0.0, poisson=0.25, anharmonic_contribution=False,
                             metadata=None, tag=None):
    """
    Returns quasi-harmonic gibbs free energy workflow.
    Note: phonopy package is required for the final analysis step if qha_type="phonopy"

    Args:
        structure (Structure): input structure.
        deformations (list): list of deformation matrices (list of lists).
        vasp_input_set (VaspInputSet)
        vasp_cmd (str): vasp command to run.
        db_file (str): path to the db file.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        t_step (float): temperature step (in K)
        t_min (float): min temperature (in K)
        t_max (float): max temperature (in K)
        mesh (list/tuple): reciprocal space density
        eos (str): equation of state used for fitting the energies and the volumes.
            options supported by phonopy: "vinet", "murnaghan", "birch_murnaghan".
            Note: pymatgen supports more options than phonopy. see pymatgen.analysis.eos.py
        qha_type(str): quasi-harmonic approximation type: "debye_model" or "phonopy",
            default is "debye_model"
        pressure (float): in GPa
        poisson (float): poisson ratio
        anharmonic_contribution (bool): consider anharmonic contributions to
            Gibbs energy from the Debye model. Defaults to False.
        metadata (dict): meta data
        tag (str): something unique to identify the tasks in this workflow. If None a random uuid
            will be assigned.

    Returns:
        Workflow
    """

    tag = tag or "gibbs group: >>{}<<".format(str(uuid4()))

    deformations = [Deformation(defo_mat) for defo_mat in deformations]

    # static input set for the transmuter fireworks
    vis_static = vasp_input_set
    if vis_static is None:
        lepsilon = False
        if qha_type not in ["debye_model"]:
            lepsilon = True
            try:
                from phonopy import Phonopy
            except ImportError:
                raise RuntimeError("'phonopy' package is NOT installed but is required for the final "
                                   "analysis step; you can alternatively switch qha_type to "
                                   "'debye_model', which does not require 'phonopy'.")
        vis_static = MPStaticSet(structure, force_gamma=True, lepsilon=lepsilon,
                                 user_kpoints_settings=user_kpoints_settings)

    wf_gibbs = get_wf_deformations(structure, deformations, name="gibbs deformation",
                                   vasp_cmd=vasp_cmd, db_file=db_file, tag=tag, metadata=metadata,
                                   vasp_input_set=vis_static)

    fw_analysis = Firework(GibbsAnalysisToDb(tag=tag, db_file=db_file, t_step=t_step, t_min=t_min,
                                             t_max=t_max, mesh=mesh, eos=eos, qha_type=qha_type,
                                             pressure=pressure, poisson=poisson, metadata=metadata,
                                             anharmonic_contribution=anharmonic_contribution,),
                           name="Gibbs Free Energy")

    wf_gibbs.append_wf(Workflow.from_Firework(fw_analysis), wf_gibbs.leaf_fw_ids)

    wf_gibbs.name = "{}:{}".format(structure.composition.reduced_formula, "gibbs free energy")

    return wf_gibbs
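A minimal usage sketch, assuming the structure object from the previous sketch; the seven isotropic deformations are an arbitrary illustrative volume scan.

import numpy as np

# isotropic scaling from -3% to +3%; the workflow wraps these in Deformation objects
deformations = [(np.eye(3) * (1.0 + eps)).tolist()
                for eps in np.linspace(-0.03, 0.03, 7)]
wf_gibbs = get_wf_gibbs_free_energy(structure, deformations,
                                    qha_type="debye_model", t_max=1000)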
Example #52
def get_wf_magnetic_deformation(structure, c=None, vis=None):
    """
    Minimal workflow to obtain magnetic deformation proxy, as
    defined by Bocarsly et al. 2017, doi: 10.1021/acs.chemmater.6b04729

    Args:
        structure: input structure; must contain magnetic elements, such
            that pymatgen will initialize a ferromagnetic input by
            default -- see MPRelaxSet.yaml for the list of default elements
        c: workflow config dict, in the same format as in presets/core.py
            and elsewhere in atomate
        vis: a VaspInputSet to use for the first FW

    Returns: Workflow
    """

    if not structure.is_ordered:
        raise ValueError(
            "Please obtain an ordered approximation of the input structure."
        )

    structure = structure.get_primitive_structure(use_site_props=True)

    # using a uuid for book-keeping,
    # in a similar way to other workflows
    uuid = str(uuid4())

    c_defaults = {"vasp_cmd": VASP_CMD, "db_file": DB_FILE}
    if c:
        c.update(c_defaults)
    else:
        c = c_defaults

    wf = get_wf(structure, "magnetic_deformation.yaml", common_params=c, vis=vis)

    fw_analysis = Firework(
        MagneticDeformationToDB(
            db_file=DB_FILE, wf_uuid=uuid, to_db=c.get("to_db", True)
        ),
        name="MagneticDeformationToDB",
    )

    wf.append_wf(Workflow.from_Firework(fw_analysis), wf.leaf_fw_ids)

    wf = add_common_powerups(wf, c)

    if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
        wf = add_wf_metadata(wf, structure)

    wf = add_additional_fields_to_taskdocs(
        wf,
        {
            "wf_meta": {
                "wf_uuid": uuid,
                "wf_name": "magnetic_deformation",
                "wf_version": __magnetic_deformation_wf_version__,
            }
        },
    )

    return wf
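A one-line usage sketch, assuming an ordered magnetic structure such as the illustrative bcc Fe cell below (pymatgen imports as in the earlier sketch).

# conventional bcc Fe cell; the lattice constant is an approximate literature value
structure_fe = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
                         [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
wf = get_wf_magnetic_deformation(structure_fe)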
Example #53
def get_wf_deformations(structure,
                        deformations,
                        name="deformation",
                        vasp_input_set=None,
                        lepsilon=False,
                        vasp_cmd="vasp",
                        db_file=None,
                        user_kpoints_settings=None,
                        pass_stress_strain=False,
                        tag="",
                        relax_deformed=False):
    """
    Returns a structure deformation workflow.

    Firework 1 : structural relaxation

    Firework 2 - len(deformations): Deform the optimized structure and run static calculations.


    Args:
        structure (Structure): input structure to be optimized and run
        deformations (list of 3x3 array-likes): list of deformations
        name (str): some appropriate name for the transmuter fireworks.
        vasp_input_set (DictVaspInputSet): vasp input set.
        lepsilon (bool): whether or not to compute the static dielectric constant/normal modes
        vasp_cmd (str): command to run
        db_file (str): path to file containing the database credentials.
        user_kpoints_settings (dict): example: {"grid_density": 7000}
        pass_stress_strain (bool): if True, stress and strain will be parsed and passed on.
        tag (str): some unique string that will be appended to the names of the fireworks so that
            the data from those tagged fireworks can be queried later during the analysis.
        relax_deformed (bool): whether to allow ionic relaxation (IBRION = 2) in the
            static runs on the deformed structures.

    Returns:
        Workflow
    """
    # input set for relaxation
    vis_relax = vasp_input_set or MPRelaxSet(structure, force_gamma=True)
    if user_kpoints_settings:
        v = vis_relax.as_dict()
        v.update({"user_kpoints_settings": user_kpoints_settings})
        vis_relax = vis_relax.__class__.from_dict(v)

    uis_static = {"ISIF": 2, "ISTART": 1}
    if relax_deformed:
        uis_static["IBRION"] = 2

    # static input set
    vis_static = MPStaticSet(structure,
                             force_gamma=True,
                             lepsilon=lepsilon,
                             user_kpoints_settings=user_kpoints_settings,
                             user_incar_settings=uis_static)

    # Structure optimization firework
    fws = [
        OptimizeFW(structure=structure,
                   vasp_input_set=vis_relax,
                   vasp_cmd=vasp_cmd,
                   db_file=db_file,
                   name="{} structure optimization".format(tag))
    ]

    # Deformation fireworks with the task to extract and pass stress-strain appended to it.
    for deformation in deformations:
        fw = TransmuterFW(name="{} {}".format(tag, name),
                          structure=structure,
                          transformations=['DeformStructureTransformation'],
                          transformation_params=[{
                              "deformation":
                              deformation.tolist()
                          }],
                          vasp_input_set=vis_static,
                          copy_vasp_outputs=True,
                          parents=fws[0],
                          vasp_cmd=vasp_cmd,
                          db_file=db_file)
        if pass_stress_strain:
            fw.spec['_tasks'].append(
                PassStressStrainData(
                    deformation=deformation.tolist()).to_dict())
        fws.append(fw)

    wfname = "{}:{}".format(structure.composition.reduced_formula, name)

    return Workflow(fws, name=wfname)
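Finally, a hedged usage sketch for get_wf_deformations itself; the three isotropic deformations and the tag are illustrative values, with the structure object reused from the earlier sketch.

import numpy as np

# identity plus a small compression and expansion; numpy arrays so .tolist() works
deformations = [np.eye(3) * s for s in (0.99, 1.00, 1.01)]
wf = get_wf_deformations(structure, deformations, name="deformation",
                         pass_stress_strain=True, tag="demo")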