Example #1
File: glue_tasks.py  Project: FilipchukB/P1
    def setup_copy(self, from_dir, to_dir=None, filesystem=None, files_to_copy=None,
                   exclude_files=None, from_path_dict=None, suffix=None, 
                   fw_spec=None, continue_on_missing=False):
        """
        setup the copy i.e setup the from directory, filesystem, destination directory etc.

        Args:
            from_dir (str)
            to_dir (str)
            filesystem (str)
            files_to_copy (list): if None all the files in the from_dir will be copied
            exclude_files (list): list of file names to be excluded.
            suffix (str): suffix to append to each filename when copying 
                (e.g., rename 'INCAR' to 'INCAR.precondition')
            continue_on_missing(bool): Whether to continue copying when a file
                in filenames is missing. Defaults to False.
            from_path_dict (dict): dict specification of the path. If specified must contain atleast
                the key "path" that specifies the path to the from_dir.
        """
        from_path_dict = from_path_dict or {}
        from_dir = env_chk(from_dir, fw_spec, strict=False) or from_path_dict.get("path", None)
        filesystem = filesystem or from_path_dict.get("filesystem", None)
        if from_dir is None:
            raise ValueError("Must specify from_dir!")
        self.fileclient = FileClient(filesystem=filesystem)
        self.from_dir = self.fileclient.abspath(from_dir)
        self.to_dir = env_chk(to_dir, fw_spec, strict=False) or os.getcwd()
        exclude_files = exclude_files or []
        self.files_to_copy = files_to_copy or [
            f for f in self.fileclient.listdir(self.from_dir) if f not in exclude_files
        ]
        self.suffix = suffix
        self.continue_on_missing = continue_on_missing
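
As a stdlib-only sketch of the selection logic above (the helper name select_files is made up; the real task delegates listing and copying to FileClient):

import os

def select_files(from_dir, files_to_copy=None, exclude_files=(), suffix=None):
    # explicit list wins; otherwise everything in from_dir minus the exclusions
    names = files_to_copy or [f for f in os.listdir(from_dir) if f not in exclude_files]
    # map source name -> destination name, appending the suffix as in the docstring
    return {f: f + suffix if suffix else f for f in names}

# e.g. select_files(".", exclude_files=["POTCAR"], suffix=".precondition")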
Example #2
    def run_task(self, fw_spec):

        handler_groups = {
            "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(), UnconvergedErrorHandler(),
                        NonConvergingErrorHandler(), PotimErrorHandler(), PositiveEnergyErrorHandler(),
                        FrozenJobErrorHandler()],
            "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(), UnconvergedErrorHandler(),
                       NonConvergingErrorHandler(), PotimErrorHandler(), PositiveEnergyErrorHandler(),
                       FrozenJobErrorHandler(), AliasingErrorHandler()],
            "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
            "no_handler": []
        }

        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
        if isinstance(vasp_cmd, six.string_types):
            vasp_cmd = os.path.expandvars(vasp_cmd)
            vasp_cmd = shlex.split(vasp_cmd)

        # initialize variables
        job_type = self.get("job_type", "normal")
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        gzip_output = self.get("gzip_output", True)
        max_errors = self.get("max_errors", 5)
        auto_npar = env_chk(self.get("auto_npar"), fw_spec, strict=False, default=False)
        gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec, strict=False, default=None)
        if gamma_vasp_cmd:
            gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

        # construct jobs
        if job_type == "normal":
            jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, gamma_vasp_cmd=gamma_vasp_cmd)]
        elif job_type == "double_relaxation_run":
            jobs = VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
                                                 half_kpts_first_relax=False)
        elif job_type == "full_opt_run":
            jobs = VaspJob.full_opt_run(vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
                                        max_steps=5, half_kpts_first_relax=False)
        else:
            raise ValueError("Unsupported job type: {}".format(job_type))

        # construct handlers
        handlers = handler_groups[self.get("handler_group", "default")]

        if self.get("max_force_threshold"):
            handlers.append(MaxForceErrorHandler(max_force_threshold=self["max_force_threshold"]))

        if self.get("wall_time"):
            handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

        validators = [VasprunXMLValidator(), VaspFilesValidator()]

        c = Custodian(handlers, jobs, validators=validators, max_errors=max_errors,
                      scratch_dir=scratch_dir, gzipped_output=gzip_output)

        c.run()
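
The vasp_cmd normalization at the top of run_task is easy to check in isolation; a self-contained sketch with a made-up path:

import os
import shlex

os.environ["VASP_BIN"] = "/opt/vasp/bin/vasp_std"  # stand-in value for this demo
vasp_cmd = "srun -n 16 $VASP_BIN"
vasp_cmd = os.path.expandvars(vasp_cmd)  # "srun -n 16 /opt/vasp/bin/vasp_std"
vasp_cmd = shlex.split(vasp_cmd)         # ["srun", "-n", "16", "/opt/vasp/bin/vasp_std"]
print(vasp_cmd)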
Example #3
    def run_task(self, fw_spec):
        cmd = env_chk(self["qchem_cmd"], fw_spec)
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        if scratch_dir is None:
            scratch_dir = "/dev/shm/qcscratch/"
        # os.putenv would not update os.environ; direct assignment does both
        os.environ["QCSCRATCH"] = scratch_dir

        logger.info("Running command: {}".format(cmd))
        return_code = subprocess.call(cmd, shell=True)
        logger.info("Command {} finished running with return code: {}".format(
            cmd, return_code))
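
A minimal demonstration that a value assigned through os.environ is inherited by the child shell spawned by subprocess (the directory name is arbitrary):

import os
import subprocess

os.environ["QCSCRATCH"] = "/tmp/qcscratch"  # visible to the child process below
subprocess.call("echo QCSCRATCH=$QCSCRATCH", shell=True)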
Example #4
    def run_task(self, fw_spec):
        # get the directory that contains the dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]

        # parse the calc directory
        logger.info("PARSING DIRECTORY: {} USING DRONE: {}".format(
            calc_dir, self["drone"].__class__.__name__))
        # get the database connection
        db_file = env_chk(self.get("db_file"), fw_spec)

        drone = self["drone"].__class__()
        task_doc = drone.assimilate(calc_dir)
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb_str = self["mmdb"]
            modname, classname = mmdb_str.strip().rsplit(".", 1)
            cls_ = load_class(modname, classname)
            db = cls_.from_db_file(db_file)

            # insert the task document
            t_id = db.insert(task_doc)
            logger.info(f"Finished parsing with task_id: {t_id}")

        return FWAction(
            stored_data={"task_id": task_doc.get("task_id", None)},
            defuse_children=(task_doc["state"] != "successful"),
        )
Example #5
    def run_task(self, fw_spec):

        # Get num of electrons and bands from static calc
        uuid = self["wf_uuid"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        db = VaspCalcDb.from_db_file(db_file, admin=True)
        db.collection = db.db["tasks"]

        task_doc = db.collection.find_one(
            {"wf_meta.wf_uuid": uuid, "task_label": "static"}, ["input.parameters"]
        )

        nelec = int(task_doc["input"]["parameters"]["NELECT"])
        nbands = int(task_doc["input"]["parameters"]["NBANDS"])

        w90_file = [
            "num_wann = %d" % (nelec),
            "num_bands = %d" % (nelec),  # 1 band / elec with SOC
            "spinors=.true.",
            "num_iter 0",
            "shell_list 1",
            "exclude_bands %d-%d" % (nelec + 1, 2 * nbands),
        ]

        w90_file = "\n".join(w90_file)

        with open("wannier90.win", "w") as f:
            f.write(w90_file)

        return FWAction()
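
With made-up electron and band counts standing in for the NELECT/NBANDS values read from the static calc, the generated wannier90.win looks like this:

nelec, nbands = 8, 12  # hypothetical values for illustration

w90_file = "\n".join([
    "num_wann = %d" % nelec,
    "num_bands = %d" % nelec,  # 1 band / elec with SOC
    "spinors=.true.",
    "num_iter 0",
    "shell_list 1",
    "exclude_bands %d-%d" % (nelec + 1, 2 * nbands),  # "exclude_bands 9-24" here
])
print(w90_file)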
Example #6
    def run_task(self, fw_spec):

        irvsp = self["irvsp_out"] or fw_spec["irvsp_out"]

        irvsp = jsanitize(irvsp)

        additional_fields = self.get("additional_fields", {})
        d = additional_fields.copy()
        d["wf_uuid"] = self["wf_uuid"]
        d["formula"] = fw_spec["formula"]
        d["efermi"] = fw_spec["efermi"]
        d["structure"] = fw_spec["structure"]
        d["irvsp"] = irvsp

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("irvsp.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["irvsp"]
            db.collection.insert_one(d)
            logger.info("IRVSP calculation complete.")
        return FWAction()
Example #7
File: exchange.py  Project: samblau/atomate
    def run_task(self, fw_spec):

        db_file = env_chk(self["db_file"], fw_spec)
        wf_uuid = self["wf_uuid"]

        hmodel = loadfn("heisenberg_model.json")

        # Exchange collection
        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        mmdb.collection = mmdb.db["exchange"]

        hmodel_dict = hmodel.as_dict()

        parent_structure = hmodel.structures[0]
        formula_pretty = parent_structure.composition.reduced_formula

        wf_meta = {"wf_uuid": wf_uuid}
        task_doc = {
            "wf_meta": wf_meta,
            "formula_pretty": formula_pretty,
            "nn_cutoff": hmodel.cutoff,
            "nn_tol": hmodel.tol,
            "heisenberg_model": hmodel_dict,
            "task_name": "heisenberg model",
        }

        if fw_spec.get("tags", None):
            task_doc["tags"] = fw_spec["tags"]

        mmdb.collection.insert_one(task_doc)
Example #8
File: ftasks.py  Project: kcbhamu/dfttk
    def run_task(self, fw_spec):
        symm_check_data = self.check_symmetry()
        passed = symm_check_data["symmetry_checks_passed"]
        cur_isif = symm_check_data["isif"]
        next_steps = self.get_next_steps(passed, cur_isif)

        symm_check_data.update({
            "tag": self["tag"],
            "metadata": self.get("metadata"),
            "version_info": {
                "atomate": atomate_ver,
                "dfttk": dfttk_ver,
                "pymatgen": pymatgen_ver,
            },
            "next_steps": next_steps,
        })

        # write to JSON for debugging purposes
        with open('relaxation_check_summary.json', 'w') as fp:
            json.dump(symm_check_data, fp)

        self.db_file = env_chk(self.get("db_file"), fw_spec)
        vasp_db = VaspCalcDb.from_db_file(self.db_file, admin=True)
        vasp_db.db['relaxations'].insert_one(symm_check_data)

        return FWAction(detours=self.get_detour_workflow(next_steps, symm_check_data['final_energy_per_atom']))
Example #9
    def run_task(self, fw_spec):
        calc_locs = list(fw_spec.get("calc_locs", []))
        calc_locs.append({"name": self["name"],
                          "filesystem": env_chk(self.get('filesystem', None), fw_spec),
                          "path": self.get("path", os.getcwd())})

        return FWAction(mod_spec=[{'_push_all': {'calc_locs': calc_locs}}])
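
A rough stand-in for how FireWorks applies the _push_all dict-mod to a child Firework's spec (the helper name is made up; see fireworks.utilities.dict_mods for the real implementation):

def push_all(spec, key, items):
    # extend the list stored at `key` with every element of `items`
    spec.setdefault(key, []).extend(items)
    return spec

child_spec = {}
push_all(child_spec, "calc_locs", [{"name": "opt", "filesystem": None, "path": "/scratch/run1"}])
print(child_spec["calc_locs"])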
Example #10
    def run_task(self, fw_spec):

        from pymatgen.analysis.eos import EOS

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        summary_dict = {"eos": self["eos"]}

        mmdb = MMVaspDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} bulk_modulus*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula})
        energies = []
        volumes = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes

        # fit the equation of state
        eos = EOS(self["eos"])
        eos_fit = eos.fit(volumes, energies)
        summary_dict["results"] = dict(eos_fit.results)

        with open("bulk_modulus.json", "w") as f:
            f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        logger.info("BULK MODULUS CALCULATION COMPLETE")
Example #11
    def run_task(self, fw_spec):
        # get the directory that contains the dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the calc directory
        logger.info("PARSING DIRECTORY: {} USING DRONE: {}".format(
            calc_dir, self['drone'].__class__.__name__))
        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        drone = self['drone'].__class__()
        task_doc = drone.assimilate(calc_dir)
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = self["mmdb"]
            db = mmdb.__class__.from_db_file(db_file)
            # insert the task document
            t_id = db.insert(task_doc)
            logger.info("Finished parsing with task_id: {}".format(t_id))

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=(task_doc["state"] != "successful"))
Example #12
File: ftasks.py  Project: imkimhy/dfttk
    def run_task(self, fw_spec):

        tag = self["tag"]
        metadata = self.get('metadata', {})
        metadata['tag'] = tag

        unitcell = Structure.from_file('POSCAR-unitcell')
        supercell_matrix = self['supercell_matrix']
        temperatures, f_vib, s_vib, cv_vib, force_constants = get_f_vib_phonopy(
            unitcell,
            supercell_matrix,
            vasprun_path='vasprun.xml',
            t_min=self['t_min'],
            t_max=self['t_max'],
            t_step=self['t_step'])
        if isinstance(supercell_matrix, np.ndarray):
            supercell_matrix = supercell_matrix.tolist()  # make serializable
        thermal_props_dict = {
            'volume': unitcell.volume,
            'F_vib': f_vib.tolist(),
            'CV_vib': cv_vib.tolist(),
            'S_vib': s_vib.tolist(),
            'temperatures': temperatures.tolist(),
            'force_constants': force_constants.tolist(),
            'metadata': metadata,
            'unitcell': unitcell.as_dict(),
            'supercell_matrix': supercell_matrix,
        }

        # insert into database
        db_file = env_chk(self["db_file"], fw_spec)
        vasp_db = VaspCalcDb.from_db_file(db_file, admin=True)
        vasp_db.db['phonon'].insert_one(thermal_props_dict)
Example #13
    def run_task(self, fw_spec):
        notes = self.get('notes', None)
        tag_id = self['tag_id']

        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)
        mmdb = VaspMDCalcDb.from_db_file(db_file, admin=True)
        mmdb.db.trajectories.find_one_and_delete({"runs_label": tag_id})
        runs = mmdb.db['tasks'].find(
            {"task_label": re.compile(f'.*prod_run.*{tag_id}.*')})

        runs_sorted = sorted(
            runs,
            key=lambda x: int(re.findall(r'run[_-](\d+)', x['task_label'])[0]))

        # Remove duplicates of the same run (if they exist)
        labels = [result['task_label'] for result in runs_sorted]
        nums = [int(re.findall(r'run[_-](\d+)', label)[0]) for label in labels]
        duplicates = np.where((nums - np.roll(nums, 1)) == 0)[0]
        runs_sorted = [
            runs_sorted[i] for i in range(len(runs_sorted))
            if i not in duplicates
        ]

        trajectory_doc = runs_to_trajectory_doc(runs_sorted, db_file, tag_id,
                                                notes)

        mmdb.db.trajectories.insert_one(trajectory_doc)
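
The sort-and-deduplicate logic is worth seeing on toy labels (made up here); np.roll shifts the array so each run number is compared with its predecessor, wrapping around at the ends:

import re
import numpy as np

labels = ["prod_run_1-XYZ", "prod_run_2-XYZ", "prod_run_2-XYZ", "prod_run_3-XYZ"]
nums = np.array([int(re.findall(r"run[_-](\d+)", label)[0]) for label in labels])
duplicates = np.where((nums - np.roll(nums, 1)) == 0)[0]  # indices repeating the previous number
deduped = [labels[i] for i in range(len(labels)) if i not in duplicates]
print(deduped)  # the second "prod_run_2" entry is dropped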
Example #14
    def run_task(self, fw_spec):

        wf_uuid = self["wf_uuid"]

        surfaces = ["kx_0", "kx_1", "ky_0", "ky_1", "kz_0", "kz_1"]

        d = {
            "wf_uuid": wf_uuid,
            "formula": fw_spec["formula"],
            "reduced_formula": fw_spec["reduced_formula"],
            "structure": fw_spec["structure"],
        }

        for surface in surfaces:
            if surface in fw_spec.keys():
                d[surface] = fw_spec[surface]

        d = jsanitize(d)

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("z2pack.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["z2pack"]
            db.collection.insert_one(d)
            logger.info("Z2Pack surface calculation complete.")

        return FWAction()
Example #15
    def run_task(self, fw_spec):

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        qha_type = self.get("qha_type", "debye_model")
        pressure = self.get("pressure", 0.0)
        gibbs_summary_dict = {}

        mmdb = MMVaspDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        gibbs_summary_dict["structure"] = structure.as_dict()

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} gibbs*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula})
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            if qha_type not in ["debye_model"]:
                force_constants.append(d["calcs_reversed"][-1]["output"]['force_constants'])
            volumes.append(s.volume)
        gibbs_summary_dict["energies"] = energies
        gibbs_summary_dict["volumes"] = volumes
        if qha_type not in ["debye_model"]:
            gibbs_summary_dict["force_constants"] = force_constants

        G, T = None, None
        # use debye model
        if qha_type in ["debye_model"]:

            from atomate.tools.analysis import get_debye_model_gibbs

            G, T = get_debye_model_gibbs(energies, volumes, structure, t_min, t_step, t_max, eos,
                                         pressure)

        # use the phonopy interface
        else:

            from atomate.tools.analysis import get_phonopy_gibbs

            G, T = get_phonopy_gibbs(energies, volumes, force_constants, structure, t_min, t_step,
                                     t_max, mesh, eos, pressure)

        gibbs_summary_dict["G"] = G
        gibbs_summary_dict["T"] = T

        with open("gibbs.json", "w") as f:
            f.write(json.dumps(gibbs_summary_dict, default=DATETIME_HANDLER))
        logger.info("GIBBS FREE ENERGY CALCULATION COMPLETE")
Example #16
    def run_task(self, fw_spec):

        from atomate.vasp.analysis.phonopy import get_phonopy_thermal_expansion

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        pressure = self.get("pressure", 0.0)
        summary_dict = {}

        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one(
            {"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(
            d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()
        summary_dict["formula_pretty"] = structure.composition.reduced_formula

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({
            "task_label": {"$regex": "{} thermal_expansion*".format(tag)},
            "formula_pretty": structure.composition.reduced_formula
        })
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(
                d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
            force_constants.append(
                d["calcs_reversed"][-1]["output"]['force_constants'])
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes
        summary_dict["force_constants"] = force_constants

        alpha, T = get_phonopy_thermal_expansion(energies, volumes,
                                                 force_constants, structure,
                                                 t_min, t_step, t_max, mesh,
                                                 eos, pressure)

        summary_dict["alpha"] = alpha
        summary_dict["T"] = T

        with open("thermal_expansion.json", "w") as f:
            f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        # TODO: @matk86 - there needs to be a way to insert this into a database! And also
        # a builder to put it into materials collection... -computron
        logger.info("Thermal expansion coefficient calculation complete.")
Example #17
    def run_task(self, fw_spec):
        cmd = env_chk(self["cmd"], fw_spec)
        if self.get("expand_vars", False):
            cmd = os.path.expandvars(cmd)

        logger.info("Running command: {}".format(cmd))
        return_code = subprocess.call(cmd, shell=True)
        logger.info("Command {} finished running with returncode: {}".format(cmd, return_code))
Example #18
    def run_task(self, fw_spec):
        cmd = env_chk(self["vasp_cmd"], fw_spec)
        if self.get("expand_vars", False):
            cmd = os.path.expandvars(cmd)

        logger.info("Running command: {}".format(cmd))
        return_code = subprocess.call(cmd, shell=True)
        logger.info("Command {} finished running with returncode: {}".format(cmd, return_code))
Example #19
    def run_task(self, fw_spec):
        additional_fields = self.get("additional_fields", {})

        # pass the additional_fields first to avoid overriding BoltztrapAnalyzer items
        d = additional_fields.copy()

        btrap_dir = os.path.join(os.getcwd(), "boltztrap")
        d["boltztrap_dir"] = btrap_dir

        bta = BoltztrapAnalyzer.from_files(btrap_dir)
        d.update(bta.as_dict())
        d["scissor"] = bta.intrans["scissor"]

        # trim the output
        for x in ['cond', 'seebeck', 'kappa', 'hall', 'mu_steps', 'mu_doping', 'carrier_conc']:
            del d[x]

        if not self.get("hall_doping"):
            del d["hall_doping"]

        bandstructure_dir = os.getcwd()
        d["bandstructure_dir"] = bandstructure_dir

        # add the structure
        v, o = get_vasprun_outcar(bandstructure_dir, parse_eigen=False, parse_dos=False)
        structure = v.final_structure
        d["structure"] = structure.as_dict()
        d["formula_pretty"] = structure.composition.reduced_formula
        d.update(get_meta_from_structure(structure))

        # add the spacegroup
        sg = SpacegroupAnalyzer(Structure.from_dict(d["structure"]), 0.1)
        d["spacegroup"] = {"symbol": sg.get_space_group_symbol(),
                           "number": sg.get_space_group_number(),
                           "point_group": sg.get_point_group_symbol(),
                           "source": "spglib",
                           "crystal_system": sg.get_crystal_system(),
                           "hall": sg.get_hall()}

        d["created_at"] = datetime.utcnow()

        db_file = env_chk(self.get('db_file'), fw_spec)

        if not db_file:
            del d["dos"]
            with open(os.path.join(btrap_dir, "boltztrap.json"), "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            mmdb = VaspCalcDb.from_db_file(db_file, admin=True)

            # dos gets inserted into GridFS
            dos = json.dumps(d["dos"], cls=MontyEncoder)
            fsid, compression = mmdb.insert_gridfs(dos, collection="dos_boltztrap_fs",
                                                   compress=True)
            d["dos_boltztrap_fs_id"] = fsid
            del d["dos"]

            mmdb.db.boltztrap.insert_one(d)
Example #20
    def run_task(self, fw_spec):
        # get the directory that contains the QChem dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]
        input_file = self.get("input_file", "mol.qin")
        output_file = self.get("output_file", "mol.qout")
        multirun = self.get("multirun", False)

        # parse the QChem directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        additional_fields = self.get("additional_fields", [])

        drone = QChemDrone(additional_fields=additional_fields)

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(
            path=calc_dir,
            input_file=input_file,
            output_file=output_file,
            multirun=multirun)

        if "tags" in fw_spec:
            task_doc.update({"tags": fw_spec["tags"]})

        # Check for additional keys to set based on the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update({self.get("fw_spec_field"): fw_spec.get(self.get("fw_spec_field"))})

        # Update fw_spec with final/optimized structure
        update_spec = {}
        if task_doc.get("output").get("optimized_molecule"):
            update_spec["prev_calc_molecule"] = task_doc["output"]["optimized_molecule"]
            update_spec["prev_calc_mulliken"] = task_doc["output"]["mulliken"]
            if "RESP" in task_doc["output"]:
                update_spec["prev_calc_resp"] = task_doc["output"]["RESP"]
            elif "ESP" in task_doc["output"]:
                update_spec["prev_calc_esp"] = task_doc["output"]["ESP"]

        # get the database connection
        db_file = env_chk(self.get("db_file"), fw_spec)

        # db insertion or taskdoc dump
        if not db_file:
            with open(os.path.join(calc_dir, "task.json"), "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = QChemCalcDb.from_db_file(db_file, admin=True)
            t_id = mmdb.insert(task_doc)
            logger.info("Finished parsing with task_id: {}".format(t_id))

        return FWAction(
            stored_data={"task_id": task_doc.get("task_id", None)},
            update_spec=update_spec)
Example #21
    def run_task(self, fw_spec):
        nm_norms = np.array(fw_spec["normalmodes"]["norms"])
        nm_eigenvals = np.array(fw_spec["normalmodes"]["eigenvals"])
        structure = fw_spec["normalmodes"]["structure"]
        masses = np.array([site.specie.data['Atomic mass'] for site in structure])
        # the eigenvectors read from vasprun.xml are not divided by sqrt(M_i)
        nm_norms = nm_norms / np.sqrt(masses)

        # To get the actual eigenvals, the values read from vasprun.xml must be multiplied by -1.
        # frequency_i = sqrt(-e_i)
        # To convert the frequency to THZ: multiply sqrt(-e_i) by 15.633
        # To convert the frequency to cm^-1: multiply sqrt(-e_i) by 82.995
        nm_frequencies = np.sqrt(np.abs(nm_eigenvals)) * 82.995  # cm^-1

        d = {"structure": structure.as_dict(),
             "normalmodes": {"eigenvals": fw_spec["normalmodes"]["eigenvals"],
                             "eigenvecs": fw_spec["normalmodes"]["eigenvecs"]
                             },
             "frequencies": nm_frequencies.tolist()
             }

        mode_disps = fw_spec["raman_epsilon"].keys()
        # store the displacement & epsilon for each mode in a dictionary
        modes_eps_dict = defaultdict(list)
        for md in mode_disps:
            modes_eps_dict[fw_spec["raman_epsilon"][md]["mode"]].append(
                [fw_spec["raman_epsilon"][md]["displacement"],
                 fw_spec["raman_epsilon"][md]["epsilon"]])

        # raman tensor = finite difference derivative of epsilon wrt displacement.
        raman_tensor_dict = {}
        scale = np.sqrt(structure.volume/2.0) / 4.0 / np.pi
        for k, v in modes_eps_dict.items():
            raman_tensor = (np.array(v[0][1]) - np.array(v[1][1])) / (v[0][0] - v[1][0])
            # frequency in cm^-1
            omega = nm_frequencies[k]
            if nm_eigenvals[k] > 0:
                logger.warn("Mode: {} is UNSTABLE. Freq(cm^-1) = {}".format(k, -omega))
            raman_tensor = scale * raman_tensor * np.sum(nm_norms[k]) / np.sqrt(omega)
            raman_tensor_dict[str(k)] = raman_tensor.tolist()

        d["raman_tensor"] = raman_tensor_dict
        d["state"] = "successful"

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("raman.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = MMVaspDb.from_db_file(db_file, admin=True)
            db.collection = db.db["raman"]
            db.collection.insert_one(d)
            logger.info("RAMAN TENSOR CALCULATION COMPLETE")
        logger.info("The frequencies are in the units of cm^-1")
        logger.info("To convert the frequency to THz: multiply by 0.1884")
        return FWAction()
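
The frequency conversion used above, isolated with made-up eigenvalues (stable modes carry negative eigenvalues in vasprun.xml's convention):

import numpy as np

nm_eigenvals = np.array([-0.30, -0.12, 0.02])      # illustrative values
freq_cm = np.sqrt(np.abs(nm_eigenvals)) * 82.995   # cm^-1
freq_thz = np.sqrt(np.abs(nm_eigenvals)) * 15.633  # THz
unstable = nm_eigenvals > 0                        # positive eigenvalue => unstable mode
print(freq_cm, freq_thz, unstable)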
Example #22
File: ftasks.py  Project: ghifi37/dfttk
    def run_task(self, fw_spec):
        from atomate.vasp.database import VaspCalcDb
        db_file = env_chk(self["db_file"], fw_spec)
        vasp_db = VaspCalcDb.from_db_file(db_file, admin=True)
        content = {}
        content["metadata"] = self.get('metadata', {})
        content["path"] = fw_spec["calc_locs"][-1]["path"]
        content["Pos_Shape_relax"] = self.get('Pos_Shape_relax')
        vasp_db.db["relax"].insert_one(content)
Example #23
    def run_task(self, fw_spec):
        cmd = env_chk(self["cmd"], fw_spec)
        if self.get("expand_vars", False):
            cmd = os.path.expandvars(cmd)

        logger.info(f"Running command: {cmd}")
        return_code = subprocess.call(cmd, shell=True)
        logger.info(
            f"Command {cmd} finished running with returncode: {return_code}")
Example #24
    def run_task(self, fw_spec):
        lobster_cmd = env_chk(self.get("lobster_cmd"), fw_spec)
        gzip_output = self.get("gzip_output", True)
        gzip_WAVECAR = self.get("gzip_WAVECAR", False)
        if gzip_WAVECAR:
            add_files_to_gzip = VASP_OUTPUT_FILES
        else:
            add_files_to_gzip = [f for f in VASP_OUTPUT_FILES if f not in ["WAVECAR"]]

        handler_groups = {"default": [], "no_handler": []}
        validator_groups = {
            "default": [
                LobsterFilesValidator(),
                EnoughBandsValidator(output_filename="lobsterout"),
            ],
            "strict": [
                ChargeSpillingValidator(output_filename="lobsterout"),
                LobsterFilesValidator(),
                EnoughBandsValidator(output_filename="lobsterout"),
            ],
            "no_validator": [],
        }

        handler_group = self.get("handler_group", "default")
        if isinstance(handler_group, str):
            handlers = handler_groups[handler_group]
        else:
            handlers = handler_group

        validator_group = self.get("validator_group", "default")
        if isinstance(validator_group, str):
            validators = validator_groups[validator_group]
        else:
            validators = validator_group

        # LobsterJob gzips output files, Custodian would gzip all output files (even slurm)
        jobs = [
            LobsterJob(
                lobster_cmd=lobster_cmd,
                output_file="lobster.out",
                stderr_file="std_err_lobster.txt",
                gzipped=gzip_output,
                add_files_to_gzip=add_files_to_gzip,
            )
        ]
        c = Custodian(
            handlers=handlers,
            jobs=jobs,
            validators=validators,
            gzipped_output=False,
            max_errors=5,
        )
        c.run()

        if os.path.exists(zpath("custodian.json")):
            stored_custodian_data = {"custodian": loadfn(zpath("custodian.json"))}
            return FWAction(stored_data=stored_custodian_data)
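
The string-or-list dispatch for handler_group/validator_group reduces to this pattern (the helper name and preset values are made up):

def resolve_group(group, presets):
    # a string selects a named preset; anything else is taken to be
    # an explicit list of handler/validator objects and passed through
    return presets[group] if isinstance(group, str) else group

presets = {"default": ["validator_a", "validator_b"], "no_validator": []}
print(resolve_group("default", presets))  # preset lookup
print(resolve_group([], presets))         # explicit empty list passes through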
Example #25
File: ftasks.py  Project: kcbhamu/dfttk
    def run_task(self, fw_spec):
        db_file = env_chk(self["db_file"], fw_spec)
        vasp_db = VaspCalcDb.from_db_file(db_file, admin=True)
        content = {}
        content["metadata"] = self.get('metadata', {})
        content["path"] = fw_spec["calc_locs"][-1]["path"]
        content["run_isif2"] = self.get('run_isif2')
        content["pass_isif4"] = self.get('pass_isif4')
        vasp_db.db["relax"].insert_one(content)
Example #26
    def run_task(self, fw_spec):
        # get the directory that contains the VASP dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]

        # parse the VASP directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        drone = VaspDrone(additional_fields=self.get("additional_fields"),
                          parse_dos=self.get("parse_dos", False),
                          bandstructure_mode=self.get("bandstructure_mode",
                                                      False))

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(calc_dir)

        # Check for additional keys to set based on the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion or taskdoc dump
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = VaspMDCalcDb.from_db_file(db_file, admin=True)

            # prevent duplicate insertion
            mmdb.db.tasks.find_one_and_delete({
                'formula_pretty': task_doc['formula_pretty'],
                'task_label': task_doc['task_label']
            })

            t_id = mmdb.insert_task(
                task_doc,
                parse_dos=self.get("parse_dos", False),
                parse_bs=bool(self.get("bandstructure_mode", False)),
                md_structures=self.get("md_structures", True))

            logger.info("Finished parsing with task_id: {}".format(t_id))

        if self.get("defuse_unsuccessful", True):
            defuse_children = (task_doc["state"] != "successful")
        else:
            defuse_children = False

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=defuse_children)
Example #27
    def run_task(self, fw_spec):

        from pymatgen.analysis.eos import EOS

        eos = self.get("eos", "vinet")
        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        summary_dict = {"eos": eos}
        to_db = self.get("to_db", True)

        # collect and store task_id of all related tasks to make unique links with "tasks" collection
        all_task_ids = []

        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)})
        all_task_ids.append(d["task_id"])
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()
        summary_dict["formula_pretty"] = structure.composition.reduced_formula

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} bulk_modulus*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula})
        energies = []
        volumes = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
            all_task_ids.append(d["task_id"])
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes
        summary_dict["all_task_ids"] = all_task_ids

        # fit the equation of state
        eos = EOS(eos)
        eos_fit = eos.fit(volumes, energies)
        summary_dict["bulk_modulus"] = eos_fit.b0_GPa

        # TODO: find a better way for passing tags of the entire workflow to db - albalu
        if fw_spec.get("tags", None):
            summary_dict["tags"] = fw_spec["tags"]
        summary_dict["results"] = dict(eos_fit.results)
        summary_dict["created_at"] = datetime.utcnow()

        # db_file itself is required but the user can choose to pass the results to db or not
        if to_db:
            mmdb.collection = mmdb.db["eos"]
            mmdb.collection.insert_one(summary_dict)
        else:
            with open("bulk_modulus.json", "w") as f:
                f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        # TODO: @matk86 - there needs to be a builder to put it into materials collection... -computron
        logger.info("Bulk modulus calculation complete.")
Example #28
    def run_task(self, fw_spec):
        # get the database connection to the approx_neb collection
        db_file = env_chk(self["db_file"], fw_spec)
        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        mmdb.collection = mmdb.db["approx_neb"]

        # get approx_neb doc specified by wf_uuid
        wf_uuid = self.get("approx_neb_wf_uuid")
        approx_neb_doc = mmdb.collection.find_one(
            {"wf_uuid": wf_uuid}, {"pathfinder": 1, "images": 1}
        )

        # get structures stored in "pathfinder" field
        pathfinder_key = self["pathfinder_key"]
        structure_docs = approx_neb_doc["pathfinder"][pathfinder_key]["images"]
        fixed_index = approx_neb_doc["pathfinder"][pathfinder_key][
            "relax_site_indexes"]

        # apply selective dynamics to get images (list of pymatgen structures)
        fixed_specie = self["mobile_specie"]
        scheme = self["selective_dynamics_scheme"]
        images = self.get_images_list(structure_docs, scheme, fixed_index,
                                      fixed_specie)

        # assemble images output to be stored in approx_neb collection
        images_output = []
        for n, image in enumerate(images):
            images_output.append({
                "index": n,
                "input_structure": image.as_dict(),
                "selective_dynamics_scheme": scheme,
            })

        # store images output in approx_neb collection
        if "images" not in approx_neb_doc.keys():
            images_subdoc = {}
        else:
            images_subdoc = approx_neb_doc["images"]
        images_subdoc.update({pathfinder_key: images_output})

        mmdb.collection.update_one(
            {"wf_uuid": wf_uuid},
            {
                "$set": {
                    "images": images_subdoc,
                    "last_updated": datetime.utcnow()
                }
            },
        )

        return FWAction(stored_data={
            "wf_uuid": wf_uuid,
            "images_output": images_output
        })
Example #29
    def run_task(self, fw_spec):
        self.xml = self.get("xml", None)
        self.kmesh_factor = self.get("kmesh_factor", 1)
        with open("KPOINTS", "r") as f:
            kpoints = f.readlines()
        with open("INCAR", "r") as f:
            incar = f.readlines()
        if self.kmesh_factor > 1:
            shutil.copyfile("INCAR", "INCAR.nscf")
            shutil.copyfile("KPOINTS", "KPOINTS.nscf")
            shutil.copyfile("INCAR.Static", "INCAR")
            shutil.copyfile("KPOINTS.Static", "KPOINTS")
        if self.xml is not None:
            with open(self.xml, 'rb') as f:
                xmldata = f.read()
            binxmldata = gzip.compress(xmldata)
            with open("DOSCAR", 'rb') as f:
                doscar = f.read()
            bindoscar = gzip.compress(doscar)
            self.db_file = env_chk(self.get("db_file"), fw_spec)
            self.vasp_db = VaspCalcDb.from_db_file(self.db_file, admin=True)

            structure = self.get('structure', Structure.from_file('POSCAR'))

            xml_data = {
                'metadata': {'tag': self.get('tag')},
                'hostname': socket.gethostname(),
                self.xml.replace(".", "_") + "_gz": bson.Binary(pickle.dumps(binxmldata)),
                'DOSCAR_gz': bson.Binary(pickle.dumps(bindoscar)),
                'volume': structure.volume,
                'INCAR': incar,
                'KPOINTS': kpoints,
                'VASP': get_code_version(),
                'last_updated': datetime.datetime.utcnow(),
                'US_eastern_time': datetime.datetime.now(pytz.timezone('US/Eastern')),
                'structure': structure.as_dict(),
                'formula_pretty': structure.composition.reduced_formula
            }
            self.vasp_db.db['xmlgz'].insert_one(xml_data)
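
A gzip round trip showing what ends up inside the bson.Binary wrapper above (the content is a stand-in; note that bson.Binary accepts bytes directly, so the extra pickle.dumps layer is arguably redundant):

import gzip

raw = b"<modeling>...</modeling>"    # stand-in for the vasprun.xml bytes
blob = gzip.compress(raw)            # what gets stored
assert gzip.decompress(blob) == raw  # lossless on retrieval
print(len(raw), "->", len(blob), "bytes")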
Example #30
    def run_task(self, fw_spec):
        cmd = self.get("qchem_cmd")
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        if scratch_dir is None:
            scratch_dir = "/dev/shm/qcscratch/"
        # os.putenv would not update os.environ; direct assignment does both
        os.environ["QCSCRATCH"] = scratch_dir

        logger.info("Running command: {}".format(cmd))
        return_code = subprocess.call(cmd, shell=True)
        logger.info("Command {} finished running with return code: {}".format(
            cmd, return_code))
Example #31
    def run_task(self, fw_spec):
        # get the directory that contains the VASP dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]

        # parse the VASP directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        drone = VaspDrone(additional_fields=self.get("additional_fields"),
                          parse_dos=self.get("parse_dos", False),
                          compress_dos=1,
                          bandstructure_mode=self.get("bandstructure_mode",
                                                      False),
                          compress_bs=1)

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(calc_dir)

        # Check for additional keys to set based on the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion or taskdoc dump
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
            t_id = mmdb.insert_task(task_doc,
                                    parse_dos=self.get("parse_dos", False),
                                    parse_bs=bool(
                                        self.get("bandstructure_mode", False)))
            logger.info("Finished parsing with task_id: {}".format(t_id))

        defuse_children = False
        if task_doc["state"] != "successful":
            if self.get("defuse_unsuccessful", DEFUSE_UNSUCCESSFUL) is True:
                defuse_children = True
            elif self.get("defuse_unsuccessful", DEFUSE_UNSUCCESSFUL).lower() \
                    == "fizzle":
                raise RuntimeError(
                    "VaspToDb indicates that job is not successful "
                    "(perhaps your job did not converge within the "
                    "limit of electronic/ionic iterations)!")

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=defuse_children)
Example #32
    def run_task(self, fw_spec):

        incar_name = self.get("input_filename", "INCAR")
        incar = Incar.from_file(incar_name)

        incar_update = env_chk(self.get('incar_update'), fw_spec)
        incar_multiply = env_chk(self.get('incar_multiply'), fw_spec)
        incar_dictmod = env_chk(self.get('incar_dictmod'), fw_spec)

        if incar_update:
            incar.update(incar_update)

        if incar_multiply:
            for k in incar_multiply:
                incar[k] = incar[k] * incar_multiply[k]

        if incar_dictmod:
            apply_mod(incar_dictmod, incar)

        incar.write_file(self.get("output_filename", "INCAR"))
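
The update and multiply modes reduce to plain dict operations; a sketch with made-up INCAR values (incar_dictmod additionally supports FireWorks' DictMod syntax via apply_mod):

incar = {"ENCUT": 400, "EDIFF": 1e-4, "NSW": 0}

incar_update = {"NSW": 99}       # overwrite keys outright
incar_multiply = {"ENCUT": 1.3}  # scale existing numeric values

incar.update(incar_update)
for k in incar_multiply:
    incar[k] = incar[k] * incar_multiply[k]
print(incar)  # {'ENCUT': 520.0, 'EDIFF': 0.0001, 'NSW': 99}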
Example #33
    def run_task(self, fw_spec):
        nm_eigenvecs = np.array(fw_spec["normalmodes"]["eigenvecs"])
        nm_eigenvals = np.array(fw_spec["normalmodes"]["eigenvals"])
        nm_norms = np.linalg.norm(nm_eigenvecs, axis=2)
        structure = fw_spec["normalmodes"]["structure"]
        masses = np.array([site.specie.data['Atomic mass'] for site in structure])
        nm_norms = nm_norms / np.sqrt(masses)  # eigenvectors in vasprun.xml are not divided by sqrt(M_i)
        # To get the actual eigenvals, the values read from vasprun.xml must be multiplied by -1.
        # frequency_i = sqrt(-e_i)
        # To convert the frequency to THZ: multiply sqrt(-e_i) by 15.633
        # To convert the frequency to cm^-1: multiply sqrt(-e_i) by 82.995
        nm_frequencies = np.sqrt(np.abs(nm_eigenvals)) * 82.995  # cm^-1

        d = {"structure": structure.as_dict(),
             "formula_pretty": structure.composition.reduced_formula,
             "normalmodes": {"eigenvals": fw_spec["normalmodes"]["eigenvals"],
                             "eigenvecs": fw_spec["normalmodes"]["eigenvecs"]
                             },
             "frequencies": nm_frequencies.tolist()}

        # store the displacement & epsilon for each mode in a dictionary
        mode_disps = fw_spec["raman_epsilon"].keys()
        modes_eps_dict = defaultdict(list)
        for md in mode_disps:
            modes_eps_dict[fw_spec["raman_epsilon"][md]["mode"]].append(
                [fw_spec["raman_epsilon"][md]["displacement"],
                 fw_spec["raman_epsilon"][md]["epsilon"]])

        # raman tensor = finite difference derivative of epsilon wrt displacement.
        raman_tensor_dict = {}
        scale = np.sqrt(structure.volume/2.0) / 4.0 / np.pi
        for k, v in modes_eps_dict.items():
            raman_tensor = (np.array(v[0][1]) - np.array(v[1][1])) / (v[0][0] - v[1][0])
            # frequency in cm^-1
            omega = nm_frequencies[k]
            if nm_eigenvals[k] > 0:
                logger.warn("Mode: {} is UNSTABLE. Freq(cm^-1) = {}".format(k, -omega))
            raman_tensor = scale * raman_tensor * np.sum(nm_norms[k]) / np.sqrt(omega)
            raman_tensor_dict[str(k)] = raman_tensor.tolist()

        d["raman_tensor"] = raman_tensor_dict
        d["state"] = "successful"

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("raman.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["raman"]
            db.collection.insert_one(d)
            logger.info("Raman tensor calculation complete.")
        return FWAction()
Example #34
    def test_env_chk(self):
        fw_spec_valid = {"_fw_env": {"hello": "there"}}
        fw_spec_invalid = {}

        self.assertEqual(env_chk("hello", fw_spec_valid), "hello")
        self.assertEqual(env_chk([1, 2, 3], fw_spec_valid), [1, 2, 3])
        self.assertEqual(env_chk(defaultdict(int), fw_spec_valid), defaultdict(int))
        self.assertEqual(env_chk(">>hello<<", fw_spec_valid), "there")
        self.assertRaises(KeyError, env_chk, ">>hello1<<", fw_spec_valid)
        self.assertEqual(env_chk(">>hello1<<", fw_spec_valid, False), None)

        self.assertRaises(KeyError, env_chk, ">>hello<<", fw_spec_invalid)
        self.assertEqual(env_chk(">>hello<<", fw_spec_invalid, False), None)
        self.assertEqual(env_chk(">>hello<<", fw_spec_invalid, False, "fallback"), "fallback")

        self.assertEqual(env_chk(None, fw_spec_valid, False), None)
        self.assertEqual(env_chk(None, fw_spec_valid, False, "fallback"), "fallback")
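
A minimal reimplementation of the contract these assertions pin down (the name is suffixed _sketch to avoid suggesting this is the real atomate.utils.utils.env_chk):

def env_chk_sketch(val, fw_spec, strict=True, default=None):
    # ">>key<<" is looked up in fw_spec["_fw_env"]; other values pass through
    if val is None:
        return default
    if isinstance(val, str) and val.startswith(">>") and val.endswith("<<"):
        if strict:
            return fw_spec["_fw_env"][val[2:-2]]  # KeyError when missing, as asserted
        return fw_spec.get("_fw_env", {}).get(val[2:-2], default)
    return val

assert env_chk_sketch(">>hello<<", {"_fw_env": {"hello": "there"}}) == "there"
assert env_chk_sketch(">>hello<<", {}, False, "fallback") == "fallback"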
Example #35
    def run_task(self, fw_spec):

        kpoints_name = self.get("input_filename", "KPOINTS")
        kpoints = Kpoints.from_file(kpoints_name)

        kpoints_update = env_chk(self.get("kpoints_update"), fw_spec)

        if kpoints_update:
            for key, value in kpoints_update.items():
                setattr(kpoints, key, value)

        kpoints.write_file(self.get("output_filename", "KPOINTS"))
Example #36
File: exchange.py  Project: samblau/atomate
    def run_task(self, fw_spec):

        db_file = env_chk(self["db_file"], fw_spec)
        wf_uuid = self["wf_uuid"]

        # Get Heisenberg models from db
        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        mmdb.collection = mmdb.db["exchange"]

        # Get documents
        docs = list(
            mmdb.collection.find({"wf_meta.wf_uuid": wf_uuid},
                                 ["heisenberg_model", "nn_cutoff"]))

        hmodels = [
            HeisenbergModel.from_dict(d["heisenberg_model"]) for d in docs
        ]
        cutoffs = [d["nn_cutoff"] for d in docs]

        # Take <J> only if no static calcs
        if self["average"]:
            hmodel = hmodels[0].as_dict()
        else:
            # Check for J_ij convergence
            converged_list = []
            for cutoff, hmodel in zip(cutoffs, hmodels):
                ex_params = hmodel.ex_params
                converged = True

                # J_ij exists
                if len(ex_params) > 1:
                    E0 = abs(ex_params["E0"])
                    for k, v in ex_params.items():
                        if k != "E0":
                            if abs(v) > E0:  # |J_ij| > |E0| unphysical!
                                converged = False
                else:  # Only <J> was computed
                    converged = False

                if converged:
                    converged_list.append(hmodel.as_dict())

            # If multiple Heisenberg models converged, keep the one with the
            # largest cutoff (the last one appended, since docs are assumed to
            # come back in ascending-cutoff order)
            if len(converged_list) > 0:
                hmodel = converged_list[-1]
            else:  # No converged models
                hmodel = None

        # Update FW spec with converged hmodel or None
        update_spec = {"converged_heisenberg_model": hmodel}

        return FWAction(update_spec=update_spec)
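The J_ij screen above, reduced to its essentials: a fit is kept only when every exchange parameter is bounded by |E0| (values below are illustrative):

ex_params = {"E0": -42.0, "J1": 12.3, "J2": -3.1}  # illustrative fit output
E0 = abs(ex_params["E0"])
# a fit counts as converged only when every |J_ij| is bounded by |E0|
converged = len(ex_params) > 1 and all(
    abs(v) <= E0 for k, v in ex_params.items() if k != "E0")
print(converged)  # True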
Example #40
    def run_task(self, fw_spec):

        # Get optimized structure
        # TODO: will this find the correct path if the workflow is rerun from the start?
        optimize_loc = fw_spec["calc_locs"][0]["path"]
        logger.info("PARSING INITIAL OPTIMIZATION DIRECTORY: {}".format(optimize_loc))
        drone = VaspDrone()
        optimize_doc = drone.assimilate(optimize_loc)
        opt_struct = Structure.from_dict(optimize_doc["calcs_reversed"][0]["output"]["structure"])

        d = {"analysis": {}, "deformation_tasks": fw_spec["deformation_tasks"],
             "initial_structure": self['structure'].as_dict(),
             "optimized_structure": opt_struct.as_dict()}
        if fw_spec.get("tags",None):
            d["tags"] = fw_spec["tags"]
        dtypes = fw_spec["deformation_tasks"].keys()
        defos = [fw_spec["deformation_tasks"][dtype]["deformation_matrix"]
                 for dtype in dtypes]
        stresses = [fw_spec["deformation_tasks"][dtype]["stress"] for dtype in dtypes]
        stress_dict = {IndependentStrain(defo): Stress(stress) for defo, stress
                       in zip(defos, stresses)}

        logger.info("ANALYZING STRESS/STRAIN DATA")
        # DETERMINE IF WE HAVE 6 "UNIQUE" deformations
        if len(set([de[:3] for de in dtypes])) == 6:
            # Perform Elastic tensor fitting and analysis
            result = ElasticTensor.from_stress_dict(stress_dict)
            d["elastic_tensor"] = result.voigt.tolist()
            kg_average = result.kg_average
            d.update({"K_Voigt": kg_average[0], "G_Voigt": kg_average[1],
                      "K_Reuss": kg_average[2], "G_Reuss": kg_average[3],
                      "K_Voigt_Reuss_Hill": kg_average[4],
                      "G_Voigt_Reuss_Hill": kg_average[5]})
            d["universal_anisotropy"] = result.universal_anisotropy
            d["homogeneous_poisson"] = result.homogeneous_poisson

        else:
            raise ValueError("Fewer than 6 unique deformations")

        d["state"] = "successful"

        # Save analysis results in json or db
        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("elasticity.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = MMVaspDb.from_db_file(db_file, admin=True)
            db.collection = db.db["elasticity"]
            db.collection.insert_one(d)
            logger.info("ELASTIC ANALYSIS COMPLETE")
        return FWAction()
Example #41
File: ftasks.py Project: kcbhamu/dfttk
    def run_task(self, fw_spec):

        handler_groups = {
            "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(), UnconvergedErrorHandler(),
                        NonConvergingErrorHandler(), PotimErrorHandler(),
                        PositiveEnergyErrorHandler(), FrozenJobErrorHandler(), StdErrHandler(),
                        DriftErrorHandler()],
            "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(), UnconvergedErrorHandler(),
                       NonConvergingErrorHandler(), PotimErrorHandler(),
                       PositiveEnergyErrorHandler(), FrozenJobErrorHandler(),
                       StdErrHandler(), AliasingErrorHandler(), DriftErrorHandler()],
            "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
            "no_handler": []
            }

        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)

        if isinstance(vasp_cmd, six.string_types):
            vasp_cmd = os.path.expandvars(vasp_cmd)
            vasp_cmd = shlex.split(vasp_cmd)

        # initialize variables
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        gzip_output = self.get("gzip_output", True)
        max_errors = self.get("max_errors", 5)
        auto_npar = env_chk(self.get("auto_npar"), fw_spec, strict=False, default=False)
        gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec, strict=False, default=None)

        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, gamma_vasp_cmd=gamma_vasp_cmd)]

        # construct handlers
        handlers = handler_groups[self.get("handler_group", "default")]

        validators = []

        c = Custodian(handlers, jobs, validators=validators, max_errors=max_errors,
                      scratch_dir=scratch_dir, gzipped_output=gzip_output)

        c.run()
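Every machine-specific setting above is routed through env_chk, so it can be deferred to the FireWorker rather than hard-coded in the workflow. A sketch of the two halves of that indirection (keys and values are illustrative):

# what a worker-side _fw_env might carry
fw_spec = {"_fw_env": {"vasp_cmd": "srun -n 16 vasp_std",
                       "scratch_dir": "/scratch"}}

# the task parameters then reference those keys with >>...<< markers
task_params = {"vasp_cmd": ">>vasp_cmd<<",
               "scratch_dir": ">>scratch_dir<<",
               "handler_group": "default",
               "max_errors": 5}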
Example #42
    def run_task(self, fw_spec):

        mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
        vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."), parse_dos=False, parse_eigen=False)

        my_entry = vasprun.get_computed_entry(inc_structure=False)
        stored_data = mpr.get_stability([my_entry])[0]

        if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
            return FWAction(stored_data=stored_data, exit=True, defuse_workflow=True)

        else:
            return FWAction(stored_data=stored_data)
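The gate above in isolation: an e_above_hull (eV/atom, as reported by the Materials Project) beyond the cutoff defuses the whole workflow, not just this Firework. Illustrative values:

stored_data = {"e_above_hull": 0.12}  # illustrative stability result
ehull_cutoff = 0.05                   # eV/atom, the task's default
defuse_workflow = stored_data["e_above_hull"] > ehull_cutoff
print(defuse_workflow)  # True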
Example #43
    def run_task(self, fw_spec):

        ref_file = self.get("json_filename", "task.json")
        calc_dir = self.get("calc_dir", os.getcwd())
        with open(os.path.join(calc_dir, ref_file), "r") as fp:
            task_doc = json.load(fp)

        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
            mmdb.insert(task_doc)
Example #44
    def run_task(self, fw_spec):

        # load INCAR
        incar_name = self.get("input_filename", "INCAR")
        incar = Incar.from_file(incar_name)

        # process FireWork env values via env_chk
        incar_update = env_chk(self.get("incar_update"), fw_spec)
        incar_multiply = env_chk(self.get("incar_multiply"), fw_spec)
        incar_dictmod = env_chk(self.get("incar_dictmod"), fw_spec)

        if incar_update:
            incar.update(incar_update)

        if incar_multiply:
            for k in incar_multiply:
                incar[k] = incar[k] * incar_multiply[k]

        if incar_dictmod:
            apply_mod(incar_dictmod, incar)

        # write INCAR
        incar.write_file(self.get("output_filename", "INCAR"))
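The incar_dictmod branch uses FireWorks' mongo-style dict-modification language via apply_mod. A minimal demonstration on a plain dict (the "_set" mod shown is one of the standard FireWorks mods):

from fireworks.utils.dict_mods import apply_mod

incar_like = {"ENCUT": 520, "NELM": 100}        # stand-in for an Incar object
apply_mod({"_set": {"NELM": 200}}, incar_like)  # in-place mongo-style edit
print(incar_like)  # {'ENCUT': 520, 'NELM': 200}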
Example #45
    def run_task(self, fw_spec):

        incar_name = self.get("input_filename", "INCAR")
        incar = Incar.from_file(incar_name)

        incar_update = env_chk(self.get('incar_update'), fw_spec)
        incar_multiply = env_chk(self.get('incar_multiply'), fw_spec)
        incar_dictmod = env_chk(self.get('incar_dictmod'), fw_spec)

        if incar_update:
            incar.update(incar_update)

        if incar_multiply:
            for k in incar_multiply:
                if hasattr(incar[k], '__iter__'):  # is list-like
                    incar[k] = list(np.multiply(incar[k], incar_multiply[k]))
                else:
                    incar[k] = incar[k] * incar_multiply[k]

        if incar_dictmod:
            apply_mod(incar_dictmod, incar)

        incar.write_file(self.get("output_filename", "INCAR"))
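The __iter__ branch above is what lets incar_multiply scale list-valued INCAR tags (e.g. MAGMOM) elementwise, while scalar tags multiply directly. The same logic on a bare dict:

import numpy as np

incar = {"NCORE": 4, "MAGMOM": [5.0, 0.6]}   # stand-in for an Incar object
incar_multiply = {"NCORE": 2, "MAGMOM": 1.1}
for k in incar_multiply:
    if hasattr(incar[k], "__iter__"):        # list-like: scale elementwise
        incar[k] = list(np.multiply(incar[k], incar_multiply[k]))
    else:                                    # scalar: plain multiplication
        incar[k] = incar[k] * incar_multiply[k]
print(incar)  # NCORE -> 8, MAGMOM scaled elementwise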
Example #46
    def run_task(self, fw_spec):
        potcar_symbols = env_chk(self.get("potcar_symbols"), fw_spec)
        functional = self.get("functional", None)
        potcar_name = self.get("input_filename", "POTCAR")
        potcar = Potcar.from_file(potcar_name)

        # Replace PotcarSingles corresponding to elements
        # contained in potcar_symbols
        for n, psingle in enumerate(potcar):
            if psingle.element in potcar_symbols:
                potcar[n] = PotcarSingle.from_symbol_and_functional(
                    potcar_symbols[psingle.element], functional)

        potcar.write_file(self.get("output_filename", "POTCAR"))
Example #47
    def run_task(self, fw_spec):
        # get the directory that contains the QChem dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]
        input_file = self.get("input_file", "mol.qin")
        output_file = self.get("output_file", "mol.qout")
        multirun = self.get("multirun", False)

        # parse the QChem directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        drone = QChemDrone(additional_fields=self.get("additional_fields"))

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(
            path=calc_dir,
            input_file=input_file,
            output_file=output_file,
            multirun=multirun)

        # Check for additional keys to set based on the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        # Update fw_spec with final/optimized structure
        update_spec = {}
        if task_doc.get("output").get("optimized_molecule"):
            update_spec["prev_calc_molecule"] = task_doc["output"][
                "optimized_molecule"]

        # get the database connection
        db_file = env_chk(self.get("db_file"), fw_spec)

        # db insertion or taskdoc dump
        if not db_file:
            with open(os.path.join(calc_dir, "task.json"), "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = QChemCalcDb.from_db_file(db_file, admin=True)
            t_id = mmdb.insert(task_doc)
            logger.info("Finished parsing with task_id: {}".format(t_id))

        return FWAction(
            stored_data={"task_id": task_doc.get("task_id", None)},
            update_spec=update_spec)
Example #48
    def run_task(self, fw_spec):

        from atomate.vasp.analysis.phonopy import get_phonopy_thermal_expansion

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        pressure = self.get("pressure", 0.0)
        summary_dict = {}

        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()
        summary_dict["formula_pretty"] = structure.composition.reduced_formula

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} thermal_expansion*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula})
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
            force_constants.append(d["calcs_reversed"][-1]["output"]['force_constants'])
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes
        summary_dict["force_constants"] = force_constants

        alpha, T = get_phonopy_thermal_expansion(energies, volumes, force_constants, structure,
                                                 t_min, t_step, t_max, mesh, eos, pressure)

        summary_dict["alpha"] = alpha
        summary_dict["T"] = T

        with open("thermal_expansion.json", "w") as f:
            f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        # TODO: @matk86 - there needs to be a way to insert this into a database! And also
        # a builder to put it into materials collection... -computron
        logger.info("Thermal expansion coefficient calculation complete.")
Example #49
    def run_task(self, fw_spec):

        from atomate.tools.analysis import get_phonopy_thermal_expansion

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        pressure = self.get("pressure", 0.0)
        summary_dict = {}

        mmdb = MMVaspDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} thermal_expansion*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula})
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
            force_constants.append(d["calcs_reversed"][-1]["output"]['force_constants'])
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes
        summary_dict["force_constants"] = force_constants

        alpha, T = get_phonopy_thermal_expansion(energies, volumes, force_constants, structure,
                                                 t_min, t_step, t_max, mesh, eos, pressure)

        summary_dict["alpha"] = alpha
        summary_dict["T"] = T

        with open("thermal_expansion.json", "w") as f:
            f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        logger.info("THERMAL EXPANSION COEFF CALCULATION COMPLETE")
Example #50
    def run_task(self, fw_spec):
        lammps_input = self["lammps_input"]
        diffusion_params = self.get("diffusion_params", {})

        # get the directory that contains the LAMMPS dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))
        d = {}
        d["dir_name"] = os.path.abspath(os.getcwd())
        d["last_updated"] = datetime.today()
        d["input"] = lammps_input.as_dict()
        log_file = lammps_input.config_dict["log"]
        if isinstance(lammps_input.config_dict["dump"], list):
            dump_file = lammps_input.config_dict["dump"][0].split()[4]
        else:
            dump_file = lammps_input.config_dict["dump"].split()[4]
        is_forcefield = hasattr(lammps_input.lammps_data, "bonds_data")
        lammpsrun = LammpsRun(lammps_input.data_filename, dump_file, log_file, is_forcefield=is_forcefield)
        d["natoms"] = lammpsrun.natoms
        d["nmols"] = lammpsrun.nmols
        d["box_lengths"] = lammpsrun.box_lengths
        d["mol_masses"] = lammpsrun.mol_masses
        d["mol_config"] = lammpsrun.mol_config
        if diffusion_params:
            diffusion_analyzer = lammpsrun.get_diffusion_analyzer(**diffusion_params)
            d["analysis"]["diffusion"] = diffusion_analyzer.get_summary_dict()
        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            mmdb = MMLammpsDb.from_db_file(db_file)
            # insert the task document
            t_id = mmdb.insert(d)
            logger.info("Finished parsing with task_id: {}".format(t_id))
        return FWAction(stored_data={"task_id": d.get("task_id", None)})
Example #51
    def run_task(self, fw_spec):

        # get the directory that contains the LAMMPS run parse.
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        drone = LammpsDrone(additional_fields=self.get("additional_fields"),
                            diffusion_params=self.get("diffusion_params", None))

        task_doc = drone.assimilate(calc_dir, input_filename=self["input_filename"],
                                    log_filename=self.get("log_filename", "log.lammps"),
                                    is_forcefield=self.get("is_forcefield", False),
                                    data_filename=self.get("data_filename", None),
                                    dump_files=self.get("dump_filenames", None))

        # Check for additional keys to set based on the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = LammpsCalcDb.from_db_file(db_file)
            # insert the task document
            t_id = mmdb.insert(task_doc)
            logger.info("Finished parsing with task_id: {}".format(t_id))

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)})
Example #52
    def run_task(self, fw_spec):
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        db_file = env_chk(self.get('db_file'), fw_spec)

        cluster_dict = None
        tags = Tags.from_file(filename="feff.inp")
        if "RECIPROCAL" not in tags:
            cluster_dict = Atoms.cluster_from_file("feff.inp").as_dict()
        doc = {"input_parameters": tags.as_dict(),
               "cluster": cluster_dict,
               "structure": self["structure"].as_dict(),
               "absorbing_atom": self["absorbing_atom"],
               "spectrum_type": self["spectrum_type"],
               "spectrum": np.loadtxt(os.path.join(calc_dir, self["output_file"])).tolist(),
               "edge": self.get("edge", None),
               "metadata": self.get("metadata", None),
               "dir_name": os.path.abspath(os.getcwd()),
               "last_updated": datetime.today()}

        if not db_file:
            with open("feff_task.json", "w") as f:
                f.write(json.dumps(doc, default=DATETIME_HANDLER))
        # db insertion
        else:
            db = MMFeffDb.from_db_file(db_file, admin=True)
            db.insert(doc)

        logger.info("Finished parsing the spectrum")

        return FWAction(stored_data={"task_id": doc.get("task_id", None)})
Example #53
    def run_task(self, fw_spec):

        wfid = list(filter(lambda x: 'wfid' in x, fw_spec['tags'])).pop()
        db_file = env_chk(self.get("db_file"), fw_spec)
        vaspdb = VaspCalcDb.from_db_file(db_file, admin=True)

        # ferroelectric workflow groups calculations by generated wfid tag
        polarization_tasks = vaspdb.collection.find({"tags": wfid, "task_label": {"$regex": ".*polarization"}})

        tasks = []
        outcars = []
        structure_dicts = []
        sort_weight = []
        energies_per_atom = []
        energies = []
        zval_dicts = []

        for p in polarization_tasks:
            # Grab data from each polarization task
            energies_per_atom.append(p['calcs_reversed'][0]['output']['energy_per_atom'])
            energies.append(p['calcs_reversed'][0]['output']['energy'])
            tasks.append(p['task_label'])
            outcars.append(p['calcs_reversed'][0]['output']['outcar'])
            structure_dicts.append(p['calcs_reversed'][0]['input']['structure'])
            zval_dicts.append(p['calcs_reversed'][0]['output']['outcar']['zval_dict'])

            # Add weight for sorting
            # Want polarization calculations in order of nonpolar to polar for Polarization object

            # This number needs to be bigger than the number of calculations
            max_sort_weight = 1000000

            if 'nonpolar_polarization' in p['task_label']:
                sort_weight.append(0)
            elif "polar_polarization" in p['task_label']:
                sort_weight.append(max_sort_weight)
            elif "interpolation_" in p['task_label']:
                num = 0
                part = re.findall(r'interpolation_[0-9]+_polarization', p['task_label'])
                if part != []:
                    part2 = re.findall(r'[0-9]+', part.pop())
                    if part2 != []:
                        num = part2.pop()
                sort_weight.append(max_sort_weight - int(num))

        # Sort polarization tasks
        # nonpolar -> interpolation_n -> interpolation_n-1 -> ...  -> interpolation_1 -> polar
        data = zip(tasks, structure_dicts, outcars, energies_per_atom, energies, sort_weight)
        data = sorted(data, key=lambda x: x[-1])

        # Get the tasks, structures, etc in sorted order from the zipped data.
        tasks, structure_dicts, outcars, energies_per_atom, energies, sort_weight = zip(*data)

        structures = [Structure.from_dict(structure) for structure in structure_dicts]

        # If LCALCPOL = True then Outcar will parse and store the pseudopotential zvals.
        zval_dict = zval_dicts.pop()

        # Assumes that we want to calculate the ionic contribution to the dipole moment.
        # VASP's ionic contribution is sometimes strange.
        # See pymatgen.analysis.ferroelectricity.polarization.Polarization for details.
        p_elecs = [outcar['p_elec'] for outcar in outcars]
        p_ions = [get_total_ionic_dipole(structure, zval_dict) for structure in structures]

        polarization = Polarization(p_elecs, p_ions, structures)

        p_change = polarization.get_polarization_change().A1.tolist()
        p_norm = polarization.get_polarization_change_norm()
        polarization_max_spline_jumps = polarization.max_spline_jumps()
        same_branch = polarization.get_same_branch_polarization_data(convert_to_muC_per_cm2=True)
        raw_elecs, raw_ions = polarization.get_pelecs_and_pions()
        quanta = polarization.get_lattice_quanta(convert_to_muC_per_cm2=True)

        energy_trend = EnergyTrend(energies_per_atom)
        energy_max_spline_jumps = energy_trend.max_spline_jump()

        polarization_dict = {}

        def split_abc(var, var_name):
            d = {}
            for i, j in enumerate('abc'):
                d.update({var_name + "_{}".format(j): var[:, i].A1.tolist()})
            return d

        # Add some sort of id for the structures? Like cid but more general?
        # polarization_dict.update({'cid': cid})

        # General information
        polarization_dict.update({'pretty_formula': structures[0].composition.reduced_formula})
        polarization_dict.update({'wfid': wfid})
        polarization_dict.update({'task_label_order': tasks})

        # Polarization information
        polarization_dict.update({'polarization_change': p_change})
        polarization_dict.update({'polarization_change_norm': p_norm})
        polarization_dict.update({'polarization_max_spline_jumps': polarization_max_spline_jumps})
        polarization_dict.update(split_abc(same_branch, "same_branch_polarization"))
        polarization_dict.update(split_abc(raw_elecs, "raw_electron_polarization"))
        polarization_dict.update(split_abc(raw_ions, "raw_ion_polarization"))
        polarization_dict.update(split_abc(quanta, "polarization_quanta"))
        polarization_dict.update({"zval_dict": zval_dict})

        # Energy information
        polarization_dict.update({'energy_per_atom_max_spline_jumps': energy_max_spline_jumps})
        polarization_dict.update({"energies": energies})
        polarization_dict.update({"energies_per_atom": energies_per_atom})
        polarization_dict.update({'outcars': outcars})
        polarization_dict.update({"structures": structure_dicts})

        # Write all the info to db.
        coll = vaspdb.db["polarization_tasks"]
        coll.insert_one(polarization_dict)
Example #54
    def run_task(self, fw_spec):
        # get the directory that contains the VASP dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the VASP directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))

        drone = VaspDrone(additional_fields=self.get("additional_fields"),
                          parse_dos=self.get("parse_dos", False),
                          bandstructure_mode=self.get("bandstructure_mode", False),
                          parse_chgcar=self.get("parse_chgcar", False),
                          parse_aeccar=self.get("parse_aeccar", False))

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(calc_dir)

        # Check for additional keys to set based on the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion or taskdoc dump
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
            t_id = mmdb.insert_task(
                task_doc, use_gridfs=self.get("parse_dos", False)
                or bool(self.get("bandstructure_mode", False))
                or self.get("parse_chgcar", False)
                or self.get("parse_aeccar", False))
            logger.info("Finished parsing with task_id: {}".format(t_id))

        defuse_children = False
        if task_doc["state"] != "successful":
            defuse_unsuccessful = self.get("defuse_unsuccessful",
                                           DEFUSE_UNSUCCESSFUL)
            if defuse_unsuccessful is True:
                defuse_children = True
            elif defuse_unsuccessful is False:
                pass
            elif defuse_unsuccessful == "fizzle":
                raise RuntimeError(
                    "VaspToDb indicates that job is not successful "
                    "(perhaps your job did not converge within the "
                    "limit of electronic/ionic iterations)!")
            else:
                raise RuntimeError("Unknown option for defuse_unsuccessful: "
                                   "{}".format(defuse_unsuccessful))

        task_fields_to_push = self.get("task_fields_to_push", None)
        update_spec = {}
        if task_fields_to_push:
            if isinstance(task_fields_to_push, dict):
                for key, path_in_task_doc in task_fields_to_push.items():
                    if has(task_doc, path_in_task_doc):
                        update_spec[key] = get(task_doc, path_in_task_doc)
                    else:
                        logger.warn("Could not find {} in task document. Unable to push to next firetask/firework".format(path_in_task_doc))
            else:
                raise RuntimeError("Inappropriate type {} for task_fields_to_push. It must be a "
                                   "dictionary of format: {key: path} where key refers to a field "
                                   "in the spec and path is a full mongo-style path to a "
                                   "field in the task document".format(type(task_fields_to_push)))

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=defuse_children, update_spec=update_spec)
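The push mechanism at the end maps spec keys to mongo-style paths into the task document; assuming has/get are pydash-style deep accessors (which the dotted paths suggest), the core of it looks like this:

from pydash.objects import get, has

task_doc = {"output": {"energy": -10.5}}  # illustrative task document
task_fields_to_push = {"prev_energy": "output.energy"}

# copy each requested task-doc field into the spec update, skipping misses
update_spec = {key: get(task_doc, path)
               for key, path in task_fields_to_push.items()
               if has(task_doc, path)}
print(update_spec)  # {'prev_energy': -10.5}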
Example #55
    def run_task(self, fw_spec):
        feff_cmd = env_chk(self["feff_cmd"], fw_spec)
        logger.info("Running FEFF using exe: {}".format(feff_cmd))
        return_code = subprocess.call(feff_cmd, shell=True)
        logger.info("FEFF finished running with returncode: {}".format(return_code))
Example #56
    def run_task(self, fw_spec):

        gibbs_dict = {}

        tag = self["tag"]
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        qha_type = self.get("qha_type", "debye_model")
        pressure = self.get("pressure", 0.0)
        poisson = self.get("poisson", 0.25)
        anharmonic_contribution = self.get("anharmonic_contribution", False)
        gibbs_dict["metadata"] = self.get("metadata", {})

        db_file = env_chk(self.get("db_file"), fw_spec)
        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)},
                                     {"calcs_reversed": 1})
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        gibbs_dict["structure"] = structure.as_dict()
        gibbs_dict["formula_pretty"] = structure.composition.reduced_formula

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} gibbs*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula},
                                    {"calcs_reversed": 1})
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            if qha_type not in ["debye_model"]:
                force_constants.append(d["calcs_reversed"][-1]["output"]['force_constants'])
            volumes.append(s.volume)
        gibbs_dict["energies"] = energies
        gibbs_dict["volumes"] = volumes
        if qha_type not in ["debye_model"]:
            gibbs_dict["force_constants"] = force_constants

        try:
            # use quasi-harmonic debye approximation
            if qha_type in ["debye_model"]:

                from pymatgen.analysis.quasiharmonic import QuasiharmonicDebyeApprox

                qhda = QuasiharmonicDebyeApprox(energies, volumes, structure, t_min, t_step, t_max,
                                                eos, pressure=pressure, poisson=poisson,
                                                anharmonic_contribution=anharmonic_contribution)
                gibbs_dict.update(qhda.get_summary_dict())
                gibbs_dict["anharmonic_contribution"] = anharmonic_contribution
                gibbs_dict["success"] = True

            # use the phonopy interface
            else:

                from atomate.vasp.analysis.phonopy import get_phonopy_gibbs

                G, T = get_phonopy_gibbs(energies, volumes, force_constants, structure, t_min,
                                         t_step, t_max, mesh, eos, pressure)
                gibbs_dict["gibbs_free_energy"] = G
                gibbs_dict["temperatures"] = T
                gibbs_dict["success"] = True

        # quasi-harmonic analysis failed, set the flag to false
        except Exception:
            import traceback

            logger.warning("Quasi-harmonic analysis failed!")
            gibbs_dict["success"] = False
            gibbs_dict["traceback"] = traceback.format_exc()
            gibbs_dict['metadata'].update({"task_label_tag": tag})
            gibbs_dict["created_at"] = datetime.utcnow()

        gibbs_dict = jsanitize(gibbs_dict)

        # TODO: @matk86: add a list of task_ids that were used to construct the analysis to DB?
        # -computron
        if not db_file:
            dump_file = "gibbs.json"
            logger.info("Dumping the analysis summary to {}".format(dump_file))
            with open(dump_file, "w") as f:
                f.write(json.dumps(gibbs_dict, default=DATETIME_HANDLER))
        else:
            coll = mmdb.db["gibbs_tasks"]
            coll.insert_one(gibbs_dict)

        logger.info("Gibbs free energy calculation complete.")

        if not gibbs_dict["success"]:
            return FWAction(defuse_children=True)
Example #57
    def run_task(self, fw_spec):
        ref_struct = self['structure']
        d = {
            "analysis": {},
            "initial_structure": self['structure'].as_dict()
        }

        # Get optimized structure
        calc_locs_opt = [cl for cl in fw_spec.get('calc_locs', []) if 'optimiz' in cl['name']]
        if calc_locs_opt:
            optimize_loc = calc_locs_opt[-1]['path']
            logger.info("Parsing initial optimization directory: {}".format(optimize_loc))
            drone = VaspDrone()
            optimize_doc = drone.assimilate(optimize_loc)
            opt_struct = Structure.from_dict(optimize_doc["calcs_reversed"][0]["output"]["structure"])
            d.update({"optimized_structure": opt_struct.as_dict()})
            ref_struct = opt_struct
            # -0.1 converts VASP's kBar stresses to GPa and flips VASP's sign convention
            eq_stress = -0.1*Stress(optimize_doc["calcs_reversed"][0]["output"]["ionic_steps"][-1]["stress"])
        else:
            eq_stress = None

        if self.get("fw_spec_field"):
            d.update({self.get("fw_spec_field"): fw_spec.get(self.get("fw_spec_field"))})

        # Get the stresses, strains, deformations from deformation tasks
        defo_dicts = fw_spec["deformation_tasks"].values()
        stresses, strains, deformations = [], [], []
        for defo_dict in defo_dicts:
            stresses.append(Stress(defo_dict["stress"]))
            strains.append(Strain(defo_dict["strain"]))
            deformations.append(Deformation(defo_dict["deformation_matrix"]))
            # Add derived stresses and strains if symmops is present
            for symmop in defo_dict.get("symmops", []):
                stresses.append(Stress(defo_dict["stress"]).transform(symmop))
                strains.append(Strain(defo_dict["strain"]).transform(symmop))
                deformations.append(Deformation(defo_dict["deformation_matrix"]).transform(symmop))

        # -0.1 converts VASP's kBar stresses to GPa and flips VASP's sign convention
        stresses = [-0.1*s for s in stresses]
        pk_stresses = [stress.piola_kirchoff_2(deformation)
                       for stress, deformation in zip(stresses, deformations)]

        d['fitting_data'] = {'cauchy_stresses': stresses,
                             'eq_stress': eq_stress,
                             'strains': strains,
                             'pk_stresses': pk_stresses,
                             'deformations': deformations
                             }

        logger.info("Analyzing stress/strain data")
        # TODO: @montoyjh: what if it's a cubic system? don't need 6. -computron
        # TODO: Can add population method but want to think about how it should
        #           be done. -montoyjh
        order = self.get('order', 2)
        if order > 2:
            method = 'finite_difference'
        else:
            method = self.get('fitting_method', 'finite_difference')

        if method == 'finite_difference':
            result = ElasticTensorExpansion.from_diff_fit(
                    strains, pk_stresses, eq_stress=eq_stress, order=order)
            if order == 2:
                result = ElasticTensor(result[0])
        elif method == 'pseudoinverse':
            result = ElasticTensor.from_pseudoinverse(strains, pk_stresses)
        elif method == 'independent':
            result = ElasticTensor.from_independent_strains(strains, pk_stresses, eq_stress=eq_stress)
        else:
            raise ValueError("Unsupported method, method must be finite_difference, "
                             "pseudoinverse, or independent")

        ieee = result.convert_to_ieee(ref_struct)
        d.update({
            "elastic_tensor": {
                "raw": result.voigt,
                "ieee_format": ieee.voigt
            }
        })
        if order == 2:
            d.update({"derived_properties": ieee.get_structure_property_dict(ref_struct)})
        else:
            soec = ElasticTensor(ieee[0])
            d.update({"derived_properties": soec.get_structure_property_dict(ref_struct)})

        d["formula_pretty"] = ref_struct.composition.reduced_formula
        d["fitting_method"] = method
        d["order"] = order

        d = jsanitize(d)

        # Save analysis results in json or db
        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("elasticity.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["elasticity"]
            db.collection.insert_one(d)
            logger.info("Elastic analysis complete.")
        
        return FWAction()
Example #58
    def run_task(self, fw_spec):
        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
        logger.info("Running VASP using exe: {}".format(vasp_cmd))
        return_code = subprocess.call(vasp_cmd, shell=True)
        logger.info("VASP finished running with returncode: {}".format(return_code))
Example #59
    def run_task(self, fw_spec):

        # initialize variables
        qchem_cmd = env_chk(self["qchem_cmd"], fw_spec)
        multimode = env_chk(self.get("multimode"), fw_spec)
        if multimode is None:
            multimode = "openmp"
        input_file = self.get("input_file", "mol.qin")
        output_file = self.get("output_file", "mol.qout")
        max_cores = env_chk(self["max_cores"], fw_spec)
        qclog_file = self.get("qclog_file", "mol.qclog")
        suffix = self.get("suffix", "")
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        if scratch_dir is None:
            scratch_dir = "/dev/shm/qcscratch/"
        save_scratch = self.get("save_scratch", False)
        save_name = self.get("save_name", "default_save_name")
        max_errors = self.get("max_errors", 5)
        max_iterations = self.get("max_iterations", 10)
        max_molecule_perturb_scale = self.get("max_molecule_perturb_scale",
                                              0.3)
        job_type = self.get("job_type", "normal")
        gzipped_output = self.get("gzipped_output", True)

        handler_groups = {
            "default": [
                QChemErrorHandler(
                    input_file=input_file, output_file=output_file)
            ],
            "no_handler": []
        }

        # construct jobs
        if job_type == "normal":
            jobs = [
                QCJob(
                    qchem_command=qchem_cmd,
                    max_cores=max_cores,
                    multimode=multimode,
                    input_file=input_file,
                    output_file=output_file,
                    qclog_file=qclog_file,
                    suffix=suffix,
                    scratch_dir=scratch_dir,
                    save_scratch=save_scratch,
                    save_name=save_name)
            ]
        elif job_type == "opt_with_frequency_flattener":
            jobs = QCJob.opt_with_frequency_flattener(
                qchem_command=qchem_cmd,
                multimode=multimode,
                input_file=input_file,
                output_file=output_file,
                qclog_file=qclog_file,
                max_iterations=max_iterations,
                max_molecule_perturb_scale=max_molecule_perturb_scale,
                scratch_dir=scratch_dir,
                save_scratch=save_scratch,
                save_name=save_name,
                max_cores=max_cores)

        else:
            raise ValueError("Unsupported job type: {}".format(job_type))

        # construct handlers
        handlers = handler_groups[self.get("handler_group", "default")]

        c = Custodian(
            handlers,
            jobs,
            max_errors=max_errors,
            gzipped_output=gzipped_output)

        c.run()
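As with the VASP custodian task earlier, every machine-specific knob here is env_chk-able. An illustrative worker env and matching task parameters (all names and values are examples, not a prescribed configuration):

fw_spec = {"_fw_env": {"qchem_cmd": "qchem", "max_cores": 32,
                       "scratch_dir": "/dev/shm/qcscratch/"}}

task_params = {"qchem_cmd": ">>qchem_cmd<<",
               "max_cores": ">>max_cores<<",
               "multimode": "openmp",
               "job_type": "opt_with_frequency_flattener",
               "max_iterations": 10}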