Example #1
    def run_task(self, fw_spec):

        handler_groups = {
            "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                        UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                        PotimErrorHandler(), PositiveEnergyErrorHandler(),
                        FrozenJobErrorHandler()],
            "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                       UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                       PotimErrorHandler(), PositiveEnergyErrorHandler(),
                       FrozenJobErrorHandler(), AliasingErrorHandler()],
            "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
            "no_handler": []
            }

        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
        if isinstance(vasp_cmd, six.string_types):
            vasp_cmd = os.path.expandvars(vasp_cmd)
            vasp_cmd = shlex.split(vasp_cmd)

        # initialize variables
        job_type = self.get("job_type", "normal")
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        gzip_output = self.get("gzip_output", True)
        max_errors = self.get("max_errors", 5)
        auto_npar = env_chk(self.get("auto_npar"), fw_spec, strict=False, default=False)
        gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec, strict=False, default=None)
        if gamma_vasp_cmd:
            gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

        # construct jobs
        if job_type == "normal":
            jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, gamma_vasp_cmd=gamma_vasp_cmd)]
        elif job_type == "double_relaxation_run":
            jobs = VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
                                                 half_kpts_first_relax=False)
        elif job_type == "full_opt_run":
            jobs = VaspJob.full_opt_run(vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
                                        max_steps=5, half_kpts_first_relax=False)
        else:
            raise ValueError("Unsupported job type: {}".format(job_type))

        # construct handlers
        handlers = handler_groups[self.get("handler_group", "default")]

        if self.get("max_force_threshold"):
            handlers.append(MaxForceErrorHandler(max_force_threshold=self["max_force_threshold"]))

        if self.get("wall_time"):
            handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

        validators = [VasprunXMLValidator()]

        c = Custodian(handlers, jobs, validators=validators, max_errors=max_errors,
                      scratch_dir=scratch_dir, gzipped_output=gzip_output)

        c.run()
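
A minimal sketch of wiring a task like this into a Firework, assuming the class is named RunVaspCustodian and that the FireWorker defines a "vasp_cmd" entry in its env (both the class name and the env key are assumptions, not taken from the code above); the ">>...<<" string is resolved by env_chk at run time:

    from fireworks import Firework
    # from some_package import RunVaspCustodian  # hypothetical import path

    fw = Firework(
        [RunVaspCustodian(vasp_cmd=">>vasp_cmd<<",  # resolved via env_chk
                          job_type="double_relaxation_run",
                          handler_group="strict",
                          max_errors=10)],
        name="relax with custodian")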
Example #2
    def run_task(self, fw_spec):
        calc_locs = list(fw_spec.get("calc_locs", []))
        calc_locs.append({"name": self["name"],
                          "filesystem": env_chk(self.get('filesystem', None), fw_spec),
                          "path": self.get("path", os.getcwd())})

        return FWAction(mod_spec=[{'_push_all': {'calc_locs': calc_locs}}])
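
A downstream sketch of consuming the location recorded above, assuming (as Examples #3 and #4 suggest) that get_calc_loc matches calc_locs entries by their "name" field; "structure optimization" is a hypothetical name:

    calc_dir = get_calc_loc("structure optimization", fw_spec["calc_locs"])["path"]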
Example #3
    def run_task(self, fw_spec):
        # get the directory that contains the dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]

        # parse the calc directory
        logger.info("PARSING DIRECTORY: {} USING DRONE: {}".format(
            calc_dir, self['drone'].__class__.__name__))
        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        drone = self['drone'].__class__()
        task_doc = drone.assimilate(calc_dir)
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = self["mmdb"]
            db = mmdb.__class__.from_db_file(db_file)
            # insert the task document
            t_id = db.insert(task_doc)
            logger.info("Finished parsing with task_id: {}".format(t_id))

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=(task_doc["state"] != "successful"))
Example #4
    def run_task(self, fw_spec):
        # get the directory that contains the dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the calc directory
        logger.info("PARSING DIRECTORY: {} USING DRONE: {}".format(
            calc_dir, self['drone'].__class__.__name__))
        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        drone = self['drone'].__class__()
        task_doc = drone.assimilate(calc_dir)
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            db_config = get_settings(db_file)
            db = MMDb(host=db_config["host"], port=db_config["port"],
                      database=db_config["database"],
                      user=db_config.get("admin_user"), password=db_config.get("admin_password"),
                      collection=db_config["collection"])
            # insert the task document
            t_id = db.insert(task_doc)
            logger.info("Finished parsing with task_id: {}".format(t_id))

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=(task_doc["state"] != "successful"))
Example #5
    def run_task(self, fw_spec):

        from matmethods.tools.analysis import get_phonopy_thermal_expansion

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        pressure = self.get("pressure", 0.0)
        summary_dict = {}

        mmdb = MMVaspDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one(
            {"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(
            d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()

        # get the data (energy, volume, force constants) from the deformation runs
        docs = mmdb.collection.find({
            "task_label": {
                "$regex": "{} thermal_expansion*".format(tag)
            },
            "formula_pretty":
            structure.composition.reduced_formula
        })
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(
                d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
            force_constants.append(
                d["calcs_reversed"][-1]["output"]['force_constants'])
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes
        summary_dict["force_constants"] = force_constants

        alpha, T = get_phonopy_thermal_expansion(energies, volumes,
                                                 force_constants, structure,
                                                 t_min, t_step, t_max, mesh,
                                                 eos, pressure)

        summary_dict["alpha"] = alpha
        summary_dict["T"] = T

        with open("thermal_expansion.json", "w") as f:
            f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        logger.info("THERMAL EXPANSION COEFF CALCULATION COMPLETE")
Example #6
    def run_task(self, fw_spec):
        btrap_dir = os.path.join(os.getcwd(), "boltztrap")
        bta = BoltztrapAnalyzer.from_files(btrap_dir)
        d = bta.as_dict()
        d["boltztrap_dir"] = btrap_dir

        # trim the output
        for x in [
                'cond', 'seebeck', 'kappa', 'hall', 'mu_steps', 'mu_doping',
                'carrier_conc'
        ]:
            del d[x]

        if not self.get("hall_doping"):
            del d["hall_doping"]

        # add the structure
        bandstructure_dir = os.getcwd()
        v, o = get_vasprun_outcar(bandstructure_dir,
                                  parse_eigen=False,
                                  parse_dos=False)
        structure = v.final_structure
        d["structure"] = structure.as_dict()
        d.update(get_meta_from_structure(structure))
        d["bandstructure_dir"] = bandstructure_dir

        # add the spacegroup
        sg = SpacegroupAnalyzer(Structure.from_dict(d["structure"]), 0.1)
        d["spacegroup"] = {
            "symbol": sg.get_space_group_symbol(),
            "number": sg.get_space_group_number(),
            "point_group": sg.get_point_group_symbol(),
            "source": "spglib",
            "crystal_system": sg.get_crystal_system(),
            "hall": sg.get_hall()
        }

        d["created_at"] = datetime.utcnow()

        db_file = env_chk(self.get('db_file'), fw_spec)

        if not db_file:
            with open(os.path.join(btrap_dir, "boltztrap.json"), "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            mmdb = MMVaspDb.from_db_file(db_file, admin=True)

            # dos gets inserted into GridFS
            dos = json.dumps(d["dos"], cls=MontyEncoder)
            fsid, compression = mmdb.insert_gridfs(
                dos, collection="dos_boltztrap_fs", compress=True)
            d["dos_boltztrap_fs_id"] = fsid
            del d["dos"]

            mmdb.db.boltztrap.insert_one(d)
Example #7
    def run_task(self, fw_spec):

        mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
        vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."), parse_dos=False, parse_eigen=False)

        my_entry = vasprun.get_computed_entry(inc_structure=False)
        stored_data = mpr.get_stability([my_entry])[0]

        if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
            return FWAction(stored_data=stored_data, exit=True, defuse_workflow=True)

        else:
            return FWAction(stored_data=stored_data)
Example #8
    def run_task(self, fw_spec):

        # Get optimized structure
        # TODO: will this find the correct path if the workflow is rerun from the start?
        optimize_loc = fw_spec["calc_locs"][0]["path"]
        logger.info("PARSING INITIAL OPTIMIZATION DIRECTORY: {}".format(optimize_loc))
        drone = VaspDrone()
        optimize_doc = drone.assimilate(optimize_loc)
        opt_struct = Structure.from_dict(optimize_doc["calcs_reversed"][0]["output"]["structure"])

        d = {"analysis": {}, "deformation_tasks": fw_spec["deformation_tasks"],
             "initial_structure": self['structure'].as_dict(),
             "optimized_structure": opt_struct.as_dict()}

        dtypes = fw_spec["deformation_tasks"].keys()
        defos = [fw_spec["deformation_tasks"][dtype]["deformation_matrix"]
                 for dtype in dtypes]
        stresses = [fw_spec["deformation_tasks"][dtype]["stress"] for dtype in dtypes]
        stress_dict = {IndependentStrain(defo) : Stress(stress) for defo, stress
                       in zip(defos, stresses)}

        logger.info("ANALYZING STRESS/STRAIN DATA")
        # DETERMINE IF WE HAVE 6 "UNIQUE" deformations
        if len(set([de[:3] for de in dtypes])) == 6:
            # Perform Elastic tensor fitting and analysis
            result = ElasticTensor.from_stress_dict(stress_dict)
            d["elastic_tensor"] = result.voigt.tolist()
            kg_average = result.kg_average
            d.update({"K_Voigt": kg_average[0], "G_Voigt": kg_average[1],
                      "K_Reuss": kg_average[2], "G_Reuss": kg_average[3],
                      "K_Voigt_Reuss_Hill": kg_average[4],
                      "G_Voigt_Reuss_Hill": kg_average[5]})
            d["universal_anisotropy"] = result.universal_anisotropy
            d["homogeneous_poisson"] = result.homogeneous_poisson

        else:
            raise ValueError("Fewer than 6 unique deformations")

        d["state"] = "successful"

        # Save analysis results in json or db
        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("elasticity.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = MMDb.from_db_file(db_file, admin=True)
            db.collection = db.db["elasticity"]
            db.collection.insert_one(d)
            logger.info("ELASTIC ANALYSIS COMPLETE")
        return FWAction()
Example #9
    def run_task(self, fw_spec):

        # load INCAR
        incar_name = self.get("input_filename", "INCAR")
        incar = Incar.from_file(incar_name)

        # process FireWork env values via env_chk
        incar_update = env_chk(self.get('incar_update'), fw_spec)
        incar_multiply = env_chk(self.get('incar_multiply'), fw_spec)
        incar_dictmod = env_chk(self.get('incar_dictmod'), fw_spec)

        if incar_update:
            incar.update(incar_update)

        if incar_multiply:
            for k in incar_multiply:
                incar[k] = incar[k] * incar_multiply[k]

        if incar_dictmod:
            apply_mod(incar_dictmod, incar)

        # write INCAR
        incar.write_file(self.get("output_filename", "INCAR"))
Example #10
    def run_task(self, fw_spec):
        btrap_dir = os.path.join(os.getcwd(), "boltztrap")
        bta = BoltztrapAnalyzer.from_files(btrap_dir)
        d = bta.as_dict()
        d["boltztrap_dir"] = btrap_dir

        # trim the output
        for x in ['cond', 'seebeck', 'kappa', 'hall', 'mu_steps',
                  'mu_doping', 'carrier_conc']:
            del d[x]

        if not self.get("hall_doping"):
            del d["hall_doping"]

        # add the structure
        bandstructure_dir = os.getcwd()
        v, o = get_vasprun_outcar(bandstructure_dir, parse_eigen=False,
                                  parse_dos=False)
        structure = v.final_structure
        d["structure"] = structure.as_dict()
        d.update(get_meta_from_structure(structure))
        d["bandstructure_dir"] = bandstructure_dir

        # add the spacegroup
        sg = SpacegroupAnalyzer(Structure.from_dict(d["structure"]), 0.1)
        d["spacegroup"] = {"symbol": sg.get_space_group_symbol(),
                           "number": sg.get_space_group_number(),
                           "point_group": sg.get_point_group_symbol(),
                           "source": "spglib",
                           "crystal_system": sg.get_crystal_system(),
                           "hall": sg.get_hall()}

        d["created_at"] = datetime.utcnow()

        db_file = env_chk(self.get('db_file'), fw_spec)

        if not db_file:
            with open(os.path.join(btrap_dir, "boltztrap.json"), "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            mmdb = MMDb.from_db_file(db_file, admin=True)

            # dos gets inserted into GridFS
            dos = json.dumps(d["dos"], cls=MontyEncoder)
            fsid, compression = mmdb.insert_gridfs(
                dos, collection="dos_boltztrap_fs", compress=True)
            d["dos_boltztrap_fs_id"] = fsid
            del d["dos"]

            mmdb.db.boltztrap.insert_one(d)
Example #11
    def run_task(self, fw_spec):

        # load INCAR
        incar_name = self.get("input_filename", "INCAR")
        incar = Incar.from_file(incar_name)

        # process FireWork env values via env_chk
        incar_update = env_chk(self.get('incar_update'), fw_spec)
        incar_multiply = env_chk(self.get('incar_multiply'), fw_spec)
        incar_dictmod = env_chk(self.get('incar_dictmod'), fw_spec)

        if incar_update:
            incar.update(incar_update)

        if incar_multiply:
            for k in incar_multiply:
                incar[k] = incar[k] * incar_multiply[k]

        if incar_dictmod:
            apply_mod(incar_dictmod, incar)

        # write INCAR
        incar.write_file(self.get("output_filename", "INCAR"))
Example #12
    def run_task(self, fw_spec):
        lammps_input = self["lammps_input"]
        diffusion_params = self.get("diffusion_params", {})

        # get the directory that contains the LAMMPS dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]

        # parse the directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))
        d = {}
        d["dir_name"] = os.path.abspath(os.getcwd())
        d["last_updated"] = datetime.today()
        d["input"] = lammps_input.as_dict()
        log_file = lammps_input.config_dict["log"]
        if isinstance(lammps_input.config_dict["dump"], list):
            dump_file = lammps_input.config_dict["dump"][0].split()[4]
        else:
            dump_file = lammps_input.config_dict["dump"].split()[4]
        is_forcefield = hasattr(lammps_input.lammps_data, "bonds_data")
        lammpsrun = LammpsRun(lammps_input.data_filename,
                              dump_file,
                              log_file,
                              is_forcefield=is_forcefield)
        d["natoms"] = lammpsrun.natoms
        d["nmols"] = lammpsrun.nmols
        d["box_lengths"] = lammpsrun.box_lengths
        d["mol_masses"] = lammpsrun.mol_masses
        d["mol_config"] = lammpsrun.mol_config
        if diffusion_params:
            diffusion_analyzer = lammpsrun.get_diffusion_analyzer(
                **diffusion_params)
            d["analysis"] = {"diffusion": diffusion_analyzer.get_summary_dict()}
        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            mmdb = MMLammpsDb.from_db_file(db_file)
            # insert the task document
            t_id = mmdb.insert(d)
            logger.info("Finished parsing with task_id: {}".format(t_id))
        return FWAction(stored_data={"task_id": d.get("task_id", None)})
Example #13
    def test_env_chk(self):
        fw_spec_valid = {"_fw_env": {"hello": "there"}}
        fw_spec_invalid = {}

        self.assertEqual(env_chk("hello", fw_spec_valid), "hello")
        self.assertEqual(env_chk([1, 2, 3], fw_spec_valid), [1, 2, 3])
        self.assertEqual(env_chk(defaultdict(int), fw_spec_valid),
                         defaultdict(int))
        self.assertEqual(env_chk(">>hello<<", fw_spec_valid), "there")
        self.assertRaises(KeyError, env_chk, ">>hello1<<", fw_spec_valid)
        self.assertEqual(env_chk(">>hello1<<", fw_spec_valid, False), None)

        self.assertRaises(KeyError, env_chk, ">>hello<<", fw_spec_invalid)
        self.assertEqual(env_chk(">>hello<<", fw_spec_invalid, False), None)
        self.assertEqual(env_chk(">>hello<<", fw_spec_invalid,
                                 False, "fallback"), "fallback")

        self.assertEqual(env_chk(None, fw_spec_valid, False), None)
        self.assertEqual(env_chk(None, fw_spec_valid, False, "fallback"),
                         "fallback")
Example #14
    def test_env_chk(self):
        fw_spec_valid = {"_fw_env": {"hello": "there"}}
        fw_spec_invalid = {}

        self.assertEqual(env_chk("hello", fw_spec_valid), "hello")
        self.assertEqual(env_chk([1, 2, 3], fw_spec_valid), [1, 2, 3])
        self.assertEqual(env_chk(defaultdict(int), fw_spec_valid),
                         defaultdict(int))
        self.assertEqual(env_chk(">>hello<<", fw_spec_valid), "there")
        self.assertRaises(KeyError, env_chk, ">>hello1<<", fw_spec_valid)
        self.assertEqual(env_chk(">>hello1<<", fw_spec_valid, False), None)

        self.assertRaises(KeyError, env_chk, ">>hello<<", fw_spec_invalid)
        self.assertEqual(env_chk(">>hello<<", fw_spec_invalid, False), None)
        self.assertEqual(
            env_chk(">>hello<<", fw_spec_invalid, False, "fallback"),
            "fallback")

        self.assertEqual(env_chk(None, fw_spec_valid, False), None)
        self.assertEqual(env_chk(None, fw_spec_valid, False, "fallback"),
                         "fallback")
Example #15
    def run_task(self, fw_spec):

        mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
        vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."),
                                             parse_dos=False,
                                             parse_eigen=False)

        my_entry = vasprun.get_computed_entry(inc_structure=False)
        stored_data = mpr.get_stability([my_entry])[0]

        if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
            return FWAction(stored_data=stored_data,
                            exit=True,
                            defuse_workflow=True)

        else:
            return FWAction(stored_data=stored_data)
Example #16
    def run_task(self, fw_spec):
        lammps_input = self["lammps_input"]
        diffusion_params = self.get("diffusion_params", {})

        # get the directory that contains the LAMMPS dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))
        d = {}
        d["dir_name"] = os.path.abspath(os.getcwd())
        d["last_updated"] = datetime.today()
        d["input"] = lammps_input.as_dict()
        log_file = lammps_input.config_dict["log"]
        if isinstance(lammps_input.config_dict["dump"], list):
            dump_file = lammps_input.config_dict["dump"][0].split()[4]
        else:
            dump_file = lammps_input.config_dict["dump"].split()[4]
        is_forcefield = hasattr(lammps_input.lammps_data, "bonds_data")
        lammpsrun = LammpsRun(lammps_input.data_filename, dump_file, log_file, is_forcefield=is_forcefield)
        d["natoms"] = lammpsrun.natoms
        d["nmols"] = lammpsrun.nmols
        d["box_lengths"] = lammpsrun.box_lengths
        d["mol_masses"] = lammpsrun.mol_masses
        d["mol_config"] = lammpsrun.mol_config
        if diffusion_params:
            diffusion_analyzer = lammpsrun.get_diffusion_analyzer(**diffusion_params)
            d["analysis"] = {"diffusion": diffusion_analyzer.get_summary_dict()}
        db_file = env_chk(self.get('db_file'), fw_spec)

        # db insertion
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            mmdb = MMDb.from_db_file(db_file, admin=True)
            # insert the task document
            t_id = mmdb.insert(d)
            logger.info("Finished parsing with task_id: {}".format(t_id))
        return FWAction(stored_data={"task_id": d.get("task_id", None)})
Example #17
    def run_task(self, fw_spec):

        from pymatgen.analysis.eos import EOS

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        summary_dict = {"eos": self["eos"]}

        mmdb = MMVaspDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one(
            {"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(
            d["calcs_reversed"][-1]["output"]['structure'])
        summary_dict["structure"] = structure.as_dict()

        # get the data (energy, volume) from the deformation runs
        docs = mmdb.collection.find({
            "task_label": {
                "$regex": "{} bulk_modulus*".format(tag)
            },
            "formula_pretty":
            structure.composition.reduced_formula
        })
        energies = []
        volumes = []
        for d in docs:
            s = Structure.from_dict(
                d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
        summary_dict["energies"] = energies
        summary_dict["volumes"] = volumes

        # fit the equation of state
        eos = EOS(self["eos"])
        eos_fit = eos.fit(volumes, energies)
        summary_dict["results"] = dict(eos_fit.results)

        with open("bulk_modulus.json", "w") as f:
            f.write(json.dumps(summary_dict, default=DATETIME_HANDLER))

        logger.info("BULK MODULUS CALCULATION COMPLETE")
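
A standalone sketch of the fitting step above, using pymatgen's EOS on synthetic (volume, energy) data; the numbers are illustrative only:

    from pymatgen.analysis.eos import EOS

    volumes = [13.7, 14.2, 14.7, 15.2, 15.7, 16.2, 16.7]          # A^3, made up
    energies = [-56.8, -57.3, -57.6, -57.7, -57.6, -57.4, -57.1]  # eV, made up
    eos_fit = EOS(eos_name="vinet").fit(volumes, energies)
    print(dict(eos_fit.results))  # fitted e0, b0, b1, v0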
Example #18
    def run_task(self, fw_spec):
        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)

        logger.info("Running VASP using exe: {}".format(vasp_cmd))
        return_code = subprocess.call(vasp_cmd, shell=True)
        logger.info("VASP finished running with returncode: {}".format(return_code))
Example #19
    def run_task(self, fw_spec):
        nm_norms = np.array(fw_spec["normalmodes"]["norms"])
        nm_eigenvals = np.array(fw_spec["normalmodes"]["eigenvals"])
        structure = fw_spec["normalmodes"]["structure"]
        masses = np.array(
            [site.specie.data['Atomic mass'] for site in structure])
        # the eigenvectors read from vasprun.xml are not divided by sqrt(M_i)
        nm_norms = nm_norms / np.sqrt(masses)

        # To get the actual eigenvals, the values read from vasprun.xml must be multiplied by -1.
        # frequency_i = sqrt(-e_i)
        # To convert the frequency to THZ: multiply sqrt(-e_i) by 15.633
        # To convert the frequency to cm^-1: multiply sqrt(-e_i) by 82.995
        nm_frequencies = np.sqrt(np.abs(nm_eigenvals)) * 82.995  # cm^-1

        d = {
            "structure": structure.as_dict(),
            "normalmodes": {
                "eigenvals": fw_spec["normalmodes"]["eigenvals"],
                "eigenvecs": fw_spec["normalmodes"]["eigenvecs"]
            },
            "frequencies": nm_frequencies.tolist()
        }

        mode_disps = fw_spec["raman_epsilon"].keys()
        # store the displacement & epsilon for each mode in a dictionary
        modes_eps_dict = defaultdict(list)
        for md in mode_disps:
            modes_eps_dict[fw_spec["raman_epsilon"][md]["mode"]].append([
                fw_spec["raman_epsilon"][md]["displacement"],
                fw_spec["raman_epsilon"][md]["epsilon"]
            ])

        # raman tensor = finite difference derivative of epsilon wrt displacement.
        raman_tensor_dict = {}
        scale = np.sqrt(structure.volume / 2.0) / 4.0 / np.pi
        for k, v in modes_eps_dict.items():
            raman_tensor = (np.array(v[0][1]) - np.array(v[1][1])) / (v[0][0] -
                                                                      v[1][0])
            # frequency in cm^-1
            omega = nm_frequencies[k]
            if nm_eigenvals[k] > 0:
                logger.warning("Mode: {} is UNSTABLE. Freq(cm^-1) = {}".format(
                    k, -omega))
            raman_tensor = scale * raman_tensor * np.sum(
                nm_norms[k]) / np.sqrt(omega)
            raman_tensor_dict[str(k)] = raman_tensor.tolist()

        d["raman_tensor"] = raman_tensor_dict
        d["state"] = "successful"

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("raman.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = MMVaspDb.from_db_file(db_file, admin=True)
            db.collection = db.db["raman"]
            db.collection.insert_one(d)
            logger.info("RAMAN TENSOR CALCULATION COMPLETE")
        logger.info("The frequencies are in the units of cm^-1")
        logger.info("To convert the frequency to THz: multiply by 0.1884")
        return FWAction()
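
The conversion factors in the comments above can be checked in isolation; a sketch with an illustrative eigenvalue (per the code above, a positive eigenvalue read from vasprun.xml marks an unstable mode, so a stable mode has a negative one):

    import numpy as np

    eigenval = -1.5                             # illustrative vasprun.xml value
    freq_cm = np.sqrt(abs(eigenval)) * 82.995   # frequency in cm^-1
    freq_thz = np.sqrt(abs(eigenval)) * 15.633  # frequency in THz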
Example #20
    def run_task(self, fw_spec):
        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
        logger.info("Running VASP using exe: {}".format(vasp_cmd))
        return_code = subprocess.call(vasp_cmd, shell=True)
        logger.info(
            "VASP finished running with returncode: {}".format(return_code))
Example #21
    def run_task(self, fw_spec):

        handler_groups = {
            "default": [
                VaspErrorHandler(),
                MeshSymmetryErrorHandler(),
                UnconvergedErrorHandler(),
                NonConvergingErrorHandler(),
                PotimErrorHandler(),
                PositiveEnergyErrorHandler(),
                FrozenJobErrorHandler()
            ],
            "strict": [
                VaspErrorHandler(),
                MeshSymmetryErrorHandler(),
                UnconvergedErrorHandler(),
                NonConvergingErrorHandler(),
                PotimErrorHandler(),
                PositiveEnergyErrorHandler(),
                FrozenJobErrorHandler(),
                AliasingErrorHandler()
            ],
            "md": [VaspErrorHandler(),
                   NonConvergingErrorHandler()],
            "no_handler": []
        }

        vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
        if isinstance(vasp_cmd, six.string_types):
            vasp_cmd = os.path.expandvars(vasp_cmd)
            vasp_cmd = shlex.split(vasp_cmd)

        # initialize variables
        job_type = self.get("job_type", "normal")
        scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
        gzip_output = self.get("gzip_output", True)
        max_errors = self.get("max_errors", 5)
        auto_npar = env_chk(self.get("auto_npar"),
                            fw_spec,
                            strict=False,
                            default=False)
        gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"),
                                 fw_spec,
                                 strict=False,
                                 default=None)
        if gamma_vasp_cmd:
            gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

        # construct jobs
        if job_type == "normal":
            jobs = [
                VaspJob(vasp_cmd,
                        auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)
            ]
        elif job_type == "double_relaxation_run":
            jobs = VaspJob.double_relaxation_run(vasp_cmd,
                                                 auto_npar=auto_npar,
                                                 ediffg=self.get("ediffg"),
                                                 half_kpts_first_relax=False)
        elif job_type == "full_opt_run":
            jobs = VaspJob.full_opt_run(vasp_cmd,
                                        auto_npar=auto_npar,
                                        ediffg=self.get("ediffg"),
                                        max_steps=5,
                                        half_kpts_first_relax=False)
        else:
            raise ValueError("Unsupported job type: {}".format(job_type))

        # construct handlers
        handlers = handler_groups[self.get("handler_group", "default")]

        if self.get("max_force_threshold"):
            handlers.append(
                MaxForceErrorHandler(
                    max_force_threshold=self["max_force_threshold"]))

        if self.get("wall_time"):
            handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

        validators = [VasprunXMLValidator()]

        c = Custodian(handlers,
                      jobs,
                      validators=validators,
                      max_errors=max_errors,
                      scratch_dir=scratch_dir,
                      gzipped_output=gzip_output)

        c.run()
Example #22
    def run_task(self, fw_spec):

        tag = self["tag"]
        db_file = env_chk(self.get("db_file"), fw_spec)
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        qha_type = self.get("qha_type", "debye_model")
        pressure = self.get("pressure", 0.0)
        gibbs_summary_dict = {}

        mmdb = MMVaspDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one(
            {"task_label": "{} structure optimization".format(tag)})
        structure = Structure.from_dict(
            d["calcs_reversed"][-1]["output"]['structure'])
        gibbs_summary_dict["structure"] = structure.as_dict()

        # get the data (energy, volume, force constants) from the deformation runs
        docs = mmdb.collection.find({
            "task_label": {
                "$regex": "{} gibbs*".format(tag)
            },
            "formula_pretty":
            structure.composition.reduced_formula
        })
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(
                d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            volumes.append(s.volume)
            force_constants.append(
                d["calcs_reversed"][-1]["output"]['force_constants'])
        gibbs_summary_dict["energies"] = energies
        gibbs_summary_dict["volumes"] = volumes
        gibbs_summary_dict["force_constants"] = force_constants

        G, T = None, None
        # use debye model
        if qha_type in ["debye_model"]:

            from matmethods.tools.analysis import get_debye_model_gibbs

            G, T = get_debye_model_gibbs(energies, volumes, structure, t_min,
                                         t_step, t_max, eos, pressure)

        # use the phonopy interface
        else:

            from matmethods.tools.analysis import get_phonopy_gibbs

            G, T = get_phonopy_gibbs(energies, volumes, force_constants,
                                     structure, t_min, t_step, t_max, mesh,
                                     eos, pressure)

        gibbs_summary_dict["G"] = G
        gibbs_summary_dict["T"] = T

        with open("gibbs.json", "w") as f:
            f.write(json.dumps(gibbs_summary_dict, default=DATETIME_HANDLER))
        logger.info("GIBBS FREE ENERGY CALCULATION COMPLETE")
Example #23
    def run_task(self, fw_spec):
        # get the directory that contains the VASP dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"],
                                    fw_spec["calc_locs"])["path"]

        # parse the VASP directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))
        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        drone = VaspDrone(additional_fields=self.get("additional_fields"),
                          parse_dos=self.get("parse_dos", False),
                          compress_dos=1,
                          bandstructure_mode=self.get("bandstructure_mode",
                                                      False),
                          compress_bs=1)

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(calc_dir)

        # Check for additional fields to add in the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        # db insertion
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = MMVaspDb.from_db_file(db_file, admin=True)

            # insert dos into GridFS
            if self.get("parse_dos") and "calcs_reversed" in task_doc:
                for idx, x in enumerate(task_doc["calcs_reversed"]):
                    if "dos" in task_doc["calcs_reversed"][idx]:
                        if idx == 0:  # only store most recent DOS
                            dos = json.dumps(
                                task_doc["calcs_reversed"][idx]["dos"],
                                cls=MontyEncoder)
                            gfs_id, compression_type = mmdb.insert_gridfs(
                                dos, "dos_fs")
                            task_doc["calcs_reversed"][idx][
                                "dos_compression"] = compression_type
                            task_doc["calcs_reversed"][idx][
                                "dos_fs_id"] = gfs_id
                        del task_doc["calcs_reversed"][idx]["dos"]

            # insert band structure into GridFS
            if self.get("bandstructure_mode") and "calcs_reversed" in task_doc:
                for idx, x in enumerate(task_doc["calcs_reversed"]):
                    if "bandstructure" in task_doc["calcs_reversed"][idx]:
                        if idx == 0:  # only store most recent band structure
                            bs = json.dumps(task_doc["calcs_reversed"][idx]
                                            ["bandstructure"],
                                            cls=MontyEncoder)
                            gfs_id, compression_type = mmdb.insert_gridfs(
                                bs, "bandstructure_fs")
                            task_doc["calcs_reversed"][idx][
                                "bandstructure_compression"] = compression_type
                            task_doc["calcs_reversed"][idx][
                                "bandstructure_fs_id"] = gfs_id
                        del task_doc["calcs_reversed"][idx]["bandstructure"]

            # insert the task document
            t_id = mmdb.insert(task_doc)

            logger.info("Finished parsing with task_id: {}".format(t_id))

        if self.get("defuse_unsuccessful", True):
            defuse_children = (task_doc["state"] != "successful")
        else:
            defuse_children = False

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=defuse_children)
Example #24
    def run_task(self, fw_spec):

        # Get optimized structure
        optimize_loc = fw_spec["calc_locs"][0]["path"]
        logger.info("PARSING INITIAL OPTIMIZATION DIRECTORY: {}".format(optimize_loc))
        drone = VaspDrone()
        optimize_doc = drone.assimilate(optimize_loc)
        opt_struct = Structure.from_dict(
            optimize_doc["calcs_reversed"][0]["output"]["structure"])
        
        deformations = fw_spec['deformations']
        d = {"analysis": {}, "deformation_tasks": {},
             "initial_structure": self['structure'].as_dict(), 
             "optimized_structure": opt_struct.as_dict()}
        stress_dict = {}

        dtypes = []
        for deformation in deformations:
            defo = deformation['deformation']
            d_ind = np.nonzero(defo - np.eye(3))
            delta = Decimal((defo - np.eye(3))[d_ind][0])
            # Shorthand is d_X_V, X is voigt index, V is value
            dtype = "_".join(["d", str(reverse_voigt_map[d_ind][0]),
                              "{:.0e}".format(delta)])
            strain = IndependentStrain(defo)
            stress = Stress(deformation['stress'])
            d["deformation_tasks"][dtype] = {'deformation_matrix': defo.tolist(),
                                             'strain': strain.tolist(),
                                             'stress': deformation['stress']}
            dtypes.append(dtype)
            stress_dict[strain] = stress

        logger.info("ANALYZING STRESS/STRAIN DATA")
        # DETERMINE IF WE HAVE 6 "UNIQUE" deformations
        if len(set([de[:3] for de in dtypes])) == 6:
            # Perform Elastic tensor fitting and analysis
            result = ElasticTensor.from_stress_dict(stress_dict)
            d["elastic_tensor"] = result.voigt.tolist()
            kg_average = result.kg_average
            d.update({"K_Voigt": kg_average[0], "G_Voigt": kg_average[1],
                      "K_Reuss": kg_average[2], "G_Reuss": kg_average[3],
                      "K_Voigt_Reuss_Hill": kg_average[4],
                      "G_Voigt_Reuss_Hill": kg_average[5]})
            d["universal_anisotropy"] = result.universal_anisotropy
            d["homogeneous_poisson"] = result.homogeneous_poisson

        else:
            raise ValueError("Fewer than 6 unique deformations")

        d["state"] = "successful"

        # Save analysis results in json or db
        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("elasticity.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = MMDb.from_db_file(db_file, admin=True)
            db.collection = db.db["elasticity"]
            db.collection.insert_one(d)
            logger.info("ELASTIC ANALYSIS COMPLETE")

        return FWAction()
Example #25
    def run_task(self, fw_spec):
        # get the directory that contains the VASP dir to parse
        calc_dir = os.getcwd()
        if "calc_dir" in self:
            calc_dir = self["calc_dir"]
        elif self.get("calc_loc"):
            calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]

        # parse the VASP directory
        logger.info("PARSING DIRECTORY: {}".format(calc_dir))
        # get the database connection
        db_file = env_chk(self.get('db_file'), fw_spec)

        drone = VaspDrone(additional_fields=self.get("additional_fields"),
                          parse_dos=self.get("parse_dos", False), compress_dos=1,
                          bandstructure_mode=self.get("bandstructure_mode", False), compress_bs=1)

        # assimilate (i.e., parse)
        task_doc = drone.assimilate(calc_dir)

        # Check for additional fields to add in the fw_spec
        if self.get("fw_spec_field"):
            task_doc.update(fw_spec[self.get("fw_spec_field")])

        # db insertion
        if not db_file:
            with open("task.json", "w") as f:
                f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
        else:
            mmdb = MMDb.from_db_file(db_file, admin=True)

            # insert dos into GridFS
            if self.get("parse_dos") and "calcs_reversed" in task_doc:
                for idx, x in enumerate(task_doc["calcs_reversed"]):
                    if "dos" in task_doc["calcs_reversed"][idx]:
                        if idx == 0:  # only store most recent DOS
                            dos = json.dumps(task_doc["calcs_reversed"][idx]["dos"], cls=MontyEncoder)
                            gfs_id, compression_type = mmdb.insert_gridfs(dos, "dos_fs")
                            task_doc["calcs_reversed"][idx]["dos_compression"] = compression_type
                            task_doc["calcs_reversed"][idx]["dos_fs_id"] = gfs_id
                        del task_doc["calcs_reversed"][idx]["dos"]

            # insert band structure into GridFS
            if self.get("bandstructure_mode") and "calcs_reversed" in task_doc:
                for idx, x in enumerate(task_doc["calcs_reversed"]):
                    if "bandstructure" in task_doc["calcs_reversed"][idx]:
                        if idx == 0:  # only store most recent band structure
                            bs = json.dumps(task_doc["calcs_reversed"][idx]["bandstructure"], cls=MontyEncoder)
                            gfs_id, compression_type = mmdb.insert_gridfs(bs, "bandstructure_fs")
                            task_doc["calcs_reversed"][idx]["bandstructure_compression"] = compression_type
                            task_doc["calcs_reversed"][idx]["bandstructure_fs_id"] = gfs_id
                        del task_doc["calcs_reversed"][idx]["bandstructure"]

            # insert the task document
            t_id = mmdb.insert(task_doc)

            logger.info("Finished parsing with task_id: {}".format(t_id))

        if self.get("defuse_unsuccessful", True):
            defuse_children = (task_doc["state"] != "successful")
        else:
            defuse_children = False

        return FWAction(stored_data={"task_id": task_doc.get("task_id", None)},
                        defuse_children=defuse_children)
Example #26
    def run_task(self, fw_spec):

        # Get optimized structure
        # TODO: will this find the correct path if the workflow is rerun from the start?
        optimize_loc = fw_spec["calc_locs"][0]["path"]
        logger.info(
            "PARSING INITIAL OPTIMIZATION DIRECTORY: {}".format(optimize_loc))
        drone = VaspDrone()
        optimize_doc = drone.assimilate(optimize_loc)
        opt_struct = Structure.from_dict(
            optimize_doc["calcs_reversed"][0]["output"]["structure"])

        d = {
            "analysis": {},
            "deformation_tasks": fw_spec["deformation_tasks"],
            "initial_structure": self['structure'].as_dict(),
            "optimized_structure": opt_struct.as_dict()
        }
        if fw_spec.get("tags", None):
            d["tags"] = fw_spec["tags"]
        dtypes = fw_spec["deformation_tasks"].keys()
        defos = [
            fw_spec["deformation_tasks"][dtype]["deformation_matrix"]
            for dtype in dtypes
        ]
        stresses = [
            fw_spec["deformation_tasks"][dtype]["stress"] for dtype in dtypes
        ]
        stress_dict = {
            IndependentStrain(defo): Stress(stress)
            for defo, stress in zip(defos, stresses)
        }

        logger.info("ANALYZING STRESS/STRAIN DATA")
        # DETERMINE IF WE HAVE 6 "UNIQUE" deformations
        if len(set([de[:3] for de in dtypes])) == 6:
            # Perform Elastic tensor fitting and analysis
            result = ElasticTensor.from_stress_dict(stress_dict)
            d["elastic_tensor"] = result.voigt.tolist()
            kg_average = result.kg_average
            d.update({
                "K_Voigt": kg_average[0],
                "G_Voigt": kg_average[1],
                "K_Reuss": kg_average[2],
                "G_Reuss": kg_average[3],
                "K_Voigt_Reuss_Hill": kg_average[4],
                "G_Voigt_Reuss_Hill": kg_average[5]
            })
            d["universal_anisotropy"] = result.universal_anisotropy
            d["homogeneous_poisson"] = result.homogeneous_poisson

        else:
            raise ValueError("Fewer than 6 unique deformations")

        d["state"] = "successful"

        # Save analysis results in json or db
        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("elasticity.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = MMVaspDb.from_db_file(db_file, admin=True)
            db.collection = db.db["elasticity"]
            db.collection.insert_one(d)
            logger.info("ELASTIC ANALYSIS COMPLETE")
        return FWAction()