Example #1
    @classmethod
    def from_dict(cls, d):
        """
        Reconstitute a DefectEntry object from a dict representation created using
        as_dict().
         Args:
            d (dict): dict representation of DefectEntry.
         Returns:
            DefectEntry object
        """
        defect = MontyDecoder().process_decoded(d["defect"])
        uncorrected_energy = d["uncorrected_energy"]
        corrections = d.get("corrections", None)
        parameters = d.get("parameters", None)
        entry_id = d.get("entry_id", None)

        return cls(
            defect,
            uncorrected_energy,
            corrections=corrections,
            parameters=parameters,
            entry_id=entry_id,
        )
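A minimal round-trip sketch of the pattern above (assuming an existing DefectEntry instance named `entry`; hypothetical usage, not part of the original source):

```
# Hypothetical usage: serialize to a dict, then reconstitute.
d = entry.as_dict()
restored = DefectEntry.from_dict(d)
assert restored.uncorrected_energy == entry.uncorrected_energy
```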
Example #2
 @classmethod
 def from_dict(cls, dd):
     dec = MontyDecoder()
     return cls(mp_symbol=dd['mp_symbol'],
                name=dd['name'],
                alternative_names=dd['alternative_names'],
                IUPAC_symbol=dd['IUPAC_symbol'],
                IUCr_symbol=dd['IUCr_symbol'],
                coordination=dd['coordination'],
                central_site=dd['central_site'],
                points=dd['points'],
                solid_angles=dd.get('solid_angles',
                                    [4.0 * np.pi / dd['coordination']]
                                    * dd['coordination']),
                deactivate=dd['deactivate'],
                faces=dd['_faces'],
                edges=dd['_edges'],
                algorithms=([dec.process_decoded(algo_d)
                             for algo_d in dd['_algorithms']]
                            if dd['_algorithms'] is not None else None),
                equivalent_indices=dd.get('equivalent_indices'),
                neighbors_sets_hints=([cls.NeighborsSetsHints.from_dict(nbshd)
                                       for nbshd in dd['neighbors_sets_hints']]
                                      if dd.get('neighbors_sets_hints') is not None
                                      else None))
Example #3
    def normalize(self, mode: Literal["formula_unit", "atom"] = "formula_unit") -> "ComputedEntry":
        """
        Normalize the entry's composition and energy.

        Args:
            mode ("formula_unit" | "atom"): "formula_unit" (the default) normalizes to composition.reduced_formula.
                "atom" normalizes such that the composition amounts sum to 1.
        """

        factor = self._normalization_factor(mode)
        new_composition = self._composition / factor
        new_energy = self._energy / factor

        new_entry_dict = self.as_dict()
        new_entry_dict["composition"] = new_composition.as_dict()
        new_entry_dict["energy"] = new_energy

        # TODO: make sure EnergyAdjustments are _also_ immutable to avoid this hacking
        new_energy_adjustments = MontyDecoder().process_decoded(new_entry_dict["energy_adjustments"])
        for ea in new_energy_adjustments:
            ea.normalize(factor)
        new_entry_dict["energy_adjustments"] = [ea.as_dict() for ea in new_energy_adjustments]

        return self.from_dict(new_entry_dict)
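For example, normalizing per atom divides both the composition and the energy by the total atom count; a sketch (the `ComputedEntry("Fe2O3", -10.0)` entry is hypothetical):

```
entry = ComputedEntry("Fe2O3", -10.0)   # 5 atoms per formula unit
per_atom = entry.normalize(mode="atom")
# Energy and composition are scaled by the same normalization factor.
assert abs(per_atom.energy - entry.energy / 5) < 1e-8
```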
Example #4
    def run_task(self, fw_spec):
        dec = MontyDecoder()
        jobs = dec.process_decoded(self["jobs"])
        fw_env = fw_spec.get("_fw_env", {})
        # Override VASP and gamma VASP commands using fw_env
        if fw_env.get("vasp_cmd"):
            for j in jobs:
                j.vasp_cmd = os.path.expandvars(fw_env["vasp_cmd"])
                logging.info("Vasp command is {}".format(j.vasp_cmd))
        if fw_env.get("gamma_vasp_cmd"):
            for j in jobs:
                j.gamma_vasp_cmd = os.path.expandvars(fw_env["gamma_vasp_cmd"])
                logging.info("Vasp gamma command is {}".format(
                    j.gamma_vasp_cmd))
        # Override custodian scratch dir.
        cust_params = self.get("custodian_params", {})
        if fw_env.get("scratch_root"):
            cust_params["scratch_dir"] = os.path.expandvars(
                fw_env["scratch_root"])

        logging.info("Running with custodian params %s" % cust_params)
        handlers = [
            VaspErrorHandler(),
            MeshSymmetryErrorHandler(),
            UnconvergedErrorHandler(),
            NonConvergingErrorHandler(),
            PotimErrorHandler()
        ]
        validators = [VasprunXMLValidator()]
        c = Custodian(handlers=handlers,
                      jobs=jobs,
                      validators=validators,
                      **cust_params)
        output = c.run()
        return FWAction(stored_data=output)
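A sketch of the `_fw_env` keys this task reads (values are hypothetical; paths are expanded with `os.path.expandvars`):

```
fw_spec = {
    "_fw_env": {
        "vasp_cmd": "$VASP_DIR/bin/vasp_std",
        "gamma_vasp_cmd": "$VASP_DIR/bin/vasp_gam",
        "scratch_root": "$SCRATCH",
    }
}
```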
Example #5
 @classmethod
 def from_dict(cls, d) -> "ComputedStructureEntry":
     """
     :param d: Dict representation.
     :return: ComputedStructureEntry
     """
     dec = MontyDecoder()
     # the first block here is for legacy ComputedEntry that were
     # serialized before we had the energy_adjustments attribute.
     if d["correction"] != 0 and not d.get("energy_adjustments"):
         return cls(
             dec.process_decoded(d["structure"]),
             d["energy"],
             d["correction"],
             parameters={
                 k: dec.process_decoded(v)
                 for k, v in d.get("parameters", {}).items()
             },
             data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
             entry_id=d.get("entry_id", None),
         )
     # this is the preferred / modern way of instantiating ComputedEntry
     # we don't pass correction explicitly because it will be calculated
     # on the fly from energy_adjustments
     return cls(
         dec.process_decoded(d["structure"]),
         d["energy"],
         correction=0,
         energy_adjustments=[
             dec.process_decoded(e) for e in d.get("energy_adjustments", [])
         ],
         parameters={
             k: dec.process_decoded(v) for k, v in d.get("parameters", {}).items()
         },
         data={k: dec.process_decoded(v) for k, v in d.get("data", {}).items()},
         entry_id=d.get("entry_id", None),
     )
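A round-trip sketch (assuming an existing ComputedStructureEntry named `entry`); legacy dicts that carry a nonzero `correction` but no `energy_adjustments` take the first branch:

```
# Hypothetical usage: as_dict()/from_dict() round trip.
restored = ComputedStructureEntry.from_dict(entry.as_dict())
assert abs(restored.energy - entry.energy) < 1e-8
```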
Example #6
    def setUp(self):
        with pytest.warns(
                FutureWarning,
                match="MaterialsProjectCompatibility will be updated"):
            self.temps = [300, 600, 900, 1200, 1500, 1800]
            self.struct = vasprun.final_structure
            self.num_atoms = self.struct.composition.num_atoms
            self.temp_entries = {
                temp: GibbsComputedStructureEntry(
                    self.struct,
                    -2.436 * self.num_atoms,
                    temp=temp,
                    parameters=vasprun.incar,
                    entry_id="test",
                )
                for temp in self.temps
            }

            with open(
                    os.path.join(PymatgenTest.TEST_FILES_DIR,
                                 "Mn-O_entries.json"), "r") as f:
                data = json.load(f)

            self.mp_entries = [MontyDecoder().process_decoded(d) for d in data]
Example #7
    def run_task(self, fw_spec):

        """
            Required Parameters:
                dir (str path): directory containing the vasp inputs
                jobs (VaspJob): Contains the cmd needed to run vasp
            Optional Parameters:
                custodian_params (dict **kwargs): Contains the job and the
                    scratch directory for a custodian run
                handlers (list of custodian handlers): Defaults to empty list
        """

        dec = MontyDecoder()
        # Avoid shadowing the builtin `dir`
        dir_name = dec.process_decoded(self['dir'])
        cwd = dec.process_decoded(self['cwd'])

        # Change to the directory with the vasp inputs to run custodian
        os.chdir(os.path.join(cwd, dir_name))

        handlers = dec.process_decoded(self.get('handlers', []))
        jobs = dec.process_decoded(self['jobs'])
        max_errors = dec.process_decoded(self['max_errors'])

        fw_env = fw_spec.get("_fw_env", {})
        cust_params = self.get("custodian_params", {})

        # Get the scratch directory
        if fw_env.get('scratch_root'):
            cust_params['scratch_dir'] = os.path.expandvars(
                fw_env['scratch_root'])

        c = Custodian(handlers=handlers, jobs=jobs, max_errors=max_errors, gzipped_output=True, **cust_params)

        output = c.run()

        return FWAction(stored_data=output)
Example #8
    @classmethod
    def from_spec(cls, spec):
        """
        Load a Custodian instance where the jobs are specified from a
        structure and a spec dict. This allows simple
        custom job sequences to be constructed quickly via a YAML file.

        Args:
            spec (dict): A dict specifying the job sequence. A sample of the dict in
                YAML format for the usual MP workflow is given as follows

                ```
                jobs:
                - jb: custodian.vasp.jobs.VaspJob
                  params:
                    final: False
                    suffix: .relax1
                - jb: custodian.vasp.jobs.VaspJob
                  params:
                    final: True
                    suffix: .relax2
                    settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
                jobs_common_params:
                  vasp_cmd: /opt/vasp
                handlers:
                - hdlr: custodian.vasp.handlers.VaspErrorHandler
                - hdlr: custodian.vasp.handlers.AliasingErrorHandler
                - hdlr: custodian.vasp.handlers.MeshSymmetryHandler
                validators:
                - vldr: custodian.vasp.validators.VasprunXMLValidator
                custodian_params:
                  scratch_dir: /tmp
                ```

                The `jobs` key is a list of jobs. Each job is
                specified via `jb`: <explicit path>, and all parameters are
                specified via `params`, which is a dict.

                `jobs_common_params` specifies a common set of parameters
                that are passed to all jobs, e.g., vasp_cmd.

        Returns:
            Custodian instance.
        """

        dec = MontyDecoder()

        def load_class(dotpath):
            modname, classname = dotpath.rsplit(".", 1)
            mod = __import__(modname, globals(), locals(), [classname], 0)
            return getattr(mod, classname)

        def process_params(d):
            decoded = {}
            for k, v in d.items():
                if k.startswith("$"):
                    if isinstance(v, list):
                        v = [os.path.expandvars(i) for i in v]
                    elif isinstance(v, dict):
                        v = {
                            k2: os.path.expandvars(v2)
                            for k2, v2 in v.items()
                        }
                    else:
                        v = os.path.expandvars(v)
                decoded[k.strip("$")] = dec.process_decoded(v)
            return decoded

        jobs = []
        common_params = process_params(spec.get("jobs_common_params", {}))

        for d in spec["jobs"]:
            cls_ = load_class(d["jb"])
            params = process_params(d.get("params", {}))
            params.update(common_params)
            jobs.append(cls_(**params))

        handlers = []
        for d in spec.get("handlers", []):
            cls_ = load_class(d["hdlr"])
            params = process_params(d.get("params", {}))
            handlers.append(cls_(**params))

        validators = []
        for d in spec.get("validators", []):
            cls_ = load_class(d["vldr"])
            params = process_params(d.get("params", {}))
            validators.append(cls_(**params))

        custodian_params = process_params(spec.get("custodian_params", {}))

        return cls(jobs=jobs,
                   handlers=handlers,
                   validators=validators,
                   **custodian_params)
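A minimal spec sketch mirroring the docstring above (paths are hypothetical):

```
spec = {
    "jobs": [
        {"jb": "custodian.vasp.jobs.VaspJob",
         "params": {"final": True, "suffix": ".relax1"}},
    ],
    "jobs_common_params": {"vasp_cmd": "/opt/vasp"},
    "handlers": [{"hdlr": "custodian.vasp.handlers.VaspErrorHandler"}],
    "custodian_params": {"scratch_dir": "/tmp"},
}
c = Custodian.from_spec(spec)
```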
Example #9
 def __setstate__(self, d):
     del d["@class"]
     del d["@module"]
     md = MontyDecoder()
     d = md.process_decoded(d)
     self.__init__(**d)
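The effect is that unpickling re-runs `__init__` on the MSON-decoded state; a sketch, assuming the owning class also defines a matching `__getstate__` that returns `as_dict()`:

```
import pickle

# Hypothetical round trip for an MSONable object `obj` using the
# __setstate__ above: the decoded dict is fed back through __init__.
clone = pickle.loads(pickle.dumps(obj))
```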
Example #10
    @classmethod
    def from_dict(cls, d):
        if isinstance(d["model"], str):
            d["model"] = dynamic_import(d["model"])

        decoder = MontyDecoder()
        return cls(**{k: decoder.process_decoded(v) for k, v in d.items()})
Example #11
 def __setstate__(self, d):
     d = {k: v for k, v in d.items() if not k.startswith("@")}
     d = MontyDecoder().process_decoded(d)
     self.__init__(**d)
Example #12
 @classmethod
 def from_dict(cls, d):
     dec = MontyDecoder()
     decoded = {
         k: dec.process_decoded(v)
         for k, v in d.items() if not k.startswith("@")
     }
     return cls(**decoded)
Example #13
 @classmethod
 def from_dict(cls, d):
     entry = MontyDecoder().process_decoded(d["entry"])
     return cls(d["composition"], entry)
Example #14
    def test_from_dict_from_json(self):
        # Test with non-canonical symbol
        for original_quantity in chain.from_iterable(self.quantities_custom_symbol.values()):
            q = StorageQuantity.from_quantity(original_quantity)
            d = q.as_dict()
            q_from_dict = StorageQuantity.from_dict(d)
            self.assertIsInstance(q_from_dict, StorageQuantity)
            self.assertEqual(q_from_dict._data_type, "NumQuantity")
            self.assertEqual(q_from_dict.symbol, q.symbol)
            self.assertTrue(np.isclose(q_from_dict.value, q.magnitude))
            self.assertEqual(q_from_dict.units, q.units)
            self.assertListEqual(q_from_dict.tags, q.tags)
            self.assertEqual(q_from_dict, q)
            self.rec_provenance_tree_check(q_from_dict.provenance, original_quantity.provenance)

            json_dict = jsanitize(q, strict=True)
            q_from_json_dict = MontyDecoder().process_decoded(json_dict)
            self.assertIsInstance(q_from_json_dict, StorageQuantity)
            self.assertEqual(q_from_json_dict._data_type, "NumQuantity")
            self.assertEqual(q_from_json_dict.symbol, q.symbol)
            self.assertTrue(np.isclose(q_from_json_dict.value, q.magnitude))
            self.assertEqual(q_from_json_dict.units, q.units)
            self.assertListEqual(q_from_json_dict.tags, q.tags)
            self.assertEqual(q_from_json_dict.provenance, original_quantity.provenance)
            self.assertEqual(q_from_json_dict, q)
            self.rec_provenance_tree_check(q_from_json_dict.provenance, original_quantity.provenance,
                                           from_dict=True)

        # Test with canonical symbol
        for original_quantity in chain.from_iterable(self.quantities_canonical_symbol.values()):
            q = StorageQuantity.from_quantity(original_quantity)
            d = q.as_dict()
            q_from_dict = StorageQuantity.from_dict(d)
            self.assertIsInstance(q_from_dict, StorageQuantity)
            self.assertEqual(q_from_dict._data_type, "NumQuantity")
            self.assertEqual(q_from_dict.symbol, q.symbol)
            self.assertTrue(np.isclose(q_from_dict.value, q.magnitude))
            self.assertEqual(q_from_dict.units, q.units)
            self.assertListEqual(q_from_dict.tags, q.tags)
            self.assertEqual(q_from_dict, q)
            self.rec_provenance_tree_check(q_from_dict.provenance, original_quantity.provenance)

            json_dict = jsanitize(q, strict=True)
            q_from_json_dict = MontyDecoder().process_decoded(json_dict)
            self.assertIsInstance(q_from_json_dict, StorageQuantity)
            self.assertEqual(q_from_json_dict._data_type, "NumQuantity")
            self.assertEqual(q_from_json_dict.symbol, q.symbol)
            self.assertTrue(np.isclose(q_from_json_dict.magnitude, q.magnitude))
            self.assertEqual(q_from_json_dict.units, q.units)
            self.assertListEqual(q_from_json_dict.tags, q.tags)
            self.assertEqual(q_from_json_dict.provenance, original_quantity.provenance)
            self.assertEqual(q_from_json_dict, q)
            self.rec_provenance_tree_check(q_from_json_dict.provenance, original_quantity.provenance,
                                           from_dict=True)

        # Test with quantity with uncertainty, custom symbol
        original_quantity = self.quantity_with_uncertainty
        q = StorageQuantity.from_quantity(original_quantity)
        d = q.as_dict()
        q_from_dict = StorageQuantity.from_dict(d)
        self.assertIsInstance(q_from_dict, StorageQuantity)
        self.assertEqual(q_from_dict._data_type, "NumQuantity")
        self.assertEqual(q_from_dict.symbol, q.symbol)
        self.assertTrue(np.isclose(q_from_dict.value, q.magnitude))
        self.assertEqual(q_from_dict.units, q.units)
        self.assertTrue(np.isclose(q_from_dict.uncertainty, q.uncertainty))
        self.assertListEqual(q_from_dict.tags, q.tags)
        self.assertEqual(q_from_dict, q)
        self.rec_provenance_tree_check(q_from_dict.provenance, original_quantity.provenance)

        json_dict = jsanitize(q, strict=True)
        q_from_json_dict = MontyDecoder().process_decoded(json_dict)
        self.assertIsInstance(q_from_json_dict, StorageQuantity)
        self.assertEqual(q_from_json_dict._data_type, "NumQuantity")
        self.assertEqual(q_from_json_dict.symbol, q.symbol)
        self.assertTrue(np.isclose(q_from_json_dict.magnitude, q.magnitude))
        self.assertEqual(q_from_json_dict.units, q.units)
        self.assertTrue(np.isclose(q_from_json_dict.uncertainty, q.uncertainty))
        self.assertListEqual(q_from_json_dict.tags, q.tags)
        self.assertEqual(q_from_json_dict.provenance, original_quantity.provenance)
        self.assertEqual(q_from_json_dict, q)
        self.rec_provenance_tree_check(q_from_json_dict.provenance, original_quantity.provenance,
                                       from_dict=True)

        # Test with object quantity
        original_quantity = self.object_quantity
        q = StorageQuantity.from_quantity(original_quantity)
        d = q.as_dict()
        q_from_dict = StorageQuantity.from_dict(d)
        self.assertIsInstance(q_from_dict, StorageQuantity)
        self.assertEqual(q_from_dict._data_type, "ObjQuantity")
        self.assertEqual(q_from_dict.symbol, q.symbol)
        self.assertEqual(q_from_dict.value, q.value)
        self.assertListEqual(q_from_dict.tags, q.tags)
        self.assertEqual(q_from_dict, q)
        self.rec_provenance_tree_check(q_from_dict.provenance, original_quantity.provenance)

        json_dict = jsanitize(q, strict=True)
        q_from_json_dict = MontyDecoder().process_decoded(json_dict)
        self.assertIsInstance(q_from_json_dict, StorageQuantity)
        self.assertEqual(q_from_json_dict._data_type, "ObjQuantity")
        self.assertEqual(q_from_json_dict.symbol, q.symbol)
        self.assertEqual(q_from_json_dict.value, q.value)
        self.assertListEqual(q_from_json_dict.tags, q.tags)
        self.assertEqual(q_from_json_dict, q)
        self.rec_provenance_tree_check(q_from_json_dict.provenance, original_quantity.provenance,
                                       from_dict=True)
Example #15
 @classmethod
 def from_dict(cls, d):
     decoded_entries = MontyDecoder().process_decoded(d['entries'])
     return cls(decoded_entries, d.get('comp_dict'),
                d.get('conc_dict'))
Example #16
 @classmethod
 def from_dict(cls, d):
     dec = MontyDecoder()
     entries = dec.process_decoded(d["original_entries"])
     terminal_compositions = dec.process_decoded(d["terminal_compositions"])
     return cls(entries, terminal_compositions,
                d["normalize_terminal_compositions"])
Example #17
 @classmethod
 def from_dict(cls, d):
     dec = MontyDecoder()
     entries = dec.process_decoded(d["all_entries"])
     elements = dec.process_decoded(d["elements"])
     return cls(entries, d["chempots"], elements)
Example #18
    def process_item(self, item):
        # Define quantities corresponding to materials doc fields
        # Attach quantities to materials
        item = MontyDecoder().process_decoded(item)
        logger.info("Populating material for %s", item['task_id'])
        material = Material()

        if 'created_at' in item.keys():
            date_created = item['created_at']
        else:
            date_created = ""

        provenance = ProvenanceElement(
            source={
                "source": self.source_name,
                "source_key": item['task_id'],
                "date_created": date_created
            })

        for mkey, property_name in self.materials_symbol_map.items():
            value = get(item, mkey)
            if value:
                material.add_quantity(
                    QuantityFactory.create_quantity(property_name,
                                                    value,
                                                    provenance=provenance))

        # Add custom things, e.g., computed entry
        computed_entry = get_entry(item)
        material.add_quantity(
            QuantityFactory.create_quantity("computed_entry",
                                            computed_entry,
                                            provenance=provenance))
        material.add_quantity(
            QuantityFactory.create_quantity("external_identifier_mp",
                                            item['task_id'],
                                            provenance=provenance))

        input_quantities = material.get_quantities()

        # Use graph to generate expanded quantity pool
        logger.info("Evaluating graph for %s", item['task_id'])
        graph = Graph()
        graph.remove_models({
            "dimensionality_cheon":
            DEFAULT_MODEL_DICT['dimensionality_cheon'],
            "dimensionality_gorai":
            DEFAULT_MODEL_DICT['dimensionality_gorai']
        })
        new_material = graph.evaluate(material)

        # Format document and return
        logger.info("Creating doc for %s", item['task_id'])
        # Gives the initial inputs that were used to derive properties of a
        # certain material.

        doc = {
            "inputs":
            [StorageQuantity.from_quantity(q) for q in input_quantities]
        }
        for symbol, quantity in new_material.get_aggregated_quantities().items():
            all_qs = new_material._symbol_to_quantity[symbol]
            # Only add new quantities
            # TODO: Condition insufficiently general.
            #       Can end up with initial quantities added as "new quantities"
            if len(all_qs) == 1 and list(all_qs)[0] in input_quantities:
                continue
            # Write out all quantities as dicts including the
            # internal ID for provenance tracing
            qs = [StorageQuantity.from_quantity(q).as_dict() for q in all_qs]
            # The listing of all Quantities of a given symbol.
            sub_doc = {
                "quantities": qs,
                "mean": unumpy.nominal_values(quantity.value).tolist(),
                "std_dev": unumpy.std_devs(quantity.value).tolist(),
                "units":
                quantity.units.format_babel() if quantity.units else None,
                "title": quantity._symbol_type.display_names[0]
            }
            # Symbol Name -> Sub_Document, listing all Quantities of that type.
            doc[symbol.name] = sub_doc

        doc.update({
            "task_id": item["task_id"],
            "pretty_formula": item["pretty_formula"]
        })
        return jsanitize(doc, strict=True)
Example #19
 @classmethod
 def from_dict(cls, d):
     dec = MontyDecoder()
     reactants = [dec.process_decoded(e) for e in d["reactants"]]
     products = [dec.process_decoded(e) for e in d["products"]]
     return cls(reactants, products)
Example #20
 @classmethod
 def from_dict(cls, d):
     dec = MontyDecoder()
     return cls(dec.process_decoded(d["voltage_pairs"]),
                dec.process_decoded(d["working_ion_entry"]),
                Composition(d["initial_comp"]))
Example #21
 @classmethod
 def from_dict(cls, d):
     chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
     entry = MontyDecoder().process_decoded(d["entry"])
     return cls(entry, chempots, d["name"])
Example #22
    def test_value_lookup_to_quantity(self):
        def rec_verify_lookup(p_lookup, p_original):
            self.assertIsInstance(p_lookup, ProvenanceElement)
            for v in p_lookup.inputs or []:
                self.assertIsInstance(v, BaseQuantity)
                v_orig = [x for x in p_original.inputs
                          if x._internal_id == v._internal_id]
                self.assertEqual(len(v_orig), 1)
                v_orig = v_orig[0]
                self.assertIsNotNone(v.value)
                if isinstance(v, NumQuantity):
                    self.assertTrue(np.isclose(v.value, v_orig.value))
                    if v_orig.uncertainty:
                        self.assertTrue(np.isclose(v.uncertainty, v_orig.uncertainty))
                else:
                    self.assertEqual(v.value, v_orig.value)
                rec_verify_lookup(v.provenance, v_orig.provenance)

        lookup_dict = self.get_lookup_dict()
        lookup_fun = self.lookup_fun

        quantities = (
            list(chain.from_iterable(self.quantities_custom_symbol.values())) +
            list(chain.from_iterable(self.quantities_canonical_symbol.values())) +
            [self.quantity_with_uncertainty, self.object_quantity]
        )

        for q in quantities:
            json_dict = jsanitize(StorageQuantity.from_quantity(q), strict=True)
            sq_json = MontyDecoder().process_decoded(json_dict)
            if sq_json.provenance.inputs:
                for v in sq_json.provenance.inputs:
                    self.assertIsNone(v.value)
            q_json_dict = sq_json.to_quantity(lookup=lookup_dict)
            q_json_fun = sq_json.to_quantity(lookup=lookup_fun)
            q_json_reconstruct_dict = StorageQuantity.reconstruct_quantity(json_dict, lookup_dict)
            q_json_reconstruct_fun = StorageQuantity.reconstruct_quantity(json_dict, lookup_fun)
            for q_json in (q_json_dict, q_json_fun, q_json_reconstruct_dict, q_json_reconstruct_fun):
                self.assertIsInstance(q_json, type(q))
                if isinstance(q_json, NumQuantity):
                    self.assertTrue(np.isclose(q_json.value, q.value))
                    if q.uncertainty:
                        self.assertTrue(np.isclose(q_json.uncertainty, q.uncertainty))
                else:
                    self.assertEqual(q_json.value, q.value)
                rec_verify_lookup(q_json.provenance, q.provenance)

            if q.provenance.inputs:
                with self.assertRaises(ValueError):
                    # Needs lookup but doesn't get a lookup container
                    sq_json.to_quantity()
                with self.assertRaises(ValueError):
                    # Needs lookup but doesn't get a lookup container
                    StorageQuantity.reconstruct_quantity(json_dict)

                with self.assertRaises(ValueError):
                    sq_json.to_quantity(lookup=self.lookup_fun_missing_value)

                with self.assertRaises(ValueError):
                    StorageQuantity.reconstruct_quantity(json_dict,
                                                         lookup=self.lookup_fun_missing_value)

                with self.assertRaises(TypeError):
                    sq_json.to_quantity(lookup=self.lookup_fun_incorrect_type)

                with self.assertRaises(TypeError):
                    StorageQuantity.reconstruct_quantity(json_dict,
                                                         lookup=self.lookup_fun_incorrect_type)

                key = q.provenance.inputs[0]._internal_id
                key_lookup = lookup_dict.pop(key)
                with self.assertRaises(ValueError):
                    sq_json.to_quantity(lookup=lookup_dict)
                with self.assertRaises(ValueError):
                    StorageQuantity.reconstruct_quantity(json_dict, lookup=lookup_dict)
                with self.assertRaises(ValueError):
                    sq_json.to_quantity(lookup=self.lookup_fun_key_not_found)
                with self.assertRaises(ValueError):
                    StorageQuantity.reconstruct_quantity(json_dict, lookup=self.lookup_fun_key_not_found)
                lookup_dict[key] = key_lookup

                with self.assertRaises(TypeError):
                    sq_json.to_quantity(lookup='This is not a lookup')
                with self.assertRaises(TypeError):
                    StorageQuantity.reconstruct_quantity(json_dict, lookup='This is not a lookup')
Example #23
def get_wf_from_spec_dict(structure, wfspec, common_param_updates=None):
    """
    Load a WF from a structure and a spec dict. This allows simple
    custom workflows to be constructed quickly via a YAML file.

    Args:
        structure (Structure): An input structure object.
        wfspec (dict): A dict specifying workflow. A sample of the dict in
            YAML format for the usual MP workflow is given as follows:

            ```
            fireworks:
            - fw: atomate.vasp.fireworks.core.OptimizeFW
            - fw: atomate.vasp.fireworks.core.StaticFW
              params:
                parents: 0
            - fw: atomate.vasp.fireworks.core.NonSCFUniformFW
              params:
                parents: 1
            - fw: atomate.vasp.fireworks.core.NonSCFLineFW
              params:
                parents: 1
            common_params:
              db_file: db.json
              $vasp_cmd: $HOME/opt/vasp
            name: bandstructure
            metadata:
                tag: testing_workflow
            ```

            The `fireworks` key is a list of Fireworks; it is expected that
            all such Fireworks have "structure" as the first argument and
            other optional arguments following that. Each Firework is specified
            via "fw": <explicit path>.

            You can pass arguments into the constructor using the special
            keyword `params`, which is a dict. Any param starting with a $ will
            be expanded using environment variables. If multiple fireworks share
            the same `params`, you can use `common_params` to specify a common
            set of arguments that are passed to all fireworks. Local params
            take precedence over global params.

            Another special keyword is `parents`, which provides
            the *indices* of the parents of that particular Firework in the
            list. This allows you to link the Fireworks into a logical
            workflow.

            Finally, `name` is used to set the Workflow name
            (structure formula + name) which can be helpful in record keeping.
        common_param_updates (dict): A dict specifying any user-specified updates to common_params

    Returns:
        Workflow
    """

    dec = MontyDecoder()

    def process_params(d):
        decoded = {}
        for k, v in d.items():
            if k.startswith("$"):
                if isinstance(v, list):
                    v = [os.path.expandvars(i) for i in v]
                elif isinstance(v, dict):
                    v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()}
                else:
                    v = os.path.expandvars(v)
            decoded[k.strip("$")] = dec.process_decoded(v)
        return decoded

    fws = []
    common_params = process_params(wfspec.get("common_params", {}))
    if common_param_updates:
        common_params.update(common_param_updates)
    for d in wfspec["fireworks"]:
        modname, classname = d["fw"].rsplit(".", 1)
        cls_ = load_class(modname, classname)
        params = process_params(d.get("params", {}))
        for k in common_params:
            if k not in params:  # common params don't override local params
                params[k] = common_params[k]
        if "parents" in params:
            if isinstance(params["parents"], int):
                params["parents"] = fws[params["parents"]]
            else:
                p = []
                for parent_idx in params["parents"]:
                    p.append(fws[parent_idx])
                params["parents"] = p
        fws.append(cls_(structure=structure, **params))

    wfname = "{}:{}".format(structure.composition.reduced_formula, wfspec["name"]) if \
        wfspec.get("name") else structure.composition.reduced_formula

    return Workflow(fws, name=wfname, metadata=wfspec.get("metadata"))
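A minimal invocation sketch (a hypothetical two-Firework spec, assuming an existing `structure`):

```
wfspec = {
    "fireworks": [
        {"fw": "atomate.vasp.fireworks.core.OptimizeFW"},
        {"fw": "atomate.vasp.fireworks.core.StaticFW",
         "params": {"parents": 0}},
    ],
    "common_params": {"db_file": "db.json"},
    "name": "bandstructure",
}
wf = get_wf_from_spec_dict(structure, wfspec)
```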
Example #24
from datetime import datetime
from enum import Enum
from typing import List

from monty.json import MontyDecoder
from pydantic import BaseModel, Field, validator

from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.io.vasp import Incar, Poscar, Kpoints, Potcar
from pymatgen.core.trajectory import Trajectory

monty_decoder = MontyDecoder()


class TaskType(str, Enum):
    """
    The type of calculation
    """

    EMPTY = ""
    GGA_NSCF_Line = "GGA NSCF Line"
    GGA_NSCF_Uniform = "GGA NSCF Uniform"
    GGA_Static = "GGA Static"
    GGA_Structure_Optimization = "GGA Structure Optimization"
    GGA_U_NSCF_Line = "GGA+U NSCF Line"
    GGA_U_NSCF_Uniform = "GGA+U NSCF Uniform"
    GGA_U_Static = "GGA+U Static"
    GGA_U_Structure_Optimization = "GGA+U Structure Optimization"
Example #25

import datetime
import json
import os
import re
from warnings import warn

from monty.json import MontyDecoder, MSONable, jsanitize

from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.sets import MPRelaxSet

dec = MontyDecoder()


class TransformedStructure(MSONable):
    """
    Container object for new structures that include history of
    transformations.

    Each transformed structure is made up of a sequence of structures with
    associated transformation history.
    """
    def __init__(self,
                 structure,
                 transformations=None,
                 history=None,
                 other_parameters=None):
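A usage sketch for the container (assuming an existing Structure named `struct`; SupercellTransformation is one standard pymatgen transformation):

```
from pymatgen.transformations.standard_transformations import (
    SupercellTransformation,
)

ts = TransformedStructure(
    struct,
    transformations=[SupercellTransformation.from_scaling_factors(2, 2, 2)],
)
final = ts.final_structure  # structure after applying all transformations
```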
Example #26
    def process(self, item):
        if self.graph_parallel and not self.allow_child_process and \
                current_process().name != "MainProcess":
            logger.warning(
                "It appears derive_quantities() is running "
                "in a child process, possibly in a parallelized "
                "Runner.\nThis is not recommended and will deteriorate "
                "performance.")
        # Define quantities corresponding to materials doc fields
        # Attach quantities to materials
        item = MontyDecoder().process_decoded(item)
        logger.info("Populating material for %s", item['task_id'])
        material = Material()

        if 'created_at' in item.keys():
            date_created = item['created_at']
        else:
            date_created = None

        provenance = ProvenanceElement(
            source={
                "source": self.source_name,
                "source_key": item['task_id'],
                "date_created": date_created
            })

        for mkey, property_name in self.materials_symbol_map.items():
            value = pydash.get(item, mkey)
            if value:
                material.add_quantity(
                    QuantityFactory.create_quantity(
                        property_name,
                        value,
                        units=Registry("units").get(property_name, None),
                        provenance=provenance))

        # Add custom things, e.g., computed entry
        computed_entry = get_entry(item)
        if computed_entry:
            material.add_quantity(
                QuantityFactory.create_quantity("computed_entry",
                                                computed_entry,
                                                provenance=provenance))
        else:
            logger.info("Unable to create computed entry for {}".format(
                item['task_id']))
        material.add_quantity(
            QuantityFactory.create_quantity("external_identifier_mp",
                                            item['task_id'],
                                            provenance=provenance))

        input_quantities = material.symbol_quantities_dict

        # Use graph to generate expanded quantity pool
        logger.info("Evaluating graph for %s", item['task_id'])

        new_material = self._graph_evaluator.evaluate(
            material, timeout=self.graph_timeout)

        # Format document and return
        logger.info("Creating doc for %s", item['task_id'])
        # Gives the initial inputs that were used to derive properties of a
        # certain material.

        doc = {
            "inputs": [
                StorageQuantity.from_quantity(q)
                for q in chain.from_iterable(input_quantities.values())
            ]
        }

        for symbol, quantities in new_material.symbol_quantities_dict.items():
            # If no new quantities of a given symbol were derived (i.e. the
            # initial input quantities are the only ones listed in the new
            # material), don't add that symbol to the propnet entry document
            # as a derived quantity.
            if len(quantities) == len(input_quantities[symbol]):
                continue
            sub_doc = {}
            try:
                # Write out all quantities as dicts including the
                # internal ID for provenance tracing
                qs = [
                    jsanitize(StorageQuantity.from_quantity(q), strict=True)
                    for q in quantities
                ]
            except AttributeError as ex:
                # Check to see if this is an error caused by an object
                # that is not JSON serializable
                msg = ex.args[0]
                if "object has no attribute 'as_dict'" in msg:
                    # Write error to db and logger
                    errmsg = "Quantity of Symbol '{}' is not ".format(symbol.name) + \
                        "JSON serializable. Cannot write quantities to database!"
                    logger.error(errmsg)
                    sub_doc['error'] = errmsg
                    qs = []
                else:
                    # If not, re-raise the error
                    raise ex
            sub_doc['quantities'] = qs
            doc[symbol.name] = sub_doc

        aggregated_quantities = new_material.get_aggregated_quantities()

        for symbol, quantity in aggregated_quantities.items():
            if symbol.name not in doc:
                # No new quantities were derived
                continue
            # Store mean and std dev for aggregated quantities
            sub_doc = {
                "mean": unumpy.nominal_values(quantity.value).tolist(),
                "std_dev": unumpy.std_devs(quantity.value).tolist(),
                "units":
                quantity.units.format_babel() if quantity.units else None,
                "title": quantity.symbol.display_names[0]
            }
            # Symbol Name -> Sub_Document, listing all Quantities of that type.
            doc[symbol.name].update(sub_doc)

        doc.update({
            "task_id": item["task_id"],
            "pretty_formula": item.get("pretty_formula"),
            "deprecated": item.get("deprecated", False)
        })

        if self.include_sandboxed:
            doc.update({'sbxn': item.get("sbxn", [])})

        return jsanitize(doc, strict=True)
Example #27
 def last_updated_dict_ok(cls, v):
     return MontyDecoder().process_decoded(v)
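In context, this is typically attached as a pydantic (v1-style) field validator so serialized datetimes are decoded on model construction; a sketch with a hypothetical `last_updated` field:

```
from datetime import datetime
from typing import Optional

from monty.json import MontyDecoder
from pydantic import BaseModel, validator

class TaskDoc(BaseModel):
    last_updated: Optional[datetime] = None

    @validator("last_updated", pre=True)
    def last_updated_dict_ok(cls, v):
        # Decodes e.g. {"@module": "datetime", "@class": "datetime", ...}
        return MontyDecoder().process_decoded(v)
```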
Example #28
def object_hook(d):
    """
    For use with msgpack.unpackb(data, object_hook=object_hook). Supports
    Monty's as_dict protocol, numpy arrays and datetime.
    """
    return MontyDecoder().process_decoded(d)
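A round-trip sketch with msgpack (assumes monty's companion encoder `monty.msgpack.default` and an MSONable object `obj`):

```
import msgpack
from monty.msgpack import default, object_hook

packed = msgpack.packb(obj, default=default)  # MSONable -> dict -> bytes
restored = msgpack.unpackb(packed, object_hook=object_hook, raw=False)
```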
Example #29
 @classmethod
 def from_dict(cls, d):
     from monty.json import MontyDecoder
     dec = MontyDecoder()
     return cls(dec.process_decoded(d["entries"]),
                dec.process_decoded(d["working_ion_entry"]))
Example #30
 @classmethod
 def from_dict(cls, d):
     dec = MontyDecoder()
     return cls(handler=dec.process_decoded(d['handler']),
                actions=d['actions'],
                event=dec.process_decoded(d['event']),
                reset=d['reset'])