def as_dict(self):
    """Return an MSONable dict representation of this k-point sampling."""
    encoder = MontyEncoder()
    d = {
        "mode": self.mode,
        "comment": self.comment,
        "num_kpts": self.num_kpts,
        # k-points are converted to a numpy array so MontyEncoder can
        # serialize them uniformly.
        "kpts": encoder.default(np.array(self.kpts)),
        "kpt_shifts": self.kpt_shifts,
        "kpts_weights": self.kpts_weights,
        "use_symmetries": self.use_symmetries,
        "use_time_reversal": self.use_time_reversal,
        "chksymbreak": self.chksymbreak,
    }
    # MSON bookkeeping keys so the dict can be round-tripped back to an object.
    d["@module"] = self.__class__.__module__
    d["@class"] = self.__class__.__name__
    return d
def as_dict(self):
    """Return an MSONable dict representation of this k-point sampling."""
    encoder = MontyEncoder()
    d = {
        # NOTE: only the enum member's name is stored, not the enum itself.
        "mode": self.mode.name,
        "comment": self.comment,
        "num_kpts": self.num_kpts,
        # k-points are converted to a numpy array so MontyEncoder can
        # serialize them uniformly.
        "kpts": encoder.default(np.array(self.kpts)),
        "kpt_shifts": self.kpt_shifts,
        "kpts_weights": self.kpts_weights,
        "use_symmetries": self.use_symmetries,
        "use_time_reversal": self.use_time_reversal,
        "chksymbreak": self.chksymbreak,
    }
    # MSON bookkeeping keys so the dict can be round-tripped back to an object.
    d["@module"] = self.__class__.__module__
    d["@class"] = self.__class__.__name__
    return d
def as_dict(self):
    """
    Note: stores the real and imaginary part of the dielectric tensor
    separately, due to issues with JSON serializing complex numbers.

    Returns:
        dict: Dictionary representation of the DielTensor instance.
    """
    # Reuse one encoder instead of constructing a fresh MontyEncoder for
    # every field, and build the dict with a literal instead of dict().
    enc = MontyEncoder()
    return {
        "energies": enc.default(self.energies),
        "real_diel": enc.default(self.dielectric_tensor.real),
        "imag_diel": enc.default(self.dielectric_tensor.imag),
    }
def test_entry(self):
    """Round-trip a ComputedEntry (and a list of them) through MontyEncoder."""
    enc = MontyEncoder()
    dec = MontyDecoder()
    entry = ComputedEntry("Fe2O3", 2.3)
    decoded = dec.decode(enc.encode(entry))
    self.assertEqual(type(decoded), ComputedEntry)
    # Check that a list of entries round-trips element by element.
    decoded_list = dec.decode(enc.encode([entry] * 3))
    for item in decoded_list:
        self.assertEqual(type(item), ComputedEntry)
    self.assertEqual(len(decoded_list), 3)
def as_dict(self):
    """Return an MSONable dict representation of this Slab."""
    d = super(Slab, self).as_dict()
    d.update(
        {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "oriented_unit_cell": self.oriented_unit_cell.as_dict(),
            "miller_index": self.miller_index,
            "shift": self.shift,
            # scale_factor is presumably a numpy array — TODO confirm;
            # MontyEncoder.default handles its JSON serialization.
            "scale_factor": MontyEncoder().default(self.scale_factor),
            "reconstruction": self.reconstruction,
            "energy": self.energy,
        }
    )
    return d
def as_dict(self):
    """Serialize the reference as a dict."""
    schema = self.output_schema
    # The schema is optional; only encode it when one is attached.
    if schema is None:
        schema_dict = None
    else:
        schema_dict = MontyEncoder().default(schema)
    return {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "@version": None,
        "uuid": self.uuid,
        "attributes": self.attributes,
        "output_schema": schema_dict,
    }
def test_serialization():
    """A Job must keep its uuid through a MontyEncoder/MontyDecoder round trip."""
    import json

    from monty.json import MontyDecoder, MontyEncoder

    from jobflow import Job

    test_job = Job(function=add, function_args=(1,), function_kwargs={"b": 2})
    original_uuid = test_job.uuid
    serialized = MontyEncoder().encode(test_job)
    round_tripped = MontyDecoder().process_decoded(json.loads(serialized))
    assert round_tripped.uuid == original_uuid
def test_serialization():
    """A nested Flow must keep its host linkage through a JSON round trip."""
    import json

    from monty.json import MontyDecoder, MontyEncoder

    from jobflow import Flow

    inner_flow = Flow([])
    outer_flow = Flow([inner_flow])
    host_uuid = outer_flow.uuid
    serialized = MontyEncoder().encode(outer_flow)
    round_tripped = MontyDecoder().process_decoded(json.loads(serialized))
    # The decoded inner flow must still point at the outer flow's uuid.
    assert round_tripped.jobs[0].host == host_uuid
def test_pandas(self):
    """A DataFrame-holding object must round-trip through MontyEncoder/from_dict."""
    frame = pd.DataFrame([{"a": 1, "b": 1}, {"a": 1, "b": 2}])
    cls = ClassContainingDataFrame(df=frame)
    d = json.loads(MontyEncoder().encode(cls))
    # The encoded frame carries MSON bookkeeping keys.
    self.assertEqual(d["df"]["@module"], "pandas")
    self.assertEqual(d["df"]["@class"], "DataFrame")
    obj = ClassContainingDataFrame.from_dict(d)
    self.assertIsInstance(obj, ClassContainingDataFrame)
    self.assertIsInstance(obj.df, pd.DataFrame)
    self.assertEqual(list(obj.df.a), [1, 1])
def default(obj):
    """
    For use with msgpack.packb(obj, default=default). Supports Monty's
    as_dict protocol, numpy arrays and datetime.
    """
    encoder = MontyEncoder()
    return encoder.default(obj)
def run_task(self, fw_spec):
    """
    Initialize an approx_neb document from a host task doc and insert it into
    the approx_neb collection of the task database.

    Args:
        fw_spec (dict): firework spec; may supply "host_task_id" when it is
            not set on the task itself.

    Returns:
        FWAction: stores the wf_uuid and the inserted approx_neb_doc.

    Raises:
        ValueError: if the provided approx_neb_wf_uuid already exists in the
            approx_neb collection, or if no matching host task doc is found.
    """
    # get the database connection
    db_file = env_chk(self["db_file"], fw_spec)
    mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
    wf_uuid = self["approx_neb_wf_uuid"]

    # check if provided approx_neb_wf_uuid is unique
    # e.g. not already used in approx_neb collection
    approx_neb_db = mmdb.db["approx_neb"]
    if approx_neb_db.count_documents({"wf_uuid": wf_uuid}) != 0:
        raise ValueError(
            "Provided approx_neb_wf_uuid is not unique. A unique workflow id is required for querying in the approx_neb workflow."
        )

    # update host task doc (from host_task_id) with unique wf_uuid
    # (tracks approx_neb workflows generated from this host task doc)
    t_id = self.get("host_task_id", fw_spec.get("host_task_id"))
    host_tasks_doc = mmdb.collection.find_one_and_update(
        {"task_id": t_id, "approx_neb.calc_type": "host"},
        {"$push": {"approx_neb.wf_uuids": wf_uuid}},
    )
    # find_one_and_update returns None when no document matches the filter.
    # Fixed: identity comparison with "is None" instead of "== None".
    if host_tasks_doc is None:
        raise ValueError(
            "Error updating approx neb host with task_id: {}".format(t_id))

    # Initialize and store select host task doc fields in approx_neb_doc
    # (to be stored in the approx_neb collection)
    approx_neb_doc = {
        "wf_uuid": wf_uuid,
        "host": {
            "dir_name": host_tasks_doc["dir_name"],
            "chemsys": host_tasks_doc["chemsys"],
            "formula_pretty": host_tasks_doc["formula_pretty"],
            "input_structure": host_tasks_doc["input"]["structure"],
            "output": host_tasks_doc["output"],
            "task_id": host_tasks_doc["task_id"],
        },
        "end_points": [],
    }

    # ensure tags and additional_fields are the same
    # in both the approx_neb and tasks collections
    additional_fields = self.get("additional_fields", {})
    if isinstance(additional_fields, dict):
        for key, value in additional_fields.items():
            # never let caller-supplied fields clobber the core doc fields
            if key not in approx_neb_doc:
                approx_neb_doc[key] = value
    tags = self.get("tags")
    if tags:
        approx_neb_doc["tags"] = tags

    # insert approx_neb_doc in the approx_neb collection of provided database
    # includes fix to ensure approx_neb_doc is a json serializable dict
    # (encode with MontyEncoder, then parse the JSON back into plain types)
    approx_neb_doc = MontyEncoder().encode(approx_neb_doc)
    approx_neb_doc = loads(approx_neb_doc)
    approx_neb_doc["last_updated"] = datetime.utcnow()
    mmdb.collection = mmdb.db["approx_neb"]
    mmdb.collection.insert_one(approx_neb_doc)

    # Update fw_spec with approx_neb_doc and store wf_uuid
    # in launches collection for record keeping
    return FWAction(stored_data={
        "wf_uuid": wf_uuid,
        "approx_neb_doc": approx_neb_doc
    })