Example #1
    def as_dict(self):
        """
        Because some dicts have tuple keys, some sanitization is required for json compatibility.
        """

        d = {}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["@version"] = __version__
        d["formula"] = self.formula
        d["structures"] = [s.as_dict() for s in self.structures]
        d["energies"] = self.energies
        d["cutoff"] = self.cutoff
        d["tol"] = self.tol
        d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
        d["dists"] = self.dists
        d["ex_params"] = self.ex_params
        d["javg"] = self.javg
        d["igraph"] = self.igraph.as_dict()

        # Sanitize tuple & int keys
        d["ex_mat"] = jsanitize(self.ex_mat)
        d["nn_interactions"] = jsanitize(self.nn_interactions)
        d["unique_site_ids"] = jsanitize(self.unique_site_ids)
        d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)

        return d
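
A minimal standalone sketch (not from the source) of why the tuple-key sanitization above is needed: json.dumps rejects non-string keys, while jsanitize stringifies them.

import json
from monty.json import jsanitize

ex_mat = {(0, 1): -5.2, (1, 1): 3.7}  # hypothetical exchange matrix keyed by site pairs
# json.dumps(ex_mat) would raise TypeError: keys must be strings
clean = jsanitize(ex_mat)
print(clean)              # {'(0, 1)': -5.2, '(1, 1)': 3.7}
print(json.dumps(clean))  # now JSON-compatible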
Example #2
    def as_dict(self):
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "light_structure_environments": self.light_structure_environments.as_dict(),
                "connectivity_graph": jsanitize(nx.to_dict_of_dicts(self._graph)),
                "environment_subgraphs": {env_key: jsanitize(nx.to_dict_of_dicts(subgraph))
                                          for env_key, subgraph in self.environment_subgraphs.items()}}
Example #3
def index(request):
    ctx = RequestContext(request)
    if request.user.is_authenticated():
        API_KEY = request.user.api_key
        ENDPOINT = request.build_absolute_uri(get_endpoint())
        with UWSI2Rester(API_KEY, endpoint=ENDPOINT) as mpr:
            try:
                contribs = mpr.get_uwsi2_contributions()
                if not contribs:
                    raise Exception('No contributions found for UW/SI2 Explorer!')
                ranges = {}
                for contrib in contribs:
                    df = contrib['table']
                    df.columns = list(df.columns[:-1]) + ['El.']
                    for col in df.columns[:-1]:
                        low, upp = min(df[col]), max(df[col])
                        if col not in ranges:
                            ranges[col] = [low, upp]
                        else:
                            if low < ranges[col][0]:
                                ranges[col][0] = low
                            if upp > ranges[col][1]:
                                ranges[col][1] = upp
                    contrib['table'] = get_backgrid_table(df)
                ranges = jsanitize(ranges)
                contribs = jsanitize(contribs)
            except Exception as ex:
                ctx.update({'alert': str(ex)})
    else:
        ctx.update({'alert': 'Please log in!'})
    return render_to_response("uwsi2_explorer_index.html", locals(), ctx)
Example #4
    def test_as_dict_as_json(self):
        # Check symbols to be sure they are still ok
        for name, sym in self.generate_symbols().items():
            compare_dict = sym.as_dict()
            self.assertDictEqual(compare_dict, self.custom_syms_as_dicts[name])
            compare_dict = jsanitize(sym, strict=True)
            self.assertDictEqual(compare_dict, self.custom_symbols_json[name])

        # Quantities with custom symbols
        for sym, q in self.quantities_custom_symbol.items():
            for qq, expected_dict, expected_json in zip(q,
                                                        self.sq_custom_sym_as_dicts[sym],
                                                        self.sq_custom_sym_json[sym]):
                sq = StorageQuantity.from_quantity(qq)
                compare_dict = sq.as_dict()
                self.assertDictEqual(compare_dict, expected_dict)
                compare_dict = jsanitize(sq, strict=True)
                self.assertDictEqual(compare_dict, expected_json)

        # Quantities with canonical symbols directly calculated from a real model
        for sym, q in self.quantities_canonical_symbol.items():
            for qq, expected_dict, expected_json in zip(q,
                                                        self.sq_canonical_sym_as_dicts_no_value[sym],
                                                        self.sq_canonical_sym_json_no_value[sym]):
                sq = StorageQuantity.from_quantity(qq)
                compare_dict = sq.as_dict()
                self.assertTrue(np.isclose(qq.magnitude, compare_dict['value']))
                compare_dict.pop('value')
                self.assertDictEqual(compare_dict, expected_dict)
                compare_dict = jsanitize(sq, strict=True)
                self.assertTrue(np.isclose(qq.magnitude, compare_dict['value']))
                compare_dict.pop('value')
                self.assertDictEqual(compare_dict, expected_json)

        # Quantity with uncertainty (calculated from mean), using custom symbols
        sq = StorageQuantity.from_quantity(self.quantity_with_uncertainty)
        compare_dict = sq.as_dict()
        self.assertTrue(np.isclose(self.quantity_with_uncertainty.magnitude, compare_dict['value']))
        uncertainty_value = compare_dict['uncertainty']
        self.assertTrue(np.isclose(self.quantity_with_uncertainty.uncertainty.magnitude,
                                   uncertainty_value))
        compare_dict.pop('value')
        compare_dict.pop('uncertainty')
        self.assertDictEqual(self.sq_with_uncertainty_as_dict_no_numbers, compare_dict)

        compare_dict = jsanitize(sq, strict=True)
        self.assertTrue(np.isclose(self.quantity_with_uncertainty.magnitude, compare_dict['value']))
        uncertainty_value = compare_dict['uncertainty']
        self.assertTrue(np.isclose(self.quantity_with_uncertainty.uncertainty.magnitude,
                                   uncertainty_value))
        compare_dict.pop('value')
        compare_dict.pop('uncertainty')
        self.assertDictEqual(self.sq_with_uncertainty_json_no_numbers, compare_dict)

        # Quantity that is an object, using a custom symbol
        sq = StorageQuantity.from_quantity(self.object_quantity)
        compare_dict = sq.as_dict()
        self.assertDictEqual(self.sq_object_as_dict, compare_dict)
        compare_dict = jsanitize(sq, strict=True)
        self.assertDictEqual(self.sq_object_json, compare_dict)
Example #5
    def as_dict(self):
        """
        Dict representation of the TransformedStructure.
        """
        d = self.final_structure.as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["history"] = jsanitize(self.history)
        d["last_modified"] = str(datetime.datetime.utcnow())
        d["other_parameters"] = jsanitize(self.other_parameters)
        return d
Example #6
    def as_dict(self):
        """
        Dict representation of the TransformedStructure.
        """
        d = self.final_structure.as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["history"] = jsanitize(self.history)
        d["version"] = __version__
        d["last_modified"] = str(datetime.datetime.utcnow())
        d["other_parameters"] = jsanitize(self.other_parameters)
        return d
Example #7
    def as_dict(self):
        """
        Dict representation of NEBAnalysis.

        Returns:
            JSON serializable dict representation.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                'r': jsanitize(self.r),
                'energies': jsanitize(self.energies),
                'forces': jsanitize(self.forces),
                'structures': [s.as_dict() for s in self.structures]}
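
Since r, energies and forces are reduced to plain lists by jsanitize, only the structures need explicit revival on the way back. A hedged sketch of the inverse method (assuming, as in pymatgen, a constructor that accepts these four fields):

    @classmethod
    def from_dict(cls, d):
        from pymatgen.core import Structure
        return cls(r=d["r"], energies=d["energies"], forces=d["forces"],
                   structures=[Structure.from_dict(s) for s in d["structures"]])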
Example #9
    def as_dict(self):
        """
        Dict representation of NEBAnalysis.

        Returns:
            JSON-serializable dict representation.
        """
        return {
            "@module": type(self).__module__,
            "@class": type(self).__name__,
            "r": jsanitize(self.r),
            "energies": jsanitize(self.energies),
            "forces": jsanitize(self.forces),
            "structures": [s.as_dict() for s in self.structures],
        }
Example #10
    def test_jsanitize(self):
        # jsanitize should have no effect on None values.
        d = {"hello": 1, "world": None}
        clean = jsanitize(d)
        self.assertIsNone(clean["world"])
        self.assertEqual(json.loads(json.dumps(d)), json.loads(json.dumps(
            clean)))

        d = {"hello": GoodMSONClass(1, 2)}
        self.assertRaises(TypeError, json.dumps, d)
        clean = jsanitize(d)
        self.assertIsInstance(clean["hello"], six.string_types)
        clean_strict = jsanitize(d, strict=True)
        self.assertEqual(clean_strict["hello"]["a"], 1)
        self.assertEqual(clean_strict["hello"]["b"], 2)
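
For context, a minimal sketch of what GoodMSONClass might look like (its real definition lives in the monty test suite, so this is an assumption): an MSONable subclass whose default as_dict() round-trips the constructor arguments, which is what jsanitize(strict=True) calls into.

from monty.json import MSONable

class GoodMSONClass(MSONable):
    def __init__(self, a, b):
        # MSONable's default as_dict() serializes __init__ args by name
        self.a = a
        self.b = b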
Example #11
def main(size, U, phase, P=0):
    fout = 'size_%s/profile_U%s' % (size, U)
    from tBG.molecule.structures import QuantumDotQC
    spin_sides = spin_sides_distribution(phase)
    R = size2R(size)
    qc = QuantumDotQC()
    qc.regular_polygon(12, R, OV_orientation=15)
    with open(fout, 'a') as f:
        f.write('num_electron: %s\n' % len(qc.coords))
        f.write('phase: %s\n' % phase)
    qc.add_hopping_wannier(P=P)
    n_up, n_dn, e_tot, converged = calc_spin_dist_iteration(qc,
                                                            U,
                                                            spin_sides,
                                                            mix0=0.001,
                                                            mix1=0.001,
                                                            prec=1.e-4,
                                                            T=10,
                                                            nmax=500,
                                                            fout=fout)
    with open('size_%s/struct.obj' % size, 'wb') as f:
        pickle.dump(qc, f)
    out = {'n_up': n_up, 'n_dn': n_dn, 'e_tot': e_tot, 'converged': converged}
    with open('size_%s/U_%s/initPhase_%s.json' % (size, U, phase), 'w') as f:
        json.dump(jsanitize(out), f)
Example #12
    def run_task(self, fw_spec):
        lp, fw_id = get_lp_and_fw_id_from_task(self, fw_spec)

        wf = lp.get_wf_by_fw_id(fw_id)
        wf_module = importlib.import_module(wf.metadata['workflow_module'])
        wf_class = getattr(wf_module, wf.metadata['workflow_class'])

        database = fw_spec['mongo_database']
        if self.criteria is not None:
            entry = database.get_entry(criteria=self.criteria)
        else:
            entry = {}

        inserted = []
        for root_key, method_name in self.insertion_data.items():
            get_results_method = getattr(wf_class, method_name)
            results = get_results_method(wf)
            for key, val in results.items():
                entry[key] = jsanitize(val)
                inserted.append(key)

        if self.criteria is not None:
            database.save_entry(entry=entry)
        else:
            database.insert_entry(entry=entry)

        logging.info("Inserted data:\n{}".format("\n".join("- {}".format(k) for k in inserted)))
        return FWAction()
Example #13
    def update_targets(self, items):
        """
        Inserts the new boltztrap docs into the boltztrap collection.

        Args:
            items ([dict]): a list of boltztrap dictionaries to update
        """
        items = list(filter(None, items))

        bta_fs = gridfs.GridFS(self.materials.database,
                               self.bta_fs) if self.bta_fs else None

        if len(items) > 0:
            self.logger.info("Updating {} boltztrap docs".format(len(items)))

            for doc in items:
                if self.bta_fs:
                    bta_doc = dict(doc["bta"])
                    bta_json = json.dumps(jsanitize(bta_doc))
                    bta_gz = zlib.compress(bta_json.encode("utf-8"))  # zlib.compress needs bytes, not str
                    bs_oid = bta_fs.put(bta_gz)
                    doc['bta_oid'] = bta_oid
                    doc['bta_compression'] = "zlib"

                del doc["bta"]

            self.boltztrap.update(items)

        else:
            self.logger.info("No items to update")
Example #14
    def run_task(self, fw_spec):

        wf_uuid = self["wf_uuid"]

        surfaces = ["kx_0", "kx_1", "ky_0", "ky_1", "kz_0", "kz_1"]

        d = {
            "wf_uuid": wf_uuid,
            "formula": fw_spec["formula"],
            "reduced_formula": fw_spec["reduced_formula"],
            "structure": fw_spec["structure"],
        }

        for surface in surfaces:
            if surface in fw_spec.keys():
                d[surface] = fw_spec[surface]

        d = jsanitize(d)

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("z2pack.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["z2pack"]
            db.collection.insert_one(d)
            logger.info("Z2Pack surface calculation complete.")

        return FWAction()
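
DATETIME_HANDLER is imported from a helper module; a hedged sketch of such a json.dumps fallback (reconstructed here as an assumption), which serializes any datetime the sanitized dict may still contain:

import datetime

def datetime_handler(obj):
    # json.dumps(default=...) only sees objects it cannot encode itself
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    return None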
Example #15
    def run_task(self, fw_spec):

        irvsp = self["irvsp_out"] or fw_spec["irvsp_out"]

        irvsp = jsanitize(irvsp)

        additional_fields = self.get("additional_fields", {})
        d = additional_fields.copy()
        d["wf_uuid"] = self["wf_uuid"]
        d["formula"] = fw_spec["formula"]
        d["efermi"] = fw_spec["efermi"]
        d["structure"] = fw_spec["structure"]
        d["irvsp"] = irvsp

        # store the results
        db_file = env_chk(self.get("db_file"), fw_spec)
        if not db_file:
            with open("irvsp.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["irvsp"]
            db.collection.insert_one(d)
            logger.info("IRVSP calculation complete.")
        return FWAction()
Example #16
    def process_item(self, item):

        new_style_mat = item["material"]

        mat = old_style_mat(new_style_mat)
        add_es(mat, new_style_mat)

        if item.get("xrd", None):
            xrd = item["xrd"]
            add_xrd(mat, xrd)

        if item.get("dielectric", None):
            dielectric = item["dielectric"]
            add_dielectric(mat, dielectric)

        if item.get("elastic", None):
            elastic = item["elastic"]
            add_elastic(mat, elastic)

        if item.get("thermo", None):
            thermo = item["thermo"]
            add_thermo(mat, thermo)

        if item.get("dois", None):
            doi = item["dois"]
            add_dois(mat, doi)

        snl = item.get("snl", {})
        add_snl(mat, snl)
        add_magnetism(mat)
        sandbox_props(mat)
        has_fields(mat)
        return jsanitize(mat)
Example #17
    def process_item(self, item):

        new_mat = item["material"]

        mat = old_style_mat(new_mat)

        if item.get("xrd", None):
            xrd = item["xrd"]
            add_xrd(mat, xrd)

        if item.get("electronic_structure", None):
            es = item["electronic_structure"]
            add_es(mat, new_mat, es)

        if item.get("piezo", None):
            pass

        if item.get("elasticity", None):
            pass

        if item.get("thermo", None):
            thermo = item["thermo"]
            add_thermo(mat, thermo)

        if item.get("snl", None):
            snl = item["snl"]
            add_snl(mat, snl)

            if item.get("icsds", None):
                icsds = item["icsds"]
                if icsds:
                    add_icsd(mat, icsds)

        sandbox_props(mat)
        return jsanitize(mat)
Example #18
    def update_autots_data_docs(self,
                                docs: List[Dict],
                                key: Optional[str] = "path"):
        """
        Update a number of data docs at once.

        Args:
            docs (list of dicts): Task docs to be updated.
            key (str): Database key to query

        Returns:
            None
        """

        requests = list()

        for doc in docs:
            if not doc.get("datumid", None):
                doc["datumid"] = self.database["counter"].find_one_and_update(
                    {"_id": "datumid"}, {"$inc": {
                        "c": 1
                    }},
                    return_document=ReturnDocument.AFTER)["c"]
            doc = jsanitize(doc, allow_bson=True)

            requests.append(ReplaceOne({key: doc[key]}, doc, upsert=True))

        if len(requests) > 0:
            self.database[self.autots_data_collection].bulk_write(
                requests, ordered=False)
Example #19
def contains_flow_or_job(obj: Any) -> bool:
    """
    Find whether an object contains any :obj:`Flow` or :obj:`Job` objects.

    Parameters
    ----------
    obj
        An object.

    Returns
    -------
    bool
        Whether the object contains any Flows or jobs.
    """
    from monty.json import jsanitize

    from jobflow.core.flow import Flow
    from jobflow.core.job import Job

    if isinstance(obj, (Flow, Job)):
        # if the argument is a flow or job then stop there
        return True

    elif isinstance(obj, (float, int, str, bool)):
        # argument is a primitive; we won't find a flow or job here
        return False

    obj = jsanitize(obj, strict=True)

    # recursively find any reference classes
    locations = find_key_value(obj, "@class", "Flow")
    locations += find_key_value(obj, "@class", "Job")

    return len(locations) > 0
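
A usage sketch (assuming jobflow's @job decorator, which wraps a function call into a Job): detection works even for jobs nested inside plain containers, because jsanitize(strict=True) preserves the "@class" markers that find_key_value then matches.

from jobflow import job

@job
def add(a, b):
    return a + b

print(contains_flow_or_job(add(1, 2)))           # True: the argument is a Job
print(contains_flow_or_job({"x": [add(1, 2)]}))  # True: found via "@class"
print(contains_flow_or_job({"x": 1}))            # False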
Example #20
def get_doc_json(collection_name, uid):
    settings = CSETTINGS[collection_name]
    criteria = {
        settings["unique_key"]: process(uid, settings["unique_key_type"])
    }
    doc = DB[collection_name].find_one(criteria)
    return jsonify(jsanitize(doc))
Example #21
    def assimilate(self, path, input_file, output_file, multirun):
        """
        Parses qchem input and output files and insert the result into the db.

        Args:
            path (str): Path to the directory containing output file
            input_file (str): base name of the input file(s)
            output_file (str): base name of the output file(s)
            multirun (bool): Whether the job to parse includes multiple
                             calculations in one input / output pair.

        Returns:
            d (dict): a task dictionary
        """
        logger.info("Getting task doc for base dir :{}".format(path))
        qcinput_files = self.filter_files(path, file_pattern=input_file)
        qcoutput_files = self.filter_files(path, file_pattern=output_file)
        if len(qcinput_files) != len(qcoutput_files):
            raise AssertionError("Unequal number of input and output files!")
        if len(qcinput_files) > 0 and len(qcoutput_files) > 0:
            d = self.generate_doc(path, qcinput_files, qcoutput_files,
                                  multirun)
            self.post_process(path, d)
        else:
            raise ValueError("Either input or output not found!")
        self.validate_doc(d)
        return jsanitize(d, strict=True, allow_bson=True)
Example #22
    def dielectric(self, uc_type='prim', output_dir='./'):
        if uc_type == 'prim':
            uc_type = 'primitive'
            stru = self.stru_prim.copy()
        elif uc_type == 'conv':
            uc_type = 'conventional'
            stru = self.stru_conv.copy()
        path = os.path.join(output_dir, self.name)
        inputs = MPStaticSet(stru).all_input
        transf = {'history': [{'source': self.mpid, 'unit_cell': uc_type}],
                  'defect_type': 'dielectric'}
        incar = inputs['INCAR']
        kpoints = Kpoints.automatic_gamma_density(stru, 2000)
        incar['ISPIN'] = 2 if self.is_spin_polarized else 1
        incar['IBRION'] = 8
        incar['LEPSILON'] = True
        incar['LPEAD'] = True
        incar['EDIFF'] = 1e-6
        incar['LWAVE'] = False
        incar['LCHARG'] = False
        incar['ISMEAR'] = 0
        incar['ALGO'] = "Normal"
        incar['SIGMA'] = 0.01
        del incar['NSW'], incar['LVHAR'], incar['LAECHG']
        os.mkdir(path)
        with open(os.path.join(path, "transformations.json"), 'w') as f:
            f.write(json.dumps(jsanitize(transf)))
        inputs['POTCAR'].write_file(path + "/POTCAR")
        incar.write_file(path + "/INCAR")
        kpoints.write_file(path + "/KPOINTS")
        inputs['POSCAR'].write_file(path + "/POSCAR")
Example #23
    def insert(self, d, update_duplicates=True):
        """
        Insert the task document into the database collection.

        Args:
            d (dict): task document
            update_duplicates (bool): whether to update the duplicates
        """
        result = self.collection.find_one({"dir_name": d["dir_name"]}, ["dir_name", "task_id"])
        if result is None or update_duplicates:
            d["last_updated"] = datetime.datetime.utcnow()
            if result is None:
                if ("task_id" not in d) or (not d["task_id"]):
                    d["task_id"] = self.db.counter.find_one_and_update(
                        {"_id": "taskid"}, {"$inc": {"c": 1}},
                        return_document=ReturnDocument.AFTER)["c"]
                logger.info("Inserting {} with taskid = {}".format(d["dir_name"], d["task_id"]))
            elif update_duplicates:
                d["task_id"] = result["task_id"]
                logger.info("Updating {} with taskid = {}".format(d["dir_name"], d["task_id"]))
            d = jsanitize(d, allow_bson=True)
            self.collection.update_one({"dir_name": d["dir_name"]},
                                       {"$set": d}, upsert=True)
            return d["task_id"]
        else:
            logger.info("Skipping duplicate {}".format(d["dir_name"]))
            return None
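
A standalone sketch of the task-id counter pattern used above (connection details are hypothetical; upsert=True is added here so a fresh database works): find_one_and_update increments and returns the new value in one atomic step, so concurrent inserts never receive the same id.

from pymongo import MongoClient, ReturnDocument

db = MongoClient()["tasks_db"]
next_id = db.counter.find_one_and_update(
    {"_id": "taskid"}, {"$inc": {"c": 1}},
    upsert=True,  # assumption: create the counter document if missing
    return_document=ReturnDocument.AFTER)["c"]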
Example #24
    def PBEsol_relaxation(self, output_dir='./'):
        path = os.path.join(output_dir, self.name)
        inputs = MPRelaxSet(self.stru).all_input
        incar = inputs['INCAR']
        incar['ISPIN'] = 2 if self.is_spin_polarized else 1
        # cutoff = 1.3x the largest ENMAX among the POTCARs
        enmax = round(max(i.PSCTR['ENMAX'] for i in inputs['POTCAR']) * 1.3)
        incar['ENCUT'] = int(enmax)
        incar["SYSTEM"] = self.name + "_PBEsol_Relax"
        incar["ALGO"] = "Normal"
        incar["EDIFF"] = 0.0001
        incar["EDIFFG"] = 0.001
        incar["ISMEAR"] = 0
        incar["GGA"] = "PS"
        incar["NSW"] = 99
        profile = {'source': self.mpid, 'calculation': 'PBEsol_relaxation'}
        os.mkdir(path)
        with open(os.path.join(path, 'profile.json'), 'w') as f:
            f.write(json.dumps(jsanitize(profile)))
        inputs['KPOINTS'].write_file(path + "/KPOINTS")
        inputs['POSCAR'].write_file(path + "/POSCAR")
        inputs['POTCAR'].write_file(path + "/POTCAR")
        incar.write_file(path + "/INCAR")
Example #25
    def elastic_moduli(self, output_dir='./'):
        stru = self.stru_prim.copy()
        inputs = MPStaticSet(stru).all_input
        path = os.path.join(output_dir, self.name)
        transf = {'history': [{'source': self.mpid}], 'defect_type': 'elastic_moduli'}
        incar = inputs['INCAR']
        incar['ISPIN'] = 2 if self.is_spin_polarized else 1
        incar['IBRION'] = 6
        incar['ISIF'] = 3
        incar['EDIFF'] = 1e-6
        incar['ISMEAR'] = 0
        incar['POTIM'] = 0.015
        del incar['NSW'], incar['LVHAR'], incar['LAECHG']
        os.mkdir(path)
        with open(os.path.join(path, "transformations.json"), 'w') as f:
            f.write(json.dumps(jsanitize(transf)))
        # cutoff = 1.3x the largest ENMAX among the POTCARs
        enmax = round(max(i.PSCTR['ENMAX'] for i in inputs['POTCAR']) * 1.3)
        incar['ENCUT'] = int(enmax)
        inputs['POTCAR'].write_file(path + "/POTCAR")
        incar.write_file(path + "/INCAR")
        inputs['KPOINTS'].write_file(path + "/KPOINTS")
        inputs['POSCAR'].write_file(path + "/POSCAR")
Example #26
    def update(self, docs, update_lu=True, key=None, **kwargs):
        """
        Function to update associated MongoStore collection.

        Args:
            docs: list of documents
        """

        for d in docs:

            d = jsanitize(d, allow_bson=True)

            # document-level validation is optional
            validates = True
            if self.validator:
                validates = self.validator.is_valid(d)
                if not validates:
                    if self.validator.strict:
                        raise ValueError(self.validator.validation_errors(d))
                    else:
                        self.logger.error(self.validator.validation_errors(d))

            if validates:
                if isinstance(key, list):
                    search_doc = {k: d[k] for k in key}
                elif key:
                    search_doc = {key: d[key]}
                else:
                    search_doc = {self.key: d[self.key]}
                if update_lu:
                    d[self.lu_field] = datetime.utcnow()
                self.collection.insert_one(d)
Example #27
    def get_phonon_properties(self, item):
        """
        Extracts the phonon properties from the item
        """

        # the temp dir should still exist when using the objects as some readings are done lazily
        with tempfile.TemporaryDirectory() as workdir:
            phbst_file, phdos_file, ananc_file, labels_list = self.run_anaddb(item, workdir=workdir)

            phbands = phbst_file.phbands
            phbands.read_non_anal_from_file(phbst_file.filepath)

            symm_line_bands = self.get_pmg_bs(phbands, labels_list)

            complete_dos = phdos_file.to_pymatgen()
            phdos = phdos_file.phdos

            tstart, tstop, nt = 5, 800, 160
            temp = np.linspace(tstart, tstop, nt)

            thermo = {"temperature": temp.tolist(),
                      "entropy": phdos.get_entropy(tstart, tstop, nt).values.tolist(),
                      "cv": phdos.get_cv(tstart, tstop, nt).values.tolist(),
                      "free_energy": phdos.get_free_energy(tstart, tstop, nt).values.tolist(),
                      }

            data = {"dos": complete_dos.as_dict(),
                    "bs": symm_line_bands.as_dict(),
                    "thermodynamic": thermo,
                    "becs": ananc_file.becs.values.tolist()}

            return jsanitize(data)
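
The final jsanitize(data) call matters because anaddb results arrive as numpy types; a minimal standalone demonstration (not from the source) of that conversion:

import numpy as np
from monty.json import jsanitize

thermo = {"temperature": np.linspace(5, 800, 4), "cv": np.float64(24.9)}
print(jsanitize(thermo))
# {'temperature': [5.0, 270.0, 535.0, 800.0], 'cv': 24.9}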
Example #28
    def as_dict(self) -> dict:
        """Serialize the job as a dictionary."""
        d = super().as_dict()

        # fireworks can't serialize functions and classes, so explicitly
        # serialize the job recursively using monty to avoid issues
        return jsanitize(d, strict=True, enum_values=True)
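
A small sketch of what the enum_values=True flag does (assuming a monty release recent enough to provide it): enum members collapse to their plain values instead of serialized objects.

from enum import Enum
from monty.json import jsanitize

class Spin(Enum):
    UP = 1
    DOWN = -1

print(jsanitize({"spin": Spin.UP}, enum_values=True))  # {'spin': 1}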
Example #29
    def insert(self, d, update_duplicates=True):
        """
        Insert the task document into the database collection.

        Args:
            d (dict): task document
            update_duplicates (bool): whether to update the duplicates
        """
        result = self.collection.find_one({"dir_name": d["dir_name"]}, ["dir_name", "task_id"])
        if result is None or update_duplicates:
            d["last_updated"] = datetime.datetime.today()
            if result is None:
                if ("task_id" not in d) or (not d["task_id"]):
                    d["task_id"] = self.db.counter.find_one_and_update(
                        {"_id": "taskid"}, {"$inc": {"c": 1}},
                        return_document=ReturnDocument.AFTER)["c"]
                logger.info("Inserting {} with taskid = {}".format(d["dir_name"], d["task_id"]))
            elif update_duplicates:
                d["task_id"] = result["task_id"]
                logger.info("Updating {} with taskid = {}".format(d["dir_name"], d["task_id"]))
            d = jsanitize(d, allow_bson=True)
            self.collection.update_one({"dir_name": d["dir_name"]},
                                       {"$set": d}, upsert=True)
            return d["task_id"]
        else:
            logger.info("Skipping duplicate {}".format(d["dir_name"]))
            return None
Example #30
    def as_dict(self):
        """
        Bson-serializable dict representation of the ConnectedComponent object.

        Returns:
            dict: Bson-serializable dict representation of the ConnectedComponent object.
        """
        nodes = {"{:d}".format(node.isite): (node, data) for node, data in self._connected_subgraph.nodes(data=True)}
        node2stringindex = {node: strindex for strindex, (node, data) in nodes.items()}
        dict_of_dicts = nx.to_dict_of_dicts(self._connected_subgraph)
        new_dict_of_dicts = {}
        for n1, n2dict in dict_of_dicts.items():
            in1 = node2stringindex[n1]
            new_dict_of_dicts[in1] = {}
            for n2, edges_dict in n2dict.items():
                in2 = node2stringindex[n2]
                new_dict_of_dicts[in1][in2] = {}
                for ie, edge_data in edges_dict.items():
                    ied = self._edgekey_to_edgedictkey(ie)
                    new_dict_of_dicts[in1][in2][ied] = jsanitize(edge_data)
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "nodes": {strindex: (node.as_dict(), data) for strindex, (node, data) in nodes.items()},
            "graph": new_dict_of_dicts,
        }
Example #31
def get_doc(uid):
    criteria = {
        SETTINGS["unique_key"]: parse_criteria(uid, SETTINGS["unique_key_type"])}
    doc = COLL.find_one(criteria)
    return make_response(render_template(
        'doc.html', doc=json.dumps(jsanitize(doc)))
    )
Example #32
    def process_item(self, item):

        self.logger.debug("Processing: {}".format(item[self.materials.key]))

        try:
            mat = old_style_mat(item)

            # These functions convert data from old style to new style
            add_es(mat, item)
            add_xrd(mat, item)
            add_elastic(mat, item)
            add_bonds(mat, item)
            add_propnet(mat, item)
            add_snl(mat, item)
            check_relaxation(mat, item)
            add_cifs(mat)
            add_meta(mat)
            add_thermo(mat, item)

            processed = jsanitize(mat)

        except Exception as e:
            self.logger.error(traceback.format_exc())
            processed = {"error": str(e)}

        key, lu_field = self.materials.key, self.materials.lu_field
        out = {
            self.website.key: item[key],
            self.website.lu_field: self.website.lu_func[1](
                self.materials.lu_func[0](item[lu_field])
            ),
        }
        out.update(processed)
        return out
Example #33
def test_find_key():
    from monty.json import MSONable, jsanitize

    from jobflow.utils import find_key

    data = {"a": [0, {"b": 1, "x": 3}], "c": {"d": {"x": 3}}}
    result = find_key(data, "x")
    assert len(result) == 2
    assert ["c", "d"] in result
    assert ["a", 1] in result

    result = find_key(data, "x", include_end=True)
    assert len(result) == 2
    assert ["c", "d", "x"] in result
    assert ["a", 1, "x"] in result

    data = {"a": {"x": {"x": 1}}, "b": {"x": 0}}
    result = find_key(data, "x", nested=False)
    assert len(result) == 2
    assert ["a"] in result
    assert ["b"] in result

    result = find_key(data, "x", nested=True)
    assert len(result) == 3
    assert ["a", "x"] in result
    assert ["a"] in result
    assert ["b"] in result

    class MyObj(MSONable):
        def __init__(self, a):
            self.a = a

    data = {"a": [0, {"b": 1, "x": 3}], "c": {"d": {"x": MyObj(a=1)}}}
    data = jsanitize(data, strict=True)
    assert find_key(data, MyObj) == [["c", "d", "x"]]
Example #34
    def publish(self, event):
        data = json.dumps(jsanitize(event, strict=True))
        # hold the lock while appending so concurrent writers don't interleave,
        # and release it even if the write fails
        with CHANNEL_THREAD_LOCK:
            with open(self._filename, "a+") as f:
                f.write(data)
                f.write("\n")
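
The file therefore accumulates one JSON document per line; a hedged read-back sketch (the filename is hypothetical):

import json

with open("channel.log") as f:
    events = [json.loads(line) for line in f if line.strip()]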
Example #35
def get_ids(collection_name):
    """
    Returns unique ids
    """
    settings = CSETTINGS[collection_name]
    doc = DB[collection_name].distinct(settings["unique_key"])
    return jsonify(jsanitize(doc))
Example #36
    def as_dict(self):
        d = dict()
        d["data"] = self.data
        d["lines"] = self.lines
        d["text"] = self.text
        d["filename"] = self.filename
        return jsanitize(d, strict=True)
Example #37
    def test_jsanitize(self):
        # jsanitize should have no effect on None values.
        d = {"hello": 1, "world": None}
        clean = jsanitize(d)
        self.assertIsNone(clean["world"])
        self.assertEqual(json.loads(json.dumps(d)), json.loads(json.dumps(
            clean)))

        d = {"hello": GoodMSONClass(1, 2, 3)}
        self.assertRaises(TypeError, json.dumps, d)
        clean = jsanitize(d)
        self.assertIsInstance(clean["hello"], six.string_types)
        clean_strict = jsanitize(d, strict=True)
        self.assertEqual(clean_strict["hello"]["a"], 1)
        self.assertEqual(clean_strict["hello"]["b"], 2)

        d = {"dt": datetime.datetime.now()}
        clean = jsanitize(d)
        self.assertIsInstance(clean["dt"], six.string_types)
        clean = jsanitize(d, allow_bson=True)
        self.assertIsInstance(clean["dt"], datetime.datetime)

        d = {"a": ["b", np.array([1, 2, 3])],
             "b": ObjectId.from_datetime(datetime.datetime.now())}
        clean = jsanitize(d)
        self.assertEqual(clean["a"], ['b', [1, 2, 3]])
        self.assertIsInstance(clean["b"], six.string_types)

        rnd_bin = six.binary_type(np.random.rand(10))
        d = {"a": six.binary_type(rnd_bin)}
        clean = jsanitize(d, allow_bson=True)
        self.assertEqual(clean["a"], six.binary_type(rnd_bin))
        self.assertIsInstance(clean["a"], six.binary_type)
Example #38
    def set_abinit_basic_from_abinit_input(self, abinit_input):
        """
        sets the fields of the object from an |AbinitInput| object
        """

        self.structure = abinit_input.structure.as_dict()
        self.ecut = abinit_input['ecut']
        # kpoints may be defined in different ways
        self.nshiftk = jsanitize(abinit_input.get('nshiftk', None))
        self.shiftk = jsanitize(abinit_input.get('shiftk', None))
        self.ngkpt = jsanitize(abinit_input.get('ngkpt', None))
        self.kptrlatt = abinit_input.get('kptrlatt', None)
        self.dilatmx = abinit_input.get('dilatmx', 1)
        self.occopt = abinit_input.get('occopt', 1)
        self.tsmear = abinit_input.get('tsmear', None)

        pseudo_data = AbinitPseudoData()
        pseudo_data.set_pseudos_from_abinit_input(abinit_input)
        self.pseudopotentials = pseudo_data
Example #39
    def get_dos_dict(self):
        """
        Returns the added doses as a json-serializable dict. Note that if you
        have specified smearing for the DOS plot, the densities returned will
        be the smeared densities, not the original densities.

        Returns:
            Dict of dos data. Generally of the form, {label: {'frequencies':..,
            'densities': ...}}
        """
        return jsanitize(self._doses)
Example #40
    def run_task(self, fw_spec):
        # the FW.json/yaml file is mandatory to get the fw_id
        # no need to deserialize the whole FW

        if '_add_launchpad_and_fw_id' in fw_spec:
            lp = self.launchpad
            fw_id = self.fw_id
        else:
            try:
                fw_dict = loadfn('FW.json')
            except IOError:
                try:
                    fw_dict = loadfn('FW.yaml')
                except IOError:
                    raise RuntimeError("Launchpad/fw_id not present in spec and No FW.json nor FW.yaml file present: "
                                       "impossible to determine fw_id")
            lp = LaunchPad.auto_load()
            fw_id = fw_dict['fw_id']

        wf = lp.get_wf_by_fw_id(fw_id)
        wf_module = importlib.import_module(wf.metadata['workflow_module'])
        wf_class = getattr(wf_module, wf.metadata['workflow_class'])

        database = fw_spec['mongo_database']
        if self.criteria is not None:
            entry = database.get_entry(criteria=self.criteria)
        else:
            entry = {}

        inserted = []
        for root_key, method_name in self.insertion_data.items():
            get_results_method = getattr(wf_class, method_name)
            results = get_results_method(wf)
            for key, val in results.items():
                entry[key] = jsanitize(val)
                inserted.append(key)

        if self.criteria is not None:
            database.save_entry(entry=entry)
        else:
            database.insert_entry(entry=entry)

        logging.info("Inserted data:\n{}".format("\n".join("- {}".format(k) for k in inserted)))
        return FWAction()
Example #41
    def as_dict(self):

        results = {'gap': self.gap,
                   'mu_steps': self.mu_steps,
                   'cond': self.cond,
                   'seebeck': self.seebeck,
                   'kappa': self.kappa,
                   'hall': self.hall,
                   'warning': self.warning, 'doping': self.doping,
                   'mu_doping': self.mu_doping,
                   'seebeck_doping': self.seebeck_doping,
                   'cond_doping': self.cond_doping,
                   'kappa_doping': self.kappa_doping,
                   'hall_doping': self.hall_doping,
                   'dos': self.dos.as_dict(),
                   'dos_partial': self._dos_partial,
                   'carrier_conc': self.carrier_conc,
                   'vol': self.vol}
        return jsanitize(results)
Example #42
def index(request):
    ctx = RequestContext(request)
    if request.user.is_authenticated():
        API_KEY = request.user.api_key
        ENDPOINT = request.build_absolute_uri(get_endpoint())
        with UWSI2Rester(API_KEY, endpoint=ENDPOINT) as mpr:
            contribs = jsanitize(mpr.get_uwsi2_contributions())
            ranges = None
            for contrib in contribs:
                table = contrib['tables']['data_supporting']
                if ranges is None:
                    col_names = [c['name'] for c in table['columns'][1:]]  # skip solute string
                    ranges = dict((k, [1e3, -1e3]) for k in col_names)
                for row in table['rows']:
                    for k, v in ranges.items():
                        val = float(row[k])
                        if val < v[0]: v[0] = val
                        if val > v[1]: v[1] = val
    else:
        ctx.update({'alert': 'Please log in!'})
    return render_to_response("uwsi2_explorer_index.html", locals(), ctx)
Example #43
def get_data():
    cname = request.args.get("collection").split(":")[0]
    settings = CSETTINGS[cname]
    search_string = request.args.get("search_string")
    xaxis = request.args.get("xaxis")
    yaxis = request.args.get("yaxis")

    xaxis = get_mapped_name(settings, xaxis)
    yaxis = get_mapped_name(settings, yaxis)

    projection = [xaxis, yaxis]

    if search_string.strip() != "":
        criteria = process_search_string(search_string, settings)
        data = []
        for r in DB[cname].find(criteria, projection=projection):
            x = _get_val(xaxis, r, None)
            y = _get_val(yaxis, r, None)
            if x and y:
                data.append([x, y])
    else:
        data = []
    return jsonify(jsanitize(data))
Example #44
def autocomplete():
    terms = []
    criteria = {}

    search_string = request.args.get('term')
    cname = request.args.get("collection").split(":")[0]

    collection = DB[cname]
    settings = CSETTINGS[cname]

    # if search looks like a special query, autocomplete values
    for regex in settings["query"]:
        if re.match(r'%s' % regex[1], search_string):
            criteria[regex[0]] = {'$regex': str(process(search_string, regex[2]))}
            projection = {regex[0]: 1}

            results = collection.find(criteria, projection)

            if results:
                terms = [term[regex[0]] for term in results]

    # if search looks like a query dict, autocomplete keys
    if not criteria and search_string[0:2] == '{"':
        if search_string.count('"') % 2 != 0:
            splitted = search_string.split('"')
            previous = splitted[:-1]
            last = splitted[-1]

            # get list of autocomplete keys from settings
            # generic alternative: use a schema analyzer like variety.js
            results = _search_dict(settings["autocomplete_keys"], last)

            if results:
                terms = ['"'.join(previous + [term]) + '":' for term in results]

    return jsonify(matching_results=jsanitize(list(set(terms))))
Example #45
    def as_dict(self):
        # sanitize to avoid numpy arrays and serialize PMGSonable objects
        return jsanitize(dict(args=self.args, kwargs=self.kwargs), strict=True)
Example #48
    def as_dict(self):
        d = dict(event_type=self.event_type)
        if self.details:
            d['details'] = jsanitize(self.details, strict=True)

        return d
Example #49
    def run_task(self, fw_spec):
        ref_struct = self['structure']
        d = {
            "analysis": {},
            "initial_structure": self['structure'].as_dict()
        }

        # Get optimized structure
        calc_locs_opt = [cl for cl in fw_spec.get('calc_locs', []) if 'optimiz' in cl['name']]
        if calc_locs_opt:
            optimize_loc = calc_locs_opt[-1]['path']
            logger.info("Parsing initial optimization directory: {}".format(optimize_loc))
            drone = VaspDrone()
            optimize_doc = drone.assimilate(optimize_loc)
            opt_struct = Structure.from_dict(optimize_doc["calcs_reversed"][0]["output"]["structure"])
            d.update({"optimized_structure": opt_struct.as_dict()})
            ref_struct = opt_struct
            eq_stress = -0.1*Stress(optimize_doc["calcs_reversed"][0]["output"]["ionic_steps"][-1]["stress"])
        else:
            eq_stress = None

        if self.get("fw_spec_field"):
            d.update({self.get("fw_spec_field"): fw_spec.get(self.get("fw_spec_field"))})

        # Get the stresses, strains, deformations from deformation tasks
        defo_dicts = fw_spec["deformation_tasks"].values()
        stresses, strains, deformations = [], [], []
        for defo_dict in defo_dicts:
            stresses.append(Stress(defo_dict["stress"]))
            strains.append(Strain(defo_dict["strain"]))
            deformations.append(Deformation(defo_dict["deformation_matrix"]))
            # Add derived stresses and strains if symmops is present
            for symmop in defo_dict.get("symmops", []):
                stresses.append(Stress(defo_dict["stress"]).transform(symmop))
                strains.append(Strain(defo_dict["strain"]).transform(symmop))
                deformations.append(Deformation(defo_dict["deformation_matrix"]).transform(symmop))

        # VASP reports stress in kBar with the opposite sign convention; -0.1 converts to GPa
        stresses = [-0.1*s for s in stresses]
        pk_stresses = [stress.piola_kirchoff_2(deformation)
                       for stress, deformation in zip(stresses, deformations)]

        d['fitting_data'] = {'cauchy_stresses': stresses,
                             'eq_stress': eq_stress,
                             'strains': strains,
                             'pk_stresses': pk_stresses,
                             'deformations': deformations
                             }

        logger.info("Analyzing stress/strain data")
        # TODO: @montoyjh: what if it's a cubic system? don't need 6. -computron
        # TODO: Can add population method but want to think about how it should
        #           be done. -montoyjh
        order = self.get('order', 2)
        if order > 2:
            method = 'finite_difference'
        else:
            method = self.get('fitting_method', 'finite_difference')

        if method == 'finite_difference':
            result = ElasticTensorExpansion.from_diff_fit(
                    strains, pk_stresses, eq_stress=eq_stress, order=order)
            if order == 2:
                result = ElasticTensor(result[0])
        elif method == 'pseudoinverse':
            result = ElasticTensor.from_pseudoinverse(strains, pk_stresses)
        elif method == 'independent':
            result = ElasticTensor.from_independent_strains(strains, pk_stresses, eq_stress=eq_stress)
        else:
            raise ValueError("Unsupported method, method must be finite_difference, "
                             "pseudoinverse, or independent")

        ieee = result.convert_to_ieee(ref_struct)
        d.update({
            "elastic_tensor": {
                "raw": result.voigt,
                "ieee_format": ieee.voigt
            }
        })
        if order == 2:
            d.update({"derived_properties": ieee.get_structure_property_dict(ref_struct)})
        else:
            soec = ElasticTensor(ieee[0])
            d.update({"derived_properties": soec.get_structure_property_dict(ref_struct)})

        d["formula_pretty"] = ref_struct.composition.reduced_formula
        d["fitting_method"] = method
        d["order"] = order

        d = jsanitize(d)

        # Save analysis results in json or db
        db_file = env_chk(self.get('db_file'), fw_spec)
        if not db_file:
            with open("elasticity.json", "w") as f:
                f.write(json.dumps(d, default=DATETIME_HANDLER))
        else:
            db = VaspCalcDb.from_db_file(db_file, admin=True)
            db.collection = db.db["elasticity"]
            db.collection.insert_one(d)
            logger.info("Elastic analysis complete.")
        
        return FWAction()
Example #50
    def run_task(self, fw_spec):

        gibbs_dict = {}

        tag = self["tag"]
        t_step = self.get("t_step", 10)
        t_min = self.get("t_min", 0)
        t_max = self.get("t_max", 1000)
        mesh = self.get("mesh", [20, 20, 20])
        eos = self.get("eos", "vinet")
        qha_type = self.get("qha_type", "debye_model")
        pressure = self.get("pressure", 0.0)
        poisson = self.get("poisson", 0.25)
        anharmonic_contribution = self.get("anharmonic_contribution", False)
        gibbs_dict["metadata"] = self.get("metadata", {})

        db_file = env_chk(self.get("db_file"), fw_spec)
        mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
        # get the optimized structure
        d = mmdb.collection.find_one({"task_label": "{} structure optimization".format(tag)},
                                     {"calcs_reversed": 1})
        structure = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
        gibbs_dict["structure"] = structure.as_dict()
        gibbs_dict["formula_pretty"] = structure.composition.reduced_formula

        # get the data(energy, volume, force constant) from the deformation runs
        docs = mmdb.collection.find({"task_label": {"$regex": "{} gibbs*".format(tag)},
                                     "formula_pretty": structure.composition.reduced_formula},
                                    {"calcs_reversed": 1})
        energies = []
        volumes = []
        force_constants = []
        for d in docs:
            s = Structure.from_dict(d["calcs_reversed"][-1]["output"]['structure'])
            energies.append(d["calcs_reversed"][-1]["output"]['energy'])
            if qha_type not in ["debye_model"]:
                force_constants.append(d["calcs_reversed"][-1]["output"]['force_constants'])
            volumes.append(s.volume)
        gibbs_dict["energies"] = energies
        gibbs_dict["volumes"] = volumes
        if qha_type not in ["debye_model"]:
            gibbs_dict["force_constants"] = force_constants

        try:
            # use quasi-harmonic debye approximation
            if qha_type in ["debye_model"]:

                from pymatgen.analysis.quasiharmonic import QuasiharmonicDebyeApprox

                qhda = QuasiharmonicDebyeApprox(energies, volumes, structure, t_min, t_step, t_max,
                                                eos, pressure=pressure, poisson=poisson,
                                                anharmonic_contribution=anharmonic_contribution)
                gibbs_dict.update(qhda.get_summary_dict())
                gibbs_dict["anharmonic_contribution"] = anharmonic_contribution
                gibbs_dict["success"] = True

            # use the phonopy interface
            else:

                from atomate.vasp.analysis.phonopy import get_phonopy_gibbs

                G, T = get_phonopy_gibbs(energies, volumes, force_constants, structure, t_min,
                                         t_step, t_max, mesh, eos, pressure)
                gibbs_dict["gibbs_free_energy"] = G
                gibbs_dict["temperatures"] = T
                gibbs_dict["success"] = True

        # quasi-harmonic analysis failed, set the flag to false
        except:
            import traceback

            logger.warn("Quasi-harmonic analysis failed!")
            gibbs_dict["success"] = False
            gibbs_dict["traceback"] = traceback.format_exc()
            gibbs_dict['metadata'].update({"task_label_tag": tag})
            gibbs_dict["created_at"] = datetime.utcnow()

        gibbs_dict = jsanitize(gibbs_dict)

        # TODO: @matk86: add a list of task_ids that were used to construct the analysis to DB?
        # -computron
        if not db_file:
            dump_file = "gibbs.json"
            logger.info("Dumping the analysis summary to {}".format(dump_file))
            with open(dump_file, "w") as f:
                f.write(json.dumps(gibbs_dict, default=DATETIME_HANDLER))
        else:
            coll = mmdb.db["gibbs_tasks"]
            coll.insert_one(gibbs_dict)

        logger.info("Gibbs free energy calculation complete.")

        if not gibbs_dict["success"]:
            return FWAction(defuse_children=True)
Example #51
    def run_task(self, fw_spec):
        # import here to prevent import errors in bigger MPCollab
        # get the band structure and nelect from files
        """
        prev_dir = get_loc(fw_spec['prev_vasp_dir'])
        vasprun_loc = zpath(os.path.join(prev_dir, 'vasprun.xml'))
        kpoints_loc = zpath(os.path.join(prev_dir, 'KPOINTS'))

        vr = Vasprun(vasprun_loc)
        bs = vr.get_band_structure(kpoints_filename=kpoints_loc)
        """
        filename = get_slug(
            'JOB--' + fw_spec['mpsnl'].structure.composition.reduced_formula + '--'
            + fw_spec['task_type'])
        with open(filename, 'w+') as f:
            f.write('')

        # get the band structure and nelect from DB
        block_part = get_block_part(fw_spec['prev_vasp_dir'])

        db_dir = os.environ['DB_LOC']
        assert isinstance(db_dir, object)
        db_path = os.path.join(db_dir, 'tasks_db.json')
        with open(db_path) as f:
            creds = json.load(f)
            connection = MongoClient(creds['host'], creds['port'])
            tdb = connection[creds['database']]
            tdb.authenticate(creds['admin_user'], creds['admin_password'])

            props = {"calculations": 1, "task_id": 1, "state": 1, "pseudo_potential": 1, "run_type": 1, "is_hubbard": 1, "hubbards": 1, "unit_cell_formula": 1}
            m_task = tdb.tasks.find_one({"dir_name": block_part}, props)
            if not m_task:
                time.sleep(60)  # the task doc may not be inserted yet; wait and retry once
                m_task = tdb.tasks.find_one({"dir_name": block_part}, props)

            if not m_task:
                raise ValueError("Could not find task with dir_name: {}".format(block_part))

            if m_task['state'] != 'successful':
                raise ValueError("Cannot run Boltztrap; parent job unsuccessful")

            nelect = m_task['calculations'][0]['input']['parameters']['NELECT']
            bs_id = m_task['calculations'][0]['band_structure_fs_id']
            print(bs_id, type(bs_id))
            fs = gridfs.GridFS(tdb, 'band_structure_fs')
            bs_dict = json.loads(fs.get(bs_id).read())
            bs_dict['structure'] = m_task['calculations'][0]['output']['crystal']
            bs = BandStructure.from_dict(bs_dict)
            print('Band Structure found:', bool(bs))
            print(nelect)

            # run Boltztrap
            runner = BoltztrapRunner(bs, nelect)
            dir = runner.run(path_dir=os.getcwd())

            # put the data in the database
            bta = BoltztrapAnalyzer.from_files(dir)

            # 8/21/15 - Anubhav removed fs_id (also see line further below, ted['boltztrap_full_fs_id'] ...)
            # 8/21/15 - this is to save space in MongoDB, as well as non-use of full Boltztrap output (vs rerun)
            """
            data = bta.as_dict()
            data.update(get_meta_from_structure(bs._structure))
            data['snlgroup_id'] = fw_spec['snlgroup_id']
            data['run_tags'] = fw_spec['run_tags']
            data['snl'] = fw_spec['mpsnl']
            data['dir_name_full'] = dir
            data['dir_name'] = get_block_part(dir)
            data['task_id'] = m_task['task_id']
            del data['hall']  # remove because it is too large and not useful
            fs = gridfs.GridFS(tdb, "boltztrap_full_fs")
            btid = fs.put(json.dumps(jsanitize(data)))
            """

            # now for the "sanitized" data
            ted = bta.as_dict()
            del ted['seebeck']
            del ted['hall']
            del ted['kappa']
            del ted['cond']

            # ted['boltztrap_full_fs_id'] = btid
            ted['snlgroup_id'] = fw_spec['snlgroup_id']
            ted['run_tags'] = fw_spec['run_tags']
            ted['snl'] = fw_spec['mpsnl'].as_dict()
            ted['dir_name_full'] = dir
            ted['dir_name'] = get_block_part(dir)
            ted['task_id'] = m_task['task_id']

            ted['pf_doping'] = bta.get_power_factor(output='tensor', relaxation_time=self.TAU)
            ted['zt_doping'] = bta.get_zt(output='tensor', relaxation_time=self.TAU, kl=self.KAPPAL)

            ted['pf_eigs'] = self.get_eigs(ted, 'pf_doping')
            ted['pf_best'] = self.get_extreme(ted, 'pf_eigs')
            ted['pf_best_dope18'] = self.get_extreme(ted, 'pf_eigs', max_didx=3)
            ted['pf_best_dope19'] = self.get_extreme(ted, 'pf_eigs', max_didx=4)
            ted['zt_eigs'] = self.get_eigs(ted, 'zt_doping')
            ted['zt_best'] = self.get_extreme(ted, 'zt_eigs')
            ted['zt_best_dope18'] = self.get_extreme(ted, 'zt_eigs', max_didx=3)
            ted['zt_best_dope19'] = self.get_extreme(ted, 'zt_eigs', max_didx=4)
            ted['seebeck_eigs'] = self.get_eigs(ted, 'seebeck_doping')
            ted['seebeck_best'] = self.get_extreme(ted, 'seebeck_eigs')
            ted['seebeck_best_dope18'] = self.get_extreme(ted, 'seebeck_eigs', max_didx=3)
            ted['seebeck_best_dope19'] = self.get_extreme(ted, 'seebeck_eigs', max_didx=4)
            ted['cond_eigs'] = self.get_eigs(ted, 'cond_doping')
            ted['cond_best'] = self.get_extreme(ted, 'cond_eigs')
            ted['cond_best_dope18'] = self.get_extreme(ted, 'cond_eigs', max_didx=3)
            ted['cond_best_dope19'] = self.get_extreme(ted, 'cond_eigs', max_didx=4)
            ted['kappa_eigs'] = self.get_eigs(ted, 'kappa_doping')
            ted['kappa_best'] = self.get_extreme(ted, 'kappa_eigs', maximize=False)
            ted['kappa_best_dope18'] = self.get_extreme(ted, 'kappa_eigs', maximize=False, max_didx=3)
            ted['kappa_best_dope19'] = self.get_extreme(ted, 'kappa_eigs', maximize=False, max_didx=4)

            try:
                from mpcollab.thermoelectrics.boltztrap_TE import BoltzSPB
                bzspb = BoltzSPB(ted)
                maxpf_p = bzspb.get_maximum_power_factor('p', temperature=0, tau=1E-14, ZT=False, kappal=0.5,\
                    otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                maxpf_n = bzspb.get_maximum_power_factor('n', temperature=0, tau=1E-14, ZT=False, kappal=0.5,\
                    otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                maxzt_p = bzspb.get_maximum_power_factor('p', temperature=0, tau=1E-14, ZT=True, kappal=0.5, otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                maxzt_n = bzspb.get_maximum_power_factor('n', temperature=0, tau=1E-14, ZT=True, kappal=0.5, otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                ted['zt_best_finemesh'] = {'p': maxzt_p, 'n': maxzt_n}
                ted['pf_best_finemesh'] = {'p': maxpf_p, 'n': maxpf_n}
            except:
                import traceback
                traceback.print_exc()
                print('COULD NOT GET FINE MESH DATA')

            # add is_compatible
            mpc = MaterialsProjectCompatibility("Advanced")
            try:
                func = m_task["pseudo_potential"]["functional"]
                labels = m_task["pseudo_potential"]["labels"]
                symbols = ["{} {}".format(func, label) for label in labels]
                parameters = {"run_type": m_task["run_type"],
                          "is_hubbard": m_task["is_hubbard"],
                          "hubbards": m_task["hubbards"],
                          "potcar_symbols": symbols}
                entry = ComputedEntry(Composition(m_task["unit_cell_formula"]),
                                      0.0, 0.0, parameters=parameters,
                                      entry_id=m_task["task_id"])

                ted["is_compatible"] = bool(mpc.process_entry(entry))
            except:
                traceback.print_exc()
                print('ERROR in getting compatibility, task_id: {}'.format(m_task["task_id"]))
                ted["is_compatible"] = None

            tdb.boltztrap.insert(jsanitize(ted))

            update_spec = {'prev_vasp_dir': fw_spec['prev_vasp_dir'],
                       'boltztrap_dir': os.getcwd(),
                       'prev_task_type': fw_spec['task_type'],
                       'mpsnl': fw_spec['mpsnl'].as_dict(),
                       'snlgroup_id': fw_spec['snlgroup_id'],
                       'run_tags': fw_spec['run_tags'], 'parameters': fw_spec.get('parameters')}

        return FWAction(update_spec=update_spec)
Example #52
    def as_dict(self):
        d = {}
        d["data"] = self.data
        d["text"] = self.text
        d["filename"] = self.filename
        return jsanitize(d, strict=True)