Example #1
import gridfs

def gridfs_in(db, time, input):
    # Note: GridFS.open() is from the legacy pre-1.6 gridfs API and no
    # longer exists in modern PyMongo; see the put()-based sketch below.
    name = input.name
    fs = gridfs.GridFS(db)
    f = fs.open(name, "w")
    f.write(input.read())
    f.close()
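A minimal modern equivalent, assuming db is a pymongo database handle and input is a readable file-like object with a name attribute:

import gridfs

def gridfs_in(db, time, input):
    # put() stores the bytes and returns the new file's ObjectId.
    fs = gridfs.GridFS(db)
    return fs.put(input.read(), filename=input.name)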
Example #2
import gridfs
from pymongo import MongoClient
from kafka import KafkaClient  # kafka-python (assumed; the original imports are cut off)

import config
import loghelper
import my_request
import util
import db
import extract

#logger
loghelper.init_logger("beian", stream=True)
logger = loghelper.get_logger("beian")

#mongo
(mongodb_host, mongodb_port) = config.get_mongodb_config()
mongo = MongoClient(mongodb_host, mongodb_port)
fromdb = mongo.crawler_v2
imgfs = gridfs.GridFS(mongo.gridfs)

#mysql
conn = None

# kafka
kafkaProducer = None
kafkaConsumer = None


def initKafka():
    global kafkaProducer
    global kafkaConsumer

    url = config.get_kafka_config()
    kafka = KafkaClient(url)
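The original snippet is cut off here. A hedged completion of initKafka(), assuming the legacy kafka-python (< 2.0) API implied by the KafkaClient import; the topic and group names are hypothetical:

from kafka import KafkaClient, SimpleProducer, SimpleConsumer

def initKafka():
    global kafkaProducer
    global kafkaConsumer

    url = config.get_kafka_config()
    kafka = KafkaClient(url)
    kafkaProducer = SimpleProducer(kafka)
    kafkaConsumer = SimpleConsumer(kafka, "beian-group", "beian-topic")  # hypothetical names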
Example #3
    def _check_run(self, d, mode):
        if mode not in [
                "structure optimization", "static", "nscf uniform",
                "nscf line", "additional field"
        ]:
            raise ValueError("Invalid mode!")

        self.assertEqual(d["formula_pretty"], "Si")
        self.assertEqual(d["formula_anonymous"], "A")
        self.assertEqual(d["nelements"], 1)
        self.assertEqual(d["state"], "successful")
        self.assertAlmostEqual(
            d["calcs_reversed"][0]["output"]["structure"]["lattice"]["a"],
            3.867, 2)
        self.assertEqual(d["output"]["is_gap_direct"], False)

        if mode in ["structure optimization", "static"]:
            self.assertAlmostEqual(d["output"]["energy"], -10.850, 2)
            self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.425, 2)

        if mode == "additional field":
            self.assertAlmostEqual(d["test_additional_field"]["lattice"]["a"],
                                   3.8401979337)

        elif mode in ["ncsf uniform"]:
            self.assertAlmostEqual(d["output"]["energy"], -10.828, 2)
            self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.414, 2)

        self.assertAlmostEqual(d["output"]["bandgap"], 0.65, 1)

        if "nscf" in mode:
            self.assertEqual(
                d["calcs_reversed"][0]["output"]["outcar"]
                ["total_magnetization"], None)
        else:
            self.assertAlmostEqual(
                d["calcs_reversed"][0]["output"]["outcar"]
                ["total_magnetization"], 0, 3)

        self.assertLess(d["run_stats"]["overall"]["Elapsed time (sec)"],
                        180)  # run should take under 3 minutes

        # check the DOS and band structure
        if mode == "nscf uniform" or mode == "nscf line":
            fs = gridfs.GridFS(self.get_task_database(), 'bandstructure_fs')

            # check the band structure
            bs_fs_id = d["calcs_reversed"][0]["bandstructure_fs_id"]
            bs_json = zlib.decompress(fs.get(bs_fs_id).read())
            bs = json.loads(bs_json.decode())
            self.assertEqual(bs["is_spin_polarized"], False)
            self.assertEqual(bs["band_gap"]["direct"], False)
            self.assertAlmostEqual(bs["band_gap"]["energy"], 0.65, 1)
            self.assertEqual(bs["is_metal"], False)

            if mode == "nscf uniform":
                for k in [
                        "is_spin_polarized", "band_gap", "structure",
                        "kpoints", "is_metal", "vbm", "cbm", "labels_dict",
                        "projections", "lattice_rec", "bands"
                ]:
                    self.assertTrue(k in bs)
                    self.assertIsNotNone(bs[k])

                self.assertEqual(bs["@class"], "BandStructure")

            else:
                for k in [
                        "is_spin_polarized", "band_gap", "structure",
                        "kpoints", "is_metal", "vbm", "cbm", "labels_dict",
                        "projections", "lattice_rec", "bands", "branches"
                ]:
                    self.assertTrue(k in bs)
                    self.assertIsNotNone(bs[k])
                self.assertEqual(bs["@class"], "BandStructureSymmLine")

            # check the DOS
            if mode == "nscf uniform":
                fs = gridfs.GridFS(self.get_task_database(), 'dos_fs')
                dos_fs_id = d["calcs_reversed"][0]["dos_fs_id"]

                dos_json = zlib.decompress(fs.get(dos_fs_id).read())
                dos = json.loads(dos_json.decode())
                for k in [
                        "densities", "energies", "pdos", "spd_dos", "atom_dos",
                        "structure"
                ]:
                    self.assertTrue(k in dos)
                    self.assertIsNotNone(dos[k])

                self.assertAlmostEqual(dos["spd_dos"]["p"]["efermi"], 5.625, 1)
                self.assertAlmostEqual(dos["atom_dos"]["Si"]["efermi"], 5.625,
                                       1)
                self.assertAlmostEqual(dos["structure"]["lattice"]["a"], 3.867,
                                       2)
                self.assertAlmostEqual(dos["spd_dos"]["p"]["efermi"], 5.625, 1)
                self.assertAlmostEqual(dos["atom_dos"]["Si"]["efermi"], 5.625,
                                       1)
                self.assertAlmostEqual(dos["structure"]["lattice"]["a"], 3.867,
                                       2)
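For reference, the band-structure document that this test reads back was stored as zlib-compressed JSON; a minimal sketch of that write path, assuming only the collection name used above:

import json
import zlib

import gridfs

def store_bandstructure(db, bs_dict):
    # The test reverses this with zlib.decompress(fs.get(fs_id).read())
    # followed by json.loads().
    fs = gridfs.GridFS(db, 'bandstructure_fs')
    return fs.put(zlib.compress(json.dumps(bs_dict).encode()))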
Example #4
import re

import gridfs
from pymongo import MongoClient

# connString, overwrite, and cleanFileName() are defined earlier in the
# original script.

##### Connect to the Database #####
db = MongoClient(connString)
for database in db.database_names():
    if database != "admin" and database != "local":
        db = MongoClient(connString)[database]
        print("--database:" + database)

        ##### Get a model ID and find entries #####
        for setting in db.settings.find(no_cursor_timeout=True):
            modelId = setting.get('_id')
            print("\t--model: " + modelId)
            for colPrefix in [".history", ".stash.json_mpc", ".stash.unity3d"]:
                colName = modelId + colPrefix
                targetCol = colName + ".ref"
                print("\t\t--stash: " + colName)
                fs = gridfs.GridFS(db, colName)
                for entry in fs.find(
                    {"filename": {
                        "$not": re.compile("unityAssets.json$")
                    }}):
                    #### Create Reference BSON #####
                    if not overwrite:
                        if db[targetCol].find_one(
                            {
                                "_id": cleanFileName(entry.filename),
                                "type": "gridfs"
                            }) is not None:
                            print("\t\t Found entry for " +
                                  cleanFileName(entry.filename) +
                                  ", skipping...")
                            continue
Example #5
 def get_file(self, job_name, file_name):
     """
     """
     if job_name not in self.colls:
         self.colls[job_name] = gridfs.GridFS(self.user_db, job_name)
     return self.colls[job_name].get_last_version(filename=file_name)
Example #6
 def __init__(self):
     self.db = MongoClient().news
     self.fs = gridfs.GridFS(self.db)
Example #7
# -*- coding:utf-8 -*-

import os

from pymongo import (MongoReplicaSetClient, MongoClient, read_preferences)
import gridfs

import setting

mc = MongoClient(host='localhost')

# test
test = mc['test']
test_files = gridfs.GridFS(mc['test_files'])

# user
user = mc['user']
user_files = gridfs.GridFS(mc['user_files'])
Example #8
    def process(self):
        # Mongodb index for localities
        tindex = self.client.endpoints.taxonIndex
        mindex = self.client.endpoints.mediaIndex3
        # Mongodb gridFS instance
        grid = gridfs.GridFS(self.client.endpoints)

        # returns dictionary of params as defined in endpoint description
        # will throw exception if required param is not present
        params = self.getParams()
        # offset and limit returned as ints with default if not set
        offset = self.offset()
        limit = self.limit()

        if self.paramCount > 0:
            imageRes = None
            res = None
            criteria = {
                'endpoint': 'taxonomy',
                'parameters': {},
                'matchTerms': {
                    'scientificNames': []
                }
            }
            taxonQuery = []
            for p in [{
                    'scientificName':
                ['scientificNames', 'originalScientificName']
            }, {
                    'species': ['species', 'taxonomy.species']
            }, {
                    'genus': ['genus', 'taxonomy.genus']
            }, {
                    'family': ['family', 'taxonomy.family']
            }, {
                    'order': ['order', 'taxonomy.order']
            }, {
                    'class': ['class', 'taxonomy.class']
            }, {
                    'phylum': ['phylum', 'taxonomy.phylum']
            }, {
                    'kingdom': ['kingdom', 'taxonomy.kingdom']
            }, {
                    'other': ['taxonomy.noRank']
            }]:
                val = list(p.keys())[0]  # list() for Python 3 compatibility
                if (params[val]):
                    criteria['parameters'][val] = params[val]
                    for field in p[val]:
                        taxonQuery.append({field: params[val]})
            #for p in ['scientificNames', 'species', 'genus', 'family', 'order', 'class', 'family', 'phylum', 'kingdom', 'other']:
            #	if (params[p]):
            #		criteria['parameters'][p] = params[p]
            #		taxonQuery.append({p: params[p]})

            if (params['fullTaxonomy']):
                criteria['parameters']['fullTaxonomy'] = params['fullTaxonomy']
                taxonQuery.append({
                    '$text': {
                        '$search': '"' + params['fullTaxonomy'] + '"',
                        '$caseSensitive': False
                    }
                })

            if (len(taxonQuery) == 0):
                return self.respondWithError(
                    {"GENERAL": "No valid parameters specified"})
            if len(taxonQuery) > 0:
                res = tindex.find({'$and': taxonQuery})

            d = []
            matches = {'idigbio': [], 'pbdb': []}
            idbCount = 0
            pbdbCount = 0
            if res:
                for i in res:
                    if 'scientificNames' in i:
                        scientificNames = i['scientificNames']
                        for sciName in scientificNames:
                            if sciName not in criteria['matchTerms'][
                                    'scientificNames']:
                                criteria['matchTerms'][
                                    'scientificNames'].append(sciName)
                    if 'taxonomy' in i:
                        taxonomy = i['taxonomy']
                        taxon_ranks = taxonomy.keys()
                        for rank in taxon_ranks:
                            if rank in criteria['matchTerms']:
                                for term in taxonomy[rank]:
                                    if term not in criteria['matchTerms'][
                                            rank]:
                                        criteria['matchTerms'][rank].append(
                                            term)
                            else:
                                criteria['matchTerms'][rank] = []
                                for term in taxonomy[rank]:
                                    criteria['matchTerms'][rank].append(term)

                    if 'pbdbGridFile' in i:
                        pbdbGrids = i['pbdbGridFile']
                        for file in pbdbGrids:
                            pbdb_doc = grid.get(file)
                            pbdb_matches = json.loads(pbdb_doc.read())
                            matches['pbdb'] = matches['pbdb'] + pbdb_matches

                    if 'idbGridFile' in i:
                        if type(i['idbGridFile']) is list:
                            idbGrids = i['idbGridFile']
                            for file in idbGrids:
                                idb_doc = grid.get(file)
                                idb_matches = json.loads(idb_doc.read())
                                matches['idigbio'] = matches[
                                    'idigbio'] + idb_matches
                        else:
                            idb_doc = grid.get(i['idbGridFile'])
                            idb_matches = json.loads(idb_doc.read())
                            matches[
                                'idigbio'] = matches['idigbio'] + idb_matches

            imageQuery = []
            media = []
            if (params['images'] == 'true'):
                imgRes = mindex.find(
                    {'idigbio_uuids': {
                        '$in': matches['idigbio']
                    }})
                for res in imgRes:
                    imgSpecimens = res['mediaURIs']
                    for specimen in imgSpecimens:
                        url = 'https://www.idigbio.org/portal/records/' + specimen
                        links = imgSpecimens[specimen]
                        for link in links:
                            if [url, link] in media:
                                continue
                            media.append([url, link])

            idbCount = len(matches['idigbio'])
            pbdbCount = len(matches['pbdb'])

            item = {
                'matches': {
                    'idigbio': matches['idigbio'],
                    'pbdb': matches['pbdb']
                }
            }
            d.append(item)
            d = self.resolveReferences(d)
            counts = {
                'totalCount': idbCount + pbdbCount,
                'idbCount': idbCount,
                'pbdbCount': pbdbCount
            }

            return self.respond({
                'counts': counts,
                'results': d,
                'criteria': criteria,
                'media': media
            })

        else:
            return self.respondWithDescription()
Example #9
 def create_fs(self, name):
     self.__fs = gridfs.GridFS(database=self.__db, collection=name)
     return True
Example #10
 def __init__(self):
     self.fs = gridfs.GridFS(db_fs)
     if (env["temporal_files_db"]):
         self.fs_temp = gridfs.GridFS(db_temp)
Example #11
def calib_waveform(file: str, box_id: str,
                   mongo_client: pymongo.MongoClient) -> np.ndarray:
    # to_s16bit() and calibration_constant() are helper functions defined
    # elsewhere in the original module.
    fs = gridfs.GridFS(mongo_client.opq)
    waveform = fs.find_one({"filename": file}).read()
    waveform = to_s16bit(waveform).astype(np.int64)
    return waveform / calibration_constant(box_id, mongo_client)
Example #12
import re
import uuid

import gridfs
from pymongo import MongoClient

# connString, dry_run, and missing_files ([]) are defined earlier in the
# original script.
db = MongoClient(connString)

##### Loop through each database other than admin and local #####
for database in db.database_names():
	if database != "admin" and database != "local":
		db = MongoClient(connString)[database]
		print("--database:" + database)

##### Get a model ID #####
		for model in db.settings.find({"federate": {"$ne": True}}, {"_id": 1}):
			model_id = model.get('_id')
			print("\t--model: " + model_id)
			col_name = model_id + ".stash.json_mpc"

##### Check if bson files exist before deleting #####
			fs = gridfs.GridFS(db, col_name)
			unity_asset_files = fs.find({"filename" : {"$regex": "unityAssets.json$" }})
			for asset_file in unity_asset_files:
				rev_id = re.search(r"(?<=revision/).*?(?=/unityAssets\.json)", asset_file.filename).group(0)
				col_name_check = model_id + ".stash.unity3d"
				entry = db[col_name_check].find_one({"_id" : uuid.UUID(rev_id)})
				if entry:
##### Delete unityAssets.json files #####
					print "\t\tdeleting: " + asset_file.filename
					if not dry_run:
						fs.delete(asset_file._id)
				else:
					missing_files.append(asset_file.filename)

if len(missing_files) > 0:
	print "The following asset bundles did not have matching BSON entries:"
Example #13
import gridfs
from typing import List
from pymongo import MongoClient
from webserver.utilities.configuration import configuration

_client = MongoClient(configuration['web']['mongo_url'])
_collection = _client.file_storage  # despite the name, this is a database handle
_fs = gridfs.GridFS(_collection)
# TTL index: file documents expire ~10 days after upload. Note that a TTL
# index on fs.files does not delete the corresponding fs.chunks documents.
_collection.fs.files.create_index('uploadDate',
                                  expireAfterSeconds=10 * 24 * 60 * 60)


def write_file(job_id, file_identifier, file_path):
    with open(file_path, "rb") as f:
        index = _fs.put(f, filename=file_identifier, job=job_id)

    return index


def get_file(job_id, file_identifier):
    db_file = _fs.find_one(filter={"filename": file_identifier, "job": job_id})

    if db_file:
        return _fs.get(db_file._id)
    else:
        return None


def get_list_of_files(job_id) -> List:
    file_list = _fs.find(filter={"job": job_id})
    # Assumed completion: the original snippet is cut off after the find().
    return [f.filename for f in file_list]
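A short usage sketch of these helpers; the job id and file paths are hypothetical:

job_id = "job-42"
index = write_file(job_id, "results.csv", "/tmp/results.csv")

stored = get_file(job_id, "results.csv")
if stored is not None:
    print(stored.read()[:80])

print(get_list_of_files(job_id))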
Example #14
def gridfs_out(db, time, input, output):
    # Note: GridFS.open() is from the legacy pre-1.6 gridfs API; see the
    # modern get_last_version() sketch below.
    fs = gridfs.GridFS(db)
    f = fs.open(input.name)
    output.write(f.read())
    f.close()
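A modern-PyMongo version of the same read path, assuming db is a pymongo database handle:

import gridfs

def gridfs_out(db, time, input, output):
    # get_last_version() returns a file-like GridOut for the newest
    # stored file with the given filename.
    fs = gridfs.GridFS(db)
    output.write(fs.get_last_version(filename=input.name).read())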
Example #15
import gridfs
from pymongo import MongoClient


class Base(object):
	mongod_host = '127.0.0.1'
	mongod_port = 27017
	mongod_dbname = 'scandb'
	client = MongoClient(mongod_host, mongod_port)
	fs = gridfs.GridFS(client[mongod_dbname])
Example #16
 def __init__(self):
     self.client = MongoClient('localhost', 27017)
     self.db = self.client.pattern_recognition
     self.fs = gridfs.GridFS(self.db)
Example #17
# -*- coding: utf-8 -*-
from io import BytesIO
from pymongo import MongoClient
import gridfs
import os

if __name__ == '__main__':
    connect = MongoClient('127.0.0.1', 27017)  # create the connection
    db = connect.Spider
    # print(db.collection_names())  # prints every collection name in the db
    imgput = gridfs.GridFS(db)

    dirs = r'F:\image'
    files = os.listdir(dirs)
    for file in files:
        filesname = dirs + '\\' + file
        print(filesname)
        f = file.split('.')
        datatmp = open(filesname, 'rb')
        data = BytesIO(datatmp.read())

        insertimg = imgput.put(data, content_type=f[1], filename=f[0])
        datatmp.close()
Example #18
import argparse
import io
import mimetypes
import requests
from PIL import Image

from flask import send_file  # assumed: serve_pil_image() returns Flask's send_file response
from pymongo import MongoClient
import gridfs

# setup mongo
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017

# connect to the database & get a gridfs handle
mongo_con = MongoClient(MONGODB_HOST, MONGODB_PORT)
grid_fs = gridfs.GridFS(mongo_con.test_database)


def serve_pil_image(pil_img):
    """
    see: 
        https://groups.google.com/forum/?fromgroups=#!topic/python-tornado/B19D6ll_uZE
        http://stackoverflow.com/questions/7877282/how-to-send-image-generated-by-pil-to-browser
    """
    img_io = io.BytesIO()
    pil_img.save(img_io, 'JPEG', quality=70)
    img_io.seek(0)
    return send_file(img_io, mimetype='image/jpeg')


def add_image(image_url):
Example #19
import tarfile
from time import sleep

import gridfs
import pymongo
from sshtunnel import SSHTunnelForwarder

# local_address and port are defined earlier in the original script.
with SSHTunnelForwarder(("MONGO_SERVER_IP", 22),
                        ssh_username="******",
                        ssh_pkey="/home/USER/.ssh/KEYFILE",
                        remote_bind_address=("localhost", 27017),
                        local_bind_address=(local_address, port)) as _:
    sleep(1)

    with pymongo.MongoClient(local_address, port=port) as client:
        # code starts here
        processingdb = client['processing']
        filedb = client['files']
        sourcedb = client['source']
        message_collection = processingdb['needs_processing']
        message_cursor = message_collection.find({})

        tars = gridfs.GridFS(filedb, collection='lab1')
        lab_collection = client['processed']['lab1']
        student_cursor = lab_collection.find({"source": {"$exists": False}})

        for student in student_cursor:
            filename = student['submissions'][0]['filename']
            grid_out = tars.find_one({'_id': filename}, no_cursor_timeout=True)

            with tarfile.open(mode="r:gz", fileobj=grid_out) as tar:
                # print(tar.list())
                assert isinstance(tar, tarfile.TarFile)
                # ch = tar.extractfile()
                # print(tar.getnames())
                # print(tar.getmembers())

                path = [x for x in tar.getnames() if 'famtree.c' in x]
Example #20
 def test_unacknowledged(self):
     # w=0 is prohibited: GridFS requires acknowledged writes.
     with self.assertRaises(ConfigurationError):
         gridfs.GridFS(rs_or_single_client(w=0).pymongo_test)
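A standalone sketch of the same behavior outside the test harness, assuming a local mongod (the URI is hypothetical):

import gridfs
from pymongo import MongoClient
from pymongo.errors import ConfigurationError

client = MongoClient("mongodb://localhost:27017", w=0)  # unacknowledged writes
try:
    gridfs.GridFS(client.pymongo_test)
except ConfigurationError as exc:
    print("rejected as expected:", exc)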
Example #21
def load(self, path, callback):
    db = __conn__(self)
    fs = gridfs.GridFS(db)
    contents = fs.get(ObjectId(path)).read()
    callback(contents)
Example #22
 def setUpClass(cls):
     super(TestGridfs, cls).setUpClass()
     cls.fs = gridfs.GridFS(cls.db)
     cls.alt = gridfs.GridFS(cls.db, "alt")
Example #23
 def put_data(self, job_name, file_name, data):
     """
     """
     if job_name not in self.colls:
         self.colls[job_name] = gridfs.GridFS(self.user_db, job_name)
     self.colls[job_name].put(data, filename=file_name)
Example #24
 def __init__(self):
     self.fs = gridfs.GridFS(db_fs)
     self.collection = db_fs["fs.files"]
     if (env["temporal_files_db"]):
         self.fs_temp = gridfs.GridFS(db_temp)
         self.collection_tmp = db_temp["fs.files"]
Example #25
 def list(self, job_name):
     """
     """
     if job_name not in self.colls:
         self.colls[job_name] = gridfs.GridFS(self.user_db, job_name)
     return self.colls[job_name].list()
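Examples #5, #23, and #25 appear to come from the same per-job file-store class; a hypothetical usage, assuming a JobStore wrapper that sets up self.user_db and self.colls = {}:

store = JobStore()  # hypothetical class exposing the three methods above
store.put_data("nightly", "report.txt", b"all green")
print(store.list("nightly"))                           # ['report.txt']
print(store.get_file("nightly", "report.txt").read())  # b'all green'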
Example #26
args = parser.parse_args()

file_list = os.listdir(args.data_dir)
total_count = len(file_list)
chunk_size = total_count // args.node_count  # integer division for slicing
# Take care of the last few files at the end
chunk_size = min(chunk_size, args.limit_files)
file_list = file_list[chunk_size * args.node : chunk_size * (args.node + 1)]

#connection = pymongo.Connection('mongodb://*****:*****@ds037637-a.mongolab.com:37637/mongotest')
#connection = pymongo.Connection('mongodb://*****:*****@23.20.124.206:27017/testdb')
connection = pymongo.MongoClient('mongodb://' + args.user + ':' + args.passwd + '@' + args.server + ':' + args.port + '/' + args.db)
#db = connection['mongotest']
db = connection[args.db]

grid = gridfs.GridFS(db, args.collection)

in_memory_files = {}
for path in file_list:
	f = open(args.data_dir + '/' + path)
	data = f.read()
	f.close()
	in_memory_files[path] = data

print('All files for node', args.node, 'are loaded into memory.')

if args.enable_checkpoint:
	f = open('node_' + str(args.node), 'w')
	f.close()

try:
Example #27
 def __init__(self, connection, name):
     self.name = name
     self.connection = connection
     # connection is expected to be a pymongo Database; _filetype is
     # defined on subclasses in the original code.
     self.fs = gridfs.GridFS(self.connection, collection=self._filetype)
Example #28
    def run_task(self, fw_spec):
        # import here to prevent import errors in bigger MPCollab
        # get the band structure and nelect from files
        """
        prev_dir = get_loc(fw_spec['prev_vasp_dir'])
        vasprun_loc = zpath(os.path.join(prev_dir, 'vasprun.xml'))
        kpoints_loc = zpath(os.path.join(prev_dir, 'KPOINTS'))

        vr = Vasprun(vasprun_loc)
        bs = vr.get_band_structure(kpoints_filename=kpoints_loc)
        """
        filename = get_slug(
            'JOB--' + fw_spec['mpsnl'].structure.composition.reduced_formula +
            '--' + fw_spec['task_type'])
        with open(filename, 'w+') as f:
            f.write('')

        # get the band structure and nelect from DB
        block_part = get_block_part(fw_spec['prev_vasp_dir'])

        db_dir = os.environ['DB_LOC']
        assert isinstance(db_dir, object)
        db_path = os.path.join(db_dir, 'tasks_db.json')
        with open(db_path) as f:
            creds = json.load(f)
            connection = MongoClient(creds['host'], creds['port'])
            tdb = connection[creds['database']]
            tdb.authenticate(creds['admin_user'], creds['admin_password'])

            props = {
                "calculations": 1,
                "task_id": 1,
                "state": 1,
                "pseudo_potential": 1,
                "run_type": 1,
                "is_hubbard": 1,
                "hubbards": 1,
                "unit_cell_formula": 1
            }
            m_task = tdb.tasks.find_one({"dir_name": block_part}, props)
            if not m_task:
                time.sleep(
                    60)  # only thing to think of is wait for DB insertion(?)
                m_task = tdb.tasks.find_one({"dir_name": block_part}, props)

            if not m_task:
                raise ValueError(
                    "Could not find task with dir_name: {}".format(block_part))

            if m_task['state'] != 'successful':
                raise ValueError(
                    "Cannot run Boltztrap; parent job unsuccessful")

            nelect = m_task['calculations'][0]['input']['parameters']['NELECT']
            bs_id = m_task['calculations'][0]['band_structure_fs_id']
            print(bs_id, type(bs_id))
            fs = gridfs.GridFS(tdb, 'band_structure_fs')
            bs_dict = json.loads(fs.get(bs_id).read())
            bs_dict['structure'] = m_task['calculations'][0]['output'][
                'crystal']
            bs = BandStructure.from_dict(bs_dict)
            print('Band Structure found:', bool(bs))
            print(nelect)

            # run Boltztrap
            runner = BoltztrapRunner(bs, nelect)
            dir = runner.run(path_dir=os.getcwd())

            # put the data in the database
            bta = BoltztrapAnalyzer.from_files(dir)

            # 8/21/15 - Anubhav removed fs_id (also see line further below, ted['boltztrap_full_fs_id'] ...)
            # 8/21/15 - this is to save space in MongoDB, as well as non-use of full Boltztrap output (vs rerun)
            """
            data = bta.as_dict()
            data.update(get_meta_from_structure(bs._structure))
            data['snlgroup_id'] = fw_spec['snlgroup_id']
            data['run_tags'] = fw_spec['run_tags']
            data['snl'] = fw_spec['mpsnl']
            data['dir_name_full'] = dir
            data['dir_name'] = get_block_part(dir)
            data['task_id'] = m_task['task_id']
            del data['hall']  # remove because it is too large and not useful
            fs = gridfs.GridFS(tdb, "boltztrap_full_fs")
            btid = fs.put(json.dumps(jsanitize(data)))
            """

            # now for the "sanitized" data
            ted = bta.as_dict()
            del ted['seebeck']
            del ted['hall']
            del ted['kappa']
            del ted['cond']

            # ted['boltztrap_full_fs_id'] = btid
            ted['snlgroup_id'] = fw_spec['snlgroup_id']
            ted['run_tags'] = fw_spec['run_tags']
            ted['snl'] = fw_spec['mpsnl'].as_dict()
            ted['dir_name_full'] = dir
            ted['dir_name'] = get_block_part(dir)
            ted['task_id'] = m_task['task_id']

            ted['pf_doping'] = bta.get_power_factor(output='tensor',
                                                    relaxation_time=self.TAU)
            ted['zt_doping'] = bta.get_zt(output='tensor',
                                          relaxation_time=self.TAU,
                                          kl=self.KAPPAL)

            ted['pf_eigs'] = self.get_eigs(ted, 'pf_doping')
            ted['pf_best'] = self.get_extreme(ted, 'pf_eigs')
            ted['pf_best_dope18'] = self.get_extreme(ted,
                                                     'pf_eigs',
                                                     max_didx=3)
            ted['pf_best_dope19'] = self.get_extreme(ted,
                                                     'pf_eigs',
                                                     max_didx=4)
            ted['zt_eigs'] = self.get_eigs(ted, 'zt_doping')
            ted['zt_best'] = self.get_extreme(ted, 'zt_eigs')
            ted['zt_best_dope18'] = self.get_extreme(ted,
                                                     'zt_eigs',
                                                     max_didx=3)
            ted['zt_best_dope19'] = self.get_extreme(ted,
                                                     'zt_eigs',
                                                     max_didx=4)
            ted['seebeck_eigs'] = self.get_eigs(ted, 'seebeck_doping')
            ted['seebeck_best'] = self.get_extreme(ted, 'seebeck_eigs')
            ted['seebeck_best_dope18'] = self.get_extreme(ted,
                                                          'seebeck_eigs',
                                                          max_didx=3)
            ted['seebeck_best_dope19'] = self.get_extreme(ted,
                                                          'seebeck_eigs',
                                                          max_didx=4)
            ted['cond_eigs'] = self.get_eigs(ted, 'cond_doping')
            ted['cond_best'] = self.get_extreme(ted, 'cond_eigs')
            ted['cond_best_dope18'] = self.get_extreme(ted,
                                                       'cond_eigs',
                                                       max_didx=3)
            ted['cond_best_dope19'] = self.get_extreme(ted,
                                                       'cond_eigs',
                                                       max_didx=4)
            ted['kappa_eigs'] = self.get_eigs(ted, 'kappa_doping')
            ted['kappa_best'] = self.get_extreme(ted,
                                                 'kappa_eigs',
                                                 maximize=False)
            ted['kappa_best_dope18'] = self.get_extreme(ted,
                                                        'kappa_eigs',
                                                        maximize=False,
                                                        max_didx=3)
            ted['kappa_best_dope19'] = self.get_extreme(ted,
                                                        'kappa_eigs',
                                                        maximize=False,
                                                        max_didx=4)

            try:
                from mpcollab.thermoelectrics.boltztrap_TE import BoltzSPB
                bzspb = BoltzSPB(ted)
                maxpf_p = bzspb.get_maximum_power_factor('p', temperature=0, tau=1E-14, ZT=False, kappal=0.5,\
                    otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                maxpf_n = bzspb.get_maximum_power_factor('n', temperature=0, tau=1E-14, ZT=False, kappal=0.5,\
                    otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                maxzt_p = bzspb.get_maximum_power_factor('p', temperature=0, tau=1E-14, ZT=True, kappal=0.5, otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                maxzt_n = bzspb.get_maximum_power_factor('n', temperature=0, tau=1E-14, ZT=True, kappal=0.5, otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \
                                                    'get_thermal_conductivity_mu_eig', 'get_average_eff_mass_tensor_mu'))

                ted['zt_best_finemesh'] = {'p': maxzt_p, 'n': maxzt_n}
                ted['pf_best_finemesh'] = {'p': maxpf_p, 'n': maxpf_n}
            except Exception:
                import traceback
                traceback.print_exc()
                print('COULD NOT GET FINE MESH DATA')

            # add is_compatible
            mpc = MaterialsProjectCompatibility("Advanced")
            try:
                func = m_task["pseudo_potential"]["functional"]
                labels = m_task["pseudo_potential"]["labels"]
                symbols = ["{} {}".format(func, label) for label in labels]
                parameters = {
                    "run_type": m_task["run_type"],
                    "is_hubbard": m_task["is_hubbard"],
                    "hubbards": m_task["hubbards"],
                    "potcar_symbols": symbols
                }
                entry = ComputedEntry(Composition(m_task["unit_cell_formula"]),
                                      0.0,
                                      0.0,
                                      parameters=parameters,
                                      entry_id=m_task["task_id"])

                ted["is_compatible"] = bool(mpc.process_entry(entry))
            except Exception:
                # import here so traceback is bound even when the earlier
                # fine-mesh except block never ran
                import traceback
                traceback.print_exc()
                print('ERROR in getting compatibility, task_id: {}'.format(
                    m_task["task_id"]))
                ted["is_compatible"] = None

            tdb.boltztrap.insert(jsanitize(ted))

            update_spec = {
                'prev_vasp_dir': fw_spec['prev_vasp_dir'],
                'boltztrap_dir': os.getcwd(),
                'prev_task_type': fw_spec['task_type'],
                'mpsnl': fw_spec['mpsnl'].as_dict(),
                'snlgroup_id': fw_spec['snlgroup_id'],
                'run_tags': fw_spec['run_tags'],
                'parameters': fw_spec.get('parameters')
            }

        return FWAction(update_spec=update_spec)
Example #29
from pymongo import MongoClient
import gridfs
import os
client = MongoClient('localhost', 27017)
db = client.zxjd_database
fs = gridfs.GridFS(db)
files = fs.find()
print(files.count())
os.chdir('/home/file')
for file in files:
	if file.filename.find('.doc') > 0:
		with open(file.filename, 'wb') as f1:
			f1.write(file.read())
Example #30
import gridfs
from bson.objectid import ObjectId
from pymongo import MongoClient
import re
import os
import platform
from werkzeug.utils import secure_filename
from datetime import datetime

#UPLOAD_LOC = R'C:\Users\Mr.Something\Documents\GitHub\Rulo\static\profilePictures'
if platform.system() == 'Windows':
    UPLOAD_LOC = R'static\profilePictures/'
else:
    UPLOAD_LOC = R'static/profilePictures/'

picsDB = MongoClient().gridfs_example
fs = gridfs.GridFS(picsDB)

conn = MongoClient()
db = conn["rulo"]
users = db.users
events = db.events


#----------------------PIC STUFF---------------------#
def uploadPicture(picture):
    try:
        filename = secure_filename(picture.filename)
    except AttributeError:
        filename = secure_filename(picture.name)
    if not (os.path.exists(os.path.join(UPLOAD_LOC, filename))):
        print(filename)