Esempio n. 1
0
 def get_table(self, db_name, mols=True, mol_only=True):
     """Load scored results from a SqliteDict store into a ranked DataFrame.

     Rows lacking a SMILES string are dropped; when *mols* is True, 3D and 2D
     molecule columns are attached, and *mol_only* additionally drops rows
     without a 3D structure.
     """
     store = SqliteDict(db_name,
                        encode=json.dumps,
                        decode=json.loads,
                        autocommit=True)
     table = pd.DataFrame(store.values())
     # Quick sanity report: total rows vs. rows with a computed ∆∆G.
     print(len(table), sum(~table['∆∆G'].isna()))
     table['LE'] = table.apply(LE, axis=1)
     ranked = table.apply(ranker, axis=1).rank()
     # Normalize ranks to a 0-100 percentile-style scale, ignoring NaNs.
     top = np.nanmax(ranked.values)
     table['%Rank'] = ranked / top * 100
     table['N_hits'] = table.regarded.apply(
         lambda x: len(x) if str(x) != 'nan' else float('nan'))
     table = table.loc[~table.smiles.isna()].sort_values(['%Rank'], axis=0)
     if mols:
         table['mol3D'] = table['name'].apply(get_mol3D)
         PandasTools.AddMoleculeColumnToFrame(table, 'smiles', 'mol2D')
         if mol_only:
             table = table.loc[~table.mol3D.isna()]
     return table
# Write one index file per word (3+ chars): the sorted book ids that match it.
for word, slugs in wordToSlugs.items():
    if len(word) < 3:
        continue
    ids = sorted(bookmap[slug][0] for slug in slugs)
    with open(osp.join(WOUT, word), "wt", encoding="utf-8") as fp:
        fp.write("".join(ids))

# Touch CAUTION (append mode, no content) so the file always exists.
caution_path = osp.join(WOUT, "CAUTION")
with open(caution_path, "at", encoding="utf-8") as fp:
    fp.write("")

# AllAvailable records the full id range as "<first>-<last>".
first_id = "0" * Dbooks
with open(osp.join(WOUT, "AllAvailable"), "wt", encoding="utf-8") as fp:
    fp.write(f"{first_id}-{last}")

# Dump image paths (relative to OUT) so the client can prefetch them.
image_paths = [osp.relpath(p, OUT) for p in imagemap.values()]
with open(osp.join(CONTENT, "images.json"), "wt", encoding="utf-8") as fp:
    json.dump(image_paths, fp)

# record parameters needed by the js
config = {
    "base": args.base,
    "digits": Dbooks,
    "first": "0" * Dbooks,
    "lastReviewed": lastReviewed,
    "last": last,
}
# Fix: open with explicit utf-8 for consistency with every other text write
# in this script (previously relied on the platform default encoding).
with open(osp.join(CONTENT, "config.json"), "wt", encoding="utf-8") as fp:
    json.dump(config, fp)
Esempio n. 3
0
from zk_snark.to_r1cs import code_to_r1cs_with_inputs
from sqlitedict import SqliteDict
import zlib, pickle, sqlite3
from decimal import Decimal

# Persisted voter registry; autocommit flushes writes as they happen.
voters_dict = SqliteDict('./Trusted_users_app.sqlite', autocommit=True)

# Source for the R1CS demo circuit: qeval(x) = x**3 + x + 5.
func = """
def qeval(x):
    y = x**3
    return y + x + 5
"""

# Materialize all stored voters (replaces a tab-indented append loop).
voters_list = list(voters_dict.values())

def proof(name: str):
    p, G = get_pg()
    with open("CRS", 'r') as crs:
        n = int(crs.readline())
        A, B, C, summ = [], [], [], []

        for i in range(n):
            A.append(float(crs.readline()))
            B.append(float(crs.readline()))
            C.append(float(crs.readline()))

        for i in range(n):
            summ.append(float(crs.readline()))
Esempio n. 4
0
# Year-bucketed accumulators for cleaned text (five distinct empty lists).
pre_cleaning1, pre_cleaning2, pre_cleaning3, pre_cleaning4, pre_cleaning5 = (
    [] for _ in range(5))
for category in categories:
    c = 0
    print(f"Category = {category}", c)

    pua = SqliteDict(f"./../Sqlite/split_texts/{category}.sqlite",
                     tablename="value",
                     flag="r")

    #pua_clean = SqliteDict(f"{category}_clean.sqlite", tablename="value", journal_mode="OFF")

    for value in pua.values():
        try:
            y = datetime.fromtimestamp(value["timestamp"] // 1000).year
            if y <= 2012:
                pre_cleaning1.append(
                    re.sub("[^A-Za-z]+", ' ',
                           str(value["text"]).lower()))
            elif y <= 2015:
                pre_cleaning2.append(
                    re.sub("[^A-Za-z]+", ' ',
                           str(value["text"]).lower()))
            elif y <= 2017:
                pre_cleaning3.append(
                    re.sub("[^A-Za-z]+", ' ',
                           str(value["text"]).lower()))
            elif y <= 2018:
import json


# Cache of fetched books keyed by slug; persists between runs of this script.
books = SqliteDict("allbooks.sqlite", autocommit=True)


# Walk the paginated search index, fetching any book not cached yet.
for page_no in range(1, 10000):
    if page_no % 100 == 0:
        print(page_no)
    listing_url = (
        "http://test.tarheelreader.org/find/"
        "?search=&category=&reviewed=&audience=&language=&"
        f"page={page_no}&json=1"
    )
    listing = requests.get(listing_url).json()
    for entry in listing["books"]:
        slug = entry["slug"]
        if slug in books:
            continue
        detail_url = "http://test.tarheelreader.org/book-as-json/" f'?slug={slug}'
        books[slug] = requests.get(detail_url).json()
    if not listing["more"]:
        break


# Persist every cached book as gzipped JSON, ordered by numeric ID.
rows = sorted(books.values(), key=lambda rec: rec["ID"])
with gzip.open("books.json.gz", "wt", encoding="utf-8") as fp:
    json.dump(rows, fp)
class SQLiteMetadataStore(MetadataStoreInterface):
    name = 'sqlite'

    def __init__(self, db_path: str, no_conns_and_vars=False):
        """Open the sqlite-backed metadata tables.

        When *no_conns_and_vars* is True, only the dag-deployments table is
        opened (connections/variables handles are never created).
        """
        from sqlitedict import SqliteDict

        self.db_path = db_path

        def _open_table(tablename):
            # Each logical table is its own SqliteDict in the same db file.
            return SqliteDict(self.db_path, tablename=tablename)

        if not no_conns_and_vars:
            self.conn_connections = _open_table(Settings.connections_table_name)
            self.conn_variables = _open_table(Settings.variables_table_name)
        self.conn_dag_deployments = _open_table(
            Settings.dag_deployments_table_name)

    def close(self):
        """Close every open SqliteDict handle."""
        for handle in (self.conn_connections, self.conn_variables,
                       self.conn_dag_deployments):
            handle.close()

    def exists(self) -> bool:
        """Return True when the backing sqlite file is present on disk."""
        db_file = Path(self.db_path)
        return db_file.exists()

    def migrate(self):
        """Ensure the backing sqlite file exists; no schema migration needed.

        Opening in append mode creates the file if missing without touching
        existing data.
        """
        # Context manager guarantees the handle is closed, replacing the
        # fragile open(...).close() one-liner.
        with open(str(self.db_path), 'a'):
            pass

    def get_connection(self, conn_id: str) -> Connection:
        """Return the stored Connection for *conn_id*.

        Raises:
            MetadataObjectNotFound: if no connection with that id is stored.
        """
        # Membership test on the mapping itself (single keyed lookup) instead
        # of scanning the .keys() iterator.
        if conn_id not in self.conn_connections:
            raise MetadataObjectNotFound(f'Connection "{conn_id}" is not set')
        return self.conn_connections[conn_id]

    def get_connections(self,
                        to_dict: bool = False
                        ) -> List[Union[dict, Connection]]:
        """List all stored connections, as objects or (if *to_dict*) dicts."""
        to_item = (lambda c: c.__dict__) if to_dict else (lambda c: c)
        return [to_item(c) for c in self.conn_connections.values()]

    def set_connection(self, conn: Connection):
        """Store *conn* under its conn_id and flush to disk."""
        key = conn.conn_id
        self.conn_connections[key] = conn
        self.conn_connections.commit()

    def delete_connection(self, conn: Union[str, Connection]):
        """Remove a connection, given either its id or the Connection itself."""
        conn_id = conn.conn_id if isinstance(conn, Connection) else conn
        del self.conn_connections[conn_id]
        self.conn_connections.commit()

    def get_variable(self, variable_id: str) -> Variable:
        """Return the stored Variable for *variable_id*.

        Raises:
            MetadataObjectNotFound: if no variable with that id is stored.
        """
        # Membership test on the mapping itself (single keyed lookup) instead
        # of scanning the .keys() iterator; consistent with get_connection.
        if variable_id not in self.conn_variables:
            raise MetadataObjectNotFound(
                f'Variable "{variable_id}" is not set')
        return self.conn_variables[variable_id]

    def get_variables(self,
                      to_dict: bool = False) -> List[Union[dict, Variable]]:
        """List all stored variables, as objects or (if *to_dict*) dicts."""
        to_item = (lambda v: v.dict_contents()) if to_dict else (lambda v: v)
        return [to_item(v) for v in self.conn_variables.values()]

    def set_variable(self, variable: Variable):
        """Store *variable* under its id and flush to disk."""
        key = variable.id
        self.conn_variables[key] = variable
        self.conn_variables.commit()

    def delete_variable(self, variable: Union[str, Variable]):
        """Remove a variable, given either its id or the Variable itself."""
        variable_id = (variable.id
                       if isinstance(variable, Variable) else variable)
        del self.conn_variables[variable_id]
        self.conn_variables.commit()

    def get_dag_deployment(self, deployment_hash: str) -> DagDeployment:
        """Return the stored DagDeployment for *deployment_hash*.

        Raises:
            MetadataObjectNotFound: if no deployment with that hash is stored.
        """
        # Bug fix: this previously read self.conn_connections (copy/paste from
        # get_connection); dag deployments live in conn_dag_deployments.
        if deployment_hash not in self.conn_dag_deployments:
            raise MetadataObjectNotFound(
                f'Dag deployment "{deployment_hash}" is not set')
        return self.conn_dag_deployments[deployment_hash]

    def get_dag_deployments(self,
                            to_dict: bool = False
                            ) -> List[Union[dict, DagDeployment]]:
        """List all stored dag deployments, as objects or (if *to_dict*) dicts."""
        to_item = (lambda d: d.dict()) if to_dict else (lambda d: d)
        return [to_item(d) for d in self.conn_dag_deployments.values()]

    def set_dag_deployment(self, dag_deployment: DagDeployment):
        """Store *dag_deployment* under its deployment hash and flush."""
        key = dag_deployment.deployment_hash
        self.conn_dag_deployments[key] = dag_deployment
        self.conn_dag_deployments.commit()