Example #1
"""
The timestamp of the most recent measurement for the sensor is calculated, then the files from the last
measurement until today are collected and the measurements are added to the database. The assumption is that
measurements are available in strict FIFO sequence: once a measurement for a specific date/time is available,
it is never necessary to check before that time.
"""

import logging
import os
import pandas
import requests
from datetime import date, timedelta
from io import BytesIO
from lib import my_env
from lib import luft_store
from lib.luft_store import *

start_date = date.today().replace(day=1)
sensor_ids = ["esp8266-72077"]
cfg = my_env.init_env("luftdaten", __file__)
luft_eng = luft_store.init_session()
known_sensors = []
url_base = os.getenv("URL_BASE")

for sensor_id in sensor_ids:
    # Get timestamp for most recent measurement
    # Take the most recent row by ordering on timestamp ('timestamp' column name is an assumption)
    last_measurement = luft_eng.query(Measurement).filter_by(
        sensor_id=sensor_id).order_by(Measurement.timestamp.desc()).first()
    for td in reversed(range(1, 4)):
        dfc = date.today() - timedelta(td)
        ds = dfc.strftime("%Y-%m-%d")
        fn = "data-{id}-{ds}.csv".format(ds=ds, id=sensor_id)
        url = "{url_base}/{fn}".format(url_base=url_base, fn=fn)
        logging.info("URL: {url}".format(url=url))
        res = requests.get(url)
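        # The snippet is truncated here. A plausible continuation, given the otherwise
        # unused pandas/BytesIO imports, parses the semicolon-separated luftdaten CSV
        # (separator and status handling are assumptions, not from the source):
        if res.status_code == 200:
            df = pandas.read_csv(BytesIO(res.content), sep=";")
            logging.info("Collected {n} measurements for {ds}".format(n=len(df), ds=ds))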
Example #2
"""
This procedure will rebuild the sqlite klamu database
"""

import logging
import os
from lib import my_env
from lib import klamu_store

cfg = my_env.init_env("klamu_migrate", __file__)
logging.info("Start application")
db = os.getenv('KLAMU_DB')
klamu = klamu_store.DirectConn(db)
klamu.rebuild()
logging.info("sqlite klamu rebuild")
logging.info("End application")
Example #3
#!/opt/envs/sendmail/bin/python
"""
This procedure will rebuild the sqlite sendmail database
"""

import logging
from lib import my_env
from lib import info_layer

cfg = my_env.init_env("sendmail", __file__)
logging.info("Start application")
sm = info_layer.DirectConn(cfg)
sm.rebuild()
logging.info("sqlite sendmail rebuild")
logging.info("End application")
Example #4
                        modified=int(time.time()))
        sess.add(frec)
        sess.commit()
        return True
    # Existing file, check if there was an update
    if fr.fh == fh:
        # Same checksum, no need to update
        return False
    else:
        fr.fh = fh
        fr.modified = int(time.time())
        sess.commit()
        return True


cfg = my_env.init_env("visitors", __file__)
logging.info("Start application")
sess = sqlstore.init_session(cfg["Main"]["db"])
logfile = cfg["Main"]["apache_log"]
# Slurp logfile to calculate checksum.
with open(logfile, 'rb') as fp:
    if not file_update("other_vhosts", fp.read()):
        logging.info(
            "No changes detected in {logfile}".format(logfile=logfile))
        raise SystemExit(0)

# logfile other_vhosts_access_log.1 has been updated, load all records in database.
res = sess.query(UserAgent).all()
ua_dict = {}
for rec in res:
    ua_dict[rec.desc] = rec.id
Example #5
"""
This procedure will rebuild the database
"""

import logging
import os
from lib import my_env
from lib import info_layer

my_env.init_env("gnucash", __file__)
logging.info("Start application")
sm = info_layer.DirectConn(os.getenv('ACCOUNTDIR'), os.getenv('ACCOUNTNAME'))
sm.rebuild()
logging.info("Database rebuild")
Example #6
"""
This procedure will rebuild the sqlite lkb database
"""

import logging
from lib import my_env
from lib import lkb_store

cfg = my_env.init_env("lkb_migrate", __file__)
logging.info("Start application")
lkb = lkb_store.DirectConn(cfg)
lkb.rebuild()
logging.info("sqlite lkb rebuild")
logging.info("End application")
Example #7
"""
This is the script to migrate the catw database from mysql to sqlite.
"""

from lib import my_env
from lib import sqlite_store
from lib.sqlite_store import *

cfg = my_env.init_env("catw_migrate", __file__)
db = cfg['Main']['db']
logging.info("Start application")
# cons_sess = mysql.init_session()
catw = sqlite_store.init_session(db=db)
cons_sess = sqlite_store.init_mysql(db=cfg['catw']['db'],
                                    user=cfg['catw']['user'],
                                    pwd=cfg['catw']['passwd'])
tables = [Parameter, Project, Timesheet]
for obj in tables:
    query = cons_sess.query(obj)
    for rec in query:
        # Copy every mapped column of the source record into a fresh object
        attribs = {}
        for col in obj.__table__.columns.keys():
            attribs[col] = getattr(rec, col)
        new_rec = obj(**attribs)
        catw.add(new_rec)
    catw.commit()
Example #8
"""
This script will read the known parameters and load them into the Neo4J database.
"""

import logging
import pandas
from lib import my_env
from lib import neostore

cfg = my_env.init_env("vdab", __file__)
logging.info("Start Application")
ns = neostore.NeoStore(cfg)
vej_file = cfg["Main"]["vej_params"]
df = pandas.read_excel(vej_file, skiprows=1)

node_arr = {}

my_loop = my_env.LoopInfo("Param definitions", 20)
for row in df.iterrows():
    cnt = my_loop.info_loop()
    # Get excel row in dict format
    xl = row[1].to_dict()
    param_name = xl["ParameterNaam"].lower()
    param_val = str(xl["ParameterWaarde"]).lower()
    param_def = xl["Functionele definitie"].lower()
    param = "{n}={v}".format(n=param_name, v=param_val)
    try:
        param_node = node_arr[param]
    except KeyError:
        props = dict(waarde=param_val, definitie=param_def)
        param_node = ns.create_node(param_name, **props)
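        # Truncated here; presumably the newly created node is cached for later
        # iterations (an assumption suggested by the KeyError lookup above):
        node_arr[param] = param_node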
Example #9
"""
This script will prepare the GNU transaction table by removing duplicate entries from the transactions table.
"""

from lib import my_env
from lib import sqlstore
from lib.sqlstore import *

if __name__ == '__main__':
    cfg = my_env.init_env("qif", __file__)
    logging.info("Start Application")
    sql_eng = sqlstore.init_session(cfg["Main"]["db"])

    # Get Account names and ids for reference in transactions
    accounts = {}
    acc_names = {}
    account_recs = sql_eng.query(Account).all()
    for rec in account_recs:
        accounts[rec.name] = rec
        acc_names[str(rec.id)] = rec.name

    for account in account_recs:
        tx = sql_eng.query(Transaction).filter_by(account_id=account.id).all()
        li = my_env.LoopInfo(account.name, 100)
        cat_account_name = "[{n}]".format(n=account.name)
        for rec in tx:
            li.info_loop()
            recdic = object_as_dict(rec)
            if account.type == "effect":
                # Convert transfer_id back to account
                if recdic["transfer_id"]:
Example #10
"""
This script will convert the russians.net pajek file into nodes and relations files for import into Neo4J.
"""

import logging
import networkx
import os
from lib import my_env

cfg = my_env.init_env("sna", __file__)
logging.info("Start Application")
data_dir = cfg["Data"]["dir"]
efn = cfg["Data"]["egypts"]
ef = os.path.join(data_dir, efn)
logging.info("Read network")
g = networkx.read_pajek(ef)

# Remove all files in import dir
import_dir = cfg["Graph"]["import_dir"]
file_list = os.listdir(import_dir)
for file in file_list:
    os.remove(os.path.join(import_dir, file))

# First write header lines
nf = os.path.join(import_dir, "node_persons_1.csv")
ef = os.path.join(import_dir, "rel_contacts_1.csv")
nhl = "name:ID{delim}:LABEL".format(delim=my_env.delim)
rhl = ":START_ID{delim}:END_ID{delim}:TYPE{delim}weight:INT".format(
    delim=my_env.delim)

nfh = open(nf, "w")
Example #11
#!/opt/envs/gnu/bin/python
"""
This script populates the accounts database and creates the consolidation excel.
"""

# Add lib to the library import path.
import os
import logging
from lib import my_env
from lib.my_env import run_script

scripts = [
    "10_rebuild_db", "20_populate_accounts", "30_populate_transactions",
    "40_populate_price", "50_update_account_currency", "consolidation"
]

cfg = my_env.init_env("gnu", __file__)
logging.info("Start Application")
(fp, filename) = os.path.split(__file__)
for script in scripts:
    logging.info("Run script: {s}.py".format(s=script))
    run_script(fp, "{s}.py".format(s=script))
logging.info("End Application")
Example #12
"""
This script will get the health of the elasticsearch cluster.
"""

from lib import elasticlib
from lib import my_env
import pprint

cfg = my_env.init_env("elasticload", __file__)
pp = pprint.PrettyPrinter()
el = elasticlib.Elastic()
res = el.get_health()
pp.pprint(res.json())
Example #13
"""
This script will extract information from the Neo4J database and dump it into files that can be used for
import into Neo4J. It is a test to explore the neo4j database structure.
"""

import logging
import os
from lib import my_env

cfg = my_env.init_env("neodump", __file__)
logging.info("Start Application")

# Get nodestore
db_dir = cfg["NeoDB"]["db_dir"]
neo_db = cfg["NeoDB"]["neo_db"]
nodestore = cfg["NeoDB"]["nodestore"]
nr_len = int(cfg["NeoDB"]["nr_len"])
nsfn = os.path.join(db_dir, neo_db, nodestore)
with open(nsfn, "rb") as nsfh:
    nsf = nsfh.read()
max_nodes = len(nsf) // nr_len
# Collect node IDs
node_ids = []
# Floor division means range(max_nodes) covers every complete record: the last index,
# max_nodes - 1, starts at byte offset len(nsf) - nr_len at most, safely in range.
for pos in range(max_nodes):
    # Only the lowest bit of the first record byte is the in-use flag
    in_use = nsf[pos * nr_len] & 0x01
    if in_use:
        node_ids.append(pos)
print("Number of nodes: {nn}".format(nn=len(node_ids)))
print("Nodes: \n{n}".format(n=node_ids[:15]))
logging.info("End Application")
Example #14
"""
This script will find all Content body fields containing a specific string. This can be used to locate all
nodes with a customized URL that require an individual update.
"""
from lib import my_env
from lib import tuin_store
from lib.tuin_store import *

cfg = my_env.init_env("tuin_migrate", __file__)
db = cfg['Main']['db']
logging.info("Start application")
tuin = tuin_store.init_session(db=db)

str2find = "snoeitechnieken-en-onderhoud/plantenfamilies"
contents = tuin.query(Content).filter(
    Content.body.like("%{s2r}%".format(s2r=str2find))).all()
cnt = 0
for content in contents:
    cnt += 1
    print(content.title)
print("Count: {c}".format(c=cnt))
logging.info("End application")
Example #15
"""
import argparse
import logging
import pandas
from lib import my_env
from lib import murcsrest

if __name__ == "__main__":
    # Configure command line arguments
    parser = argparse.ArgumentParser(
        description="Load a Site file into Murcs"
    )
    parser.add_argument('-f', '--filename', type=str, required=True,
                        help='Please provide the site file to load.')
    args = parser.parse_args()
    cfg = my_env.init_env("bellavista", __file__)
    r = murcsrest.MurcsRest(cfg)
    logging.info("Arguments: {a}".format(a=args))

    # Read the file
    df = pandas.read_excel(args.filename)
    my_loop = my_env.LoopInfo("Sites", 20)
    for row in df.iterrows():
        my_loop.info_loop()
        # Get excel row in dict format
        xl = row[1].to_dict()
        siteId = xl.pop("siteId")
        payload = dict(
            siteId=siteId
        )
        for k in xl:
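            # Truncated here; a plausible continuation copies the remaining columns into
            # the payload before posting the site (loop body is an assumption):
            payload[k] = xl[k]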
Example #16
import argparse
import logging
import os
import subprocess as sp
from lib import my_env

parser = argparse.ArgumentParser(description="Start or stop a neo4j server")
parser.add_argument('-a',
                    '--action',
                    type=str,
                    required=True,
                    choices=['start', 'stop'],
                    help='Please provide the (start, stop) action.')
args = parser.parse_args()
cfg = my_env.init_env("wolse", __file__)
logging.info("Arguments: {a}".format(a=args))
cmd = os.path.join(cfg["Graph"]["path"], 'bin', cfg["Graph"]["neo4j"])
cmdline = [cmd, args.action, "-verbose"]

module = my_env.get_modulename(__file__)
sof = os.path.join(
    cfg["Main"]["logdir"], "{mod}_{action}_out.log".format(mod=module,
                                                           action=args.action))
sef = os.path.join(
    cfg["Main"]["logdir"], "{mod}_{action}_err.log".format(mod=module,
                                                           action=args.action))
so = open(sof, "w")
se = open(sef, "w")
logging.info("Command: {c}".format(c=args))
try:
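    # Truncated here; a plausible completion runs the command with stdout/stderr
    # redirected to the log files opened above (assumed, not from the source):
    sp.check_call(cmdline, stdout=so, stderr=se)
finally:
    so.close()
    se.close()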
Example #17
    acc_query = f"""
                SELECT accounts.guid as guid, name, account_type, cusip, placeholder, code, commodity_guid,
                       mnemonic, namespace
                FROM accounts 
                LEFT JOIN commodities on commodities.guid=commodity_guid
                WHERE parent_guid='{acc_guid}' 
                AND hidden=0
                """
    acc_res = gnudb.get_query(acc_query)
    for acc_row in acc_res:
        handle_account(acc_row, bank_id, parent_id)


# Initialize Environment
projectname = "gnucash"
config = my_env.init_env(projectname, __file__)
logging.info("Start application")

gnudb = info_layer.DirectConn(os.getenv('GNUDIR'), os.getenv('GNUNAME'))
session = info_layer.init_session(os.getenv('ACCOUNTDIR'), os.getenv('ACCOUNTNAME'))
cats = {}
groups = {}

# First populate categories table and remember nid in cats dictionary.
query = "SELECT DISTINCT account_type FROM accounts"
res = gnudb.get_query(query)
for row in res:
    cat = row['account_type']
    if cat in ['STOCK', 'MUTUAL']:
        cat_type = 'stock'
    else:
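        # Truncated here; the else branch presumably assigns a generic category
        # (value is an assumption, not confirmed by the source):
        cat_type = cat.lower()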
Example #18
        # SW Product Node does not exist, create one
        sw_prod_node = create_sw_product(f_row)
        # And remember SW Product for following iterations
        sw_prod_nodes[sw_product] = sw_prod_node
    # Remove attributes that are attached to Product, Producent, Version from SW Product
    # Attributes: UITDOVEND_DATUM, UITGEDOOFD_DATUM, PRODUCT, PRODUCENT, VERSIE
    f_row['UITDOVEND_DATUM'] = ''
    f_row['UITGEDOOFD_DATUM'] = ''
    for k in ['VERSIE', 'PRODUCENT', 'PRODUCT']:
        if k in f_row:
            del f_row[k]
    return sw_prod_node, f_row


if __name__ == "__main__":
    cfg = my_env.init_env("cmdb", __file__)
    # Get Neo4J Connection and clean Database
    ns = NeoStore(cfg, refresh="Yes")
    # Get DataStore object
    ds = DataStore(cfg)
    # Initialize ci_type translation
    ci_type = ds.get_ci_type_translation()
    # Then get all Component rows
    rows = ds.get_components()
    node_obj = {}
    loc_obj = {}
    node_info = my_env.LoopInfo("Nodes", 100)
    for row in rows:
        del row["ID"]
        node_label, row = get_component_type(row, ci_type)