async def main():
    try:
        db.create_tables()
        await db.setup()
        await scrape()
    finally:
        await db.teardown()

def old_initialize():
    engine, session = db.create("sqlite:///db.sqlite3")
    db.create_tables(engine)
    p_snps = 0
    p_pubs = 0
    snps = get_complete_rsids()
    for s in snps:
        time.sleep(.1)  # throttle requests between lookups
        pubs = get_pmids(s)
        if len(pubs) > 0:
            p_snps += 1
            print("Processed {} snps".format(p_snps))
        for p in pubs:
            if not db.check_snp(session=session, id=s, pub=p):
                db.add_snp(session, s, p)
            if not db.check_publication(session=session, id=p):
                info = get_publication(p)
                db.add_publication(session, id=p, title=info["title"],
                                   abstract=info["abstract"])
                p_pubs += 1
                print("Processed {} pubs".format(p_pubs))
    db.close(session)

def main():
    parser = argparse.ArgumentParser(description="Retrieve freebox statistics")
    parser.add_argument(
        "-d", "--database",
        help="SQLite3 database in which to store the statistics (default: %s)"
             % config.database)
    parser.add_argument(
        "-u", "--url",
        help="URL from which the statistics are retrieved (default: %s)"
             % config.statsurl)
    args = parser.parse_args()
    if args.database is not None:
        config.database = args.database
    if args.url is not None:
        config.statsurl = args.url
    cnx = db.new_connection()
    cur = cnx.cursor()
    db.create_tables(cur)
    p = StatsPage()
    store_stats(p, cur)
    store_logs(p, cur)
    store_netlinks(p, cur)
    cur.execute("PRAGMA optimize")
    cnx.commit()

def create_tables():
    try:
        db.create_tables()
        flash('Tables created', 'success')
    except Exception:
        flash('Table creation failed', 'danger')
    return redirect(url_for('home'))

def init_db(self, args):
    database_uri = self._config['database']
    sql_echo = self._config.get('sql_echo', True)
    if not database_exists(database_uri):
        LOG.info("Initialize the database: %s", database_uri)
        engine = create_engine(database_uri, echo=sql_echo)
        create_tables(engine)
    else:
        LOG.warning(f"Database {database_uri} already exists!")

def setUp(self):
    self.app = create_app(config_name='testing')
    self.daoManager = DAOManagerSqlite()
    create_tables(self.daoManager.conecction.cursor(), 'app/schema.sql')
    for i in range(10):
        user = User('Alvaro{}'.format(i), 'Niño', '16/12/19', '*****@*****.**')
        self.daoManager.do(DAOManager.USER, DAOManager.CREATE, user)
    self.daoManager.commit()

def build_database():
    logging.info("Build database\n")
    db.create_tables()
    db.insert_users()
    db.insert_seenits()
    db.insert_posts()
    db.insert_comments()
    db.insert_post_upvote()
    db.insert_comment_upvote()

def main():
    db.create_tables()
    updater = Updater(token=BOT_API, use_context=True)
    updater.dispatcher.add_handler(
        MessageHandler(filters=Filters.all, callback=new_event_button_handler))
    updater.start_polling()
    updater.idle()

def write_data():
    table_name = 'data'
    db.create_tables(table_name, 0)
    data = gen_data()
    str_data = ''
    for i in data:
        str_data += '(' + str(i[0]) + ',\'' + i[1] + '\',' + str(i[2]) + ',' + str(i[3]) + '),'
    db.insert_data(table_name, str_data[:-1])

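The loop above splices raw values straight into a SQL string, which breaks on quotes in i[1] and is injection-prone. As a sketch only, assuming a plain DB-API connection underneath the project-specific db helpers, the same insert can use placeholders instead:

import sqlite3

# Hypothetical parameterized variant; db.insert_data above is project-specific,
# so this substitutes the standard sqlite3 module directly.
def write_data_parameterized(conn: sqlite3.Connection, data):
    conn.executemany(
        "INSERT INTO data VALUES (?, ?, ?, ?)",  # one placeholder per column
        [tuple(row) for row in data],
    )
    conn.commit()
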
def build_database():
    db.create_tables()
    db.insert_users()
    db.insert_seenits()
    db.insert_posts()
    db.insert_comments()
    db.insert_post_upvote()
    db.show_seenits()
    db.show_posts()
    db.show_comments()
    db.show_post_upvotes()

def main():
    db.create_tables()
    members = get_users()
    db.members_insert(members)
    member_communities = get_communities(members)
    db.member_community_insert(member_communities)
    vsu_group = get_vsu_group()
    db.vsu_community_insert(vsu_group)
    users = db.select_users_ids()
    posts = get_vsu_posts()
    activities = get_activity(posts, users)
    db.insert_activities(activities)

def main():
    if config.env != 'VMware':
        cmd = 'bash /etc/rc.key'
        os.system(cmd)
        cmd = 'rm -rf /dev/zstor'
        os.system(cmd)
        cmd = 'rm -rf /home/zonion/.llog'
        os.system(cmd)
        cmd = 'gpg --ignore-time-conflict --homedir /home/gpg --verify /home/zonion/license/key.sig > /tmp/key 2>&1'
        _, o = execute(cmd, False, logging=False)
        cmd = 'cat /tmp/key'
        _, o = execute(cmd, False)
        m = re.search('Good', o)
        if not m:
            cmd = 'touch /home/zonion/.llog'
            _, o = execute(cmd, False)
            print('nokey')
        cmd = 'cat /home/zonion/license/key'
        _, o = execute(cmd, False, logging=False)
        sn = util.get_sn()
        if sn != o.strip():
            cmd = 'touch /home/zonion/.llog'
            _, o = execute(cmd, False)
            print('nokey')
        cmd = 'rm -rf /tmp/key'
        os.system(cmd)
        cmd = 'rm /home/zonion/bitmap.disk/*.bitmap -rf'
        execute(cmd, False)

    try:
        db.create_tables()
    except Exception as e:
        log.error(caused(e).detail)
        raise error.InternalError()

    if os.path.exists(config.boot.startup_file):
        log.journal_warning('System may not have been powered off normally!')

    if 'monfs' not in config.feature:
        cmd = 'bash /home/zonion/command/ipsan.sh'
        os.system(cmd)

    is_die = os.path.exists('/home/zonion/.llog')
    if is_die:
        log.info('NO license.......')
        return

    cxt = api.APIContext()
    cxt.scan_all()
    log.info('System startup.')

def build_database():
    f.writing("Build database\n")
    db.create_tables()
    db.insert_users()
    db.insert_seenits()
    db.insert_posts()
    db.insert_comments()
    db.insert_post_upvote()
    db.show_seenits()
    db.show_posts()
    db.show_comments()
    db.show_post_upvotes()

def menu():
    load_dotenv()
    db_uri = os.environ["DATABASE_URI"]
    connection = psycopg2.connect(db_uri)
    db.create_tables(connection)

    while (selection := input(MENU_PROMPT)) != "6":
        try:
            MENU_OPTIONS[selection](connection)
        except KeyError:
            print("\nInvalid option, try again.")

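MENU_PROMPT and MENU_OPTIONS are not shown in this snippet; presumably the latter maps input strings to handler functions that take the open connection, with "6" reserved for exit. A self-contained sketch of that dispatch table (the handler names are hypothetical placeholders, not the project's real ones):

# Placeholder handlers with the call signature the loop above assumes.
def add_entry(connection):
    print("add_entry called")

def list_entries(connection):
    print("list_entries called")

MENU_PROMPT = "\n1) Add entry\n2) List entries\n6) Exit\nYour selection: "

MENU_OPTIONS = {
    "1": add_entry,
    "2": list_entries,
}
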
async def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--stations", type=str)
    parser.add_argument("--lines", type=str)
    args = parser.parse_args()
    db.create_tables()
    if args.stations:
        await import_stations(args.stations)
    if args.lines:
        await import_lines(args.lines)

async def _main(self):
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint", type=str)
    parser.add_argument("--limit", type=int)
    parser.add_argument("--job-name", type=str)
    args = parser.parse_args()
    db.create_tables()
    # For batch jobs, use SQLAlchemy directly instead of aiopg. aiopg does
    # not support server-side cursors and we need those to iterate through
    # the DB.
    engine = db.get_sa_engine()
    checkpoint = None
    if args.checkpoint:
        checkpoint = self.CheckpointCls.loads(args.checkpoint)
    elif args.job_name:
        checkpoint = self.read_checkpoint(engine, args.job_name)
    with engine.connect() as conn:
        query = self.get_query(checkpoint, args.limit)
        res = conn.execution_options(stream_results=True).execute(query)
        rowcount = 0
        while True:
            rows = res.fetchmany(BATCH_SIZE)
            if len(rows) == 0:
                break  # results are exhausted
            logging.debug("Processing batch of %d rows", len(rows))
            if len(self.groups) > 0:
                await self.process_as_groups(rows)
            else:
                await self.process_as_chunks(rows)
            rowcount += len(rows)
            checkpoint_data = {col: rows[-1][col] for col in self.cols}
            checkpoint = self.CheckpointCls(checkpoint_data)
            logging.info("Processed %d rows, ending at %s", rowcount, checkpoint)
    logging.info("Checkpoint: %s", checkpoint.dumps())
    if args.job_name:
        self.write_checkpoint(engine, args.job_name, checkpoint)

def main():
    db.create_tables(Project)
    description_string = (
        'Manage development environments on AWS: '
        'https://github.com/looneym/sobotka/blob/master/docs/usage.md')
    parser = argparse.ArgumentParser(description=description_string)
    parser.add_argument('action')
    parser.add_argument('command', nargs='?')
    parser.add_argument("-o", "--overwrite",
                        help="Overwrite existing key pair",
                        action="store_true")
    args = parser.parse_args()
    if args.action == "init":
        if helpers.has_sudo():
            create_project()
    elif args.action == "list":
        Project.list_all()
    elif args.action == "info":
        print_info()
    elif args.action == "ssh":
        ssh()
    elif args.action == "exec":
        execute_command(args.command)
    elif args.action == "push":
        push()
    elif args.action == "bootstrap":
        bootstrap()
    elif args.action == "run":
        run()
    elif args.action == "stop":
        stop()
    elif args.action == "destroy":
        if helpers.has_sudo():
            destroy_project()
    elif args.action == "watch":
        watch_directory()
    elif args.action == "logs":
        get_logs()
    elif args.action == "key":
        helpers.create_key_pair(args.overwrite)
    else:
        print("Please specify an action")

def main():
    if len(sys.argv) != 2:
        print("usage: %s datadir" % sys.argv[0], file=sys.stderr)
        return
    datadir = sys.argv[1]
    cnx = db.new_connection()
    cur = cnx.cursor()
    db.create_tables(cur)
    for f in sorted(glob.glob(datadir + "/*.csv")):
        import_data(cur, f)
    cnx.commit()
    cur.execute("ANALYZE")
    cnx.commit()

def test_db_and_wallet():
    # create a new local datastore
    db.create_tables()
    connection, cursor = db.connect()

    # create and insert a new wallet
    (s, v) = wallet.new()
    db.insert_wallet(s)

    # mint some coins and verify it has them
    db.mint_coins(s, 100)
    print(db.select_wallet(s))

    # create and insert a new wallet and verify it has no coins
    (s2, v2) = wallet.new()
    db.insert_wallet(s2)
    print(db.select_wallet(s2))

def shared_setup():
    """Set up the file system, GPG, and database"""
    # Create directories for the file store and the GPG keyring
    for d in (config.SECUREDROP_ROOT, config.STORE_DIR, config.GPG_KEY_DIR):
        if not os.path.isdir(d):
            os.mkdir(d)

    # Initialize the GPG keyring
    gpg = gnupg.GPG(gnupghome=config.GPG_KEY_DIR)

    # Import the journalist key for testing (faster to import a pre-generated
    # key than to gen a new one every time)
    for keyfile in ("test_journalist_key.pub", "test_journalist_key.sec"):
        gpg.import_keys(open(keyfile).read())

    # Initialize the test database
    import db
    db.create_tables()

    # Do tests that should always run on app startup
    crypto_util.do_runtime_tests()

def run(self):
    self.get_user_name()
    db.create_tables()
    write_msg(self.user_id, INPUT_TOWN_MESSAGE)
    for new_event in longpoll.listen():
        if new_event.type == VkEventType.MESSAGE_NEW and new_event.to_me:
            if self.get_city(new_event.message) == 0:
                write_msg(self.user_id, UNKNOWN_TOWN_MESSAGE)
            else:
                self.get_age_range()
                self.get_sex()
                self.user = db.User(vk_id=self.user_id,
                                    first_name=self.first_name,
                                    last_name=self.last_name,
                                    range_age=self.age_range,
                                    city=self.city)
                db.add_user(self.user)
                self.find_dating_user()
                self.get_top_photos()
                write_msg(self.user_id,
                          f'Name: {self.first_name}\n'
                          f'Surname: {self.last_name}\n'
                          f'Link: @id{self.dating_user_id}',
                          self.top_photos)
                return self.find_love()

async def main():
    routes_for_shape_id = await get_routes_for_shape_id()
    stops = get_stops()
    paths = load_paths()
    additional_paths = load_additional_paths(stops)
    for (k, v) in additional_paths.items():
        assert k not in paths
        paths[k] = v
    logging.info("%d distinct shapes", len(paths))
    segments = [
        RouteSegment(points=path, routes=routes_for_shape_id.get(shape_id, set()))
        for (shape_id, path) in paths.items()
    ]
    graph = make_graph(segments, stops)

    graph.dedupe()
    node_count = len(graph.nodes)
    edge_count = len(graph.edges)
    # Second dedupe should do nothing if implementation is correct
    graph.dedupe()
    assert len(graph.nodes) == node_count
    assert len(graph.edges) == edge_count

    graph.coalesce()
    node_count = len(graph.nodes)
    edge_count = len(graph.edges)
    # Second coalesce should do nothing if implementation is correct
    graph.coalesce()
    assert len(graph.nodes) == node_count
    assert len(graph.edges) == edge_count

    for node in graph.nodes.values():
        if len(node.stop_ids) > 0 and len(node.edges) == 0:
            logging.warning(
                "Stop IDs %s (%s) disconnected from graph",
                node.stop_ids,
                node.point,
            )

    db.create_tables()
    await graph.write(TRANSIT_SYSTEM)
    await write_trip_paths(routes_for_shape_id, paths)

async def main():
    # To run this script, comment out this line. If importing MTA data,
    # you need to drop the mta_trip_id table, since it does not have a
    # primary key. Ideally we'd also get some on_conflict_do_update
    # up in this file if we are updating any existing data with new data.
    raise Exception("This is not safe to run because it will mess up existing jobs.")

    db.create_tables()
    transit_system = gtfs.TransitSystem.NYC_MTA
    base_dir = sys.argv[1]
    # This does not work anymore because we have other tables with foreign
    # keys.
    # await delete_tables(transit_system)
    for spec in IMPORT_SPECS:
        csv_path = os.path.join(base_dir, spec.csv)
        if os.path.exists(csv_path):
            await import_generic(
                transit_system,
                csv_path,
                db.get_table(spec.table),
                spec,
            )

async def init(loop):
    app = Application(loop=loop)
    app['sockets'] = []
    app.router.add_route('GET', '/', wshandler)
    app.router.add_route('POST', '/endpoint', endpointhandler)
    app.router.add_static('/static', './static')

    dsn = 'dbname=aiopg_db user=aiopg_user password=password host=127.0.0.1'
    pool = await aiopg.create_pool(dsn,
                                   cursor_factory=psycopg2.extras.RealDictCursor,
                                   echo=True)
    app['db_pool'] = pool
    asyncio.ensure_future(db.create_tables(pool))

    handler = app.make_handler()
    srv = await loop.create_server(handler, '127.0.0.1', 8080)
    print("Server started at http://127.0.0.1:8080")
    return app, srv, handler

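Note that asyncio.ensure_future only schedules db.create_tables(pool): the server can start accepting requests before the tables exist. If that ordering matters, a minimal variant of the same two lines inside init (same names as above, shown as a fragment) would await the call directly:

    app['db_pool'] = pool
    await db.create_tables(pool)  # block startup until the tables exist
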
def test_00_create_db(self):
    """Create/drop tables test."""
    try:
        delete_tables()
    except:
        pass  # first execution: nothing to drop yet
    table_names = create_tables()
    sql = []
    sql.append("SELECT table_schema, table_name")
    sql.append("FROM information_schema.tables")
    sql.append("WHERE (table_schema = 'public')")
    sql.append("ORDER BY table_schema, table_name")
    connection = DataObject.db_connect()
    with connection.cursor() as cursor:
        cursor.execute(" ".join(sql))
        list_tables = cursor.fetchall()
    # Removing every table found in the database should empty the list
    # returned by create_tables().
    for _schema, table_name in list_tables:
        table_names.remove(table_name)
    self.assertEqual(table_names, [])

import db
import json
import pandas as pd
import sqlite3
from pprint import pprint

db_path = "db/test.sqlite3"
con = db.connect(db_path)
db.create_tables(con)
# show_tables(con)

tableA = json.load(open('db/tableA.json'))
db.insert(con, "tableA", tableA)
tableB = json.load(open('db/tableB.json'))
db.insert(con, "tableB", tableB)

dfA = pd.read_sql_query("select * from tableA limit 10;", con)
dfB = pd.read_sql_query("select * from tableB limit 10;", con)

# to debug:
# print(dfA.dtypes)
# print(dfB.dtypes)

tableA_B = dfA.join(dfB, on='tac', how="left", lsuffix='_caller', rsuffix='_other')
tableA_B.to_csv("tableA_B.csv")

def setUp(self):
    create_tables()

from db import create_tables, master_password
from menu import create_password, menu, show_password

create_tables()
if not master_password():
    exit()

choice = menu()
while choice.lower() != "q":
    if choice == "1":
        create_password()
    if choice == "2":
        show_password()
    choice = menu()
exit()

def create_tables():
    db.create_tables()

def delete_all(self):
    """
    Delete all entries from the local database table and also any
    external stores
    """
    # Delete from the local database
    db.session.query(LandmarkModel).delete()
    db.session.commit()
    # Delete hash file
    try:
        os.unlink("landmarkdb.mat")
    except OSError:
        pass
    q = queue.FpQueue("ingest_landmark")
    q.clear_queue()


fingerprint.fingerprint_index["landmark"] = {
    "dbmodel": LandmarkModel,
    "instance": Landmark
}
db.create_tables()


def stats():
    q = queue.FpQueue("ingest_landmark")
    print("Ingest queue size: %s" % q.size())


if __name__ == "__main__":
    stats()

# coding: utf-8
import os

from flask import Flask, g
from flask_cors import CORS  # flask.ext.cors was removed in Flask 1.0

app = Flask('server')
CORS(app)

from routes import routes
from db import create_tables

create_tables()


@app.teardown_appcontext
def close_connection(exception):
    t = getattr(g, '_database', None)
    if t is not None:
        t.close()


path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(path, 'static/index.html')
index_html = open(path).read()


@app.route('/')
def root():
    return index_html


for url, v in routes.items():
    func = v['func']
    app.route(url)(func)

#                 embeds.exception_w_message(message)
#             elif args and type(args[0]) == discord.RawMessageUpdateEvent:
#                 logging.error("After Content:{}.".format(args[0].data['content']))
#                 if args[0].cached_message is not None:
#                     logging.error("Before Content:{}.".format(args[0].cached_message.content))
        # TODO: Add more
        traceback_message = "```python\n{}```".format(traceback.format_exc())
        traceback_message = (
            (traceback_message[:1993] + ' ...```')
            if len(traceback_message) > 2000
            else traceback_message
        )
        await error_log_channel.send(content=traceback_message, embed=embed)


if __name__ == '__main__':
    with open('config.json') as json_data_file:
        config = json.load(json_data_file)

    log.info(f"Connecting to DB @: {config['db_uri']}")
    db_pool: asyncpg.pool.Pool = asyncio.get_event_loop().run_until_complete(
        db.create_db_pool(config['db_uri']))
    asyncio.get_event_loop().run_until_complete(db.create_tables(db_pool))

    bot.config = config
    bot.db_pool = db_pool
    bot.load_cogs()
    bot.run(config['token'])

    log.info("Cleaning up and shutting down")

def initDb():
    db.drop_tables()
    db.create_tables()

def app_configure(loop):
    db_url = settings.DB_URL
    loop.run_until_complete(db.set_bind(db_url))
    loop.run_until_complete(drop_tables(db))
    loop.run_until_complete(create_tables(db))
    yield

def __init__(self):
    engine = db.connect()
    engine.connect()  # opens (and discards) a connection, presumably as a connectivity check
    db.create_tables(engine)
    self.SessionMaker = sessionmaker(bind=engine)

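For reference, a factory stored this way is used by calling it to open sessions. A self-contained sketch of the pattern, with an in-memory SQLite engine standing in for the project-specific db.connect():

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

# In-memory engine as a stand-in; the real code binds sessionmaker to db.connect().
engine = create_engine("sqlite:///:memory:")
SessionMaker = sessionmaker(bind=engine)

session = SessionMaker()
try:
    session.execute(text("SELECT 1"))  # any unit of work runs inside the session
    session.commit()
finally:
    session.close()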