# pytest fixture, parametrized over database URIs (see the sketch below)
def session(request):
    cfg = {
        'db_uri': request.param['uri'],
        'schema': SCHEMA,
    }
    is_sqlite = request.param['uri'].startswith('sqlite')
    use_schema = '#' in request.param['uri']

    # DB cleanup
    if is_sqlite and os.path.isfile('test.db'):
        os.unlink('test.db')
    else:
        with connect(cfg):
            to_clean = [t['table'] for t in SCHEMA] + ['tmp', 'sponsor']
            for table in to_clean:
                if use_schema:
                    table = 'test_schema.' + table
                qr = 'DROP TABLE IF EXISTS %s' % table
                if not is_sqlite:
                    qr += ' CASCADE'
                execute(qr)

    # Create tables
    with connect(cfg):
        create_tables()

    if request.param['auto']:
        cfg.pop('schema')

    with connect(cfg, _auto_rollback=True):
        View('team', ['name', 'country.name']).write(teams)
        yield request.param['uri']
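# A plausible parametrization for the fixture above: each dict becomes one
# `request.param`. The exact list is an assumption; only the 'uri' and
# 'auto' keys, and the '#'-suffixed URI selecting test_schema, are implied
# by the fixture body.
import pytest

DB_PARAMS = [
    {'uri': 'sqlite:///test.db', 'auto': False},
    {'uri': 'postgresql:///test', 'auto': False},
    {'uri': 'postgresql:///test#test_schema', 'auto': True},
]
# Applied as: @pytest.fixture(params=DB_PARAMS) on `def session(request)`.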
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('action', help='run | init')
    parser.add_argument('--db', '-d', help='Database uri')
    parser.add_argument('--schema', '-s', help='Tanker Schema')
    parser.add_argument('--server', '-S', help='Wsgi server to use',
                        default='wsgiref')
    parser.add_argument('--debug', '-D', action='store_true',
                        help='Enable debug mode')
    cli = parser.parse_args()

    cfg = {
        'db_uri': cli.db,
        'schema': cli.schema,
    }
    # Install plugins
    install(TankerPlugin(cfg))
    app = default_app()

    if cli.action == 'run':
        if cli.debug:
            app.add_hook('after_request', log)
            logger.setLevel('DEBUG')
        app.run(host='localhost', port=8080, server=cli.server,
                debug=cli.debug)
    elif cli.action == 'init':
        with connect(cfg):
            create_tables()
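# Example invocation, assuming the module above is saved as app.py:
#
#   python app.py run --db sqlite:///app.db --server wsgiref --debug
#
# `init` creates the tables first; how --schema is resolved (raw YAML vs.
# a file path) depends on tanker's config handling and is not shown here.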
def test_mixed(session):
    view = View('country', ['name'])
    view.write([('Italy',)])
    countries = [c for c, in view.read()]

    in_q = Queue()
    out_q = Queue()

    # Needed because table creation and content are not committed yet
    ctx.connection.commit()

    # We re-use the current config to create a nested context
    with connect(ctx.cfg):
        t = TankerThread(target=read, args=(in_q, out_q))
        t.start()
        res = []
        for _ in countries:
            in_q.put('tic')
            res.append(out_q.get()[1])

        # Release thread loop & wait for it
        in_q.put('tic')
        t.join()

    assert 'Italy' in res
    assert res == countries
def test_manual_conn(session):
    country_view = View('country', ['name'])
    res = country_view.read({'name': 'Prussia'}).one()
    assert res is None

    # Needed to not lock other connections
    ctx.connection.commit()

    # Manually start and stop the connection
    cfg = {'db_uri': session, 'schema': SCHEMA}
    connect(cfg, 'enter')
    country_view.write([['Prussia']])
    connect(cfg, 'leave')

    # Make sure the result is not lost
    with connect(cfg):
        assert country_view.read({'name': 'Prussia'}).one()[0] == 'Prussia'
def write_pg(df):
    tables = [{
        "table": "test",
        "columns": {
            "timestamp": "timestamp",
            "value": "float",
        },
        "key": ["timestamp"],
        "use-index": "brin",
    }]
    cfg = {
        "db_uri": "postgresql:///test",
        "schema": tables,
    }
    with connect(cfg):
        create_tables()
    with connect(cfg):
        View("test").write(df)
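# Hypothetical driver for write_pg, assuming View.write accepts a pandas
# DataFrame whose columns match the schema (the `df` parameter name
# suggests so, but the snippet above does not confirm it).
from datetime import datetime, timedelta
import pandas as pd

start = datetime(2020, 1, 1)
df = pd.DataFrame({
    'timestamp': [start + timedelta(hours=i) for i in range(24)],
    'value': [float(i) for i in range(24)],
})
write_pg(df)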
def read(in_q, out_q, cfg):
    # Worker: open a dedicated connection, then emit one country per 'tic'
    # received, tagged with the thread id; stop when the cursor is exhausted.
    with connect(cfg):
        t_id = current_thread().ident
        countries = View('country').read()
        while True:
            in_q.get()
            in_q.task_done()
            c = countries.one()
            if c is None:
                break
            out_q.put((t_id, c[0]))
def test_nested_read(session):
    # Needed because table creation and content are not committed yet
    ctx.connection.commit()

    # Start read from parent ctx
    cursor = View('country').read()
    first = cursor.one()

    # We re-use the current config to create a nested context
    with connect(ctx.cfg):
        nested_res = View('country').read().all()

    res = [first] + list(cursor)
    assert res == nested_res
def test_read_thread(session):
    '''
    Test a situation where threads are created outside of any active
    context (hence dry).
    '''
    cfg = {'db_uri': session, 'schema': SCHEMA}
    with connect(cfg):
        create_tables()
        countries = View('country').read().all()
    nb_cty = len(countries)
    assert nb_cty > 2

    read_threads = []
    out_q = Queue()
    in_queues = []
    for i in range(NB_THREADS):
        in_q = Queue(maxsize=1)
        in_queues.append(in_q)
        t = Thread(target=read, args=(in_q, out_q, cfg))
        t.start()
        read_threads.append(t)

    # Launch metronome to feed input queues
    metro_thread = Thread(target=metronome, args=(in_queues, nb_cty))
    metro_thread.start()

    # Loop on results
    is_full = lambda x: len(x) == nb_cty
    per_thread = defaultdict(list)
    while True:
        t_id, c = out_q.get()
        out_q.task_done()
        per_thread[t_id].append(c)
        if all(map(is_full, per_thread.values())):
            break

    # Join everything
    metro_thread.join()
    for t in read_threads:
        t.join()
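# `metronome` is referenced above but not shown. A minimal sketch
# consistent with its usage (an assumption, not the suite's actual
# helper): feed every input queue once per country, plus a final 'tic'
# so each worker sees the exhausted cursor and exits its loop.
def metronome(in_queues, nb_cty):
    for _ in range(nb_cty + 1):
        for q in in_queues:
            q.put('tic')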
# Inner wrapper of TankerPlugin (cf. main() above): run the route callback
# inside a tanker connection. `self.cfg` and `callback` come from the
# enclosing plugin.
def wrap(*args, **kwargs):
    with connect(self.cfg):
        return callback(*args, **kwargs)
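# A minimal sketch of the enclosing plugin, following Bottle's v2 plugin
# API (`name`, `api`, `apply`); the actual TankerPlugin implementation is
# assumed, not shown in this section.
class TankerPlugin:
    name = 'tanker'
    api = 2

    def __init__(self, cfg):
        self.cfg = cfg

    def apply(self, callback, route):
        def wrap(*args, **kwargs):
            # One connection per request
            with connect(self.cfg):
                return callback(*args, **kwargs)
        return wrap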
def session(request):
    cfg = {'db_uri': request.param['uri'], 'schema': SCHEMA}
    with connect(cfg):
        create_tables()
        yield request.param['uri']
from datetime import datetime

from tanker import connect, View, yaml_load, create_tables

schema = '''
- table: test
  columns:
    name: varchar
    ts: timestamp
    code: integer
    value: float
  index:
    - name
'''

cfg = {'schema': yaml_load(schema)}
ts = datetime.now()
data = [(str(i), ts, 1, 1) for i in range(100000)]

with connect(cfg):
    create_tables()
    v = View('test')
    v.write(data)
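# A possible read-back check for the snippet above; the filter-dict syntax
# mirrors the other examples in this section. Note that cfg declares no
# db_uri, so the data may live in a transient default database and this is
# illustrative only.
with connect(cfg):
    rows = View('test', ['name', 'value']).read({'name': '42'}).all()
    print(rows)  # expected: [('42', 1.0)]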