class Sql:
    def __init__(self, config):
        self.db = Sqlite(config)

    def execute(self, sql, params=None):
        Log.error("Use a transaction")

    def commit(self):
        Log.error("Use a transaction")

    def rollback(self):
        Log.error("Use a transaction")

    def get(self, sql, params=None):
        if params:
            for p in params:
                sql = sql.replace('?', quote_value(p), 1)
        return self.db.query(sql).data

    def get_one(self, sql, params=None):
        return self.get(sql, params)[0]

    def transaction(self):
        return Transaction(self.db.transaction())

    @property
    def pending_transactions(self):
        """
        :return: NUMBER OF TRANSACTIONS IN THE QUEUE
        """
        return len(self.db.available_transactions)
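# A minimal usage sketch for the Sql wrapper above. The config value and the
# table are hypothetical, and it is assumed the Transaction wrapper is a
# context manager exposing execute() like the underlying Sqlite transaction;
# reads go through get()/get_one() because execute()/commit()/rollback() on
# the wrapper deliberately raise errors.
sql = Sql({"filename": "example.sqlite"})

with sql.transaction() as t:          # writes must go through a transaction
    t.execute("INSERT INTO my_table (value) VALUES ('a')")

rows = sql.get("SELECT * FROM my_table WHERE value = ?", ['a'])   # '?' filled via quote_value
first_row = sql.get_one("SELECT * FROM my_table")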
def _setup():
    threads = Data()
    signals = Data()
    db = Sqlite()
    db.query("CREATE TABLE my_table (value TEXT)")
    for name in ["a", "b"]:
        signals[name] = [{"begin": Signal(), "done": Signal()} for _ in range(4)]
        threads[name] = Thread.run(name, _work, name, db, signals[name])
    return db, threads, signals
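# The _work target referenced above is not shown in this example, so the
# following is only a hypothetical sketch of such a worker: it assumes the
# usual Signal.wait()/Signal.go() handshake and one write per step through
# db.transaction(), with please_stop supplied by Thread.run().
def _work(name, db, signals, please_stop):
    for i, step in enumerate(signals):
        step["begin"].wait()          # wait for the test to release this step
        with db.transaction() as t:
            t.execute(
                "INSERT INTO my_table (value) VALUES " +
                quote_list((name + text_type(i),))
            )
        step["done"].go()             # report the step as finished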
def test_bad_exists_properties(self):
    test = {
        "data": [
            {"~e~": 1},
            {"~e~": 1},
        ],
        "query": {
            "from": TEST_TABLE,
            "select": [{"name": "count", "aggregate": "count"}],
        },
        "expecting_list": {
            "meta": {"format": "value"},
            "data": {"count": 2},
        },
    }
    subtest = wrap(test)
    cont = self.utils.fill_container(subtest, typed=False)

    db = Sqlite(filename="metadata.localhost.sqlite")
    try:
        with db.transaction() as t:
            t.execute(
                "insert into " + quote_column("meta.columns") +
                "(name, es_type, jx_type, nested_path, es_column, es_index, last_updated) VALUES " +
                quote_set([".", "object", "exists", '["."]', ".", cont.alias, Date.now()])
            )
    except Exception as e:
        pass
    try:
        with db.transaction() as t:
            t.execute(
                "insert into " + quote_column("meta.columns") +
                "(name, es_type, jx_type, nested_path, es_column, es_index, last_updated) VALUES " +
                quote_set(["~e~", "long", "exists", '["."]', "~e~", cont.alias, Date.now()])
            )
    except Exception as e:
        pass

    self.utils.send_queries(subtest)
def test_transactionqueries():
    db = Sqlite()
    db.query("CREATE TABLE my_table (value TEXT)")

    with db.transaction() as t:
        t.execute("INSERT INTO my_table (value) VALUES ('a')")

        try:
            result1 = db.query("SELECT * FROM my_table")
            assert False
        except Exception as e:
            assert DOUBLE_TRANSACTION_ERROR in e

        result2 = t.query("SELECT * FROM my_table")
        assert result2.data[0][0] == 'a'
def test_nested_transaction2():
    db = Sqlite()
    db.query("CREATE TABLE my_table (value TEXT)")

    with db.transaction() as t:
        with db.transaction() as t2:
            t2.execute("INSERT INTO my_table VALUES ('b')")
            result = t2.query("SELECT * FROM my_table")
            assert len(result.data) == 1
            assert result.data[0][0] == 'b'

        t.execute("INSERT INTO my_table VALUES ('a')")

    _teardown(db, {})
def __init__(self, name, db=None, uid=UID, kwargs=None):
    """
    :param name: NAME FOR THIS TABLE
    :param db: THE DB TO USE
    :param uid: THE UNIQUE INDEX FOR THIS TABLE
    :return: HANDLE FOR TABLE IN db
    """
    global _config

    if db:
        self.db = db
    else:
        self.db = db = Sqlite()

    if not _config:
        # REGISTER sqlite AS THE DEFAULT CONTAINER TYPE
        from jx_base.container import config as _config
        if not _config.default:
            _config.default = {"type": "sqlite", "settings": {"db": db}}

    self.sf = Snowflake(fact=name, uid=uid, db=db)
    self._next_guid = generateGuid()
    self._next_uid = 1
    self._make_digits_table()
    self.uid_accessor = jx.get(self.sf.uid)
def __init__(self, name, db=None, uid=UID, kwargs=None):
    """
    :param name: NAME FOR THIS TABLE
    :param db: THE DB TO USE
    :param uid: THE UNIQUE INDEX FOR THIS TABLE
    :return: HANDLE FOR TABLE IN db
    """
    global _config

    if isinstance(db, Sqlite):
        self.db = db
    else:
        self.db = db = Sqlite(db)

    if not _config:
        # REGISTER sqlite AS THE DEFAULT CONTAINER TYPE
        from jx_base.container import config as _config
        if not _config.default:
            _config.default = {"type": "sqlite", "settings": {"db": db}}

    ns = Namespace(db=db)
    self.facts = ns.create_or_replace_facts(fact_name=name)
    self._next_guid = generateGuid()
    self._next_uid = 1
    self._make_digits_table()
    self.uid_accessor = jx.get(uid)
def __init__(self, flask_app, auth0, permissions, session_manager, device=None):
    if not auth0.domain:
        Log.error("expecting auth0 configuration")

    self.auth0 = auth0
    self.permissions = permissions
    self.session_manager = session_manager

    # ATTACH ENDPOINTS TO FLASK APP
    endpoints = auth0.endpoints
    if not endpoints.login or not endpoints.logout or not endpoints.keep_alive:
        Log.error("Expecting paths for login, logout and keep_alive")
    add_flask_rule(flask_app, endpoints.login, self.login)
    add_flask_rule(flask_app, endpoints.logout, self.logout)
    add_flask_rule(flask_app, endpoints.keep_alive, self.keep_alive)

    if device:
        self.device = device
        db = self.device.db = Sqlite(device.db)
        if not db.about("device"):
            with db.transaction() as t:
                t.execute(sql_create(
                    "device",
                    {"state": "TEXT PRIMARY KEY", "session_id": "TEXT"},
                ))
        if device.auth0.redirect_uri != text_type(URL(device.home, path=device.endpoints.callback)):
            Log.error("expecting home+endpoints.callback == auth0.redirect_uri")

        add_flask_rule(flask_app, device.endpoints.register, self.device_register)
        add_flask_rule(flask_app, device.endpoints.status, self.device_status)
        add_flask_rule(flask_app, device.endpoints.login, self.device_login)
        add_flask_rule(flask_app, device.endpoints.callback, self.device_callback)
def __init__(self, db=None):
    global _config

    if isinstance(db, Sqlite):
        self.db = db
    else:
        self.db = db = Sqlite(db)
    self.db.create_new_functions()  # creating new functions: regexp

    if not _config:
        # REGISTER sqlite AS THE DEFAULT CONTAINER TYPE
        from jx_base.container import config as _config
        if not _config.default:
            _config.default = {"type": "sqlite", "settings": {"db": db}}

    self.setup()
    self.ns = Namespace(db=db)
    self.about = QueryTable("meta.about", self)
    self.next_uid = self._gen_ids().__next__  # A DELIGHTFUL SOURCE OF UNIQUE INTEGERS
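# A minimal sketch of using the constructor above; the class name Container
# and the fact-table name are assumptions based on the surrounding examples.
container = Container(db=Sqlite(filename="example.sqlite"))   # hypothetical class name
facts = container.ns.create_or_replace_facts(fact_name="my_facts")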
class TuidClient(object):
    @override
    def __init__(self, endpoint, push_queue=None, timeout=30, db=None, kwargs=None):
        self.enabled = True
        self.num_bad_requests = 0
        self.endpoint = endpoint
        self.timeout = timeout
        self.push_queue = aws.Queue(push_queue) if push_queue else None
        self.config = kwargs

        self.db = Sqlite(filename=coalesce(db.filename, "tuid_client.sqlite"), kwargs=db)
        if not self.db.query("SELECT name FROM sqlite_master WHERE type='table';").data:
            with self.db.transaction() as transaction:
                self._setup(transaction)

    def _setup(self, transaction):
        transaction.execute("""
            CREATE TABLE tuid (
                revision CHAR(12),
                file TEXT,
                tuids TEXT,
                PRIMARY KEY(revision, file)
            )
        """)

    def get_tuid(self, branch, revision, file):
        """
        :param branch: BRANCH TO FIND THE REVISION/FILE
        :param revision: THE REVISION NUMBER
        :param file: THE FULL PATH TO A SINGLE FILE
        :return: A LIST OF TUIDS
        """
        service_response = wrap(self.get_tuids(branch, revision, [file]))
        for f, t in service_response.items():
            return t

    def get_tuids(self, branch, revision, files):
        """
        GET TUIDS FROM ENDPOINT, AND STORE IN DB
        :param branch: BRANCH TO FIND THE REVISION/FILE
        :param revision: THE REVISION NUMBER
        :param files: THE FULL PATHS TO THE FILES
        :return: MAP FROM FILENAME TO TUID LIST
        """
        # SCRUB INPUTS
        revision = revision[:12]
        files = [file.lstrip('/') for file in files]

        with Timer(
            "ask tuid service for {{num}} files at {{revision|left(12)}}",
            {"num": len(files), "revision": revision},
            silent=not self.enabled
        ):
            response = self.db.query(
                "SELECT file, tuids FROM tuid WHERE revision=" + quote_value(revision) +
                " AND file IN " + quote_list(files)
            )
            found = {file: json2value(tuids) for file, tuids in response.data}

            try:
                remaining = set(files) - set(found.keys())
                new_response = None
                if remaining:
                    request = wrap({
                        "from": "files",
                        "where": {"and": [
                            {"eq": {"revision": revision}},
                            {"in": {"path": remaining}},
                            {"eq": {"branch": branch}}
                        ]},
                        "branch": branch,
                        "meta": {
                            "format": "list",
                            "request_time": Date.now()
                        }
                    })
                    if self.push_queue is not None:
                        if DEBUG:
                            Log.note("record tuid request to SQS: {{timestamp}}", timestamp=request.meta.request_time)
                        self.push_queue.add(request)
                    else:
                        if DEBUG:
                            Log.note("no recorded tuid request")

                    if not self.enabled:
                        return found

                    new_response = http.post_json(self.endpoint, json=request, timeout=self.timeout)

                    with self.db.transaction() as transaction:
                        command = "INSERT INTO tuid (revision, file, tuids) VALUES " + sql_list(
                            quote_list((revision, r.path, value2json(r.tuids)))
                            for r in new_response.data
                            if r.tuids != None
                        )
                        if not command.endswith(" VALUES "):
                            transaction.execute(command)
                    self.num_bad_requests = 0

                found.update({r.path: r.tuids for r in new_response.data} if new_response else {})
                return found

            except Exception as e:
                self.num_bad_requests += 1
                Till(seconds=SLEEP_ON_ERROR).wait()
                if self.enabled and self.num_bad_requests >= 3:
                    self.enabled = False
                    Log.error("TUID service has problems.", cause=e)
                return found
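# A minimal sketch of calling the client above; the service URL, branch,
# revision and file path are hypothetical, and the db config is wrapped so
# db.filename resolves the way the constructor expects.
client = TuidClient(
    endpoint="http://localhost:5000/tuid",
    timeout=30,
    db=wrap({"filename": "tuid_client.sqlite"}),
)
tuids_by_file = client.get_tuids(
    branch="mozilla-central",
    revision="deadbeef0123",
    files=["dom/base/Document.cpp"],
)
single_file_tuids = client.get_tuid("mozilla-central", "deadbeef0123", "dom/base/Document.cpp")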
class Permissions:
    @override
    def __init__(self, db, kwargs):
        if is_data(db):
            self.db = Sqlite(db)
        elif isinstance(db, Sqlite):
            self.db = db
        else:
            Log.error("Bad db parameter")

        if not self.db.about(PERMISSION_TABLE):
            self.setup()
        self.next_id = id_generator(self.db)

    def setup(self):
        with self.db.transaction() as t:
            t.execute(sql_create(VERSION_TABLE, {"version": "TEXT"}))
            t.execute(sql_insert(VERSION_TABLE, {"version": "1.0"}))

            t.execute(sql_create(
                GROUP_TABLE,
                {
                    "_id": "LONG PRIMARY KEY",
                    "name": "TEXT",
                    "group": "TEXT",
                    "email": "TEXT",
                    "issuer": "TEXT",
                    "email_verified": "INTEGER",
                    "description": "TEXT",
                    "owner": "LONG",
                },
            ))
            t.execute(sql_insert(
                GROUP_TABLE,
                [
                    {
                        "_id": 1,
                        "name": "root",
                        "email": "*****@*****.**",
                        "description": "access for security system",
                    },
                    {
                        "_id": 11,
                        "group": "public",
                        "description": "everyone with confirmed email",
                        "owner": 1,
                    },
                    {
                        "_id": 12,
                        "group": "mozillians",
                        "description": "people that mozilla authentication has recognized as mozillian",
                        "owner": 1,
                    },
                    {
                        "_id": 13,
                        "group": "moz-employee",
                        "description": "people that mozilla authentication has recognized as employee",
                        "owner": 1,
                    },
                ],
            ))

            t.execute(sql_create(
                RESOURCE_TABLE,
                {
                    "_id": "LONG PRIMARY KEY",
                    "table": "TEXT",
                    "operation": "TEXT",
                    "owner": "LONG",
                },
            ))
            t.execute(sql_insert(
                RESOURCE_TABLE,
                [
                    CREATE_TABLE,
                    {"_id": 101, "table": ".", "operation": "update", "owner": 1},
                    {"_id": 102, "table": ".", "operation": "from", "owner": 1},
                ],
            ))

            t.execute(sql_create(
                PERMISSION_TABLE,
                {
                    "user": "******",
                    "resource": "LONG",
                    "owner": "LONG",
                },
            ))
            t.execute(sql_insert(
                PERMISSION_TABLE,
                [
                    {"user": 12, "resource": 11, "owner": 1},
                    {"user": 13, "resource": 11, "owner": 1},
                    {"user": 13, "resource": 12, "owner": 1},
                    {"user": 1, "resource": 100, "owner": 1},
                    {"user": 1, "resource": 101, "owner": 1},
                    {"user": 1, "resource": 102, "owner": 1},
                ],
            ))

    def create_table_resource(self, table_name, owner):
        """
        CREATE A TABLE, CREATE RESOURCES FOR OPERATIONS, ENSURE CREATOR HAS CONTROL OVER TABLE
        :param table_name: Create resources for given table
        :param owner: assign this user as owner
        :return:
        """
        new_resources = wrap([
            {"table": table_name, "operation": op, "owner": 1}
            for op in TABLE_OPERATIONS
        ])
        self._insert(RESOURCE_TABLE, new_resources)

        with self.db.transaction() as t:
            t.execute(sql_insert(
                PERMISSION_TABLE,
                [
                    {"user": owner._id, "resource": r._id, "owner": ROOT_USER._id}
                    for r in new_resources
                ],
            ))

    def get_or_create_user(self, details):
        details = wrap(details)
        issuer = details.sub or details.issuer
        email = details.email
        email_verified = details.email_verified
        if not email:
            Log.error("Expecting id_token to have claims.email property")

        result = self.db.query(sql_query({
            "select": ["_id", "email", "issuer"],
            "from": GROUP_TABLE,
            "where": {"eq": {"email": email, "issuer": issuer}},
        }))

        if result.data:
            user = Data(zip(result.header, first(result.data)))
            user.email_verified = email_verified
            return user

        new_user = wrap({
            "email": email,
            "issuer": issuer,
            "email_verified": email_verified,
            "owner": ROOT_USER._id,
        })
        self._insert(GROUP_TABLE, new_user)
        return new_user

    def get_resource(self, table, operation):
        result = self.db.query(sql_query({
            "select": "_id",
            "from": RESOURCE_TABLE,
            "where": {"eq": {"table": table, "operation": operation}},
        }))
        if not result.data:
            Log.error("Expecting to find a resource")
        return Data(zip(result.header, first(result.data)))

    def add_permission(self, user, resource, owner):
        """
        :param user:
        :param resource:
        :param owner:
        :return:
        """
        user = wrap(user)
        resource = wrap(resource)
        owner = wrap(owner)

        # DOES owner HAVE ACCESS TO resource?
        if not self.verify_allowance(owner, resource):
            Log.error("not allowed to assign resource")

        # DOES THIS PERMISSION EXIST ALREADY
        allowance = self.verify_allowance(user, resource)
        if allowance:
            if any(r.owner == owner for r in allowance):
                Log.error("already allowed via {{allowance}}", allowance=allowance)
            # ALREADY ALLOWED, BUT MULTIPLE PATHS MAY BE OK

        with self.db.transaction() as t:
            t.execute(sql_insert(
                PERMISSION_TABLE,
                {"user": user._id, "resource": resource._id, "owner": owner._id},
            ))

    def verify_allowance(self, user, resource):
        """
        VERIFY IF user CAN ACCESS resource
        :param user:
        :param resource:
        :return: ALLOWANCE CHAIN
        """
        user = wrap(user)
        resource = wrap(resource)
        resources = self.db.query(sql_query({
            "select": ["resource", "owner"],
            "from": PERMISSION_TABLE,
            "where": {"eq": {"user": user._id}},
        }))

        for r in resources.data:
            record = Data(zip(resources.header, r))
            if record.resource == resource._id:
                if record.owner == ROOT_USER._id:
                    return FlatList(vals=[{
                        "resource": resource,
                        "user": user,
                        "owner": ROOT_USER,
                    }])
                else:
                    cascade = self.verify_allowance(wrap({"_id": record.owner}), resource)
                    if cascade:
                        cascade.append({
                            "resource": resource,
                            "user": user,
                            "owner": record.owner,
                        })
                    return cascade
            else:
                group = record.resource
                cascade = self.verify_allowance(wrap({"_id": group}), resource)
                if cascade:
                    cascade.append({
                        "group": group,
                        "user": user,
                        "owner": record.owner,
                    })
                    return cascade

        return []

    def find_resource(self, table, operation):
        result = self.db.query(sql_query({
            "from": RESOURCE_TABLE,
            "where": {"eq": {"table": table, "operation": operation}},
        }))
        return first(Data(zip(result.header, r)) for r in result.data)

    def _insert(self, table, records):
        records = listwrap(records)
        keys = {"_id"}
        for r in records:
            keys.update(r.keys())
            if r._id == None:
                r._id = self.next_id()

        with self.db.transaction() as t:
            t.execute(sql_insert(table, records))
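# A minimal sketch of the flow the Permissions class above supports; the
# email, issuer and table name are hypothetical, and ROOT_USER is assumed to
# be the module-level root-user record the class already references.
perm = Permissions(db={"filename": "permissions.sqlite"})

user = perm.get_or_create_user({
    "email": "someone@example.com",
    "sub": "example-issuer",
    "email_verified": True,
})
perm.create_table_resource("my_table", owner=user)   # resources for each TABLE_OPERATIONS entry

resource = perm.get_resource(".", "from")            # the root "from" resource created by setup()
if not perm.verify_allowance(user, resource):
    perm.add_permission(user, resource, ROOT_USER)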
@verify_user
def private_scoped(user):
    """A valid access token and an appropriate scope are required to access this route
    """
    if requires_scope(config.auth0.scope):
        response = (
            "Hello from a private endpoint! You need to be authenticated and have a scope of "
            + config.auth0.scope
            + " to see this."
        )
        return jsonify(message=response)
    Log.error(
        "You don't have access to {{scope}}",
        scope=config.auth0.scope,
        code=403,
    )


add_flask_rule(APP, "/api/public", public)
add_flask_rule(APP, "/api/private", private)
add_flask_rule(APP, "/api/private-scoped", private_scoped)

config = startup.read_settings()
constants.set(config.constants)
Log.start(config.debug)

session_manager = setup_flask_session(APP, config.session)
perm = Permissions(Sqlite(config.permissions.store))
auth = Authenticator(APP, config.auth0, perm, session_manager)

Log.note("start servers")
setup_flask_ssl(APP, config.flask)
APP.run(**config.flask)
def __init__(self, name, db=None, uid=UID_PREFIX + "id", exists=False):
    """
    :param name: NAME FOR THIS TABLE
    :param db: THE DB TO USE
    :param uid: THE UNIQUE INDEX FOR THIS TABLE
    :return: HANDLE FOR TABLE IN db
    """
    Container.__init__(self, frum=None)
    if db:
        self.db = db
    else:
        self.db = db = Sqlite()

    self.name = name
    self.uid = listwrap(uid)
    self.columns = {}
    for u in uid:
        if not self.columns.get(u, None):
            cs = self.columns[u] = set()
            if u.startswith(UID_PREFIX):
                cs.add(Column(
                    name=u,
                    table=name,
                    type="integer",
                    es_column=typed_column(u, "integer"),
                    es_index=name,
                ))
            else:
                cs.add(Column(
                    name=u,
                    table=name,
                    type="text",
                    es_column=typed_column(u, "text"),
                    es_index=name,
                ))
    self.uid_accessor = jx.get(self.uid)
    self.nested_tables = {}  # MAP FROM TABLE NAME TO Table OBJECT

    if exists:
        # LOAD THE COLUMNS
        command = "PRAGMA table_info(" + quote_table(name) + ")"
        details = self.db.query(command)
        self.columns = {}
        for r in details:
            cname = untyped_column(r[1])
            ctype = r[2].lower()
            column = Column(
                name=cname,
                table=name,
                type=ctype,
                es_column=typed_column(cname, ctype),
                es_index=name,
            )
            cs = self.columns.get(name, Null)
            if not cs:
                cs = self.columns[name] = set()
            cs.add(column)
    else:
        command = (
            "CREATE TABLE " + quote_table(name) + "(" +
            (",".join(_quote_column(c) + " " + c.type for u, cs in self.columns.items() for c in cs)) +
            ", PRIMARY KEY (" +
            (", ".join(_quote_column(c) for u in self.uid for c in self.columns[u])) +
            "))"
        )
        self.db.execute(command)
class Cache(object):
    """
    For Caching hg.mo requests
    """

    @override
    def __init__(self, rate=None, amortization_period=None, source=None, database=None, kwargs=None):
        self.amortization_period = coalesce(amortization_period, AMORTIZATION_PERIOD)
        self.rate = coalesce(rate, HG_REQUEST_PER_SECOND)
        self.cache_locker = Lock()
        self.cache = {}  # MAP FROM url TO (ready, headers, response, timestamp) PAIR
        self.no_cache = {}  # VERY SHORT TERM CACHE
        self.workers = []
        self.todo = Queue(APP_NAME + " todo")
        self.requests = Queue(APP_NAME + " requests", max=int(self.rate * self.amortization_period.seconds))
        self.url = URL(source.url)
        self.db = Sqlite(database)
        self.inbound_rate = RateLogger("Inbound")
        self.outbound_rate = RateLogger("hg.mo")

        if not self.db.query("SELECT name FROM sqlite_master WHERE type='table'").data:
            with self.db.transaction() as t:
                t.execute(
                    "CREATE TABLE cache ("
                    "   path TEXT PRIMARY KEY, "
                    "   headers TEXT, "
                    "   response TEXT, "
                    "   timestamp REAL "
                    ")"
                )

        self.threads = [
            Thread.run(APP_NAME + " worker" + text_type(i), self._worker)
            for i in range(CONCURRENCY)
        ]
        self.limiter = Thread.run(APP_NAME + " limiter", self._rate_limiter)
        self.cleaner = Thread.run(APP_NAME + " cleaner", self._cache_cleaner)

    def _rate_limiter(self, please_stop):
        try:
            max_requests = self.requests.max
            recent_requests = []

            while not please_stop:
                now = Date.now()
                too_old = now - self.amortization_period

                recent_requests = [t for t in recent_requests if t > too_old]

                num_recent = len(recent_requests)
                if num_recent >= max_requests:
                    space_free_at = recent_requests[0] + self.amortization_period
                    (please_stop | Till(till=space_free_at.unix)).wait()
                    continue
                for _ in range(num_recent, max_requests):
                    request = self.todo.pop()
                    now = Date.now()
                    recent_requests.append(now)
                    self.requests.add(request)
        except Exception as e:
            Log.warning("failure", cause=e)

    def _cache_cleaner(self, please_stop):
        while not please_stop:
            now = Date.now()
            too_old = now - CACHE_RETENTION

            remove = set()
            with self.cache_locker:
                # iterate items() so the timestamp is available for each path
                for path, (ready, headers, response, timestamp) in self.cache.items():
                    if timestamp < too_old:
                        remove.add(path)
                for r in remove:
                    del self.cache[r]
            (please_stop | Till(seconds=CACHE_RETENTION.seconds / 2)).wait()

    def please_cache(self, path):
        """
        :return: False if `path` is not to be cached
        """
        if path.endswith("/tip"):
            return False
        if any(k in path for k in [
            "/json-annotate/", "/json-info/", "/json-log/", "/json-rev/", "/rev/",
            "/raw-rev/", "/raw-file/", "/json-pushes", "/pushloghtml", "/file/"
        ]):
            return True
        return False

    def request(self, method, path, headers):
        now = Date.now()
        self.inbound_rate.add(now)
        ready = Signal(path)

        # TEST CACHE
        with self.cache_locker:
            pair = self.cache.get(path)
            if pair is None:
                self.cache[path] = (ready, None, None, now)

        if pair is not None:
            # REQUEST IS IN THE QUEUE ALREADY, WAIT
            ready, headers, response, then = pair
            if response is None:
                ready.wait()
                with self.cache_locker:
                    ready, headers, response, timestamp = self.cache.get(path)
            with self.db.transaction() as t:
                t.execute(
                    "UPDATE cache SET timestamp=" + quote_value(now) +
                    " WHERE path=" + quote_value(path) + " AND timestamp<" + quote_value(now)
                )
            return Response(
                response,
                status=200,
                headers=json.loads(headers)
            )

        # TEST DB
        db_response = self.db.query(
            "SELECT headers, response FROM cache WHERE path=" + quote_value(path)
        ).data

        if db_response:
            headers, response = db_response[0]
            with self.db.transaction() as t:
                t.execute(
                    "UPDATE cache SET timestamp=" + quote_value(now) +
                    " WHERE path=" + quote_value(path) + " AND timestamp<" + quote_value(now)
                )
            with self.cache_locker:
                self.cache[path] = (ready, headers, response.encode('latin1'), now)
            ready.go()

            return Response(
                response,
                status=200,
                headers=json.loads(headers)
            )

        # MAKE A NETWORK REQUEST
        self.todo.add((ready, method, path, headers, now))
        ready.wait()
        with self.cache_locker:
            ready, headers, response, timestamp = self.cache[path]

        return Response(
            response,
            status=200,
            headers=json.loads(headers)
        )

    def _worker(self, please_stop):
        while not please_stop:
            pair = self.requests.pop(till=please_stop)
            if please_stop:
                break
            ready, method, path, req_headers, timestamp = pair

            try:
                url = self.url / path
                self.outbound_rate.add(Date.now())
                response = http.request(method, url, req_headers)

                del response.headers['transfer-encoding']
                resp_headers = value2json(response.headers)
                resp_content = response.raw.read()

                please_cache = self.please_cache(path)
                if please_cache:
                    with self.db.transaction() as t:
                        t.execute(
                            "INSERT INTO cache (path, headers, response, timestamp) VALUES " +
                            quote_list((path, resp_headers, resp_content.decode('latin1'), timestamp))
                        )
                with self.cache_locker:
                    self.cache[path] = (ready, resp_headers, resp_content, timestamp)
            except Exception as e:
                Log.warning("problem with request to {{path}}", path=path, cause=e)
                with self.cache_locker:
                    # cache entries are 4-tuples; drop the failed entry
                    ready, headers, response, timestamp = self.cache[path]
                    del self.cache[path]
            finally:
                ready.go()
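# A minimal sketch of constructing the Cache above and serving one request;
# the hg URL, path and database filename are hypothetical, and the configs
# are wrapped so attribute access (source.url, etc.) behaves as the
# constructor expects.
cache = Cache(
    source=wrap({"url": "https://hg.mozilla.org"}),
    database=wrap({"filename": "hg_cache.sqlite"}),
    rate=3,                           # requests per second allowed out to hg.mo
)
resp = cache.request("GET", "mozilla-central/json-info/0123456789ab", {})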
class SqliteSessionInterface(FlaskSessionInterface):
    """STORE SESSION DATA IN SQLITE

    :param db: Sqlite database
    :param table: The table name you want to use.
    :param use_signer: Whether to sign the session id cookie or not.
    """

    @override
    def __init__(self, flask_app, db, cookie, table="sessions"):
        global SINGLTON
        if SINGLTON:
            Log.error("Can only handle one session manager at a time")
        SINGLTON = self

        if is_data(db):
            self.db = Sqlite(db)
        else:
            self.db = db
        self.table = table
        self.cookie = cookie
        self.cookie.max_lifetime = parse(self.cookie.max_lifetime)
        self.cookie.inactive_lifetime = parse(self.cookie.inactive_lifetime)

        if not self.db.about(self.table):
            self.setup()
        Thread.run("session monitor", self.monitor)

    def create_session(self, session):
        session.session_id = generate_sid()
        session.permanent = True
        session.expires = (Date.now() + self.cookie.max_lifetime).unix

    def monitor(self, please_stop):
        while not please_stop:
            # Delete expired session
            try:
                with self.db.transaction() as t:
                    t.execute(
                        "DELETE FROM " + quote_column(self.table) +
                        SQL_WHERE + sql_lt(expires=Date.now().unix)
                    )
            except Exception as e:
                Log.warning("problem with session expires", cause=e)
            (please_stop | Till(seconds=60)).wait()

    def setup(self):
        with self.db.transaction() as t:
            t.execute(sql_create(
                self.table,
                {
                    "session_id": "TEXT PRIMARY KEY",
                    "data": "TEXT",
                    "last_used": "NUMBER",
                    "expires": "NUMBER",
                },
            ))

    def cookie_data(self, session):
        return {
            "session_id": session.session_id,
            "expires": session.expires,
            "inactive_lifetime": self.cookie.inactive_lifetime.seconds,
        }

    def update_session(self, session_id, props):
        """
        UPDATE GIVEN SESSION WITH PROPERTIES
        :param session_id:
        :param props:
        :return:
        """
        now = Date.now().unix
        session = self.get_session(session_id)
        for k, v in props.items():
            session[k] = v
        session.last_used = now

        record = {
            "session_id": session_id,
            "data": value2json(session),
            "expires": session.expires,
            "last_used": session.last_used,
        }
        with self.db.transaction() as t:
            t.execute(
                SQL_UPDATE + quote_column(self.table) + SQL_SET +
                sql_list(sql_eq(**{k: v}) for k, v in record.items()) +
                SQL_WHERE + sql_eq(session_id=session_id)
            )

    def get_session(self, session_id):
        now = Date.now().unix
        result = self.db.query(sql_query({
            "from": self.table,
            "where": {"eq": {"session_id": session_id}},
        }))
        saved_record = first(Data(zip(result.header, r)) for r in result.data)
        if not saved_record or saved_record.expires <= now:
            return Data()
        session = json2value(saved_record.data)
        DEBUG and Log.note("record from db {{session}}", session=saved_record)
        return session

    @register_thread
    def open_session(self, app, request):
        session_id = request.headers.get("Authorization")
        DEBUG and Log.note("got session_id {{session|quote}}", session=session_id)
        if not session_id:
            return Data()
        return self.get_session(session_id)

    @register_thread
    def save_session(self, app, session, response):
        if not session or not session.keys():
            return
        if not session.session_id:
            session.session_id = generate_sid()
            session.permanent = True
        DEBUG and Log.note("save session {{session}}", session=session)

        now = Date.now().unix
        session_id = session.session_id
        result = self.db.query(sql_query({
            "from": self.table,
            "where": {"eq": {"session_id": session_id}},
        }))
        saved_record = first(Data(zip(result.header, r)) for r in result.data)
        expires = min(session.expires, now + self.cookie.inactive_lifetime.seconds)

        if saved_record:
            DEBUG and Log.note("found session {{session}}", session=saved_record)
            saved_record.data = value2json(session)
            saved_record.expires = expires
            saved_record.last_used = now
            with self.db.transaction() as t:
                t.execute(
                    "UPDATE " + quote_column(self.table) + SQL_SET +
                    sql_list(sql_eq(**{k: v}) for k, v in saved_record.items()) +
                    SQL_WHERE + sql_eq(session_id=session_id)
                )
        else:
            new_record = {
                "session_id": session_id,
                "data": value2json(session),
                "expires": expires,
                "last_used": now,
            }
            DEBUG and Log.note("new record for db {{session}}", session=new_record)
            with self.db.transaction() as t:
                t.execute(sql_insert(self.table, new_record))
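# A minimal sketch of wiring the session interface above into Flask, using
# Flask's standard session_interface hook; the filename and the cookie
# lifetime strings are hypothetical.
flask_app = Flask(__name__)
flask_app.session_interface = SqliteSessionInterface(
    flask_app,
    db=wrap({"filename": "sessions.sqlite"}),
    cookie=wrap({"max_lifetime": "30day", "inactive_lifetime": "hour"}),
    table="sessions",
)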
def __init__(self, name, db=None, uid=GUID, exists=False, kwargs=None):
    """
    :param name: NAME FOR THIS TABLE
    :param db: THE DB TO USE
    :param uid: THE UNIQUE INDEX FOR THIS TABLE
    :return: HANDLE FOR TABLE IN db
    """
    global _config

    Container.__init__(self, frum=None)
    if db:
        self.db = db
    else:
        self.db = db = Sqlite()

    if not _config:
        from pyLibrary.queries.containers import config as _config
        if not _config.default:
            _config.default = {"type": "sqlite", "settings": {"db": db}}

    self.name = name
    self.uid = listwrap(uid)
    self._next_uid = 1
    self._make_digits_table()

    self.uid_accessor = jx.get(self.uid)
    self.nested_tables = OrderedDict()  # MAP FROM NESTED PATH TO Table OBJECT, PARENTS PRECEDE CHILDREN
    self.nested_tables["."] = self
    self.columns = Index(keys=[join_field(["names", self.name])])
    # MAP FROM DOCUMENT ABS PROPERTY NAME TO THE SET OF SQL COLUMNS IT REPRESENTS (ONE FOR EACH REALIZED DATATYPE)

    if not exists:
        for u in self.uid:
            if u == GUID:
                pass
            else:
                c = Column(
                    names={name: u},
                    type="string",
                    es_column=typed_column(u, "string"),
                    es_index=name,
                )
                self.add_column_to_schema(self.nested_tables, c)

        command = (
            "CREATE TABLE " + quote_table(name) + "(" +
            (",".join(
                [quoted_UID + " INTEGER"] +
                [_quote_column(c) + " " + sql_types[c.type] for u, cs in self.columns.items() for c in cs]
            )) +
            ", PRIMARY KEY (" +
            (", ".join(
                [quoted_UID] +
                [_quote_column(c) for u in self.uid for c in self.columns[u]]
            )) +
            "))"
        )
        self.db.execute(command)
    else:
        # LOAD THE COLUMNS
        command = "PRAGMA table_info(" + quote_table(name) + ")"
        details = self.db.query(command)
        for r in details:
            cname = untyped_column(r[1])
            ctype = r[2].lower()
            column = Column(
                names={name: cname},
                type=ctype,
                nested_path=['.'],
                es_column=typed_column(cname, ctype),
                es_index=name,
            )
            self.add_column_to_schema(self.columns, column)