def update_attributes(self, username, hostname, user_attrs):
    """Change the attributes of one existing user.

    Resets the password and/or renames the user, then re-grants the
    databases the old identity could access when the user was renamed.
    """
    LOG.debug("Changing the user attributes")
    LOG.debug("User is %s" % username)
    user = self._get_user(username, hostname)
    db_access = set()
    grantee = set()
    with LocalSqlClient(get_engine()) as client:
        # Collect every non-USAGE grant so access can be re-granted
        # under the new identity after a rename.
        q = query.Query()
        q.columns = ["grantee", "table_schema"]
        q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
        q.group = ["grantee", "table_schema"]
        q.where = ["privilege_type != 'USAGE'"]
        t = text(str(q))
        db_result = client.execute(t)
        for db in db_result:
            grantee.add(db['grantee'])
            if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
                db_name = db['table_schema']
                db_access.add(db_name)
    with LocalSqlClient(get_engine()) as client:
        uu = query.UpdateUser(user.name, host=user.host,
                              clear=user_attrs.get('password'),
                              new_user=user_attrs.get('name'),
                              new_host=user_attrs.get('host'))
        t = text(str(uu))
        client.execute(t)
        if user_attrs.get('name') is not None:
            if user_attrs.get('host') is None:
                host = user.host
            else:
                host = user_attrs.get('host')
            # BUG FIX: 'grantee' holds entries formatted as "'user'@'host'"
            # (see the comparison above), so testing the bare new username
            # against it could never match. Compare in the same format.
            find_user = "'%s'@'%s'" % (user_attrs['name'], host)
            if find_user not in grantee:
                self.grant_access(user_attrs['name'], host, db_access)
def run(self, **kw):
    """Materialize every schema view on a fresh connection, then execute
    this object's query with the given bind parameters."""
    connection = safe_engine().connect()
    schemata = chain(self.network.relation_schemata,
                     self.network.entity_schemata)
    for schema in schemata:
        view_stmt = schema.cls.view()
        connection.execute(text(str(view_stmt)), current=True)
    return connection.execute(text(self.query), **kw)
def upgrade(engine):
    """Normalize the filename and title of every stored file, attributing
    each change to the file's original uploader."""
    c = engine.connect()
    # Load all files together with their uploader (content_items.created_by).
    files = list(c.execute(r"select files.id, "
                           " files.filename, "
                           " files.title, "
                           " content_items.created_by "
                           " from files"
                           " join content_items on content_items.id = files.id"
                           ))
    for id, filename, title, who in files:
        engine.echo = True
        # Set the session variable so audit/trigger machinery records the
        # uploader as the actor for this update.
        c.execute(text("SET ututi.active_user TO :uid"), uid=who)
        new_filename = cleanupFileName(filename)
        new_title = cleanupFileName(title)
        # Only touch rows whose cleaned-up values actually differ.
        if (new_filename != filename or new_title != title):
            c.execute(text("update files"
                           " set title = :title,"
                           " filename = :filename"
                           " where id = :id"),
                      title=new_title, filename=new_filename, id=id)
def mass_transit_cleanup():
    """Delete and vacuum mass transit live location data older than
    configured interval, for example MASS_TRANSIT_LIVE_KEEP_DAYS = 7"""

    # keep all data if nothing configured
    days = app.config.get("MASS_TRANSIT_LIVE_KEEP_DAYS")
    if not days:
        return

    # Snap deletion to daystart; be noisy as these can take quite a long time.
    # Also delete martians from the future, no use in preferring those forever.
    log.info("Deleting mass_transit_data older than %d days...", days)
    # BUG FIX: ":days" embedded inside the quoted literal ':days days' cannot
    # be bound as a parameter; multiply a one-day interval by the bound
    # integer instead, which parameterizes cleanly.
    query = text("""
        DELETE FROM mass_transit_data
        WHERE time < date_trunc('day', now() - :days * interval '1 day')
           OR time > date_trunc('day', now() + interval '2 days')""")
    delrows = db.engine.execute(query, days=days).rowcount
    log.info("Deleted %d rows of mass_transit_data.", delrows)
    if not delrows:
        return

    # vacuum to reuse space; cannot be wrapped in transaction, so another conn
    log.info("Vacuuming and analyzing mass_transit_data...")
    query = text("VACUUM ANALYZE mass_transit_data")
    conn = db.engine.connect().execution_options(isolation_level="AUTOCOMMIT")
    conn.execute(query)
    conn.close()
    log.info("Vacuuming and analyzing mass_transit_data complete.")
def __create_postgre_resource(self, app, key): bind = metadata.bind # Creates a connection with permission to create users and databases metadata.bind = self.__generate_connection_uri( self.__root_user, self.__root_password) # Create database cannot be executed as a transaction block so we # should change the isolation level to create a database metadata.bind.engine.connect().\ connection.connection.set_isolation_level(0) # Generate data and queries database_name, user_name, password = self.__create_random_data() # It is needed to concatenate this way to avoid the usual but # incompatible way to escape strings while executing some admin-level # sql commands sql_create_user = \ "CREATE USER " + user_name + \ " WITH PASSWORD '" + password + "';" sql_create_database = \ "CREATE DATABASE " + database_name + \ " WITH OWNER " + user_name + \ " ENCODING 'UTF8';" # @TODO: Check if it is correct # Show the queries print("LOG: " + sql_create_user) print("LOG: " + sql_create_database) # Perform the queries text(sql_create_user, metadata.bind).execute(user=user_name, password=password) text(sql_create_database, metadata.bind).execute(database=database_name) #text(sql_revoke_permissions, metadata.bind).execute(user = user_name) # Restores the old database metadata.bind = bind return database_name, user_name, password
def update_attributes(self, username, hostname, user_attrs):
    """Change the attributes of an existing user."""
    LOG.debug("Changing user attributes for user %s." % username)
    user = self._get_user(username, hostname)

    new_name = user_attrs.get('name')
    new_host = user_attrs.get('host')
    new_password = user_attrs.get('password')

    # Nothing requested, nothing to do.
    if not (new_name or new_host or new_password):
        return

    with self.local_sql_client(self.mysql_app.get_engine()) as client:
        if new_password is not None:
            password_stmt = sql_query.SetPassword(
                user.name, host=user.host, new_password=new_password)
            client.execute(text(str(password_stmt)))
        if new_name or new_host:
            rename_stmt = sql_query.RenameUser(
                user.name, host=user.host,
                new_user=new_name, new_host=new_host)
            client.execute(text(str(rename_stmt)))
def enable_root(cls):
    """Enable the root user global access and/or reset the root password"""
    user = models.MySQLUser()
    user.name = "root"
    user.host = "%"  # allow connections from any host
    user.password = generate_random_password()
    with LocalSqlClient(get_engine()) as client:
        print client
        try:
            cu = query.CreateUser(user.name, host=user.host)
            t = text(str(cu))
            client.execute(t, **cu.keyArgs)
        except exc.OperationalError as err:
            # Ignore, user is already created, just reset the password
            # TODO(rnirmal): More fine grained error checking later on
            LOG.debug(err)
    with LocalSqlClient(get_engine()) as client:
        print client
        # Reset the password whether or not the user already existed.
        uu = query.UpdateUser(user.name,
                              host=user.host,
                              clear=user.password)
        t = text(str(uu))
        client.execute(t)
        LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s" %
                  (CONF.root_grant, CONF.root_grant_option))
        # Apply the configured global grants (optionally WITH GRANT OPTION).
        g = query.Grant(
            permissions=CONF.root_grant,
            user=user.name,
            host=user.host,
            grant_option=CONF.root_grant_option,
            clear=user.password,
        )
        t = text(str(g))
        client.execute(t)
    return user.serialize()
def init_udl_tenant_sequences(udl2_conf):
    # Create and sync sequence for each tenant on udl database if it doesn't exist
    with get_udl_connection() as udl_conn:
        all_tenants = udl2_conf.get(PRODUCTION_NAMESPACE)
        udl_schema_name = udl2_conf.get(UDL_NAMESPACE).get(Constants.DB_SCHEMA)
        # dict to keep track of tenant sequence values for each tenant defined in the ini
        all_tenant_sequences = {}
        for tenant in all_tenants:
            tenant_seq_name = Constants.TENANT_SEQUENCE_NAME(tenant)
            tenant_schema_name = all_tenants.get(tenant).get(Constants.DB_SCHEMA)
            # unique identifier for each tenant
            key = all_tenants.get(tenant).get(Constants.URL) + ':' + tenant_schema_name
            # check if we have already visited the tenant prod schema
            if not key in all_tenant_sequences:
                with get_prod_connection(tenant) as prod_conn:
                    # nextval() both reads and advances the production
                    # sequence, so the UDL-side sequence starts past any
                    # value production has already handed out.
                    prod_seq_result = prod_conn.execute(
                        text("select nextval(\'{schema_name}.{seq_name} \')".
                             format(schema_name=tenant_schema_name,
                                    seq_name=Constants.SEQUENCE_NAME)))
                    all_tenant_sequences[key] = prod_seq_result.fetchone()[0]
            # check if the global tenant sequence exists in udl database
            if not sequence_exists(udl_conn, tenant_seq_name):
                # create sequence if does not exist
                udl_conn.execute(
                    CreateSequence(Sequence(name=tenant_seq_name, increment=1)))
            # update and set the current val for the tenant sequence
            udl_conn.execute(
                text("select setval(\'{schema_name}.{seq_name} \', {value}, {called})".
                     format(schema_name=udl_schema_name,
                            seq_name=tenant_seq_name,
                            value=all_tenant_sequences[key],
                            called=True)))
def list_users(self):
    """List users that have access to the database"""
    LOG.debug(_("---Listing Users---"))
    users = []
    client = LocalSqlClient(get_engine())
    with client:
        mysql_user = models.MySQLUser()
        t = text("""select User from mysql.user where host != 'localhost';""")
        result = client.execute(t)
        LOG.debug("result = " + str(result))
        for row in result:
            # NOTE(review): the original body of this loop was corrupted in
            # the source ("******"); reconstructed from the sibling
            # list_users implementation in this file — confirm vs. history.
            LOG.debug("user = " + str(row))
            mysql_user = models.MySQLUser()
            mysql_user.name = row['User']
            # Find the schemas this user holds privileges on.
            t = text("""SELECT grantee, table_schema
                        from information_schema.SCHEMA_PRIVILEGES
                        group by grantee, table_schema;""")
            db_result = client.execute(t)
            for db in db_result:
                matches = re.match("^'(.+)'@", db['grantee'])
                if matches is not None and \
                        matches.group(1) == mysql_user.name:
                    mysql_db = models.MySQLDatabase()
                    mysql_db.name = db['table_schema']
                    mysql_user.databases.append(mysql_db.serialize())
            users.append(mysql_user.serialize())
    LOG.debug("users = " + str(users))
    return users
def enable_root(self, root_password=None):
    """Enable the root user global access and/or reset the root password.
    """
    user = models.MySQLRootUser(root_password)
    with self.local_sql_client(self.mysql_app.get_engine()) as client:
        print(client)
        try:
            create_stmt = sql_query.CreateUser(user.name, host=user.host)
            client.execute(text(str(create_stmt)), **create_stmt.keyArgs)
        except exc.OperationalError as err:
            # Ignore, user is already created, just reset the password
            # TODO(rnirmal): More fine grained error checking later on
            LOG.debug(err)
    with self.local_sql_client(self.mysql_app.get_engine()) as client:
        print(client)
        # Reset the password whether or not the account pre-existed.
        update_stmt = sql_query.UpdateUser(user.name, host=user.host,
                                           clear=user.password)
        client.execute(text(str(update_stmt)))
        LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s." %
                  (CONF.root_grant, CONF.root_grant_option))
        # Apply the configured global grants.
        grant_stmt = sql_query.Grant(permissions=CONF.root_grant,
                                     user=user.name,
                                     host=user.host,
                                     grant_option=CONF.root_grant_option,
                                     clear=user.password)
        client.execute(text(str(grant_stmt)))
    return user.serialize()
def rebuild_spatial_db():
    """Remove the test collection from the spatial DB when it exists."""
    conn = spatial_model.get_spatial_db_connection()
    params = {'id': TEST_COLLECTION_ID}
    existing = conn.execute(
        text('SELECT id FROM collection WHERE id = :id'), params)
    if existing.rowcount:
        conn.execute(
            text('DELETE FROM collection WHERE id = :id'), params)
def test_within_distance(self): """ Because SDO_WITHIN_DISTANCE requires a spatial index for the geometry used as first parameter, we have to insert out test geometries into tables, unlike to the other databases. Note that Oracle uses meter as unit for the tolerance value for geodetic coordinate systems (like 4326)! """ # test if SDO_functions._within_distance is called correctly eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'POINT(0 0)', 0)).count(), 1) eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'POINT(0 0)', 0.1)).count(), 1) eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'POINT(9 9)', 100000)).count(), 0) eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'Polygon((-5 -5, 5 -5, 5 5, -5 5, -5 -5))', 0)).count(), 3) eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'Polygon((-10 -10, 10 -10, 10 10, -10 10, -10 -10))', 0)).count(), 4) eq_(session.query(Spot).filter(functions._within_distance(Spot.spot_location, 'Polygon((-10 -10, 10 -10, 10 10, -10 10, -10 -10))', 200000)).count(), 5) # test if SDO_GEOM.functions._within_distance is called correctly eq_(session.scalar(select([text('1')], from_obj=['dual']).where( functions._within_distance('POINT(0 0)', 'POINT(0 0)', 0, {'tol' : 0.00000005}))), 1) eq_(session.scalar(select([text('1')], from_obj=['dual']).where( functions._within_distance('POINT(0 0)', 'POINT(0 0)', 0, {'dim1' : text(diminfo), 'dim2' : text(diminfo)}))), 1)
def update_attributes(self, username, hostname, user_attrs):
    """Change the attributes of an existing user.

    Renames the user and/or resets the password, then re-grants database
    access under the new identity when it is not already a grantee.
    """
    LOG.debug("Changing user attributes for user %s." % username)
    user = self._get_user(username, hostname)
    db_access = set()
    grantee = set()
    with self.local_sql_client(self.mysql_app.get_engine()) as client:
        # Collect every non-USAGE grant so access can be re-granted after
        # a rename.
        q = sql_query.Query()
        q.columns = ["grantee", "table_schema"]
        q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
        q.group = ["grantee", "table_schema"]
        q.where = ["privilege_type != 'USAGE'"]
        t = text(str(q))
        db_result = client.execute(t)
        for db in db_result:
            grantee.add(db['grantee'])
            if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
                db_name = db['table_schema']
                db_access.add(db_name)
    with self.local_sql_client(self.mysql_app.get_engine()) as client:
        uu = sql_query.UpdateUser(user.name, host=user.host,
                                  clear=user_attrs.get('password'),
                                  new_user=user_attrs.get('name'),
                                  new_host=user_attrs.get('host'))
        t = text(str(uu))
        client.execute(t)
        uname = user_attrs.get('name') or username
        host = user_attrs.get('host') or hostname
        # FIX: restore the scrubbed/corrupted literal ("******") to the
        # "'user'@'host'" format used for the grantee entries above.
        find_user = "'%s'@'%s'" % (uname, host)
        if find_user not in grantee:
            self.grant_access(uname, host, db_access)
def test_protocol_count_filter_box(self):
    """Get the feature count with a box as filter"""
    proto = Protocol(session, Spot)
    request = FakeRequest({})
    request.params['bbox'] = '-10,-10,10,10'
    eq_(proto.count(request), '4')
    request.params['tolerance'] = '200000'
    eq_(proto.count(request), '5')
    # query features that are inside a bbox that uses a different CRS
    # note that we either have to specify a tolerance ('tol') or
    # dimension information ('dim1' and 'dim2')
    filter = create_default_filter(request, Spot,
                                   additional_params={'tol': '0.005'})
    request.params['bbox'] = \
        '-12.3364241712925,-10.0036833569465,7.66304367998925,9.9979519038951'
    request.params['epsg'] = '2210'
    request.params['tolerance'] = '0'
    eq_(proto.count(request, filter=filter), '5')
    # dimension information array for 54004
    # see http://download.oracle.com/docs/cd/E11882_01/appdev.112/e11830/sdo_objrelschema.htm#i1010905
    diminfo = "MDSYS.SDO_DIM_ARRAY("\
              "MDSYS.SDO_DIM_ELEMENT('LONGITUDE', -20037508, 20037508, 0.005),"\
              "MDSYS.SDO_DIM_ELEMENT('LATITUDE', -19929239, 19929239, 0.005)"\
              ")"
    request.params['bbox'] = \
        '-975862.822682856,-999308.345117013,1027887.98627823,999373.702609189'
    request.params['epsg'] = '54004'  # Oracles SRID number for World Mercator
    filter = create_default_filter(request, Spot,
                                   additional_params={'dim1': text(diminfo),
                                                      'dim2': text(diminfo)})
    eq_(proto.count(request, filter=filter), '3')
def update_attributes(self, username, hostname, user_attrs):
    """Change the attributes of one existing user.

    Renames the user and/or resets the password, then re-grants database
    access under the new identity when it is not already a grantee.
    """
    LOG.debug("Changing the user attributes")
    LOG.debug("User is %s" % username)
    user = self._get_user(username, hostname)
    db_access = set()
    grantee = set()
    with LocalSqlClient(get_engine()) as client:
        # Collect every non-USAGE grant so access can be re-granted after
        # a rename.
        q = sql_query.Query()
        q.columns = ["grantee", "table_schema"]
        q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
        q.group = ["grantee", "table_schema"]
        q.where = ["privilege_type != 'USAGE'"]
        t = text(str(q))
        db_result = client.execute(t)
        for db in db_result:
            grantee.add(db["grantee"])
            if db["grantee"] == "'%s'@'%s'" % (user.name, user.host):
                db_name = db["table_schema"]
                db_access.add(db_name)
    with LocalSqlClient(get_engine()) as client:
        uu = sql_query.UpdateUser(
            user.name,
            host=user.host,
            clear=user_attrs.get("password"),
            new_user=user_attrs.get("name"),
            new_host=user_attrs.get("host"),
        )
        t = text(str(uu))
        client.execute(t)
        uname = user_attrs.get("name") or username
        host = user_attrs.get("host") or hostname
        # FIX: restore the scrubbed/corrupted literal ("******") to the
        # "'user'@'host'" format used for the grantee entries above.
        find_user = "'%s'@'%s'" % (uname, host)
        if find_user not in grantee:
            self.grant_access(uname, host, db_access)
def upgrade():
    """Create the 'User' and 'UserAttribute' tables.

    Timestamps default to CURRENT_TIMESTAMP on the server side;
    'is_deleted' doubles as a soft-delete marker (NULL = not deleted).
    """
    op.create_table(
        'User',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('is_created', sa.TIMESTAMP(),
                  server_default=text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('is_updated', sa.TIMESTAMP(),
                  server_default=text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('is_deleted', sa.TIMESTAMP(), nullable=True),
        # Keep SQLite's rowid behavior monotonic.
        sqlite_autoincrement=True,
    )
    op.create_table(
        'UserAttribute',
        # One-to-one with 'User': the user id is also this table's PK.
        sa.Column('user_id', sa.Integer, primary_key=True),
        sa.Column('is_created', sa.TIMESTAMP(),
                  server_default=text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('is_updated', sa.TIMESTAMP(),
                  server_default=text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('name', sa.Unicode, default=u''),
        sa.Column('email', sa.Unicode, default=u''),
        sa.Column('password', sa.Unicode, default=u''),
        sa.Column('first_name', sa.Unicode, default=u''),
        sa.Column('middle_name', sa.Unicode, default=u''),
        sa.Column('last_name', sa.Unicode, default=u''),
        sa.Column('address', sa.Unicode, default=u''),
        sa.Column('birth_date', sa.DateTime, nullable=True),
        sa.Column('contact', sa.Unicode, default=u''),
    )
def cluster_legs(limit):
    """New leg ends and places are clustered live by triggers; this can be
    used to cluster data created earlier."""
    print("cluster_legs up to", limit)
    with db.engine.begin() as txn:
        # Run both clustering procedures in the same transaction.
        for procedure in ("legs_cluster", "leg_ends_cluster"):
            txn.execute(text("SELECT %s(:limit)" % procedure), limit=limit)
def upgrade(migrate_engine):
    """Add the boolean COLUMN_NAME column to TABLE_NAME, defaulting to
    false on the server side."""
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(TABLE_NAME, meta, autoload=True)
    # SQLite has no native boolean literal, so its default is '0'.
    if migrate_engine.name == 'sqlite':
        default = expression.text('0')
    else:
        default = expression.text('false')
    table.create_column(
        Column(COLUMN_NAME, Boolean, server_default=default))
def has_table(self, connection, table_name, schema=None):
    """Return True when `table_name` exists under `schema`, consulting the
    Teradata dbc.tablesvx dictionary view."""
    owner = self.default_schema_name if schema is None else schema
    stmt = (
        select([column('tablename')], from_obj=[text('dbc.tablesvx')])
        .where(and_(text('creatorname=:user'),
                    text('tablename=:name')))
    )
    row = connection.execute(stmt, user=owner, name=table_name).fetchone()
    return row is not None
def get_table_names(self, connection, schema=None, **kw):
    """Return normalized names of tables (kind 'T') and join indexes/objects
    (kind 'O') owned by `schema` in dbc.TablesVX."""
    owner = self.default_schema_name if schema is None else schema
    stmt = (
        select([column('tablename')], from_obj=[text('dbc.TablesVX')])
        .where(and_(text('creatorname = :user'),
                    or_(text('tablekind=\'T\''),
                        text('tablekind=\'O\''))))
    )
    rows = connection.execute(stmt, user=owner).fetchall()
    return [self.normalize_name(row['tablename']) for row in rows]
def __delete_mysql_resource(self, database_name, user_name): bind = metadata.bind # Creates a connection with permission to create users and databases metadata.bind = self.__generate_connection_uri(self.__root_user, self.__root_password) # Generate data and queries sql_delete_database = "DROP DATABASE `:database`;" sql_delete_user = "******" # Perform the queries text(sql_delete_database, metadata.bind).execute(database = database_name) text(sql_delete_user, metadata.bind).execute(user = user_name) # Restores the old database_name metadata.bind = bind
def drop_all_tables_and_sequences():
    """Drop every table and sequence in the database with CASCADE.

    Failures on individual objects are printed and skipped so one
    undroppable object does not abort the whole cleanup.
    """
    for table in get_table_list_from_db():
        try:
            execute(text("DROP TABLE %s CASCADE" % table))
        except SQLAlchemyError as e:
            # BUG FIX: `except (SQLAlchemyError, e)` referenced an unbound
            # name and raised NameError instead of handling the error.
            print(e)
    for seq in get_seq_list_from_db():
        try:
            # BUG FIX: the original dropped `table` (the stale loop variable
            # from above) instead of the sequence being iterated.
            execute(text("DROP SEQUENCE %s CASCADE" % seq))
        except SQLAlchemyError as e:
            print(e)
def _get_select_waypoints_for_routes():
    """Build a CTE of (route_id, waypoint_id) pairs covering each route's
    directly linked waypoints plus their parent and grandparent waypoints."""
    waypoint_type = text('\'' + WAYPOINT_TYPE + '\'')
    route_type = text('\'' + ROUTE_TYPE + '\'')
    # Waypoints directly associated to a route (waypoint is the parent,
    # route is the child in the association).
    select_linked_waypoints = \
        select([
            Association.child_document_id.label('route_id'),
            Association.parent_document_id.label('waypoint_id')
        ]). \
        where(
            and_(
                Association.parent_document_type == waypoint_type,
                Association.child_document_type == route_type)). \
        cte('linked_waypoints')

    # One level up: waypoints that are parents of the linked waypoints.
    select_waypoint_parents = \
        select([
            select_linked_waypoints.c.route_id,
            Association.parent_document_id.label('waypoint_id')
        ]). \
        select_from(join(
            select_linked_waypoints,
            Association,
            and_(
                Association.child_document_id ==
                select_linked_waypoints.c.waypoint_id,
                Association.parent_document_type == waypoint_type
            ))). \
        cte('waypoint_parents')

    # Two levels up: parents of the parents.
    select_waypoint_grandparents = \
        select([
            select_waypoint_parents.c.route_id,
            Association.parent_document_id.label('waypoint_id')
        ]). \
        select_from(join(
            select_waypoint_parents,
            Association,
            and_(
                Association.child_document_id ==
                select_waypoint_parents.c.waypoint_id,
                Association.parent_document_type == waypoint_type
            ))). \
        cte('waypoint_grandparents')

    # Union of all three levels, exposed as a single CTE.
    return union(
        select_linked_waypoints.select(),
        select_waypoint_parents.select(),
        select_waypoint_grandparents.select()
    ). \
        cte('all_waypoints')
def generate_distance_data():
    """Refresh per-user travelled distances up to last midnight, then fill
    in rankings for any dates that still lack them."""
    user_ids = db.engine.execute(text("SELECT id FROM users;"))
    last_midnight = datetime.datetime.now().replace(
        hour=0, minute=0, second=0, microsecond=0)
    for id_row in user_ids:
        # Resume from the day after the user's last recorded distance row.
        time = get_max_time_from_table(
            "time", "travelled_distances",
            "user_id", id_row["id"]) + timedelta(days=1)
        update_user_distances(id_row["id"], time, last_midnight, False)
    # update rankings based on ratings
    query = text("""
        SELECT DISTINCT time FROM travelled_distances
        WHERE ranking IS NULL
        AND total_distance IS NOT NULL""")
    for row in db.engine.execute(query):
        generate_rankings(row[0])
def delete_table_keys(table):
    """Remove the CRUD permission entries for `table` from the content
    tables, then unregister the table from the SQLAlchemy metadata."""
    #clean_delete_table(table)
    for action in ["Create", "Select", "Update", "Delete"]:
        init_key = action + " " + str(table).title()
        delete_roles = text(
            "DELETE FROM content_roles WHERE content_base_id IN"
            " (SELECT id FROM content_base WHERE name = '{0}');"
            .format(init_key))
        delete_base = text(
            "DELETE FROM content_base WHERE content_base.id IN"
            " (SELECT id FROM content_base WHERE name = '{0}');"
            .format(init_key))
        _execute(delete_roles)
        _execute(delete_base)
    Base.metadata._remove_table(table, 'public')
def get_user_id(email=None, session_id=None):
    """ Helper function that returns the user_id for a given email address """
    def _lookup(sql, param):
        # Run the scalar-returning stored function and unwrap the result.
        result = db.session.execute(text(sql, bindparams=[param]))
        return result.first()[0]

    if email is not None:
        return _lookup("SELECT aaa.get_user_id_by_email(:email)",
                       bindparam('email', email))
    if session_id is not None:
        return _lookup("SELECT aaa.get_user_id_by_session_id(:session)",
                       bindparam('session', session_id))
    return None
def list_users(self, limit=None, marker=None, include_marker=False):
    """List users that have access to the database"""
    LOG.debug(_("---Listing Users---"))
    users = []
    client = LocalSqlClient(get_engine())
    with client:
        mysql_user = models.MySQLUser()
        q = Query()
        q.columns = ['User']
        q.tables = ['mysql.user']
        q.where = ["host != 'localhost'"]
        q.order = ['User']
        if marker:
            q.where.append("User %s '%s'" %
                           (INCLUDE_MARKER_OPERATORS[include_marker],
                            marker))
        if limit:
            # Fetch one extra row to know whether more pages exist.
            q.limit = limit + 1
        t = text(str(q))
        result = client.execute(t)
        next_marker = None
        LOG.debug("result = " + str(result))
        for count, row in enumerate(result):
            if count >= limit:
                break
            # NOTE(review): this section was corrupted in the source
            # ("******"); reconstructed from the sibling list_users
            # implementations — confirm against version history.
            LOG.debug("user = " + str(row))
            mysql_user = models.MySQLUser()
            mysql_user.name = row['User']
            next_marker = row['User']
            # Find the schemas this user holds privileges on.
            t = text("""SELECT grantee, table_schema
                        from information_schema.SCHEMA_PRIVILEGES
                        group by grantee, table_schema;""")
            db_result = client.execute(t)
            for db in db_result:
                matches = re.match("^'(.+)'@", db['grantee'])
                if (matches is not None and
                        matches.group(1) == mysql_user.name):
                    mysql_db = models.MySQLDatabase()
                    mysql_db.name = db['table_schema']
                    mysql_user.databases.append(mysql_db.serialize())
            users.append(mysql_user.serialize())
        if result.rowcount <= limit:
            next_marker = None
    LOG.debug("users = " + str(users))
    return users, next_marker
def _generate_root_password(client):
    """Generate and set a random root password and forget about it."""
    update_stmt = sql_query.UpdateUser(
        "root",
        host="localhost",
        clear=utils.generate_random_password())
    client.execute(text(str(update_stmt)))
def test_pk_default(self): Table( 'simple_items', self.metadata, Column('id', INTEGER, primary_key=True, server_default=text('uuid_generate_v4()')) ) assert self.generate_code() == """\
def list_users(self, limit=None, marker=None, include_marker=False):
    """List users that have access to the database."""
    '''
    SELECT User, Host, Marker FROM
        (SELECT User, Host, CONCAT(User, '@', Host) as Marker
         FROM mysql.user ORDER BY 1, 2) as innerquery
    WHERE Marker > :marker
    ORDER BY Marker
    LIMIT :limit;
    '''
    LOG.debug("---Listing Users---")
    users = []
    with self.local_sql_client(self.mysql_app.get_engine()) as client:
        mysql_user = models.MySQLUser()
        iq = sql_query.Query()  # Inner query.
        iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
        iq.tables = ['mysql.user']
        iq.order = ['User', 'Host']
        innerquery = str(iq).rstrip(';')

        oq = sql_query.Query()  # Outer query.
        oq.columns = ['User', 'Host', 'Marker']
        oq.tables = ['(%s) as innerquery' % innerquery]
        oq.where = ["Host != 'localhost'"]
        oq.order = ['Marker']
        if marker:
            oq.where.append("Marker %s '%s'" %
                            (INCLUDE_MARKER_OPERATORS[include_marker],
                             marker))
        if limit:
            # Fetch one extra row to detect whether more pages exist.
            oq.limit = limit + 1
        t = text(str(oq))
        result = client.execute(t)
        next_marker = None
        LOG.debug("result = " + str(result))
        for count, row in enumerate(result):
            if count >= limit:
                break
            # NOTE(review): this section was corrupted in the source
            # ("******"); reconstructed from the sibling implementation —
            # confirm against version history.
            LOG.debug("user = " + str(row))
            mysql_user = models.MySQLUser()
            mysql_user.name = row['User']
            mysql_user.host = row['Host']
            self._associate_dbs(mysql_user)
            next_marker = row['Marker']
            users.append(mysql_user.serialize())
        if result.rowcount <= limit:
            next_marker = None
    LOG.debug("users = " + str(users))
    return users, next_marker
def change_passwords(self, users):
    """Change the passwords of one or more existing users."""
    LOG.debug("Changing the password of some users.")
    with LocalSqlClient(get_engine()) as client:
        for entry in users:
            LOG.debug("Changing password for user %s." % entry)
            user = models.MySQLUser()
            user.deserialize({
                '_name': entry['name'],
                '_host': entry['host'],
                '_password': entry['password'],
            })
            LOG.debug("\tDeserialized: %s." % user.__dict__)
            update_stmt = sql_query.UpdateUser(user.name,
                                               host=user.host,
                                               clear=user.password)
            client.execute(text(str(update_stmt)))
def GetRolesForUser(self, userName):
    '''
    Get all the roles that the user, with the given username, belongs to.
    '''
    sql = (
        "select rolname from pg_user "
        "join pg_auth_members on (pg_user.usesysid=pg_auth_members.member) "
        "join pg_roles on (pg_roles.oid=pg_auth_members.roleid) "
        "where pg_user.usename=:uname"
    )
    result = self._execute(text(sql), uname=userName)
    # Collect the role name from each row of the result proxy.
    return [row["rolname"] for row in result]
def CreateRole(self, roleName, description='', grantSchema='public'):
    '''
    Create a new role with CREATEROLE, optionally commented with
    `description`, and grant it all privileges on every table in
    `grantSchema`.
    '''
    sql = []
    sql.append("CREATE ROLE %s CREATEROLE;" % (roleName,))
    if description != "":
        # BUG FIX: a single quote inside the description previously broke
        # the COMMENT statement; escape SQL-style by doubling quotes.
        safe_description = description.replace("'", "''")
        sql.append("COMMENT ON ROLE %s is '%s';"
                   % (roleName, safe_description))
    '''
    Grant privileges to the new role so that users in this role can be able
    to access the tables and relations. The specified schema will have all
    the tables and relations granted with all privileges.
    '''
    sql.append("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s TO %s;"
               % (grantSchema, roleName))
    sqlStr = ''.join(sql)
    t = text(sqlStr)
    self._execute(t)
def GetUsersInRole(self, roleName):
    '''
    Get all users in the given role
    '''
    sql = (
        "select usename from pg_user "
        "join pg_auth_members on (pg_user.usesysid = pg_auth_members.member) "
        "join pg_roles on (pg_roles.oid = pg_auth_members.roleid) "
        "where pg_roles.rolname = :rlname"
    )
    result = self._execute(text(sql), rlname=roleName)
    # Collect the user name from each row of the result proxy.
    return [row["usename"] for row in result]
def seed_id(cls):  # pylint:disable=no-self-argument
    '''returns data_seed_id if the latter is not None, else
    net.sta.loc.cha by querying the relative channel and station'''
    # Needed note: To know what we are doing in 'sel' below, please look:
    # http://docs.sqlalchemy.org/en/latest/orm/extensions/hybrid.html#correlated-subquery-relationship-hybrid
    # Notes
    # - we use limit(1) cause we might get more than one
    # result. Regardless of why it happens (because we don't join or apply
    # a distinct?) it is relevant for us to get the first result which has
    # the requested network+station and location + channel strings
    # - the label(...) at the end makes all the difference. The doc is, as
    # always, unclear
    # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.label
    dot = text("'.'")
    # Correlated subquery producing "net.sta.loc.cha" for this row's channel.
    sel = select([concat(Station.network, dot, Station.station, dot,
                         Channel.location, dot, Channel.channel)]).\
        where((Channel.id == cls.channel_id) &
              (Station.id == Channel.station_id)).limit(1).\
        label('seedidentifier')
    # Prefer the explicitly stored data_seed_id when present.
    return case([(cls.data_seed_id.isnot(None), cls.data_seed_id)],
                else_=sel)
class DatasetFeature(BaseModel):
    """A feature tied to a specific dataset.

    Has a pixel geometry and incorporates elevation data.
    """
    __tablename__ = "dataset_feature"
    id = Column(Integer, primary_key=True)
    # Polymorphic discriminator column
    type = Column(String(64))
    geometry = Column(Geometry(srid=SRID))
    date_created = Column(DateTime, server_default=text("now()"),
                          nullable=False)

    # GeoJSON-style views of the stored geometry.
    mapping = property(lambda self: self.__geo_interface__)
    shape = property(lambda self: to_shape(self.geometry))

    dataset = relationship("Dataset", backref='features')

    @property
    def __geo_interface__(self):
        """Return this feature as a GeoJSON Feature mapping."""
        return dict(
            type="Feature",
            geometry=mapping(to_shape(self.geometry)))

    # 'class' is reserved in Python, hence the underscored attribute name.
    _class = Column("class", String, ForeignKey('feature_class.id'))
    dataset_id = Column(String(64), ForeignKey('dataset.id'))
    # 2-D array of extracted values (presumably elevation samples along the
    # geometry — confirm against the extract module).
    extracted = Column(ARRAY(Float, dimensions=2, zero_indexes=True))

    # Column to track whether the dataset_id
    # was set using a script or user-specified
    dataset_id_autoset = Column(Boolean, default=False, nullable=False,
                                server_default="0")

    # Bind the extraction routine as a method of this class.
    from .extract import extract

    def __init__(self, *args, **kwargs):
        # NOTE(review): args/kwargs are ignored and super().__init__ is not
        # called, so keyword construction of columns does not work here —
        # confirm this is intentional.
        self.extract()

    @property
    def array(self):
        """Extracted values as a NumPy array."""
        return N.array(self.extracted)

    @property
    def length(self):
        """Geometry length computed server-side via ST_Length."""
        return db.session.scalar(
            func.ST_Length(self.geometry))
def drop_cascade_table(table_name):
    """
    Safely deletes the table with the specified name using the CASCADE
    option.
    :param table_name: Name of the database table.
    :type table_name: str
    :return: Returns True if the operation succeeded. otherwise False.
    :rtype: bool
    """
    statement = text(u'DROP TABLE IF EXISTS {0} CASCADE;'.format(table_name))
    try:
        _execute(statement)
    # Error if the current user is not the owner.
    except SQLAlchemyError:
        return False
    return True
def get_stats(tid):
    """Return all non-bye match rows (with both players' names and side
    ids) for tournament `tid`."""
    stats_sql = text(
        """select match.id, match.corp_id, match.runner_id,
        match.corp_score, match.runner_score, corp_plr.p_name,
        corp_plr.corp_id, corp_plr.is_bye, runner_plr.p_name,
        runner_plr.runner_id, runner_plr.is_bye
        FROM match
        LEFT join player corp_plr ON match.corp_id = corp_plr.id
        LEFT JOIN player runner_plr ON match.runner_id = runner_plr.id
        WHERE corp_plr.is_bye = false
        AND runner_plr.is_bye = false
        AND match.tid = :tid""")
    db = get_db()
    with db.begin() as conn:
        return conn.execute(stats_sql, {"tid": tid}).fetchall()
def put(self, taxi_id):
    """Update a taxi's status (and related hail state) from the request
    payload, refresh caches, and return the marshalled taxi."""
    parser = DataJSONParser()
    t, last_update_at = self.get_descriptions(taxi_id)
    new_status = parser.get_data()[0]['status']
    # Only write when the status changed, or the taxi has never been
    # updated, or the last update is stale (older than 4 hours).
    if new_status != t[0]['vehicle_description_status'] or\
       t[0]['taxi_last_update_at'] is None or\
       t[0]['taxi_last_update_at'] <= (datetime.now() - timedelta(hours=4)):
        # Raw DBAPI cursor: these UPDATEs bypass the ORM deliberately.
        cur = current_app.extensions['sqlalchemy'].db.session.\
            connection().connection.cursor()
        cur.execute("UPDATE vehicle_description SET status=%s WHERE id=%s",
                    (new_status, t[0]['vehicle_description_id']))
        # Accumulate SET clauses and their parameters for the taxi update.
        to_set = ['last_update_at = %s', [datetime.now()]]
        if t[0]['taxi_current_hail_id']:
            hail = models.Hail.query.from_statement(
                text("SELECT * from hail where id=:hail_id")).params(
                hail_id=t[0]['taxi_current_hail_id']).one()
            # Derive the hail transition implied by the new taxi status.
            hail_status, current_hail_id = models.Taxi.get_new_hail_status(
                hail.id, new_status, hail._status)
            if hail_status:
                hail.status = hail_status
                to_set[0] += ", current_hail_id = %s"
                to_set[1].append(current_hail_id)
        query = "UPDATE taxi SET {} WHERE id = %s".format(to_set[0])
        cur.execute(query, (to_set[1] + [t[0]['taxi_id']]))
        current_app.extensions['sqlalchemy'].db.session.commit()
        # Invalidate the cached raw-taxi view after the direct SQL writes.
        models.RawTaxi.flush(taxi_id)
        t[0]['vehicle_description_status'] = new_status
    taxi_id_operator = "{}:{}".format(taxi_id, current_user.email)
    # Maintain the "not available" sorted set in redis per operator.
    if t[0]['vehicle_description_status'] == 'free':
        redis_store.zrem(current_app.config['REDIS_NOT_AVAILABLE'],
                         taxi_id_operator)
    else:
        redis_store.zadd(current_app.config['REDIS_NOT_AVAILABLE'],
                         0., taxi_id_operator)
    taxi_m = marshal(
        {
            'data': [models.RawTaxi.generate_dict(
                t, operator=current_user.email)]
        }, taxi_model)
    taxi_m['data'][0]['operator'] = current_user.email
    taxi_m['data'][0]['last_update'] = last_update_at
    return taxi_m
def get_sample_values_fields(db_engine: Engine, metadata: MetaData,
                             table: str, fields: list, sampling: int = 0):
    """Return value tuples for `fields` from `table`, using TABLESAMPLE at
    `sampling` percent when 0 < sampling < 100, otherwise the full table."""
    tb = metadata.tables.get(table)
    if 0 < sampling < 100:
        sampled = tb.tablesample(sampling, seed=text('{}'.format(SEED)))
        query = select([sampled.columns[f] for f in fields]) \
            .select_from(sampled)
    else:
        query = select([tb.columns.get(f) for f in fields]).select_from(tb)
    res: ResultProxy = db_engine.execute(query)
    values = [tuple(row.values()) for row in res]
    res.close()
    return values
def _generate_root_password(client):
    """Generate, set, and preserve a random password
    for root@localhost when invoking mysqladmin to
    determine the execution status of the mysql service.
    """
    localhost = "localhost"
    new_password = utils.generate_random_password()
    uu = sql_query.SetPassword(
        models.MySQLUser.root_username, host=localhost,
        new_password=new_password)
    t = text(str(uu))
    client.execute(t)

    # Save the password to root's private .my.cnf file
    # FIX: the user value was a scrubbed placeholder ('******') in the
    # source; it must be 'root' — the account whose password was just set
    # and whose /root/.my.cnf this writes.
    root_sect = {'client': {'user': 'root',
                            'password': new_password,
                            'host': localhost}}
    operating_system.write_file('/root/.my.cnf',
                                root_sect, codec=IniCodec(), as_root=True)
class Account(db.Model, UserMixin):
    """User account model (Flask-Security compatible via UserMixin)."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    password = db.Column(db.String(128), nullable=False)
    # doc says: random code used during password verification for extra security
    code = db.Column(UUIDType(), doc='随机码,用于密码校验,提升安全性', nullable=False)
    # doc says: only activated users may use the site
    active = db.Column(db.Boolean, default=True,
                       doc='只有激活状态的用户才能正常使用本站', nullable=False)
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('account', lazy='dynamic'))
    # Creation timestamp assigned by the database server.
    date_joined = db.Column(db.DateTime, server_default=text('NOW()'),
                            nullable=False)
def grant_access(self, username, hostname, databases):
    """Grant a user permission to use a given database."""
    user = self._get_user(username, hostname)
    validated_db = models.ValidatedMySQLDatabase()
    with LocalSqlClient(get_engine()) as client:
        for db_name in databases:
            # Assigning the name runs model-level validation; invalid
            # names are reported back to the caller as a bad request.
            try:
                validated_db.name = db_name
            except ValueError:
                raise exception.BadRequest(
                    _("Grant access to %s is not allowed") % db_name)
            grant = sql_query.Grant(permissions='ALL',
                                    database=validated_db.name,
                                    user=user.name,
                                    host=user.host,
                                    hashed=user.password)
            client.execute(text(str(grant)))
def load_int_sbac_asmt(self):
    """Load INT_SBAC_ASMT_DELETE.csv into the int_sbac_asmt staging table."""
    table = 'int_sbac_asmt'
    csv_path = os.path.join(self.data_dir, 'INT_SBAC_ASMT_DELETE.csv')
    with open(csv_path) as csv_file, get_udl_connection() as conn:
        reader = csv.reader(csv_file, delimiter=',', quoting=csv.QUOTE_ALL)
        header = next(reader)
        header.insert(0, 'record_sid')
        schema = self.udl2_conf['udl2_db_conn']['db_schema']
        for record in reader:
            # set record_sid = 7 in this func test
            record.insert(0, str(7))
            columns, values, params = self.generate_insert_items(header,
                                                                 record)
            stmt = text(
                self.insert_sql.format(staging_schema=schema,
                                       staging_table=table,
                                       columns_string=", ".join(columns),
                                       value_string=", ".join(values)),
                bindparams=params)
            conn.execute(stmt)
def get_selects(self):
    """
    Constructs select queries for this aggregation

    Returns: a dictionary of group : queries pairs where
        group are the same keys as groups
        queries is a list of Select queries, one for each date in dates
    """
    queries = {}
    for group, groupby in self.groups.items():
        intervals = self.intervals[group]
        queries[group] = []
        # One SELECT per (group, date) pair.
        for date in self.dates:
            # The as-of date is emitted as a literal column so every row
            # carries the date it was aggregated for.
            columns = [
                groupby,
                ex.literal_column("'%s'::date" % date).label(
                    self.output_date_column),
            ]
            # Flatten the per-interval aggregate expressions into one list.
            columns += list(
                chain(*[
                    self._get_aggregates_sql(i, date, group)
                    for i in intervals
                ]))
            gb_clause = make_sql_clause(groupby, ex.literal_column)
            if self.join_with_cohort_table:
                # Restrict from_obj to entities present in the cohort
                # (state) table for this as-of date.
                from_obj = ex.text(
                    f"(select from_obj.* from ("
                    f"(select * from {self.from_obj}) from_obj join {self.state_table} cohort on ( "
                    "cohort.entity_id = from_obj.entity_id and "
                    f"cohort.{self.output_date_column} = '{date}'::date)"
                    ")) cohorted_from_obj")
            else:
                from_obj = self.from_obj
            query = ex.select(columns=columns, from_obj=from_obj).group_by(
                gb_clause)
            query = query.where(self.where(date, intervals))
            queries[group].append(query)
    return queries
def insert(source):
    """Copy one payment-transaction record from *source* into ar_payment_trans.

    ``posted`` is hard-coded to 0 in the statement; ``created`` and
    ``create_uid`` are filled in here rather than taken from *source*.
    """
    stmt = text("""INSERT INTO {schema}.ar_payment_trans(unit_kd, kode, created, create_uid,
                  nama, alamat, tahun, nilai, rekening_kd, rekening_nm, ref_kode,
                  ref_nama, tgl_trans, kecamatan_kd, kecamatan_nm, kelurahan_kd,
                  kelurahan_nm, is_kota, sumber_nm, sumber_id, posted, pokok, denda,
                  bunga, jth_tempo, npwpd)
                  VALUES(:unit_kd, :kode, :created, :create_uid, :nama, :alamat,
                  :tahun, :nilai, :rekening_kd, :rekening_nm, :ref_kode, :ref_nama,
                  :tgl_trans, :kecamatan_kd, :kecamatan_nm, :kelurahan_kd,
                  :kelurahan_nm, :is_kota, :sumber_nm, :sumber_id, 0, :pokok,
                  :denda, :bunga, :jth_tempo, :npwpd)""".format(schema=schema))
    # Every bind parameter except created/create_uid comes verbatim from
    # the source record.
    params = {name: source[name] for name in (
        'unit_kd', 'kode', 'nama', 'alamat', 'tahun', 'nilai',
        'rekening_kd', 'rekening_nm', 'ref_kode', 'ref_nama', 'tgl_trans',
        'kecamatan_kd', 'kecamatan_nm', 'kelurahan_kd', 'kelurahan_nm',
        'is_kota', 'sumber_nm', 'sumber_id', 'pokok', 'denda', 'bunga',
        'jth_tempo', 'npwpd')}
    eng_dst.execute(stmt, created=datetime.now(), create_uid=1, **params)
def _migrate_table(session, orm_class):
    """Add any ORM-declared columns missing from orm_class's table.

    Each mapped column is probed with a trivial SELECT; an
    OperationalError means the column does not exist yet, so an
    ``ALTER TABLE ... ADD COLUMN`` is issued with the column's declared
    type and default.  Commits at the end only if something changed.
    """
    changed = False
    for column_name, column in orm_class.__dict__.items():
        # Skip private/dunder attributes; the rest are treated as
        # mapped columns.
        if column_name[0] != '_':
            try:
                session.query(column).first()
            except OperationalError as err:
                log.debug("%s: %s", column_name, err.args[0])
                if column.default is not None:
                    if sys.version_info < (3, 0):
                        # Python 2: encode unicode defaults as UTF-8 bytes.
                        if isinstance(column.default.arg, unicode):
                            column.default.arg = column.default.arg.encode(
                                'utf-8')
                if column.default is None:
                    column_default = ""
                else:
                    if isinstance(column.default.arg, bool):
                        # Booleans are emitted as 0/1.
                        column_default = "DEFAULT {}".format(
                            int(column.default.arg))
                    else:
                        column_default = "DEFAULT `{}`".format(
                            column.default.arg)
                if isinstance(column.type, JSON):
                    column_type = "JSON"
                else:
                    column_type = column.type
                alter_table = text("ALTER TABLE %s ADD COLUMN `%s` %s %s" % (
                    orm_class.__tablename__,
                    column_name,
                    column_type,
                    column_default))
                log.debug(alter_table)
                session.execute(alter_table)
                changed = True
            except json.decoder.JSONDecodeError as e:
                # Existing column holds unparseable JSON; log and continue.
                log.error("Database corrupt column: {}".format(column_name))
                log.debug(e)
    if changed:
        try:
            session.commit()
        except OperationalError:
            session.rollback()
def migrate():
    """One-off SQLite schema migrations.

    Creates the permissions_added table if missing, and rebuilds
    gdrive_ids so its UNIQUE constraint covers (gdrive_id, path) instead
    of gdrive_id alone.  SQLite cannot alter constraints in place, so
    the table is recreated under a temporary name, copied, dropped and
    renamed.
    """
    if not engine.dialect.has_table(engine.connect(), "permissions_added"):
        PermissionAdded.__table__.create(bind=engine)
    # Inspect the DDL SQLite recorded for each table.
    for sql in session.execute(
            text("select sql from sqlite_master where type='table'")):
        if 'CREATE TABLE gdrive_ids' in sql[0]:
            currUniqueConstraint = 'UNIQUE (gdrive_id)'
            if currUniqueConstraint in sql[0]:
                # Recreate the table with the widened constraint under a
                # "2"-suffixed name, copy the rows, then swap it in.
                sql = sql[0].replace(currUniqueConstraint,
                                     'UNIQUE (gdrive_id, path)')
                sql = sql.replace(GdriveId.__tablename__,
                                  GdriveId.__tablename__ + '2')
                session.execute(sql)
                session.execute(
                    "INSERT INTO gdrive_ids2 (id, gdrive_id, path) SELECT id, "
                    "gdrive_id, path FROM gdrive_ids;")
                session.commit()
                session.execute('DROP TABLE %s' % 'gdrive_ids')
                session.execute('ALTER TABLE gdrive_ids2 RENAME to gdrive_ids')
            break
def view_details(self, view):
    """
    Gets the view definition/query used to create it.
    :param view: The name of the view.
    :type view: String
    :return: The definition/query.
    :rtype: String
    """
    if view not in pg_views():
        # Unknown view: implicitly return None, as before.
        return None
    stmt = text('SELECT definition '
                'FROM pg_views '
                'WHERE viewname=:view_name;')
    rows = [row[0] for row in _execute(stmt, view_name=view)]
    return rows[0]
class UserDB(db.Model):
    """ORM model for application users (public.user table).

    Usernames are normalized (stripped and lower-cased) through the
    ``name_user`` hybrid property before being stored.
    """
    __tablename__ = "user"
    __table_args__ = (UniqueConstraint("name_user", name="ix_name_user"), {
        "schema": "public"
    })
    id_user = Column("id_user", INTEGER, Sequence("user_id", 1, 1),
                     primary_key=True)
    # Backing column for the name_user hybrid property below.
    _name_user = Column("name_user", VARCHAR, unique=True, nullable=False)
    # Bcrypt hash is 60 characters maximum in length.
    hash = Column(CHAR(60), nullable=True, server_default=text("NULL"))
    is_active = Column(BOOLEAN, nullable=False, server_default=text("TRUE"))
    is_verified = Column(BOOLEAN, nullable=False,
                         server_default=text("FALSE"))
    is_resetforced = Column(BOOLEAN, nullable=False,
                            server_default=text("FALSE"))
    is_resetrequested = Column(BOOLEAN, nullable=False,
                               server_default=text("FALSE"))
    timestamp_create = Column(TIMESTAMP, nullable=False,
                              server_default=text("CURRENT_TIMESTAMP"))
    contact = relationship("UserContact",
                           backref=backref("user", uselist=False))
    record_usernotification = relationship("UserNotification",
                                           backref=backref("user"))
    record_userlogin = relationship("UserLogin", backref=backref("user"))

    @hybrid_property
    def name_user(self):
        return self._name_user

    @name_user.setter
    def name_user(self, value):
        # Canonical lower-case form; empty values are stored as None.
        if value:
            self._name_user = value.strip().lower()
        else:
            self._name_user = None

    def __init__(self, name_user, **kwargs):
        self.name_user = name_user
        # dict.get replaces the original try/except KeyError; both yield
        # None when no hash is supplied.
        self.hash = kwargs.get("hash")
def get_columns(self, connection, table_name, schema=None, **kw):
    """Return column metadata for table_name from the Teradata dictionary.

    Uses dbc.ColumnsV on pre-16 servers and dbc.ColumnsQV on 16+.  On
    pre-16 servers the dictionary rows for a view lack type detail, so
    each column is re-resolved individually via self._get_column_help.
    """
    helpView = False

    if schema is None:
        schema = self.default_schema_name

    if int(self.server_version_info.split('.')[0]) < 16:
        dbc_columninfo = 'dbc.ColumnsV'

        # Check if the object is a view
        stmt = select([column('tablekind')],
                      from_obj=[text('dbc.tablesV')]).where(
            and_(text('DatabaseName=:schema'),
                 text('TableName=:table_name'),
                 text("tablekind='V'")))
        res = connection.execute(stmt, schema=schema,
                                 table_name=table_name).rowcount
        helpView = (res == 1)
    else:
        dbc_columninfo = 'dbc.ColumnsQV'

    stmt = select([column('columnname'), column('columntype'),
                   column('columnlength'), column('chartype'),
                   column('decimaltotaldigits'),
                   column('decimalfractionaldigits'),
                   column('columnformat'),
                   column('nullable'), column('defaultvalue'),
                   column('idcoltype')],
                  from_obj=[text(dbc_columninfo)]).where(
        and_(text('DatabaseName=:schema'),
             text('TableName=:table_name')))
    res = connection.execute(stmt, schema=schema,
                             table_name=table_name).fetchall()

    # If this is a view in a pre-16 version, get types for individual columns
    if helpView:
        res = [
            self._get_column_help(connection, schema, table_name,
                                  r['columnname']) for r in res
        ]

    return [self._get_column_info(row) for row in res]
def change_passwords(self, users):
    """Change the passwords of one or more existing users."""
    LOG.debug("Changing the password of some users.")
    engine = self.mysql_app.get_engine()
    with mysql_util.SqlClient(engine, use_flush=True) as client:
        for entry in users:
            LOG.debug("Changing password for user %s.", entry)
            # Round-trip through the model so name/host/password are
            # validated before building the SQL.
            user = models.MySQLUser.deserialize({
                '_name': entry['name'],
                '_host': entry['host'],
                '_password': entry['password'],
            })
            set_pwd = sql_query.SetPassword(
                user.name,
                host=user.host,
                new_password=user.password,
                ds=CONF.datastore_manager,
                ds_version=CONF.datastore_version)
            client.execute(text(str(set_pwd)))
def geometryType(tableName, spatialColumnName, schemaName="public"):
    """
    Returns a tuple of geometry type and EPSG code of the given column
    name in the table within the given schema.
    """
    stmt = text(u"select type,srid from geometry_columns where f_table_name = :tbname "
                "and f_geometry_column = :spcolumn and f_table_schema = :tbschema")
    result = _execute(stmt, tbname=tableName, spcolumn=spatialColumnName,
                      tbschema=schemaName)
    # Defaults when the column is not registered in geometry_columns.
    geom_type, epsg_code = "", -1
    first_row = next(iter(result), None)
    if first_row is not None:
        geom_type = first_row["type"]
        epsg_code = first_row["srid"]
    return (geom_type, epsg_code)
def get(self, hail_id):
    """Return the marshalled hail, masking the taxi position by status.

    Once a hail is finished / has the customer on board / timed out
    after customer acceptance, the taxi's exact position is zeroed out;
    otherwise the crow-fly distance between taxi and customer is added
    to the response.
    """
    # Drop cached ORM state so we read fresh rows.
    models.db.session.expire_all()
    hail = models.Hail.get_or_404(hail_id)
    self.filter_access(hail)
    hail.taxi_relation = models.Taxi.query.from_statement(
        text("SELECT * FROM taxi where id=:taxi_id")).params(
            taxi_id=hail.taxi_id).one()
    return_ = marshal({"data": [hail]}, hail_model)
    if hail._status in ('finished', 'customer_on_board',
                        'timeout_accepted_by_customer'):
        # Hide the taxi's real-time position in these states.
        return_['data'][0]['taxi']['position']['lon'] = 0.0
        return_['data'][0]['taxi']['position']['lat'] = 0.0
        return_['data'][0]['taxi']['last_update'] = 0
    else:
        return_['data'][0]['taxi']['crowfly_distance'] = vincenty(
            (return_['data'][0]['taxi']['position']['lat'],
             return_['data'][0]['taxi']['position']['lon']),
            (return_['data'][0]['customer_lat'],
             return_['data'][0]['customer_lon'])).kilometers
    return return_
def spatial_tables(exclude_views=False):
    """
    Returns a list of spatial table names in the STDM database.
    """
    result = _execute(
        text("select DISTINCT f_table_name from geometry_columns"))
    views = pg_views()
    names = []
    for row in result:
        name = row["f_table_name"]
        # When requested, skip entries that are actually views.
        if exclude_views and getIndex(views, name) != -1:
            continue
        names.append(name)
    return names
def pg_views(schema="public"):
    """
    Returns the views in the given schema minus the default PostGIS views.
    """
    stmt = text("SELECT table_name FROM information_schema.tables WHERE table_schema = :tschema and table_type = :tbtype "
                "ORDER BY table_name ASC")
    rows = _execute(stmt, tschema=schema, tbtype="VIEW")
    # Filter out the default PostGIS views.
    return [row["table_name"] for row in rows
            if getIndex(_postGISViews, row["table_name"]) == -1]
def feed_ratingindex():
    """OPDS feed listing all ratings with per-rating book counts."""
    off = request.args.get("offset") or 0
    # db.Ratings.rating / 2 converts the stored value to the
    # user-facing star count exposed as `name`.
    entries = calibre_db.session.query(db.Ratings,
                                       func.count('books_ratings_link.book').label('count'),
                                       (db.Ratings.rating / 2).label('name')) \
        .join(db.books_ratings_link)\
        .join(db.Books)\
        .filter(calibre_db.common_filters()) \
        .group_by(text('books_ratings_link.rating'))\
        .order_by(db.Ratings.rating).all()
    # NOTE(review): `/` yields a float page number on Python 3 --
    # presumably `//` was intended; confirm Pagination tolerates floats.
    pagination = Pagination(
        (int(off) / (int(config.config_books_per_page)) + 1),
        config.config_books_per_page, len(entries))
    element = list()
    for entry in entries:
        element.append(FeedObject(entry[0].id, "{} Stars".format(entry.name)))
    return render_xml_template('feed.xml', listelements=element,
                               folder='opds.feed_ratings',
                               pagination=pagination)
def list_databases(self, limit=None, marker=None, include_marker=False):
    """List databases the user created on this mysql instance."""
    LOG.debug("---Listing Databases---")
    # System/ignored schemas are excluded from the listing.
    ignored_database_names = "'%s'" % "', '".join(CONF.ignore_dbs)
    LOG.debug("The following database names are on ignore list and will "
              "be omitted from the listing: %s" % ignored_database_names)
    databases = []
    with LocalSqlClient(get_engine()) as client:
        # Fetch one row more than `limit` so we can tell whether a next
        # page exists (see the rowcount check at the end).
        q = sql_query.Query()
        q.columns = [
            'schema_name as name',
            'default_character_set_name as charset',
            'default_collation_name as collation',
        ]
        q.tables = ['information_schema.schemata']
        q.where = ["schema_name NOT IN (" + ignored_database_names + ")"]
        q.order = ['schema_name ASC']
        if limit:
            q.limit = limit + 1
        if marker:
            # Resume listing after (or at) the marker depending on
            # include_marker.
            q.where.append("schema_name %s '%s'" %
                           (INCLUDE_MARKER_OPERATORS[include_marker],
                            marker))
        t = text(str(q))
        database_names = client.execute(t)
        next_marker = None
        LOG.debug("database_names = %r." % database_names)
        # NOTE(review): `count >= limit` (and `rowcount <= limit` below)
        # rely on Python 2 int-vs-None ordering when limit is None --
        # confirm before running under Python 3.
        for count, database in enumerate(database_names):
            if count >= limit:
                break
            LOG.debug("database = %s." % str(database))
            mysql_db = models.MySQLDatabase()
            mysql_db.name = database[0]
            next_marker = mysql_db.name
            mysql_db.character_set = database[1]
            mysql_db.collate = database[2]
            databases.append(mysql_db.serialize())
    LOG.debug("databases = " + str(databases))
    # No extra row fetched -> this was the last page.
    if database_names.rowcount <= limit:
        next_marker = None
    return databases, next_marker
def write_log_db(log_data=None):
    '''Write an operating-log entry to the database.

    Pulls the acting user from the web session (blank identity fields
    when anonymous), serializes log_data to JSON and inserts one row
    into l_operating_log.  Always reports success; failures are only
    printed (best-effort logging).
    '''
    table_name = 'l_operating_log'
    try:
        sql = '''INSERT INTO `%s` (`UserID`,`WorkCode`,`UserName`,`OperaType`,`LogInfo`,`IP`,`CreateTime`,`Remark`) VALUES (:UserID,:WorkCode,:UserName,:OperaType,:LogInfo,:IP,:CreateTime,:Remark)'''
        if web.ctx.session.user:
            nUserID = web.ctx.session.user['UserID']
            sWorkCode = web.ctx.session.user['UserCode']
            sUserName = web.ctx.session.user['UserName']
        else:
            # Anonymous request: record empty identity fields.
            nUserID = ''
            sWorkCode = ''
            sUserName = ''
        if web.input().get('sRemark'):
            sRemark = web.input().get('sRemark')
        else:
            sRemark = ''
        web.ctx.cur_dbconn.execute(
            text(sql % table_name),
            UserID=nUserID,
            WorkCode=sWorkCode,
            UserName=sUserName,
            OperaType=web.ctx.apiDesc if web.ctx.apiDesc else '',
            LogInfo=obj_to_json(log_data),
            IP=web.ctx.ip,
            CreateTime=datetime.now(),
            Remark=sRemark)
    except:
        # NOTE(review): bare except deliberately swallows every error so
        # logging never breaks the request; failure goes to stdout only.
        from utils.trace_except import get_cur_except
        print get_cur_except()[0]
        pass
    return FuncResult(success=True)
class BearerTokenModel(db.Model):
    """OAuth2 bearer token issued to a client application for a user."""
    __tablename__ = 'bearer_tokens'
    __table_args__ = {'mysql_charset': 'utf8', 'extend_existing': True}
    id = db.Column(BIGINT(20, unsigned=True), primary_key=True,
                   autoincrement=True, index=True)
    # Issuing client application; tokens are removed with their client
    # (ondelete='CASCADE').
    app_id = db.Column(BIGINT(20, unsigned=True),
                       db.ForeignKey('clients.id', ondelete='CASCADE'),
                       nullable=False)
    application = db.relationship('ClientModel', lazy='joined')
    # Resource owner the token acts on behalf of.
    owner_id = db.Column(BIGINT(20, unsigned=True),
                         db.ForeignKey('users.id'), nullable=False)
    user = db.relationship('UserModel', backref='bearer_token',
                           lazy='joined')
    token_type = db.Column(db.String(80), nullable=False)
    access_token = db.Column(db.String(255), unique=True, nullable=False)
    refresh_token = db.Column(db.String(255), unique=True, nullable=False)
    expires_date = db.Column(DATETIME, nullable=False)
    # Space-separated scope string (see the `scopes` property).
    scope = db.Column(TEXT, nullable=False)
    created_date = db.Column(TIMESTAMP, default=datetime.datetime.utcnow,
                             server_default=text('CURRENT_TIMESTAMP'))

    @property
    def client_id(self):
        # The client_id lives on the application's credential record;
        # None when no credential has been provisioned yet.
        credential = self.application.credential
        if credential is None:
            return None
        return credential[0].client_id

    @property
    def expires(self):
        # Alias for expires_date.
        return self.expires_date

    @property
    def scopes(self):
        # Scope string split on whitespace; empty list when unset.
        if self.scope:
            return self.scope.split()
        return []