def insert_record(connection: Connection, ss_identifier: str, source_system: str): connection.execute(f""" INSERT INTO [lms].[LMSUser] ([SourceSystemIdentifier] ,[SourceSystem] ,[UserRole] ,[SISUserIdentifier] ,[LocalUserIdentifier] ,[Name] ,[EmailAddress] ,[SourceCreateDate] ,[SourceLastModifiedDate] ,[CreateDate] ,[LastModifiedDate] ,[DeletedAt]) VALUES (N'{ss_identifier}' ,N'{source_system}' ,N'student' ,N'{ss_identifier}' ,N'{ss_identifier}' ,N'{ss_identifier}' ,N'{ss_identifier}' ,NULL ,NULL ,CAST(N'2021-01-01 00:00:00' AS DateTime) ,CAST(N'2021-01-01 00:00:00' AS DateTime) ,NULL ) """)
def insert_record(
    connection: Connection,
    ss_identifier: str,
    source_system: str,
    section_identifier: int,
    user_identifier: int,
):
    connection.execute(
        f"""
INSERT INTO [lms].[LMSUserLMSSectionAssociation]
           ([LMSSectionIdentifier]
           ,[LMSUserIdentifier]
           ,[SourceSystemIdentifier]
           ,[SourceSystem]
           ,[EnrollmentStatus]
           ,[SourceCreateDate]
           ,[SourceLastModifiedDate]
           ,[CreateDate]
           ,[LastModifiedDate]
           ,[DeletedAt])
     VALUES
           ({section_identifier}
           ,{user_identifier}
           ,N'{ss_identifier}'
           ,N'{source_system}'
           ,N'Active'
           ,NULL
           ,NULL
           ,CAST(N'2021-01-01 00:00:00' AS DateTime)
           ,CAST(N'2021-01-01 00:00:00' AS DateTime)
           ,NULL
           )
"""
    )
def insert_record(connection: Connection, ss_identifier: str, source_system: str): connection.execute( f""" insert into lms.lmssystemactivity (sourcesystemidentifier ,sourcesystem ,lmsuseridentifier ,activitytype ,activitydatetime ,activitystatus ,parentsourcesystemidentifier ,activitytimeinminutes ,sourcecreatedate ,sourcelastmodifieddate ,deletedat ,createdate ,lastmodifieddate) values ('{ss_identifier}' ,'{source_system}' ,1 ,'sign-in' ,'2021-01-01 00:00:00' ,'active' ,null ,null ,null ,null ,null ,'2021-01-01 00:00:00' ,'2021-01-01 00:00:00' ) """ )
def init_db(conn: Connection) -> None:
    """
    Configures the target schema in which the tweets data will be stored,
    creating the schema and the tables if they do not exist yet.

    :param conn: SQLAlchemy connection object
    """
    logger.info(f"{Fore.YELLOW}Initializing database ...{Style.RESET_ALL}")
    # Create the specified schema if it does not exist
    if not conn.dialect.has_schema(conn, schema_name):
        logger.info(
            f"{Fore.YELLOW}Schema {schema_name} does not exist, creating it ...{Style.RESET_ALL}"
        )
        conn.execute(schema.CreateSchema(schema_name))
        logger.info(
            f"{Fore.GREEN}Schema {schema_name} successfully created!{Style.RESET_ALL}"
        )
    else:
        logger.info(
            f"{Fore.GREEN}Schema {schema_name} was found, continuing database initialization "
            f"...{Style.RESET_ALL}"
        )
    # Create tables
    Base.metadata.create_all(bind=conn)
    logger.info(
        f"{Fore.GREEN}Schema {schema_name} successfully configured!{Style.RESET_ALL}"
    )
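# Illustrative usage sketch for init_db (not from the source): the function
# relies on module-level `schema_name`, `logger`, `Base`, and colorama's
# Fore/Style being in scope. The connection URL below is a placeholder.
def example_init_db() -> None:
    from sqlalchemy import create_engine

    engine = create_engine("postgresql://user:password@localhost/tweets_db")  # hypothetical DSN
    with engine.connect() as conn:
        init_db(conn)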
def insert_record(connection: Connection, ss_identifier: str, source_system: str): connection.execute( f""" INSERT INTO [lms].[LMSSection] ([SourceSystemIdentifier] ,[SourceSystem] ,[SISSectionIdentifier] ,[Title] ,[SectionDescription] ,[Term] ,[LMSSectionStatus] ,[SourceCreateDate] ,[SourceLastModifiedDate] ,[CreateDate] ,[LastModifiedDate] ,[DeletedAt]) VALUES (N'{ss_identifier}' ,N'{source_system}' ,N'{ss_identifier}' ,N'{ss_identifier}' ,N'{ss_identifier}' ,N'{ss_identifier}' ,N'Archived' ,NULL ,NULL ,CAST(N'2021-01-01 00:00:00' AS DateTime) ,CAST(N'2021-01-01 00:00:00' AS DateTime) ,NULL ) """ )
def insert_record(connection: Connection, ss_identifier: str, source_system: str): connection.execute(f""" INSERT INTO [lms].[LMSSystemActivity] ([SourceSystemIdentifier] ,[SourceSystem] ,[LMSUserIdentifier] ,[ActivityType] ,[ActivityDateTime] ,[ActivityStatus] ,[ParentSourceSystemIdentifier] ,[ActivityTimeInMinutes] ,[SourceCreateDate] ,[SourceLastModifiedDate] ,[DeletedAt] ,[CreateDate] ,[LastModifiedDate]) VALUES (N'{ss_identifier}' ,N'{source_system}' ,1 ,N'sign-in' ,CAST(N'2021-01-01 00:00:00' AS DateTime) ,N'active' ,NULL ,NULL ,NULL ,NULL ,NULL ,CAST(N'2021-01-01 00:00:00' AS DateTime) ,CAST(N'2021-01-01 00:00:00' AS DateTime) ) """)
def replace_into(self, __conn: Connection, id: int, name: str):
    sql = f"""
        replace into {self.table_name} (id, name)
        values (:id, :name)
    """
    stat = sqlalchemy.text(sql)
    # locals() supplies the :id and :name bind parameters
    __conn.execute(stat, locals())
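# Note on the pattern above (an observation, not from the source): locals()
# also contains `self` and `__conn`, but only the bind names that appear in
# the text() statement are used by SQLAlchemy 1.x, so the extra keys appear
# to be harmless. An equivalent explicit call would be:
#
#   __conn.execute(stat, {"id": id, "name": name})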
def set_perm(  # pylint: disable=unused-argument
    self, mapper: Mapper, connection: Connection, target: "BaseDatasource"
) -> None:
    """
    Set the datasource permissions.

    :param mapper: The table mapper
    :param connection: The DB-API connection
    :param target: The mapped instance being persisted
    """
    link_table = target.__table__
    if target.perm != target.get_perm():
        connection.execute(
            link_table.update()
            .where(link_table.c.id == target.id)
            .values(perm=target.get_perm())
        )
    if (
        hasattr(target, "schema_perm")
        and target.schema_perm != target.get_schema_perm()
    ):
        connection.execute(
            link_table.update()
            .where(link_table.c.id == target.id)
            .values(schema_perm=target.get_schema_perm())
        )

    pvm_names = []
    if target.__tablename__ in {"dbs", "clusters"}:
        pvm_names.append(("database_access", target.get_perm()))
    else:
        pvm_names.append(("datasource_access", target.get_perm()))
        if target.schema:
            pvm_names.append(("schema_access", target.get_schema_perm()))

    # TODO(bogdan): modify slice permissions as well.
    for permission_name, view_menu_name in pvm_names:
        permission = self.find_permission(permission_name)
        view_menu = self.find_view_menu(view_menu_name)
        pv = None
        if not permission:
            permission_table = (
                self.permission_model.__table__  # pylint: disable=no-member
            )
            connection.execute(
                permission_table.insert().values(name=permission_name)
            )
            permission = self.find_permission(permission_name)
        if not view_menu:
            view_menu_table = (
                self.viewmenu_model.__table__  # pylint: disable=no-member
            )
            connection.execute(
                view_menu_table.insert().values(name=view_menu_name)
            )
            view_menu = self.find_view_menu(view_menu_name)

        if permission and view_menu:
            pv = (
                self.get_session.query(self.permissionview_model)
                .filter_by(permission=permission, view_menu=view_menu)
                .first()
            )
        if not pv and permission and view_menu:
            permission_view_table = (
                self.permissionview_model.__table__  # pylint: disable=no-member
            )
            connection.execute(
                permission_view_table.insert().values(
                    permission_id=permission.id, view_menu_id=view_menu.id
                )
            )
def reset_identity_sequence_number(
    connection: Connection, table: str, column: str
) -> None:
    sql = f"""
    select setval(
        pg_get_serial_sequence('{table}', '{column}'),
        (select max({column}) from {table})
    );
    """
    connection.execute(sql)
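# Why the helper above is needed (a sketch): PostgreSQL does not advance an
# identity/serial sequence when rows are inserted with explicit key values
# (e.g. using OVERRIDING SYSTEM VALUE, as the fixtures further down do), so
# the next generated key could collide. Realigning after such an insert:
#
#   reset_identity_sequence_number(connection, "lms.assignment", "assignmentidentifier")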
def refresh_materialized_view(connection: Connection, view: Table):
    """Execute :sql:`REFRESH MATERIALIZED VIEW CONCURRENTLY` for the given
    `view`.

    :param connection: A valid SQLAlchemy connection
    :param view: The view to refresh
    """
    logger.debug('Refreshing materialized view "%s"', view.name)
    preparer = connection.dialect.identifier_preparer
    connection.execute(
        'REFRESH MATERIALIZED VIEW CONCURRENTLY {view}'.format(
            view=preparer.format_table(view)
        )
    )
def lock_table(connection: Connection, target_table: Table):
    """
    Lock a table using a PostgreSQL advisory lock.

    The OID of the table in the pg_class relation is used as lock id.

    :param connection: DB connection
    :param target_table: Table object
    """
    logger.debug('Locking table "%s"', target_table.name)
    oid = connection.execute(
        select([column("oid")])
        .select_from(table("pg_class"))
        .where(column("relname") == target_table.name)
    ).scalar()
    connection.execute(select([func.pg_advisory_xact_lock(oid)])).scalar()
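# Usage sketch for lock_table (illustrative; `engine` and `jobs` are assumed
# names, not from the source): pg_advisory_xact_lock is transaction-scoped,
# so the lock must be taken inside an explicit transaction and is released
# automatically at COMMIT or ROLLBACK.
def update_exclusively(engine, jobs: Table) -> None:
    with engine.begin() as connection:  # lock held until this block commits
        lock_table(connection, jobs)
        connection.execute(jobs.delete())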
def create_user(
    connection: Connection,
    username: str,
    password: str,
    firstname: str,
    lastname: str,
    age: int,
    city: str,
    sex: str,
):
    query = text("""
        insert into user (username, password, firstname, lastname, age, city, sex)
        values (:username, :password, :firstname, :lastname, :age, :city, :sex)
    """)
    connection.execute(
        query,
        username=username,
        password=password,
        firstname=firstname,
        lastname=lastname,
        age=age,
        city=city,
        sex=sex,
    )
def insert(conn: Connection, hash_: str, type_: EntityTypeId, name: str) -> Optional[Entity]:
    """Insert a new entity if the hash doesn't already exist. No-op if it exists."""
    conn.execute(
        """
        INSERT INTO Entity (Hash, Type, Name)
        SELECT :hash, :type, :name
        WHERE NOT EXISTS (SELECT * FROM Entity WHERE Hash = :hash)
        """,
        hash=hash_,
        type=type_,
        name=name,
    )
    return get_by_hash(conn, hash_)
def insert_assignment(
    connection: Connection,
    ss_identifier: str,
    source_system: str,
    identifier: int,
    section_identifier: int,
):
    connection.execute(
        f"""
insert into lms.assignment
           (assignmentidentifier
           ,sourcesystemidentifier
           ,sourcesystem
           ,lmssectionidentifier
           ,title
           ,assignmentcategory
           ,assignmentdescription
           ,startdatetime
           ,enddatetime
           ,duedatetime
           ,maxpoints
           ,sourcecreatedate
           ,sourcelastmodifieddate
           ,createdate
           ,lastmodifieddate
           ,deletedat)
overriding system value
     values
           ({identifier}
           ,'{ss_identifier}'
           ,'{source_system}'
           ,'{section_identifier}'
           ,'{ss_identifier}'
           ,'online_upload'
           ,'{ss_identifier}'
           ,'2021-01-01 00:00:00'
           ,'2021-01-01 00:00:00'
           ,'2021-01-01 00:00:00'
           ,100
           ,null
           ,null
           ,'2021-01-01 00:00:00'
           ,'2021-01-01 00:00:00'
           ,null
           )
"""
    )
    reset_identity_sequence_number(
        connection, "lms.assignment", "assignmentidentifier"
    )
def get_all_dhcp_leases(
    dhcp_lease_table: Table,
    connection: Connection,
    subnet: Optional[netaddr.IPNetwork] = None,
    limit: Optional[int] = None,
) -> Iterator[
    Tuple[
        datetime,
        netaddr.EUI,
        netaddr.IPAddress,
        Optional[str],
        Optional[str],
    ]
]:
    """
    Return all DHCP leases.

    :param dhcp_lease_table: DHCP lease table
    :param connection: A SQLAlchemy connection
    :param subnet: Limit leases to subnet
    :param limit: Maximum number of leases
    :return: An iterator that yields (Expires-At, MAC, IP-Address, Hostname,
        Client-ID)-tuples
    """
    query = select([
        dhcp_lease_table.c.ExpiresAt,
        dhcp_lease_table.c.MAC,
        dhcp_lease_table.c.IPAddress,
        dhcp_lease_table.c.Hostname,
        dhcp_lease_table.c.ClientID,
    ]).order_by(dhcp_lease_table.c.IPAddress.asc())
    if subnet is not None:
        # op('<<=') returns a callable operator; apply it to the subnet
        # instead of comparing the operator object with <=.
        query = query.where(dhcp_lease_table.c.IPAddress.op('<<=')(subnet))
    if limit is not None:
        query = query.limit(limit)
    return iter(connection.execute(query))
def get_sessions_of_mac(
    connection: Connection,
    mac: netaddr.EUI,
    when: Optional[DatetimeRange] = None,
    limit: Optional[int] = None,
) -> Iterator[Tuple[netaddr.IPAddress, str, datetime, datetime]]:
    """
    Return accounting sessions of a particular MAC address ordered by
    Session-Start-Time descending.

    :param connection: A SQLAlchemy connection
    :param mac: MAC address
    :param when: Range in which Session-Start-Time must be within
    :param limit: Maximum number of records
    :return: An iterator that yields (NAS-IP-Address, NAS-Port-Id,
        Session-Start-Time, Session-Stop-Time)-tuples ordered by
        Session-Start-Time descending
    """
    logger.debug('Getting all sessions for MAC "%s"', mac)
    query = (
        select([
            radacct.c.NASIPAddress,
            radacct.c.NASPortId,
            radacct.c.AcctStartTime,
            radacct.c.AcctStopTime,
        ])
        .where(radacct.c.UserName == mac)
        .order_by(radacct.c.AcctStartTime.desc())
    )
    if when is not None:
        # select() is generative: reassign the result, and apply the range
        # containment operator to the tstzrange instead of comparing with <=.
        query = query.where(
            radacct.c.AcctStartTime.op('<@')(func.tstzrange(*when))
        )
    if limit is not None:
        query = query.limit(limit)
    return iter(connection.execute(query))
def get_user_interests(connection: Connection, user_id: int) -> List[Tuple[str]]:
    query = text("""
        select interest.name
        from interest
        join user_to_interest on interest.id = user_to_interest.interest
        where user_to_interest.user = :user_id
    """)
    return connection.execute(query, user_id=user_id).fetchall()
def get_auth_attempts_of_mac(
    connection: Connection,
    mac: netaddr.EUI,
    when: Optional[DatetimeRange] = None,
    limit: Optional[int] = None,
) -> Iterator[Tuple[netaddr.IPAddress, str, str, Groups, Attributes, datetime]]:
    """
    Return auth attempts of a particular MAC address ordered by Auth-Date
    descending.

    :param connection: A SQLAlchemy connection
    :param mac: MAC address
    :param when: Range in which Auth-Date must be within
    :param limit: Maximum number of records
    :return: An iterator that yields (NAS-IP-Address, NAS-Port-Id,
        Packet-Type, Groups, Reply, Auth-Date)-tuples ordered by Auth-Date
        descending
    """
    logger.debug('Getting all auth attempts of MAC %s', mac)
    query = (
        select([
            radpostauth.c.NASIPAddress,
            radpostauth.c.NASPortId,
            radpostauth.c.PacketType,
            radpostauth.c.Groups,
            radpostauth.c.Reply,
            radpostauth.c.AuthDate,
        ])
        .where(radpostauth.c.UserName == mac)
        .order_by(radpostauth.c.AuthDate.desc())
    )
    if when is not None:
        # Reassign the generative select and apply the `<@` operator to the
        # tstzrange value.
        query = query.where(
            radpostauth.c.AuthDate.op('<@')(func.tstzrange(*when))
        )
    if limit is not None:
        query = query.limit(limit)
    return iter(connection.execute(query))
def get_auth_attempts_at_port(
    connection: Connection,
    nas_ip_address: netaddr.IPAddress,
    nas_port_id: str,
    when: Optional[DatetimeRange] = None,
    limit: Optional[int] = None,
) -> Iterator[Tuple[str, str, Groups, Attributes, datetime]]:
    """
    Return auth attempts at a particular port of a NAS ordered by Auth-Date
    descending.

    :param connection: A SQLAlchemy connection
    :param nas_ip_address: NAS IP address
    :param nas_port_id: NAS Port ID
    :param when: Range in which Auth-Date must be within
    :param limit: Maximum number of records
    :return: An iterator that yields (User-Name, Packet-Type, Groups, Reply,
        Auth-Date)-tuples ordered by Auth-Date descending
    """
    # Python's %-style logging has no positional %2$s syntax; pass the
    # arguments in display order instead.
    logger.debug('Getting all auth attempts at port %s of %s',
                 nas_port_id, nas_ip_address)
    query = (
        select([
            radpostauth.c.UserName,
            radpostauth.c.PacketType,
            radpostauth.c.Groups,
            radpostauth.c.Reply,
            radpostauth.c.AuthDate,
        ])
        .where(
            and_(
                radpostauth.c.NASIPAddress == nas_ip_address,
                radpostauth.c.NASPortId == nas_port_id,
            )
        )
        .order_by(radpostauth.c.AuthDate.desc())
    )
    if when is not None:
        # Reassign the generative select and apply the `<@` operator to the
        # tstzrange value.
        query = query.where(
            radpostauth.c.AuthDate.op('<@')(func.tstzrange(*when))
        )
    if limit is not None:
        query = query.limit(limit)
    return iter(connection.execute(query))
def _get_server_version_info(self, connection: Connection) -> Tuple[int, ...]:
    query = 'SELECT version()'
    res = connection.execute(sql.text(query)).scalar()
    match = self.__version_pattern.match(res)
    version = int(match.group(1)) if match else 0
    return tuple([version])
def _get_columns(
    self, connection: Connection, table_name: str, schema: str = None, **kw
) -> List[Dict[str, Any]]:
    schema = schema or self._get_default_schema_name(connection)
    query = dedent(
        """
        SELECT
            "column_name",
            "data_type",
            "column_default",
            UPPER("is_nullable") AS "is_nullable"
        FROM "information_schema"."columns"
        WHERE "table_schema" = :schema
          AND "table_name" = :table
        ORDER BY "ordinal_position" ASC
        """
    ).strip()
    res = connection.execute(sql.text(query), schema=schema, table=table_name)
    columns = []
    for record in res:
        column = dict(
            name=record.column_name,
            type=datatype.parse_sqltype(record.data_type),
            nullable=record.is_nullable == "YES",
            default=record.column_default,
        )
        columns.append(column)
    return columns
def get_user_friends(connection: Connection, user_id: int) -> List[Tuple[int, str, str]]:
    query = text("""
        select user.id, user.firstname, user.lastname
        from friends
        join user on friends.user_two = user.id
        where friends.user_one = :user_id
    """)
    return connection.execute(query, user_id=user_id).fetchall()
def get_user(
    connection: Connection,
    username: Optional[str] = None,
    user_id: Optional[int] = None,
) -> Tuple:
    # Only a fixed condition string is interpolated into the f-string; the
    # actual values are still passed as bind parameters.
    query = text(f"""
        select *
        from user
        where {'username = :username' if username else 'id = :user_id'}
    """)
    return connection.execute(query, username=username, user_id=user_id).fetchone()
def db(connection: Connection) -> Generator:
    transaction = connection.begin()
    session = TestSession(bind=connection)
    try:
        yield session
    finally:
        transaction.rollback()
        session.close()
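# Illustrative pytest usage of the fixture above (assumes `db` is registered
# with @pytest.fixture and that a `User` model exists; both names are
# placeholders, not from the source):
def test_create_user(db) -> None:
    db.add(User(username="alice"))
    db.flush()
    assert db.query(User).filter_by(username="alice").one() is not None
    # the outer transaction is rolled back by the fixture, so nothing persists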
def get_table_names(self, connection: Connection, schema: str = None, **kw) -> List[str]:
    query = 'SHOW TABLES'
    if schema:
        query = f'{query} FROM {self.identifier_preparer.quote_identifier(schema)}'
    res = connection.execute(sql.text(query))
    return [row.Table for row in res]
def _get_server_version_info(self, connection: Connection) -> Any:
    query = "SELECT version()"
    try:
        res = connection.execute(sql.text(query))
        version = res.scalar()
        return tuple([version])
    except TrinoUserError as e:
        logger.debug(f"Failed to get server version: {e.message}")
        return None
def get_schema_names(self, connection: Connection, **kw) -> List[str]:
    query = dedent(
        """
        SELECT "schema_name"
        FROM "information_schema"."schemata"
        """
    ).strip()
    res = connection.execute(sql.text(query))
    return [row.schema_name for row in res]
def unique_sounds(conn: Connection, corpus_id: int) -> List[str]:
    sql = """
        select distinct rhyme_sound
        from phrases p
        join corpora_sources cs on p.source_id = cs.source_id
        where corpus_id = :corpus_id
    """
    result = conn.execute(sa.text(sql).params(corpus_id=corpus_id)).fetchall()
    return [r[0] for r in result if r[0] is not None]
def get_users_by_name(connection: Connection, firstname: str, lastname: str) -> List[Tuple]:
    query = text("""
        select firstname, lastname
        from user
        where firstname like :firstname and lastname like :lastname
        order by id
    """)
    return connection.execute(
        query, firstname=f"{firstname}%", lastname=f"{lastname}%"
    ).fetchall()
def insert_user_section_association(
    connection: Connection,
    ss_identifier: str,
    source_system: str,
    identifier: int,
    user_identifier: int,
    section_identifier: int,
):
    connection.execute(
        f"""
insert into lms.lmsuserlmssectionassociation
           (lmsuserlmssectionassociationidentifier
           ,lmssectionidentifier
           ,lmsuseridentifier
           ,sourcesystemidentifier
           ,sourcesystem
           ,enrollmentstatus
           ,sourcecreatedate
           ,sourcelastmodifieddate
           ,createdate
           ,lastmodifieddate
           ,deletedat)
overriding system value
     values
           ({identifier}
           ,'{section_identifier}'
           ,'{user_identifier}'
           ,'{ss_identifier}'
           ,'{source_system}'
           ,'active'
           ,null
           ,null
           ,'2021-01-01 00:00:00'
           ,'2021-01-01 00:00:00'
           ,null
           )
"""
    )
    reset_identity_sequence_number(
        connection,
        "lms.lmsuserlmssectionassociation",
        "lmsuserlmssectionassociationidentifier",
    )
def has_schema(self, connection: Connection, schema: str) -> bool:
    query = dedent(
        """
        SELECT "schema_name"
        FROM "information_schema"."schemata"
        WHERE "schema_name" = :schema
        """
    ).strip()
    res = connection.execute(sql.text(query), schema=schema)
    return res.first() is not None
def aggregate(self):
    print("AGGREGATING... after", self.delay)
    t = text(
        """
        SELECT u.service_id, u.resource_owner, u.resource_id, u.location,
               um.metric_id, sum(um.value), strftime('%Y-%m-%d', u.event_time)
        FROM usage u, usagemetrics um
        WHERE um.usage_id = u.id
        GROUP BY u.service_id, u.resource_owner, u.resource_id, u.location,
                 strftime('%Y-%m-%d', u.event_time)
        """,
        bind=self.engine,
    )
    # Use the engine's connect() API rather than constructing Connection
    # directly.
    conn = self.engine.connect()
    result = conn.execute(t)
    Session = sessionmaker(bind=self.engine)
    session = Session()
    # Alternative ORM formulation, kept for reference:
    # blah = session.query(UsageMetric).group_by(
    #     UsageMetric.service_id, UsageMetric.resource_owner,
    #     UsageMetric.resource_id, UsageMetric.location,
    #     UsageMetric.metric_id, func.day(UsageMetric.event_time)).all()
    # for r in blah:
    #     print(r)
    self.process_results(result, session)
    conn.close()
    session.close()
    self.timer.cancel()