def test_nine(self):
    x = column("x")
    self.assert_compile(
        and_(x == 7, x == 9, false(), x == 5),
        "false"
    )
    self.assert_compile(
        ~and_(x == 7, x == 9, false(), x == 5),
        "true"
    )
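# A standalone sketch of the short-circuit the test above asserts (plain
# SQLAlchemy, no test harness assumed): in recent SQLAlchemy versions, once
# false() appears among the clauses, and_() collapses to a constant, and
# negating the expression yields true.
from sqlalchemy import and_, column, false

x = column("x")
print(and_(x == 7, x == 9, false(), x == 5))   # false
print(~and_(x == 7, x == 9, false(), x == 5))  # true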
def filter(self, joins):
    arch_name = self.get_xml_attr('arch', unicode, None)
    try:
        arch = Arch.by_name(arch_name)
    except ValueError:
        return (joins, false())
    osmajor = self.get_xml_attr('osmajor', unicode, None)
    if not osmajor:
        return (joins, false())
    osminor = self.get_xml_attr('osminor', unicode, None) or None
    clause = System.compatible_with_distro_tree(arch, osmajor, osminor)
    return (joins, clause)
def get_query(self):
    if current_user.is_anonymous:
        return Plan.query.filter(sql.false())
    else:
        return Plan.query\
            .filter(Plan.user_id == current_user.id)\
            .order_by(sql.desc(Plan.created_at))
def _apply_filters_to_query(self, query, model, filters, context=None):
    if filters:
        for key, value in six.iteritems(filters):
            column = getattr(model, key, None)
            # NOTE(kevinbenton): if column is a hybrid property that
            # references another expression, attempting to convert to
            # a boolean will fail so we must compare to None.
            # See "An Important Expression Language Gotcha" in:
            # docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
            if column is not None:
                if not value:
                    query = query.filter(sql.false())
                    return query
                if isinstance(column, associationproxy.AssociationProxy):
                    # association proxies don't support in_ so we have to
                    # do multiple equals matches
                    query = query.filter(
                        or_(*[column == v for v in value]))
                else:
                    query = query.filter(column.in_(value))
            elif key == 'shared' and hasattr(model, 'rbac_entries'):
                # translate a filter on shared into a query against the
                # object's rbac entries
                query = query.outerjoin(model.rbac_entries)
                rbac = model.rbac_entries.property.mapper.class_
                matches = [rbac.target_tenant == '*']
                if context:
                    matches.append(rbac.target_tenant == context.tenant_id)
                # any 'access_as_shared' records that match the
                # wildcard or requesting tenant
                is_shared = and_(rbac.action == 'access_as_shared',
                                 or_(*matches))
                if not value[0]:
                    # NOTE(kevinbenton): we need to find objects that don't
                    # have an entry that matches the criteria above so
                    # we use a subquery to exclude them.
                    # We can't just filter the inverse of the query above
                    # because that will still give us a network shared to
                    # our tenant (or wildcard) if it's shared to another
                    # tenant.
                    # This is the column joining the table to rbac via
                    # the object_id. We can't just use model.id because
                    # subnets join on network.id so we have to inspect the
                    # relationship.
                    join_cols = model.rbac_entries.property.local_columns
                    oid_col = list(join_cols)[0]
                    is_shared = ~oid_col.in_(
                        query.session.query(rbac.object_id).
                        filter(is_shared)
                    )
                query = query.filter(is_shared)
    for _nam, hooks in six.iteritems(self._model_query_hooks.get(model, {})):
        result_filter = hooks.get('result_filters', None)
        if isinstance(result_filter, six.string_types):
            result_filter = getattr(self, result_filter, None)
        if result_filter:
            query = result_filter(query, filters)
    return query
def populate_package_groups(packages):
    """
    Adds `visible_groups` field to package objects. It contains a list of
    PackageGroup objects that are visible to the current user - the user is
    on the Group's ACL. Global groups are visible to everyone.

    Ideally, this would be expressed using a SQLA relationship instead, but
    relationships don't allow additional inputs (current user).

    :param packages: object with base_id attribute that allows adding
                     attributes
    """
    base_map = {}
    for package in packages:
        package.visible_groups = []
        base_map[package.base_id] = package
    filter_expr = PackageGroup.namespace == None
    if g.user:
        filter_expr |= GroupACL.user_id == g.user.id
    query = (
        db.query(PackageGroupRelation)
        .options(contains_eager(PackageGroupRelation.group))
        .filter(
            PackageGroupRelation.base_id.in_(base_map.keys())
            if base_map else false()
        )
        .join(PackageGroup)
        .filter(filter_expr)
        .order_by(PackageGroup.namespace, PackageGroup.name)
    )
    if g.user:
        query = query.outerjoin(GroupACL)
    for r in query:
        base_map[r.base_id].visible_groups.append(r.group)
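# Aside on the filter expression above: in SQLAlchemy,
# `PackageGroup.namespace == None` is intercepted by the column's comparator
# and compiles to "namespace IS NULL", so the use of `== None` (rather than
# `is None`, which Python cannot overload) is deliberate here.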
def get_build_task_queue(cls):
    """
    Returns BuildChroots which are
        - waiting to be built, or
        - older than 2 hours and unfinished
    """
    # todo: filter out builds without a package
    query = (models.BuildChroot.query.join(models.Build)
             .filter(models.Build.canceled == false())
             .filter(or_(
                 models.BuildChroot.status == helpers.StatusEnum("pending"),
                 models.BuildChroot.status == helpers.StatusEnum("starting"),
                 and_(
                     # We are moving ended_on to the BuildChroot, now it
                     # should be reliable, so we don't want to reschedule
                     # failed chroots
                     # models.BuildChroot.status.in_([
                     #     # Bug 1206562 - Cannot delete Copr because it
                     #     # incorrectly thinks there are unfinished builds.
                     #     # Solution: `failed` but unfinished (ended_on is
                     #     # null) builds should be rescheduled.
                     #     # todo: we need to be sure that the correct
                     #     # `failed` status is set together with `ended_on`
                     #     helpers.StatusEnum("running"),
                     #     helpers.StatusEnum("failed")
                     # ]),
                     models.BuildChroot.status == helpers.StatusEnum("running"),
                     models.BuildChroot.started_on < int(time.time() - 1.1 * MAX_BUILD_TIMEOUT),
                     models.BuildChroot.ended_on.is_(None)
                 )
             )))
    query = query.order_by(models.BuildChroot.build_id.asc())
    return query
def get_permissions_query(model_names, permission_type='read',
                          permission_model=None):
    """Prepare the query based on the allowed contexts and resources for
    each of the required objects (models).
    """
    type_queries = []
    for model_name in model_names:
        contexts, resources = query_helpers.get_context_resource(
            model_name=model_name,
            permission_type=permission_type,
            permission_model=permission_model
        )
        if contexts is not None:
            if resources:
                resource_sql = and_(
                    MysqlRecordProperty.type == model_name,
                    MysqlRecordProperty.key.in_(resources))
            else:
                resource_sql = false()
            type_query = or_(
                and_(
                    MysqlRecordProperty.type == model_name,
                    context_query_filter(MysqlRecordProperty.context_id,
                                         contexts)
                ),
                resource_sql)
            type_queries.append(type_query)
    return and_(
        MysqlRecordProperty.type.in_(model_names),
        or_(*type_queries))
def upgrade():
    op.create_table(
        u'channel',
        sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
        sa.Column('name', sa.Unicode(64), nullable=False, unique=True,
                  index=True),
        sa.Column('introduction', sa.Unicode(1024), nullable=True),
        sa.Column('date_created', sa.DateTime(timezone=True), nullable=False,
                  index=True, server_default=sa.func.current_timestamp()),
    )
    op.create_table(
        u'article',
        sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
        sa.Column('channel_id', sa.Integer(), nullable=False, index=True),
        sa.Column('is_sticky', sa.Boolean(), server_default=sql.false(),
                  nullable=False),
        sa.Column('title', sa.Unicode(64), nullable=False, unique=True,
                  index=True),
        sa.Column('date_published', sa.DateTime(timezone=True),
                  nullable=False, index=True,
                  server_default=sa.func.current_timestamp()),
        sa.Column('date_created', sa.DateTime(timezone=True), nullable=False,
                  index=True, server_default=sa.func.current_timestamp()),
    )
    op.create_table(
        u'article_content',
        sa.Column('id', sa.Integer(), sa.ForeignKey('article.id'),
                  nullable=False, primary_key=True),
        sa.Column('content', sa.UnicodeText(), nullable=False),
    )
def has_property(cls, prop, when=None):
    # TODO Use joins
    property_granted_select = select(
        [null()],
        from_obj=[
            Property.__table__,
            PropertyGroup.__table__,
            Membership.__table__
        ]
    ).where(
        and_(
            Property.name == prop,
            Property.property_group_id == PropertyGroup.id,
            PropertyGroup.id == Membership.group_id,
            Membership.user_id == cls.id,
            Membership.active(when)
        )
    )  # .cte("property_granted_select")
    return and_(
        not_(exists(
            property_granted_select.where(
                Property.granted == false())
        )),
        exists(
            property_granted_select.where(
                Property.granted == true()
            )
        )
    ).self_group().label("has_property_" + prop)
def has_property(self, prop):
    property_granted_select = select(
        [null()],
        from_obj=[
            Property.__table__,
            PropertyGroup.__table__,
            Membership.__table__
        ]
    ).where(
        and_(
            Property.name == prop,
            Property.property_group_id == PropertyGroup.id,
            PropertyGroup.id == Membership.group_id,
            Membership.user_id == self.id,
            Membership.active
        )
    )  # .cte("property_granted_select")
    return and_(
        not_(exists(
            property_granted_select.where(
                Property.granted == false())
        )),
        exists(
            property_granted_select.where(
                Property.granted == true()
            )
        )
    )
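# Reading of the predicate built in both has_property variants above: the
# user "has" the property iff at least one active membership grants it and
# no active membership explicitly revokes it. In SQL terms, roughly:
#
#   NOT EXISTS (SELECT NULL FROM ... WHERE ... AND granted = false)
#   AND EXISTS (SELECT NULL FROM ... WHERE ... AND granted = true)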
def upgrade():
    op.add_column(
        'gp_application_policy_groups',
        sa.Column('shared', sa.Boolean, nullable=True,
                  server_default=sql.false())
    )
def bounding_box_query(ne_lat, ne_lng, sw_lat, sw_lng, start_date, end_date,
                       fatal, severe, light, inaccurate, show_markers=True,
                       is_thin=False, yield_per=None):
    # example:
    # ne_lat=32.36292402647484&ne_lng=35.08873443603511
    # &sw_lat=32.29257266524761&sw_lng=34.88445739746089
    # >>> m = Marker.bounding_box_query(32.36, 35.088, 32.292, 34.884)
    # >>> m.count()
    # 250
    if not show_markers:
        return Marker.query.filter(sql.false())
    accurate = not inaccurate
    markers = Marker.query \
        .filter(Marker.longitude <= ne_lng) \
        .filter(Marker.longitude >= sw_lng) \
        .filter(Marker.latitude <= ne_lat) \
        .filter(Marker.latitude >= sw_lat) \
        .filter(Marker.created >= start_date) \
        .filter(Marker.created < end_date) \
        .order_by(desc(Marker.created))
    if yield_per:
        markers = markers.yield_per(yield_per)
    if accurate:
        markers = markers.filter(Marker.locationAccuracy == 1)
    if not fatal:
        markers = markers.filter(Marker.severity != 1)
    if not severe:
        markers = markers.filter(Marker.severity != 2)
    if not light:
        markers = markers.filter(Marker.severity != 3)
    if is_thin:
        markers = markers.options(load_only("id", "longitude", "latitude"))
    return markers
def allForAccount(self, account, order=None, filters={}, onOrAfter=None,
                  before=None):
    """ Return all transactions for the given account with the applied
    filters """
    transactions = []
    with self.session() as session:
        accountQuery = self.getTransactionsForAccountQuery(session, account)
        transfersQuery = session.query(self.table_class).join(
            Transfer).filter(Transfer.account == account)
        resultQuery = accountQuery.union(transfersQuery)
        for column in filters:
            unionQuery = session.query(self.table_class).filter(sql.false())
            for value in filters[column]:
                tempQuery = accountQuery.filter(column == value)
                unionQuery = unionQuery.union(tempQuery)
            resultQuery = resultQuery.intersect(unionQuery)
        if onOrAfter is not None:
            resultQuery = resultQuery.filter(Transaction.date >= onOrAfter)
        elif before is not None:
            resultQuery = resultQuery.filter(Transaction.date < before)
        if order is None:
            transactions = resultQuery.order_by(Transaction.date).all()
            transactions.reverse()
        else:
            transactions = resultQuery.order_by(order).all()
    return transactions  # + account.transfers
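# The inner loop above seeds its accumulator with filter(sql.false()) so the
# first .union() has an empty-but-typed query to build on. A generic,
# self-contained sketch of that accumulator pattern (any_of is a
# hypothetical helper, not part of the snippet above):
from sqlalchemy import sql

def any_of(session, model, column, values):
    """Union of per-value queries; an empty `values` yields an empty query."""
    acc = session.query(model).filter(sql.false())
    for v in values:
        acc = acc.union(session.query(model).filter(column == v))
    return acc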
def upgrade():
    op.create_table(
        'reservations',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('expiration', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'resourcedeltas',
        sa.Column('resource', sa.String(length=255), nullable=False),
        sa.Column('reservation_id', sa.String(length=36), nullable=False),
        sa.Column('amount', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('resource', 'reservation_id'))
    op.create_table(
        'quotausages',
        sa.Column('tenant_id', sa.String(length=255), nullable=False,
                  primary_key=True, index=True),
        sa.Column('resource', sa.String(length=255), nullable=False,
                  primary_key=True, index=True),
        sa.Column('dirty', sa.Boolean(), nullable=False,
                  server_default=sql.false()),
        sa.Column('in_use', sa.Integer(), nullable=False,
                  server_default='0'),
        sa.Column('reserved', sa.Integer(), nullable=False,
                  server_default='0'))
def all_children(self, typ):
    from .objtyp import ObjType
    if is_undefined(typ):
        from sqlalchemy.sql import false
        return ObjType.q.filter(false())
    return ObjType.get_mod(typ).q.filter_by(parent=self)
def _array_union(queries):
    """ Union of all valid queries in array """
    clean_queries = [q for q in queries if q]
    if not clean_queries:
        return db.session.query(Relationship.source_id).filter(sql.false())
    query = clean_queries.pop()
    return query.union(*clean_queries)
def _get_min_service_version(self, context, binary):
    meta = MetaData(bind=db_session.get_engine(context=context))
    services = Table('services', meta, autoload=True)
    return select([sqlfunc.min(services.c.version)]).select_from(
        services).where(and_(
            services.c.binary == binary,
            services.c.deleted == 0,
            services.c.forced_down == false())).scalar()
def get_query(self):
    if current_user.is_anonymous:
        return News.query.filter(sql.false())
    else:
        return News.query\
            .join(Schedule)\
            .filter(Schedule.owner_id == current_user.id)\
            .order_by(sql.desc(News.created))
def _get_by_disabled_from_db(context, disabled):
    if disabled:
        return context.session.query(api_models.CellMapping).filter_by(
            disabled=true()).order_by(asc(api_models.CellMapping.id)).all()
    else:
        return context.session.query(api_models.CellMapping).filter_by(
            disabled=false()).order_by(asc(
                api_models.CellMapping.id)).all()
def test_true_false(self):
    self.assert_compile(
        sql.false(), "0"
    )
    self.assert_compile(
        sql.true(), "1"
    )
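# Context for the "0"/"1" expectations above: how true()/false() compile is
# dialect-dependent. A quick standalone check (dialect modules as named in
# SQLAlchemy) - backends without a native boolean type render integer
# literals, while PostgreSQL renders the keywords:
from sqlalchemy import false
from sqlalchemy.dialects import postgresql, sqlite

print(false().compile(dialect=sqlite.dialect()))      # 0
print(false().compile(dialect=postgresql.dialect()))  # false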
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up."""
    # give agents extra time to handle transient failures
    agent_dead_limit = cfg.CONF.agent_down_time * 2

    # check for an abrupt clock change since last check. if a change is
    # detected, sleep for a while to let the agents check in.
    tdelta = timeutils.utcnow() - getattr(self, "_clock_jump_canary",
                                          timeutils.utcnow())
    if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
        LOG.warn(_LW("Time since last L3 agent reschedule check has "
                     "exceeded the interval between checks. Waiting "
                     "before check to allow agents to send a heartbeat "
                     "in case there was a clock adjustment."))
        time.sleep(agent_dead_limit)
    self._clock_jump_canary = timeutils.utcnow()

    context = n_ctx.get_admin_context()
    cutoff = timeutils.utcnow() - datetime.timedelta(
        seconds=agent_dead_limit)
    down_bindings = (
        context.session.query(RouterL3AgentBinding).
        join(agents_db.Agent).
        filter(agents_db.Agent.heartbeat_timestamp < cutoff,
               agents_db.Agent.admin_state_up).
        outerjoin(l3_attrs_db.RouterExtraAttributes,
                  l3_attrs_db.RouterExtraAttributes.router_id ==
                  RouterL3AgentBinding.router_id).
        filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                      l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
    try:
        for binding in down_bindings:
            LOG.warn(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {"router": binding.router_id,
                 "agent": binding.l3_agent_id,
                 "dead_time": agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    n_rpc.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except db_exc.DBError:
        # Catch DB errors here so a transient DB connectivity issue
        # doesn't stop the loopingcall.
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
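# Note on the or_(ha == sql.false(), ha == sql.null()) filter above (the
# same pattern recurs in the other reschedule/get_down_router_bindings
# variants below): SQL comparisons against NULL are never true, so a simple
# `ha != sql.true()` would silently drop the rows where the outer join
# produced NULL. Matching false OR NULL keeps routers that either have HA
# disabled or have no extra-attributes row at all.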
def _extend_query_with_workspace_filter(self, query):
    if is_within_workspace(self.context):
        userids = list(get_workspace_user_ids(self.context))
        if userids:
            query = query.filter(User.userid.in_(userids))
        else:
            # Avoid filtering on an empty list.
            query = query.filter(sql.false())
    return query
def get_build_importing_queue(cls):
    """
    Returns BuildChroots which are waiting to be uploaded to dist git
    """
    query = (models.BuildChroot.query.join(models.Build)
             .filter(models.Build.canceled == false())
             .filter(models.BuildChroot.status ==
                     helpers.StatusEnum("importing")))
    query = query.order_by(models.BuildChroot.build_id.asc())
    return query
def domain_query_from_term(term):
    '''Recursively generate an SQL query from the search terms'''
    if term.kind == 'expression':
        if term.category in DOMAIN_QUERIES:
            return DOMAIN_QUERIES[term.category](term.term)
        else:
            return AsDomain.query.filter(sql.false())
    elif term.kind == 'operation':
        left_query = domain_query_from_term(term.left)
        right_query = domain_query_from_term(term.right)
        if term.operation == 'except':
            return left_query.except_(right_query)
        elif term.operation == 'or':
            return left_query.union(right_query)
        elif term.operation == 'and':
            return left_query.intersect(right_query)
    return AsDomain.query.filter(sql.false())
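# In the operation arm above, Query.union / intersect / except_ map the
# search operators onto set operations over result rows. The
# filter(sql.false()) fallbacks make unknown categories and malformed terms
# behave as the empty set: they are absorbed by union and annihilate
# intersect, which is usually the safe default for search.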
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up."""
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents('L3', agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)

    context = n_ctx.get_admin_context()
    down_bindings = (
        context.session.query(RouterL3AgentBinding).
        join(agents_db.Agent).
        filter(agents_db.Agent.heartbeat_timestamp < cutoff,
               agents_db.Agent.admin_state_up).
        outerjoin(l3_attrs_db.RouterExtraAttributes,
                  l3_attrs_db.RouterExtraAttributes.router_id ==
                  RouterL3AgentBinding.router_id).
        filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                      l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
    try:
        agents_back_online = set()
        for binding in down_bindings:
            if binding.l3_agent_id in agents_back_online:
                continue
            else:
                agent = self._get_agent(context, binding.l3_agent_id)
                if agent.is_active:
                    agents_back_online.add(binding.l3_agent_id)
                    continue

            agent_mode = self._get_agent_mode(binding.l3_agent)
            if agent_mode == constants.L3_AGENT_MODE_DVR:
                # rescheduling from l3 dvr agent on compute node doesn't
                # make sense. Router will be removed from that agent once
                # there are no dvr serviceable ports on that compute node
                LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
                             'Not rescheduling from agent in \'dvr\' '
                             'mode.'), {'host': binding.l3_agent.host})
                continue

            LOG.warn(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {'router': binding.router_id,
                 'agent': binding.l3_agent_id,
                 'dead_time': agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    oslo_messaging.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
def process_segment_():
    with closing(database.postgresql['session']()) as session:
        process_segment(
            session.query(
                models.segment
            ).filter(
                models.segment.status == false(),
            ).order_by(func.random()).first().id
        )
def _apply_filters_to_query(self, query, model, filters, context=None):
    if isinstance(model, UnionModel):
        # NOTE(kevinbenton): a unionmodel is made up of multiple tables so
        # we apply the filter to each table
        for component_model in model.model_map.values():
            query = self._apply_filters_to_query(query, component_model,
                                                 filters, context)
        return query
    if filters:
        for key, value in six.iteritems(filters):
            column = getattr(model, key, None)
            # NOTE(kevinbenton): if column is a hybrid property that
            # references another expression, attempting to convert to
            # a boolean will fail so we must compare to None.
            # See "An Important Expression Language Gotcha" in:
            # docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
            if column is not None:
                if not value:
                    query = query.filter(sql.false())
                    return query
                query = query.filter(column.in_(value))
            elif key == 'shared' and hasattr(model, 'rbac_entries'):
                # translate a filter on shared into a query against the
                # object's rbac entries
                rbac, join_params, oid_col = self._get_rbac_query_params(
                    model)
                query = query.outerjoin(*join_params, aliased=True)
                matches = [rbac.target_tenant == '*']
                if context:
                    matches.append(rbac.target_tenant == context.tenant_id)
                # any 'access_as_shared' records that match the
                # wildcard or requesting tenant
                is_shared = and_(rbac.action == 'access_as_shared',
                                 or_(*matches))
                if not value[0]:
                    # NOTE(kevinbenton): we need to find objects that don't
                    # have an entry that matches the criteria above so
                    # we use a subquery to exclude them.
                    # We can't just filter the inverse of the query above
                    # because that will still give us a network shared to
                    # our tenant (or wildcard) if it's shared to another
                    # tenant.
                    is_shared = ~oid_col.in_(
                        query.session.query(rbac.object_id).
                        filter(is_shared)
                    )
                query = query.filter(is_shared)
    for _nam, hooks in six.iteritems(self._model_query_hooks.get(model, {})):
        result_filter = hooks.get('result_filters', None)
        if isinstance(result_filter, six.string_types):
            result_filter = getattr(self, result_filter, None)
        if result_filter:
            query = result_filter(query, filters)
    return query
def bounding_box_query(ne_lat, ne_lng, sw_lat, sw_lng, show_discussions):
    if not show_discussions:
        return Marker.query.filter(sql.false())
    markers = DiscussionMarker.query \
        .filter(DiscussionMarker.longitude <= ne_lng) \
        .filter(DiscussionMarker.longitude >= sw_lng) \
        .filter(DiscussionMarker.latitude <= ne_lat) \
        .filter(DiscussionMarker.latitude >= sw_lat) \
        .order_by(desc(DiscussionMarker.created))
    return markers
def test_boolean_default(self):
    t = Table("t", self.metadata,
              Column("x", Boolean, server_default=sql.false()))
    t.create(testing.db)
    testing.db.execute(t.insert())
    testing.db.execute(t.insert().values(x=True))
    eq_(
        testing.db.execute(t.select().order_by(t.c.x)).fetchall(),
        [(False,), (True,)]
    )
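# A self-contained variant of the test above (in-memory SQLite assumed; the
# exact DDL varies by backend and SQLAlchemy version), showing that
# server_default=sql.false() lands in the emitted DDL, so the database
# itself fills the column when an INSERT omits it:
from sqlalchemy import Boolean, Column, MetaData, Table, create_engine, sql

engine = create_engine("sqlite://")
metadata = MetaData()
t = Table("t", metadata, Column("x", Boolean, server_default=sql.false()))
metadata.create_all(engine)  # CREATE TABLE t (x BOOLEAN DEFAULT 0)

with engine.begin() as conn:
    conn.execute(t.insert())                 # x falls back to the default
    conn.execute(t.insert().values(x=True))
    print(conn.execute(t.select().order_by(t.c.x)).fetchall())
    # [(False,), (True,)]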
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up."""
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents('L3', agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)

    context = n_ctx.get_admin_context()
    try:
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(
                l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                l3_attrs_db.RouterExtraAttributes.ha == sql.null())))

        agents_back_online = set()
        for binding in down_bindings:
            if binding.l3_agent_id in agents_back_online:
                continue
            else:
                # we need new context to make sure we use different DB
                # transaction - otherwise we may fetch same agent record
                # each time due to REPEATABLE_READ isolation level
                context = n_ctx.get_admin_context()
                agent = self._get_agent(context, binding.l3_agent_id)
                if agent.is_active:
                    agents_back_online.add(binding.l3_agent_id)
                    continue

            LOG.warning(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {'router': binding.router_id,
                 'agent': binding.l3_agent_id,
                 'dead_time': agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    oslo_messaging.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
class Key(Object):
    """A key."""

    keylist_id = Column(
        Integer, ForeignKey(Keylist.id, ondelete='cascade'), nullable=False)
    keylist = sqlalchemy.orm.relation(
        'Keylist', uselist=False, backref='keys', cascade='all')
    serial = Column(String)
    member_id = Column(Integer, ForeignKey(Member.id), nullable=True)
    member = sqlalchemy.orm.relation('Member', uselist=False, backref='keys')
    rent = Column(DateTime, nullable=True)
    note = Column(String)
    lost = Column(Boolean, default=sql.false())
class NsxExtendedSecurityGroupProperties(model_base.BASEV2):
    __tablename__ = 'nsx_extended_security_group_properties'

    security_group_id = sa.Column(sa.String(36),
                                  sa.ForeignKey('securitygroups.id',
                                                ondelete="CASCADE"),
                                  primary_key=True)
    logging = sa.Column(sa.Boolean, default=False, nullable=False)
    provider = sa.Column(sa.Boolean, default=False,
                         server_default=sql.false(), nullable=False)
    policy = sa.Column(sa.String(36))
    security_group = orm.relationship(
        securitygroups_db.SecurityGroup,
        backref=orm.backref('ext_properties', lazy='joined',
                            uselist=False, cascade='delete'))
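# The provider column above (and several models below) carries both defaults
# on purpose: default=False is applied client-side by SQLAlchemy when a row
# is inserted through the ORM without a value, while server_default=
# sql.false() puts DEFAULT false into the CREATE TABLE, covering rows
# inserted outside SQLAlchemy and existing rows during migrations.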
class Ulinc_campaign(Base):
    __tablename__ = 'ulinc_campaign'

    unassigned_ulinc_campaign_id = '943c18f3-74c8-45cf-a396-1ddc89c6b9d2'

    def __init__(self, ulinc_campaign_id, client_id, janium_campaign_id,
                 ulinc_campaign_name, ulinc_is_active,
                 ulinc_ulinc_campaign_id, ulinc_is_messenger,
                 ulinc_messenger_origin_message):
        self.ulinc_campaign_id = ulinc_campaign_id
        self.client_id = client_id
        self.janium_campaign_id = janium_campaign_id
        self.ulinc_campaign_name = ulinc_campaign_name
        self.ulinc_is_active = ulinc_is_active
        self.ulinc_ulinc_campaign_id = ulinc_ulinc_campaign_id
        self.ulinc_is_messenger = ulinc_is_messenger
        self.ulinc_messenger_origin_message = ulinc_messenger_origin_message

    # Primary Keys
    ulinc_campaign_id = Column(String(36), primary_key=True)

    # Foreign Keys
    client_id = Column(String(36), ForeignKey('client.client_id'),
                       nullable=False)
    janium_campaign_id = Column(
        String(36), ForeignKey('janium_campaign.janium_campaign_id'),
        nullable=True)

    # Common Columns
    ulinc_campaign_name = Column(String(512), nullable=False)
    ulinc_is_active = Column(Boolean, nullable=False, server_default=false())
    ulinc_ulinc_campaign_id = Column(String(20), nullable=False)
    ulinc_is_messenger = Column(Boolean, nullable=False,
                                server_default=false())

    # Table Metadata
    asOfStartTime = Column(DateTime, server_default=func.now())
    asOfEndTime = Column(DateTime,
                         server_default=text("'9999-12-31 10:10:10'"))
    effective_start_date = Column(DateTime, server_default=func.now())
    effective_end_date = Column(DateTime,
                                server_default=text("'9999-12-31 10:10:10'"))
    updatedBy = Column(
        String(36),
        server_default=text("'45279d74-b359-49cd-bb94-d75e06ae64bc'"))

    # SQLAlchemy Relationships and Backreferences
    contacts = relationship('Contact',
                            backref=backref('contact_ulinc_campaign',
                                            uselist=False),
                            lazy=False)
class QuotaUsage(model_base.BASEV2):
    """Represents the current usage for a given resource."""

    resource = sa.Column(sa.String(255), nullable=False,
                         primary_key=True, index=True)
    tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN),
                          nullable=False,
                          primary_key=True, index=True)
    dirty = sa.Column(sa.Boolean, nullable=False,
                      server_default=sql.false())
    in_use = sa.Column(sa.Integer, nullable=False,
                       server_default="0")
    reserved = sa.Column(sa.Integer, nullable=False,
                         server_default="0")
class MeteringLabel(model_base.BASEV2, model_base.HasId,
                    model_base.HasProject):
    name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
    description = sa.Column(sa.String(db_const.LONG_DESCRIPTION_FIELD_SIZE))
    rules = orm.relationship(MeteringLabelRule, backref="label",
                             cascade="delete", lazy="subquery")
    routers = orm.relationship(
        l3_models.Router,
        primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
        foreign_keys='MeteringLabel.tenant_id',
        lazy='subquery',
        uselist=True)
    shared = sa.Column(sa.Boolean, default=False,
                       server_default=sql.false())
class ExternalNetwork(model_base.BASEV2):
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    # introduced by auto-allocated-topology extension
    is_default = sa.Column(sa.Boolean(), nullable=False,
                           server_default=sql.false())
    # Add a relationship to the Network model in order to instruct
    # SQLAlchemy to eagerly load this association
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("external", lazy='joined',
                            uselist=False, cascade='delete'))
class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2,
                 model_base.HasId, model_base.HasProject):
    """Represents a neutron subnet pool."""

    name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
    ip_version = sa.Column(sa.Integer, nullable=False)
    default_prefixlen = sa.Column(sa.Integer, nullable=False)
    min_prefixlen = sa.Column(sa.Integer, nullable=False)
    max_prefixlen = sa.Column(sa.Integer, nullable=False)
    # TODO(imalinovskiy): drop this field when contract migrations will be
    # allowed again
    # NOTE(imalinovskiy): this field cannot be removed from model due to
    # functional test test_models_sync, trailing underscore is required to
    # prevent conflicts with RBAC code
    shared_ = sa.Column("shared", sa.Boolean, nullable=False,
                        server_default=sql.false())
    is_default = sa.Column(sa.Boolean, nullable=False,
                           server_default=sql.false())
    default_quota = sa.Column(sa.Integer, nullable=True)
    hash = sa.Column(sa.String(36), nullable=False, server_default='')
    address_scope_id = sa.Column(sa.String(36), nullable=True)
    prefixes = orm.relationship(SubnetPoolPrefix,
                                backref='subnetpools',
                                cascade='all, delete, delete-orphan',
                                lazy='subquery')
    rbac_entries = sa.orm.relationship(rbac_db_models.SubnetPoolRBAC,
                                       backref='subnetpools',
                                       lazy='subquery',
                                       cascade='all, delete, delete-orphan')
    api_collections = [subnetpool_def.COLLECTION_NAME]
    collection_resource_map = {subnetpool_def.COLLECTION_NAME:
                               subnetpool_def.RESOURCE_NAME}
    tag_support = True
def get_down_router_bindings(cls, context, cutoff):
    query = (context.session.query(l3agent.RouterL3AgentBinding).
             join(agent_model.Agent).
             filter(agent_model.Agent.heartbeat_timestamp < cutoff,
                    agent_model.Agent.admin_state_up).
             outerjoin(l3_attrs.RouterExtraAttributes,
                       l3_attrs.RouterExtraAttributes.router_id ==
                       l3agent.RouterL3AgentBinding.router_id).
             filter(sa.or_(
                 l3_attrs.RouterExtraAttributes.ha == sql.false(),
                 l3_attrs.RouterExtraAttributes.ha == sql.null())))
    bindings = [
        cls._load_object(context, db_obj) for db_obj in query.all()
    ]
    return bindings
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up."""
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents('L3', agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)

    context = n_ctx.get_admin_context()
    down_bindings = (
        context.session.query(RouterL3AgentBinding).
        join(agents_db.Agent).
        filter(agents_db.Agent.heartbeat_timestamp < cutoff,
               agents_db.Agent.admin_state_up).
        outerjoin(l3_attrs_db.RouterExtraAttributes,
                  l3_attrs_db.RouterExtraAttributes.router_id ==
                  RouterL3AgentBinding.router_id).
        filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                      l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
    try:
        for binding in down_bindings:
            agent_mode = self._get_agent_mode(binding.l3_agent)
            if agent_mode == constants.L3_AGENT_MODE_DVR:
                # rescheduling from l3 dvr agent on compute node doesn't
                # make sense. Router will be removed from that agent once
                # there are no dvr serviceable ports on that compute node
                LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
                             'Not rescheduling from agent in \'dvr\' '
                             'mode.'), {'host': binding.l3_agent.host})
                continue

            LOG.warn(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {'router': binding.router_id,
                 'agent': binding.l3_agent_id,
                 'dead_time': agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    oslo_messaging.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
class RoleAssignment(db.Model):
    __tablename__ = "roleassignment"
    __table_args__ = (
        #
        CheckConstraint(
            "(CAST(anonymous AS INTEGER) = 1)"
            " OR "
            "((CAST(anonymous AS INTEGER) = 0)"
            " AND "
            " ((user_id IS NOT NULL AND group_id IS NULL)"
            " OR "
            " (user_id IS NULL AND group_id IS NOT NULL)))",
            name="roleassignment_ck_user_xor_group",
        ),
        #
        UniqueConstraint(
            "anonymous",
            "user_id",
            "group_id",
            "role",
            "object_id",
            name="assignment_mapped_role_unique",
        ),
    )

    id = Column(Integer, primary_key=True, autoincrement=True,
                nullable=False)
    role = Column(RoleType, index=True, nullable=False)
    anonymous = Column(
        "anonymous",
        Boolean,
        index=True,
        nullable=True,
        default=False,
        server_default=sql.false(),
    )
    user_id = Column(Integer, ForeignKey("user.id", ondelete="CASCADE"),
                     index=True)
    user = relationship(User, lazy="joined")
    group_id = Column(Integer, ForeignKey("group.id", ondelete="CASCADE"),
                      index=True)
    group = relationship(Group, lazy="joined")
    object_id = Column(Integer, ForeignKey(Entity.id, ondelete="CASCADE"),
                       index=True)
    object = relationship(Entity, lazy="select")
def _apply_filters_to_query(self, query, model, filters, context=None):
    if filters:
        for key, value in six.iteritems(filters):
            column = getattr(model, key, None)
            # NOTE(kevinbenton): if column is a hybrid property that
            # references another expression, attempting to convert to
            # a boolean will fail so we must compare to None.
            # See "An Important Expression Language Gotcha" in:
            # docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
            if column is not None:
                if not value:
                    query = query.filter(sql.false())
                    return query
                query = query.filter(column.in_(value))
            elif key == 'shared' and hasattr(model, 'rbac_entries'):
                # translate a filter on shared into a query against the
                # object's rbac entries
                rbac, join_params, oid_col = self._get_rbac_query_params(
                    model)
                query = query.outerjoin(*join_params, aliased=True)
                matches = [rbac.target_tenant == '*']
                if context:
                    matches.append(rbac.target_tenant == context.tenant_id)
                # any 'access_as_shared' records that match the
                # wildcard or requesting tenant
                is_shared = and_(rbac.action == 'access_as_shared',
                                 or_(*matches))
                if not value[0]:
                    # NOTE(kevinbenton): we need to find objects that don't
                    # have an entry that matches the criteria above so
                    # we use a subquery to exclude them.
                    # We can't just filter the inverse of the query above
                    # because that will still give us a network shared to
                    # our tenant (or wildcard) if it's shared to another
                    # tenant.
                    is_shared = ~oid_col.in_(
                        query.session.query(
                            rbac.object_id).filter(is_shared))
                query = query.filter(is_shared)
    for _nam, hooks in six.iteritems(
            self._model_query_hooks.get(model, {})):
        result_filter = hooks.get('result_filters', None)
        if isinstance(result_filter, six.string_types):
            result_filter = getattr(self, result_filter, None)
        if result_filter:
            query = result_filter(query, filters)
    return query
def get_ids_related_to(object_type, related_type, related_ids=None):
    """Get ids of related objects.

    Get a list of all ids for objects with object_type that are related to
    any of the objects with type related_type and id in related_ids.
    """
    if isinstance(related_ids, (int, long)):
        related_ids = [related_ids]

    if not related_ids:
        return db.session.query(Relationship.source_id).filter(sql.false())

    if (object_type in Types.scoped and related_type in Types.all or
            related_type in Types.scoped and object_type in Types.all):
        return _assessment_object_mappings(
            object_type, related_type, related_ids)

    if (object_type in Types.parents and related_type in Types.all or
            related_type in Types.parents and object_type in Types.all):
        return _parent_object_mappings(
            object_type, related_type, related_ids)

    destination_ids = db.session.query(Relationship.destination_id).filter(
        and_(
            Relationship.destination_type == object_type,
            Relationship.source_type == related_type,
            Relationship.source_id.in_(related_ids),
        ))
    source_ids = db.session.query(Relationship.source_id).filter(
        and_(
            Relationship.source_type == object_type,
            Relationship.destination_type == related_type,
            Relationship.destination_id.in_(related_ids),
        ))

    queries = [destination_ids, source_ids]
    queries.extend(
        get_extension_mappings(object_type, related_type, related_ids))
    queries.extend(
        get_special_mappings(object_type, related_type, related_ids))
    if (object_type in Types.trans_scope and related_type in Types.all or
            object_type in Types.all and related_type in Types.trans_scope):
        queries.append(
            _assessment_object_mappings(
                object_type, related_type, related_ids))

    return _array_union(queries)
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up."""
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents('L3', agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)

    context = n_ctx.get_admin_context()
    down_bindings = (
        context.session.query(RouterL3AgentBinding).
        join(agents_db.Agent).
        filter(agents_db.Agent.heartbeat_timestamp < cutoff,
               agents_db.Agent.admin_state_up).
        outerjoin(l3_attrs_db.RouterExtraAttributes,
                  l3_attrs_db.RouterExtraAttributes.router_id ==
                  RouterL3AgentBinding.router_id).
        filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                      l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
    try:
        agents_back_online = set()
        for binding in down_bindings:
            if binding.l3_agent_id in agents_back_online:
                continue
            else:
                agent = self._get_agent(context, binding.l3_agent_id)
                if agent.is_active:
                    agents_back_online.add(binding.l3_agent_id)
                    continue

            LOG.warn(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {'router': binding.router_id,
                 'agent': binding.l3_agent_id,
                 'dead_time': agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    oslo_messaging.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
class N1kvVlanAllocation(model_base.BASEV2):
    """Represents allocation state of vlan_id on physical network."""

    __tablename__ = 'cisco_n1kv_vlan_allocations'

    physical_network = sa.Column(sa.String(64),
                                 nullable=False,
                                 primary_key=True)
    vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                        autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
                          server_default=sql.false())
    network_profile_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('cisco_network_profiles.id',
                                                 ondelete="CASCADE"),
                                   nullable=False)
def upgrade():
    op.create_table(
        'auto_allocated_topologies',
        sa.Column('tenant_id', sa.String(length=255), primary_key=True),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='SET NULL'),
    )
    op.add_column('externalnetworks',
                  sa.Column('is_default', sa.Boolean(), nullable=False,
                            server_default=sql.false()))
class Vim(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
          models_v1.Audit):
    type = sa.Column(sa.String(64), nullable=False)
    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text, nullable=True)
    placement_attr = sa.Column(types.Json, nullable=True)
    shared = sa.Column(sa.Boolean, default=True, server_default=sql.true(),
                       nullable=False)
    is_default = sa.Column(sa.Boolean, default=False,
                           server_default=sql.false(), nullable=False)
    vim_auth = orm.relationship('VimAuth')
    status = sa.Column(sa.String(255), nullable=False)
class MalwareVerdict(db.Model):
    __tablename__ = "malware_verdicts"

    run_date = Column(DateTime, nullable=False, server_default=sql.func.now())
    check_id = Column(
        ForeignKey("malware_checks.id", onupdate="CASCADE",
                   ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    # TODO: When GH-4440 is resolved, we should remove these CASCADEs to
    # ensure that an auditable history of malware check verdicts remains in
    # the event that a Project, Release, or File is removed by its
    # maintainers.
    file_id = Column(ForeignKey("release_files.id", ondelete="CASCADE"),
                     nullable=True)
    release_id = Column(ForeignKey("releases.id", ondelete="CASCADE"),
                        nullable=True)
    project_id = Column(ForeignKey("projects.id", ondelete="CASCADE"),
                        nullable=True)
    classification = Column(
        Enum(VerdictClassification,
             values_callable=lambda x: [e.value for e in x]),
        nullable=False,
    )
    confidence = Column(
        Enum(VerdictConfidence,
             values_callable=lambda x: [e.value for e in x]),
        nullable=False,
    )
    message = Column(Text, nullable=True)
    details = Column(JSONB, nullable=True)
    manually_reviewed = Column(Boolean, nullable=False,
                               server_default=sql.false())
    reviewer_verdict = Column(
        Enum(VerdictClassification,
             values_callable=lambda x: [e.value for e in x]),
        nullable=True,
    )
    full_report_link = Column(String, nullable=True)

    check = orm.relationship("MalwareCheck", foreign_keys=[check_id],
                             lazy=True)
    release_file = orm.relationship("File", foreign_keys=[file_id], lazy=True)
    release = orm.relationship("Release", foreign_keys=[release_id],
                               lazy=True)
    project = orm.relationship("Project", foreign_keys=[project_id],
                               lazy=True)
def get_switchport_bindings(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
    marker_obj = self._get_marker_obj(context, 'switchport_bindings',
                                      limit, marker)
    query = context.session.query(
        nuage_models.NuageSwitchportBinding,
        nuage_models.NuageSwitchportMapping.switch_id,
        nuage_models.NuageSwitchportMapping.port_id)
    query = query.outerjoin(
        nuage_models.NuageSwitchportMapping,
        nuage_models.NuageSwitchportMapping.port_uuid ==
        nuage_models.NuageSwitchportBinding.switchport_uuid)
    query = query.distinct()
    if filters:
        for key, value in six.iteritems(filters):
            column = getattr(nuage_models.NuageSwitchportBinding, key, None)
            if column is None:
                column = getattr(nuage_models.NuageSwitchportMapping,
                                 key, None)
            if column is not None:
                if not value:
                    query = query.filter(sql.false())
                else:
                    query = query.filter(column.in_(value))
    if limit and page_reverse and sorts:
        sorts = [(s[0], not s[1]) for s in sorts]
    query = sa_utils.paginate_query(query,
                                    nuage_models.NuageSwitchportBinding,
                                    limit, sorts, marker_obj=marker_obj)
    items = [
        self._make_switchport_binding_dict_from_tuple(c, fields)
        for c in query
    ]
    if limit and page_reverse:
        items.reverse()
    return items
class Classifier(db.ModelBase): __tablename__ = "trove_classifiers" __table_args__ = ( Index("trove_class_class_idx", "classifier"), Index("trove_class_id_idx", "id"), ) __repr__ = make_repr("classifier") id = Column(Integer, primary_key=True, nullable=False) classifier = Column(Text, unique=True) deprecated = Column(Boolean, nullable=False, server_default=sql.false()) l2 = Column(Integer) l3 = Column(Integer) l4 = Column(Integer) l5 = Column(Integer)
def filter_(*permissions, **kwargs):
    """
    Constructs a clause to filter all bearers or targets for a given
    bearer or target.
    """
    bearer = kwargs['bearer']
    target = kwargs.get('target')
    bearer_cls = type_for(bearer)

    # We need a query object. There are many ways to get one: either we can
    # be passed one, or we can make one from the session. We can either be
    # passed the session, or we can grab the session from the bearer passed.
    if 'query' in kwargs:
        query = kwargs['query']
    elif 'session' in kwargs:
        query = kwargs['session'].query(target)
    else:
        query = object_session(bearer).query(target)

    getter = functools.partial(
        registry.retrieve, bearer=bearer_cls, target=target)

    try:
        # Generate a hash of {rulefn: permission} that we can use later
        # to collect all of the rules.
        if len(permissions):
            rules = {getter(permission=x): x for x in permissions}
        else:
            rules = {getter(): None}
    except KeyError:
        # No rules defined. Default to no permission.
        return query.filter(sql.false())

    # Invoke all the rules and collect the results.
    # Abusing reduce here to invoke each rule and send the return value
    # (query) from one rule to the next one. In this way the query becomes
    # increasingly decorated as it marches through the system.
    # q == query
    # r == (rulefn, permission)
    reducer = lambda q, r: r[0](permission=r[1], query=q, bearer=bearer)
    return reduce(reducer, six.iteritems(rules), query)
class Classifier(db.ModelBase): __tablename__ = "trove_classifiers" __tableargs__ = CheckConstraint( "classifier not ilike 'private ::%'", name="ck_disallow_private_top_level_classifier", ) __repr__ = make_repr("classifier") id = Column(Integer, primary_key=True, nullable=False) classifier = Column(Text, unique=True) deprecated = Column(Boolean, nullable=False, server_default=sql.false()) l2 = Column(Integer) l3 = Column(Integer) l4 = Column(Integer) l5 = Column(Integer)
def _get_path_nodes_by_filter(self, filters=None, fields=None, sorts=None,
                              limit=None, marker=None, page_reverse=False):
    qry = self.admin_context.session.query(PathNode)
    if filters:
        for key, value in six.iteritems(filters):
            column = getattr(PathNode, key, None)
            if column:
                if not value:
                    qry = qry.filter(sql.false())
                else:
                    qry = qry.filter(column == value)
    return qry
class GeneveAllocation(model_base.BASEV2):
    __tablename__ = 'ml2_geneve_allocations'

    geneve_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
                           autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
                          server_default=sql.false(), index=True)

    @classmethod
    def get_segmentation_id(cls):
        return cls.geneve_vni
def get_build_task(cls):
    query = (models.BuildChroot.query.join(models.Build)
             .filter(models.Build.canceled == false())
             .filter(or_(
                 models.BuildChroot.status == helpers.StatusEnum("pending"),
                 and_(
                     models.BuildChroot.status == helpers.StatusEnum("running"),
                     models.BuildChroot.started_on < int(time.time() - 1.1 * MAX_BUILD_TIMEOUT),
                     models.BuildChroot.ended_on.is_(None)
                 )
             ))
             .filter(or_(
                 models.BuildChroot.last_deferred.is_(None),
                 models.BuildChroot.last_deferred < int(time.time() - DEFER_BUILD_SECONDS)
             ))
             ).order_by(models.Build.is_background.asc(),
                        models.BuildChroot.build_id.asc())
    return query.first()
class OAuthClient(Base):
    __tablename__ = 'oauth_client'
    __table_args__ = {'schema': 'bookbrainz'}

    client_id = Column(UUID(as_uuid=True), primary_key=True,
                       server_default=text('public.uuid_generate_v4()'))
    client_secret = Column(UUID(as_uuid=True), unique=True, index=True,
                           nullable=False,
                           server_default=text('public.uuid_generate_v4()'))

    is_confidential = Column(Boolean, nullable=False,
                             server_default=sql.false())

    _redirect_uris = Column(UnicodeText, nullable=False, server_default='')
    _default_scopes = Column(UnicodeText, nullable=False, server_default='')

    # creator of the client, not required
    owner_id = Column(Integer,
                      ForeignKey('bookbrainz.user.user_id', deferrable=True),
                      nullable=False)

    @property
    def client_type(self):
        if self.is_confidential:
            return 'confidential'
        return 'public'

    @property
    def redirect_uris(self):
        if self._redirect_uris:
            return self._redirect_uris.split()
        return []

    @property
    def default_redirect_uri(self):
        return ''

    @property
    def default_scopes(self):
        if self._default_scopes:
            return self._default_scopes.split()
        return []
class RelationshipType(Base):
    __tablename__ = 'rel_type'
    __table_args__ = {'schema': 'bookbrainz'}

    relationship_type_id = Column(Integer, primary_key=True)
    label = Column(Unicode(255), nullable=False, unique=True)
    parent_id = Column(
        Integer,
        ForeignKey('bookbrainz.rel_type.relationship_type_id',
                   deferrable=True))
    child_order = Column(Integer, nullable=False, server_default=text('0'))
    description = Column(UnicodeText, nullable=False)
    template = Column(UnicodeText, nullable=False)
    deprecated = Column(Boolean, nullable=False, server_default=sql.false())
def _apply_filters_to_query(self, query, model, filters):
    if filters:
        for key, value in six.iteritems(filters):
            column = getattr(model, key, None)
            if column:
                if not value:
                    query = query.filter(sql.false())
                    return query
                query = query.filter(column.in_(value))
    for _nam, hooks in six.iteritems(
            self._model_query_hooks.get(model, {})):
        result_filter = hooks.get('result_filters', None)
        if isinstance(result_filter, six.string_types):
            result_filter = getattr(self, result_filter, None)
        if result_filter:
            query = result_filter(query, filters)
    return query
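# Unlike the variants earlier in this collection that compare
# `column is not None`, this version truth-tests the column directly; per
# the NOTE in the earlier _apply_filters_to_query, that can misbehave for
# hybrid properties that resolve to SQL expressions, where boolean coercion
# raises instead of returning False.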
def has_property(self, prop):
    property_granted_select = select(
        [null()],
        from_obj=[
            Property.__table__,
            PropertyGroup.__table__,
            Membership.__table__
        ]).where(
        and_(Property.name == prop,
             Property.property_group_id == PropertyGroup.id,
             PropertyGroup.id == Membership.group_id,
             Membership.user_id == self.id,
             Membership.active))  # .cte("property_granted_select")

    return and_(
        not_(
            exists(
                property_granted_select.where(
                    Property.granted == false()))),
        exists(property_granted_select.where(Property.granted == true())))
def get_element_filter(self, view, value):
    field = self.get_field(view)
    try:
        value = self.deserialize(field, value)
    except ValidationError as e:
        if self._skip_invalid:
            return sql.false()
        raise ApiError(
            400,
            *(
                self.format_validation_error(message)
                for message, path in iter_validation_errors(e.messages)
            ),
        )
    return self.get_filter_clause(view, value)
def get_waiting(cls):
    """
    Return builds that aren't both started and finished (if build start
    submission fails, we still want to mark the build as non-waiting, if
    it ended).

    This has a very different goal than get_multiple, so it is implemented
    separately.
    """
    query = (models.Build.query.join(models.Build.copr)
             .join(models.User).join(models.BuildChroot)
             .options(db.contains_eager(models.Build.copr))
             .options(db.contains_eager("copr.user"))
             .filter((models.BuildChroot.started_on.is_(None))
                     | (models.BuildChroot.started_on < int(time.time() - 7200)))
             .filter(models.BuildChroot.ended_on.is_(None))
             .filter(models.Build.canceled == false())
             .order_by(models.Build.submitted_on.asc()))
    return query