def validate_rbac_policy_create(cls, resource, event, trigger, payload=None):
    context = payload.context
    policy = payload.request_body
    db_obj = obj_db_api.get_object(
        cls, context.elevated(), id=policy['object_id'])

    if not db_obj["address_scope_id"]:
        # Nothing to validate
        return

    with db_api.CONTEXT_READER.using(context):
        rbac_as_model = rbac_db_models.AddressScopeRBAC

        # Ensure that target project has access to AS
        shared_to_target_project_or_to_all = (
            sa.and_(
                rbac_as_model.target_project.in_(
                    ["*", policy['target_project']]),
                rbac_as_model.object_id == db_obj["address_scope_id"]
            )
        )

        matching_policies = model_query.query_with_hooks(
            context, rbac_db_models.AddressScopeRBAC
        ).filter(shared_to_target_project_or_to_all).count()

        if matching_policies == 0:
            raise ext_rbac.RbacPolicyInitError(
                object_id=policy['object_id'],
                reason=_("target project doesn't have access to "
                         "associated address scope."))

def _get_resource(self, context, model, id, for_update=False):
    resource = None
    try:
        if for_update:
            # To lock the instance for update, return a single
            # instance, instead of an instance with LEFT OUTER
            # JOINs that do not work in PostgreSQL
            query = model_query.query_with_hooks(context, model).options(
                lazyload('*')
            ).filter(
                model.id == id).with_lockmode('update')
            resource = query.one()
        else:
            resource = model_query.get_by_id(context, model, id)
    except exc.NoResultFound:
        with excutils.save_and_reraise_exception(reraise=False) as ctx:
            if issubclass(model,
                          (models.LoadBalancer, models.Listener,
                           models.L7Policy, models.L7Rule,
                           models.PoolV2, models.MemberV2,
                           models.HealthMonitorV2,
                           models.LoadBalancerStatistics,
                           models.SessionPersistenceV2)):
                raise loadbalancerv2.EntityNotFound(name=model.NAME, id=id)
            ctx.reraise = True
    return resource

def update_status(self, context, model, id, provisioning_status=None,
                  operating_status=None):
    with context.session.begin(subtransactions=True):
        if issubclass(model, models.LoadBalancer):
            try:
                model_db = (model_query.query_with_hooks(
                    context, model).filter(model.id == id).options(
                        orm.noload('vip_port')).one())
            except exc.NoResultFound:
                raise loadbalancerv2.EntityNotFound(
                    name=models.LoadBalancer.NAME, id=id)
        else:
            model_db = self._get_resource(context, model, id)
        if provisioning_status and (model_db.provisioning_status !=
                                    provisioning_status):
            model_db.provisioning_status = provisioning_status
            LOG.debug("Provisioning status of %s (id=%s) updated to: %s",
                      model_db.NAME, model_db.id, provisioning_status)
        if (operating_status and hasattr(model_db, 'operating_status') and
                model_db.operating_status != operating_status):
            model_db.operating_status = operating_status
            LOG.debug("Operating status of %s (id=%s) updated to: %s",
                      model_db.NAME, model_db.id, operating_status)

def _get_chain_id_by_group_id(self, context, sfc_plugin, portpairgroup_id):
    chain_group_assoc = model_query.query_with_hooks(
        context, sfc_db.ChainGroupAssoc).filter_by(
            portpairgroup_id=portpairgroup_id).first()
    if chain_group_assoc:
        return chain_group_assoc['portchain_id']
    return None

def _get_tap_id_association(self, context, tap_service_id):
    try:
        query = model_query.query_with_hooks(context, TapIdAssociation)
        return query.filter(
            TapIdAssociation.tap_service_id == tap_service_id).one()
    except exc.NoResultFound:
        raise taas.TapServiceNotFound(tap_id=tap_service_id)

def delete_bgp_speaker_router_insertion(self, context, bsp_id):
    with db_api.CONTEXT_WRITER.using(context):
        query = model_query.query_with_hooks(
            context, model.BgpSpeakerRouterAssociation)
        query.filter(
            model.BgpSpeakerRouterAssociation.bgp_speaker_id ==
            bsp_id).delete()

def _validate_flow_classifiers(self, context, fc_ids, pc_id=None):
    with db_api.CONTEXT_READER.using(context):
        fcs = [
            self._get_flow_classifier(context, fc_id)
            for fc_id in fc_ids
        ]
        for fc in fcs:
            fc_assoc = fc.chain_classifier_association
            if fc_assoc and fc_assoc['portchain_id'] != pc_id:
                raise ext_fc.FlowClassifierInUse(id=fc.id)

        query = model_query.query_with_hooks(context, PortChain)
        for port_chain_db in query.all():
            if port_chain_db['id'] == pc_id:
                continue
            pc_fc_ids = [
                assoc['flowclassifier_id']
                for assoc in port_chain_db.chain_classifier_associations
            ]
            pc_fcs = [
                self._get_flow_classifier(context, pc_fc_id)
                for pc_fc_id in pc_fc_ids
            ]
            for pc_fc in pc_fcs:
                for fc in fcs:
                    fc_cls = fc_db.FlowClassifierDbPlugin
                    if fc_cls.flowclassifier_basic_conflict(pc_fc, fc):
                        raise ext_sfc.PortChainFlowClassifierInConflict(
                            fc_id=fc['id'],
                            pc_id=port_chain_db['id'],
                            pc_fc_id=pc_fc['id'])

def _get_tap_id_association(self, context, tap_service_id):
    try:
        query = model_query.query_with_hooks(context, TapIdAssociation)
        return query.filter(
            TapIdAssociation.tap_service_id == tap_service_id).one()
    except exc.NoResultFound:
        raise taas.TapServiceNotFound(tap_id=tap_service_id)

def update_port_detail(self, id, port):
    with db_api.CONTEXT_WRITER.using(self.admin_context):
        port_obj = self._get_port_detail(id)
        for key, value in port.items():
            if key == 'path_nodes':
                pns = []
                for pn in value:
                    pn_id = pn['pathnode_id']
                    self._get_path_node(pn_id)
                    query = model_query.query_with_hooks(
                        self.admin_context, PathPortAssoc)
                    pn_association = query.filter_by(
                        pathnode_id=pn_id, portpair_id=id).first()
                    if not pn_association:
                        pn_association = PathPortAssoc(
                            pathnode_id=pn_id,
                            portpair_id=id,
                            weight=pn.get('weight', 1))
                    pns.append(pn_association)
                port_obj[key] = pns
            else:
                port_obj[key] = value
        port_obj.update(port)
        return self._make_port_detail_dict(port_obj)

def _get_chain_id_by_flowclassifier_id(self, context, fc_plugin,
                                       flowclassifier_id):
    chain_classifier_assoc = model_query.query_with_hooks(
        context, sfc_db.ChainClassifierAssoc).filter_by(
            flowclassifier_id=flowclassifier_id).first()
    if chain_classifier_assoc:
        return chain_classifier_assoc['portchain_id']
    return None

def get_fwg_attached_to_port(self, context, port_id):
    """Return the ID of the firewall group attached to a port, or None."""
    fwg_port = model_query.query_with_hooks(
        context, FirewallGroupPortAssociation).filter_by(
            port_id=port_id).first()
    if fwg_port:
        return fwg_port.firewall_group_id
    return None

def _get_default_fwg_id(self, context, tenant_id):
    """Return the ID of the default firewall group for a tenant, or None."""
    default_fwg = model_query.query_with_hooks(
        context, FirewallGroup).filter_by(
            project_id=tenant_id, name=const.DEFAULT_FWG).first()
    if default_fwg:
        return default_fwg.id
    return None

def _get_srv6_encap_rule(self, context, encap_net_id):
    try:
        query = model_query.query_with_hooks(context, Srv6EncapRule)
        return query.filter(
            Srv6EncapRule.srv6_encap_network_id == encap_net_id).all()
    except exc.NoResultFound:
        # NOTE: query.all() returns an empty list rather than raising
        # NoResultFound, so this branch is effectively unreachable.
        # TODO(hichihara)
        pass

def _get_chain_id_by_flowclassifier_id(
        self, context, fc_plugin, flowclassifier_id):
    chain_classifier_assoc = model_query.query_with_hooks(
        context, sfc_db.ChainClassifierAssoc).filter_by(
            flowclassifier_id=flowclassifier_id).first()
    if chain_classifier_assoc:
        return chain_classifier_assoc['portchain_id']
    return None

def _delete_port_security_group_bindings(self, context, port_id):
    with db_api.CONTEXT_WRITER.using(context):
        query = model_query.query_with_hooks(
            context, sg_models.SecurityGroupPortBinding)
        bindings = query.filter(
            sg_models.SecurityGroupPortBinding.port_id == port_id)
        for binding in bindings:
            context.session.delete(binding)

def _get_net_assoc(self, context, assoc_id, bgpvpn_id):
    try:
        query = model_query.query_with_hooks(context, BGPVPNNetAssociation)
        return query.filter(
            BGPVPNNetAssociation.id == assoc_id,
            BGPVPNNetAssociation.bgpvpn_id == bgpvpn_id).one()
    except exc.NoResultFound:
        raise bgpvpn_ext.BGPVPNNetAssocNotFound(id=assoc_id,
                                                bgpvpn_id=bgpvpn_id)

def create_flow_classifier(self, context, flow_classifier):
    fc = flow_classifier['flow_classifier']
    project_id = fc['project_id']
    l7_parameters = {
        key: L7Parameter(key, val)
        for key, val in fc['l7_parameters'].items()}
    ethertype = fc['ethertype']
    protocol = fc['protocol']
    source_port_range_min = fc['source_port_range_min']
    source_port_range_max = fc['source_port_range_max']
    self._check_port_range_valid(source_port_range_min,
                                 source_port_range_max,
                                 protocol)
    destination_port_range_min = fc['destination_port_range_min']
    destination_port_range_max = fc['destination_port_range_max']
    self._check_port_range_valid(destination_port_range_min,
                                 destination_port_range_max,
                                 protocol)
    source_ip_prefix = fc['source_ip_prefix']
    self._check_ip_prefix_valid(source_ip_prefix, ethertype)
    destination_ip_prefix = fc['destination_ip_prefix']
    self._check_ip_prefix_valid(destination_ip_prefix, ethertype)
    logical_source_port = fc['logical_source_port']
    logical_destination_port = fc['logical_destination_port']
    with db_api.CONTEXT_WRITER.using(context):
        if logical_source_port is not None:
            self._get_port(context, logical_source_port)
        if logical_destination_port is not None:
            self._get_port(context, logical_destination_port)
        query = model_query.query_with_hooks(context, FlowClassifier)
        for flow_classifier_db in query.all():
            if self.flowclassifier_conflict(fc, flow_classifier_db):
                raise fc_ext.FlowClassifierInConflict(
                    id=flow_classifier_db['id'])
        flow_classifier_db = FlowClassifier(
            id=uuidutils.generate_uuid(),
            project_id=project_id,
            name=fc['name'],
            description=fc['description'],
            ethertype=ethertype,
            protocol=protocol,
            source_port_range_min=source_port_range_min,
            source_port_range_max=source_port_range_max,
            destination_port_range_min=destination_port_range_min,
            destination_port_range_max=destination_port_range_max,
            source_ip_prefix=source_ip_prefix,
            destination_ip_prefix=destination_ip_prefix,
            logical_source_port=logical_source_port,
            logical_destination_port=logical_destination_port,
            l7_parameters=l7_parameters)
        context.session.add(flow_classifier_db)
        return self._make_flow_classifier_dict(flow_classifier_db)

def _get_gateway_device(self, context, id):
    try:
        query = model_query.query_with_hooks(context, GatewayDevice)
        gw_dev_db = query.filter(GatewayDevice.id == id).one()
    except exc.NoResultFound:
        raise gw_device_ext.GatewayDeviceNotFound(id=id)
    return gw_dev_db

def get_mac_learning_state(self, context, port_id):
    try:
        query = model_query.query_with_hooks(
            context, nsx_models.MacLearningState)
        state = query.filter(
            nsx_models.MacLearningState.port_id == port_id).one()
        return state.mac_learning_enabled
    except exc.NoResultFound:
        return None

def get_mac_learning_state(self, context, port_id):
    try:
        query = model_query.query_with_hooks(context,
                                             nsx_models.MacLearningState)
        state = query.filter(
            nsx_models.MacLearningState.port_id == port_id).one()
        return state.mac_learning_enabled
    except exc.NoResultFound:
        return None

def _get_tunnel_ip_from_ip_address(self, context, ip):
    try:
        query = model_query.query_with_hooks(context, GatewayTunnelIp)
        gw_tun_ip_db = query.filter(
            GatewayTunnelIp.tunnel_ip == ip).one()
    except exc.NoResultFound:
        pass
    else:
        return gw_tun_ip_db

def _update_mac_learning_state(self, context, port_id, enabled):
    try:
        query = model_query.query_with_hooks(
            context, nsx_models.MacLearningState)
        state = query.filter(
            nsx_models.MacLearningState.port_id == port_id).one()
        state.update({mac.MAC_LEARNING: enabled})
    except exc.NoResultFound:
        self._create_mac_learning_state(context,
                                        {'id': port_id,
                                         mac.MAC_LEARNING: enabled})

def _get_hw_vtep_from_management_ip(self, context, management_ip):
    try:
        query = model_query.query_with_hooks(context, GatewayHwVtepDevice)
        gw_hw_vtep_db = query.filter(
            GatewayHwVtepDevice.management_ip == management_ip).one()
    except exc.NoResultFound:
        pass
    else:
        return gw_hw_vtep_db

def _update_mac_learning_state(self, context, port_id, enabled):
    try:
        query = model_query.query_with_hooks(context,
                                             nsx_models.MacLearningState)
        state = query.filter(
            nsx_models.MacLearningState.port_id == port_id).one()
        state.update({mac.MAC_LEARNING: enabled})
    except exc.NoResultFound:
        self._create_mac_learning_state(context, {
            'id': port_id,
            mac.MAC_LEARNING: enabled})

def _get_gateway_device_from_resource(self, context, resource_type,
                                      resource_id):
    try:
        device_model = get_type_model_map()[resource_type]
        query = model_query.query_with_hooks(context, device_model)
        gw_dev_db = query.filter(
            device_model.resource_id == resource_id).one()
    except exc.NoResultFound:
        pass
    else:
        return gw_dev_db

def _get_remote_mac_entry(self, context, id, gateway_device_id):
    try:
        query = model_query.query_with_hooks(
            context, GatewayRemoteMacTable)
        rmt_db = query.filter(GatewayRemoteMacTable.id == id).one()
        if rmt_db.device_id != gateway_device_id:
            raise gw_device_ext.RemoteMacEntryWrongGatewayDevice(
                id=id, gateway_device_id=gateway_device_id)
    except exc.NoResultFound:
        raise gw_device_ext.RemoteMacEntryNotFound(id=id)
    return rmt_db

def create_flow_classifier(self, context, flow_classifier):
    fc = flow_classifier['flow_classifier']
    project_id = fc['project_id']
    l7_parameters = {
        key: L7Parameter(key, val)
        for key, val in fc['l7_parameters'].items()
    }
    ethertype = fc['ethertype']
    protocol = fc['protocol']
    source_port_range_min = fc['source_port_range_min']
    source_port_range_max = fc['source_port_range_max']
    self._check_port_range_valid(source_port_range_min,
                                 source_port_range_max,
                                 protocol)
    destination_port_range_min = fc['destination_port_range_min']
    destination_port_range_max = fc['destination_port_range_max']
    self._check_port_range_valid(destination_port_range_min,
                                 destination_port_range_max,
                                 protocol)
    source_ip_prefix = fc['source_ip_prefix']
    self._check_ip_prefix_valid(source_ip_prefix, ethertype)
    destination_ip_prefix = fc['destination_ip_prefix']
    self._check_ip_prefix_valid(destination_ip_prefix, ethertype)
    logical_source_port = fc['logical_source_port']
    logical_destination_port = fc['logical_destination_port']
    with db_api.CONTEXT_WRITER.using(context):
        if logical_source_port is not None:
            self._get_port(context, logical_source_port)
        if logical_destination_port is not None:
            self._get_port(context, logical_destination_port)
        query = model_query.query_with_hooks(context, FlowClassifier)
        for flow_classifier_db in query.all():
            if self.flowclassifier_conflict(fc, flow_classifier_db):
                raise fc_ext.FlowClassifierInConflict(
                    id=flow_classifier_db['id'])
        flow_classifier_db = FlowClassifier(
            id=uuidutils.generate_uuid(),
            project_id=project_id,
            name=fc['name'],
            description=fc['description'],
            ethertype=ethertype,
            protocol=protocol,
            source_port_range_min=source_port_range_min,
            source_port_range_max=source_port_range_max,
            destination_port_range_min=destination_port_range_min,
            destination_port_range_max=destination_port_range_max,
            source_ip_prefix=source_ip_prefix,
            destination_ip_prefix=destination_ip_prefix,
            logical_source_port=logical_source_port,
            logical_destination_port=logical_destination_port,
            l7_parameters=l7_parameters)
        context.session.add(flow_classifier_db)
        return self._make_flow_classifier_dict(flow_classifier_db)

def get_bgp_speaker_associated_with_router(self, context, router_id):
    """Get the BGP speaker associated with a given router."""
    bgp_sp_id = None
    try:
        query = model_query.query_with_hooks(
            context, model.BgpSpeakerRouterAssociation)
        bsra = query.filter(
            model.BgpSpeakerRouterAssociation.router_id == router_id).one()
        bgp_sp_id = bsra['bgp_speaker_id']
    except exc.NoResultFound:
        LOG.debug("the router %s is not attached to any bgp speaker",
                  router_id)
    return bgp_sp_id

def _setup_chain_group_associations(
        self, context, port_chain, pg_ids):
    with db_api.CONTEXT_READER.using(context):
        chain_group_associations = []
        for pg_id in pg_ids:
            query = model_query.query_with_hooks(context, ChainGroupAssoc)
            chain_group_association = query.filter_by(
                portchain_id=port_chain.id, portpairgroup_id=pg_id).first()
            if not chain_group_association:
                chain_group_association = ChainGroupAssoc(
                    portpairgroup_id=pg_id)
            chain_group_associations.append(chain_group_association)
        port_chain.chain_group_associations = chain_group_associations

def _setup_graph_chain_associations(self, context, graph_db, port_chains):
    with db_api.CONTEXT_READER.using(context):
        graph_chain_associations = []
        for src_chain in port_chains:
            query = model_query.query_with_hooks(context, GraphChainAssoc)
            for dst_chain in port_chains[src_chain]:
                graph_chain_association = query.filter_by(
                    service_graph_id=graph_db.id,
                    src_chain=src_chain,
                    dst_chain=dst_chain).first()
                if not graph_chain_association:
                    graph_chain_association = GraphChainAssoc(
                        service_graph_id=graph_db.id,
                        src_chain=src_chain,
                        dst_chain=dst_chain)
                graph_chain_associations.append(graph_chain_association)
        graph_db.graph_chain_associations = graph_chain_associations

def _setup_chain_classifier_associations(
        self, context, port_chain, fc_ids):
    with db_api.CONTEXT_READER.using(context):
        chain_classifier_associations = []
        for fc_id in fc_ids:
            query = model_query.query_with_hooks(
                context, ChainClassifierAssoc)
            chain_classifier_association = query.filter_by(
                portchain_id=port_chain.id, flowclassifier_id=fc_id).first()
            if not chain_classifier_association:
                chain_classifier_association = ChainClassifierAssoc(
                    flowclassifier_id=fc_id)
            chain_classifier_associations.append(
                chain_classifier_association)
        port_chain.chain_classifier_associations = (
            chain_classifier_associations)

def update_path_node(self, id, node):
    with db_api.CONTEXT_WRITER.using(self.admin_context):
        node_obj = self._get_path_node(id)
        for key, value in node.items():
            if key == 'portpair_details':
                pds = []
                for pd_id in value:
                    query = model_query.query_with_hooks(
                        self.admin_context, PathPortAssoc)
                    pd_association = query.filter_by(
                        pathnode_id=id, portpair_id=pd_id).first()
                    if not pd_association:
                        pd_association = PathPortAssoc(pathnode_id=id,
                                                       portpair_id=pd_id)
                    pds.append(pd_association)
                node_obj[key] = pds
            else:
                node_obj[key] = value
        return self._make_pathnode_dict(node_obj)

def _validate_port_pair_groups(self, context, pg_ids, pc_id=None):
    with db_api.CONTEXT_READER.using(context):
        prev_pg_tap_enabled = False
        for pg_id in pg_ids:
            pg = self._get_port_pair_group(context, pg_id)
            curr_pg_tap_enabled = pg['tap_enabled']
            if prev_pg_tap_enabled and curr_pg_tap_enabled:
                raise ext_tap.ConsecutiveTapPPGNotSupported()
            prev_pg_tap_enabled = curr_pg_tap_enabled
        query = model_query.query_with_hooks(context, PortChain)
        for port_chain_db in query.all():
            if port_chain_db['id'] == pc_id:
                continue
            pc_pg_ids = [
                assoc['portpairgroup_id']
                for assoc in port_chain_db.chain_group_associations
            ]
            if pc_pg_ids and pg_ids and pc_pg_ids == pg_ids:
                raise ext_sfc.InvalidPortPairGroups(
                    port_pair_groups=pg_ids, port_chain=port_chain_db.id)

def _any_port_chains_in_a_graph(self, context,
                                port_chains=set(), graph_id=None):
    if not port_chains:
        return False
    with db_api.CONTEXT_READER.using(context):
        query = model_query.query_with_hooks(context, ServiceGraph)
        for graph_db in query.all():
            if graph_db['id'] == graph_id:
                continue
            pc_ids = [
                assoc['src_chain']
                for assoc in graph_db.graph_chain_associations
            ]
            pc_ids.extend([
                assoc['dst_chain']
                for assoc in graph_db.graph_chain_associations
            ])
            if pc_ids and port_chains and set(
                    pc_ids).intersection(port_chains):
                return True
    return False

def _validate_port_pair_groups(self, context, pg_ids, pc_id=None):
    with db_api.CONTEXT_READER.using(context):
        prev_pg_tap_enabled = False
        for pg_id in pg_ids:
            pg = self._get_port_pair_group(context, pg_id)
            curr_pg_tap_enabled = pg['tap_enabled']
            if prev_pg_tap_enabled and curr_pg_tap_enabled:
                raise ext_tap.ConsecutiveTapPPGNotSupported()
            else:
                prev_pg_tap_enabled = curr_pg_tap_enabled
        query = model_query.query_with_hooks(context, PortChain)
        for port_chain_db in query.all():
            if port_chain_db['id'] == pc_id:
                continue
            pc_pg_ids = [
                assoc['portpairgroup_id']
                for assoc in port_chain_db.chain_group_associations
            ]
            if pc_pg_ids and pg_ids and pc_pg_ids == pg_ids:
                raise ext_sfc.InvalidPortPairGroups(
                    port_pair_groups=pg_ids, port_chain=port_chain_db.id)

def _get_resource(self, context, model, id, for_update=False):
    resource = None
    try:
        if for_update:
            # To lock the instance for update, return a single
            # instance, instead of an instance with LEFT OUTER
            # JOINs that do not work in PostgreSQL
            query = model_query.query_with_hooks(context, model).options(
                lazyload('*')).filter(
                    model.id == id).with_lockmode('update')
            resource = query.one()
        else:
            resource = self._get_by_id(context, model, id)
    except exc.NoResultFound:
        with excutils.save_and_reraise_exception(reraise=False) as ctx:
            if issubclass(model,
                          (models.LoadBalancer, models.Listener,
                           models.L7Policy, models.L7Rule,
                           models.PoolV2, models.MemberV2,
                           models.HealthMonitorV2,
                           models.LoadBalancerStatistics,
                           models.SessionPersistenceV2)):
                raise loadbalancerv2.EntityNotFound(name=model.NAME, id=id)
            ctx.reraise = True
    return resource

def update_path_node(self, id, node):
    with db_api.CONTEXT_WRITER.using(self.admin_context):
        node_obj = self._get_path_node(id)
        for key, value in node.items():
            if key == 'portpair_details':
                pds = []
                for pd_id in value:
                    query = model_query.query_with_hooks(
                        self.admin_context, PathPortAssoc)
                    pd_association = query.filter_by(
                        pathnode_id=id, portpair_id=pd_id).first()
                    if not pd_association:
                        pd_association = PathPortAssoc(
                            pathnode_id=id, portpair_id=pd_id)
                    pds.append(pd_association)
                node_obj[key] = pds
            else:
                node_obj[key] = value
        return self._make_pathnode_dict(node_obj)

def update_status(self, context, model, id, provisioning_status=None,
                  operating_status=None):
    with context.session.begin(subtransactions=True):
        if issubclass(model, models.LoadBalancer):
            try:
                model_db = (model_query.query_with_hooks(context, model).
                            filter(model.id == id).
                            options(orm.noload('vip_port')).
                            one())
            except exc.NoResultFound:
                raise loadbalancerv2.EntityNotFound(
                    name=models.LoadBalancer.NAME, id=id)
        else:
            model_db = self._get_resource(context, model, id)
        if provisioning_status and (model_db.provisioning_status !=
                                    provisioning_status):
            model_db.provisioning_status = provisioning_status
            LOG.debug("Provisioning status of %s (id=%s) updated to: %s",
                      model_db.NAME, model_db.id, provisioning_status)
        if (operating_status and hasattr(model_db, 'operating_status') and
                model_db.operating_status != operating_status):
            model_db.operating_status = operating_status
            LOG.debug("Operating status of %s (id=%s) updated to: %s",
                      model_db.NAME, model_db.id, operating_status)

def create_port_pair(self, context, port_pair):
    """Create a port pair."""
    pp = port_pair['port_pair']
    project_id = pp['project_id']
    with db_api.CONTEXT_WRITER.using(context):
        query = model_query.query_with_hooks(context, PortPair)
        pp_in_use = query.filter_by(
            ingress=pp['ingress'], egress=pp['egress']).first()
        if pp_in_use:
            raise ext_sfc.PortPairIngressEgressInUse(
                ingress=pp['ingress'],
                egress=pp['egress'],
                id=pp_in_use['id'])
        service_function_parameters = {
            key: ServiceFunctionParam(
                keyword=key, value=jsonutils.dumps(val))
            for key, val in pp['service_function_parameters'].items()
        }
        ingress = self._get_port(context, pp['ingress'])
        egress = self._get_port(context, pp['egress'])
        self._validate_port_pair_ingress_egress(ingress, egress)
        port_pair_db = PortPair(
            id=uuidutils.generate_uuid(),
            name=pp['name'],
            description=pp['description'],
            project_id=project_id,
            ingress=pp['ingress'],
            egress=pp['egress'],
            service_function_parameters=service_function_parameters)
        context.session.add(port_pair_db)
        return self._make_port_pair_dict(port_pair_db)

def _model_query(context, model):
    return model_query.query_with_hooks(context, model)
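

# Illustrative sketch, not taken from any of the projects above: a minimal
# example of how a query_with_hooks-based helper like _model_query above is
# typically used. MyModel and get_objects_for_project are hypothetical
# placeholders; only model_query.query_with_hooks(context, model), which
# returns a SQLAlchemy query with the registered hooks and project scoping
# applied, is the real neutron-lib API shown throughout this section.
def get_objects_for_project(context, project_id):
    # Additional filters are chained onto the hooked query as usual.
    query = model_query.query_with_hooks(context, MyModel)
    return query.filter_by(project_id=project_id).all()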