def create_policy_rule(self, context, rule_cls, policy_id, rule_data):
    """Create a QoS policy rule.

    :param context: neutron api request context
    :type context: neutron.context.Context
    :param rule_cls: the rule object class
    :type rule_cls: a class from the rule_object (qos.objects.rule) module
    :param policy_id: the id of the QosPolicy for which to create the rule
    :type policy_id: str uuid
    :param rule_data: the rule data to be applied
    :type rule_data: dict

    :returns: a QoS policy rule object
    """
    rule_type = rule_cls.rule_type
    rule_data = rule_data[rule_type + '_rule']

    with db_api.autonested_transaction(context.session):
        # Ensure that we have access to the policy.
        policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
        checker.check_bandwidth_rule_conflict(policy, rule_data)
        rule = rule_cls(context, qos_policy_id=policy_id, **rule_data)
        checker.check_rules_conflict(policy, rule)
        rule.create()
        policy.obj_load_attr('rules')
        self.validate_policy(context, policy)
        if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
            self.reject_min_bw_rule_updates(context, policy)
        self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                 context, policy)
    self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
    return rule
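A minimal usage sketch (not taken from the source) of the rule_data contract documented above: the payload is keyed by "<rule_type>_rule" and the inner dict carries the rule fields. The qos_plugin, context and policy_id names are assumed to be in scope; the field values are illustrative only.

# Illustrative sketch, assuming a QoS plugin instance is available as
# qos_plugin and a policy identified by policy_id already exists.
from neutron.objects.qos import rule as rule_object

rule_data = {'bandwidth_limit_rule': {'max_kbps': 10000,
                                      'max_burst_kbps': 1000}}
# QosBandwidthLimitRule.rule_type is 'bandwidth_limit', so create_policy_rule()
# looks up rule_data['bandwidth_limit_rule'] and passes those fields to the
# rule class constructor.
rule = qos_plugin.create_policy_rule(
    context, rule_object.QosBandwidthLimitRule, policy_id, rule_data)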
def delete_trunk(self, context, trunk_id):
    """Delete the specified trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        rules.trunk_can_be_managed(context, trunk)
        trunk_port_validator = rules.TrunkPortValidator(trunk.port_id)
        if trunk_port_validator.can_be_trunked_or_untrunked(context):
            # NOTE(status_police): when a trunk is deleted, the logical
            # object disappears from the datastore, therefore there is no
            # status transition involved. If PRECOMMIT failures occur,
            # the trunk remains in the status where it was.
            try:
                trunk.delete()
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.warning('Trunk driver raised exception when '
                                'deleting trunk port %s: %s',
                                trunk_id, str(e))
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             original_trunk=trunk)
            registry.notify(resources.TRUNK, events.PRECOMMIT_DELETE,
                            self, payload=payload)
        else:
            LOG.info('Trunk driver does not consider trunk %s '
                     'untrunkable', trunk_id)
            raise trunk_exc.TrunkInUse(trunk_id=trunk_id)
    registry.notify(constants.TRUNK, events.AFTER_DELETE, self,
                    payload=payload)
def get_policy_rule(self, context, rule_cls, rule_id, policy_id, fields=None):
    """Get a QoS policy rule.

    :param context: neutron api request context
    :type context: neutron.context.Context
    :param rule_cls: the rule object class
    :type rule_cls: a class from the rule_object (qos.objects.rule) module
    :param rule_id: the id of the QoS policy rule to get
    :type rule_id: str uuid
    :param policy_id: the id of the rule's policy
    :type policy_id: str uuid

    :returns: a QoS policy rule object
    :raises: qos_exc.QosRuleNotFound
    """
    with db_api.autonested_transaction(context.session):
        # Ensure we have access to the policy.
        policy_object.QosPolicy.get_policy_obj(context, policy_id)
        rule = rule_cls.get_object(context, id=rule_id)
    if not rule:
        raise qos_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
    return rule
def delete_policy_rule(self, context, rule_cls, rule_id, policy_id):
    """Delete a QoS policy rule.

    :param context: neutron api request context
    :type context: neutron.context.Context
    :param rule_cls: the rule object class
    :type rule_cls: a class from the rule_object (qos.objects.rule) module
    :param rule_id: the id of the QosPolicy Rule to delete
    :type rule_id: str uuid
    :param policy_id: the id of the rule's policy
    :type policy_id: str uuid

    :returns: None
    """
    with db_api.autonested_transaction(context.session):
        # Ensure we have access to the policy.
        policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
        rule = policy.get_rule_by_id(rule_id)
        rule.delete()
        policy.obj_load_attr('rules')
        if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
            self.reject_min_bw_rule_updates(context, policy)
        self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                 context, policy)
    self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
def update_trunk(self, context, trunk_id, trunk):
    """Update information for the specified trunk."""
    trunk_data = trunk['trunk']
    with db_api.autonested_transaction(context.session):
        trunk_obj = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk_obj)
        # NOTE(status_police): a trunk status should not change during an
        # update_trunk(), even in face of PRECOMMIT failures. This is
        # because only name and admin_state_up are being affected, and
        # these are DB properties only.
        trunk_obj.update_fields(trunk_data, reset_changes=True)
        trunk_obj.update()
        payload = events.DBEventPayload(context, resource_id=trunk_id,
                                        states=(original_trunk,),
                                        desired_state=trunk_obj,
                                        request_body=trunk_data)
        registry.publish(resources.TRUNK, events.PRECOMMIT_UPDATE, self,
                         payload=payload)
    registry.notify(resources.TRUNK, events.AFTER_UPDATE, self,
                    payload=callbacks.TrunkPayload(
                        context, trunk_id,
                        original_trunk=original_trunk,
                        current_trunk=trunk_obj))
    return trunk_obj
def get_policy_rules(self, context, rule_cls, policy_id, filters=None,
                     fields=None, sorts=None, limit=None, marker=None,
                     page_reverse=False):
    """Get QoS policy rules.

    :param context: neutron api request context
    :type context: neutron.context.Context
    :param rule_cls: the rule object class
    :type rule_cls: a class from the rule_object (qos.objects.rule) module
    :param policy_id: the id of the QosPolicy for which to get rules
    :type policy_id: str uuid

    :returns: QoS policy rule objects meeting the search criteria
    """
    with db_api.autonested_transaction(context.session):
        # Ensure we have access to the policy.
        policy_object.QosPolicy.get_policy_obj(context, policy_id)
        filters = filters or dict()
        filters[qos_consts.QOS_POLICY_ID] = policy_id
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        return rule_cls.get_objects(context, _pager=pager, **filters)
def delete_trunk(self, context, trunk_id):
    """Delete the specified trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        rules.trunk_can_be_managed(context, trunk)
        trunk_port_validator = rules.TrunkPortValidator(trunk.port_id)
        if trunk_port_validator.can_be_trunked_or_untrunked(context):
            # NOTE(status_police): when a trunk is deleted, the logical
            # object disappears from the datastore, therefore there is no
            # status transition involved. If PRECOMMIT failures occur,
            # the trunk remains in the status where it was.
            trunk.delete()
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             original_trunk=trunk)
            registry.notify(resources.TRUNK, events.PRECOMMIT_DELETE,
                            self, payload=payload)
        else:
            raise trunk_exc.TrunkInUse(trunk_id=trunk_id)
    registry.notify(resources.TRUNK, events.AFTER_DELETE, self,
                    payload=payload)
def create_ha_port_and_bind(self, plugin, context, router_id, tenant_id,
                            agent, is_manual_scheduling=False):
    """Creates and binds a new HA port for this agent."""
    ctxt = context.elevated()
    router_db = plugin._get_router(ctxt, router_id)
    creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
                                plugin, ctxt, router_db, tenant_id)
    dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
    dep_creator = functools.partial(plugin._create_ha_network,
                                    ctxt, tenant_id)
    dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
    dep_id_attr = 'network_id'

    # This might fail in case of concurrent calls, which is good for us
    # as we can skip the rest of this function.
    binding = self.bind_router(plugin, context, router_id, agent['id'],
                               is_manual_scheduling=is_manual_scheduling,
                               is_ha=True)
    if not binding:
        return

    try:
        port_binding = utils.create_object_with_dependency(
            creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0]
        # NOTE(ralonsoh): to be migrated to the new facade that can't be
        # used with "create_object_with_dependency".
        with lib_db_api.autonested_transaction(context.session):
            port_binding.l3_agent_id = agent['id']
    except db_exc.DBDuplicateEntry:
        LOG.debug("Router %(router)s already scheduled for agent "
                  "%(agent)s", {'router': router_id, 'agent': agent['id']})
        port_id = port_binding.port_id
        # Below call will also delete entry from L3HARouterAgentPortBinding
        # and RouterPort tables
        plugin._core_plugin.delete_port(context, port_id,
                                        l3_port_check=False)
    except l3_exc.RouterNotFound:
        LOG.debug('Router %s has already been removed '
                  'by concurrent operation', router_id)
        # we try to clear the HA network here in case the port we created
        # blocked the concurrent router delete operation from getting rid
        # of the HA network
        ha_net = plugin.get_ha_network(ctxt, tenant_id)
        if ha_net:
            plugin.safe_delete_ha_network(ctxt, ha_net, tenant_id)
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)

        # Check for basic validation since the request body here is not
        # automatically validated by the API layer.
        subports = subports['sub_ports']
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports, trunk['port_id'])
        subports = subports_validator.validate(context,
                                               basic_validation=True)
        added_subports = []

        rules.trunk_can_be_managed(context, trunk)
        original_trunk = copy.deepcopy(trunk)
        # NOTE(status_police): the trunk status should transition to
        # DOWN (and finally in ACTIVE or ERROR), only if it is not in
        # ERROR status already. A user should attempt to resolve the ERROR
        # condition before adding more subports to the trunk. Should a
        # trunk be in DOWN or BUILD state (e.g. when dealing with
        # multiple concurrent requests), the status is still forced to
        # DOWN and thus can potentially overwrite an interleaving state
        # change to ACTIVE. Eventually the driver should bring the status
        # back to ACTIVE or ERROR.
        if trunk.status == constants.ERROR_STATUS:
            raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
        else:
            trunk.update(status=constants.DOWN_STATUS)

        for subport in subports:
            obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            obj.create()
            trunk['sub_ports'].append(obj)
            added_subports.append(obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(resources.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
    if added_subports:
        registry.notify(resources.SUBPORTS, events.AFTER_CREATE, self,
                        payload=payload)
    return trunk
def get_object(cls, context, **kwargs):
    # We want to get the policy regardless of its tenant id. We'll make
    # sure the tenant has permission to access the policy later on.
    admin_context = context.elevated()
    with db_api.autonested_transaction(admin_context.session):
        obj = super(ClassificationGroup, cls).get_object(admin_context,
                                                         **kwargs)
        if not obj or not cls.is_accessible(context, obj):
            return
        return obj
def get_objects(cls, context, _pager=None, validate_filters=True, **kwargs):
    with db_api.autonested_transaction(context.session):
        objects = super(ClassificationBase, cls).get_objects(
            context, _pager, validate_filters, **kwargs)
        return objects
def remove_subports(self, context, trunk_id, subports):
    """Remove one or more subports from trunk."""
    subports = subports['sub_ports']
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk)
        rules.trunk_can_be_managed(context, trunk)

        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports)
        # the subports are being removed, therefore we do not need to
        # enforce any specific trunk rules, other than basic validation
        # of the request body.
        subports = subports_validator.validate(context,
                                               basic_validation=True,
                                               trunk_validation=False)

        current_subports = {p.port_id: p for p in trunk.sub_ports}
        removed_subports = []

        for subport in subports:
            subport_obj = current_subports.pop(subport['port_id'], None)
            if not subport_obj:
                raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                port_id=subport['port_id'])
            subport_obj.delete()
            removed_subports.append(subport_obj)

        del trunk.sub_ports[:]
        trunk.sub_ports.extend(current_subports.values())
        # NOTE(status_police): the trunk status should transition to
        # DOWN irrespective of the status in which it is in to allow
        # the user to resolve potential conflicts due to prior add_subports
        # operations.
        # Should a trunk be in DOWN or BUILD state (e.g. when dealing
        # with multiple concurrent requests), the status is still forced
        # to DOWN. See add_subports() for more details.
        trunk.update(status=constants.DOWN_STATUS)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=removed_subports)
        if removed_subports:
            registry.notify(resources.SUBPORTS, events.PRECOMMIT_DELETE,
                            self, payload=payload)
    if removed_subports:
        registry.notify(resources.SUBPORTS, events.AFTER_DELETE, self,
                        payload=payload)
    return trunk
def _ensure_default_firewall_group(self, context, tenant_id):
    """Create a default firewall group if one doesn't exist for a tenant

    Returns the default firewall group id for a given tenant.
    """
    exists = self._get_default_fwg_id(context, tenant_id)
    if exists:
        return exists

    try:
        # NOTE(cby): default fwg not created => we try to create it!
        with db_api.autonested_transaction(context.session):
            fwr_ids = self._create_default_firewall_rules(
                context, tenant_id)
            ingress_fwp = {
                'description': 'Ingress firewall policy',
                'firewall_rules': [fwr_ids['in_ipv4'],
                                   fwr_ids['in_ipv6']],
            }
            egress_fwp = {
                'description': 'Egress firewall policy',
                'firewall_rules': [fwr_ids['eg_ipv4'],
                                   fwr_ids['eg_ipv6']],
            }
            ingress_fwp_db = self._create_default_firewall_policy(
                context, tenant_id, 'ingress', **ingress_fwp)
            egress_fwp_db = self._create_default_firewall_policy(
                context, tenant_id, 'egress', **egress_fwp)

            fwg = {
                'name': const.DEFAULT_FWG,
                'tenant_id': tenant_id,
                'ingress_firewall_policy_id': ingress_fwp_db['id'],
                'egress_firewall_policy_id': egress_fwp_db['id'],
                'ports': [],
                'shared': False,
                'status': nl_constants.INACTIVE,
                'admin_state_up': True,
                'description': 'Default firewall group',
            }
            fwg_db = self._create_firewall_group(
                context, fwg, default_fwg=True)
            context.session.add(DefaultFirewallGroup(
                firewall_group_id=fwg_db['id'],
                project_id=tenant_id))
            return fwg_db['id']
    except db_exc.DBDuplicateEntry:
        # NOTE(cby): default fwg created concurrently
        LOG.debug("Default FWG was concurrently created")
        return self._get_default_fwg_id(context, tenant_id)
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)

        # Check for basic validation since the request body here is not
        # automatically validated by the API layer.
        subports = subports['sub_ports']
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports, trunk['port_id'])
        subports = subports_validator.validate(
            context, basic_validation=True)
        added_subports = []

        rules.trunk_can_be_managed(context, trunk)
        original_trunk = copy.deepcopy(trunk)
        # NOTE(status_police): the trunk status should transition to
        # DOWN (and finally in ACTIVE or ERROR), only if it is not in
        # ERROR status already. A user should attempt to resolve the ERROR
        # condition before adding more subports to the trunk. Should a
        # trunk be in DOWN or BUILD state (e.g. when dealing with
        # multiple concurrent requests), the status is still forced to
        # DOWN and thus can potentially overwrite an interleaving state
        # change to ACTIVE. Eventually the driver should bring the status
        # back to ACTIVE or ERROR.
        if trunk.status == constants.ERROR_STATUS:
            raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
        else:
            trunk.update(status=constants.DOWN_STATUS)

        for subport in subports:
            obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            obj.create()
            trunk['sub_ports'].append(obj)
            added_subports.append(obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(constants.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
    if added_subports:
        registry.notify(
            constants.SUBPORTS, events.AFTER_CREATE, self, payload=payload)
    return trunk
def remove_subports(self, context, trunk_id, subports):
    """Remove one or more subports from trunk."""
    subports = subports['sub_ports']
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk)
        rules.trunk_can_be_managed(context, trunk)

        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports)
        # the subports are being removed, therefore we do not need to
        # enforce any specific trunk rules, other than basic validation
        # of the request body.
        subports = subports_validator.validate(
            context, basic_validation=True, trunk_validation=False)

        current_subports = {p.port_id: p for p in trunk.sub_ports}
        removed_subports = []

        for subport in subports:
            subport_obj = current_subports.pop(subport['port_id'], None)
            if not subport_obj:
                raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                port_id=subport['port_id'])
            subport_obj.delete()
            removed_subports.append(subport_obj)

        del trunk.sub_ports[:]
        trunk.sub_ports.extend(current_subports.values())
        # NOTE(status_police): the trunk status should transition to
        # DOWN irrespective of the status in which it is in to allow
        # the user to resolve potential conflicts due to prior add_subports
        # operations.
        # Should a trunk be in DOWN or BUILD state (e.g. when dealing
        # with multiple concurrent requests), the status is still forced
        # to DOWN. See add_subports() for more details.
        trunk.update(status=constants.DOWN_STATUS)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=removed_subports)
        if removed_subports:
            registry.notify(constants.SUBPORTS, events.PRECOMMIT_DELETE,
                            self, payload=payload)
    if removed_subports:
        registry.notify(
            constants.SUBPORTS, events.AFTER_DELETE, self, payload=payload)
    return trunk
def create_ha_port_and_bind(self, plugin, context, router_id, tenant_id,
                            agent, is_manual_scheduling=False):
    """Creates and binds a new HA port for this agent."""
    ctxt = context.elevated()
    router_db = plugin._get_router(ctxt, router_id)
    creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
                                plugin, ctxt, router_db, tenant_id)
    dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
    dep_creator = functools.partial(plugin._create_ha_network,
                                    ctxt, tenant_id)
    dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
    dep_id_attr = 'network_id'

    # This might fail in case of concurrent calls, which is good for us
    # as we can skip the rest of this function.
    binding = self.bind_router(
        plugin, context, router_id, agent['id'],
        is_manual_scheduling=is_manual_scheduling, is_ha=True)
    if not binding:
        return

    try:
        port_binding = utils.create_object_with_dependency(
            creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0]
        with lib_db_api.autonested_transaction(context.session):
            port_binding.l3_agent_id = agent['id']
    except db_exc.DBDuplicateEntry:
        LOG.debug("Router %(router)s already scheduled for agent "
                  "%(agent)s", {'router': router_id, 'agent': agent['id']})
        port_id = port_binding.port_id
        # Below call will also delete entry from L3HARouterAgentPortBinding
        # and RouterPort tables
        plugin._core_plugin.delete_port(context, port_id,
                                        l3_port_check=False)
    except l3_exc.RouterNotFound:
        LOG.debug('Router %s has already been removed '
                  'by concurrent operation', router_id)
        # we try to clear the HA network here in case the port we created
        # blocked the concurrent router delete operation from getting rid
        # of the HA network
        ha_net = plugin.get_ha_network(ctxt, tenant_id)
        if ha_net:
            plugin.safe_delete_ha_network(ctxt, ha_net, tenant_id)
def delete_trunk(self, context, trunk_id):
    """Delete the specified trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        rules.trunk_can_be_managed(context, trunk)
        trunk_port_validator = rules.TrunkPortValidator(trunk.port_id)
        if not trunk_port_validator.is_bound(context):
            # NOTE(status_police): when a trunk is deleted, the logical
            # object disappears from the datastore, therefore there is no
            # status transition involved. If PRECOMMIT failures occur,
            # the trunk remains in the status where it was.
            trunk.delete()
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             original_trunk=trunk)
            registry.notify(constants.TRUNK, events.PRECOMMIT_DELETE,
                            self, payload=payload)
        else:
            raise trunk_exc.TrunkInUse(trunk_id=trunk_id)
    registry.notify(constants.TRUNK, events.AFTER_DELETE, self,
                    payload=payload)
def create_trunk(self, context, trunk):
    """Create a trunk."""
    trunk = self.validate(context, trunk['trunk'])

    sub_ports = [trunk_objects.SubPort(
        context=context,
        port_id=p['port_id'],
        segmentation_id=p['segmentation_id'],
        segmentation_type=p['segmentation_type'])
        for p in trunk['sub_ports']]
    admin_state_up = trunk.get('admin_state_up', True)
    # NOTE(status_police): a trunk is created in DOWN status. Depending
    # on the nature of the create request, a driver may set the status
    # immediately to ACTIVE if no physical provisioning is required.
    # Otherwise a transition to BUILD (or ERROR) should be expected
    # depending on how the driver reacts. PRECOMMIT failures prevent the
    # trunk from being created altogether.
    trunk_description = trunk.get('description', "")
    trunk_obj = trunk_objects.Trunk(context=context,
                                    admin_state_up=admin_state_up,
                                    id=uuidutils.generate_uuid(),
                                    name=trunk.get('name', ""),
                                    description=trunk_description,
                                    project_id=trunk['tenant_id'],
                                    port_id=trunk['port_id'],
                                    status=constants.DOWN_STATUS,
                                    sub_ports=sub_ports)
    with db_api.autonested_transaction(context.session):
        trunk_obj.create()
        payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                         current_trunk=trunk_obj)
        registry.notify(resources.TRUNK, events.PRECOMMIT_CREATE, self,
                        payload=payload)
    registry.notify(resources.TRUNK, events.AFTER_CREATE, self,
                    payload=payload)
    return trunk_obj
def update_policy_rule(self, context, rule_cls, rule_id, policy_id,
                       rule_data):
    """Update a QoS policy rule.

    :param context: neutron api request context
    :type context: neutron.context.Context
    :param rule_cls: the rule object class
    :type rule_cls: a class from the rule_object (qos.objects.rule) module
    :param rule_id: the id of the QoS policy rule to update
    :type rule_id: str uuid
    :param policy_id: the id of the rule's policy
    :type policy_id: str uuid
    :param rule_data: the new rule data to update
    :type rule_data: dict

    :returns: a QoS policy rule object
    """
    rule_type = rule_cls.rule_type
    rule_data = rule_data[rule_type + '_rule']

    with db_api.autonested_transaction(context.session):
        # Ensure we have access to the policy.
        policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
        # Ensure the rule belongs to the policy.
        checker.check_bandwidth_rule_conflict(policy, rule_data)
        policy.get_rule_by_id(rule_id)
        rule = rule_cls(context, id=rule_id)
        rule.update_fields(rule_data, reset_changes=True)
        checker.check_rules_conflict(policy, rule)
        rule.update()
        policy.obj_load_attr('rules')
        self.validate_policy(context, policy)
        if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
            self.reject_min_bw_rule_updates(context, policy)
        self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                 context, policy)
    self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
    return rule
def update_trunk(self, context, trunk_id, trunk):
    """Update information for the specified trunk."""
    trunk_data = trunk['trunk']
    with db_api.autonested_transaction(context.session):
        trunk_obj = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk_obj)
        # NOTE(status_police): a trunk status should not change during an
        # update_trunk(), even in face of PRECOMMIT failures. This is
        # because only name and admin_state_up are being affected, and
        # these are DB properties only.
        trunk_obj.update_fields(trunk_data, reset_changes=True)
        trunk_obj.update()
        payload = events.DBEventPayload(
            context, resource_id=trunk_id, states=(original_trunk,),
            desired_state=trunk_obj, request_body=trunk_data)
        registry.publish(constants.TRUNK, events.PRECOMMIT_UPDATE, self,
                         payload=payload)
    registry.notify(constants.TRUNK, events.AFTER_UPDATE, self,
                    payload=callbacks.TrunkPayload(
                        context, trunk_id,
                        original_trunk=original_trunk,
                        current_trunk=trunk_obj))
    return trunk_obj
def create_trunk(self, context, trunk):
    """Create a trunk."""
    trunk = self.validate(context, trunk['trunk'])

    sub_ports = [trunk_objects.SubPort(
        context=context,
        port_id=p['port_id'],
        segmentation_id=p['segmentation_id'],
        segmentation_type=p['segmentation_type'])
        for p in trunk['sub_ports']]
    admin_state_up = trunk.get('admin_state_up', True)
    # NOTE(status_police): a trunk is created in DOWN status. Depending
    # on the nature of the create request, a driver may set the status
    # immediately to ACTIVE if no physical provisioning is required.
    # Otherwise a transition to BUILD (or ERROR) should be expected
    # depending on how the driver reacts. PRECOMMIT failures prevent the
    # trunk from being created altogether.
    trunk_description = trunk.get('description', "")
    trunk_obj = trunk_objects.Trunk(context=context,
                                    admin_state_up=admin_state_up,
                                    id=uuidutils.generate_uuid(),
                                    name=trunk.get('name', ""),
                                    description=trunk_description,
                                    project_id=trunk['tenant_id'],
                                    port_id=trunk['port_id'],
                                    status=constants.DOWN_STATUS,
                                    sub_ports=sub_ports)
    with db_api.autonested_transaction(context.session):
        trunk_obj.create()
        payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                         current_trunk=trunk_obj)
        registry.notify(
            constants.TRUNK, events.PRECOMMIT_CREATE, self, payload=payload)
    registry.notify(
        constants.TRUNK, events.AFTER_CREATE, self, payload=payload)
    return trunk_obj
def _get_policy_id(self, context, rule_cls, rule_id):
    with db_api.autonested_transaction(context.session):
        rule_object = rule_cls.get_object(context, id=rule_id)
        if not rule_object:
            raise qos_exc.QosRuleNotFound(policy_id="", rule_id=rule_id)
    return rule_object.qos_policy_id
def db_context_reader(cls, context):
    """Return read-only session activation decorator."""
    if cls.new_facade or cls._use_db_facade(context):
        return db_api.CONTEXT_READER.using(context)

    return db_api.autonested_transaction(context.session)
def update_trunk_status(self, context, trunk_id, status):
    """Update the trunk status to reflect outcome of data plane wiring."""
    with db_api.autonested_transaction(context.session):
        trunk = trunk_objects.Trunk.get_object(context, id=trunk_id)
        if trunk:
            trunk.update(status=status)
def create(self):
    with db_api.autonested_transaction(self.obj_context.session):
        super(ClassificationBase, self).create()
def set_trunk_status(self, context, trunk_id, status):
    with db_api.autonested_transaction(context.session):
        trunk = trunk_objects.Trunk.get_object(context, id=trunk_id)
        if trunk:
            trunk.update(status=status)
def get_object(cls, context, **kwargs):
    with db_api.autonested_transaction(context.session):
        obj = super(TCPClassification, cls).get_object(
            context, c_type='tcp', **kwargs)
        return obj
def db_context_reader(cls, context):
    """Return read-only session activation decorator."""
    if cls.new_facade or cls._use_db_facade(context):
        return db_api.context_manager.reader.using(context)

    return lib_db_api.autonested_transaction(context.session)
def _exec(self, method_name, context, kwargs):
    with db_api.autonested_transaction(context.session):
        return getattr(self, method_name)(context=context, **kwargs)
def db_context_writer(cls, context):
    """Return read-write session activation decorator."""
    if cls.new_facade or cls._use_db_facade(context):
        return db_api.CONTEXT_WRITER.using(context)

    return db_api.autonested_transaction(context.session)
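A minimal consumption sketch (an assumption, not taken from the source): both db_context_reader() and db_context_writer() return a context manager, so an object class can wrap its database access in a with-block and let the class decide between the new enginefacade and the legacy autonested transaction. "MyObject" and its methods below are hypothetical placeholders.

class MyObject(base.NeutronDbObject):

    @classmethod
    def get_objects(cls, context, **kwargs):
        # Read-only access: enginefacade reader or legacy autonested
        # transaction, depending on new_facade / _use_db_facade().
        with cls.db_context_reader(context):
            return super(MyObject, cls).get_objects(context, **kwargs)

    def update(self):
        # Read-write access goes through db_context_writer().
        with self.db_context_writer(self.obj_context):
            super(MyObject, self).update()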