Example #1
 def add_addresses(self, context, address_group_id, addresses):
     ag = self._get_address_group(context, address_group_id)
     original_address_group = self._make_address_group_dict(ag)
     addrs_in_ag, addrs_not_in_ag = self._process_requested_addresses(
         ag, addresses['addresses'])
     if addrs_in_ag:
         raise ag_exc.AddressesAlreadyExist(
             addresses=addrs_in_ag, address_group_id=address_group_id)
     for addr in addrs_not_in_ag:
         addr = netaddr.IPNetwork(addr)
         args = {'address_group_id': address_group_id, 'address': addr}
         addr_assoc = ag_obj.AddressAssociation(context, **args)
         addr_assoc.create()
     ag.update()  # reload synthetic fields
     ag_dict = {'address_group': self._make_address_group_dict(ag)}
     registry.publish(resources.ADDRESS_GROUP,
                      events.AFTER_UPDATE,
                      self,
                      payload=events.DBEventPayload(
                          context,
                          resource_id=address_group_id,
                          states=(
                              original_address_group,
                              ag_dict['address_group'],
                          )))
     return ag_dict
Example #2
 def create_rbac_policy(self, context, rbac_policy):
     e = rbac_policy['rbac_policy']
     # NOTE(ralonsoh): remove this conversion when "bp/keystone-v3" is
     # widely implemented in all OpenStack projects.
     try:
         e['target_project'] = e.pop('target_tenant')
     except KeyError:
         pass
     try:
         registry.publish(resources.RBAC_POLICY, events.BEFORE_CREATE, self,
                          payload=events.DBEventPayload(
                              context, request_body=e,
                              metadata={'object_type': e['object_type']}))
     except c_exc.CallbackFailure as ex:
         raise n_exc.InvalidInput(error_message=ex)
     rbac_class = (
         rbac_obj.RBACBaseObject.get_type_class_map()[e['object_type']])
     try:
         rbac_args = {'project_id': e['project_id'],
                      'object_id': e['object_id'],
                      'action': e['action'],
                      'target_project': e['target_project']}
         _rbac_obj = rbac_class(context, **rbac_args)
         _rbac_obj.create()
     except o_exc.NeutronDbObjectDuplicateEntry:
         raise ext_rbac.DuplicateRbacPolicy()
     return self._make_rbac_policy_dict(_rbac_obj)
Example #3
def _update_segment_host_mapping_for_agent(resource, event, trigger,
                                           payload=None):
    plugin = payload.metadata.get('plugin')
    agent = payload.desired_state
    host = payload.metadata.get('host')
    context = payload.context

    check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None)
    if (not check_user_configured_segment_plugin() or
            not check_segment_for_agent):
        return
    phys_nets = _get_phys_nets(agent)
    if not phys_nets:
        return
    start_flag = agent.get('start_flag', None)
    if host in reported_hosts and not start_flag:
        return
    reported_hosts.add(host)
    segments = get_segments_with_phys_nets(context, phys_nets)
    current_segment_ids = {
        segment['id'] for segment in segments
        if check_segment_for_agent(segment, agent)}
    update_segment_host_mapping(context, host, current_segment_ids)
    registry.publish(resources.SEGMENT_HOST_MAPPING, events.AFTER_CREATE,
                     plugin, payload=events.DBEventPayload(
                         context,
                         metadata={
                             'host': host,
                             'current_segment_ids': current_segment_ids}))
Example #4
 def test_create_default_l2_gateway(self):
     def_bep_name = NSX_DEFAULT_BEP_NAME
     cfg.CONF.set_override("default_bridge_endpoint_profile",
                           def_bep_name, "nsx_v3")
     with mock.patch.object(nsx_v3_driver.NsxV3Driver,
                            '_get_bridge_vlan_tz_id',
                            return_value=['some_tz_id']) as mock_get_tz:
         nsx_v3_driver.NsxV3Driver(mock.MagicMock())
         def_bep_id = (
             self.nsxlib.bridge_endpoint_profile.get_id_by_name_or_id(
                 def_bep_name))
         # fake the callback invoked after init
         registry.publish(resources.PROCESS, events.BEFORE_SPAWN,
                         mock.MagicMock())
         l2gws = self.driver._get_l2_gateways(self.context)
         def_l2gw = None
         for l2gw in l2gws:
             for device in l2gw['devices']:
                 if device['device_name'] == def_bep_id:
                     def_l2gw = l2gw
         self.assertIsNotNone(def_l2gw)
         self.assertEqual(def_l2gw.devices[0].device_name, def_bep_id)
         self.assertEqual(def_l2gw.devices[0].interfaces[0].interface_name,
                          'some_tz_id')
         mock_get_tz.assert_called_once_with({'id': def_bep_id,
                                              'edge_cluster_id': 'meh'})
Example #6
        def notify(create_result):
            # Ensure usage trackers for all resources affected by this API
            # operation are marked as dirty
            with db_api.CONTEXT_WRITER.using(request.context):
                # Commit the reservation(s)
                for reservation in reservations:
                    quota.QUOTAS.commit_reservation(request.context,
                                                    reservation.reservation_id)
                resource_registry.set_resources_dirty(request.context)

            notifier_method = self._resource + '.create.end'
            self._notifier.info(request.context, notifier_method,
                                create_result)
            registry.publish(self._resource,
                             events.BEFORE_RESPONSE,
                             self,
                             payload=events.APIEventPayload(
                                 request.context,
                                 notifier_method,
                                 action,
                                 request_body=body,
                                 states=(
                                     {},
                                     create_result,
                                 ),
                                 collection_name=self._collection))
            return create_result
Example #7
 def test_publish_payload(self):
     event_payload = events.EventPayload(mock.ANY)
     registry.publish('x', 'y', self, payload=event_payload)
     self.callback_manager.publish.assert_called_with('x',
                                                      'y',
                                                      self,
                                                      payload=event_payload)
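Note: every example in this section exercises the publisher side of the
neutron_lib callbacks API. For orientation, the sketch below shows the
subscriber side such publish calls reach. It is a minimal illustration, not
code taken from any example here: the handler name and the resource/event
pair are assumptions, but the callback signature and the DBEventPayload
attributes it reads (context, resource_id, states) are the ones the
publishers in this section populate.

from oslo_log import log as logging

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources

LOG = logging.getLogger(__name__)


def _log_router_update(resource, event, trigger, payload=None):
    # DBEventPayload carries the request context, the resource id and the
    # (old, new) states tuple that the publisher filled in.
    old_state, new_state = payload.states
    LOG.debug("Router %s updated by %s (old=%s, new=%s)",
              payload.resource_id, trigger, old_state, new_state)


registry.subscribe(_log_router_update, resources.ROUTER, events.AFTER_UPDATE)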
Example #8
    def _update_router_gw_info(self, context, router_id, info, router=None):
        # Load the router only if necessary
        if not router:
            router = self._get_router(context, router_id)
        with context.session.begin(subtransactions=True):
            old_router = self._make_router_dict(router)
            router.enable_snat = self._get_enable_snat(info)
            router_body = {
                l3_apidef.ROUTER: {
                    l3_apidef.EXTERNAL_GW_INFO: info
                }
            }
            registry.publish(resources.ROUTER,
                             events.PRECOMMIT_UPDATE,
                             self,
                             payload=events.DBEventPayload(
                                 context,
                                 request_body=router_body,
                                 states=(old_router, ),
                                 resource_id=router_id,
                                 desired_state=router))

        # Call the superclass, passing the router DB object to avoid
        # re-loading it
        super(L3_NAT_dbonly_mixin, self)._update_router_gw_info(context,
                                                                router_id,
                                                                info,
                                                                router=router)
        # Returning the router might prove useful if this
        # method is overridden in child classes
        return router
Example #9
    def _router_removed(self, ri, router_id):
        """Delete the router and stop the auxiliary processes

        This stops the auxiliary processes (keepalived, keepalived-state-
        change, radvd, etc.) and deletes the router ports and the namespace.
        The "router_info" cache is also updated at the beginning of the
        process, to prevent any other concurrent process from handling the
        router being deleted. If an exception is raised, the "router_info"
        cache is restored.
        """
        if ri is None:
            LOG.warning(
                "Info for router %s was not found. "
                "Performing router cleanup", router_id)
            self.namespaces_manager.ensure_router_cleanup(router_id)
            return

        registry.publish(resources.ROUTER,
                         events.BEFORE_DELETE,
                         self,
                         payload=events.DBEventPayload(self.context,
                                                       states=(ri, ),
                                                       resource_id=router_id))

        del self.router_info[router_id]
        try:
            ri.delete()
        except Exception:
            with excutils.save_and_reraise_exception():
                self.router_info[router_id] = ri

        registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
Example #10
    def _set_router_provider(self, resource, event, trigger, payload):
        """Associates a router with a service provider.

        Association is done by flavor_id if it's specified, otherwise it will
        fall back to determining which loaded driver supports the
        ha/distributed attributes associated with the router.
        """
        context = payload.context
        router = payload.latest_state
        router_db = payload.metadata['router_db']
        router_id = payload.resource_id
        if _flavor_specified(router):
            router_db.flavor_id = router['flavor_id']
        drv = self._get_provider_for_create(context, router)
        self._stm.add_resource_association(context, plugin_constants.L3,
                                           drv.name, router_id)
        registry.publish(resources.ROUTER_CONTROLLER,
                         events.PRECOMMIT_ADD_ASSOCIATION,
                         trigger,
                         payload=events.DBEventPayload(
                             context,
                             request_body=router,
                             states=(router_db, ),
                             metadata={
                                 'old_driver': None,
                                 'new_driver': drv
                             },
                             resource_id=router_db.get('id')))
Example #11
 def create_address_group(self, context, address_group):
     """Create an address group."""
     fields = address_group['address_group']
     args = {
         'project_id': fields['project_id'],
         'id': uuidutils.generate_uuid(),
         'name': fields['name'],
         'description': fields['description']
     }
     ag = ag_obj.AddressGroup(context, **args)
     ag.create()
     address_group = self._make_address_group_dict(ag)
     registry.publish(resources.ADDRESS_GROUP,
                      events.AFTER_CREATE,
                      self,
                      payload=events.DBEventPayload(
                          context,
                          resource_id=ag.id,
                          states=(address_group, )))
     # NOTE(hangyang): after sending the create notification, we then
     # handle adding addresses, which will send another update notification
     if fields.get('addresses') is not constants.ATTR_NOT_SPECIFIED:
         self.add_addresses(context, ag.id, fields)
     ag.update()  # reload synthetic fields
     return self._make_address_group_dict(ag)
Example #12
 def get_provider_for_router(self, context, router_id):
     """Return the provider driver handle for a router id."""
     driver_name = self._stm.get_provider_names_by_resource_ids(
         context, [router_id]).get(router_id)
     if not driver_name:
         # this is an old router that hasn't been mapped to a provider
         # yet so we do this now
         router = self.l3_plugin.get_router(context, router_id)
         driver = self._attrs_to_driver(router)
         driver_name = driver.name
         with db_api.CONTEXT_WRITER.using(context):
             self._stm.add_resource_association(context,
                                                plugin_constants.L3,
                                                driver_name, router_id)
             registry.publish(resources.ROUTER_CONTROLLER,
                              events.PRECOMMIT_ADD_ASSOCIATION,
                              self,
                              payload=events.DBEventPayload(
                                  context,
                                  states=(router, ),
                                  metadata={
                                      'old_driver': None,
                                      'new_driver': driver
                                  },
                                  resource_id=router_id))
     return self.drivers[driver_name]
Example #13
    def delete_segment(self, context, uuid, for_net_delete=False):
        """Delete an existing segment."""
        segment_dict = self.get_segment(context, uuid)
        # Do some preliminary operations before deleting the segment
        registry.publish(resources.SEGMENT, events.BEFORE_DELETE,
                         self.delete_segment,
                         payload=events.DBEventPayload(
                             context, metadata={
                                 'for_net_delete': for_net_delete},
                             states=(segment_dict,),
                             resource_id=uuid))

        # Delete segment in DB
        with db_api.CONTEXT_WRITER.using(context):
            if not network.NetworkSegment.delete_objects(context, id=uuid):
                raise exceptions.SegmentNotFound(segment_id=uuid)
            # Notify interested parties before the deletion is committed
            registry.notify(resources.SEGMENT, events.PRECOMMIT_DELETE,
                            self.delete_segment, context=context,
                            segment=segment_dict)

        registry.publish(resources.SEGMENT, events.AFTER_DELETE,
                         self.delete_segment,
                         payload=events.DBEventPayload(
                             context, states=(segment_dict,),
                             resource_id=uuid))
Example #14
 def test_subscribe_address_scope_of_subnetpool(self, gp):
     l3_db.L3RpcNotifierMixin()
     registry.publish(resources.SUBNETPOOL_ADDRESS_SCOPE,
                      events.AFTER_UPDATE, mock.ANY,
                      payload=events.DBEventPayload(
                          mock.MagicMock(), resource_id='fake_id'))
     self.assertTrue(gp.return_value.notify_routers_updated.called)
Example #15
 def treat_devices_removed(self, devices):
     resync = False
     self.sg_agent.remove_devices_filter(devices)
     for device in devices:
         LOG.info("Attachment %s removed", device)
         details = None
         try:
             details = self.plugin_rpc.update_device_down(self.context,
                                                          device,
                                                          self.agent_id,
                                                          cfg.CONF.host)
         except Exception:
             LOG.exception("Error occurred while removing port %s",
                           device)
             resync = True
         if details and details['exists']:
             LOG.info("Port %s updated.", device)
         else:
             LOG.debug("Device %s not defined on plugin", device)
         port_id = self._clean_network_ports(device)
         try:
             self.ext_manager.delete_port(self.context,
                                          {'device': device,
                                           'port_id': port_id})
         except Exception:
             LOG.exception("Error occurred while processing extensions "
                           "for port removal %s", device)
             resync = True
         registry.publish(local_resources.PORT_DEVICE, events.AFTER_DELETE,
                          self, payload=events.DBEventPayload(
                              self.context, states=(details,),
                              resource_id=device))
     self.mgr.delete_arp_spoofing_protection(devices)
     return resync
Example #16
    def _process_updated_router(self, router):
        ri = self.router_info[router['id']]

        router_ha = router.get('ha')
        router_distributed = router.get('distributed')
        if ((router_ha is not None and ri.router.get('ha') != router_ha) or
                (router_distributed is not None and
                 ri.router.get('distributed') != router_distributed)):
            LOG.warning('Type of the router %(id)s changed. '
                        'Old type: ha=%(old_ha)s; distributed=%(old_dvr)s; '
                        'New type: ha=%(new_ha)s; distributed=%(new_dvr)s',
                        {'id': router['id'],
                         'old_ha': ri.router.get('ha'),
                         'old_dvr': ri.router.get('distributed'),
                         'new_ha': router.get('ha'),
                         'new_dvr': router.get('distributed')})
            ri = self._create_router(router['id'], router)
            self.router_info[router['id']] = ri

        is_dvr_snat_agent = (self.conf.agent_mode ==
                             lib_const.L3_AGENT_MODE_DVR_SNAT)
        is_dvr_only_agent = (self.conf.agent_mode in
                             [lib_const.L3_AGENT_MODE_DVR,
                              lib_const.L3_AGENT_MODE_DVR_NO_EXTERNAL])
        old_router_ha_interface = ri.router.get(lib_const.HA_INTERFACE_KEY)
        current_router_ha_interface = router.get(lib_const.HA_INTERFACE_KEY)
        ha_interface_change = ((old_router_ha_interface is None and
                                current_router_ha_interface is not None) or
                               (old_router_ha_interface is not None and
                                current_router_ha_interface is None))
        is_dvr_ha_router = router.get('distributed') and router.get('ha')

        if is_dvr_snat_agent and is_dvr_ha_router and ha_interface_change:
            LOG.debug("Removing HA router %s, since it is not bound to "
                      "the current agent, and recreating regular DVR router "
                      "based on service port requirements.",
                      router['id'])
            if self._safe_router_removed(router['id']):
                self._process_added_router(router)
        else:
            is_ha_router = getattr(ri, 'ha_state', False)
            # For HA routers check that DB state matches actual state
            if router.get('ha') and not is_dvr_only_agent and is_ha_router:
                self.check_ha_state_for_router(
                    router['id'], router.get(lib_const.HA_ROUTER_STATE_KEY))
            ri.router = router
            registry.publish(resources.ROUTER, events.BEFORE_UPDATE, self,
                             payload=events.DBEventPayload(
                                 self.context,
                                 resource_id=router['id'],
                                 states=(ri,)))

            ri.process()
            registry.publish(resources.ROUTER, events.AFTER_UPDATE, self,
                             payload=events.DBEventPayload(
                                 self.context,
                                 resource_id=router['id'],
                                 states=(None, ri)))
            self.l3_ext_manager.update_router(self.context, router)
Example #17
 def __init__(self):
     super(PortForwardingPlugin, self).__init__()
     self.push_api = resources_rpc.ResourcesPushRpcApi() \
         if self._rpc_notifications_required else None
     self.l3_plugin = directory.get_plugin(constants.L3)
     self.core_plugin = directory.get_plugin()
     registry.publish(pf_consts.PORT_FORWARDING_PLUGIN, events.AFTER_INIT,
                      self)
Example #18
    def __init__(self):
        self._drivers = set()
        self.rpc_required = False
        registry.publish(log_const.LOGGING_PLUGIN, events.AFTER_INIT, self)

        if self.rpc_required:
            self._start_rpc_listeners()
            self.logging_rpc = server_rpc.LoggingApiNotification()
Example #20
    def _ensure_vr_id(self, context, router_db, ha_network):
        router_id = router_db.id
        network_id = ha_network.network_id

        # TODO(kevinbenton): let decorator handle duplicate retry
        # like in review.openstack.org/#/c/367179/1/neutron/db/l3_hamode_db.py
        for count in range(MAX_ALLOCATION_TRIES):
            try:
                # NOTE(kevinbenton): we disallow subtransactions because the
                # retry logic will bust any parent transactions
                with context.session.begin():
                    if router_db.extra_attributes.ha_vr_id:
                        LOG.debug(
                            "Router %(router_id)s has already been "
                            "allocated a ha_vr_id %(ha_vr_id)d!",
                            {'router_id': router_id,
                             'ha_vr_id': router_db.extra_attributes.ha_vr_id})
                        return

                    old_router = self._make_router_dict(router_db)
                    allocated_vr_ids = self._get_allocated_vr_id(context,
                                                                 network_id)
                    available_vr_ids = VR_ID_RANGE - allocated_vr_ids

                    if not available_vr_ids:
                        raise l3ha_exc.NoVRIDAvailable(router_id=router_id)

                    allocation = l3_hamode.L3HARouterVRIdAllocation(
                        context, network_id=network_id,
                        vr_id=available_vr_ids.pop())
                    allocation.create()

                    router_db.extra_attributes.ha_vr_id = allocation.vr_id
                    LOG.debug(
                        "Router %(router_id)s has been allocated a ha_vr_id "
                        "%(ha_vr_id)d.",
                        {'router_id': router_id, 'ha_vr_id': allocation.vr_id})
                    router_body = {l3_apidef.ROUTER:
                            {l3_ext_ha_apidef.HA_INFO: True,
                             'ha_vr_id': allocation.vr_id}}
                    registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE,
                                     self, payload=events.DBEventPayload(
                                         context, request_body=router_body,
                                         states=(old_router,),
                                         resource_id=router_id,
                                         desired_state=router_db))

                    return allocation.vr_id

            except obj_base.NeutronDbObjectDuplicateEntry:
                LOG.info("Attempt %(count)s to allocate a VRID in the "
                         "network %(network)s for the router %(router)s",
                         {'count': count, 'network': network_id,
                          'router': router_id})

        raise l3ha_exc.MaxVRIDAllocationTriesReached(
            network_id=network_id, router_id=router_id,
            max_tries=MAX_ALLOCATION_TRIES)
Example #22
    def after(self, state):
        resource_name = state.request.context.get('resource')
        collection_name = state.request.context.get('collection')
        neutron_context = state.request.context.get('neutron_context')
        action = pecan_constants.ACTION_MAP.get(state.request.method)
        if not action or action not in ('create', 'update', 'delete'):
            return
        if utils.is_member_action(utils.get_controller(state)):
            return
        if not resource_name:
            LOG.debug("Skipping NotifierHook processing as there was no "
                      "resource associated with the request")
            return
        if state.response.status_int > 300:
            LOG.debug(
                "No notification will be sent due to unsuccessful "
                "status code: %s", state.response.status_int)
            return

        original = {}
        if (action in ('delete', 'update')
                and state.request.context.get('original_resources', [])):
            # We only need the original resource for updates and deletes
            original = state.request.context.get('original_resources')[0]
        if action == 'delete':
            # The object has been deleted, so we must notify the agent with the
            # data of the original object as the payload, but we do not need
            # to pass it in as the original
            result = {resource_name: original}
            original = {}
        else:
            if not state.response.body:
                result = {}
            else:
                result = state.response.json

        notifier_method = '%s.%s.end' % (resource_name, action)
        notifier_action = utils.get_controller(state).plugin_handlers[action]
        registry.publish(resource_name,
                         events.BEFORE_RESPONSE,
                         self,
                         payload=events.APIEventPayload(
                             neutron_context,
                             notifier_method,
                             notifier_action,
                             request_body=state.request.body,
                             states=(
                                 original,
                                 result,
                             ),
                             collection_name=collection_name))

        if action == 'delete':
            resource_id = state.request.context.get('resource_id')
            result[resource_name + '_id'] = resource_id

        self._notifier.info(neutron_context, notifier_method, result)
Example #23
 def delete_agent(self, context, id):
     agent = self._get_agent(context, id)
     registry.publish(resources.AGENT,
                      events.BEFORE_DELETE,
                      self,
                      payload=events.DBEventPayload(context,
                                                    states=(agent, ),
                                                    resource_id=id))
     agent.delete()
Example #24
    def create_security_group_rule(self, context, security_group_rule):
        res = self._create_security_group_rule(context, security_group_rule)
        registry.publish(resources.SECURITY_GROUP_RULE, events.AFTER_CREATE,
                         self, payload=events.DBEventPayload(
                                   context,
                                   resource_id=res['id'],
                                   states=(res,)))

        return res
Example #25
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk."""
        with db_api.CONTEXT_WRITER.using(context):
            trunk = self._get_trunk(context, trunk_id)

            # Check for basic validation since the request body here is not
            # automatically validated by the API layer.
            subports = subports['sub_ports']
            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports, trunk['port_id'])
            subports = subports_validator.validate(
                context, basic_validation=True)
            added_subports = []

            rules.trunk_can_be_managed(context, trunk)
            original_trunk = copy.deepcopy(trunk)
            # NOTE(status_police): the trunk status should transition to
            # DOWN (and finally in ACTIVE or ERROR), only if it is not in
            # ERROR status already. A user should attempt to resolve the ERROR
            # condition before adding more subports to the trunk. Should a
            # trunk be in DOWN or BUILD state (e.g. when dealing with
            # multiple concurrent requests), the status is still forced to
            # DOWN and thus can potentially overwrite an interleaving state
            # change to ACTIVE. Eventually the driver should bring the status
            # back to ACTIVE or ERROR.
            if trunk.status == constants.TRUNK_ERROR_STATUS:
                raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
            trunk.update(status=constants.TRUNK_DOWN_STATUS)

            for subport in subports:
                obj = trunk_objects.SubPort(
                               context=context,
                               trunk_id=trunk_id,
                               port_id=subport['port_id'],
                               segmentation_type=subport['segmentation_type'],
                               segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)
            payload = events.DBEventPayload(context, resource_id=trunk_id,
                                            states=(original_trunk, trunk,),
                                            metadata={
                                                'subports': added_subports
                                            })
            if added_subports:
                registry.publish(resources.SUBPORTS, events.PRECOMMIT_CREATE,
                                 self, payload=payload)
        if added_subports:
            payload = events.DBEventPayload(context, resource_id=trunk_id,
                                            states=(original_trunk, trunk,),
                                            metadata={
                                                'subports': added_subports
                                            })
            registry.publish(
                resources.SUBPORTS, events.AFTER_CREATE, self, payload=payload)
        return trunk
Example #26
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk."""
        subports = subports['sub_ports']
        with db_api.CONTEXT_WRITER.using(context):
            trunk = self._get_trunk(context, trunk_id)
            original_trunk = copy.deepcopy(trunk)
            rules.trunk_can_be_managed(context, trunk)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(
                context, basic_validation=True,
                trunk_validation=False)

            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)

            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            # NOTE(status_police): the trunk status should transition to
            # DOWN irrespective of the status in which it is in to allow
            # the user to resolve potential conflicts due to prior add_subports
            # operations.
            # Should a trunk be in DOWN or BUILD state (e.g. when dealing
            # with multiple concurrent requests), the status is still forced
            # to DOWN. See add_subports() for more details.
            trunk.update(status=constants.TRUNK_DOWN_STATUS)
            payload = events.DBEventPayload(context, resource_id=trunk_id,
                                            states=(original_trunk, trunk,),
                                            metadata={
                                                'subports': removed_subports
                                            })
            if removed_subports:
                registry.publish(resources.SUBPORTS, events.PRECOMMIT_DELETE,
                                 self, payload=payload)
        if removed_subports:
            payload = events.DBEventPayload(context, resource_id=trunk_id,
                                            states=(original_trunk, trunk,),
                                            metadata={
                                                'subports': removed_subports
                                            })
            registry.publish(
                resources.SUBPORTS, events.AFTER_DELETE, self, payload=payload)
        return trunk
Example #27
 def _notify_before_router_edge_association(self,
                                            context,
                                            router,
                                            edge_id=None):
     registry.publish(nsxv_constants.SERVICE_EDGE,
                      events.BEFORE_DELETE,
                      self,
                      payload=events.DBEventPayload(context,
                                                    states=(router, ),
                                                    resource_id=edge_id))
Example #28
 def _check_router_not_in_use(self, context, router_id):
     try:
         registry.publish(
             resources.ROUTER, events.BEFORE_DELETE, self,
             payload=events.DBEventPayload(context, resource_id=router_id))
     except exceptions.CallbackFailure as e:
         with excutils.save_and_reraise_exception():
             if len(e.errors) == 1:
                 raise e.errors[0].error
             raise l3_exc.RouterInUse(router_id=router_id, reason=e)
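Note: Example #28 relies on BEFORE_DELETE subscribers being able to veto the
operation; any exception raised inside a callback reaches the publisher as
the CallbackFailure caught above. The sketch below shows what such a vetoing
subscriber could look like. It only illustrates the pattern: the exception
class and the _router_is_referenced() helper are hypothetical, not taken
from any example in this section.

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import exceptions as n_exc


class RouterReferencedByService(n_exc.InUse):
    message = "Router %(router_id)s is still referenced by this service."


def _router_is_referenced(context, router_id):
    # Hypothetical check; a real service would query its own tables here.
    return False


def _veto_router_delete(resource, event, trigger, payload=None):
    # Raising from a BEFORE_DELETE callback vetoes the operation: the
    # callbacks manager wraps the error into the CallbackFailure that
    # Example #28 converts into RouterInUse (or re-raises directly when
    # there is a single underlying error).
    if _router_is_referenced(payload.context, payload.resource_id):
        raise RouterReferencedByService(router_id=payload.resource_id)


registry.subscribe(_veto_router_delete,
                   resources.ROUTER, events.BEFORE_DELETE)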
Example #29
def serve_wsgi(cls):
    try:
        service = cls.create()
        service.start()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Unrecoverable error: please check log '
                          'for details.')
    registry.publish(resources.PROCESS, events.BEFORE_SPAWN, service)
    return service
Example #30
    def delete_firewall_rule(self, context, id):
        firewall_rule = self.firewall_db.get_firewall_rule(context, id)
        self.delete_firewall_rule_precommit(context, firewall_rule)
        self.firewall_db.delete_firewall_rule(context, id)
        self.delete_firewall_rule_postcommit(context, firewall_rule)

        payload = events.DBEventPayload(context=context,
                                        resource_id=id,
                                        states=(firewall_rule,))
        registry.publish(
            const.FIREWALL_RULE, events.AFTER_DELETE, self, payload=payload)
Example #31
    def delete_firewall_policy(self, context, id):
        firewall_policy = self.firewall_db.get_firewall_policy(context, id)
        self.delete_firewall_policy_precommit(context, firewall_policy)
        self.firewall_db.delete_firewall_policy(context, id)
        self.delete_firewall_policy_postcommit(context, firewall_policy)

        payload = events.DBEventPayload(context=context,
                                        resource_id=id,
                                        states=(firewall_policy,))
        registry.publish(
            const.FIREWALL_POLICY, events.AFTER_DELETE, self, payload=payload)
Example #32
    def __init__(self):
        self._drivers = []
        self.rpc_notifications_required = False
        rpc_registry.provide(self._get_qos_policy_cb, resources.QOS_POLICY)
        # Notify any registered QoS driver that we're ready; those will
        # call the driver manager back with register_driver if they
        # are enabled.
        registry.publish(qos_consts.QOS_PLUGIN, events.AFTER_INIT, self)

        if self.rpc_notifications_required:
            self.push_api = resources_rpc.ResourcesPushRpcApi()
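Note: the comment in Example #32 describes a handshake in which enabled
drivers answer the AFTER_INIT publish by registering themselves with the
driver manager. The sketch below illustrates that driver-side hook under
stated assumptions: the resource-name string and the driver object are local
placeholders, and only the register_driver() call mentioned in the comment
above is taken from the example.

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry

# Placeholders for illustration only; a real driver would use the plugin's
# own callback resource constant (qos_consts.QOS_PLUGIN in Example #32) and
# its own driver instance.
_PLUGIN_RESOURCE = 'QOS_PLUGIN'
_MY_DRIVER = object()


def _register_with_manager(resource, event, trigger, payload=None):
    # 'trigger' is the publishing driver manager ('self' in Example #32);
    # an enabled driver answers AFTER_INIT by registering itself with it.
    trigger.register_driver(_MY_DRIVER)


registry.subscribe(_register_with_manager,
                   _PLUGIN_RESOURCE, events.AFTER_INIT)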
Example #33
    def _set_bridge_name(port, vif_details):
        # REVISIT(rawlin): add BridgeName as a nullable column to the Port
        # model and simply check here if it's set and insert it into the
        # vif_details.

        def set_bridge_name_inner(bridge_name):
            vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name

        registry.publish(a_const.OVS_BRIDGE_NAME, events.BEFORE_READ,
            set_bridge_name_inner, payload=events.EventPayload(
                None, metadata={'port': port}))
Example #35
 def create_bgp_speaker(self, context, bgp_speaker):
     bgp_speaker = super(BgpPlugin,
                         self).create_bgp_speaker(context, bgp_speaker)
     registry.publish(dr_resources.BGP_SPEAKER,
                      events.AFTER_CREATE,
                      self,
                      payload=events.DBEventPayload(
                          context,
                          metadata={'plugin': self},
                          states=(bgp_speaker, )))
     return bgp_speaker
Example #38
 def test_create_duplicate_default_l2_gateway_noop(self):
     def_bridge_cluster_name = nsx_v3_mocks.NSX_BRIDGE_CLUSTER_NAME
     cfg.CONF.set_override("default_bridge_cluster",
                           def_bridge_cluster_name, "nsx_v3")
     for i in range(0, 2):
         nsx_v3_driver.NsxV3Driver(mock.MagicMock())
         # fake the callback invoked after init
         registry.publish(resources.PROCESS, events.BEFORE_SPAWN,
                          mock.MagicMock())
     l2gws = self.driver._get_l2_gateways(self.context)
     # Verify whether only one default L2 gateway is created
     self.assertEqual(1, len(l2gws))
Example #39
    def _create_segment_db(self, context, segment_id, segment):
        with db_api.CONTEXT_WRITER.using(context):
            network_id = segment['network_id']
            physical_network = segment[extension.PHYSICAL_NETWORK]
            if physical_network == constants.ATTR_NOT_SPECIFIED:
                physical_network = None
            network_type = segment[extension.NETWORK_TYPE]
            segmentation_id = segment[extension.SEGMENTATION_ID]
            if segmentation_id == constants.ATTR_NOT_SPECIFIED:
                segmentation_id = None
            name = segment['name']
            if name == constants.ATTR_NOT_SPECIFIED:
                name = None
            description = segment['description']
            if description == constants.ATTR_NOT_SPECIFIED:
                description = None
            args = {
                'id': segment_id,
                'network_id': network_id,
                'name': name,
                'description': description,
                db.PHYSICAL_NETWORK: physical_network,
                db.NETWORK_TYPE: network_type,
                db.SEGMENTATION_ID: segmentation_id
            }
            # Calculate the index of segment
            segment_index = 0
            segments = self.get_segments(context,
                                         filters={'network_id': [network_id]},
                                         fields=['segment_index'],
                                         sorts=[('segment_index', True)])
            if segments:
                # NOTE(xiaohhui): The new index is the last index + 1; this
                # may cause a discontinuous segment_index, but segment_index
                # can still function as the order index for segments.
                segment_index = (segments[-1].get('segment_index') + 1)
            args['segment_index'] = segment_index

            new_segment = network.NetworkSegment(context, **args)
            new_segment.create()
            # Do some preliminary operations before committing the segment to
            # db
            registry.publish(resources.SEGMENT,
                             events.PRECOMMIT_CREATE,
                             self,
                             payload=events.DBEventPayload(
                                 context,
                                 resource_id=segment_id,
                                 states=(new_segment, )))
            # The new segment might have been updated by the callbacks
            # subscribed to the PRECOMMIT_CREATE event. So update it in the DB
            new_segment.update()
            return new_segment
Example #40
 def _registry_publish(self, res, event, id=None, exc_cls=None,
                       payload=None):
     # NOTE(armax): a callback exception here will prevent the request
     # from being processed. This is a hook point for backend's validation;
     # we raise to propagate the reason for the failure.
     try:
         registry.publish(res, event, self, payload=payload)
     except exceptions.CallbackFailure as e:
         if exc_cls:
             reason = (_('cannot perform %(event)s due to %(reason)s') %
                       {'event': event, 'reason': e})
             raise exc_cls(reason=reason, id=id)
Example #41
    def remove_rule(self, context, policy_id, rule_info):
        self.remove_rule_precommit(context, policy_id, rule_info)
        firewall_policy = self.firewall_db.remove_rule(context, policy_id,
                                                       rule_info)
        self.remove_rule_postcommit(context, policy_id, rule_info)
        payload = events.DBEventPayload(context=context,
                                        resource_id=policy_id,
                                        states=(firewall_policy,))

        registry.publish(
            const.FIREWALL_POLICY, events.AFTER_UPDATE, self, payload=payload)
        return firewall_policy
Example #42
 def __init__(self):
     self._rpc_backend = None
     self._drivers = []
     self._segmentation_types = {}
     self._interfaces = set()
     self._agent_types = set()
     drivers.register()
     registry.subscribe(rules.enforce_port_deletion_rules,
                        resources.PORT, events.BEFORE_DELETE)
     registry.publish(constants.TRUNK_PLUGIN, events.AFTER_INIT, self)
     for driver in self._drivers:
         LOG.debug('Trunk plugin loaded with driver %s', driver.name)
     self.check_compatibility()
Example #44
    def delete_firewall_group(self, context, id):
        firewall_group = self.firewall_db.get_firewall_group(context, id)
        if firewall_group['status'] == nl_constants.PENDING_DELETE:
            firewall_group['status'] = nl_constants.ERROR
        self.delete_firewall_group_precommit(context, firewall_group)
        if firewall_group['status'] != nl_constants.PENDING_DELETE:
            # let the driver delete the firewall group later
            self.firewall_db.delete_firewall_group(context, id)
        self.delete_firewall_group_postcommit(context, firewall_group)

        payload = events.DBEventPayload(context=context,
                                        resource_id=id,
                                        states=(firewall_group,))
        registry.publish(
            const.FIREWALL_GROUP, events.AFTER_DELETE, self, payload=payload)
Example #45
 def test_create_duplicate_default_l2_gateway_noop(self):
     def_bep_name = NSX_DEFAULT_BEP_NAME
     cfg.CONF.set_override("default_bridge_endpoint_profile",
                           def_bep_name, "nsx_v3")
     with mock.patch.object(nsx_v3_driver.NsxV3Driver,
                            '_get_bridge_vlan_tz_id',
                            return_value=['some_tz_id']):
         for i in range(0, 2):
             nsx_v3_driver.NsxV3Driver(mock.MagicMock())
             # fake the callback invoked after init
             registry.publish(resources.PROCESS, events.BEFORE_SPAWN,
                             mock.MagicMock())
         l2gws = self.driver._get_l2_gateways(self.context)
         # Verify whether only one default L2 gateway is created
         self.assertEqual(1, len(l2gws))
Example #46
 def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs):
     # NOTE(armax): a callback exception here will prevent the request
     # from being processed. This is a hook point for backend's validation;
     # we raise to propagate the reason for the failure.
     try:
         if 'payload' in kwargs:
             # TODO(boden): remove shim once all callbacks use payloads
             registry.publish(res, event, self, payload=kwargs['payload'])
         else:
             registry.notify(res, event, self, **kwargs)
     except exceptions.CallbackFailure as e:
         if exc_cls:
             reason = (_('cannot perform %(event)s due to %(reason)s') %
                       {'event': event, 'reason': e})
             raise exc_cls(reason=reason, id=id)
Example #47
    def create_firewall_rule(self, context, firewall_rule):
        request_body = firewall_rule
        with context.session.begin(subtransactions=True):
            firewall_rule = self.firewall_db.create_firewall_rule(
                context, firewall_rule)
            self.create_firewall_rule_precommit(context, firewall_rule)
        self.create_firewall_rule_postcommit(context, firewall_rule)

        payload = events.DBEventPayload(context=context,
                                        resource_id=firewall_rule['id'],
                                        request_body=request_body,
                                        states=(firewall_rule,))
        registry.publish(
            const.FIREWALL_RULE, events.AFTER_CREATE, self, payload=payload)
        return firewall_rule
Example #48
 def update_rbac_policy(self, context, id, rbac_policy):
     pol = rbac_policy['rbac_policy']
     entry = self._get_rbac_policy(context, id)
     object_type = entry.db_model.object_type
     try:
         registry.publish(resources.RBAC_POLICY, events.BEFORE_UPDATE, self,
                          payload=events.DBEventPayload(
                              context, request_body=pol,
                              states=(entry,), resource_id=id,
                              metadata={'object_type': object_type}))
     except c_exc.CallbackFailure as ex:
         raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
                                        details=ex)
     entry.update_fields(pol)
     entry.update()
     return self._make_rbac_policy_dict(entry)
Example #49
    def _router_removed(self, ri, router_id):
        if ri is None:
            LOG.warning("Info for router %s was not found. "
                        "Performing router cleanup", router_id)
            self.namespaces_manager.ensure_router_cleanup(router_id)
            return

        registry.publish(resources.ROUTER, events.BEFORE_DELETE, self,
                         payload=events.DBEventPayload(
                             self.context, states=(ri,),
                             resource_id=router_id))

        ri.delete()
        del self.router_info[router_id]

        registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
Example #50
    def create_firewall_group(self, context, firewall_group):
        request_body = firewall_group
        with context.session.begin(subtransactions=True):
            firewall_group = self.firewall_db.create_firewall_group(
                context, firewall_group)
            self.create_firewall_group_precommit(context, firewall_group)
            self._update_resource_status(context, firewall_db_v2.FirewallGroup,
                                         firewall_group)
        self.create_firewall_group_postcommit(context, firewall_group)

        payload = events.DBEventPayload(context=context,
                                        resource_id=firewall_group['id'],
                                        request_body=request_body,
                                        states=(firewall_group,))
        registry.publish(
            const.FIREWALL_GROUP, events.AFTER_CREATE, self, payload=payload)
        return firewall_group
Example #51
    def update_firewall_policy(self, context, id, firewall_policy_delta):
        old_firewall_policy = self.firewall_db.get_firewall_policy(context, id)
        new_firewall_policy = copy.deepcopy(old_firewall_policy)
        new_firewall_policy.update(firewall_policy_delta)
        self.update_firewall_policy_precommit(context, old_firewall_policy,
                                              new_firewall_policy)
        firewall_policy = self.firewall_db.update_firewall_policy(
            context, id, firewall_policy_delta)
        self.update_firewall_policy_postcommit(context, old_firewall_policy,
                                               firewall_policy)

        payload = events.DBEventPayload(context=context,
                                        resource_id=id,
                                        states=(firewall_policy,))
        registry.publish(
            const.FIREWALL_POLICY, events.AFTER_UPDATE, self, payload=payload)
        return firewall_policy
Example #52
 def test_mac_cleared_on_agent_delete_event(self):
     plugin = directory.get_plugin()
     mac_1 = tools.get_random_EUI()
     mac_2 = tools.get_random_EUI()
     self._create_dvr_mac_entry('host_1', mac_1)
     self._create_dvr_mac_entry('host_2', mac_2)
     agent = {'host': 'host_1', 'id': 'a1'}
     with mock.patch.object(plugin, 'notifier') as notifier:
         registry.publish(resources.AGENT, events.BEFORE_DELETE, self,
                          payload=events.DBEventPayload(
                              self.ctx, states=(agent,)))
     mac_list = self.mixin.get_dvr_mac_address_list(self.ctx)
     self.assertEqual(1, len(mac_list))
     for mac in mac_list:
         self.assertIsInstance(mac, dict)
     self.assertEqual('host_2', mac_list[0]['host'])
     notifier.dvr_mac_address_update.assert_called_once_with(
         self.ctx, mac_list)
Example #53
    def update_firewall_rule(self, context, id, firewall_rule_delta):
        old_firewall_rule = self.firewall_db.get_firewall_rule(context, id)
        new_firewall_rule = copy.deepcopy(old_firewall_rule)
        new_firewall_rule.update(firewall_rule_delta)
        self.update_firewall_rule_precommit(context, old_firewall_rule,
                                            new_firewall_rule)
        firewall_rule = self.firewall_db.update_firewall_rule(
            context, id, firewall_rule_delta)
        self.update_firewall_rule_postcommit(context, old_firewall_rule,
                                             firewall_rule)

        payload = events.DBEventPayload(context=context,
                                        resource_id=id,
                                        states=(firewall_rule,))
        registry.publish(
            const.FIREWALL_RULE, events.AFTER_UPDATE, self, payload=payload)

        return firewall_rule
Example #54
 def test_mac_not_cleared_on_agent_delete_event_with_remaining_agents(self):
     plugin = directory.get_plugin()
     mac_1 = tools.get_random_EUI()
     mac_2 = tools.get_random_EUI()
     self._create_dvr_mac_entry('host_1', mac_1)
     self._create_dvr_mac_entry('host_2', mac_2)
     agent1 = {'host': 'host_1', 'id': 'a1'}
     agent2 = {'host': 'host_1', 'id': 'a2'}
     with mock.patch.object(plugin, 'get_agents', return_value=[agent2]):
         with mock.patch.object(plugin, 'notifier') as notifier:
             registry.publish(resources.AGENT, events.BEFORE_DELETE, self,
                              payload=events.DBEventPayload(
                                  self.ctx, states=(agent1,)))
     mac_list = self.mixin.get_dvr_mac_address_list(self.ctx)
     for mac in mac_list:
         self.assertIsInstance(mac, dict)
     self.assertEqual(2, len(mac_list))
     self.assertFalse(notifier.dvr_mac_address_update.called)
Example #55
    def _delete(self, request, id, **kwargs):
        action = self._plugin_handlers[self.DELETE]

        # Check authz
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist if policy does not authorize SHOW
            with excutils.save_and_reraise_exception() as ctxt:
                if not policy.check(request.context,
                                    self._plugin_handlers[self.SHOW],
                                    obj,
                                    pluralized=self._collection):
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        # A delete operation usually alters resource usage, so mark affected
        # usage trackers as dirty
        resource_registry.set_resources_dirty(request.context)
        notifier_method = self._resource + '.delete.end'
        result = {self._resource: self._view(request.context, obj)}
        notifier_payload = {self._resource + '_id': id}
        notifier_payload.update(result)
        self._notifier.info(request.context,
                            notifier_method,
                            notifier_payload)

        registry.publish(self._resource, events.BEFORE_RESPONSE, self,
                         payload=events.APIEventPayload(
                             request.context, notifier_method, action,
                             states=({}, obj, result,),
                             collection_name=self._collection))
Example #56
    def update_firewall_group(self, context, id, firewall_group_delta):
        old_firewall_group = self.firewall_db.get_firewall_group(context, id)
        new_firewall_group = copy.deepcopy(old_firewall_group)
        new_firewall_group.update(firewall_group_delta)
        self.update_firewall_group_precommit(context, old_firewall_group,
                                             new_firewall_group)
        firewall_group_delta['status'] = new_firewall_group['status']
        firewall_group = self.firewall_db.update_firewall_group(
            context, id, firewall_group_delta)
        self.update_firewall_group_postcommit(context, old_firewall_group,
                                              firewall_group)

        payload = events.DBEventPayload(context=context,
                                        resource_id=id,
                                        states=(old_firewall_group,
                                                new_firewall_group))
        registry.publish(
            const.FIREWALL_GROUP, events.AFTER_UPDATE, self, payload=payload)

        return firewall_group
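
Because Example #56 publishes both snapshots via states=(old_firewall_group, new_firewall_group), an AFTER_UPDATE consumer can diff them. A rough, hypothetical sketch (the handler and log message are not from the source; `const` is assumed to be the same constants module the plugin above imports):

    from neutron_lib.callbacks import events
    from neutron_lib.callbacks import registry
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def _log_fwg_update(resource, event, trigger, payload=None):
        old, new = payload.states
        if old.get('status') != new.get('status'):
            LOG.info('Firewall group %s status changed: %s -> %s',
                     payload.resource_id, old.get('status'),
                     new.get('status'))


    # `const` is the constants module used by the plugin above (not shown).
    registry.subscribe(_log_fwg_update, const.FIREWALL_GROUP,
                       events.AFTER_UPDATE)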
Example #57
 def delete_rbac_policy(self, context, id):
     entry = self._get_rbac_policy(context, id)
     object_type = entry.db_model.object_type
     try:
         registry.publish(resources.RBAC_POLICY, events.BEFORE_DELETE, self,
                          payload=events.DBEventPayload(
                              context, states=(entry,), resource_id=id,
                              metadata={'object_type': object_type}))
     except c_exc.CallbackFailure as ex:
         raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
                                        details=ex)
     # make a dict copy because deleting the entry will nullify its
     # object_id link to network
     entry_dict = entry.to_dict()
     entry.delete()
     registry.publish(resources.RBAC_POLICY, events.AFTER_DELETE, self,
                      payload=events.DBEventPayload(
                          context, states=(entry_dict,), resource_id=id,
                          metadata={'object_type': object_type}))
     self.object_type_cache.pop(id, None)
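
Example #57 copies the entry to a dict before deleting it, so the AFTER_DELETE payload still carries a usable snapshot. A consumer keyed on the metadata might look roughly like this (handler name and body are illustrative, not from the source):

    from neutron_lib.callbacks import events
    from neutron_lib.callbacks import registry
    from neutron_lib.callbacks import resources
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def _rbac_policy_deleted(resource, event, trigger, payload=None):
        entry = payload.latest_state  # the entry_dict snapshot
        if payload.metadata.get('object_type') != 'network':
            return
        LOG.info('Network RBAC policy %s on object %s removed',
                 payload.resource_id, entry.get('object_id'))


    registry.subscribe(_rbac_policy_deleted,
                       resources.RBAC_POLICY, events.AFTER_DELETE)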
Example #58
        def notify(create_result):
            # Ensure usage trackers for all resources affected by this API
            # operation are marked as dirty
            with db_api.CONTEXT_WRITER.using(request.context):
                # Commit the reservation(s)
                for reservation in reservations:
                    quota.QUOTAS.commit_reservation(
                        request.context, reservation.reservation_id)
                resource_registry.set_resources_dirty(request.context)

            notifier_method = self._resource + '.create.end'
            self._notifier.info(request.context,
                                notifier_method,
                                create_result)
            registry.publish(self._resource, events.BEFORE_RESPONSE, self,
                             payload=events.APIEventPayload(
                                 request.context, notifier_method, action,
                                 request_body=body,
                                 states=({}, create_result,),
                                 collection_name=self._collection))
            return create_result
Example #59
 def create_rbac_policy(self, context, rbac_policy):
     e = rbac_policy['rbac_policy']
     try:
         registry.publish(resources.RBAC_POLICY, events.BEFORE_CREATE, self,
                          payload=events.DBEventPayload(
                              context, request_body=e,
                              metadata={'object_type': e['object_type']}))
     except c_exc.CallbackFailure as e:
         raise n_exc.InvalidInput(error_message=e)
     rbac_class = (
         rbac_obj.RBACBaseObject.get_type_class_map()[e['object_type']])
     try:
         rbac_args = {'project_id': e['project_id'],
                      'object_id': e['object_id'],
                      'action': e['action'],
                      'target_tenant': e['target_tenant']}
         _rbac_obj = rbac_class(context, **rbac_args)
         _rbac_obj.create()
     except db_exc.DBDuplicateEntry:
         raise ext_rbac.DuplicateRbacPolicy()
     return self._make_rbac_policy_dict(_rbac_obj)
Example #60
    def _update_router_gw_info(self, context, router_id, info, router=None):
        # Load the router only if necessary
        if not router:
            router = self._get_router(context, router_id)
        with context.session.begin(subtransactions=True):
            old_router = self._make_router_dict(router)
            router.enable_snat = self._get_enable_snat(info)
            router_body = {l3_apidef.ROUTER:
                {l3_apidef.EXTERNAL_GW_INFO: info}}
            registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self,
                             payload=events.DBEventPayload(
                                 context, request_body=router_body,
                                 states=(old_router,), resource_id=router_id,
                                 desired_state=router))

        # Calls superclass, pass router db object for avoiding re-loading
        super(L3_NAT_dbonly_mixin, self)._update_router_gw_info(
            context, router_id, info, router=router)
        # Returning the router may prove useful if this method is
        # overridden in child classes
        return router
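
Example #60 hands the still-mutable router DB object to subscribers via desired_state, so a PRECOMMIT_UPDATE callback runs inside the same transaction and can inspect, or abort, the pending change. A hypothetical sketch:

    from neutron_lib.callbacks import events
    from neutron_lib.callbacks import registry
    from neutron_lib.callbacks import resources
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def _audit_gw_update(resource, event, trigger, payload=None):
        # states=(old_router,) is the pre-change snapshot; desired_state is
        # the in-flight DB object, still inside the open transaction.
        old_router = payload.states[0]
        router_db = payload.desired_state
        LOG.debug('Router %s gateway update: old gw info %s, new enable_snat %s',
                  payload.resource_id,
                  old_router.get('external_gateway_info'),
                  router_db.enable_snat)
        # Raising here would propagate as CallbackFailure and roll back
        # the surrounding transaction.


    registry.subscribe(_audit_gw_update,
                       resources.ROUTER, events.PRECOMMIT_UPDATE)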