Example #1
 def create_trunk(self, context, trunk):
     """Create a trunk."""
     trunk = self.validate(context, trunk['trunk'])
     sub_ports = [trunk_objects.SubPort(
                      context=context,
                      port_id=p['port_id'],
                      segmentation_id=p['segmentation_id'],
                      segmentation_type=p['segmentation_type'])
                  for p in trunk['sub_ports']]
     admin_state_up = trunk.get('admin_state_up', True)
     # NOTE(status_police): a trunk is created in PENDING status. Depending
     # on the nature of the create request, a driver may set the status
     # immediately to ACTIVE if no physical provisioning is required.
     # Otherwise a transition to BUILD (or ERROR) should be expected
     # depending on how the driver reacts. PRECOMMIT failures prevent the
     # trunk from being created altogether.
     trunk_obj = trunk_objects.Trunk(context=context,
                                     admin_state_up=admin_state_up,
                                     id=uuidutils.generate_uuid(),
                                     name=trunk.get('name', ""),
                                     tenant_id=trunk['tenant_id'],
                                     port_id=trunk['port_id'],
                                     status=constants.PENDING_STATUS,
                                     sub_ports=sub_ports)
     with db_api.autonested_transaction(context.session):
         trunk_obj.create()
         payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                          current_trunk=trunk_obj)
         registry.notify(
             constants.TRUNK, events.PRECOMMIT_CREATE, self,
             payload=payload)
     registry.notify(
         constants.TRUNK, events.AFTER_CREATE, self, payload=payload)
     return trunk_obj
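The consumer side of this pattern is a callback subscribed to the same resource/event pair; raising from a PRECOMMIT_CREATE handler rolls the creation back, as the NOTE above describes. A minimal sketch, assuming the legacy neutron.callbacks import layout used throughout these examples (newer releases moved it to neutron_lib.callbacks); the handler and its check are hypothetical:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.services.trunk import constants

    def validate_trunk_precommit(resource, event, trigger, payload=None):
        # Runs inside create_trunk's transaction; any exception raised
        # here surfaces as a CallbackFailure and aborts the create.
        if not payload.current_trunk.sub_ports:
            raise ValueError("hypothetical check: trunk has no subports")

    registry.subscribe(validate_trunk_precommit,
                       constants.TRUNK, events.PRECOMMIT_CREATE)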
Example #2
File: agent.py Project: kkxue/neutron
 def _process_updated_router(self, router):
     ri = self.router_info[router['id']]
     ri.router = router
     registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
                     self, router=ri)
     ri.process(self)
     registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri)
Example #3
    def update_security_group(self, context, id, security_group):
        s = security_group['security_group']

        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': s,
        }
        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        with context.session.begin(subtransactions=True):
            sg = self._get_security_group(context, id)
            if sg['name'] == 'default' and 'name' in s:
                raise ext_sg.SecurityGroupCannotUpdateDefault()
            self._registry_notify(
                    resources.SECURITY_GROUP,
                    events.PRECOMMIT_UPDATE,
                    exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
            sg.update(s)
        sg_dict = self._make_security_group_dict(sg)

        kwargs['security_group'] = sg_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
                        **kwargs)
        return sg_dict
Example #4
    def delete_security_group(self, context, id):
        filters = {"security_group_id": [id]}
        ports = self._get_port_security_group_bindings(context, filters)
        if ports:
            raise ext_sg.SecurityGroupInUse(id=id)
        # confirm security group exists
        sg = self._get_security_group(context, id)

        if sg["name"] == "default" and not context.is_admin:
            raise ext_sg.SecurityGroupCannotRemoveDefault()
        kwargs = {"context": context, "security_group_id": id, "security_group": sg}
        self._registry_notify(
            resources.SECURITY_GROUP, events.BEFORE_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, **kwargs
        )

        with context.session.begin(subtransactions=True):
            # pass security_group_rule_ids to ensure
            # consistency with deleted rules
            kwargs["security_group_rule_ids"] = [r["id"] for r in sg.rules]
            self._registry_notify(
                resources.SECURITY_GROUP, events.PRECOMMIT_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, **kwargs
            )
            context.session.delete(sg)

        kwargs.pop("security_group")
        registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self, **kwargs)
Example #5
    def delete_security_group_rule(self, context, id):
        kwargs = {
            'context': context,
            'security_group_rule_id': id
        }
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            registry.notify(
                resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, self,
                **kwargs)
        except exceptions.CallbackFailure as e:
            reason = _('cannot be deleted due to %s') % e
            raise ext_sg.SecurityGroupRuleInUse(id=id, reason=reason)

        with context.session.begin(subtransactions=True):
            query = self._model_query(context, SecurityGroupRule).filter(
                SecurityGroupRule.id == id)
            try:
                # As there is a filter on a primary key it is not possible for
                # MultipleResultsFound to be raised
                context.session.delete(query.one())
            except exc.NoResultFound:
                raise ext_sg.SecurityGroupRuleNotFound(id=id)

        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
            **kwargs)
Example #6
File: db.py Project: annp/neutron
    def create_segment(self, context, segment):
        """Create a segment."""
        segment = segment['segment']
        segment_id = segment.get('id') or uuidutils.generate_uuid()
        with context.session.begin(subtransactions=True):
            network_id = segment['network_id']
            physical_network = segment[extension.PHYSICAL_NETWORK]
            if physical_network == constants.ATTR_NOT_SPECIFIED:
                physical_network = None
            network_type = segment[extension.NETWORK_TYPE]
            segmentation_id = segment[extension.SEGMENTATION_ID]
            if segmentation_id == constants.ATTR_NOT_SPECIFIED:
                segmentation_id = None
            args = {'id': segment_id,
                    'network_id': network_id,
                    db.PHYSICAL_NETWORK: physical_network,
                    db.NETWORK_TYPE: network_type,
                    db.SEGMENTATION_ID: segmentation_id}
            new_segment = db.NetworkSegment(**args)
            try:
                context.session.add(new_segment)
                context.session.flush([new_segment])
            except db_exc.DBReferenceError:
                raise n_exc.NetworkNotFound(net_id=network_id)
            registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, self,
                            context=context, segment=new_segment)

        return self._make_segment_dict(new_segment)
Example #7
    def delete_security_group(self, context, id):
        filters = {'security_group_id': [id]}
        ports = self._get_port_security_group_bindings(context, filters)
        if ports:
            raise ext_sg.SecurityGroupInUse(id=id)
        # confirm security group exists
        sg = self._get_security_group(context, id)

        if sg['name'] == 'default' and not context.is_admin:
            raise ext_sg.SecurityGroupCannotRemoveDefault()
        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': sg,
        }
        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_DELETE,
                              exc_cls=ext_sg.SecurityGroupInUse, id=id,
                              **kwargs)

        with context.session.begin(subtransactions=True):
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_DELETE,
                                  exc_cls=ext_sg.SecurityGroupInUse, id=id,
                                  **kwargs)
            context.session.delete(sg)

        kwargs.pop('security_group')
        registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self,
                        **kwargs)
Example #8
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true, this is the default security group for the
        given tenant, to be created if it does not already exist.
        """
        s = security_group["security_group"]
        kwargs = {"context": context, "security_group": s, "is_default": default_sg}
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            registry.notify(resources.SECURITY_GROUP, events.BEFORE_CREATE, self, **kwargs)
        except exceptions.CallbackFailure as e:
            raise ext_sg.SecurityGroupConflict(reason=e)

        tenant_id = self._get_tenant_id_for_create(context, s)

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)

        with context.session.begin(subtransactions=True):
            security_group_db = SecurityGroup(
                id=s.get("id") or (uuidutils.generate_uuid()),
                description=s["description"],
                tenant_id=tenant_id,
                name=s["name"],
            )
            context.session.add(security_group_db)
            if default_sg:
                context.session.add(
                    DefaultSecurityGroup(security_group=security_group_db, tenant_id=security_group_db["tenant_id"])
                )
            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = SecurityGroupRule(
                        id=uuidutils.generate_uuid(),
                        tenant_id=tenant_id,
                        security_group=security_group_db,
                        direction="ingress",
                        ethertype=ethertype,
                        source_group=security_group_db,
                    )
                    context.session.add(ingress_rule)

                egress_rule = SecurityGroupRule(
                    id=uuidutils.generate_uuid(),
                    tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction="egress",
                    ethertype=ethertype,
                )
                context.session.add(egress_rule)

        secgroup_dict = self._make_security_group_dict(security_group_db)

        kwargs["security_group"] = secgroup_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self, **kwargs)
        return secgroup_dict
Example #9
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true, this is the default security group for the
        given tenant, to be created if it does not already exist.
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }

        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        tenant_id = s['tenant_id']

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)

        with db_api.autonested_transaction(context.session):
            security_group_db = SecurityGroup(id=s.get('id') or (
                                              uuidutils.generate_uuid()),
                                              description=s['description'],
                                              tenant_id=tenant_id,
                                              name=s['name'])
            context.session.add(security_group_db)
            if default_sg:
                context.session.add(DefaultSecurityGroup(
                    security_group=security_group_db,
                    tenant_id=security_group_db['tenant_id']))
            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = SecurityGroupRule(
                        id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                        security_group=security_group_db,
                        direction='ingress',
                        ethertype=ethertype,
                        source_group=security_group_db)
                    context.session.add(ingress_rule)

                egress_rule = SecurityGroupRule(
                    id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction='egress',
                    ethertype=ethertype)
                context.session.add(egress_rule)

            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_CREATE,
                                  exc_cls=ext_sg.SecurityGroupConflict,
                                  **kwargs)

        secgroup_dict = self._make_security_group_dict(security_group_db)

        kwargs['security_group'] = secgroup_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict
Example #10
    def _create_security_group_rule(self, context, security_group_rule, validate=True):
        if validate:
            self._validate_security_group_rule(context, security_group_rule)
        rule_dict = security_group_rule["security_group_rule"]
        kwargs = {"context": context, "security_group_rule": rule_dict}
        self._registry_notify(
            resources.SECURITY_GROUP_RULE, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs
        )

        with context.session.begin(subtransactions=True):
            if validate:
                self._check_for_duplicate_rules_in_db(context, security_group_rule)
            db = sg_models.SecurityGroupRule(
                id=(rule_dict.get("id") or uuidutils.generate_uuid()),
                tenant_id=rule_dict["tenant_id"],
                security_group_id=rule_dict["security_group_id"],
                direction=rule_dict["direction"],
                remote_group_id=rule_dict.get("remote_group_id"),
                ethertype=rule_dict["ethertype"],
                protocol=rule_dict["protocol"],
                port_range_min=rule_dict["port_range_min"],
                port_range_max=rule_dict["port_range_max"],
                remote_ip_prefix=rule_dict.get("remote_ip_prefix"),
                description=rule_dict.get("description"),
            )
            context.session.add(db)
            self._registry_notify(
                resources.SECURITY_GROUP_RULE, events.PRECOMMIT_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs
            )
        res_rule_dict = self._make_security_group_rule_dict(db)
        kwargs["security_group_rule"] = res_rule_dict
        registry.notify(resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self, **kwargs)
        return res_rule_dict
Example #11
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
    previous_router_id = floatingip_db.router_id
    port_id, internal_ip_address, router_id = (
        self._check_and_get_fip_assoc(context, fip, floatingip_db))
    floatingip_db.update({'fixed_ip_address': internal_ip_address,
                          'fixed_port_id': port_id,
                          'router_id': router_id,
                          'last_known_router_id': previous_router_id})
    next_hop = None
    if router_id:
        router = self._get_router(context.elevated(), router_id)
        gw_port = router.gw_port
        if gw_port:
            for fixed_ip in gw_port.fixed_ips:
                addr = netaddr.IPAddress(fixed_ip.ip_address)
                if addr.version == l3_constants.IP_VERSION_4:
                    next_hop = fixed_ip.ip_address
                    break
    args = {'fixed_ip_address': internal_ip_address,
            'fixed_port_id': port_id,
            'router_id': router_id,
            'last_known_router_id': previous_router_id,
            'floating_ip_address': floatingip_db.floating_ip_address,
            'floating_network_id': floatingip_db.floating_network_id,
            'next_hop': next_hop,
            'context': context}
    registry.notify(resources.FLOATING_IP,
                    events.AFTER_UPDATE,
                    self._update_fip_assoc,
                    **args)
Example #12
 def _process_added_router(self, router):
     self._router_added(router['id'], router)
     ri = self.router_info[router['id']]
     ri.router = router
     ri.process(self)
     registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri)
Example #13
    def record_resource_update(self, context, rtype, resource):
        """Takes in an OVO and generates an event on relevant changes.

        A change is deemed to be relevant if it is not stale and if any
        fields changed beyond the revision number and update time.

        Both creates and updates are handled in this function.
        """
        if self._is_stale(rtype, resource):
            LOG.debug("Ignoring stale update for %s: %s", rtype, resource)
            return
        existing = self._type_cache(rtype).get(resource.id)
        self._type_cache(rtype)[resource.id] = resource
        changed_fields = self._get_changed_fields(existing, resource)
        if not changed_fields:
            LOG.debug("Received resource %s update without any changes: %s",
                      rtype, resource.id)
            return
        if existing:
            LOG.debug("Resource %s %s updated (revision_number %s->%s). "
                      "Old fields: %s New fields: %s",
                      rtype, existing.id, existing.revision_number,
                      resource.revision_number,
                      {f: existing.get(f) for f in changed_fields},
                      {f: resource.get(f) for f in changed_fields})
        else:
            LOG.debug("Received new resource %s: %s", rtype, resource)
        # local notification for agent internals to subscribe to
        registry.notify(rtype, events.AFTER_UPDATE, self,
                        context=context, changed_fields=changed_fields,
                        existing=existing, updated=resource,
                        resource_id=resource.id)
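A subscriber to this local notification can use changed_fields to react only to the attributes it tracks. A minimal sketch under the same legacy callback API; the resource type string 'Port' and the field being watched are assumptions:

    from neutron.callbacks import events
    from neutron.callbacks import registry

    def handle_cache_update(rtype, event, trigger, **kwargs):
        # changed_fields, existing, updated and resource_id arrive exactly
        # as passed by record_resource_update() above.
        if 'admin_state_up' not in kwargs['changed_fields']:
            return
        updated = kwargs['updated']
        # react to the admin-state flip here (hypothetical)

    registry.subscribe(handle_cache_update, 'Port', events.AFTER_UPDATE)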
Example #14
    def _setup_test_create_floatingip(
        self, fip, floatingip_db, router_db):
        port = {
            'id': '1234',
            portbindings.HOST_ID: 'myhost',
            'network_id': 'external_net'
        }

        with mock.patch.object(self.mixin, 'get_router') as grtr,\
                mock.patch.object(self.mixin,
                                  '_get_dvr_service_port_hostid') as vmp,\
                mock.patch.object(
                    self.mixin,
                    '_get_dvr_migrating_service_port_hostid') as mvmp,\
                mock.patch.object(
                    self.mixin,
                    'create_fip_agent_gw_port_if_not_exists') as c_fip,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_update_fip_assoc'):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            mvmp.return_value = 'my-future-host'
            registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE, self,
                            context=mock.Mock(), router_id=router_db['id'],
                            fixed_port_id=port['id'], floating_ip_id=fip['id'],
                            floating_network_id=fip['floating_network_id'],
                            fixed_ip_address='1.2.3.4')
            return c_fip
Example #15
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk."""
        # Check for basic validation since the request body here is not
        # automatically validated by the API layer.
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports)
        subports = subports_validator.validate(context, basic_validation=True)
        added_subports = []

        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            rules.trunk_can_be_managed(context, trunk)
            for subport in subports:
                obj = trunk_objects.SubPort(
                               context=context,
                               trunk_id=trunk_id,
                               port_id=subport['port_id'],
                               segmentation_type=subport['segmentation_type'],
                               segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)

        registry.notify(
            constants.SUBPORTS, events.AFTER_CREATE, self,
            added_subports=added_subports)
        return trunk
Example #16
 def treat_devices_removed(self, devices):
     resync = False
     self.sg_agent.remove_devices_filter(devices)
     for device in devices:
         LOG.info(_LI("Attachment %s removed"), device)
         details = None
         try:
             details = self.plugin_rpc.update_device_down(self.context,
                                                          device,
                                                          self.agent_id,
                                                          cfg.CONF.host)
         except Exception:
             LOG.exception(_LE("Error occurred while removing port %s"),
                           device)
             resync = True
         if details and details['exists']:
             LOG.info(_LI("Port %s updated."), device)
         else:
             LOG.debug("Device %s not defined on plugin", device)
         port_id = self._clean_network_ports(device)
         self.ext_manager.delete_port(self.context,
                                      {'device': device,
                                       'port_id': port_id})
         registry.notify(local_resources.PORT_DEVICE, events.AFTER_DELETE,
                         self, context=self.context, device=device,
                         port_id=port_id)
     if self.prevent_arp_spoofing:
         self.mgr.delete_arp_spoofing_protection(devices)
     return resync
Example #17
    def update_security_group(self, context, id, security_group):
        s = security_group['security_group']

        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': s,
        }
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            registry.notify(
                resources.SECURITY_GROUP, events.BEFORE_UPDATE, self,
                **kwargs)
        except exceptions.CallbackFailure as e:
            raise ext_sg.SecurityGroupConflict(reason=e)

        with context.session.begin(subtransactions=True):
            sg = self._get_security_group(context, id)
            if sg['name'] == 'default' and 'name' in s:
                raise ext_sg.SecurityGroupCannotUpdateDefault()
            sg.update(s)
        sg_dict = self._make_security_group_dict(sg)

        kwargs['security_group'] = sg_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
                        **kwargs)
        return sg_dict
Example #18
 def delete_port(self, context, port_id, hdn_operator_call=False,
                 l3_port_check=True):
     # If needed, check whether this port is owned by an L3 router;
     # if so, deletion should be prevented, so notify the registry to
     # let pre-delete checks run.
     with context.session.begin(subtransactions=True):
         # _get_port returns a sqlalchemy model
         port = self._get_port(context, port_id)
         if hdn_operator_call:
             # the port must be removed from the DB
             super(HdnNeutronPlugin, self).delete_port(context, port_id)
             LOG.debug("Port delete operation for %s completed",
                       port_id)
             return
         # Put the port in PENDING_DELETE status
         port.status = constants.STATUS_PENDING_DELETE
         # TODO(salv-orlando): Notify callback to disassociate floating IPs
         # on l3 service plugin
     if not hdn_operator_call:
         registry.notify(resources.PORT, events.AFTER_DELETE, self,
                         tenant_id=context.tenant_id,
                         resource_id=port_id)
         # Notify HDN operators
         hdnlib.notify_port_delete({'id': port_id,
                                    'tenant_id': context.tenant_id})
         LOG.debug(_("Queued request to delete port: %s"), port_id)
     else:
         LOG.debug(_("Port %s destroyed"), port_id)
Example #19
    def _validate_router_migration(self, context, router_db, router_res):
        """Allow centralized -> distributed state transition only."""
        if (router_db.extra_attributes.distributed and
            router_res.get('distributed') is False):
            LOG.info(_LI("Centralizing distributed router %s "
                         "is not supported"), router_db['id'])
            raise n_exc.BadRequest(
                resource='router',
                msg=_("Migration from distributed router to centralized is "
                      "not supported"))
        elif (not router_db.extra_attributes.distributed and
              router_res.get('distributed')):
            # the router must be administratively down before the upgrade
            if router_db.admin_state_up:
                msg = _('Cannot upgrade active router to distributed. Please '
                        'set router admin_state_up to False prior to upgrade.')
                raise n_exc.BadRequest(resource='router', msg=msg)

            # Notify advanced services of the imminent state transition
            # for the router.
            try:
                kwargs = {'context': context, 'router': router_db}
                registry.notify(
                    resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
            except exceptions.CallbackFailure as e:
                with excutils.save_and_reraise_exception():
                    # NOTE(armax): preserve old check's behavior
                    if len(e.errors) == 1:
                        raise e.errors[0].error
                    raise l3.RouterInUse(router_id=router_db['id'],
                                         reason=e)
Example #20
    def _process_l3_create(self, context, net_data, req_data):
        external = req_data.get(external_net.EXTERNAL)
        external_set = validators.is_attr_set(external)

        if not external_set:
            return

        # TODO(armax): these notifications should switch to *_COMMIT
        # when the event becomes available, as this block is expected
        # to be called within a plugin's session
        if external:
            try:
                registry.notify(
                    resources.EXTERNAL_NETWORK, events.BEFORE_CREATE,
                    self, context=context,
                    request=req_data, network=net_data)
            except c_exc.CallbackFailure as e:
                # raise the underlying exception
                raise e.errors[0].error
            context.session.add(
                ext_net_models.ExternalNetwork(network_id=net_data['id']))
            context.session.add(rbac_db.NetworkRBAC(
                  object_id=net_data['id'], action='access_as_external',
                  target_tenant='*', tenant_id=net_data['tenant_id']))
            registry.notify(
                resources.EXTERNAL_NETWORK, events.AFTER_CREATE,
                self, context=context,
                request=req_data, network=net_data)
        net_data[external_net.EXTERNAL] = external
Example #21
 def _process_added_router(self, router):
     self._router_added(router['id'], router)
     ri = self.router_info[router['id']]
     ri.router = router
     ri.process(self)
     registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri)
     self.l3_ext_manager.add_router(self.context, router)
Example #22
    def _delete(self, request, id, **kwargs):
        action = self._plugin_handlers[self.DELETE]

        # Check authz
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        # A delete operation usually alters resource usage, so mark affected
        # usage trackers as dirty
        resource_registry.set_resources_dirty(request.context)
        notifier_method = self._resource + '.delete.end'
        self._notifier.info(request.context,
                            notifier_method,
                            {self._resource + '_id': id})
        result = {self._resource: self._view(request.context, obj)}
        registry.notify(self._resource, events.BEFORE_RESPONSE, self,
                        context=request.context, data=result,
                        method_name=notifier_method, action=action,
                        original={})
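BEFORE_RESPONSE fires after the plugin call completes but before the HTTP response is returned, so extensions can inspect or post-process the result. A minimal sketch of a subscriber, assuming the keyword names used by the controller above; the resource string and the logging are illustrative:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def log_api_response(resource, event, trigger, **kwargs):
        # context, data, method_name, action and original arrive exactly
        # as passed by the controller's registry.notify() above.
        LOG.debug("%s finished: %s", kwargs['method_name'], kwargs['data'])

    registry.subscribe(log_api_response, 'network', events.BEFORE_RESPONSE)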
Example #23
    def delete_security_group_rule(self, context, id):
        kwargs = {
            'context': context,
            'security_group_rule_id': id
        }
        self._registry_notify(resources.SECURITY_GROUP_RULE,
                              events.BEFORE_DELETE, id=id,
                              exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs)

        with context.session.begin(subtransactions=True):
            query = self._model_query(context, SecurityGroupRule).filter(
                SecurityGroupRule.id == id)

            self._registry_notify(resources.SECURITY_GROUP_RULE,
                                  events.PRECOMMIT_DELETE,
                                  exc_cls=ext_sg.SecurityGroupRuleInUse, id=id,
                                  **kwargs)

            try:
                # As there is a filter on a primary key it is not possible for
                # MultipleResultsFound to be raised
                context.session.delete(query.one())
            except exc.NoResultFound:
                raise ext_sg.SecurityGroupRuleNotFound(id=id)

        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
            **kwargs)
Example #24
    def update_device_up(self, rpc_context, **kwargs):
        """Device is up on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        LOG.debug("Device %(device)s up at agent %(agent_id)s",
                  {'device': device, 'agent_id': agent_id})
        plugin = manager.NeutronManager.get_plugin()
        port_id = plugin._device_to_port_id(device)
        if (host and not plugin.port_bound_to_host(rpc_context,
                                                   port_id, host)):
            LOG.debug("Device %(device)s not bound to the"
                      " agent host %(host)s",
                      {'device': device, 'host': host})
            return

        port_id = plugin.update_port_status(rpc_context, port_id,
                                            q_const.PORT_STATUS_ACTIVE,
                                            host)
        try:
            # NOTE(armax): it's best to remove all objects from the
            # session, before we try to retrieve the new port object
            rpc_context.session.expunge_all()
            port = plugin._get_port(rpc_context, port_id)
        except exceptions.PortNotFound:
            LOG.debug('Port %s not found during update', port_id)
        else:
            kwargs = {
                'context': rpc_context,
                'port': port,
                'update_device_up': True
            }
            registry.notify(
                resources.PORT, events.AFTER_UPDATE, plugin, **kwargs)
Example #25
def _check_subnet_not_used(context, subnet_id):
    try:
        kwargs = {'context': context, 'subnet_id': subnet_id}
        registry.notify(
            resources.SUBNET, events.BEFORE_DELETE, None, **kwargs)
    except exceptions.CallbackFailure as e:
        raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e)
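Any subscriber can veto the delete simply by raising; the registry wraps the exception in a CallbackFailure, which the helper above converts into SubnetInUse. A minimal sketch of such a veto, with the in-use test left as a hypothetical stub:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.callbacks import resources

    def _subnet_still_referenced(context, subnet_id):
        # hypothetical check against some backend state
        return False

    def prevent_subnet_delete(resource, event, trigger, **kwargs):
        if _subnet_still_referenced(kwargs['context'], kwargs['subnet_id']):
            raise RuntimeError("subnet %s is still in use"
                               % kwargs['subnet_id'])

    registry.subscribe(prevent_subnet_delete,
                       resources.SUBNET, events.BEFORE_DELETE)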
Example #26
    def _process_l3_update(self, context, net_data, req_data):
        try:
            registry.notify(
                resources.EXTERNAL_NETWORK, events.BEFORE_UPDATE,
                self, context=context,
                request=req_data, network=net_data)
        except c_exc.CallbackFailure as e:
            # raise the underlying exception
            raise e.errors[0].error

        new_value = req_data.get(external_net.EXTERNAL)
        net_id = net_data['id']
        if not attributes.is_attr_set(new_value):
            return

        if net_data.get(external_net.EXTERNAL) == new_value:
            return

        if new_value:
            context.session.add(ExternalNetwork(network_id=net_id))
            net_data[external_net.EXTERNAL] = True
        else:
            # must make sure we do not have any external gateway ports
            # (and thus, possible floating IPs) on this network before
            # allowing it to be updated to external=False
            port = context.session.query(models_v2.Port).filter_by(
                device_owner=DEVICE_OWNER_ROUTER_GW,
                network_id=net_data['id']).first()
            if port:
                raise external_net.ExternalNetworkInUse(net_id=net_id)

            context.session.query(ExternalNetwork).filter_by(
                network_id=net_id).delete()
            net_data[external_net.EXTERNAL] = False
Example #27
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk."""
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            rules.trunk_can_be_managed(context, trunk)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(
                context, basic_validation=True,
                trunk_validation=False)

            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)

            trunk.sub_ports = list(current_subports.values())
            registry.notify(
                constants.SUBPORTS, events.AFTER_DELETE, self,
                removed_subports=removed_subports)
            return trunk
Example #28
    def _update(self, request, id, body, **kwargs):
        body = Controller.prepare_request_body(
            request.context, copy.deepcopy(body), False, self._resource, self._attr_info, allow_bulk=self._allow_bulk
        )
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [
            name
            for (name, value) in six.iteritems(self._attr_info)
            if (value.get("required_by_policy") or value.get("primary_key") or "default" not in value)
        ]
        # Ensure policy engine is initialized
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        orig_obj = self._item(request, id, field_list=field_list, parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[n_const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context, action, orig_obj, pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying its own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj["tenant_id"]:
                    ctxt.reraise = False
            msg = _("The resource could not be found.")
            raise webob.exc.HTTPNotFound(msg)

        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        # Usually an update operation does not alter resource usage, but as
        # there might be side effects it might be worth checking for changes
        # in resource usage here as well (e.g: a tenant port is created when a
        # router interface is added)
        resource_registry.set_resources_dirty(request.context)

        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + ".update.end"
        self._notifier.info(request.context, notifier_method, result)
        registry.notify(
            self._resource,
            events.BEFORE_RESPONSE,
            self,
            context=request.context,
            data=result,
            method_name=notifier_method,
            action=action,
            original=orig_object_copy,
        )
        return result
Example #29
    def create_network(self, context, network):
        """Instruct HDN operators to create a network

        This function implements the "network create" Neutron API operation.

        @param context - The Neutron context reference. This parameter holds
        a database session (context.session), the identifier of the tenant
        performing the operation (context.tenant_id), and other attributes
        such as a flag to test whether the tenant is an administrator
        (context.is_admin)

        @param network - A dict containing data of the network to be created

        """

        # Set the status of the network as 'PENDING CREATE'
        network['network']['status'] = constants.STATUS_PENDING_CREATE
        with db_api.autonested_transaction(context.session):
            new_net = super(HdnNeutronPlugin, self).create_network(
                context, network)
            self._process_l3_create(context, new_net, network['network'])

        # Use the HDN library to notify operators about the new network
        LOG.debug("Queued request to create network: %s", new_net['id'])
        hdnlib.notify_network_create(new_net)
        # Network is not present in neutron.callbacks.resources
        # TODO(salv-orlando): do not use literal for resource name
        registry.notify('NETWORK', events.AFTER_CREATE, self,
                        tenant_id=context.tenant_id,
                        resource_id=new_net['id'])
        return new_net
Example #30
    def delete_security_group(self, context, id):
        filters = {'security_group_id': [id]}
        ports = self._get_port_security_group_bindings(context, filters)
        if ports:
            raise ext_sg.SecurityGroupInUse(id=id)
        # confirm security group exists
        sg = self._get_security_group(context, id)

        if sg['name'] == 'default' and not context.is_admin:
            raise ext_sg.SecurityGroupCannotRemoveDefault()
        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': sg,
        }
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            registry.notify(
                resources.SECURITY_GROUP, events.BEFORE_DELETE, self,
                **kwargs)
        except exceptions.CallbackFailure as e:
            reason = _('cannot be deleted due to %s') % e
            raise ext_sg.SecurityGroupInUse(id=id, reason=reason)

        with context.session.begin(subtransactions=True):
            context.session.delete(sg)

        kwargs.pop('security_group')
        registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self,
                        **kwargs)
Example #31
 def create_net_assoc_postcommit(self, context, net_assoc):
     kwargs = {
         'context': context,
         'bgpvpn_id': net_assoc['bgpvpn_id'],
         'network_id': net_assoc['network_id']
     }
     registry.notify(resources.BGPVPN_NETWORK_ASSOC, events.AFTER_CREATE,
                     self, **kwargs)
Example #32
 def __init__(self):
     db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
         attributes.PORTS, [_extend_port_trunk_details])
     self._segmentation_types = {}
     registry.subscribe(rules.enforce_port_deletion_rules, resources.PORT,
                        events.BEFORE_DELETE)
     registry.notify(constants.TRUNK_PLUGIN, events.AFTER_INIT, self)
     LOG.debug('Trunk plugin loaded')
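The AFTER_INIT notification gives backend drivers a hook to register themselves once the trunk plugin is loaded. A minimal sketch; whether the plugin exposes add_segmentation_type, and its signature, is an assumption made here for illustration:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.services.trunk import constants

    def handle_trunk_plugin_init(resource, event, trigger, **kwargs):
        # 'trigger' is the trunk plugin instance that sent the event;
        # add_segmentation_type() is assumed, not confirmed by the source.
        trigger.add_segmentation_type('vlan', lambda tag: 0 < tag < 4095)

    registry.subscribe(handle_trunk_plugin_init,
                       constants.TRUNK_PLUGIN, events.AFTER_INIT)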
Example #33
    def _router_added(self, router_id, router):
        ri = self._create_router(router_id, router)
        registry.notify(resources.ROUTER, events.BEFORE_CREATE,
                        self, router=ri)

        self.router_info[router_id] = ri

        ri.initialize(self.process_monitor)
Example #34
 def create_security_group_rule(self, context, security_group_rule):
     res = self._create_security_group_rule(context, security_group_rule)
     registry.notify(resources.SECURITY_GROUP_RULE,
                     events.AFTER_CREATE,
                     self,
                     context=context,
                     security_group_rule=res)
     return res
Example #35
 def delete_router_assoc_postcommit(self, ctx, router_assoc):
     kwargs = {
         'context': ctx,
         'bgpvpn_id': router_assoc['bgpvpn_id'],
         'router_id': router_assoc['router_id']
     }
     registry.notify(resources.BGPVPN_ROUTER_ASSOC, events.AFTER_DELETE,
                     self, **kwargs)
Example #36
 def _process_updated_router(self, router):
     ri = self.router_info[router['id']]
     ri.router = router
     registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
                     self, router=ri)
     ri.process(self)
     registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri)
     self.l3_ext_manager.update_router(self.context, router)
Example #37
    def _update(self, request, id, body, **kwargs):
        body = Controller.prepare_request_body(request.context,
                                               copy.deepcopy(body), False,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [name for (name, value) in six.iteritems(self._attr_info)
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
        # Ensure policy engine is initialized
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        orig_obj = self._item(request, id, field_list=field_list,
                              parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[n_const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying its own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj['tenant_id']:
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        # Usually an update operation does not alter resource usage, but as
        # there might be side effects it might be worth checking for changes
        # in resource usage here as well (e.g: a tenant port is created when a
        # router interface is added)
        resource_registry.set_resources_dirty(request.context)

        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        registry.notify(self._resource, events.BEFORE_RESPONSE, self,
                        context=request.context, data=result,
                        method_name=notifier_method, action=action,
                        original=orig_object_copy)
        return result
Example #38
    def create_or_update_agent(self, context, agent_state):
        """Registers new agent in the database or updates existing.

        Returns tuple of agent status and state.
        Status is from the server's point of view: alive, new or revived.
        It could be used by agent to do some sync with the server if needed.
        """
        status = n_const.AGENT_ALIVE
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent_state[k]) for k in res_keys)
            if 'availability_zone' in agent_state:
                res['availability_zone'] = agent_state['availability_zone']
            configurations_dict = agent_state.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            resource_versions_dict = agent_state.get('resource_versions')
            if resource_versions_dict:
                res['resource_versions'] = jsonutils.dumps(
                    resource_versions_dict)
            res['load'] = self._get_agent_load(agent_state)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent_state['agent_type'], agent_state['host'])
                if not agent_db.is_active:
                    status = n_const.AGENT_REVIVED
                    if 'resource_versions' not in agent_state:
                        # updating agent_state with resource_versions taken
                        # from db so that
                        # _update_local_agent_resource_versions() will call
                        # version_manager and bring it up to date
                        agent_state['resource_versions'] = self._get_dict(
                            agent_db, 'resource_versions', ignore_missing=True)
                res['heartbeat_timestamp'] = current_time
                if agent_state.get('start_flag'):
                    res['started_at'] = current_time
                greenthread.sleep(0)
                self._log_heartbeat(agent_state, agent_db, configurations_dict)
                agent_db.update(res)
                event_type = events.AFTER_UPDATE
            except ext_agent.AgentNotFoundByTypeHost:
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = cfg.CONF.enable_new_agents
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
                event_type = events.AFTER_CREATE
                self._log_heartbeat(agent_state, agent_db, configurations_dict)
                status = n_const.AGENT_NEW
            greenthread.sleep(0)

        registry.notify(resources.AGENT, event_type, self, context=context,
                        host=agent_state['host'], plugin=self,
                        agent=agent_state)
        return status, agent_state
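Which event fires tells subscribers whether this heartbeat created a new agent record or refreshed an existing one; a scheduler could, for instance, rebalance when a new agent appears. A minimal sketch, with the rebalance left as a hypothetical stub:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.callbacks import resources

    def _reschedule_onto(context, host):
        # hypothetical: push eligible resources onto the new agent's host
        pass

    def handle_new_agent(resource, event, trigger, **kwargs):
        # context, host, plugin and agent arrive as passed by
        # create_or_update_agent() above.
        _reschedule_onto(kwargs['context'], kwargs['host'])

    registry.subscribe(handle_new_agent,
                       resources.AGENT, events.AFTER_CREATE)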
Example #39
 def delete_agent(self, context, id):
     agent = self._get_agent(context, id)
     registry.notify(resources.AGENT,
                     events.BEFORE_DELETE,
                     self,
                     context=context,
                     agent=agent)
     with context.session.begin(subtransactions=True):
         context.session.delete(agent)
Example #40
 def test_vif_details_bridge_name_handler_registration(self,
                                                       mock_gen_br_name):
     driver.register()
     mock_gen_br_name.return_value = 'fake-trunk-br-name'
     test_trigger = mock.Mock()
     registry.notify(agent_consts.OVS_BRIDGE_NAME, events.BEFORE_READ,
                     test_trigger, **{'port': {'trunk_details':
                                               {'trunk_id': 'foo'}}})
     test_trigger.assert_called_once_with('fake-trunk-br-name')
Example #41
 def test_subscribe_address_scope_of_subnetpool(self, notify):
     l3_db.subscribe()
     registry.notify(resources.SUBNETPOOL_ADDRESS_SCOPE,
                     events.AFTER_UPDATE, mock.ANY, context=mock.ANY,
                     subnetpool_id='fake_id')
     notify.assert_called_once_with(resources.SUBNETPOOL_ADDRESS_SCOPE,
                                    events.AFTER_UPDATE, mock.ANY,
                                    context=mock.ANY,
                                    subnetpool_id='fake_id')
Example #42
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk."""
        subports = subports['sub_ports']
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            original_trunk = copy.deepcopy(trunk)
            rules.trunk_can_be_managed(context, trunk)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(context,
                                                   basic_validation=True,
                                                   trunk_validation=False)

            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)

            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            # NOTE(status_police): the trunk status should transition to
            # PENDING irrespective of the status in which it is in to allow
            # the user to resolve potential conflicts due to prior add_subports
            # operations.
            # Should a trunk be in PENDING or BUILD state (e.g. when dealing
            # with multiple concurrent requests), the status is still forced
            # to PENDING. See add_subports() for more details.
            trunk.status = constants.PENDING_STATUS
            trunk.update()
            payload = callbacks.TrunkPayload(context,
                                             trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=removed_subports)
            if removed_subports:
                registry.notify(constants.SUBPORTS,
                                events.PRECOMMIT_DELETE,
                                self,
                                payload=payload)
        if removed_subports:
            registry.notify(constants.SUBPORTS,
                            events.AFTER_DELETE,
                            self,
                            payload=payload)
        return trunk
Example #43
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk."""
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)

            # Check for basic validation since the request body here is not
            # automatically validated by the API layer.
            subports = subports['sub_ports']
            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports, trunk['port_id'])
            subports = subports_validator.validate(context,
                                                   basic_validation=True)
            added_subports = []

            rules.trunk_can_be_managed(context, trunk)
            original_trunk = copy.deepcopy(trunk)
            # NOTE(status_police): the trunk status should transition to
            # DOWN (and finally in ACTIVE or ERROR), only if it is not in
            # ERROR status already. A user should attempt to resolve the ERROR
            # condition before adding more subports to the trunk. Should a
            # trunk be in DOWN or BUILD state (e.g. when dealing with
            # multiple concurrent requests), the status is still forced to
            # DOWN and thus can potentially overwrite an interleaving state
            # change to ACTIVE. Eventually the driver should bring the status
            # back to ACTIVE or ERROR.
            if trunk.status == constants.ERROR_STATUS:
                raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
            else:
                trunk.update(status=constants.DOWN_STATUS)

            for subport in subports:
                obj = trunk_objects.SubPort(
                    context=context,
                    trunk_id=trunk_id,
                    port_id=subport['port_id'],
                    segmentation_type=subport['segmentation_type'],
                    segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)
            payload = callbacks.TrunkPayload(context,
                                             trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=added_subports)
            if added_subports:
                registry.notify(constants.SUBPORTS,
                                events.PRECOMMIT_CREATE,
                                self,
                                payload=payload)
        if added_subports:
            registry.notify(constants.SUBPORTS,
                            events.AFTER_CREATE,
                            self,
                            payload=payload)
        return trunk
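
Note the two-phase pattern in add_subports() above (and, in reverse, in remove_subports()): PRECOMMIT_* fires inside the DB transaction, so a raising subscriber aborts it, while AFTER_* fires only once the change is committed. Below is a minimal sketch of a driver-side subscriber, assuming the pre-neutron-lib import paths and a hypothetical provision_subport() helper (not part of Neutron):

# A hypothetical subscriber for the SUBPORTS AFTER_CREATE notification
# emitted by add_subports(); an AFTER_DELETE handler would look the same.
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.services.trunk import constants


def handle_subports_added(resource, event, trigger, **kwargs):
    payload = kwargs['payload']
    # payload.current_trunk reflects the committed state; payload.subports
    # lists only the SubPort objects added by this request.
    for subport in payload.subports:
        provision_subport(payload.trunk_id, subport)  # hypothetical helper


registry.subscribe(handle_subports_added,
                   constants.SUBPORTS, events.AFTER_CREATE)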
Example #44
 def _check_router_not_in_use(self, context, router_id):
     try:
         kwargs = {'context': context, 'router_id': router_id}
         registry.notify(
             resources.ROUTER, events.BEFORE_DELETE, self, **kwargs)
     except exceptions.CallbackFailure as e:
         with excutils.save_and_reraise_exception():
             if len(e.errors) == 1:
                 raise e.errors[0].error
             raise l3.RouterInUse(router_id=router_id, reason=e)
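
The BEFORE_DELETE notification above only vetoes the delete when some subscriber raises; the except branch then re-raises a lone error directly or wraps several into RouterInUse. A minimal sketch of such a veto subscriber, again assuming the pre-neutron-lib imports and a hypothetical router_has_floating_ips() check:

# A hypothetical BEFORE_DELETE subscriber; any exception it raises reaches
# _check_router_not_in_use() wrapped in a CallbackFailure.
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources


def prevent_router_deletion(resource, event, trigger, **kwargs):
    context = kwargs['context']
    router_id = kwargs['router_id']
    if router_has_floating_ips(context, router_id):  # hypothetical check
        raise Exception('router %s still has floating IPs' % router_id)


registry.subscribe(prevent_router_deletion,
                   resources.ROUTER, events.BEFORE_DELETE)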
Example #45
def check_delete_network_precommit(context, id):
    try:
        kwargs = {'context': context, 'network_id': id}
        registry.notify(midonet_const.MIDONET_NETWORK,
                        events.PRECOMMIT_DELETE, None, **kwargs)
    except callback_exc.CallbackFailure as e:
        with excutils.save_and_reraise_exception():
            if len(e.errors) == 1:
                raise e.errors[0].error
            raise midonet_exc.MidonetNetworkInUse(network_id=id, reason=e)
Example #46
def main(argv=sys.argv[1:]):
    _init_cfg()
    _init_resource_plugin()
    nsx_plugin_in_use = _get_plugin()
    LOG.info(_LI('NSX Plugin in use: %s'), nsx_plugin_in_use)

    _validate_resource_choice(cfg.CONF.resource, nsx_plugin_in_use)
    _validate_op_choice(cfg.CONF.operation, nsx_plugin_in_use)

    registry.notify(cfg.CONF.resource, cfg.CONF.operation, 'nsxadmin',
                    force=cfg.CONF.force, property=cfg.CONF.property)
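
Here the resource and event names are plain CLI strings rather than the usual resources.*/events.* constants: each admin operation is just a callback keyed on that pair. A minimal sketch of how a handler might be registered, with the resource/operation names purely illustrative:

# A hypothetical nsxadmin operation handler; 'security-groups' and 'list'
# stand in for whatever --resource/--operation pair the CLI accepts.
import logging

from neutron.callbacks import registry

LOG = logging.getLogger(__name__)


def list_security_groups(resource, event, trigger, **kwargs):
    # 'resource' and 'event' arrive as the CLI strings; kwargs carries the
    # force/property flags passed by main() above.
    LOG.info('listing %s (force=%s)', resource, kwargs.get('force'))


registry.subscribe(list_security_groups, 'security-groups', 'list')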
Example #47
    def _set_bridge_name(port, vif_details):
        # REVISIT(rawlin): add BridgeName as a nullable column to the Port
        # model and simply check here if it's set and insert it into the
        # vif_details.

        def set_bridge_name_inner(bridge_name):
            vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name

        registry.notify(
            a_const.OVS_BRIDGE_NAME, events.BEFORE_READ,
            set_bridge_name_inner, port=port)
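
The flow here is inverted compared to the other examples: the notifier passes a setter closure as the trigger argument, and the subscriber answers by calling it. A minimal agent-side sketch, assuming a_const, events and registry are in scope as in the snippet above, with the lookup helper invented for illustration:

# A hypothetical subscriber answering the BEFORE_READ query above by
# calling the setter it receives as the trigger argument.
def report_bridge_name(resource, event, set_bridge_name, **kwargs):
    port = kwargs['port']
    # lookup_bridge_for_port() is illustrative; a real agent would consult
    # its own configuration or OVSDB here.
    set_bridge_name(lookup_bridge_for_port(port))


registry.subscribe(report_bridge_name,
                   a_const.OVS_BRIDGE_NAME, events.BEFORE_READ)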
Example #48
 def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs):
     # NOTE(armax): a callback exception here will prevent the request
     # from being processed. This is a hook point for backend's validation;
     # we raise to propagate the reason for the failure.
     try:
         registry.notify(res, event, self, **kwargs)
     except exceptions.CallbackFailure as e:
         if exc_cls:
             reason = (_('cannot perform %(event)s due to %(reason)s') %
                       {'event': event, 'reason': e})
             raise exc_cls(reason=reason, id=id)
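
Seen from the other side, a backend plugs its validation into this hook point by subscribing to the BEFORE_* or PRECOMMIT_* event and raising; _registry_notify() then converts the resulting CallbackFailure into the API-facing exc_cls. A minimal sketch with an invented backend rule:

# A hypothetical backend validator; the exception it raises is turned into
# exc_cls (e.g. SecurityGroupConflict) by _registry_notify() above.
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources


def validate_sg_update(resource, event, trigger, **kwargs):
    sg = kwargs.get('security_group') or {}
    if sg.get('name') == 'reserved':  # illustrative backend-only rule
        raise Exception('security group name "reserved" is not allowed')


registry.subscribe(validate_sg_update,
                   resources.SECURITY_GROUP, events.BEFORE_UPDATE)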
Example #49
    def _router_added(self, router_id, router):
        ri = self._create_router(router_id, router)
        registry.notify(resources.ROUTER, events.BEFORE_CREATE,
                        self, router=ri)

        self.router_info[router_id] = ri

        ri.initialize(self.process_monitor)

        # TODO(Carl) This is a hook in to fwaas.  It should be cleaned up.
        self.process_router_add(ri)
Example #50
File: db.py Project: fcxliang/lzf5-study
 def create_segment(self, context, segment):
     """Create a segment."""
     segment = segment['segment']
     segment_id = segment.get('id') or uuidutils.generate_uuid()
     try:
         new_segment = self._create_segment_db(context, segment_id, segment)
     except db_exc.DBReferenceError:
         raise n_exc.NetworkNotFound(net_id=segment['network_id'])
     registry.notify(resources.SEGMENT, events.AFTER_CREATE, self,
                     context=context, segment=new_segment)
     return self._make_segment_dict(new_segment)
Example #51
File: plugin.py Project: vj17/hdn
 def create_subnet(self, context, subnet):
     subnet['subnet']['status'] = constants.STATUS_PENDING_CREATE
     new_subnet = super(HdnNeutronPlugin, self).create_subnet(
         context, subnet)
     registry.notify(resources.SUBNET, events.AFTER_CREATE, self,
                     tenant_id=context.tenant_id,
                     resource_id=new_subnet['id'])
     # Notify HDN operators
     hdnlib.notify_subnet_create(new_subnet)
     LOG.debug("Queued request to create subnet: %s", new_subnet['id'])
     return new_subnet
Example #52
    def after(self, state):
        resource_name = state.request.context.get('resource')
        collection_name = state.request.context.get('collection')
        neutron_context = state.request.context.get('neutron_context')
        if not resource_name:
            LOG.debug("Skipping NotifierHook processing as there was no "
                      "resource associated with the request")
            return
        action = pecan_constants.ACTION_MAP.get(state.request.method)
        if not action or action not in ('create', 'update', 'delete'):
            LOG.debug("No notification will be sent for action: %s", action)
            return
        if utils.is_member_action(utils.get_controller(state)):
            return
        if state.response.status_int > 300:
            LOG.debug(
                "No notification will be sent due to unsuccessful "
                "status code: %s", state.response.status_int)
            return

        original = {}
        if (action in ('delete', 'update')
                and state.request.context.get('original_resources', [])):
            # We only need the original resource for updates and deletes
            original = state.request.context.get('original_resources')[0]
        if action == 'delete':
            # The object has been deleted, so we must notify the agent with the
            # data of the original object as the payload, but we do not need
            # to pass it in as the original
            result = {resource_name: original}
            original = {}
        else:
            if not state.response.body:
                result = {}
            else:
                result = state.response.json

        notifier_method = '%s.%s.end' % (resource_name, action)
        notifier_action = utils.get_controller(state).plugin_handlers[action]
        registry.notify(resource_name,
                        events.BEFORE_RESPONSE,
                        self,
                        context=neutron_context,
                        data=result,
                        method_name=notifier_method,
                        action=notifier_action,
                        collection=collection_name,
                        original=original)

        if action == 'delete':
            resource_id = state.request.context.get('resource_id')
            result = {resource_name + '_id': resource_id}
        self._notifier.info(neutron_context, notifier_method, result)
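
BEFORE_RESPONSE fires after the plugin call has completed but before the HTTP response goes out, which is where agent notifiers typically hook in. A minimal sketch of such a subscriber, with the fan-out helper invented for illustration:

# A hypothetical BEFORE_RESPONSE subscriber mirroring how an agent
# notifier would consume the hook above.
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources


def notify_agents(resource, event, trigger, context=None, data=None,
                  method_name=None, **kwargs):
    if data:
        fan_out_to_agents(context, method_name, data)  # hypothetical helper


registry.subscribe(notify_agents, resources.PORT, events.BEFORE_RESPONSE)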
Example #53
 def create_bgpvpn_postcommit(self, ctx, bgpvpn):
     bgpvpn_rts = self._format_bgpvpn_route_targets(bgpvpn)
     kwargs = {
         'id': bgpvpn['id'],
         'name': bgpvpn['name'],
         'context': ctx,
         'type': bgpvpn['type'],
         'rd': bgpvpn['route_distinguishers'],
         'vni': bgpvpn['vni']
     }
     kwargs.update(bgpvpn_rts)
     registry.notify(resources.BGPVPN, events.AFTER_CREATE, self, **kwargs)
Example #54
def update_network(core_plugin, context, network_id, net_data):
    network = core_plugin.update_network(context, network_id,
                                         {resources.NETWORK: net_data})
    # bundle the plugin API update with any other action required to
    # reflect a state change on the network, e.g. DHCP notifications
    registry.notify(resources.NETWORK,
                    events.BEFORE_RESPONSE,
                    core_plugin,
                    context=context,
                    data={resources.NETWORK: network},
                    method_name='network.update.end')
    return network
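
A possible call site for the helper above, assuming the neutron_lib plugin directory is available and network_id names an existing network; net_data carries only the attributes being changed:

# Hypothetical usage of the update_network() helper above.
from neutron import context as n_context
from neutron_lib.plugins import directory

plugin = directory.get_plugin()
admin_ctx = n_context.get_admin_context()
# network_id: UUID of an existing network (placeholder).
updated = update_network(plugin, admin_ctx, network_id,
                         {'admin_state_up': False})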
Example #55
File: plugin.py Project: vj17/hdn
 def update_subnet(self, context, subnet_id, subnet):
     # Put the subnet in PENDING UPDATE status
     subnet['subnet']['status'] = constants.STATUS_PENDING_UPDATE
     upd_subnet = super(HdnNeutronPlugin, self).update_subnet(
         context, subnet_id, subnet)
     LOG.debug("Queued request to update subnet: %s", subnet['id'])
     registry.notify(resources.SUBNET, events.AFTER_UPDATE, self,
                     tenant_id=context.tenant_id,
                     resource_id=subnet_id)
     # Notify HDN operators
     hdnlib.notify_subnet_update(upd_subnet)
     return upd_subnet
Example #56
    def audit_agent_state(self):
        """
        Audit the current state of the agent to determine if fault reports
        need to be raised or cleared
        """
        LOG.debug("Audit agent state")
        previous_agents = self.agents
        admin_context = context.get_admin_context()
        self.agents = {a['id']: a for a in self.get_agents(admin_context)}
        new_alive_topics = set()
        host_availability = {}
        for uuid, agent in six.iteritems(self.agents):
            if agent['agent_type'] == constants.AGENT_TYPE_BGP_ROUTING:
                if not self.is_bgp_enabled():
                    self.agents[uuid] = None
                    self.delete_agent(admin_context, uuid)
                    continue
            hostname = agent['host']
            if hostname not in host_availability:
                # NOTE(alegacy): Cache to avoid repeating for multiple
                # agents on same host.
                host_availability[hostname] = \
                    self.is_host_available(admin_context, hostname)
            if not host_availability[hostname]:
                # If agent dies while host is down, delay updating list of
                # agents until after the host comes online, so that audit
                # can correctly assess change in agent state at that point.
                # Set to None so that alarm will be raised or cleared when
                # host comes up.
                self.agents[uuid] = None
                continue
            elif uuid in previous_agents:
                previous = previous_agents[uuid]
            else:
                previous = None

            # Raise or clear alarm either if the alive state changes, or
            # if the alarm hasn't been raised/cleared for this agent yet.
            if not previous or agent['alive'] != previous['alive']:
                registry.notify(resources.AGENT, events.AFTER_UPDATE, self,
                                context=admin_context, host=agent['host'],
                                plugin=self, agent=agent)
                # TODO(alegacy): move fault reporting to a registry callback
                # Clear fault if agent is alive
                if agent['alive']:
                    self.clear_agent_fault(agent)
                    new_alive_topics.add(agent['topic'])
                # Only report fault if agent's host is online
                else:
                    self.report_agent_fault(agent)
        for new_alive_topic in new_alive_topics:
            self._redistribute_for_new_agent(admin_context, new_alive_topic)
Example #57
def serve_wsgi(cls):

    try:
        service = cls.create()
        service.start()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(
                _LE('Unrecoverable error: please check log '
                    'for details.'))

    registry.notify(resources.PROCESS, events.BEFORE_SPAWN, service)
    return service
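
BEFORE_SPAWN lets a plugin run one-time setup in the parent process before any workers fork; this is exactly the callback that the duplicate-default-gateway test further down fakes. A minimal sketch of such a subscriber, with the logging setup assumed:

# A hypothetical BEFORE_SPAWN subscriber; note that serve_wsgi() passes
# the service object as the trigger and no extra kwargs.
import logging

from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources

LOG = logging.getLogger(__name__)


def on_before_spawn(resource, event, trigger, **kwargs):
    LOG.info('service %s is about to spawn workers', trigger)


registry.subscribe(on_before_spawn, resources.PROCESS, events.BEFORE_SPAWN)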
Example #58
 def delete_rbac_policy(self, context, id):
     entry = self._get_rbac_policy(context, id)
     object_type = entry['object_type']
     try:
         registry.notify(RBAC_POLICY, events.BEFORE_DELETE, self,
                         context=context, object_type=object_type,
                         policy=entry)
     except c_exc.CallbackFailure as ex:
         raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'],
                                        details=ex)
     with context.session.begin(subtransactions=True):
         context.session.delete(entry)
     self.object_type_cache.pop(id, None)
Example #59
 def test_create_duplicate_default_l2_gateway_noop(self):
     def_bridge_cluster_name = nsx_v3_mocks.NSX_BRIDGE_CLUSTER_NAME
     cfg.CONF.set_override("default_bridge_cluster",
                           def_bridge_cluster_name,
                           "nsx_v3")
     for i in range(0, 2):
         nsx_v3_driver.NsxV3Driver(mock.MagicMock())
         # fake the callback invoked after init
         registry.notify(resources.PROCESS, events.BEFORE_SPAWN,
                         mock.MagicMock())
     l2gws = self.driver._get_l2_gateways(self.context)
     # Verify whether only one default L2 gateway is created
     self.assertEqual(1, len(l2gws))
Example #60
File: plugin.py Project: vj17/hdn
 def create_port(self, context, port):
     # Set port status as PENDING_CREATE
     port['port']['status'] = constants.STATUS_PENDING_CREATE
     with db_api.autonested_transaction(context.session):
         new_port = super(HdnNeutronPlugin, self).create_port(
             context, port)
     registry.notify(resources.PORT, events.AFTER_CREATE, self,
                     tenant_id=context.tenant_id,
                     resource_id=new_port['id'])
     # Notify HDN operators
     hdnlib.notify_port_create(new_port)
     LOG.debug("Queued request to create port: %s", new_port['id'])
     return new_port