Example #1
 def remove_addresses(self, context, address_group_id, addresses):
     ag = self._get_address_group(context, address_group_id)
     original_address_group = self._make_address_group_dict(ag)
     addrs_in_ag, addrs_not_in_ag = self._process_requested_addresses(
         ag, addresses['addresses'])
     if addrs_not_in_ag:
         raise ag_exc.AddressesNotFound(addresses=addrs_not_in_ag,
                                        address_group_id=address_group_id)
     for addr in addrs_in_ag:
         ag_obj.AddressAssociation.delete_objects(
             context, address_group_id=address_group_id, address=addr)
     ag.update()  # reload synthetic fields
     ag_dict = {'address_group': self._make_address_group_dict(ag)}
     registry.publish(resources.ADDRESS_GROUP,
                      events.AFTER_UPDATE,
                      self,
                      payload=events.DBEventPayload(
                          context,
                          resource_id=address_group_id,
                          states=(
                              original_address_group,
                              ag_dict['address_group'],
                          )))
     return ag_dict
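A minimal subscriber sketch for the AFTER_UPDATE notification published above; the handler name is illustrative, and only neutron_lib is assumed:

    from neutron_lib.callbacks import events, registry, resources


    def handle_address_group_update(resource, event, trigger, payload=None):
        # states carries the (original, updated) address group dicts built
        # by remove_addresses() above
        original, updated = payload.states
        removed = set(original['addresses']) - set(updated['addresses'])
        # react to the removed addresses here, e.g. resync a backend


    registry.subscribe(handle_address_group_update,
                       resources.ADDRESS_GROUP, events.AFTER_UPDATE)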
Example #2
    def update_security_group(self, context, id, security_group):
        s = security_group['security_group']

        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': s,
        }
        self._registry_notify(resources.SECURITY_GROUP,
                              events.BEFORE_UPDATE,
                              exc_cls=ext_sg.SecurityGroupConflict,
                              **kwargs)

        with db_api.CONTEXT_WRITER.using(context):
            sg = self._get_security_group(context, id)
            if sg.name == 'default' and 'name' in s:
                raise ext_sg.SecurityGroupCannotUpdateDefault()
            sg_dict = self._make_security_group_dict(sg)
            kwargs['original_security_group'] = sg_dict
            sg.update_fields(s)
            sg.update()
            sg_dict = self._make_security_group_dict(sg)
            kwargs['security_group'] = sg_dict
            self._registry_notify(
                resources.SECURITY_GROUP,
                events.PRECOMMIT_UPDATE,
                exc_cls=ext_sg.SecurityGroupConflict,
                payload=events.DBEventPayload(
                    context,
                    request_body=s,
                    states=(kwargs['original_security_group'], ),
                    resource_id=id,
                    desired_state=sg_dict))
        registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
                        **kwargs)
        return sg_dict
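A hedged sketch of a consumer for the PRECOMMIT_UPDATE payload built above. It runs inside the writer transaction, and any exception it raises is surfaced by _registry_notify as SecurityGroupConflict, rolling the update back; the validation rule itself is made up:

    from neutron_lib.callbacks import events, registry, resources


    def validate_sg_update(resource, event, trigger, payload=None):
        original = payload.states[0]     # pre-update dict
        desired = payload.desired_state  # updated dict, not yet committed
        if 'name' in payload.request_body and \
                original['name'] != desired['name']:
            raise ValueError('renaming security groups is not allowed')


    registry.subscribe(validate_sg_update,
                       resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE)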
Example #3
 def test_validate_rbac_policy_delete_fails_single_used_wildcarded(
         self, get_bound_tenant_ids_mock, mock_tenants_with_shared_access,
         _get_db_obj_rbac_entries_mock):
     policy = {
         'action': rbac_db_models.ACCESS_SHARED,
         'target_tenant': '*',
         'tenant_id': 'object_owner_tenant_id',
         'object_id': 'fake_obj_id'
     }
     context = mock.Mock()
     payload = events.DBEventPayload(
         context,
         states=(policy, ),
         metadata={
             'object_type':
             self._test_class.rbac_db_cls.db_model.object_type
         })
     with mock.patch.object(obj_db_api, 'get_object'):
         self.assertRaises(ext_rbac.RbacPolicyInUse,
                           self._test_class.validate_rbac_policy_delete,
                           resource=mock.Mock(),
                           event=events.BEFORE_DELETE,
                           trigger='dummy_trigger',
                           payload=payload)
Example #4
def _update_segment_host_mapping_for_agent(resource,
                                           event,
                                           trigger,
                                           payload=None):
    plugin = payload.metadata.get('plugin')
    agent = payload.desired_state
    host = payload.metadata.get('host')
    context = payload.context

    check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None)
    if (not check_user_configured_segment_plugin()
            or not check_segment_for_agent):
        return
    phys_nets = _get_phys_nets(agent)
    if not phys_nets:
        return
    start_flag = agent.get('start_flag', None)
    if host in reported_hosts and not start_flag:
        return
    reported_hosts.add(host)
    segments = get_segments_with_phys_nets(context, phys_nets)
    current_segment_ids = {
        segment['id']
        for segment in segments if check_segment_for_agent(segment, agent)
    }
    update_segment_host_mapping(context, host, current_segment_ids)
    registry.publish(resources.SEGMENT_HOST_MAPPING,
                     events.AFTER_CREATE,
                     plugin,
                     payload=events.DBEventPayload(context,
                                                   metadata={
                                                       'host':
                                                       host,
                                                       'current_segment_ids':
                                                       current_segment_ids
                                                   }))
Example #5
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true, this is the default security group for the
        given tenant and it is only created if one does not already exist.
        """
        s = security_group['security_group']
        self._registry_publish(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                               exc_cls=ext_sg.SecurityGroupConflict,
                               payload=events.DBEventPayload(
                                   context,
                                   metadata={'is_default': default_sg},
                                   request_body=security_group,
                                   desired_state=s))

        tenant_id = s['tenant_id']
        stateful = s.get('stateful', True)

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)
        else:
            existing_def_sg_id = self._get_default_sg_id(context, tenant_id)
            if existing_def_sg_id is not None:
                # default already exists, return it
                return self.get_security_group(context, existing_def_sg_id)

        with db_api.CONTEXT_WRITER.using(context):
            delta = len(ext_sg.sg_supported_ethertypes)
            delta = delta * 2 if default_sg else delta
            quota.QUOTAS.quota_limit_check(context, tenant_id,
                                           security_group_rule=delta)

            sg = sg_obj.SecurityGroup(
                context, id=s.get('id') or uuidutils.generate_uuid(),
                description=s['description'], project_id=tenant_id,
                name=s['name'], is_default=default_sg, stateful=stateful)
            sg.create()

            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = sg_obj.SecurityGroupRule(
                        context, id=uuidutils.generate_uuid(),
                        project_id=tenant_id, security_group_id=sg.id,
                        direction='ingress', ethertype=ethertype,
                        remote_group_id=sg.id)
                    ingress_rule.create()
                    sg.rules.append(ingress_rule)

                egress_rule = sg_obj.SecurityGroupRule(
                    context, id=uuidutils.generate_uuid(),
                    project_id=tenant_id, security_group_id=sg.id,
                    direction='egress', ethertype=ethertype)
                egress_rule.create()
                sg.rules.append(egress_rule)
            sg.obj_reset_changes(['rules'])

            # fetch sg from db to load the sg rules with sg model.
            # NOTE(slaweq): With the new system/project scopes it may happen
            # that a project admin tries to list security groups for a
            # different project, and during that call Neutron ensures that
            # the default security group is created. In such a case an
            # elevated context needs to be used here; otherwise the SG will
            # not be found and error 500 will be returned through the API.
            get_context = context.elevated() if default_sg else context
            sg = sg_obj.SecurityGroup.get_object(get_context, id=sg.id)
            secgroup_dict = self._make_security_group_dict(sg)
            self._registry_publish(resources.SECURITY_GROUP,
                                   events.PRECOMMIT_CREATE,
                                   exc_cls=ext_sg.SecurityGroupConflict,
                                   payload=events.DBEventPayload(
                                       context,
                                       resource_id=sg.id,
                                       metadata={'is_default': default_sg},
                                       states=(secgroup_dict,)))

        registry.publish(resources.SECURITY_GROUP, events.AFTER_CREATE,
                         self, payload=events.DBEventPayload(
                             context,
                             resource_id=secgroup_dict['id'],
                             metadata={'is_default': default_sg},
                             states=(secgroup_dict,)))

        return secgroup_dict
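The AFTER_CREATE notification above can also be consumed through the decorator-based registration neutron_lib offers; a sketch with hypothetical class and method names:

    from neutron_lib.callbacks import events, registry, resources


    @registry.has_registry_receivers
    class SecurityGroupAuditor(object):

        @registry.receives(resources.SECURITY_GROUP, [events.AFTER_CREATE])
        def _audit_sg_create(self, resource, event, trigger, payload=None):
            secgroup_dict = payload.states[-1]  # the freshly created dict
            if payload.metadata['is_default']:
                # default groups are created implicitly; audit separately
                pass


    SecurityGroupAuditor()  # instantiating registers the receivers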
Example #6
    def _create_security_group_rule(self, context, security_group_rule,
                                    validate=True):
        if validate:
            sg_id = self._validate_security_group_rule(context,
                                                       security_group_rule)
        rule_dict = security_group_rule['security_group_rule']
        remote_ip_prefix = rule_dict.get('remote_ip_prefix')
        if remote_ip_prefix:
            remote_ip_prefix = net.AuthenticIPNetwork(remote_ip_prefix)

        protocol = rule_dict.get('protocol')
        if protocol:
            # object expects strings only
            protocol = str(protocol)

        args = {
            'id': (rule_dict.get('id') or uuidutils.generate_uuid()),
            'project_id': rule_dict['tenant_id'],
            'security_group_id': rule_dict['security_group_id'],
            'direction': rule_dict['direction'],
            'remote_group_id': rule_dict.get('remote_group_id'),
            'remote_address_group_id': rule_dict.get(
                'remote_address_group_id'),
            'ethertype': rule_dict['ethertype'],
            'protocol': protocol,
            'remote_ip_prefix': remote_ip_prefix,
            'description': rule_dict.get('description'),
        }

        port_range_min = self._safe_int(rule_dict['port_range_min'])
        if port_range_min is not None:
            args['port_range_min'] = port_range_min

        port_range_max = self._safe_int(rule_dict['port_range_max'])
        if port_range_max is not None:
            args['port_range_max'] = port_range_max

        self._registry_publish(
            resources.SECURITY_GROUP_RULE,
            events.BEFORE_CREATE,
            exc_cls=ext_sg.SecurityGroupConflict,
            payload=events.DBEventPayload(
                context, resource_id=args['id'],
                states=(args,)))

        with db_api.CONTEXT_WRITER.using(context):
            if validate:
                self._check_for_duplicate_rules(context, sg_id,
                                                [security_group_rule])
            sg_rule = sg_obj.SecurityGroupRule(context, **args)
            sg_rule.create()

            # fetch sg_rule from the db to load the sg rules with the sg
            # model; otherwise a DetachedInstanceError can occur for model
            # extensions
            sg_rule = sg_obj.SecurityGroupRule.get_object(context,
                                                          id=sg_rule.id)
            res_rule_dict = self._make_security_group_rule_dict(sg_rule.db_obj)
            self._registry_publish(
                resources.SECURITY_GROUP_RULE,
                events.PRECOMMIT_CREATE,
                exc_cls=ext_sg.SecurityGroupConflict,
                payload=events.DBEventPayload(
                    context, resource_id=res_rule_dict['id'],
                    states=(res_rule_dict,)))

        return res_rule_dict
Example #7
 def _create_test_payload(self, context='test_ctx'):
     bgp_speaker = {'id': '11111111-2222-3333-4444-555555555555'}
     payload = events.DBEventPayload(context,
                                     metadata={'plugin': self.plugin},
                                     states=(bgp_speaker, ))
     return payload
Example #8
    def _ensure_vr_id(self, context, router_db, ha_network):
        router_id = router_db.id
        network_id = ha_network.network_id

        # TODO(kevinbenton): let decorator handle duplicate retry
        # like in review.opendev.org/#/c/367179/1/neutron/db/l3_hamode_db.py
        for count in range(MAX_ALLOCATION_TRIES):
            try:
                # NOTE(kevinbenton): we disallow subtransactions because the
                # retry logic will bust any parent transactions
                with db_api.CONTEXT_WRITER.using(context):
                    if router_db.extra_attributes.ha_vr_id:
                        LOG.debug(
                            "Router %(router_id)s has already been "
                            "allocated a ha_vr_id %(ha_vr_id)d!", {
                                'router_id': router_id,
                                'ha_vr_id': router_db.extra_attributes.ha_vr_id
                            })
                        return

                    old_router = self._make_router_dict(router_db)
                    vr_id = self._get_vr_id(context, network_id)
                    if vr_id is None:
                        raise l3ha_exc.NoVRIDAvailable(router_id=router_id)

                    allocation = l3_hamode.L3HARouterVRIdAllocation(
                        context, network_id=network_id, vr_id=vr_id)
                    allocation.create()

                    router_db.extra_attributes.ha_vr_id = allocation.vr_id
                    LOG.debug(
                        "Router %(router_id)s has been allocated a ha_vr_id "
                        "%(ha_vr_id)d.", {
                            'router_id': router_id,
                            'ha_vr_id': allocation.vr_id
                        })
                    router_body = {
                        l3_apidef.ROUTER: {
                            l3_ext_ha_apidef.HA_INFO: True,
                            'ha_vr_id': allocation.vr_id
                        }
                    }
                    registry.publish(resources.ROUTER,
                                     events.PRECOMMIT_UPDATE,
                                     self,
                                     payload=events.DBEventPayload(
                                         context,
                                         request_body=router_body,
                                         states=(old_router, ),
                                         resource_id=router_id,
                                         desired_state=router_db))

                    return allocation.vr_id

            except obj_base.NeutronDbObjectDuplicateEntry:
                LOG.info(
                    "Attempt %(count)s to allocate a VRID in the "
                    "network %(network)s for the router %(router)s", {
                        'count': count,
                        'network': network_id,
                        'router': router_id
                    })

        raise l3ha_exc.MaxVRIDAllocationTriesReached(
            network_id=network_id,
            router_id=router_id,
            max_tries=MAX_ALLOCATION_TRIES)
Example #9
 def delete_agent(self, context, id):
     agent = self._get_agent(context, id)
     registry.publish(resources.AGENT, events.BEFORE_DELETE, self,
                      payload=events.DBEventPayload(
                          context, states=(agent,), resource_id=id))
     agent.delete()
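BEFORE_* events are the natural place to veto an operation: errors raised by their subscribers are re-raised by the registry as CallbackFailure, so delete_agent() aborts before agent.delete() runs. A hedged sketch of such a guard (the policy itself is made up):

    from neutron_lib.callbacks import events, registry, resources


    def guard_agent_delete(resource, event, trigger, payload=None):
        agent = payload.latest_state  # the Agent object from states
        if agent.admin_state_up:
            raise RuntimeError(
                'refusing to delete an administratively up agent')


    registry.subscribe(guard_agent_delete,
                       resources.AGENT, events.BEFORE_DELETE)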
Example #10
 def test_is_to_be_committed(self):
     e = events.DBEventPayload(mock.ANY,
                               states=[mock.ANY],
                               resource_id='1a',
                               desired_state=object())
     self.assertTrue(e.is_to_be_committed)
Example #11
 def test_is_persisted(self):
     e = events.DBEventPayload(mock.ANY, states=['s1'], resource_id='1a')
     self.assertTrue(e.is_persisted)
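Read together with Example #19 below, these two tests pin down how a payload classifies itself from its constructor arguments; a paraphrase of the semantics (derived from the tests, not from the library source):

    from unittest import mock

    from neutron_lib.callbacks import events

    # resource_id plus at least one recorded state: the resource already
    # exists in the database
    persisted = events.DBEventPayload(mock.ANY, states=['s1'],
                                      resource_id='1a')
    assert persisted.is_persisted and not persisted.is_to_be_committed

    # adding a desired_state marks an uncommitted change in flight
    pending = events.DBEventPayload(mock.ANY, states=['s1'],
                                    resource_id='1a', desired_state=object())
    assert pending.is_to_be_committed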
Example #12
 def _notify_after_router_edge_association(self, context, router):
     registry.publish(nsxv_constants.SERVICE_EDGE,
                      events.AFTER_CREATE,
                      self,
                      payload=events.DBEventPayload(context,
                                                    states=(router, )))
Example #13
 def _notify_port_updated(self, context, port_id):
     payload = lib_events.DBEventPayload(
         context, metadata={'changed_fields': {'local_ip'}},
         resource_id=port_id, states=(None,))
     lib_registry.publish(resources.PORT, lib_events.AFTER_UPDATE,
                          self, payload=payload)
Example #14
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk."""
        subports = subports['sub_ports']
        with db_api.CONTEXT_WRITER.using(context):
            trunk = self._get_trunk(context, trunk_id)
            original_trunk = copy.deepcopy(trunk)
            rules.trunk_can_be_managed(context, trunk)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(context,
                                                   basic_validation=True,
                                                   trunk_validation=False)

            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)

            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            # NOTE(status_police): the trunk status should transition to
            # DOWN irrespective of the status it is in, to allow
            # the user to resolve potential conflicts due to prior add_subports
            # operations.
            # Should a trunk be in DOWN or BUILD state (e.g. when dealing
            # with multiple concurrent requests), the status is still forced
            # to DOWN. See add_subports() for more details.
            trunk.update(status=constants.TRUNK_DOWN_STATUS)
            payload = events.DBEventPayload(
                context,
                resource_id=trunk_id,
                states=(
                    original_trunk,
                    trunk,
                ),
                metadata={'subports': removed_subports})
            if removed_subports:
                registry.publish(resources.SUBPORTS,
                                 events.PRECOMMIT_DELETE,
                                 self,
                                 payload=payload)
        if removed_subports:
            payload = events.DBEventPayload(
                context,
                resource_id=trunk_id,
                states=(
                    original_trunk,
                    trunk,
                ),
                metadata={'subports': removed_subports})
            registry.publish(resources.SUBPORTS,
                             events.AFTER_DELETE,
                             self,
                             payload=payload)
        return trunk
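A hedged sketch of an AFTER_DELETE consumer for the payload built above, assuming only the shape that remove_subports() publishes:

    from neutron_lib.callbacks import events, registry, resources


    def on_subports_removed(resource, event, trigger, payload=None):
        original_trunk, trunk = payload.states
        for subport in payload.metadata['subports']:
            # each entry is a SubPort object deleted inside the transaction;
            # unwire any local state for it here
            print('subport %s removed from trunk %s'
                  % (subport.port_id, trunk.id))


    registry.subscribe(on_subports_removed,
                       resources.SUBPORTS, events.AFTER_DELETE)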
Example #15
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk."""
        with db_api.CONTEXT_WRITER.using(context):
            trunk = self._get_trunk(context, trunk_id)

            # Check for basic validation since the request body here is not
            # automatically validated by the API layer.
            subports = subports['sub_ports']
            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports, trunk['port_id'])
            subports = subports_validator.validate(context,
                                                   basic_validation=True)
            added_subports = []

            rules.trunk_can_be_managed(context, trunk)
            original_trunk = copy.deepcopy(trunk)
            # NOTE(status_police): the trunk status should transition to
            # DOWN (and finally in ACTIVE or ERROR), only if it is not in
            # ERROR status already. A user should attempt to resolve the ERROR
            # condition before adding more subports to the trunk. Should a
            # trunk be in DOWN or BUILD state (e.g. when dealing with
            # multiple concurrent requests), the status is still forced to
            # DOWN and thus can potentially overwrite an interleaving state
            # change to ACTIVE. Eventually the driver should bring the status
            # back to ACTIVE or ERROR.
            if trunk.status == constants.TRUNK_ERROR_STATUS:
                raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
            trunk.update(status=constants.TRUNK_DOWN_STATUS)

            for subport in subports:
                obj = trunk_objects.SubPort(
                    context=context,
                    trunk_id=trunk_id,
                    port_id=subport['port_id'],
                    segmentation_type=subport['segmentation_type'],
                    segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)
            payload = events.DBEventPayload(
                context,
                resource_id=trunk_id,
                states=(
                    original_trunk,
                    trunk,
                ),
                metadata={'subports': added_subports})
            if added_subports:
                registry.publish(resources.SUBPORTS,
                                 events.PRECOMMIT_CREATE,
                                 self,
                                 payload=payload)
        if added_subports:
            payload = events.DBEventPayload(
                context,
                resource_id=trunk_id,
                states=(
                    original_trunk,
                    trunk,
                ),
                metadata={'subports': added_subports})
            registry.publish(resources.SUBPORTS,
                             events.AFTER_CREATE,
                             self,
                             payload=payload)
        return trunk
Example #16
 def test_states(self):
     e = events.DBEventPayload(mock.ANY, states=['s1'])
     self.assertEqual(['s1'], e.states)
Example #17
 def test_desired_state(self):
     desired_state = {'k': object()}
     e = events.DBEventPayload(mock.ANY, desired_state=desired_state)
     self.assertEqual(desired_state, e.desired_state)
     desired_state['a'] = 'A'
     self.assertEqual(desired_state, e.desired_state)
Example #18
def _notify_l3_agent_port_update(resource, event, trigger, payload):
    new_port = payload.latest_state
    original_port = payload.states[0]

    is_fixed_ips_changed = n_utils.port_ip_changed(new_port, original_port)

    if (original_port['device_owner'] in [
            n_const.DEVICE_OWNER_HA_REPLICATED_INT,
            n_const.DEVICE_OWNER_ROUTER_SNAT, n_const.DEVICE_OWNER_ROUTER_GW
    ] and not is_fixed_ips_changed):
        return

    if new_port and original_port:
        l3plugin = directory.get_plugin(plugin_constants.L3)
        context = payload.context
        new_port_host = new_port.get(portbindings.HOST_ID)
        original_port_host = original_port.get(portbindings.HOST_ID)
        is_new_port_binding_changed = (new_port_host
                                       and new_port_host != original_port_host)
        is_bound_port_moved = (original_port_host
                               and original_port_host != new_port_host)
        fip_router_id = None
        dest_host = None
        new_port_profile = new_port.get(portbindings.PROFILE)
        if new_port_profile:
            dest_host = new_port_profile.get('migrating_to')
        if is_new_port_binding_changed or is_bound_port_moved or dest_host:
            fips = l3plugin._get_floatingips_by_port_id(
                context, port_id=original_port['id'])
            fip = fips[0] if fips else None
            if fip:
                fip_router_id = fip['router_id']
        if is_bound_port_moved:
            removed_routers = l3plugin.get_dvr_routers_to_remove(
                context, original_port, get_related_hosts_info=False)
            if removed_routers:
                _notify_port_delete(event,
                                    resource,
                                    trigger,
                                    payload=events.DBEventPayload(
                                        context,
                                        metadata={
                                            'removed_routers': removed_routers,
                                            'get_related_hosts_info': False
                                        },
                                        states=(original_port, )))

            def _should_notify_on_fip_update():
                if not fip_router_id:
                    return False
                for info in removed_routers:
                    if info['router_id'] == fip_router_id:
                        return False
                try:
                    router = l3plugin._get_router(context, fip_router_id)
                except l3_exc.RouterNotFound:
                    return False
                return l3_dvr_db.is_distributed_router(router)

            if _should_notify_on_fip_update():
                l3plugin.l3_rpc_notifier.routers_updated_on_host(
                    context, [fip_router_id],
                    original_port[portbindings.HOST_ID])
        # If dest_host is set, then the port profile has changed and this
        # port is being migrated. The call below will pre-create the router
        # on the new host. If the original port has no host binding and no
        # device owner, then this is a migration from unbound to bound.
        if (is_new_port_binding_changed or dest_host):
            if (not original_port[portbindings.HOST_ID]
                    and not original_port['device_owner']):
                l3plugin.dvr_handle_new_service_port(context,
                                                     new_port,
                                                     unbound_migrate=True)
            else:
                l3plugin.dvr_handle_new_service_port(context,
                                                     new_port,
                                                     dest_host=dest_host,
                                                     router_id=fip_router_id)
            l3plugin.update_arp_entry_for_dvr_service_port(context, new_port)
            return
        # Check for allowed_address_pairs and port state
        new_port_host = new_port.get(portbindings.HOST_ID)
        allowed_address_pairs_list = new_port.get('allowed_address_pairs')
        if allowed_address_pairs_list and new_port_host:
            new_port_state = new_port.get('admin_state_up')
            original_port_state = original_port.get('admin_state_up')
            if new_port_state:
                # Case where we activate the port from an inactive state,
                # or where the same port has additional address_pairs added.
                for address_pair in allowed_address_pairs_list:
                    _dvr_handle_unbound_allowed_addr_pair_add(
                        l3plugin, context, new_port, address_pair)
                return
            elif original_port_state:
                # Case where we deactivate the port from an active state.
                for address_pair in allowed_address_pairs_list:
                    _dvr_handle_unbound_allowed_addr_pair_del(
                        l3plugin, context, original_port, address_pair)
                return

        if payload.metadata.get('mac_address_updated') or is_fixed_ips_changed:
            l3plugin.update_arp_entry_for_dvr_service_port(context, new_port)
Example #19
 def test_is_not_to_be_committed(self):
     e = events.DBEventPayload(mock.ANY, states=['s1'], resource_id='1a')
     self.assertFalse(e.is_to_be_committed)
Example #20
    def _process_device_if_exists(self, device_details):
        # ignore exceptions from devices that disappear because they will
        # be handled as removed in the next iteration
        device = device_details['device']
        with self._ignore_missing_device_exceptions(device):
            LOG.debug("Port %s added", device)

            if 'port_id' in device_details:
                LOG.info("Port %(device)s updated. Details: %(details)s", {
                    'device': device,
                    'details': device_details
                })
                self.mgr.setup_arp_spoofing_protection(device, device_details)

                segment = amb.NetworkSegment(
                    device_details.get('network_type'),
                    device_details['physical_network'],
                    device_details.get('segmentation_id'),
                    device_details.get('mtu'))
                network_id = device_details['network_id']
                self.rpc_callbacks.add_network(network_id, segment)
                interface_plugged = self.mgr.plug_interface(
                    network_id, segment, device,
                    device_details['device_owner'])
                # REVISIT(scheuran): Changed how the ports' admin_state_up
                # is implemented.
                #
                # Old lb implementation:
                # - admin_state_up: ensure that tap is plugged into bridge
                # - admin_state_down: remove tap from bridge
                # New lb implementation:
                # - admin_state_up: set tap device state to up
                # - admin_state_down: set tap device state to down
                #
                # However both approaches could result in races with
                # nova/libvirt and therefore to an invalid system state in the
                # scenario, where an instance is booted with a port configured
                # with admin_state_up = False:
                #
                # Libvirt does the following actions in exactly
                # this order (see libvirt virnetdevtap.c)
                #     1) Create the tap device, set its MAC and MTU
                #     2) Plug the tap into the bridge
                #     3) Set the tap online
                #
                # Old lb implementation:
                #   A race could occur, if the lb agent removes the tap device
                #   right after step 1). Then libvirt will add it to the bridge
                #   again in step 2).
                # New lb implementation:
                #   The race could occur if the lb-agent sets the tap
                #   device's state to down right after step 2). In step 3)
                #   libvirt might set it to up again.
                #
                # This is not an issue if an instance is booted with a port
                # configured with admin_state_up = True. Libvirt would just
                # set the tap device up again.
                #
                # This refactoring is recommended for the following reasons:
                # 1) An existing race with libvirt caused by the behavior of
                #    the old implementation. See Bug #1312016
                # 2) The new code is much more readable
                if interface_plugged:
                    self.mgr.ensure_port_admin_state(
                        device, device_details['admin_state_up'])
                # update plugin about port status if admin_state is up
                if device_details['admin_state_up']:
                    if interface_plugged:
                        self.plugin_rpc.update_device_up(
                            self.context, device, self.agent_id, cfg.CONF.host)
                    else:
                        self.plugin_rpc.update_device_down(
                            self.context, device, self.agent_id, cfg.CONF.host)
                self._update_network_ports(device_details['network_id'],
                                           device_details['port_id'],
                                           device_details['device'])
                self.ext_manager.handle_port(self.context, device_details)
                registry.publish(local_resources.PORT_DEVICE,
                                 events.AFTER_UPDATE,
                                 self,
                                 payload=events.DBEventPayload(
                                     self.context,
                                     states=(device_details, ),
                                     resource_id=device))
            elif constants.NO_ACTIVE_BINDING in device_details:
                LOG.info("Device %s has no active binding in host", device)
            else:
                LOG.info("Device %s not defined on plugin", device)
Example #21
 def test_latest_state_with_desired_state(self):
     desired_state = object()
     e = events.DBEventPayload(mock.ANY,
                               states=[object()],
                               desired_state=desired_state)
     self.assertEqual(desired_state, e.latest_state)
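Together with Example #16's test_states, this fixes the lookup order: latest_state prefers desired_state when one was supplied and otherwise falls back to the newest entry in states. A one-line restatement of the fallback case:

    from unittest import mock

    from neutron_lib.callbacks import events

    e = events.DBEventPayload(mock.ANY, states=['old', 'new'])
    assert e.latest_state == 'new'  # no desired_state, so states[-1] wins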
Example #22
    def _wire_trunk(self, trunk_br, port, rewire=False):
        """Wire trunk bridge with integration bridge.

        The method calls into the trunk manager to create patch ports for the
        trunk and for all subports associated with it. If rewire is True, a
        diff is performed between the desired state (the one received from
        the server) and the actual state (the patch ports present on the
        trunk bridge), and subports are wired or unwired accordingly.

        :param trunk_br: OVSBridge object representing the trunk bridge.
        :param port: Parent port dict.
        :param rewire: True if local trunk state must be reconciled with
            server's state.
        """
        ctx = self.context
        try:
            parent_port_id = (
                self.trunk_manager.get_port_uuid_from_external_ids(port))
            trunk = self.trunk_rpc.get_trunk_details(ctx, parent_port_id)
        except tman.TrunkManagerError:
            LOG.error("Can't obtain parent port ID from port %s",
                      port['name'])
            return
        except resources_rpc.ResourceNotFound:
            LOG.error("Port %s has no trunk associated.", parent_port_id)
            return

        try:
            registry.publish(
                resources.TRUNK, events.BEFORE_CREATE, self,
                payload=events.DBEventPayload(ctx, resource_id=trunk.id,
                                              desired_state=trunk))
            self.trunk_manager.create_trunk(
                trunk.id, trunk.port_id,
                port['external_ids'].get('attached-mac'))
        except tman.TrunkManagerError as te:
            LOG.error("Failed to create trunk %(trunk_id)s: %(err)s",
                      {'trunk_id': trunk.id,
                       'err': te})
            # NOTE(status_police): Trunk couldn't be created so it ends in
            # ERROR status and resync can fix that later.
            self.report_trunk_status(
                ctx, trunk.id, constants.TRUNK_ERROR_STATUS)
            return

        # We need to remove stale subports
        unwire_status = constants.TRUNK_ACTIVE_STATUS
        if rewire:
            old_subport_ids = self.get_connected_subports_for_trunk(trunk.id)
            subports = {p['port_id'] for p in trunk.sub_ports}
            subports_to_delete = set(old_subport_ids) - subports
            if subports_to_delete:
                unwire_status = self.unwire_subports_for_trunk(
                    trunk.id, subports_to_delete)

        # NOTE(status_police): inform the server whether the operation
        # was a partial or complete success. Do not inline status.
        # NOTE: in case of rewiring we re-add ports that are already present
        # on the bridge because e.g. the segmentation ID might have changed
        # (e.g. agent crashed, port was removed and re-added with a different
        # seg ID)
        wire_status = self.wire_subports_for_trunk(
            ctx, trunk.id, trunk.sub_ports,
            trunk_bridge=trunk_br, parent_port=port)

        if (unwire_status == wire_status and
                wire_status == constants.TRUNK_ACTIVE_STATUS):
            status = constants.TRUNK_ACTIVE_STATUS
        else:
            status = constants.TRUNK_DEGRADED_STATUS
        self.report_trunk_status(ctx, trunk.id, status)
Example #23
    def create_or_update_agent(self, context, agent_state):
        """Registers new agent in the database or updates existing.

        Returns tuple of agent status and state.
        Status is from server point of view: alive, new or revived.
        It could be used by agent to do some sync with the server if needed.
        """
        status = agent_consts.AGENT_ALIVE
        with db_api.CONTEXT_WRITER.using(context):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent_state[k]) for k in res_keys)
            if 'availability_zone' in agent_state:
                res['availability_zone'] = agent_state['availability_zone']
            configurations_dict = agent_state.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            resource_versions_dict = agent_state.get('resource_versions')
            if resource_versions_dict:
                res['resource_versions'] = jsonutils.dumps(
                    resource_versions_dict)
            res['load'] = self._get_agent_load(agent_state)
            current_time = timeutils.utcnow()
            try:
                agent = self._get_agent_by_type_and_host(
                    context, agent_state['agent_type'], agent_state['host'])
                agent_state_orig = copy.deepcopy(agent_state)
                if not agent.is_active:
                    status = agent_consts.AGENT_REVIVED
                    if 'resource_versions' not in agent_state:
                        # updating agent_state with resource_versions taken
                        # from db so that
                        # _update_local_agent_resource_versions() will call
                        # version_manager and bring it up to date
                        agent_state['resource_versions'] = self._get_dict(
                            agent, 'resource_versions', ignore_missing=True)
                res['heartbeat_timestamp'] = current_time
                if agent_state.get('start_flag'):
                    res['started_at'] = current_time
                greenthread.sleep(0)
                self._log_heartbeat(agent_state, agent, configurations_dict)
                agent.update_fields(res)
                agent.update()
                event_type = events.AFTER_UPDATE
            except agent_exc.AgentNotFoundByTypeHost:
                agent_state_orig = None
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = cfg.CONF.enable_new_agents
                agent = agent_obj.Agent(context=context, **res)
                greenthread.sleep(0)
                agent.create()
                event_type = events.AFTER_CREATE
                self._log_heartbeat(agent_state, agent, configurations_dict)
                status = agent_consts.AGENT_NEW
            greenthread.sleep(0)

        agent_state['agent_status'] = status
        agent_state['admin_state_up'] = agent.admin_state_up
        registry.publish(resources.AGENT,
                         event_type,
                         self,
                         payload=events.DBEventPayload(
                             context=context,
                             metadata={
                                 'host': agent_state['host'],
                                 'plugin': self,
                                 'status': status
                             },
                             states=(agent_state_orig, ),
                             desired_state=agent_state,
                             resource_id=agent.id))
        return status, agent_state
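Since the publisher above picks AFTER_CREATE or AFTER_UPDATE at runtime, a consumer interested in both registers the same handler twice; a sketch with an illustrative handler:

    from neutron_lib.callbacks import events, registry, resources


    def on_agent_heartbeat(resource, event, trigger, payload=None):
        # metadata['status'] is 'new', 'alive' or 'revived', per the
        # docstring above; desired_state is the reported agent_state dict
        if payload.metadata['status'] == 'revived':
            print('agent on %s revived: %s'
                  % (payload.metadata['host'], payload.desired_state))


    for _event in (events.AFTER_CREATE, events.AFTER_UPDATE):
        registry.subscribe(on_agent_heartbeat, resources.AGENT, _event)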
Example #24
    def _process_port_request(self, event, context, port):
        # Deleting a floating IP delivers its own port resource with a
        # PRECOMMIT_DELETE event, so just return here; the request is checked
        # in the _check_floatingip_request callback instead.
        if port['device_owner'].startswith(
                lib_consts.DEVICE_OWNER_FLOATINGIP):
            return

        # This block checks whether there are any fixed IP updates. Whether
        # the event is AFTER_UPDATE or PRECOMMIT_DELETE, update_ip_set is
        # used to check whether any associated port forwarding resources need
        # to be deleted (for the port's AFTER_UPDATE event) or to collect all
        # affected IP addresses (for the port's PRECOMMIT_DELETE event).
        port_id = port['id']
        update_fixed_ips = port['fixed_ips']
        update_ip_set = set()
        for update_fixed_ip in update_fixed_ips:
            if (netaddr.IPNetwork(update_fixed_ip.get('ip_address')).version ==
                    lib_consts.IP_VERSION_4):
                update_ip_set.add(update_fixed_ip.get('ip_address'))
        if not update_ip_set:
            return

        # If the port owner wants to update or delete the port, we must
        # elevate the context to check whether the floating IP or port
        # forwarding resources are owned by other tenants.
        if not context.is_admin:
            context = context.elevated()
        # At this point update_ip_set is non-empty, so fetch all port
        # forwarding resources for the requested port_id in preparation for
        # the next step, such as deleting them.
        pf_resources = pf.PortForwarding.get_objects(
            context, internal_port_id=port_id)
        if not pf_resources:
            return

        # At this point pf_resources is non-empty as well. Collect all IP
        # addresses used by port forwarding resources into used_ip_set, and
        # default remove_ip_set to used_ip_set, meaning that all port
        # forwarding resources are deleted when the event is
        # PRECOMMIT_DELETE. When the event is AFTER_UPDATE, only the
        # difference is removed.
        used_ip_set = set()
        for pf_resource in pf_resources:
            used_ip_set.add(str(pf_resource.internal_ip_address))
        remove_ip_set = used_ip_set
        if event == events.AFTER_UPDATE:
            remove_ip_set = used_ip_set - update_ip_set
            if not remove_ip_set:
                return

        # The following block deletes the port forwarding resources listed in
        # remove_ip_set. Note that for an AFTER_UPDATE event with an empty
        # remove_ip_set, the function has already returned above, so this
        # block is never reached in that case.
        remove_port_forwarding_list = []
        with db_api.CONTEXT_WRITER.using(context):
            for pf_resource in pf_resources:
                if str(pf_resource.internal_ip_address) in remove_ip_set:
                    pf_objs = pf.PortForwarding.get_objects(
                        context, floatingip_id=pf_resource.floatingip_id)
                    if len(pf_objs) == 1 and pf_objs[0].id == pf_resource.id:
                        fip_obj = l3_obj.FloatingIP.get_object(
                            context, id=pf_resource.floatingip_id)
                        fip_obj.update_fields({'router_id': None})
                        fip_obj.update()
                    pf_resource.delete()
                    remove_port_forwarding_list.append(pf_resource)

        if self._rpc_notifications_required:
            self.push_api.push(context, remove_port_forwarding_list,
                               rpc_events.DELETED)
        for pf_obj in remove_port_forwarding_list:
            payload = events.DBEventPayload(context, states=(pf_obj,))
            registry.publish(pf_consts.PORT_FORWARDING,
                             events.AFTER_DELETE,
                             self,
                             payload=payload)
Example #25
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true, this is the default security group for the
        given tenant and it is only created if one does not already exist.
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }
        self._registry_notify(resources.SECURITY_GROUP,
                              events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict,
                              payload=events.DBEventPayload(
                                  context,
                                  metadata={'is_default': default_sg},
                                  request_body=security_group,
                                  desired_state=s))

        tenant_id = s['tenant_id']
        stateful = s.get('stateful', True)

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)
        else:
            existing_def_sg_id = self._get_default_sg_id(context, tenant_id)
            if existing_def_sg_id is not None:
                # default already exists, return it
                return self.get_security_group(context, existing_def_sg_id)

        with db_api.CONTEXT_WRITER.using(context):
            delta = len(ext_sg.sg_supported_ethertypes)
            delta = delta * 2 if default_sg else delta
            reservation = quota.QUOTAS.make_reservation(
                context, tenant_id, {'security_group_rule': delta}, self)

            sg = sg_obj.SecurityGroup(context,
                                      id=s.get('id')
                                      or uuidutils.generate_uuid(),
                                      description=s['description'],
                                      project_id=tenant_id,
                                      name=s['name'],
                                      is_default=default_sg,
                                      stateful=stateful)
            sg.create()

            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = sg_obj.SecurityGroupRule(
                        context,
                        id=uuidutils.generate_uuid(),
                        project_id=tenant_id,
                        security_group_id=sg.id,
                        direction='ingress',
                        ethertype=ethertype,
                        remote_group_id=sg.id)
                    ingress_rule.create()
                    sg.rules.append(ingress_rule)

                egress_rule = sg_obj.SecurityGroupRule(
                    context,
                    id=uuidutils.generate_uuid(),
                    project_id=tenant_id,
                    security_group_id=sg.id,
                    direction='egress',
                    ethertype=ethertype)
                egress_rule.create()
                sg.rules.append(egress_rule)
            sg.obj_reset_changes(['rules'])

            quota.QUOTAS.commit_reservation(context,
                                            reservation.reservation_id)

            # fetch sg from db to load the sg rules with sg model.
            sg = sg_obj.SecurityGroup.get_object(context, id=sg.id)
            secgroup_dict = self._make_security_group_dict(sg)
            kwargs['security_group'] = secgroup_dict
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_CREATE,
                                  exc_cls=ext_sg.SecurityGroupConflict,
                                  **kwargs)

        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict
Example #26
    def update_floatingip_port_forwarding(self, context, id, floatingip_id,
                                          port_forwarding):
        port_forwarding = port_forwarding.get(apidef.RESOURCE_NAME)
        new_internal_port_id = None
        if port_forwarding and port_forwarding.get('internal_port_id'):
            new_internal_port_id = port_forwarding.get('internal_port_id')
            self._check_port_has_binding_floating_ip(context, port_forwarding)

        try:
            with db_api.CONTEXT_WRITER.using(context):
                fip_obj = self._get_fip_obj(context, floatingip_id)
                pf_obj = pf.PortForwarding.get_object(context, id=id)
                if not pf_obj:
                    raise pf_exc.PortForwardingNotFound(id=id)
                original_pf_obj = copy.deepcopy(pf_obj)
                ori_internal_port_id = pf_obj.internal_port_id
                if new_internal_port_id and (new_internal_port_id !=
                                             ori_internal_port_id):
                    router_id = self._find_a_router_for_fip_port_forwarding(
                        context, port_forwarding, fip_obj)
                    self._check_router_match(context, fip_obj,
                                             router_id, port_forwarding)

                # The internal socket is only updated when the dict contains
                # both internal_ip_address and internal_port, so backfill the
                # missing value from the existing object.
                internal_ip_address = port_forwarding.get(
                    'internal_ip_address')
                internal_port = port_forwarding.get('internal_port')
                if any([internal_ip_address, internal_port]):
                    port_forwarding.update({
                        'internal_ip_address': internal_ip_address
                        if internal_ip_address else
                        str(pf_obj.internal_ip_address),
                        'internal_port': internal_port if internal_port else
                        pf_obj.internal_port
                    })
                pf_obj.update_fields(port_forwarding, reset_changes=True)
                self._check_port_forwarding_update(context, pf_obj)

                port_changed_keys = ['internal_port', 'internal_port_range',
                                     'external_port', 'external_port_range']

                if [k for k in port_changed_keys if k in port_forwarding]:
                    self._check_port_collisions(
                        context, floatingip_id, port_forwarding,
                        id, pf_obj.get('internal_port_id'),
                        pf_obj.get('protocol'),
                        pf_obj.get('internal_ip_address'))

                pf_obj.update()
        except oslo_db_exc.DBDuplicateEntry:
            (__, conflict_params) = self._find_existing_port_forwarding(
                context, floatingip_id, pf_obj.to_dict())
            message = _("A duplicate port forwarding entry with same "
                        "attributes already exists, conflicting values "
                        "are %s") % conflict_params
            raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME,
                                     msg=message)
        if self._rpc_notifications_required:
            self.push_api.push(context, [pf_obj], rpc_events.UPDATED)
        registry.publish(pf_consts.PORT_FORWARDING, events.AFTER_UPDATE,
                         self,
                         payload=events.DBEventPayload(
                             context,
                             states=(original_pf_obj, pf_obj)))
        return pf_obj
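Unlike most examples above, this payload carries OVO objects rather than dicts, so a consumer compares attributes. A sketch; the resource name is an assumption standing in for the pf_consts.PORT_FORWARDING constant used by the publisher:

    from neutron_lib.callbacks import events, registry

    # assumed value of pf_consts.PORT_FORWARDING
    PORT_FORWARDING = 'port_forwarding'


    def on_pf_update(resource, event, trigger, payload=None):
        original, updated = payload.states
        if original.internal_port_id != updated.internal_port_id:
            # the forwarding moved to a different internal port
            pass


    registry.subscribe(on_pf_update, PORT_FORWARDING, events.AFTER_UPDATE)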