Code example #1
File: octavia_listener.py, Project: asomya/vmware-nsx
class NSXOctaviaListenerEndpoint(object):
    target = messaging.Target(namespace="control", version='1.0')

    def __init__(self,
                 client=None,
                 loadbalancer=None,
                 listener=None,
                 pool=None,
                 member=None,
                 healthmonitor=None,
                 l7policy=None,
                 l7rule=None):

        self.client = client
        self.loadbalancer = loadbalancer
        self.listener = listener
        self.pool = pool
        self.member = member
        self.healthmonitor = healthmonitor
        self.l7policy = l7policy
        self.l7rule = l7rule

    def get_completor_func(self, obj_type, obj, delete=False, cascade=False):
        # return a method that will be called on success/failure completion
        def completor_func(success=True):
            LOG.debug("Octavia transaction completed. delete %s, status %s",
                      delete, 'success' if success else 'failure')

            # calculate the provisioning and operating statuses
            main_prov_status = constants.ACTIVE
            parent_prov_status = constants.ACTIVE
            if not success:
                main_prov_status = constants.ERROR
                parent_prov_status = constants.ERROR
            elif delete:
                main_prov_status = constants.DELETED
            op_status = constants.ONLINE if success else constants.ERROR

            # add the status of the created/deleted/updated object
            status_dict = {
                obj_type: [{
                    'id': obj['id'],
                    constants.PROVISIONING_STATUS: main_prov_status,
                    constants.OPERATING_STATUS: op_status
                }]
            }

            # Get all its parents, and update their statuses as well
            loadbalancer_id = None
            listener_id = None
            pool_id = None
            policy_id = None
            if obj_type != constants.LOADBALANCERS:
                loadbalancer_id = obj.get('loadbalancer_id')
                if obj.get('pool'):
                    pool_id = obj['pool']['id']
                    listener_id = obj['pool'].get('listener_id')
                    if not loadbalancer_id:
                        loadbalancer_id = obj['pool'].get('loadbalancer_id')
                elif obj.get('pool_id'):
                    pool_id = obj['pool_id']
                if obj.get('listener'):
                    listener_id = obj['listener']['id']
                    if not loadbalancer_id:
                        loadbalancer_id = obj['listener'].get(
                            'loadbalancer_id')
                elif obj.get('listener_id'):
                    listener_id = obj['listener_id']
                if obj.get('policy') and obj['policy'].get('listener'):
                    policy_id = obj['policy']['id']
                    if not listener_id:
                        listener_id = obj['policy']['listener']['id']
                        if not loadbalancer_id:
                            loadbalancer_id = obj['policy']['listener'].get(
                                'loadbalancer_id')

                if (loadbalancer_id
                        and not status_dict.get(constants.LOADBALANCERS)):
                    status_dict[constants.LOADBALANCERS] = [{
                        'id': loadbalancer_id,
                        constants.PROVISIONING_STATUS: parent_prov_status,
                        constants.OPERATING_STATUS: op_status
                    }]
                if listener_id and not status_dict.get(constants.LISTENERS):
                    status_dict[constants.LISTENERS] = [{
                        'id': listener_id,
                        constants.PROVISIONING_STATUS: parent_prov_status,
                        constants.OPERATING_STATUS: op_status
                    }]
                if pool_id and not status_dict.get(constants.POOLS):
                    status_dict[constants.POOLS] = [{
                        'id': pool_id,
                        constants.PROVISIONING_STATUS: parent_prov_status,
                        constants.OPERATING_STATUS: op_status
                    }]
                if policy_id and not status_dict.get(constants.L7POLICIES):
                    status_dict[constants.L7POLICIES] = [{
                        'id': policy_id,
                        constants.PROVISIONING_STATUS: parent_prov_status,
                        constants.OPERATING_STATUS: op_status
                    }]
            elif delete and cascade:
                # add deleted status to all other objects
                status_dict[constants.LISTENERS] = []
                status_dict[constants.POOLS] = []
                status_dict[constants.MEMBERS] = []
                status_dict[constants.L7POLICIES] = []
                status_dict[constants.L7RULES] = []
                status_dict[constants.HEALTHMONITORS] = []
                for pool in obj.get('pools', []):
                    for member in pool.get('members', []):
                        status_dict[constants.MEMBERS].append({
                            'id': member['id'],
                            constants.PROVISIONING_STATUS: constants.DELETED,
                            constants.OPERATING_STATUS: op_status
                        })
                    if pool.get('healthmonitor'):
                        status_dict[constants.HEALTHMONITORS].append({
                            'id': pool['healthmonitor']['id'],
                            constants.PROVISIONING_STATUS: constants.DELETED,
                            constants.OPERATING_STATUS: op_status
                        })
                    status_dict[constants.POOLS].append({
                        'id': pool['id'],
                        constants.PROVISIONING_STATUS: constants.DELETED,
                        constants.OPERATING_STATUS: op_status
                    })
                for listener in obj.get('listeners', []):
                    status_dict[constants.LISTENERS].append({
                        'id': listener['id'],
                        constants.PROVISIONING_STATUS: constants.DELETED,
                        constants.OPERATING_STATUS: op_status
                    })
                    for policy in listener.get('l7policies', []):
                        status_dict[constants.L7POLICIES].append({
                            'id': policy['id'],
                            constants.PROVISIONING_STATUS: constants.DELETED,
                            constants.OPERATING_STATUS: op_status
                        })
                        for rule in policy.get('rules', []):
                            status_dict[constants.L7RULES].append({
                                'id': rule['id'],
                                constants.PROVISIONING_STATUS:
                                    constants.DELETED,
                                constants.OPERATING_STATUS: op_status
                            })

            LOG.debug("Octavia transaction completed with statuses %s",
                      status_dict)
            kw = {'status': status_dict}
            self.client.cast({}, 'update_loadbalancer_status', **kw)

        return completor_func

    def update_listener_statistics(self, statistics):
        kw = {'statistics': statistics}
        self.client.cast({}, 'update_listener_statistics', **kw)

    @log_helpers.log_method_call
    def loadbalancer_create(self, ctxt, loadbalancer):
        ctx = neutron_context.Context(None, loadbalancer['project_id'])
        completor = self.get_completor_func(constants.LOADBALANCERS,
                                            loadbalancer)
        try:
            self.loadbalancer.create(ctx, loadbalancer, completor)
        except Exception as e:
            LOG.error('NSX driver loadbalancer_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def loadbalancer_delete_cascade(self, ctxt, loadbalancer):
        ctx = neutron_context.Context(None, loadbalancer['project_id'])

        def dummy_completor(success=True):
            pass

        # Go over the LB tree and delete one by one using the cascade
        # api implemented for each resource
        for listener in loadbalancer.get('listeners', []):
            for policy in listener.get('l7policies', []):
                for rule in policy.get('rules', []):
                    self.l7rule.delete_cascade(ctx, rule, dummy_completor)
                self.l7policy.delete_cascade(ctx, policy, dummy_completor)
            self.listener.delete_cascade(ctx, listener, dummy_completor)
        for pool in loadbalancer.get('pools', []):
            for member in pool.get('members', []):
                self.member.delete_cascade(ctx, member, dummy_completor)
            if pool.get('healthmonitor'):
                self.healthmonitor.delete_cascade(ctx, pool['healthmonitor'],
                                                  dummy_completor)
            self.pool.delete_cascade(ctx, pool, dummy_completor)

        # Delete the loadbalancer itself. On success, the cascade completor
        # passed below marks the entire tree as DELETED; on failure, the
        # non-cascade completor created here reports only the loadbalancer
        # as ERROR.
        completor = self.get_completor_func(constants.LOADBALANCERS,
                                            loadbalancer,
                                            delete=True)
        try:
            self.loadbalancer.delete_cascade(
                ctx, loadbalancer,
                self.get_completor_func(constants.LOADBALANCERS,
                                        loadbalancer,
                                        delete=True,
                                        cascade=True))
        except Exception as e:
            LOG.error('NSX driver loadbalancer_delete_cascade failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def loadbalancer_delete(self, ctxt, loadbalancer, cascade=False):
        if cascade:
            return self.loadbalancer_delete_cascade(ctxt, loadbalancer)

        ctx = neutron_context.Context(None, loadbalancer['project_id'])
        completor = self.get_completor_func(constants.LOADBALANCERS,
                                            loadbalancer,
                                            delete=True)
        try:
            self.loadbalancer.delete(ctx, loadbalancer, completor)
        except Exception as e:
            LOG.error('NSX driver loadbalancer_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def loadbalancer_update(self, ctxt, old_loadbalancer, new_loadbalancer):
        ctx = neutron_context.Context(None, old_loadbalancer['project_id'])
        completor = self.get_completor_func(constants.LOADBALANCERS,
                                            new_loadbalancer)
        try:
            self.loadbalancer.update(ctx, old_loadbalancer, new_loadbalancer,
                                     completor)
        except Exception as e:
            LOG.error('NSX driver loadbalancer_update failed %s', e)
            completor(success=False)

    # Listener
    @log_helpers.log_method_call
    def listener_create(self, ctxt, listener, cert):
        ctx = neutron_context.Context(None, listener['project_id'])
        completor = self.get_completor_func(constants.LISTENERS, listener)
        try:
            self.listener.create(ctx, listener, completor, certificate=cert)
        except Exception as e:
            LOG.error('NSX driver listener_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def listener_delete(self, ctxt, listener):
        ctx = neutron_context.Context(None, listener['project_id'])
        completor = self.get_completor_func(constants.LISTENERS,
                                            listener,
                                            delete=True)
        try:
            self.listener.delete(ctx, listener, completor)
        except Exception as e:
            LOG.error('NSX driver listener_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def listener_update(self, ctxt, old_listener, new_listener, cert):
        ctx = neutron_context.Context(None, old_listener['project_id'])
        completor = self.get_completor_func(constants.LISTENERS, new_listener)
        try:
            self.listener.update(ctx,
                                 old_listener,
                                 new_listener,
                                 completor,
                                 certificate=cert)
        except Exception as e:
            LOG.error('NSX driver listener_update failed %s', e)
            completor(success=False)

    # Pool
    @log_helpers.log_method_call
    def pool_create(self, ctxt, pool):
        ctx = neutron_context.Context(None, pool['project_id'])
        completor = self.get_completor_func(constants.POOLS, pool)
        try:
            self.pool.create(ctx, pool, completor)
        except Exception as e:
            LOG.error('NSX driver pool_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def pool_delete(self, ctxt, pool):
        ctx = neutron_context.Context(None, pool['project_id'])
        completor = self.get_completor_func(constants.POOLS, pool, delete=True)
        try:
            self.pool.delete(ctx, pool, completor)
        except Exception as e:
            LOG.error('NSX driver pool_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def pool_update(self, ctxt, old_pool, new_pool):
        ctx = neutron_context.Context(None, old_pool['project_id'])
        completor = self.get_completor_func(constants.POOLS, new_pool)
        try:
            self.pool.update(ctx, old_pool, new_pool, completor)
        except Exception as e:
            LOG.error('NSX driver pool_update failed %s', e)
            completor(success=False)

    # Member
    @log_helpers.log_method_call
    def member_create(self, ctxt, member):
        ctx = neutron_context.Context(None, member['project_id'])
        completor = self.get_completor_func(constants.MEMBERS, member)
        try:
            self.member.create(ctx, member, completor)
        except Exception as e:
            LOG.error('NSX driver member_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def member_delete(self, ctxt, member):
        ctx = neutron_context.Context(None, member['project_id'])
        completor = self.get_completor_func(constants.MEMBERS,
                                            member,
                                            delete=True)
        try:
            self.member.delete(ctx, member, completor)
        except Exception as e:
            LOG.error('NSX driver member_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def member_update(self, ctxt, old_member, new_member):
        ctx = neutron_context.Context(None, old_member['project_id'])
        completor = self.get_completor_func(constants.MEMBERS, new_member)
        try:
            self.member.update(ctx, old_member, new_member, completor)
        except Exception as e:
            LOG.error('NSX driver member_update failed %s', e)
            completor(success=False)

    # Health Monitor
    @log_helpers.log_method_call
    def healthmonitor_create(self, ctxt, healthmonitor):
        ctx = neutron_context.Context(None, healthmonitor['project_id'])
        completor = self.get_completor_func(constants.HEALTHMONITORS,
                                            healthmonitor)
        try:
            self.healthmonitor.create(ctx, healthmonitor, completor)
        except Exception as e:
            LOG.error('NSX driver healthmonitor_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def healthmonitor_delete(self, ctxt, healthmonitor):
        ctx = neutron_context.Context(None, healthmonitor['project_id'])
        completor = self.get_completor_func(constants.HEALTHMONITORS,
                                            healthmonitor,
                                            delete=True)
        try:
            self.healthmonitor.delete(ctx, healthmonitor, completor)
        except Exception as e:
            LOG.error('NSX driver healthmonitor_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def healthmonitor_update(self, ctxt, old_healthmonitor, new_healthmonitor):
        ctx = neutron_context.Context(None, old_healthmonitor['project_id'])
        completor = self.get_completor_func(constants.HEALTHMONITORS,
                                            new_healthmonitor)
        try:
            self.healthmonitor.update(ctx, old_healthmonitor,
                                      new_healthmonitor, completor)
        except Exception as e:
            LOG.error('NSX driver healthmonitor_update failed %s', e)
            completor(success=False)

    # L7 Policy
    @log_helpers.log_method_call
    def l7policy_create(self, ctxt, l7policy):
        ctx = neutron_context.Context(None, l7policy['project_id'])
        completor = self.get_completor_func(constants.L7POLICIES, l7policy)
        try:
            self.l7policy.create(ctx, l7policy, completor)
        except Exception as e:
            LOG.error('NSX driver l7policy_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def l7policy_delete(self, ctxt, l7policy):
        ctx = neutron_context.Context(None, l7policy['project_id'])
        completor = self.get_completor_func(constants.L7POLICIES,
                                            l7policy,
                                            delete=True)
        try:
            self.l7policy.delete(ctx, l7policy, completor)
        except Exception as e:
            LOG.error('NSX driver l7policy_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def l7policy_update(self, ctxt, old_l7policy, new_l7policy):
        ctx = neutron_context.Context(None, old_l7policy['project_id'])
        completor = self.get_completor_func(constants.L7POLICIES, new_l7policy)
        try:
            self.l7policy.update(ctx, old_l7policy, new_l7policy, completor)
        except Exception as e:
            LOG.error('NSX driver l7policy_update failed %s', e)
            completor(success=False)

    # L7 Rule
    @log_helpers.log_method_call
    def l7rule_create(self, ctxt, l7rule):
        ctx = neutron_context.Context(None, l7rule['project_id'])
        completor = self.get_completor_func(constants.L7RULES, l7rule)
        try:
            self.l7rule.create(ctx, l7rule, completor)
        except Exception as e:
            LOG.error('NSX driver l7rule_create failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def l7rule_delete(self, ctxt, l7rule):
        ctx = neutron_context.Context(None, l7rule['project_id'])
        completor = self.get_completor_func(constants.L7RULES,
                                            l7rule,
                                            delete=True)
        try:
            self.l7rule.delete(ctx, l7rule, completor)
        except Exception as e:
            LOG.error('NSX driver l7rule_delete failed %s', e)
            completor(success=False)

    @log_helpers.log_method_call
    def l7rule_update(self, ctxt, old_l7rule, new_l7rule):
        ctx = neutron_context.Context(None, old_l7rule['project_id'])
        completor = self.get_completor_func(constants.L7RULES, new_l7rule)
        try:
            self.l7rule.update(ctx, old_l7rule, new_l7rule, completor)
        except Exception as e:
            LOG.error('NSX driver l7rule_update failed %s', e)
            completor(success=False)
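
The endpoint class above only defines the RPC-facing methods; the snippet does not show how it gets attached to a messaging server. Below is a minimal, hypothetical sketch of that wiring with oslo.messaging. The topic, server name, executor choice and the placeholder Octavia status client are assumptions for illustration, not code from vmware-nsx.

import oslo_messaging as messaging
from oslo_config import cfg

# Hypothetical wiring only: host NSXOctaviaListenerEndpoint (defined above)
# on an RPC server. In real code the client passed to the endpoint would be
# an RPC client whose cast() reports statuses back to Octavia.
octavia_status_client = None  # placeholder, see comment above

transport = messaging.get_rpc_transport(cfg.CONF)
target = messaging.Target(topic='nsx-octavia-listener', server='nsx-driver')
endpoints = [NSXOctaviaListenerEndpoint(client=octavia_status_client)]
server = messaging.get_rpc_server(transport, target, endpoints,
                                  executor='threading')
server.start()
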
Code example #2
File: service.py, Project: wangyc666666/ussuri_nova
    def start(self):
        """Start the service.

        This includes starting an RPC service, initializing
        periodic tasks, etc.
        """
        # NOTE(melwitt): Clear the cell cache holding database transaction
        # context manager objects. We do this to ensure we create new internal
        # oslo.db locks to avoid a situation where a child process receives an
        # already locked oslo.db lock when it is forked. When a child process
        # inherits a locked oslo.db lock, database accesses through that
        # transaction context manager will never be able to acquire the lock
        # and requests will fail with CellTimeout errors.
        # See https://bugs.python.org/issue6721 for more information.
        # With python 3.7, it would be possible for oslo.db to make use of the
        # os.register_at_fork() method to reinitialize its lock. Until we
        # require python 3.7 as a minimum version, we must handle the situation
        # outside of oslo.db.
        context.CELL_CACHE = {}

        assert_eventlet_uses_monotonic_clock()

        verstr = version.version_string_with_package()
        LOG.info(_LI('Starting %(topic)s node (version %(version)s)'), {
            'topic': self.topic,
            'version': verstr
        })
        self.basic_config_check()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        self.service_ref = objects.Service.get_by_host_and_binary(
            ctxt, self.host, self.binary)
        if self.service_ref:
            _update_service_ref(self.service_ref)

        else:
            try:
                self.service_ref = _create_service_ref(self, ctxt)
            except (exception.ServiceTopicExists,
                    exception.ServiceBinaryExists):
                # NOTE(danms): If we race to create a record with a sibling
                # worker, don't fail here.
                self.service_ref = objects.Service.get_by_host_and_binary(
                    ctxt, self.host, self.binary)

        self.manager.pre_start_hook()

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)

        endpoints = [
            self.manager,
            baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
        ]
        endpoints.extend(self.manager.additional_endpoints)

        serializer = objects_base.NovaObjectSerializer()

        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.post_start_hook()

        LOG.debug("Join ServiceGroup membership for this service %s",
                  self.topic)
        # Add service to the ServiceGroup membership group.
        self.servicegroup_api.join(self.host, self.topic, self)

        if self.periodic_enable:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            self.tg.add_dynamic_timer(
                self.periodic_tasks,
                initial_delay=initial_delay,
                periodic_interval_max=self.periodic_interval_max)
Code example #3
def _setup_client(self, transport, topic='testtopic'):
    return oslo_messaging.RPCClient(transport,
                                    oslo_messaging.Target(topic=topic),
                                    serializer=self.serializer)
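
For context, a client built this way is then driven through call() for synchronous requests and cast() for one-way notifications. A hedged sketch under assumptions of my own; the transport setup and the 'ping' method name are illustrative, not part of the test code above.

import oslo_messaging
from oslo_config import cfg

transport = oslo_messaging.get_rpc_transport(cfg.CONF)
client = oslo_messaging.RPCClient(transport,
                                  oslo_messaging.Target(topic='testtopic'))

# call() blocks until a server endpoint listening on 'testtopic' returns.
result = client.call({}, 'ping', payload='hello')
# cast() sends the message and returns immediately; no reply is expected.
client.cast({}, 'ping', payload='hello')
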
Code example #4
File: manager.py, Project: nvazquez13alexa/cinder
class SchedulerManager(manager.Manager):
    """Chooses a host to create volumes."""

    RPC_API_VERSION = '2.1'

    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self,
                 scheduler_driver=None,
                 service_name=None,
                 *args,
                 **kwargs):
        if not scheduler_driver:
            scheduler_driver = CONF.scheduler_driver
        self.driver = importutils.import_object(scheduler_driver)
        super(SchedulerManager, self).__init__(*args, **kwargs)
        self._startup_delay = True

    def init_host_with_rpc(self):
        ctxt = context.get_admin_context()
        self.request_service_capabilities(ctxt)

        eventlet.sleep(CONF.periodic_interval)
        self._startup_delay = False

    def reset(self):
        super(SchedulerManager, self).reset()
        self.driver.reset()

    def update_service_capabilities(self,
                                    context,
                                    service_name=None,
                                    host=None,
                                    capabilities=None,
                                    **kwargs):
        """Process a capability update from a service node."""
        if capabilities is None:
            capabilities = {}
        self.driver.update_service_capabilities(service_name, host,
                                                capabilities)

    def _wait_for_scheduler(self):
        # NOTE(dulek): We're waiting for the scheduler to announce that it's
        # ready, or until CONF.periodic_interval seconds have passed since
        # service startup.
        while self._startup_delay and not self.driver.is_ready():
            eventlet.sleep(1)

    def create_consistencygroup(self,
                                context,
                                topic,
                                group,
                                request_spec_list=None,
                                filter_properties_list=None):

        self._wait_for_scheduler()
        try:
            self.driver.schedule_create_consistencygroup(
                context, group, request_spec_list, filter_properties_list)
        except exception.NoValidHost:
            LOG.error(
                _LE("Could not find a host for consistency group "
                    "%(group_id)s."), {'group_id': group.id})
            group.status = 'error'
            group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    _LE("Failed to create consistency group "
                        "%(group_id)s."), {'group_id': group.id})
                group.status = 'error'
                group.save()

    def create_volume(self,
                      context,
                      topic,
                      volume_id,
                      snapshot_id=None,
                      image_id=None,
                      request_spec=None,
                      filter_properties=None,
                      volume=None):

        self._wait_for_scheduler()

        # FIXME(dulek): Remove this in v3.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        try:
            flow_engine = create_volume.get_flow(context, db, self.driver,
                                                 request_spec,
                                                 filter_properties, volume,
                                                 snapshot_id, image_id)
        except Exception:
            msg = _("Failed to create scheduler manager volume flow")
            LOG.exception(msg)
            raise exception.CinderException(msg)

        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()

    def request_service_capabilities(self, context):
        volume_rpcapi.VolumeAPI().publish_service_capabilities(context)

    def migrate_volume_to_host(self,
                               context,
                               topic,
                               volume_id,
                               host,
                               force_host_copy,
                               request_spec,
                               filter_properties=None,
                               volume=None):
        """Ensure that the host exists and can accept the volume."""

        self._wait_for_scheduler()

        # FIXME(dulek): Remove this in v3.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        def _migrate_volume_set_error(self, context, ex, request_spec):
            if volume.status == 'maintenance':
                previous_status = (volume.previous_status or 'maintenance')
                volume_state = {
                    'volume_state': {
                        'migration_status': 'error',
                        'status': previous_status
                    }
                }
            else:
                volume_state = {'volume_state': {'migration_status': 'error'}}
            self._set_volume_state_and_notify('migrate_volume_to_host',
                                              volume_state, context, ex,
                                              request_spec)

        try:
            tgt_host = self.driver.host_passes_filters(context, host,
                                                       request_spec,
                                                       filter_properties)
        except exception.NoValidHost as ex:
            _migrate_volume_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_volume_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().migrate_volume(context, volume, tgt_host,
                                                     force_host_copy)

    def retype(self,
               context,
               topic,
               volume_id,
               request_spec,
               filter_properties=None,
               volume=None):
        """Schedule the modification of a volume's type.

        :param context: the request context
        :param topic: the topic listened on
        :param volume_id: the ID of the volume to retype
        :param request_spec: parameters for this retype request
        :param filter_properties: parameters to filter by
        :param volume: the volume object to retype
        """

        self._wait_for_scheduler()

        # FIXME(dulek): Remove this in v3.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        def _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations):
            if reservations:
                QUOTAS.rollback(context, reservations)
            previous_status = (volume_ref.previous_status or volume_ref.status)
            volume_state = {'volume_state': {'status': previous_status}}
            self._set_volume_state_and_notify('retype', volume_state, context,
                                              ex, request_spec, msg)

        reservations = request_spec.get('quota_reservations')
        old_reservations = request_spec.get('old_reservations', None)
        new_type = request_spec.get('volume_type')
        if new_type is None:
            msg = _('New volume type not specified in request_spec.')
            ex = exception.ParameterNotFound(param='volume_type')
            _retype_volume_set_error(self, context, ex, request_spec, volume,
                                     msg, reservations)

        # Default migration policy is 'never'
        migration_policy = request_spec.get('migration_policy')
        if not migration_policy:
            migration_policy = 'never'

        try:
            tgt_host = self.driver.find_retype_host(context, request_spec,
                                                    filter_properties,
                                                    migration_policy)
        except exception.NoValidHost as ex:
            msg = (_("Could not find a host for volume %(volume_id)s with "
                     "type %(type_id)s.") % {
                         'type_id': new_type['id'],
                         'volume_id': volume.id
                     })
            _retype_volume_set_error(self, context, ex, request_spec, volume,
                                     msg, reservations)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _retype_volume_set_error(self, context, ex, request_spec,
                                         volume, None, reservations)
        else:
            volume_rpcapi.VolumeAPI().retype(context, volume, new_type['id'],
                                             tgt_host, migration_policy,
                                             reservations, old_reservations)

    def manage_existing(self,
                        context,
                        topic,
                        volume_id,
                        request_spec,
                        filter_properties=None,
                        volume=None):
        """Ensure that the host exists and can accept the volume."""

        self._wait_for_scheduler()

        # FIXME(mdulko): Remove this in v3.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        def _manage_existing_set_error(self, context, ex, request_spec):
            volume_state = {'volume_state': {'status': 'error'}}
            self._set_volume_state_and_notify('manage_existing', volume_state,
                                              context, ex, request_spec)

        try:
            self.driver.host_passes_filters(context, volume.host, request_spec,
                                            filter_properties)
        except exception.NoValidHost as ex:
            _manage_existing_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _manage_existing_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().manage_existing(context, volume,
                                                      request_spec.get('ref'))

    def get_pools(self, context, filters=None):
        """Get active pools from scheduler's cache.

        NOTE(dulek): There's no self._wait_for_scheduler() because get_pools
        is an RPC call that blocks the c-api. Also, this is an admin-only API
        extension, so it won't hurt the user much to retry the request
        manually.
        """
        return self.driver.get_pools(context, filters)

    def _set_volume_state_and_notify(self,
                                     method,
                                     updates,
                                     context,
                                     ex,
                                     request_spec,
                                     msg=None):
        # TODO(harlowja): move into a task that just does this later.
        if not msg:
            msg = (_LE("Failed to schedule_%(method)s: %(ex)s") % {
                'method': method,
                'ex': six.text_type(ex)
            })
        LOG.error(msg)

        volume_state = updates['volume_state']
        properties = request_spec.get('volume_properties', {})

        volume_id = request_spec.get('volume_id', None)

        if volume_id:
            db.volume_update(context, volume_id, volume_state)

        payload = dict(request_spec=request_spec,
                       volume_properties=properties,
                       volume_id=volume_id,
                       state=volume_state,
                       method=method,
                       reason=ex)

        rpc.get_notifier("scheduler").error(context, 'scheduler.' + method,
                                            payload)
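
The SchedulerManager above is the server side of the scheduler RPC interface; requests reach it through a client whose prepared version must be compatible with RPC_API_VERSION ('2.1'). The sketch below is a simplified, hypothetical client-side counterpart, not Cinder's actual scheduler rpcapi module; the topic string and the argument set are assumptions.

import oslo_messaging as messaging
from oslo_config import cfg


class SchedulerAPI(object):
    """Hypothetical client-side counterpart of SchedulerManager."""

    def __init__(self):
        transport = messaging.get_rpc_transport(cfg.CONF)
        target = messaging.Target(topic='cinder-scheduler', version='2.0')
        self.client = messaging.RPCClient(transport, target)

    def create_volume(self, ctxt, volume):
        # Request version 2.0; the manager advertises 2.1, which is
        # backwards compatible, so the cast is dispatched to the manager's
        # create_volume() method by name.
        cctxt = self.client.prepare(version='2.0')
        cctxt.cast(ctxt, 'create_volume', topic='cinder-scheduler',
                   volume_id=volume.id, volume=volume)
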
Code example #5
class ConsoleProxyManager(manager.Manager):
    """Sets up and tears down any console proxy connections.

    Needed for accessing instance consoles securely.

    """

    target = messaging.Target(version='2.0')

    def __init__(self, console_driver=None, *args, **kwargs):
        if not console_driver:
            console_driver = CONF.console_driver
        self.driver = importutils.import_object(console_driver)
        super(ConsoleProxyManager, self).__init__(service_name='console',
                                                  *args,
                                                  **kwargs)
        self.driver.host = self.host
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def reset(self):
        LOG.info(_LI('Reloading compute RPC API'))
        compute_rpcapi.LAST_VERSION = None
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def init_host(self):
        self.driver.init_host()

    def add_console(self, context, instance_id):
        instance = self.db.instance_get(context, instance_id)
        host = instance['host']
        name = instance['name']
        pool = self._get_pool_for_instance_host(context, host)
        try:
            console = self.db.console_get_by_pool_instance(
                context, pool['id'], instance['uuid'])
        except exception.NotFound:
            LOG.debug('Adding console', instance=instance)
            password = utils.generate_password(8)
            port = self.driver.get_port(context)
            console_data = {
                'instance_name': name,
                'instance_uuid': instance['uuid'],
                'password': password,
                'pool_id': pool['id']
            }
            if port:
                console_data['port'] = port
            console = self.db.console_create(context, console_data)
            self.driver.setup_console(context, console)

        return console['id']

    def remove_console(self, context, console_id):
        try:
            console = self.db.console_get(context, console_id)
        except exception.NotFound:
            LOG.debug(
                'Tried to remove non-existent console '
                '%(console_id)s.', {'console_id': console_id})
            return
        self.db.console_delete(context, console_id)
        self.driver.teardown_console(context, console)

    def _get_pool_for_instance_host(self, context, instance_host):
        context = context.elevated()
        console_type = self.driver.console_type
        try:
            pool = self.db.console_pool_get_by_host_type(
                context, instance_host, self.host, console_type)
        except exception.NotFound:
            # NOTE(mdragon): Right now, the only place this info exists is the
            #                compute worker's flagfile, at least for
            #                xenserver. Thus we need to ask.
            pool_info = self.compute_rpcapi.get_console_pool_info(
                context, instance_host, console_type)
            pool_info['password'] = self.driver.fix_pool_password(
                pool_info['password'])
            pool_info['host'] = self.host
            pool_info['public_hostname'] = CONF.console_public_hostname
            pool_info['console_type'] = self.driver.console_type
            pool_info['compute_host'] = instance_host
            pool = self.db.console_pool_create(context, pool_info)
        return pool
Code example #6
File: ipsec.py, Project: winniepooh/neutron-vpnaas
def __init__(self, topic):
    target = oslo_messaging.Target(topic=topic, version='1.0')
    self.client = n_rpc.get_client(target)
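
A hedged sketch of how a client created this way is typically used afterwards; it assumes the self.client from the snippet above, and the method name and payload are invented for illustration rather than taken from neutron-vpnaas.

def report_status(self, context, status):
    # Pin the request to version 1.0 of the server-side endpoint and send
    # a one-way notification; cast() does not wait for a reply.
    cctxt = self.client.prepare(version='1.0')
    cctxt.cast(context, 'update_status', status=status)
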
Code example #7
class CellsManager(manager.Manager):
    """The nova-cells manager class.  This class defines RPC
    methods that the local cell may call.  This class is NOT used for
    messages coming from other cells.  That communication is
    driver-specific.

    Communication to other cells happens via the nova.cells.messaging module.
    The MessageRunner from that module will handle routing the message to
    the correct cell via the communications driver.  Most methods below
    create 'targeted' (where we want to route a message to a specific cell)
    or 'broadcast' (where we want a message to go to multiple cells)
    messages.

    Scheduling requests get passed to the scheduler class.
    """

    target = oslo_messaging.Target(version='1.35')

    def __init__(self, *args, **kwargs):
        LOG.warning(_LW('The cells feature of Nova is considered experimental '
                        'by the OpenStack project because it receives much '
                        'less testing than the rest of Nova. This may change '
                        'in the future, but current deployers should be aware '
                        'that the use of it in production right now may be '
                        'risky.'))
        # Mostly for tests.
        cell_state_manager = kwargs.pop('cell_state_manager', None)
        super(CellsManager, self).__init__(service_name='cells',
                                           *args, **kwargs)
        if cell_state_manager is None:
            cell_state_manager = cells_state.CellStateManager
        self.state_manager = cell_state_manager()
        self.msg_runner = messaging.MessageRunner(self.state_manager)
        cells_driver_cls = importutils.import_class(
                CONF.cells.driver)
        self.driver = cells_driver_cls()
        self.instances_to_heal = iter([])

    def post_start_hook(self):
        """Have the driver start its servers for inter-cell communication.
        Also ask our child cells for their capacities and capabilities so
        we get them more quickly than just waiting for the next periodic
        update.  Receiving the updates from the children will cause us to
        update our parents.  If we don't have any children, just update
        our parents immediately.
        """
        # FIXME(comstud): There's currently no hooks when services are
        # stopping, so we have no way to stop servers cleanly.
        self.driver.start_servers(self.msg_runner)
        ctxt = context.get_admin_context()
        if self.state_manager.get_child_cells():
            self.msg_runner.ask_children_for_capabilities(ctxt)
            self.msg_runner.ask_children_for_capacities(ctxt)
        else:
            self._update_our_parents(ctxt)

    @periodic_task.periodic_task
    def _update_our_parents(self, ctxt):
        """Update our parent cells with our capabilities and capacity
        if we're at the bottom of the tree.
        """
        self.msg_runner.tell_parents_our_capabilities(ctxt)
        self.msg_runner.tell_parents_our_capacities(ctxt)

    @periodic_task.periodic_task
    def _heal_instances(self, ctxt):
        """Periodic task to send updates for a number of instances to
        parent cells.

        On every run of the periodic task, we will attempt to sync
        'CONF.cells.instance_update_num_instances' number of instances.
        When we get the list of instances, we shuffle them so that multiple
        nova-cells services aren't attempting to sync the same instances
        in lockstep.

        If CONF.cells.instance_update_at_threshold is set, only attempt
        to sync instances that have been updated recently.  The CONF
        setting defines the maximum number of seconds old the updated_at
        can be.  I.e., a threshold of 3600 means to only update instances
        that have been modified in the last hour.
        """

        if not self.state_manager.get_parent_cells():
            # No need to sync up if we have no parents.
            return

        info = {'updated_list': False}

        def _next_instance():
            try:
                instance = next(self.instances_to_heal)
            except StopIteration:
                if info['updated_list']:
                    return
                threshold = CONF.cells.instance_updated_at_threshold
                updated_since = None
                if threshold > 0:
                    updated_since = timeutils.utcnow() - datetime.timedelta(
                            seconds=threshold)
                self.instances_to_heal = cells_utils.get_instances_to_sync(
                        ctxt, updated_since=updated_since, shuffle=True,
                        uuids_only=True)
                info['updated_list'] = True
                try:
                    instance = next(self.instances_to_heal)
                except StopIteration:
                    return
            return instance

        rd_context = ctxt.elevated(read_deleted='yes')

        for i in range(CONF.cells.instance_update_num_instances):
            while True:
                # Yield to other greenthreads
                time.sleep(0)
                instance_uuid = _next_instance()
                if not instance_uuid:
                    return
                try:
                    instance = objects.Instance.get_by_uuid(rd_context,
                            instance_uuid)
                except exception.InstanceNotFound:
                    continue
                self._sync_instance(ctxt, instance)
                break

    def _sync_instance(self, ctxt, instance):
        """Broadcast an instance_update or instance_destroy message up to
        parent cells.
        """
        if instance.deleted:
            self.instance_destroy_at_top(ctxt, instance)
        else:
            self.instance_update_at_top(ctxt, instance)

    def build_instances(self, ctxt, build_inst_kwargs):
        """Pick a cell (possibly ourselves) to build new instance(s) and
        forward the request accordingly.
        """
        # Target is ourselves first.
        filter_properties = build_inst_kwargs.get('filter_properties')
        if (filter_properties is not None and
            not isinstance(filter_properties['instance_type'],
                           objects.Flavor)):
            # NOTE(danms): Handle pre-1.30 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0.
            flavor = objects.Flavor(**filter_properties['instance_type'])
            build_inst_kwargs['filter_properties'] = dict(
                filter_properties, instance_type=flavor)
        instances = build_inst_kwargs['instances']
        if not isinstance(instances[0], objects.Instance):
            # NOTE(danms): Handle pre-1.32 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0
            build_inst_kwargs['instances'] = instance_obj._make_instance_list(
                ctxt, objects.InstanceList(), instances, ['system_metadata',
                                                          'metadata'])
        our_cell = self.state_manager.get_my_state()
        self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)

    def get_cell_info_for_neighbors(self, _ctxt):
        """Return cell information for our neighbor cells."""
        return self.state_manager.get_cell_info_for_neighbors()

    def run_compute_api_method(self, ctxt, cell_name, method_info, call):
        """Call a compute API method in a specific cell."""
        response = self.msg_runner.run_compute_api_method(ctxt,
                                                          cell_name,
                                                          method_info,
                                                          call)
        if call:
            return response.value_or_raise()

    def instance_update_at_top(self, ctxt, instance):
        """Update an instance at the top level cell."""
        self.msg_runner.instance_update_at_top(ctxt, instance)

    def instance_destroy_at_top(self, ctxt, instance):
        """Destroy an instance at the top level cell."""
        self.msg_runner.instance_destroy_at_top(ctxt, instance)

    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """This is used by API cell when it didn't know what cell
        an instance was in, but the instance was requested to be
        deleted or soft_deleted.  So, we'll broadcast this everywhere.
        """
        if isinstance(instance, dict):
            instance = objects.Instance._from_db_object(ctxt,
                    objects.Instance(), instance)
        self.msg_runner.instance_delete_everywhere(ctxt, instance,
                                                   delete_type)

    def instance_fault_create_at_top(self, ctxt, instance_fault):
        """Create an instance fault at the top level cell."""
        self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)

    def bw_usage_update_at_top(self, ctxt, bw_update_info):
        """Update bandwidth usage at top level cell."""
        self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)

    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.
        """
        self.msg_runner.sync_instances(ctxt, project_id, updated_since,
                                       deleted)

    def service_get_all(self, ctxt, filters):
        """Return services in this cell and in all child cells."""
        responses = self.msg_runner.service_get_all(ctxt, filters)
        ret_services = []
        # 1 response per cell.  Each response is a list of services.
        for response in responses:
            services = response.value_or_raise()
            for service in services:
                service = cells_utils.add_cell_to_service(
                    service, response.cell_name)
                ret_services.append(service)
        return ret_services

    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def service_get_by_compute_host(self, ctxt, host_name):
        """Return a service entry for a compute host in a certain cell."""
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_get_by_compute_host(ctxt,
                                                               cell_name,
                                                               host_name)
        service = response.value_or_raise()
        service = cells_utils.add_cell_to_service(service, response.cell_name)
        return service

    def get_host_uptime(self, ctxt, host_name):
        """Return host uptime for a compute host in a certain cell

        :param host_name: fully qualified hostname. It should be in the
         format parent!child@host_id
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.get_host_uptime(ctxt, cell_name,
                                                   host_name)
        return response.value_or_raise()

    def service_update(self, ctxt, host_name, binary, params_to_update):
        """Used to enable/disable a service. For compute services, setting to
        disabled stops new builds from arriving on that host.

        :param host_name: the name of the host machine that the service is
                          running on
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        :returns: the service reference
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_update(
            ctxt, cell_name, host_name, binary, params_to_update)
        service = response.value_or_raise()
        service = cells_utils.add_cell_to_service(service, response.cell_name)
        return service

    def service_delete(self, ctxt, cell_service_id):
        """Deletes the specified service."""
        cell_name, service_id = cells_utils.split_cell_and_item(
            cell_service_id)
        self.msg_runner.service_delete(ctxt, cell_name, service_id)

    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
        """Proxy an RPC message as-is to a manager."""
        compute_topic = CONF.compute_topic
        cell_and_host = topic[len(compute_topic) + 1:]
        cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
        response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
                host_name, topic, rpc_message, call, timeout)
        return response.value_or_raise()

    def task_log_get_all(self, ctxt, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Get task logs from the DB from all cells or a particular
        cell.

        If 'host' is not None, host will be of the format 'cell!name@host',
        with '@host' being optional.  The query will be directed to the
        appropriate cell and return all task logs, or task logs matching
        the host if specified.

        'state' also may be None.  If it's not, filter by the state as well.
        """
        if host is None:
            cell_name = None
        else:
            cell_name, host = cells_utils.split_cell_and_item(host)
            # If no cell name was given, assume that the host name is the
            # cell_name and that the target is all hosts
            if cell_name is None:
                cell_name, host = host, cell_name
        responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
                task_name, period_beginning, period_ending,
                host=host, state=state)
        # 1 response per cell.  Each response is a list of task log
        # entries.
        ret_task_logs = []
        for response in responses:
            task_logs = response.value_or_raise()
            for task_log in task_logs:
                cells_utils.add_cell_to_task_log(task_log,
                                                 response.cell_name)
                ret_task_logs.append(task_log)
        return ret_task_logs

    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def compute_node_get(self, ctxt, compute_id):
        """Get a compute node by ID in a specific cell."""
        cell_name, compute_id = cells_utils.split_cell_and_item(
                compute_id)
        response = self.msg_runner.compute_node_get(ctxt, cell_name,
                                                    compute_id)
        node = response.value_or_raise()
        node = cells_utils.add_cell_to_compute_node(node, cell_name)
        return node

    def compute_node_get_all(self, ctxt, hypervisor_match=None):
        """Return list of compute nodes in all cells."""
        responses = self.msg_runner.compute_node_get_all(ctxt,
                hypervisor_match=hypervisor_match)
        # 1 response per cell.  Each response is a list of compute_node
        # entries.
        ret_nodes = []
        for response in responses:
            nodes = response.value_or_raise()
            for node in nodes:
                node = cells_utils.add_cell_to_compute_node(node,
                                                            response.cell_name)
                ret_nodes.append(node)
        return ret_nodes

    def compute_node_stats(self, ctxt):
        """Return compute node stats totals from all cells."""
        responses = self.msg_runner.compute_node_stats(ctxt)
        totals = {}
        for response in responses:
            data = response.value_or_raise()
            for key, val in six.iteritems(data):
                totals.setdefault(key, 0)
                totals[key] += val
        return totals

    def actions_get(self, ctxt, cell_name, instance_uuid):
        response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
        return response.value_or_raise()

    def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                                 request_id):
        response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
                                                            instance_uuid,
                                                            request_id)
        return response.value_or_raise()

    def action_events_get(self, ctxt, cell_name, action_id):
        response = self.msg_runner.action_events_get(ctxt, cell_name,
                                                     action_id)
        return response.value_or_raise()

    def consoleauth_delete_tokens(self, ctxt, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)

    def validate_console_port(self, ctxt, instance_uuid, console_port,
                              console_type):
        """Validate console port with child cell compute node."""
        instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
        if not instance.cell_name:
            raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
        response = self.msg_runner.validate_console_port(ctxt,
                instance.cell_name, instance_uuid, console_port,
                console_type)
        return response.value_or_raise()

    def get_capacities(self, ctxt, cell_name):
        return self.state_manager.get_capacities(cell_name)

    def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
        """BDM was created/updated in this cell.  Tell the API cells."""
        # TODO(ndipanov): Move inter-cell RPC to use objects
        bdm = base_obj.obj_to_primitive(bdm)
        self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create)

    def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
                           volume_id=None):
        """BDM was destroyed for instance in this cell.  Tell the API cells."""
        self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid,
                                           device_name=device_name,
                                           volume_id=volume_id)

    def get_migrations(self, ctxt, filters):
        """Fetch migrations applying the filters."""
        target_cell = None
        if "cell_name" in filters:
            _path_cell_sep = cells_utils.PATH_CELL_SEP
            target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
                                      filters['cell_name'])

        responses = self.msg_runner.get_migrations(ctxt, target_cell,
                                                       False, filters)
        migrations = []
        for response in responses:
            migrations += response.value_or_raise()
        return migrations

    def instance_update_from_api(self, ctxt, instance, expected_vm_state,
                        expected_task_state, admin_state_reset):
        """Update an instance in its cell."""
        self.msg_runner.instance_update_from_api(ctxt, instance,
                                                 expected_vm_state,
                                                 expected_task_state,
                                                 admin_state_reset)

    def start_instance(self, ctxt, instance):
        """Start an instance in its cell."""
        self.msg_runner.start_instance(ctxt, instance)

    def stop_instance(self, ctxt, instance, do_cast=True,
                      clean_shutdown=True):
        """Stop an instance in its cell."""
        response = self.msg_runner.stop_instance(ctxt, instance,
                                                 do_cast=do_cast,
                                                 clean_shutdown=clean_shutdown)
        if not do_cast:
            return response.value_or_raise()

    def cell_create(self, ctxt, values):
        return self.state_manager.cell_create(ctxt, values)

    def cell_update(self, ctxt, cell_name, values):
        return self.state_manager.cell_update(ctxt, cell_name, values)

    def cell_delete(self, ctxt, cell_name):
        return self.state_manager.cell_delete(ctxt, cell_name)

    def cell_get(self, ctxt, cell_name):
        return self.state_manager.cell_get(ctxt, cell_name)

    def reboot_instance(self, ctxt, instance, reboot_type):
        """Reboot an instance in its cell."""
        self.msg_runner.reboot_instance(ctxt, instance, reboot_type)

    def pause_instance(self, ctxt, instance):
        """Pause an instance in its cell."""
        self.msg_runner.pause_instance(ctxt, instance)

    def unpause_instance(self, ctxt, instance):
        """Unpause an instance in its cell."""
        self.msg_runner.unpause_instance(ctxt, instance)

    def suspend_instance(self, ctxt, instance):
        """Suspend an instance in its cell."""
        self.msg_runner.suspend_instance(ctxt, instance)

    def resume_instance(self, ctxt, instance):
        """Resume an instance in its cell."""
        self.msg_runner.resume_instance(ctxt, instance)

    def terminate_instance(self, ctxt, instance):
        """Delete an instance in its cell."""
        self.msg_runner.terminate_instance(ctxt, instance)

    def soft_delete_instance(self, ctxt, instance):
        """Soft-delete an instance in its cell."""
        self.msg_runner.soft_delete_instance(ctxt, instance)

    def resize_instance(self, ctxt, instance, flavor,
                        extra_instance_updates,
                        clean_shutdown=True):
        """Resize an instance in its cell."""
        self.msg_runner.resize_instance(ctxt, instance,
                                        flavor, extra_instance_updates,
                                        clean_shutdown=clean_shutdown)

    def live_migrate_instance(self, ctxt, instance, block_migration,
                              disk_over_commit, host_name):
        """Live migrate an instance in its cell."""
        self.msg_runner.live_migrate_instance(ctxt, instance,
                                              block_migration,
                                              disk_over_commit,
                                              host_name)

    def revert_resize(self, ctxt, instance):
        """Revert a resize for an instance in its cell."""
        self.msg_runner.revert_resize(ctxt, instance)

    def confirm_resize(self, ctxt, instance):
        """Confirm a resize for an instance in its cell."""
        self.msg_runner.confirm_resize(ctxt, instance)

    def reset_network(self, ctxt, instance):
        """Reset networking for an instance in its cell."""
        self.msg_runner.reset_network(ctxt, instance)

    def inject_network_info(self, ctxt, instance):
        """Inject networking for an instance in its cell."""
        self.msg_runner.inject_network_info(ctxt, instance)

    def snapshot_instance(self, ctxt, instance, image_id):
        """Snapshot an instance in its cell."""
        self.msg_runner.snapshot_instance(ctxt, instance, image_id)

    def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
        """Backup an instance in its cell."""
        self.msg_runner.backup_instance(ctxt, instance, image_id,
                                        backup_type, rotation)

    def rebuild_instance(self, ctxt, instance, image_href, admin_password,
                         files_to_inject, preserve_ephemeral, kwargs):
        self.msg_runner.rebuild_instance(ctxt, instance, image_href,
                                         admin_password, files_to_inject,
                                         preserve_ephemeral, kwargs)

    def set_admin_password(self, ctxt, instance, new_pass):
        self.msg_runner.set_admin_password(ctxt, instance, new_pass)
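
A minimal client-side sketch of how an endpoint like the cells manager above could be reached over oslo.messaging. The topic name 'cells', the version and the wrapper function are assumptions for illustration only; the real client lives in nova.cells.rpcapi.

import oslo_messaging as messaging
from oslo_config import cfg

# Assumed wiring: topic 'cells' and version '1.0' are illustrative only.
transport = messaging.get_transport(cfg.CONF)
target = messaging.Target(topic='cells', version='1.0')
client = messaging.RPCClient(transport, target)


def compute_node_get(ctxt, compute_id):
    # Synchronous call routed to the cells manager's compute_node_get() above.
    return client.call(ctxt, 'compute_node_get', compute_id=compute_id)
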
Code example #8
class OptimizerCallbacks(object):
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, plugin):
        super(OptimizerCallbacks, self).__init__()
        self.plugin = plugin

    def set_optimizer_status(self, context, optimizer_id, status, **kwargs):
        """Agent uses this to set a optimizer's status."""
        LOG.debug("Setting optimizer %s to status: %s" %
                  (optimizer_id, status))
        # Sanitize status first
        if status in (const.ACTIVE, const.DOWN, const.INACTIVE):
            to_update = status
        else:
            to_update = const.ERROR
        # Ignore the status change if the optimizer is awaiting deletion.
        # That case means that while some pending operation was being
        # performed on the backend, the neutron server received a delete
        # request and changed the optimizer status to PENDING_DELETE.
        updated = self.plugin.update_optimizer_status(
            context, optimizer_id, to_update, not_in=(const.PENDING_DELETE, ))
        if updated:
            LOG.debug("optimizer %s status set: %s" %
                      (optimizer_id, to_update))
        return updated and to_update != const.ERROR

    def optimizer_deleted(self, context, optimizer_id, **kwargs):
        """Agent uses this to indicate optimizer is deleted."""
        LOG.debug("optimizer_deleted() called")
        with context.session.begin(subtransactions=True):
            opt_db = self.plugin._get_optimizer(context, optimizer_id)
            # allow deleting optimizers in the ERROR state
            if opt_db.status in (const.PENDING_DELETE, const.ERROR):
                self.plugin.delete_db_optimizer_object(context, optimizer_id)
                return True
            else:
                LOG.warning(
                    _LW('Optimizer %(opt)s unexpectedly deleted by agent, '
                        'status was %(status)s'), {
                            'opt': optimizer_id,
                            'status': opt_db.status
                        })
                opt_db.update({"status": const.ERROR})
                return False

    def get_optimizers_for_tenant(self, context, **kwargs):
        """Agent uses this to get all optimizers and rules for a tenant."""
        LOG.debug("get_optimizers_for_tenant() called")
        opt_list = []
        for opt in self.plugin.get_optimizers(context):
            opt_with_rules = self.plugin._make_optimizer_dict_with_rules(
                context, opt['id'])
            if opt['status'] == const.PENDING_DELETE:
                opt_with_rules['add-router-ids'] = []
                opt_with_rules['del-router-ids'] = (
                    self.plugin.get_optimizer_routers(context, opt['id']))
            else:
                opt_with_rules['add-router-ids'] = (
                    self.plugin.get_optimizer_routers(context, opt['id']))
                opt_with_rules['del-router-ids'] = []
            opt_list.append(opt_with_rules)
        return opt_list

    def get_optimizers_for_tenant_without_rules(self, context, **kwargs):
        """Agent uses this to get all optimizers for a tenant."""
        LOG.debug("get_optimizers_for_tenant_without_rules() called")
        opt_list = list(self.plugin.get_optimizers(context))
        return opt_list

    def get_tenants_with_optimizers(self, context, **kwargs):
        """Agent uses this to get all tenants that have optimizers."""
        LOG.debug("get_tenants_with_optimizers() called")
        ctx = neutron_context.get_admin_context()
        opt_list = self.plugin.get_optimizers(ctx)
        opt_tenant_list = list(set(opt['tenant_id'] for opt in opt_list))
        return opt_tenant_list
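
A minimal sketch of the agent-side client that would invoke set_optimizer_status() on the callback class above. The topic name 'q-optimizer-plugin', the status string and the plain-dict context are assumptions for illustration.

import oslo_messaging
from oslo_config import cfg

# Assumed wiring: the topic name is illustrative only.
transport = oslo_messaging.get_transport(cfg.CONF)
target = oslo_messaging.Target(topic='q-optimizer-plugin', version='1.0')
client = oslo_messaging.RPCClient(transport, target)


def report_optimizer_active(context, optimizer_id):
    # Matches OptimizerCallbacks.set_optimizer_status(context, optimizer_id,
    # status, **kwargs) on the server side.
    return client.call(context, 'set_optimizer_status',
                       optimizer_id=optimizer_id, status='ACTIVE')
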
Code example #9
class RestProxyAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin):

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, integ_br, polling_interval, root_helper, vs='ovs'):
        super(RestProxyAgent, self).__init__()
        self.polling_interval = polling_interval
        self._setup_rpc()
        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
                                                     self.sg_plugin_rpc,
                                                     root_helper)
        if vs == 'ivs':
            self.int_br = IVSBridge(integ_br, root_helper)
        else:
            self.int_br = ovs_lib.OVSBridge(integ_br, root_helper)

    def _setup_rpc(self):
        self.topic = topics.AGENT
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.context = q_context.get_admin_context_without_session()
        self.endpoints = [self]
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic, consumers)

    def port_update(self, context, **kwargs):
        LOG.debug("Port update received")
        port = kwargs.get('port')
        vif_port = self.int_br.get_vif_port_by_id(port['id'])
        if not vif_port:
            LOG.debug("Port %s is not present on this host.", port['id'])
            return

        LOG.debug("Port %s found. Refreshing firewall.", port['id'])
        if ext_sg.SECURITYGROUPS in port:
            self.sg_agent.refresh_firewall()

    def _update_ports(self, registered_ports):
        ports = self.int_br.get_vif_port_set()
        if ports == registered_ports:
            return
        added = ports - registered_ports
        removed = registered_ports - ports
        return {'current': ports, 'added': added, 'removed': removed}

    def _process_devices_filter(self, port_info):
        if 'added' in port_info:
            self.sg_agent.prepare_devices_filter(port_info['added'])
        if 'removed' in port_info:
            self.sg_agent.remove_devices_filter(port_info['removed'])

    def daemon_loop(self):
        ports = set()

        while True:
            start = time.time()
            try:
                port_info = self._update_ports(ports)
                if port_info:
                    LOG.debug("Agent loop has new device")
                    self._process_devices_filter(port_info)
                    ports = port_info['current']
            except Exception:
                LOG.exception(_LE("Error in agent event loop"))

            elapsed = max(time.time() - start, 0)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug(
                    "Loop iteration exceeded interval "
                    "(%(polling_interval)s vs. %(elapsed)s)!", {
                        'polling_interval': self.polling_interval,
                        'elapsed': elapsed
                    })
Code example #10
 def __init__(self, topic, host):
     self.host = host
     target = messaging.Target(topic=topic, version='1.0')
     self.client = n_rpc.get_client(target)
     super(TaasPluginApiMixin, self).__init__()
     return
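
A short sketch of how a mixin built this way typically issues an RPC through the client stored in __init__. The method name 'create_tap_service' and its payload are purely illustrative assumptions, not taken from the snippet above.

 def create_tap_service(self, context, tap_service):
     # Illustrative only: prepare a call context from the stored client and
     # invoke an RPC method exposed by the plugin side.
     cctxt = self.client.prepare()
     return cctxt.call(context, 'create_tap_service', tap_service=tap_service)
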
Code example #11
File: client.py Project: thenoizz/coriolis
 def __init__(self, timeout=None):
     target = messaging.Target(topic='coriolis_conductor', version=VERSION)
     if timeout is None:
         timeout = CONF.conductor.conductor_rpc_timeout
     self._client = rpc.get_client(target, timeout=timeout)
Code example #12
File: service.py Project: makr123/cinder
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'), {
            'topic': self.topic,
            'version_string': version_string
        })
        self.model_disconnected = False

        if self.coordination:
            coordination.COORDINATOR.start()

        self.manager.init_host(added_to_cluster=self.added_to_cluster,
                               service_id=Service.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        ctxt = context.get_admin_context()
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
        LOG.debug("Pinning object versions for RPC server serializer to %s",
                  obj_version_cap)
        serializer = objects_base.CinderObjectSerializer(obj_version_cap)

        target = messaging.Target(topic=self.topic, server=self.host)
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        # NOTE(dulek): Kids, don't do that at home. We're relying here on
        # oslo.messaging implementation details to keep backward compatibility
        # with pre-Ocata services. This will not matter once we drop
        # compatibility with them.
        if self.topic == constants.VOLUME_TOPIC:
            target = messaging.Target(topic='%(topic)s.%(host)s' % {
                'topic': self.topic,
                'host': self.host
            },
                                      server=vol_utils.extract_host(
                                          self.host, 'host'))
            self.backend_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.backend_rpcserver.start()

        # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
        if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
            LOG.info(
                _LI('Starting %(topic)s cluster %(cluster)s (version '
                    '%(version)s)'), {
                        'topic': self.topic,
                        'version': version_string,
                        'cluster': self.cluster
                    })
            target = messaging.Target(topic=self.topic, server=self.cluster)
            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
            self.cluster_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.cluster_rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
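
The start() method above binds RPC servers to several targets (topic/host, a backward-compatible 'topic.host' target, and optionally a cluster target). Below is a minimal client-side sketch of directing a request at one specific backend; the 'cinder-volume' topic, the server name and the commented method name are assumptions for illustration, and real callers go through cinder's rpcapi modules.

import oslo_messaging as messaging
from oslo_config import cfg

# Assumed wiring: topic and server names are placeholders.
transport = messaging.get_transport(cfg.CONF)
target = messaging.Target(topic='cinder-volume')
client = messaging.RPCClient(transport, target)

# Address the RPC server that was started with server=self.host above.
cctxt = client.prepare(server='backend-1@lvm', version='1.0')
# cctxt.cast(ctxt, 'publish_service_capabilities') would then be delivered
# only to that server (the method name is illustrative).
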
Code example #13
class RestProxyAgent(api_sg_rpc.SecurityGroupAgentRpcCallbackMixin):

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, integ_br, polling_interval, vs='ovs'):
        super(RestProxyAgent, self).__init__()
        self.polling_interval = polling_interval
        if vs == 'ivs':
            self.int_br = IVSBridge()
            self.agent_type = "BSN IVS Agent"
        elif vs == "nfvswitch":
            self.int_br = NFVSwitchBridge()
            self.agent_type = "BSN NFVSwitch Agent"
        else:
            self.int_br = ovs_lib.OVSBridge(integ_br)
            self.agent_type = "OVS Agent"
        self.agent_state = {
            'binary': 'neutron-bsn-agent',
            'host': cfg.CONF.host,
            'topic': q_const.L2_AGENT_TOPIC,
            'configurations': {},
            'agent_type': self.agent_type,
            'start_flag': True
        }
        self.use_call = True

        self._setup_rpc()
        self.sg_agent = FilterDeviceIDMixin(self.context, self.sg_plugin_rpc)

    def _report_state(self):
        # Report the agent state to the plugin; only the first report
        # uses a call, later reports are casts.
        try:
            self.state_rpc.report_state(self.context, self.agent_state,
                                        self.use_call)
            self.use_call = False
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception("Failed reporting state!")

    def _setup_rpc(self):
        self.topic = topics.AGENT
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = api_sg_rpc.SecurityGroupServerRpcApi(
            topics.PLUGIN)
        self.context = q_context.get_admin_context_without_session()
        self.endpoints = [self]
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic, consumers)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def port_update(self, context, **kwargs):
        LOG.debug("Port update received")
        port = kwargs.get('port')
        vif_port = self.int_br.get_vif_port_by_id(port['id'])
        if not vif_port:
            LOG.debug("Port %s is not present on this host.", port['id'])
            return

        LOG.debug("Port %s found. Refreshing firewall.", port['id'])
        if ext_sg.SECURITYGROUPS in port:
            self.sg_agent.refresh_firewall()

    def _update_ports(self, registered_ports):
        ports = self.int_br.get_vif_port_set()
        if ports == registered_ports:
            return
        added = ports - registered_ports
        removed = registered_ports - ports
        return {'current': ports, 'added': added, 'removed': removed}

    def _update_port_mtus(self, port_info):
        """Update the MTU of all ports that attach the VM port to IVS """
        if 'added' in port_info:
            ports = port_info['added']
            for port in ports:
                self.int_br.set_port_mtu(port)

    def _process_devices_filter(self, port_info):
        if 'added' in port_info:
            self.sg_agent.prepare_devices_filter(port_info['added'])
        if 'removed' in port_info:
            self.sg_agent.remove_devices_filter(port_info['removed'])

    def daemon_loop(self):
        ports = set()

        while True:
            start = time.time()
            try:
                port_info = self._update_ports(ports)
                if port_info:
                    LOG.debug("Agent loop has new device")
                    self._update_port_mtus(port_info)
                    self._process_devices_filter(port_info)
                    ports = port_info['current']
            except Exception:
                LOG.exception("Error in agent event loop")

            elapsed = max(time.time() - start, 0)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug(
                    "Loop iteration exceeded interval "
                    "(%(polling_interval)s vs. %(elapsed)s)!", {
                        'polling_interval': self.polling_interval,
                        'elapsed': elapsed
                    })
Code example #14
class ConsoleAuthManager(manager.Manager):
    """Manages token based authentication."""

    target = messaging.Target(version='2.1')

    def __init__(self, scheduler_driver=None, *args, **kwargs):
        super(ConsoleAuthManager, self).__init__(service_name='consoleauth',
                                                 *args,
                                                 **kwargs)
        self.mc = memorycache.get_client()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()

    def reset(self):
        LOG.info(_LI('Reloading compute RPC API'))
        compute_rpcapi.LAST_VERSION = None
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def _get_tokens_for_instance(self, instance_uuid):
        tokens_str = self.mc.get(instance_uuid.encode('UTF-8'))
        if not tokens_str:
            tokens = []
        else:
            tokens = jsonutils.loads(tokens_str)
        return tokens

    def authorize_console(self,
                          context,
                          token,
                          console_type,
                          host,
                          port,
                          internal_access_path,
                          instance_uuid,
                          access_url=None):

        token_dict = {
            'token': token,
            'instance_uuid': instance_uuid,
            'console_type': console_type,
            'host': host,
            'port': port,
            'internal_access_path': internal_access_path,
            'access_url': access_url,
            'last_activity_at': time.time()
        }
        data = jsonutils.dumps(token_dict)

        # Log a warning if the token is not cached successfully, because
        # the failure will make the console for the instance unusable.
        if not self.mc.set(token.encode('UTF-8'), data,
                           CONF.console_token_ttl):
            LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                        {'token': token})
        tokens = self._get_tokens_for_instance(instance_uuid)

        # Remove the expired tokens from cache.
        tokens = [tok for tok in tokens if self.mc.get(tok.encode('UTF-8'))]
        tokens.append(token)

        if not self.mc.set(instance_uuid.encode('UTF-8'),
                           jsonutils.dumps(tokens)):
            LOG.warning(
                _LW("Instance: %(instance_uuid)s failed to save "
                    "into memcached"), {'instance_uuid': instance_uuid})

        LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"), {
            'token': token,
            'token_dict': token_dict
        })

    def _validate_token(self, context, token):
        instance_uuid = token['instance_uuid']
        if instance_uuid is None:
            return False

        # NOTE(comstud): consoleauth was meant to run in API cells.  So,
        # if cells is enabled, we must call down to the child cell for
        # the instance.
        if CONF.cells.enable:
            return self.cells_rpcapi.validate_console_port(
                context, instance_uuid, token['port'], token['console_type'])

        instance = objects.Instance.get_by_uuid(context, instance_uuid)

        return self.compute_rpcapi.validate_console_port(
            context, instance, token['port'], token['console_type'])

    def check_token(self, context, token):
        token_str = self.mc.get(token.encode('UTF-8'))
        token_valid = (token_str is not None)
        LOG.info(_LI("Checking Token: %(token)s, %(token_valid)s"), {
            'token': token,
            'token_valid': token_valid
        })
        if token_valid:
            token = jsonutils.loads(token_str)
            if self._validate_token(context, token):
                return token

    def delete_tokens_for_instance(self, context, instance_uuid):
        tokens = self._get_tokens_for_instance(instance_uuid)
        for token in tokens:
            self.mc.delete(token.encode('UTF-8'))
        self.mc.delete(instance_uuid.encode('UTF-8'))
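
A minimal sketch of calling check_token() on the manager above over RPC. The 'consoleauth' topic is the conventional Nova topic for this service, but treat the wiring as an assumption; real callers use nova.consoleauth.rpcapi.

import oslo_messaging as messaging
from oslo_config import cfg

# Assumed wiring: topic and version follow the Target declared above.
transport = messaging.get_transport(cfg.CONF)
target = messaging.Target(topic='consoleauth', version='2.1')
client = messaging.RPCClient(transport, target)


def check_token(ctxt, token):
    # Routed to ConsoleAuthManager.check_token(context, token) above.
    return client.call(ctxt, 'check_token', token=token)
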
Code example #15
def main(argv=None):
    _usage = """Usage: %prog [options] <method> [<arg-name=arg-value>]*"""
    parser = optparse.OptionParser(usage=_usage)
    parser.add_option("--topic",
                      action="store",
                      default="my-topic",
                      help="target topic, default 'my-topic'")
    parser.add_option("--exchange",
                      action="store",
                      default="my-exchange",
                      help="target exchange, default 'my-exchange'")
    parser.add_option("--namespace",
                      action="store",
                      default="my-namespace",
                      help="target namespace, default 'my-namespace'")
    parser.add_option("--server",
                      action="store",
                      help="Send only to the named server")
    parser.add_option("--fanout", action="store_true", help="Fanout target")
    parser.add_option("--timeout",
                      action="store",
                      type="int",
                      default=10,
                      help="timeout RPC request in seconds, default 10")
    parser.add_option("--cast", action="store_true", help="cast the RPC call")
    parser.add_option("--version", action="store", default="1.1")
    parser.add_option("--url",
                      action="store",
                      default="rabbit://localhost",
                      help="transport address, default 'rabbit://localhost'")
    parser.add_option("--oslo-config",
                      type="string",
                      help="the oslo.messaging configuration file.")
    parser.add_option("--payload",
                      type="string",
                      help="Path to a data file to use as message body.")

    opts, extra = parser.parse_args(args=argv)
    if not extra:
        print("Error: <method> not supplied!!")
        return False

    rpc_log_init()
    method = extra[0]
    extra = extra[1:]
    args = {}
    for x in extra:
        parts = x.split('=', 1)
        args[parts[0]] = parts[1] if len(parts) > 1 else None

    LOG.info("Calling %s (%s) on server=%s exchange=%s topic=%s namespace=%s"
             " fanout=%s cast=%s" %
             (method, extra, opts.server, opts.exchange, opts.topic,
              opts.namespace, str(opts.fanout), str(opts.cast)))

    if opts.oslo_config:
        LOG.info("Loading config file %s" % opts.oslo_config)
        cfg.CONF(["--config-file", opts.oslo_config])

    transport = messaging.get_transport(cfg.CONF, url=opts.url)

    target = messaging.Target(exchange=opts.exchange,
                              topic=opts.topic,
                              namespace=opts.namespace,
                              server=opts.server,
                              fanout=opts.fanout,
                              version=opts.version)

    client = messaging.RPCClient(transport,
                                 target,
                                 timeout=opts.timeout,
                                 version_cap=opts.version).prepare()

    try:
        test_context = {"application": "rpc-client", "time": time.ctime()}
        if opts.cast or opts.fanout:
            client.cast(test_context, method, **args)
        else:
            rc = client.call(test_context, method, **args)
            LOG.info("RPC return value=%s" % str(rc))
    except KeyboardInterrupt:
        raise
    except Exception as e:
        LOG.error("Unexpected exception occured: %s" % str(e))
        raise

    LOG.info("RPC complete!  Cleaning up transport...")
    transport.cleanup()
    return True
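
A possible invocation of the tool above, driving main() directly with an explicit argv list. The broker URL, topic, method name and argument value are placeholders.

main(['--url', 'rabbit://localhost',
      '--topic', 'my-topic',
      '--timeout', '5',
      'echo', 'message=hello'])
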
Code example #16
 def __init__(self, topic):
     target = oslo_messaging.Target(topic=topic, version='1.0',
                                    namespace=n_const.RPC_NAMESPACE_STATE)
     self.client = n_rpc.get_client(target)
Code example #17
File: rpcapi.py Project: bopopescu/nova-14
 def __init__(self):
     super(CertAPI, self).__init__()
     target = messaging.Target(topic='cert', version='2.0')
     version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cert,
                                            CONF.upgrade_levels.cert)
     self.client = rpc.get_client(target, version_cap=version_cap)
Code example #18
File: rpc.py Project: stackhpc/neutron
 def __init__(self, topic):
     target = oslo_messaging.Target(topic=topic,
                                    version='1.2',
                                    namespace=constants.RPC_NAMESPACE_STATE)
     self.client = lib_rpc.get_client(target)
     self.timeout = cfg.CONF.AGENT.report_interval
Code example #19
File: ipsec.py Project: winniepooh/neutron-vpnaas
class IPsecDriver(device_drivers.DeviceDriver, metaclass=abc.ABCMeta):
    """VPN Device Driver for IPSec.

    This class is currently designed for use with the L3 agent.
    However, this driver may be used with another agent in the future,
    so the use of "Router" is kept minimal for now.
    Instead of router_id, this code uses process_id.
    """

    # history
    #   1.0 Initial version
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, vpn_service, host):
        # TODO(pc_m) Replace vpn_service with config arg, once all driver
        # implementations no longer need vpn_service.
        self.conf = vpn_service.conf
        self.host = host
        self.conn = n_rpc.Connection()
        self.context = context.get_admin_context_without_session()
        self.topic = topics.IPSEC_AGENT_TOPIC
        node_topic = '%s.%s' % (self.topic, self.host)

        self.processes = {}
        self.routers = {}
        self.process_status_cache = {}

        self.endpoints = [self]
        self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
        self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC)
        self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
            self.report_status, self.context)
        self.process_status_cache_check.start(
            interval=self.conf.ipsec.ipsec_status_check_interval)

    def get_namespace(self, router_id):
        """Get namespace of router.

        :router_id: router_id
        :returns: namespace string.
            Note: If the router is a DVR, then the SNAT namespace will be
                  provided. If the router does not exist, return None.
        """
        router = self.routers.get(router_id)
        if not router:
            return
        # For DVR, use SNAT namespace
        # TODO(pcm): Use router object method to tell if DVR, when available
        if router.router['distributed']:
            return router.snat_namespace.name
        return router.ns_name

    def get_router_based_iptables_manager(self, router):
        """Returns router based iptables manager

        In DVR routers the IPsec VPN service should run inside
        the snat namespace. So the iptables manager used for
        snat namespace is different from the iptables manager
        used for the qr namespace in a non dvr based router.

        This function will check the router type and then will
        return the right iptables manager. If DVR enabled router
        it will return the snat_iptables_manager otherwise it will
        return the legacy iptables_manager.
        """
        # TODO(pcm): Use router object method to tell if DVR, when available
        if router.router['distributed']:
            return router.snat_iptables_manager
        return router.iptables_manager

    def add_nat_rule(self, router_id, chain, rule, top=False):
        """Add nat rule in namespace.

        :param router_id: router_id
        :param chain: a string of chain name
        :param rule: a string of rule
        :param top: if top is true, the rule
            will be placed on the top of chain
            Note if there is no router, this method does nothing
        """
        router = self.routers.get(router_id)
        if not router:
            return
        iptables_manager = self.get_router_based_iptables_manager(router)
        iptables_manager.ipv4['nat'].add_rule(chain, rule, top=top)

    def remove_nat_rule(self, router_id, chain, rule, top=False):
        """Remove nat rule in namespace.

        :param router_id: router_id
        :param chain: a string of chain name
        :param rule: a string of rule
        :param top: unused; kept so the signature matches add_nat_rule
        """
        router = self.routers.get(router_id)
        if not router:
            return
        iptables_manager = self.get_router_based_iptables_manager(router)
        iptables_manager.ipv4['nat'].remove_rule(chain, rule, top=top)

    def iptables_apply(self, router_id):
        """Apply IPtables.

        :param router_id: router_id
        This method do nothing if there is no router
        """
        router = self.routers.get(router_id)
        if not router:
            return
        iptables_manager = self.get_router_based_iptables_manager(router)
        iptables_manager.apply()

    def _update_nat(self, vpnservice, func):
        """Setting up nat rule in iptables.

        We need to setup nat rule for ipsec packet.
        :param vpnservice: vpnservices
        :param func: self.add_nat_rule or self.remove_nat_rule
        """
        router_id = vpnservice['router_id']
        for ipsec_site_connection in vpnservice['ipsec_site_connections']:
            for local_cidr in ipsec_site_connection['local_cidrs']:
                # This ipsec rule is not needed for ipv6.
                if netaddr.IPNetwork(local_cidr).version == 6:
                    continue

                for peer_cidr in ipsec_site_connection['peer_cidrs']:
                    func(router_id,
                         'POSTROUTING',
                         '-s %s -d %s -m policy '
                         '--dir out --pol ipsec '
                         '-j ACCEPT ' % (local_cidr, peer_cidr),
                         top=True)
        self.iptables_apply(router_id)

    @log_helpers.log_method_call
    def vpnservice_updated(self, context, **kwargs):
        """Vpnservice updated rpc handler

        VPN Service Driver will call this method
        when vpnservices updated.
        Then this method start sync with server.
        """
        router = kwargs.get('router', None)
        self.sync(context, [router] if router else [])

    @abc.abstractmethod
    def create_process(self, process_id, vpnservice, namespace):
        pass

    def ensure_process(self, process_id, vpnservice=None):
        """Ensuring process.

        If the process doesn't exist, it will create process
        and store it in self.process
        """
        process = self.processes.get(process_id)
        if not process or not process.namespace:
            namespace = self.get_namespace(process_id)
            process = self.create_process(process_id, vpnservice, namespace)
            self.processes[process_id] = process
        elif vpnservice:
            process.update_vpnservice(vpnservice)
        return process

    def create_router(self, router):
        """Handling create router event.

        Agent calls this method, when the process namespace is ready.
        Note: process_id == router_id == vpnservice_id
        """
        process_id = router.router_id
        self.routers[process_id] = router
        if process_id in self.processes:
            # Handle the case where the vpnservice was created
            # before the router's namespace
            process = self.processes[process_id]
            self._update_nat(process.vpnservice, self.add_nat_rule)
            # Don't run ipsec process for backup HA router
            if router.router['ha'] and router.ha_state == 'backup':
                return
            process.enable()

    def destroy_process(self, process_id):
        """Destroy process.

        Disable the process, remove the nat rule, and remove the process
        manager for the processes that no longer are running vpn service.
        """
        if process_id in self.processes:
            process = self.processes[process_id]
            process.disable()
            vpnservice = process.vpnservice
            if vpnservice:
                self._update_nat(vpnservice, self.remove_nat_rule)
            del self.processes[process_id]

    def destroy_router(self, process_id):
        """Handling destroy_router event.

        Agent calls this method, when the process namespace
        is deleted.
        """
        self.destroy_process(process_id)
        if process_id in self.routers:
            del self.routers[process_id]

    def get_process_status_cache(self, process):
        if not self.process_status_cache.get(process.id):
            self.process_status_cache[process.id] = {
                'status': None,
                'id': process.vpnservice['id'],
                'updated_pending_status': False,
                'ipsec_site_connections': {}
            }
        return self.process_status_cache[process.id]

    def is_status_updated(self, process, previous_status):
        if process.updated_pending_status:
            return True
        if process.status != previous_status['status']:
            return True
        if (process.connection_status !=
                previous_status['ipsec_site_connections']):
            return True

    def unset_updated_pending_status(self, process):
        process.updated_pending_status = False
        for connection_status in process.connection_status.values():
            connection_status['updated_pending_status'] = False

    def copy_process_status(self, process):
        return {
            'id': process.vpnservice['id'],
            'status': process.status,
            'updated_pending_status': process.updated_pending_status,
            'ipsec_site_connections': copy.deepcopy(process.connection_status)
        }

    def update_downed_connections(self, process_id, new_status):
        """Update info to be reported, if connections just went down.

        If there is no longer any information for a connection, because it
        has been removed (e.g. due to an admin down of VPN service or IPSec
        connection), but there was previous status information for the
        connection, mark the connection as down for reporting purposes.
        """
        if process_id in self.process_status_cache:
            for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
                if conn not in new_status[IPSEC_CONNS]:
                    new_status[IPSEC_CONNS][conn] = {
                        'status': constants.DOWN,
                        'updated_pending_status': True
                    }

    def should_be_reported(self, context, process):
        if (context.is_admin
                or process.vpnservice["tenant_id"] == context.tenant_id):
            return True

    @log_helpers.log_method_call
    def report_status(self, context):
        status_changed_vpn_services = []
        for process_id, process in list(self.processes.items()):
            # NOTE(mnaser): It's not necessary to check status for processes
            #               of a backup L3 agent
            router = self.routers.get(process_id)
            if router and router.router['ha'] and router.ha_state == 'backup':
                LOG.debug("%s router in backup state, skipping", process_id)
                continue
            if not self.should_be_reported(context, process):
                continue
            previous_status = self.get_process_status_cache(process)
            if self.is_status_updated(process, previous_status):
                new_status = self.copy_process_status(process)
                self.update_downed_connections(process.id, new_status)
                status_changed_vpn_services.append(new_status)
                self.process_status_cache[process.id] = (
                    self.copy_process_status(process))
                # We need to unset updated_pending_status after it
                # is reported to the server side
                self.unset_updated_pending_status(process)

        if status_changed_vpn_services:
            self.agent_rpc.update_status(context, status_changed_vpn_services)

    @log_helpers.log_method_call
    @lockutils.synchronized('vpn-agent', 'neutron-')
    def sync(self, context, routers):
        """Sync status with server side.

        :param context: context object for RPC call
        :param routers: router objects created in this sync event

        Many failure cases need to be considered, including the
        following:
        1) the agent class restarted
        2) a failure during process creation
        3) a VpnService deleted while the agent was down
        4) an RPC failure

        In order to handle these failure cases,
        this driver takes a simple sync strategy.
        """
        vpnservices = self.agent_rpc.get_vpn_services_on_host(
            context, self.host)
        router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
        sync_router_ids = [router['id'] for router in routers]

        self._sync_vpn_processes(vpnservices, sync_router_ids)
        self._delete_vpn_processes(sync_router_ids, router_ids)
        self._cleanup_stale_vpn_processes(router_ids)

        self.report_status(context)

    def _sync_vpn_processes(self, vpnservices, sync_router_ids):
        # Ensure the ipsec process is enabled only for
        # - the vpn services which are not yet in self.processes
        # - vpn services whose router id is in 'sync_router_ids'
        for vpnservice in vpnservices:
            if vpnservice['router_id'] not in self.processes or (
                    vpnservice['router_id'] in sync_router_ids):
                process = self.ensure_process(vpnservice['router_id'],
                                              vpnservice=vpnservice)
                self._update_nat(vpnservice, self.add_nat_rule)
                router = self.routers.get(vpnservice['router_id'])
                if not router:
                    continue
                # For HA router, spawn vpn process on master router
                # and terminate vpn process on backup router
                if router.router['ha'] and router.ha_state == 'backup':
                    process.disable()
                else:
                    process.update()

    def _delete_vpn_processes(self, sync_router_ids, vpn_router_ids):
        # Delete any IPSec processes that are
        # associated with routers, but are not running the VPN service.
        for process_id in sync_router_ids:
            if process_id not in vpn_router_ids:
                self.destroy_process(process_id)

    def _cleanup_stale_vpn_processes(self, vpn_router_ids):
        # Delete any IPSec processes running
        # VPN that do not have an associated router.
        process_ids = [
            pid for pid in self.processes if pid not in vpn_router_ids
        ]
        for process_id in process_ids:
            self.destroy_process(process_id)
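
A minimal sketch of the service-side cast that would reach IPsecDriver.vpnservice_updated() above. The driver consumes on a per-node topic of the form '<IPSEC_AGENT_TOPIC>.<host>'; the literal topic prefix 'ipsec_agent', the bare oslo.messaging client and the router payload are assumptions for illustration, since the real code goes through neutron's RPC helpers.

import oslo_messaging
from oslo_config import cfg

transport = oslo_messaging.get_transport(cfg.CONF)


def notify_vpnservice_updated(context, host, router=None):
    # The driver above created a non-fanout consumer on '<topic>.<host>'.
    target = oslo_messaging.Target(topic='ipsec_agent.%s' % host,
                                   version='1.0')
    client = oslo_messaging.RPCClient(transport, target)
    client.cast(context, 'vpnservice_updated', router=router)
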
Code example #20
        if pma == 'arbiterPMA':
            self.arbiterPMA.handleEvent({}, 'pike', event)
        elif pma == 'monitorPMA':
            self.monitorPMA.handleEvent({}, 'pike', event)

    def sendEventForResult(self, ctxt, host, pma, event):
        print "sendEventForResult"
        if pma == 'arbiterPMA':
            return self.arbiterPMA.handleEventWithResult({}, 'pike', event)
        elif pma == 'monitorPMA':
            return self.monitorPMA.handleEventWithResult({}, 'pike', event)
        else:
            return False


if __name__ == "__main__":
    print(CONF.hades_exchange)
    CONF.control_exchange = CONF.hades_exchange
    transport = messaging.get_transport(CONF)
    target = messaging.Target(topic=CONF.hades_eventService_topic,
                              server='pike')
    endpoints = [
        EventServiceManager(),
    ]
    server = messaging.get_rpc_server(transport,
                                      target,
                                      endpoints,
                                      executor='blocking')
    server.start()
    server.wait()
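
A minimal matching client sketch for the server above: it reuses the CONF and messaging names from the snippet, targets the same topic and the 'pike' server, and calls sendEventForResult(). The CONF.hades_* options are assumed to be registered elsewhere, and the event payload shape is purely illustrative.

CONF.control_exchange = CONF.hades_exchange
transport = messaging.get_transport(CONF)
target = messaging.Target(topic=CONF.hades_eventService_topic, server='pike')
client = messaging.RPCClient(transport, target)

# Illustrative payload; the handler above dispatches on the 'pma' value.
result = client.call({}, 'sendEventForResult',
                     host='localhost', pma='arbiterPMA',
                     event={'type': 'ping'})
print(result)
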
Code example #21
File: dhcp_rpc.py Project: HoratiusTang/neutron
class DhcpRpcCallback(object):
    """DHCP agent RPC callback in plugin implementations.

    This class implements the server side of an rpc interface.  The client
    side of this interface can be found in
    neutron.agent.dhcp.agent.DhcpPluginApi.  For more information about
    changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """

    # API version history:
    #     1.0 - Initial version.
    #     1.1 - Added get_active_networks_info, create_dhcp_port,
    #           and update_dhcp_port methods.
    #     1.2 - Removed get_dhcp_port. When removing a method (Making a
    #           backwards incompatible change) you would normally bump the
    #           major version. However, since the method was unused in the
    #           RPC client for many releases, it should be OK to bump the
    #           minor release instead and claim RPC compatibility with the
    #           last few client versions.
    #     1.3 - Removed release_port_fixed_ip. It's not used by reference DHCP
    #           agent since Juno, so similar rationale for not bumping the
    #           major version as above applies here too.
    #     1.4 - Removed update_lease_expiration. It's not used by reference
    #           DHCP agent since Juno, so similar rationale for not bumping the
    #           major version as above applies here too.
    target = oslo_messaging.Target(
        namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, version='1.4')

    def _get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active networks."""
        host = kwargs.get('host')
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
                plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.network_auto_schedule:
                plugin.auto_schedule_networks(context, host)
            nets = plugin.list_active_networks_on_active_dhcp_agent(
                context, host)
        else:
            filters = dict(admin_state_up=[True])
            nets = plugin.get_networks(context, filters=filters)
        return nets

    def _port_action(self, plugin, context, port, action):
        """Perform port operations taking care of concurrency issues."""
        try:
            if action == 'create_port':
                return p_utils.create_port(plugin, context, port)
            elif action == 'update_port':
                return plugin.update_port(context, port['id'], port)
            else:
                msg = _('Unrecognized action')
                raise exceptions.Invalid(message=msg)
        except (db_exc.DBError, exceptions.NetworkNotFound,
                exceptions.SubnetNotFound,
                exceptions.IpAddressGenerationFailure) as e:
            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                if isinstance(e, exceptions.IpAddressGenerationFailure):
                    # Check if the subnet still exists and if it does not,
                    # this is the reason why the ip address generation failed.
                    # In any other unlikely event re-raise
                    try:
                        subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                        plugin.get_subnet(context, subnet_id)
                    except exceptions.SubnetNotFound:
                        pass
                    else:
                        ctxt.reraise = True
                net_id = port['port']['network_id']
                LOG.warning(
                    _LW("Action %(action)s for network %(net_id)s "
                        "could not complete successfully: %(reason)s"), {
                            "action": action,
                            "net_id": net_id,
                            'reason': e
                        })

    def get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active network ids."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but is
        # left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')
        LOG.debug('get_active_networks requested from %s', host)
        nets = self._get_active_networks(context, **kwargs)
        return [net['id'] for net in nets]

    def _group_by_network_id(self, res):
        grouped = {}
        keyfunc = operator.itemgetter('network_id')
        for net_id, values in itertools.groupby(sorted(res, key=keyfunc),
                                                keyfunc):
            grouped[net_id] = list(values)
        return grouped

    def get_active_networks_info(self, context, **kwargs):
        """Returns all the networks/subnets/ports in system."""
        host = kwargs.get('host')
        LOG.debug('get_active_networks_info from %s', host)
        networks = self._get_active_networks(context, **kwargs)
        plugin = manager.NeutronManager.get_plugin()
        filters = {'network_id': [network['id'] for network in networks]}
        ports = plugin.get_ports(context, filters=filters)
        filters['enable_dhcp'] = [True]
        subnets = plugin.get_subnets(context, filters=filters)

        grouped_subnets = self._group_by_network_id(subnets)
        grouped_ports = self._group_by_network_id(ports)
        for network in networks:
            network['subnets'] = grouped_subnets.get(network['id'], [])
            network['ports'] = grouped_ports.get(network['id'], [])

        return networks

    def get_network_info(self, context, **kwargs):
        """Retrieve and return extended information about a network."""
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        LOG.debug('Network %(network_id)s requested from '
                  '%(host)s', {
                      'network_id': network_id,
                      'host': host
                  })
        plugin = manager.NeutronManager.get_plugin()
        try:
            network = plugin.get_network(context, network_id)
        except exceptions.NetworkNotFound:
            LOG.debug(
                "Network %s could not be found, it might have "
                "been deleted concurrently.", network_id)
            return
        filters = dict(network_id=[network_id])
        network['subnets'] = plugin.get_subnets(context, filters=filters)
        network['ports'] = plugin.get_ports(context, filters=filters)
        return network

    @db_api.retry_db_errors
    def release_dhcp_port(self, context, **kwargs):
        """Release the port currently being used by a DHCP agent."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')

        LOG.debug(
            'DHCP port deletion for %(network_id)s request from '
            '%(host)s', {
                'network_id': network_id,
                'host': host
            })
        plugin = manager.NeutronManager.get_plugin()
        plugin.delete_ports_by_device_id(context, device_id, network_id)

    @db_api.retry_db_errors
    @resource_registry.mark_resources_dirty
    def create_dhcp_port(self, context, **kwargs):
        """Create and return dhcp port information.

        If an expected failure occurs, a None port is returned.

        """
        host = kwargs.get('host')
        # Note(pbondar): Create deep copy of port to prevent operating
        # on changed dict if RetryRequest is raised
        port = copy.deepcopy(kwargs.get('port'))
        LOG.debug('Create dhcp port %(port)s '
                  'from %(host)s.', {
                      'port': port,
                      'host': host
                  })

        port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
        port['port'][portbindings.HOST_ID] = host
        if 'mac_address' not in port['port']:
            port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
        plugin = manager.NeutronManager.get_plugin()
        return self._port_action(plugin, context, port, 'create_port')

    @db_api.retry_db_errors
    def update_dhcp_port(self, context, **kwargs):
        """Update the dhcp port."""
        host = kwargs.get('host')
        port = kwargs.get('port')
        port['id'] = kwargs.get('port_id')
        port['port'][portbindings.HOST_ID] = host
        plugin = manager.NeutronManager.get_plugin()
        old_port = plugin.get_port(context, port['id'])
        if (old_port['device_id'] != constants.DEVICE_ID_RESERVED_DHCP_PORT
                and old_port['device_id'] != utils.get_dhcp_agent_device_id(
                    port['port']['network_id'], host)):
            raise n_exc.DhcpPortInUse(port_id=port['id'])
        LOG.debug('Update dhcp port %(port)s '
                  'from %(host)s.', {
                      'port': port,
                      'host': host
                  })
        return self._port_action(plugin, context, port, 'update_port')
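
A minimal agent-side sketch of calling get_active_networks_info() on the callback above, using the same RPC namespace constant that the server-side Target declares (here 'constants' refers to the module already imported in the original file). The 'q-plugin' topic and the version pin are assumptions; the real client is neutron.agent.dhcp.agent.DhcpPluginApi.

import oslo_messaging
from oslo_config import cfg

# Assumed wiring: 'q-plugin' is the conventional neutron plugin topic.
transport = oslo_messaging.get_transport(cfg.CONF)
target = oslo_messaging.Target(
    topic='q-plugin',
    namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
    version='1.0')
client = oslo_messaging.RPCClient(transport, target)


def get_active_networks_info(context, host):
    # Routed to DhcpRpcCallback.get_active_networks_info() above;
    # the method was added in RPC version 1.1.
    cctxt = client.prepare(version='1.1')
    return cctxt.call(context, 'get_active_networks_info', host=host)
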
Code example #22
 def __init__(self, topic):
     target = oslo_messaging.Target(
         topic=topic,
         namespace=constants.RPC_NAMESPACE_METADATA,
         version='1.0')
     self.client = n_rpc.get_client(target)
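
A hedged usage sketch for the metadata client above: a method such as the
following could sit next to that constructor to issue the actual RPC. The
server-side method name 'get_ports' and its filters argument are assumptions
for illustration only.

 def get_ports(self, context, filters=None):
     # prepare() binds a call context to the Target built in __init__;
     # call() blocks until the server-side 'get_ports' endpoint replies.
     cctxt = self.client.prepare()
     return cctxt.call(context, 'get_ports', filters=filters)
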
Code example #23
class L3RpcCallback(object):
    """L3 agent RPC callback in plugin implementations."""

    # 1.0 L3PluginApi BASE_RPC_API_VERSION
    # 1.1 Support update_floatingip_statuses
    # 1.2 Added methods for DVR support
    # 1.3 Added a method that returns the list of activated services
    # 1.4 Added L3 HA update_router_state. This method was later removed,
    #     since it was unused. The RPC version was not changed
    # 1.5 Added update_ha_routers_states
    target = oslo_messaging.Target(version='1.5')

    @property
    def plugin(self):
        if not hasattr(self, '_plugin'):
            self._plugin = manager.NeutronManager.get_plugin()
        return self._plugin

    @property
    def l3plugin(self):
        if not hasattr(self, '_l3plugin'):
            self._l3plugin = manager.NeutronManager.get_service_plugins()[
                plugin_constants.L3_ROUTER_NAT]
        return self._l3plugin

    def sync_routers(self, context, **kwargs):
        """Sync routers according to filters to a specific agent.

        @param context: contain user information
        @param kwargs: host, router_ids
        @return: a list of routers
                 with their interfaces and floating_ips
        """
        router_ids = kwargs.get('router_ids')
        host = kwargs.get('host')
        context = neutron_context.get_admin_context()
        if not self.l3plugin:
            routers = {}
            LOG.error(
                _LE('No plugin for L3 routing registered! Will reply '
                    'to l3 agent with empty router dictionary.'))
        elif utils.is_extension_supported(
                self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.router_auto_schedule:
                self.l3plugin.auto_schedule_routers(context, host, router_ids)
            routers = (
                self.l3plugin.list_active_sync_routers_on_active_l3_agent(
                    context, host, router_ids))
        else:
            routers = self.l3plugin.get_sync_data(context, router_ids)
        if utils.is_extension_supported(self.plugin,
                                        constants.PORT_BINDING_EXT_ALIAS):
            self._ensure_host_set_on_ports(context, host, routers)
        LOG.debug("Routers returned to l3 agent:\n %s",
                  jsonutils.dumps(routers, indent=5))
        return routers

    def _ensure_host_set_on_ports(self, context, host, routers):
        for router in routers:
            LOG.debug("Checking router: %(id)s for host: %(host)s", {
                'id': router['id'],
                'host': host
            })
            if router.get('gw_port') and router.get('distributed'):
                self._ensure_host_set_on_port(context,
                                              router.get('gw_port_host'),
                                              router.get('gw_port'),
                                              router['id'])
                for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []):
                    self._ensure_host_set_on_port(context,
                                                  router.get('gw_port_host'),
                                                  p, router['id'])
            else:
                self._ensure_host_set_on_port(context, host,
                                              router.get('gw_port'),
                                              router['id'])
            for interface in router.get(constants.INTERFACE_KEY, []):
                self._ensure_host_set_on_port(context, host, interface,
                                              router['id'])
            interface = router.get(constants.HA_INTERFACE_KEY)
            if interface:
                self._ensure_host_set_on_port(context, host, interface,
                                              router['id'])

    def _ensure_host_set_on_port(self, context, host, port, router_id=None):
        if (port and
            (port.get('device_owner') != constants.DEVICE_OWNER_DVR_INTERFACE
             and port.get(portbindings.HOST_ID) != host
             or port.get(portbindings.VIF_TYPE)
             == portbindings.VIF_TYPE_BINDING_FAILED)):
            # All ports, including ports created for SNAT'ing for
            # DVR are handled here
            try:
                self.plugin.update_port(context, port['id'],
                                        {'port': {
                                            portbindings.HOST_ID: host
                                        }})
            except exceptions.PortNotFound:
                LOG.debug(
                    "Port %(port)s not found while updating "
                    "agent binding for router %(router)s.", {
                        "port": port['id'],
                        "router": router_id
                    })
        elif (port and port.get('device_owner')
              == constants.DEVICE_OWNER_DVR_INTERFACE):
            # Ports that are DVR interfaces have multiple bindings (based on
            # the number of hosts on which DVR router interfaces are spawned).
            # Such bindings are created/updated here by invoking
            # update_dvr_port_binding.
            self.plugin.update_dvr_port_binding(
                context, port['id'],
                {'port': {
                    portbindings.HOST_ID: host,
                    'device_id': router_id
                }})

    def get_external_network_id(self, context, **kwargs):
        """Get one external network id for l3 agent.

        The l3 agent expects only one external network when it performs
        this query.
        """
        context = neutron_context.get_admin_context()
        net_id = self.plugin.get_external_network_id(context)
        LOG.debug("External network ID returned to l3 agent: %s", net_id)
        return net_id

    def get_service_plugin_list(self, context, **kwargs):
        plugins = manager.NeutronManager.get_service_plugins()
        return plugins.keys()

    def update_floatingip_statuses(self, context, router_id, fip_statuses):
        """Update operational status for a floating IP."""
        with context.session.begin(subtransactions=True):
            for (floatingip_id, status) in fip_statuses.items():
                LOG.debug(
                    "New status for floating IP %(floatingip_id)s: "
                    "%(status)s", {
                        'floatingip_id': floatingip_id,
                        'status': status
                    })
                try:
                    self.l3plugin.update_floatingip_status(
                        context, floatingip_id, status)
                except l3.FloatingIPNotFound:
                    LOG.debug("Floating IP: %s no longer present.",
                              floatingip_id)
            # Find all floating IPs known to have been associated with the
            # given router for which an update was not received, and set
            # them to DOWN. This situation might occur for some asynchronous
            # backends if notifications were missed.
            known_router_fips = self.l3plugin.get_floatingips(
                context, {'last_known_router_id': [router_id]})
            # Consider only floating ips which were disassociated in the API
            # FIXME(salv-orlando): Filtering in code should be avoided.
            # the plugin should offer a way to specify a null filter
            fips_to_disable = (fip['id'] for fip in known_router_fips
                               if not fip['router_id'])
            for fip_id in fips_to_disable:
                self.l3plugin.update_floatingip_status(
                    context, fip_id, constants.FLOATINGIP_STATUS_DOWN)

    def get_ports_by_subnet(self, context, **kwargs):
        """DVR: RPC called by dvr-agent to get all ports for subnet."""
        subnet_id = kwargs.get('subnet_id')
        LOG.debug("DVR: subnet_id: %s", subnet_id)
        filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
        return self.plugin.get_ports(context, filters=filters)

    def get_agent_gateway_port(self, context, **kwargs):
        """Get Agent Gateway port for FIP.

        l3 agent expects an Agent Gateway Port to be returned
        for this query.
        """
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        admin_ctx = neutron_context.get_admin_context()
        agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists(
            admin_ctx, network_id, host)
        self._ensure_host_set_on_port(admin_ctx, host, agent_port)
        LOG.debug(
            'Agent Gateway port returned : %(agent_port)s with '
            'host %(host)s', {
                'agent_port': agent_port,
                'host': host
            })
        return agent_port

    def update_ha_routers_states(self, context, **kwargs):
        """Update states for HA routers.

        Get a map of router_id to its HA state on a host and update the DB.
        State must be in: ('active', 'standby').
        """
        states = kwargs.get('states')
        host = kwargs.get('host')

        LOG.debug('Updating HA routers states on host %s: %s', host, states)
        self.l3plugin.update_routers_states(context, states, host)
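
For reference, a minimal agent-side sketch of calls into the L3RpcCallback
endpoints above (sync_routers and update_ha_routers_states), again assuming
the oslo.messaging client pattern used in the other examples; the topic
handling, version pinning and import paths are illustrative.

import oslo_messaging

from neutron.common import rpc as n_rpc


class L3PluginApiSketch(object):
    """Hypothetical L3 agent-side stub for the callbacks shown above."""

    def __init__(self, topic, host):
        self.host = host
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    def get_routers(self, context, router_ids=None):
        # Maps to L3RpcCallback.sync_routers() on the server side.
        cctxt = self.client.prepare()
        return cctxt.call(context, 'sync_routers',
                          host=self.host, router_ids=router_ids)

    def update_ha_routers_states(self, context, states):
        # Pin the per-call version cap to 1.5, the version that added this
        # endpoint; cast() sends a one-way message and does not wait.
        cctxt = self.client.prepare(version='1.5')
        cctxt.cast(context, 'update_ha_routers_states',
                   host=self.host, states=states)
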
Code example #24
 def __init__(self, topic=topics.DHCP_AGENT, plugin=None):
     self._plugin = plugin
     target = oslo_messaging.Target(topic=topic, version='1.0')
     self.client = n_rpc.get_client(target)
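
A hedged sketch of how a notifier built like the constructor above typically
reaches a single DHCP agent: prepare() re-targets the topic and server per
call, and cast() sends a one-way notification. The helper name and the
host-scoped targeting are assumptions for illustration.

 def _cast_message(self, context, method, payload, host):
     # Re-target the prepared context at one agent's host and send a
     # one-way notification (cast() does not wait for a reply).
     cctxt = self.client.prepare(topic=topics.DHCP_AGENT, server=host)
     cctxt.cast(context, method, payload=payload)
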
Code example #25
    def test_notifier(self, mock_utcnow):
        drivers = []
        if self.v1:
            drivers.append('messaging')
        if self.v2:
            drivers.append('messagingv2')

        self.config(driver=drivers,
                    topics=self.topics,
                    group='oslo_messaging_notifications')

        transport = _FakeTransport(self.conf)

        if hasattr(self, 'ctor_pub_id'):
            notifier = oslo_messaging.Notifier(transport,
                                               publisher_id=self.ctor_pub_id)
        else:
            notifier = oslo_messaging.Notifier(transport)

        prepare_kwds = {}
        if hasattr(self, 'retry'):
            prepare_kwds['retry'] = self.retry
        if hasattr(self, 'prep_pub_id'):
            prepare_kwds['publisher_id'] = self.prep_pub_id
        if prepare_kwds:
            notifier = notifier.prepare(**prepare_kwds)

        self.mox.StubOutWithMock(transport, '_send_notification')

        message_id = uuid.uuid4()
        self.mox.StubOutWithMock(uuid, 'uuid4')
        uuid.uuid4().AndReturn(message_id)

        mock_utcnow.return_value = datetime.datetime.utcnow()

        message = {
            'message_id': str(message_id),
            'publisher_id': self.expected_pub_id,
            'event_type': 'test.notify',
            'priority': self.priority.upper(),
            'payload': self.payload,
            'timestamp': str(timeutils.utcnow()),
        }

        sends = []
        if self.v1:
            sends.append(dict(version=1.0))
        if self.v2:
            sends.append(dict(version=2.0))

        for send_kwargs in sends:
            for topic in self.topics:
                if hasattr(self, 'retry'):
                    send_kwargs['retry'] = self.retry
                else:
                    send_kwargs['retry'] = None
                target = oslo_messaging.Target(topic='%s.%s' %
                                               (topic, self.priority))
                transport._send_notification(target, self.ctxt, message,
                                             **send_kwargs).InAnyOrder()

        self.mox.ReplayAll()

        method = getattr(notifier, self.priority)
        method(self.ctxt, 'test.notify', self.payload)
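
Outside of the mox-based test above, the same Notifier API can be exercised
directly. A minimal sketch, assuming a 'messagingv2' driver and a
'notifications' topic (both illustrative values):

from oslo_config import cfg
import oslo_messaging


def emit_sample_notification():
    # Build a notifier against the configured notification transport and
    # emit a single INFO-level event; the publisher id, event type and
    # payload here are placeholder values.
    transport = oslo_messaging.get_notification_transport(cfg.CONF)
    notifier = oslo_messaging.Notifier(transport,
                                       publisher_id='network.host1',
                                       driver='messagingv2',
                                       topics=['notifications'])
    notifier.info({}, 'test.notify', {'key': 'value'})
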
Code example #26
File: agent.py Project: dingboopt/neutron-x
class DhcpAgent(manager.Manager):
    """DHCP agent service manager.

    Note that the public methods of this class are exposed as the server side
    of an rpc interface.  The neutron server uses
    neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
    client side to execute the methods here.  For more information about
    changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, host=None, conf=None):
        super(DhcpAgent, self).__init__(host=host)
        self.needs_resync_reasons = collections.defaultdict(list)
        self.conf = conf or cfg.CONF
        self.cache = NetworkCache()
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        ctx = context.get_admin_context_without_session()
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx, self.conf.host)
        # create dhcp dir to store dhcp info
        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
        utils.ensure_dir(dhcp_dir)
        self.dhcp_version = self.dhcp_driver_cls.check_version()
        self._populate_networks_cache()
        # keep track of mappings between networks and routers for
        # metadata processing
        self._metadata_routers = {}  # {network_id: router_id}
        self._process_monitor = external_process.ProcessMonitor(
            config=self.conf, resource_type='dhcp')

    def init_host(self):
        self.sync_state()

    def _populate_networks_cache(self):
        """Populate the networks cache when the DHCP-agent starts."""
        try:
            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
                self.conf)
            for net_id in existing_networks:
                net = dhcp.NetModel({"id": net_id, "subnets": [], "ports": []})
                self.cache.put(net)
        except NotImplementedError:
            # just go ahead with an empty networks cache
            LOG.debug(
                "The '%s' DHCP-driver does not support retrieving of a "
                "list of existing networks", self.conf.dhcp_driver)

    def after_start(self):
        self.run()
        LOG.info(_LI("DHCP agent started"))

    def run(self):
        """Activate the DHCP agent."""
        self.sync_state()
        self.periodic_resync()

    def call_driver(self, action, network, **action_kwargs):
        """Invoke an action on a DHCP driver instance."""
        LOG.debug('Calling driver for network: %(net)s action: %(action)s', {
            'net': network.id,
            'action': action
        })
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf, network,
                                          self._process_monitor,
                                          self.dhcp_version, self.plugin_rpc)
            getattr(driver, action)(**action_kwargs)
            return True
        except exceptions.Conflict:
            # No need to resync here, the agent will receive the event related
            # to a status update for the network
            LOG.warning(
                _LW('Unable to %(action)s dhcp for %(net_id)s: there '
                    'is a conflict with its current state; please '
                    'check that the network and/or its subnet(s) '
                    'still exist.'), {
                        'net_id': network.id,
                        'action': action
                    })
        except Exception as e:
            if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure':
                # Don't resync if port could not be created because of an IP
                # allocation failure. When the subnet is updated with a new
                # allocation pool or a port is deleted to free up an IP, this
                # will automatically be retried on the notification
                self.schedule_resync(e, network.id)
            if (isinstance(e, oslo_messaging.RemoteError)
                    and e.exc_type == 'NetworkNotFound'
                    or isinstance(e, exceptions.NetworkNotFound)):
                LOG.debug("Network %s has been deleted.", network.id)
            else:
                LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
                              {
                                  'net_id': network.id,
                                  'action': action
                              })

    def schedule_resync(self, reason, network_id=None):
        """Schedule a resync for a given network and reason. If no network is
        specified, resync all networks.
        """
        self.needs_resync_reasons[network_id].append(reason)

    @utils.synchronized('dhcp-agent')
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(self.conf.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            LOG.info(_LI('All active networks have been fetched through RPC.'))
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(
                        _LE('Unable to sync network state on '
                            'deleted network %s'), deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            if only_nets:
                for network_id in only_nets:
                    self.schedule_resync(e, network_id)
            else:
                self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))

    @utils.exception_logger()
    def _periodic_resync_helper(self):
        """Resync the dhcp state at the configured interval."""
        while True:
            eventlet.sleep(self.conf.resync_interval)
            if self.needs_resync_reasons:
                # be careful to avoid a race with additions to list
                # from other threads
                reasons = self.needs_resync_reasons
                self.needs_resync_reasons = collections.defaultdict(list)
                for net, r in reasons.items():
                    if not net:
                        net = "*"
                    LOG.debug("resync (%(network)s): %(reason)s", {
                        "reason": r,
                        "network": net
                    })
                self.sync_state(reasons.keys())

    def periodic_resync(self):
        """Spawn a thread to periodically resync the dhcp state."""
        eventlet.spawn(self._periodic_resync_helper)

    def safe_get_network_info(self, network_id):
        try:
            network = self.plugin_rpc.get_network_info(network_id)
            if not network:
                LOG.debug('Network %s has been deleted.', network_id)
            return network
        except Exception as e:
            self.schedule_resync(e, network_id)
            LOG.exception(_LE('Network %s info call failed.'), network_id)

    def enable_dhcp_helper(self, network_id):
        """Enable DHCP for a network that meets enabling criteria."""
        network = self.safe_get_network_info(network_id)
        if network:
            self.configure_dhcp_for_network(network)

    @utils.exception_logger()
    def safe_configure_dhcp_for_network(self, network):
        try:
            network_id = network.get('id')
            LOG.info(_LI('Starting network %s dhcp configuration'), network_id)
            self.configure_dhcp_for_network(network)
            LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
        except (exceptions.NetworkNotFound, RuntimeError):
            LOG.warning(
                _LW('Network %s may have been deleted and '
                    'its resources may have already been disposed.'),
                network.id)

    def configure_dhcp_for_network(self, network):
        if not network.admin_state_up:
            return

        enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
            self.conf, network)
        dhcp_network_enabled = False

        for subnet in network.subnets:
            if subnet.enable_dhcp:
                if self.call_driver('enable', network):
                    dhcp_network_enabled = True
                    self.cache.put(network)
                break

        if enable_metadata and dhcp_network_enabled:
            for subnet in network.subnets:
                if subnet.ip_version == 4 and subnet.enable_dhcp:
                    self.enable_isolated_metadata_proxy(network)
                    break
        elif (not self.conf.force_metadata
              and not self.conf.enable_isolated_metadata):
            # In the case that the dhcp agent ran with metadata enabled,
            # and the dhcp agent now starts with metadata disabled, check
            # and delete any metadata_proxy.
            self.disable_isolated_metadata_proxy(network)

    def disable_dhcp_helper(self, network_id):
        """Disable DHCP for a network known to the agent."""
        network = self.cache.get_network_by_id(network_id)
        if network:
            if self.conf.enable_isolated_metadata:
                # NOTE(jschwarz): In the case where a network is deleted, all
                # the subnets and ports are deleted before this function is
                # called, so checking whether 'should_enable_metadata' is
                # True for any subnet would be incorrect here.
                self.disable_isolated_metadata_proxy(network)
            if self.call_driver('disable', network):
                self.cache.remove(network)

    def refresh_dhcp_helper(self, network_id):
        """Refresh or disable DHCP for a network depending on the current state
        of the network.
        """
        old_network = self.cache.get_network_by_id(network_id)
        if not old_network:
            # DHCP is currently not running for this network.
            return self.enable_dhcp_helper(network_id)

        network = self.safe_get_network_info(network_id)
        if not network:
            return

        old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
        new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)

        if new_cidrs and old_cidrs == new_cidrs:
            self.call_driver('reload_allocations', network)
            self.cache.put(network)
        elif new_cidrs:
            if self.call_driver('restart', network):
                self.cache.put(network)
        else:
            self.disable_dhcp_helper(network.id)

    @utils.synchronized('dhcp-agent')
    def network_create_end(self, context, payload):
        """Handle the network.create.end notification event."""
        network_id = payload['network']['id']
        self.enable_dhcp_helper(network_id)

    @utils.synchronized('dhcp-agent')
    def network_update_end(self, context, payload):
        """Handle the network.update.end notification event."""
        network_id = payload['network']['id']
        if payload['network']['admin_state_up']:
            self.enable_dhcp_helper(network_id)
        else:
            self.disable_dhcp_helper(network_id)

    @utils.synchronized('dhcp-agent')
    def network_delete_end(self, context, payload):
        """Handle the network.delete.end notification event."""
        self.disable_dhcp_helper(payload['network_id'])

    @utils.synchronized('dhcp-agent')
    def subnet_update_end(self, context, payload):
        """Handle the subnet.update.end notification event."""
        network_id = payload['subnet']['network_id']
        self.refresh_dhcp_helper(network_id)

    # Use the update handler for the subnet create event.
    subnet_create_end = subnet_update_end

    @utils.synchronized('dhcp-agent')
    def subnet_delete_end(self, context, payload):
        """Handle the subnet.delete.end notification event."""
        subnet_id = payload['subnet_id']
        network = self.cache.get_network_by_subnet_id(subnet_id)
        if network:
            self.refresh_dhcp_helper(network.id)

    @utils.synchronized('dhcp-agent')
    def port_update_end(self, context, payload):
        """Handle the port.update.end notification event."""
        updated_port = dhcp.DictModel(payload['port'])
        network = self.cache.get_network_by_id(updated_port.network_id)
        if network:
            LOG.info(_LI("Trigger reload_allocations for port %s"),
                     updated_port)
            driver_action = 'reload_allocations'
            if self._is_port_on_this_agent(updated_port):
                orig = self.cache.get_port_by_id(updated_port['id'])
                # assume IP change if not in cache
                orig = orig or {'fixed_ips': []}
                old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
                new_ips = {i['ip_address'] for i in updated_port['fixed_ips']}
                if old_ips != new_ips:
                    driver_action = 'restart'
            self.cache.put_port(updated_port)
            self.call_driver(driver_action, network)

    def _is_port_on_this_agent(self, port):
        thishost = utils.get_dhcp_agent_device_id(port['network_id'],
                                                  self.conf.host)
        return port['device_id'] == thishost

    # Use the update handler for the port create event.
    port_create_end = port_update_end

    @utils.synchronized('dhcp-agent')
    def port_delete_end(self, context, payload):
        """Handle the port.delete.end notification event."""
        port = self.cache.get_port_by_id(payload['port_id'])
        if port:
            network = self.cache.get_network_by_id(port.network_id)
            self.cache.remove_port(port)
            self.call_driver('reload_allocations', network)

    def enable_isolated_metadata_proxy(self, network):

        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        kwargs = {'network_id': network.id}
        # When the metadata network is enabled, the proxy might
        # be started for the router attached to the network
        if self.conf.enable_metadata_network:
            router_ports = [
                port for port in network.ports
                if (port.device_owner in constants.ROUTER_INTERFACE_OWNERS)
            ]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(
                        _LW("%(port_num)d router ports found on the "
                            "metadata access network. Only the port "
                            "%(port_id)s, for router %(router_id)s "
                            "will be considered"), {
                                'port_num': len(router_ports),
                                'port_id': router_ports[0].id,
                                'router_id': router_ports[0].device_id
                            })
                kwargs = {'router_id': router_ports[0].device_id}
                self._metadata_routers[network.id] = router_ports[0].device_id

        metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
            self._process_monitor, network.namespace, dhcp.METADATA_PORT,
            self.conf, **kwargs)

    def disable_isolated_metadata_proxy(self, network):
        if (self.conf.enable_metadata_network
                and network.id in self._metadata_routers):
            uuid = self._metadata_routers[network.id]
            is_router_id = True
        else:
            uuid = network.id
            is_router_id = False
        metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
            self._process_monitor, uuid, self.conf)
        if is_router_id:
            del self._metadata_routers[network.id]
Code example #27
 def create_consumer(self, topic, endpoints, fanout=False):
     target = oslo_messaging.Target(topic=topic,
                                    server=cfg.CONF.host,
                                    fanout=fanout)
     server = get_server(target, endpoints)
     self.servers.append(server)
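
The get_server() helper used above is not shown in this example. A hedged
sketch of what such a helper might wrap, built only on the public
oslo.messaging server API; the transport lookup is an assumption, and real
deployments would also pick an executor and serializer.

from oslo_config import cfg
import oslo_messaging


def get_server(target, endpoints):
    # Hypothetical counterpart to the helper used by create_consumer() above;
    # builds an RPC server that dispatches incoming calls to the endpoints.
    transport = oslo_messaging.get_transport(cfg.CONF)
    return oslo_messaging.get_rpc_server(transport, target, endpoints)
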
Code example #28
 def __init__(self, topic=None):
     super(VolumeAPI, self).__init__()
     target = messaging.Target(topic=CONF.volume_topic,
                               version=self.BASE_RPC_API_VERSION)
     serializer = objects_base.CinderObjectSerializer()
     self.client = rpc.get_client(target, '1.30', serializer=serializer)
Code example #29
class DhcpAgent(manager.Manager):
    """DHCP agent service manager.

    Note that the public methods of this class are exposed as the server side
    of an rpc interface.  The neutron server uses
    neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
    client side to execute the methods here.  For more information about
    changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst.
    """
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, host=None, conf=None):
        super(DhcpAgent, self).__init__(host=host)
        self.needs_resync_reasons = collections.defaultdict(list)
        self.dhcp_ready_ports = set()
        self.conf = conf or cfg.CONF
        # If 'resync_throttle' is mistakenly configured to be greater than
        # 'resync_interval', raise an exception and log a message.
        if self.conf.resync_throttle > self.conf.resync_interval:
            msg = _("DHCP agent must have resync_throttle <= resync_interval")
            LOG.exception(msg)
            raise exceptions.InvalidConfigurationOption(
                opt_name='resync_throttle',
                opt_value=self.conf.resync_throttle)
        self._periodic_resync_event = threading.Event()
        self.cache = NetworkCache()
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, self.conf.host)
        # create dhcp dir to store dhcp info
        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
        fileutils.ensure_tree(dhcp_dir, mode=0o755)
        self.dhcp_version = self.dhcp_driver_cls.check_version()
        self._populate_networks_cache()
        # keep track of mappings between networks and routers for
        # metadata processing
        self._metadata_routers = {}  # {network_id: router_id}
        self._process_monitor = external_process.ProcessMonitor(
            config=self.conf, resource_type='dhcp')
        self._pool_size = DHCP_PROCESS_GREENLET_MIN
        self._pool = eventlet.GreenPool(size=self._pool_size)
        self._queue = queue.ResourceProcessingQueue()

    def init_host(self):
        self.sync_state()

    def _populate_networks_cache(self):
        """Populate the networks cache when the DHCP-agent starts."""
        try:
            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
                self.conf)
            for net_id in existing_networks:
                net = dhcp.NetModel({
                    "id": net_id,
                    "subnets": [],
                    "non_local_subnets": [],
                    "ports": []
                })
                self.cache.put(net)
        except NotImplementedError:
            # just go ahead with an empty networks cache
            LOG.debug(
                "The '%s' DHCP-driver does not support retrieving of a "
                "list of existing networks", self.conf.dhcp_driver)

    def after_start(self):
        self.run()
        LOG.info("DHCP agent started")

    def run(self):
        """Activate the DHCP agent."""
        self.periodic_resync()
        self.start_ready_ports_loop()
        eventlet.spawn_n(self._process_loop)

    def call_driver(self, action, network, **action_kwargs):
        """Invoke an action on a DHCP driver instance."""
        LOG.debug('Calling driver for network: %(net)s action: %(action)s', {
            'net': network.id,
            'action': action
        })
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf, network,
                                          self._process_monitor,
                                          self.dhcp_version, self.plugin_rpc)
            getattr(driver, action)(**action_kwargs)
            return True
        except exceptions.Conflict:
            # No need to resync here, the agent will receive the event related
            # to a status update for the network
            LOG.debug(
                'Unable to %(action)s dhcp for %(net_id)s: there '
                'is a conflict with its current state; please '
                'check that the network and/or its subnet(s) '
                'still exist.', {
                    'net_id': network.id,
                    'action': action
                })
        except exceptions.SubnetMismatchForPort as e:
            # FIXME(kevinbenton): get rid of this once bug/1627480 is fixed
            LOG.debug("Error configuring DHCP port, scheduling resync: %s", e)
            self.schedule_resync(e, network.id)
        except Exception as e:
            if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure':
                # Don't resync if port could not be created because of an IP
                # allocation failure. When the subnet is updated with a new
                # allocation pool or a port is deleted to free up an IP, this
                # will automatically be retried on the notification
                self.schedule_resync(e, network.id)
            if (isinstance(e, oslo_messaging.RemoteError)
                    and e.exc_type == 'NetworkNotFound'
                    or isinstance(e, exceptions.NetworkNotFound)):
                LOG.debug(
                    "Network %s has been removed from the agent "
                    "or deleted from DB.", network.id)
            else:
                LOG.exception('Unable to %(action)s dhcp for %(net_id)s.', {
                    'net_id': network.id,
                    'action': action
                })

    def schedule_resync(self, reason, network_id=None):
        """Schedule a resync for a given network and reason. If no network is
        specified, resync all networks.
        """
        self.needs_resync_reasons[network_id].append(reason)
        self._periodic_resync_event.set()
        # Yield to allow other threads that may be ready to run.
        # This helps prevent one thread from acquiring the same lock over and
        # over again, in which case no other threads waiting on the
        # "dhcp-agent" lock would make any progress.
        eventlet.greenthread.sleep(0)

    @_sync_lock
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info('Synchronizing state')
        pool = eventlet.GreenPool(self.conf.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info(
                enable_dhcp_filter=False)
            LOG.info('All active networks have been fetched through RPC.')
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(
                        'Unable to sync network state on '
                        'deleted network %s', deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            # we notify all ports in case some were created while the agent
            # was down
            self.dhcp_ready_ports |= set(self.cache.get_port_ids(only_nets))
            LOG.info('Synchronizing state complete')

        except Exception as e:
            if only_nets:
                for network_id in only_nets:
                    self.schedule_resync(e, network_id)
            else:
                self.schedule_resync(e)
            LOG.exception('Unable to sync network state.')

    def _dhcp_ready_ports_loop(self):
        """Notifies the server of any ports that had reservations setup."""
        while True:
            # this is just watching a set so we can do it really frequently
            eventlet.sleep(0.1)
            if self.dhcp_ready_ports:
                ports_to_send = set()
                for port_count in range(
                        min(len(self.dhcp_ready_ports),
                            DHCP_READY_PORTS_SYNC_MAX)):
                    ports_to_send.add(self.dhcp_ready_ports.pop())

                try:
                    self.plugin_rpc.dhcp_ready_on_ports(ports_to_send)
                    LOG.info("DHCP configuration for ports %s is completed",
                             ports_to_send)
                    continue
                except Exception:
                    LOG.exception("Failure notifying DHCP server of "
                                  "ready DHCP ports. Will retry on next "
                                  "iteration.")
                self.dhcp_ready_ports |= ports_to_send

    def start_ready_ports_loop(self):
        """Spawn a thread to push changed ports to server."""
        eventlet.spawn(self._dhcp_ready_ports_loop)

    @utils.exception_logger()
    def _periodic_resync_helper(self):
        """Resync the dhcp state at the configured interval and throttle."""
        while True:
            # threading.Event.wait blocks until the internal flag is true. It
            # returns the internal flag on exit, so it will always return True
            # except if a timeout is given and the operation times out.
            if self._periodic_resync_event.wait(self.conf.resync_interval):
                LOG.debug("Resync event has been scheduled")
                clear_periodic_resync_event = self._periodic_resync_event.clear
                # configure throttler for clear_periodic_resync_event to
                # introduce delays between resync state events.
                throttled_clear_periodic_resync_event = utils.throttler(
                    self.conf.resync_throttle)(clear_periodic_resync_event)
                throttled_clear_periodic_resync_event()

            if self.needs_resync_reasons:
                # be careful to avoid a race with additions to list
                # from other threads
                reasons = self.needs_resync_reasons
                self.needs_resync_reasons = collections.defaultdict(list)
                for net, r in reasons.items():
                    if not net:
                        net = "*"
                    LOG.debug("resync (%(network)s): %(reason)s", {
                        "reason": r,
                        "network": net
                    })
                self.sync_state(reasons.keys())

    def periodic_resync(self):
        """Spawn a thread to periodically resync the dhcp state."""
        eventlet.spawn(self._periodic_resync_helper)

    def safe_get_network_info(self, network_id):
        try:
            network = self.plugin_rpc.get_network_info(network_id)
            if not network:
                LOG.debug('Network %s has been deleted.', network_id)
            return network
        except Exception as e:
            self.schedule_resync(e, network_id)
            LOG.exception('Network %s info call failed.', network_id)

    def enable_dhcp_helper(self, network_id):
        """Enable DHCP for a network that meets enabling criteria."""
        network = self.safe_get_network_info(network_id)
        if network:
            self.configure_dhcp_for_network(network)

    @utils.exception_logger()
    def safe_configure_dhcp_for_network(self, network):
        try:
            network_id = network.get('id')
            LOG.info('Starting network %s dhcp configuration', network_id)
            self.configure_dhcp_for_network(network)
            LOG.info('Finished network %s dhcp configuration', network_id)
        except (exceptions.NetworkNotFound, RuntimeError):
            LOG.warning(
                'Network %s may have been deleted and '
                'its resources may have already been disposed.', network.id)

    def configure_dhcp_for_network(self, network):
        if not network.admin_state_up:
            return

        for subnet in network.subnets:
            if subnet.enable_dhcp:
                if self.call_driver('enable', network):
                    self.update_isolated_metadata_proxy(network)
                    self.cache.put(network)
                    # After enabling dhcp for network, mark all existing
                    # ports as ready. So that the status of ports which are
                    # created before enabling dhcp can be updated.
                    self.dhcp_ready_ports |= {p.id for p in network.ports}
                break

        self._resize_process_pool()

    def disable_dhcp_helper(self, network_id):
        """Disable DHCP for a network known to the agent."""
        network = self.cache.get_network_by_id(network_id)
        if network:
            # NOTE(yamahata): Kill the metadata proxy process
            # unconditionally, as in the case where a network
            # is deleted, all the subnets and ports are deleted
            # before this function is called, so determining if
            # the proxy should be terminated is error prone.
            # destroy_monitored_metadata_proxy() is a noop when
            # there is no process running.
            self.disable_isolated_metadata_proxy(network)
            if self.call_driver('disable', network):
                self.cache.remove(network)

        self._resize_process_pool()

    def refresh_dhcp_helper(self, network_id):
        """Refresh or disable DHCP for a network depending on the current state
        of the network.
        """
        old_network = self.cache.get_network_by_id(network_id)
        if not old_network:
            # DHCP is currently not running for this network.
            return self.enable_dhcp_helper(network_id)

        network = self.safe_get_network_info(network_id)
        if not network:
            return

        if not any(s for s in network.subnets if s.enable_dhcp):
            self.disable_dhcp_helper(network.id)
            return
        old_non_local_subnets = getattr(old_network, 'non_local_subnets', [])
        new_non_local_subnets = getattr(network, 'non_local_subnets', [])
        old_cidrs = [
            s.cidr for s in (old_network.subnets + old_non_local_subnets)
            if s.enable_dhcp
        ]
        new_cidrs = [
            s.cidr for s in (network.subnets + new_non_local_subnets)
            if s.enable_dhcp
        ]
        if old_cidrs == new_cidrs:
            self.call_driver('reload_allocations', network)
            self.cache.put(network)
        elif self.call_driver('restart', network):
            self.cache.put(network)
        # mark all ports as active in case the sync included
        # new ports that we hadn't seen yet.
        self.dhcp_ready_ports |= {p.id for p in network.ports}

        # Update the metadata proxy after the dhcp driver has been updated
        self.update_isolated_metadata_proxy(network)

    def network_create_end(self, context, payload):
        """Handle the network.create.end notification event."""
        update = queue.ResourceUpdate(payload['network']['id'],
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_network_create',
                                      resource=payload)
        self._queue.add(update)

    @_wait_if_syncing
    def _network_create(self, payload):
        network_id = payload['network']['id']
        self.enable_dhcp_helper(network_id)

    def network_update_end(self, context, payload):
        """Handle the network.update.end notification event."""
        update = queue.ResourceUpdate(payload['network']['id'],
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_network_update',
                                      resource=payload)
        self._queue.add(update)

    @_wait_if_syncing
    def _network_update(self, payload):
        network_id = payload['network']['id']
        if payload['network']['admin_state_up']:
            self.enable_dhcp_helper(network_id)
        else:
            self.disable_dhcp_helper(network_id)

    def network_delete_end(self, context, payload):
        """Handle the network.delete.end notification event."""
        update = queue.ResourceUpdate(payload['network_id'],
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_network_delete',
                                      resource=payload)
        self._queue.add(update)

    @_wait_if_syncing
    def _network_delete(self, payload):
        network_id = payload['network_id']
        self.disable_dhcp_helper(network_id)

    def subnet_update_end(self, context, payload):
        """Handle the subnet.update.end notification event."""
        update = queue.ResourceUpdate(payload['subnet']['network_id'],
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_subnet_update',
                                      resource=payload)
        self._queue.add(update)

    @_wait_if_syncing
    def _subnet_update(self, payload):
        network_id = payload['subnet']['network_id']
        self.refresh_dhcp_helper(network_id)

    # Use the update handler for the subnet create event.
    subnet_create_end = subnet_update_end

    def _get_network_lock_id(self, payload):
        """Determine which lock to hold when servicing an RPC event"""
        # TODO(alegacy): in a future release this function can be removed and
        # uses of it can be replaced with payload['network_id'].  It exists
        # only to satisfy backwards compatibility between older servers and
        # newer agents.  Once the 'network_id' attribute is guaranteed to be
        # sent by the server on all *_delete_end events then it can be removed.
        if 'network_id' in payload:
            return payload['network_id']
        elif 'subnet_id' in payload:
            subnet_id = payload['subnet_id']
            network = self.cache.get_network_by_subnet_id(subnet_id)
            return network.id if network else None
        elif 'port_id' in payload:
            port_id = payload['port_id']
            port = self.cache.get_port_by_id(port_id)
            return port.network_id if port else None

    def subnet_delete_end(self, context, payload):
        """Handle the subnet.delete.end notification event."""
        network_id = self._get_network_lock_id(payload)
        if not network_id:
            return
        update = queue.ResourceUpdate(network_id,
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_subnet_delete',
                                      resource=payload)
        self._queue.add(update)

    @_wait_if_syncing
    def _subnet_delete(self, payload):
        network_id = self._get_network_lock_id(payload)
        if not network_id:
            return
        subnet_id = payload['subnet_id']
        network = self.cache.get_network_by_subnet_id(subnet_id)
        if not network:
            return
        self.refresh_dhcp_helper(network.id)

    @lockutils.synchronized('resize_greenpool')
    def _resize_process_pool(self):
        num_nets = len(self.cache.get_network_ids())
        pool_size = max([
            DHCP_PROCESS_GREENLET_MIN,
            min([DHCP_PROCESS_GREENLET_MAX, num_nets])
        ])
        if pool_size == self._pool_size:
            return
        LOG.info("Resizing dhcp processing queue green pool size to: %d",
                 pool_size)
        self._pool.resize(pool_size)
        self._pool_size = pool_size

    def _process_loop(self):
        LOG.debug("Starting _process_loop")

        while True:
            self._pool.spawn_n(self._process_resource_update)

    def _process_resource_update(self):
        for tmp, update in self._queue.each_update_to_next_resource():
            method = getattr(self, update.action)
            method(update.resource)

    def port_update_end(self, context, payload):
        """Handle the port.update.end notification event."""
        updated_port = dhcp.DictModel(payload['port'])
        if self.cache.is_port_message_stale(updated_port):
            LOG.debug("Discarding stale port update: %s", updated_port)
            return
        update = queue.ResourceUpdate(updated_port.network_id,
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_port_update',
                                      resource=updated_port)
        self._queue.add(update)

    @_wait_if_syncing
    def _port_update(self, updated_port):
        if self.cache.is_port_message_stale(updated_port):
            LOG.debug("Discarding stale port update: %s", updated_port)
            return
        network = self.cache.get_network_by_id(updated_port.network_id)
        if not network:
            return
        self.reload_allocations(updated_port, network)

    def reload_allocations(self, port, network):
        LOG.info("Trigger reload_allocations for port %s", port)
        driver_action = 'reload_allocations'
        if self._is_port_on_this_agent(port):
            orig = self.cache.get_port_by_id(port['id'])
            # assume IP change if not in cache
            orig = orig or {'fixed_ips': []}
            old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
            new_ips = {i['ip_address'] for i in port['fixed_ips']}
            old_subs = {i['subnet_id'] for i in orig['fixed_ips'] or []}
            new_subs = {i['subnet_id'] for i in port['fixed_ips']}
            if new_subs != old_subs:
                # subnets being serviced by port have changed, this could
                # indicate a subnet_delete is in progress. schedule a
                # resync rather than an immediate restart so we don't
                # attempt to re-allocate IPs at the same time the server
                # is deleting them.
                self.schedule_resync("Agent port was modified",
                                     port.network_id)
                return
            elif old_ips != new_ips:
                LOG.debug("Agent IPs on network %s changed from %s to %s",
                          network.id, old_ips, new_ips)
                driver_action = 'restart'
        self.cache.put_port(port)
        self.call_driver(driver_action, network)
        self.dhcp_ready_ports.add(port.id)
        self.update_isolated_metadata_proxy(network)

    def _is_port_on_this_agent(self, port):
        thishost = utils.get_dhcp_agent_device_id(port['network_id'],
                                                  self.conf.host)
        return port['device_id'] == thishost

    def port_create_end(self, context, payload):
        """Handle the port.create.end notification event."""
        created_port = dhcp.DictModel(payload['port'])
        update = queue.ResourceUpdate(created_port.network_id,
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_port_create',
                                      resource=created_port)
        self._queue.add(update)

    @_wait_if_syncing
    def _port_create(self, created_port):
        network = self.cache.get_network_by_id(created_port.network_id)
        if not network:
            return
        new_ips = {i['ip_address'] for i in created_port['fixed_ips']}
        for port_cached in network.ports:
            # if in the same network there are ports cached with the same
            # ip address but different MAC address and/or different id,
            # this indicates that the cache is out of sync
            cached_ips = {i['ip_address'] for i in port_cached['fixed_ips']}
            if (new_ips.intersection(cached_ips) and
                (created_port['id'] != port_cached['id'] or
                 created_port['mac_address'] != port_cached['mac_address'])):
                self.schedule_resync(
                    "Duplicate IP addresses found, "
                    "DHCP cache is out of sync", created_port.network_id)
                return
        self.reload_allocations(created_port, network)

    def port_delete_end(self, context, payload):
        """Handle the port.delete.end notification event."""
        network_id = self._get_network_lock_id(payload)
        if not network_id:
            return
        update = queue.ResourceUpdate(network_id,
                                      payload.get('priority',
                                                  DEFAULT_PRIORITY),
                                      action='_port_delete',
                                      resource=payload)
        self._queue.add(update)

    @_wait_if_syncing
    def _port_delete(self, payload):
        network_id = self._get_network_lock_id(payload)
        if not network_id:
            return
        port_id = payload['port_id']
        port = self.cache.get_port_by_id(port_id)
        self.cache.deleted_ports.add(port_id)
        if not port:
            return
        network = self.cache.get_network_by_id(port.network_id)
        self.cache.remove_port(port)
        if self._is_port_on_this_agent(port):
            # the agent's port has been deleted. disable the service
            # and add the network to the resync list to create
            # (or acquire a reserved) port.
            self.call_driver('disable', network)
            self.schedule_resync("Agent port was deleted", port.network_id)
        else:
            self.call_driver('reload_allocations', network)
            self.update_isolated_metadata_proxy(network)

    def update_isolated_metadata_proxy(self, network):
        """Spawn or kill metadata proxy.

        According to the return value from the driver class, spawn or kill
        the metadata proxy process. Spawning an already-running metadata
        proxy or killing a nonexistent one will just silently return.
        """
        should_enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
            self.conf, network)
        if should_enable_metadata:
            self.enable_isolated_metadata_proxy(network)
        else:
            self.disable_isolated_metadata_proxy(network)

    def enable_isolated_metadata_proxy(self, network):

        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        kwargs = {'network_id': network.id}
        # When the metadata network is enabled, the proxy might
        # be started for the router attached to the network
        if self.conf.enable_metadata_network:
            router_ports = [
                port for port in network.ports
                if (port.device_owner in constants.ROUTER_INTERFACE_OWNERS)
            ]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(
                        "%(port_num)d router ports found on the "
                        "metadata access network. Only the port "
                        "%(port_id)s, for router %(router_id)s "
                        "will be considered", {
                            'port_num': len(router_ports),
                            'port_id': router_ports[0].id,
                            'router_id': router_ports[0].device_id
                        })
                all_subnets = self.dhcp_driver_cls._get_all_subnets(network)
                if self.dhcp_driver_cls.has_metadata_subnet(all_subnets):
                    kwargs = {'router_id': router_ports[0].device_id}
                    self._metadata_routers[network.id] = (
                        router_ports[0].device_id)

        metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
            self._process_monitor,
            network.namespace,
            dhcp.METADATA_PORT,
            self.conf,
            bind_address=dhcp.METADATA_DEFAULT_IP,
            **kwargs)

    def disable_isolated_metadata_proxy(self, network):
        if (self.conf.enable_metadata_network
                and network.id in self._metadata_routers):
            uuid = self._metadata_routers[network.id]
            is_router_id = True
        else:
            uuid = network.id
            is_router_id = False
        metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
            self._process_monitor, uuid, self.conf, network.namespace)
        if is_router_id:
            del self._metadata_routers[network.id]
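
The utils.throttler decorator applied to clear_periodic_resync_event above is
not shown in this example. The following is a minimal standalone sketch of the
throttling idea only, not Neutron's implementation: it enforces a minimum
delay between successive invocations of the wrapped function.

import functools
import time


def simple_throttler(min_interval):
    """Decorator enforcing at least min_interval seconds between calls."""
    def decorator(func):
        last_call = [0.0]

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Sleep out whatever remains of the throttle window before
            # letting the wrapped function run again.
            remaining = min_interval - (time.monotonic() - last_call[0])
            if remaining > 0:
                time.sleep(remaining)
            last_call[0] = time.monotonic()
            return func(*args, **kwargs)
        return wrapper
    return decorator
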
Code example #30
 def __init__(self, topic, host):
     self.host = host
     self.target = oslo_messaging.Target(topic=topic, version='1.0')
     self.client = n_rpc.get_client(self.target)