Example #1
File: api.py Project: alanmeadows/cobalt
    def __init__(self, image_service=None, **kwargs):
        super(API, self).__init__(**kwargs)
        self.compute_api = compute.API()
        self.image_service = image_service if image_service is not None else image.ImageService()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.CAPABILITIES = CAPABILITIES
        self.sg_api = sg_driver.get_openstack_security_group_driver()

        # Fix up any power states related to blessed instances.
        elevated = context.get_admin_context()
        instances = self.compute_api.get_all(elevated,
                                             {'deleted':False})
        for instance in instances:
            if instance['power_state'] is None:
                # (dscannell) We need to update the power_state to something
                # valid. Since it is a blessed instance we simply update its
                # state to 'no state'.
                self.db.instance_update(elevated, instance['uuid'],
                                        {'power_state':power_state.NOSTATE})
            # (rui-lin) Host or nova-gc process failure during bless can cause
            # source instance to be undeletable and stuck in 'blessing' state,
            # so we clear state to default and allow it to be deleted if needed
            if instance['vm_state'] == vm_states.ACTIVE:
                if instance['task_state'] == "blessing":
                    self.db.instance_update(elevated, instance['uuid'],
                        {'disable_terminate':False,'task_state':'None'})
Example #2
def fake_InstanceMetadata(
    stubs,
    inst_data,
    address=None,
    sgroups=None,
    content=None,
    extra_md=None,
    vd_driver=None,
    network_info=None,
    network_metadata=None,
):
    content = content or []
    extra_md = extra_md or {}
    if sgroups is None:
        sgroups = [{"name": "default"}]

    def sg_get(*args, **kwargs):
        return sgroups

    secgroup_api = openstack_driver.get_openstack_security_group_driver()
    stubs.Set(secgroup_api.__class__, "get_instance_security_groups", sg_get)
    return base.InstanceMetadata(
        inst_data,
        address=address,
        content=content,
        extra_md=extra_md,
        vd_driver=vd_driver,
        network_info=network_info,
        network_metadata=network_metadata,
    )
Example #3
    def __init__(self, image_service=None, **kwargs):
        super(API, self).__init__(**kwargs)
        self.compute_api = compute.API()
        self.image_service = image_service if image_service is not None else image.ImageService()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.CAPABILITIES = CAPABILITIES
        self.sg_api = sg_driver.get_openstack_security_group_driver()

        # Fix up any power states related to blessed instances.
        elevated = context.get_admin_context()
        instances = self.compute_api.get_all(elevated, {'deleted': False})
        for instance in instances:
            if instance['power_state'] is None:
                # (dscannell) We need to update the power_state to something
                # valid. Since it is a blessed instance we simply update its
                # state to 'no state'.
                self.db.instance_update(elevated, instance['uuid'],
                                        {'power_state': power_state.NOSTATE})
            # (rui-lin) Host or nova-gc process failure during bless can cause
            # source instance to be undeletable and stuck in 'blessing' state,
            # so we clear state to default and allow it to be deleted if needed
            if instance['vm_state'] == vm_states.ACTIVE:
                if instance['task_state'] == "blessing":
                    self.db.instance_update(elevated, instance['uuid'], {
                        'disable_terminate': False,
                        'task_state': 'None'
                    })
Example #4
File: manager.py Project: k-i-t-e/nova
 def __init__(self, *args, **kwargs):
     super(ConductorManager, self).__init__(*args, **kwargs)
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self._network_api = None
     self._compute_api = None
     self.quotas = quota.QUOTAS
Example #5
File: manager.py Project: djipko/nova
 def __init__(self, *args, **kwargs):
     super(ConductorManager, self).__init__(service_name="conductor", *args, **kwargs)
     self.security_group_api = openstack_driver.get_openstack_security_group_driver()
     self._network_api = None
     self._compute_api = None
     self.compute_task_mgr = ComputeTaskManager()
     self.quotas = quota.QUOTAS
Example #6
 def __init__(self):
     super(API, self).__init__()
     self.last_neutron_extension_sync = None
     self.extensions = {}
     self.conductor_api = conductor.API()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #7
File: api.py Project: kavonm/nova
 def __init__(self):
     super(API, self).__init__()
     self.last_neutron_extension_sync = None
     self.extensions = {}
     self.conductor_api = conductor.API()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #8
 def __init__(self, *args, **kwargs):
     super(ConductorManager, self).__init__(*args, **kwargs)
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self._network_api = None
     self._compute_api = None
     self.quotas = quota.QUOTAS
Example #9
 def __init__(self, *args, **kwargs):
     super(SecurityGroupActionController, self).__init__(*args, **kwargs)
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver(
             skip_policy_check=True))
     self.compute_api = compute.API(
         security_group_api=self.security_group_api, skip_policy_check=True)
Example #10
 def __init__(self, *args, **kwargs):
     super(SecurityGroupActionController, self).__init__(*args, **kwargs)
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver(
             skip_policy_check=True))
     self.compute_api = compute.API(
         security_group_api=self.security_group_api, skip_policy_check=True)
Example #11
File: manager.py Project: nash-x/hws
 def __init__(self, *args, **kwargs):
     super(ConductorManager, self).__init__(service_name="conductor", *args, **kwargs)
     self.security_group_api = openstack_driver.get_openstack_security_group_driver()
     self._network_api = None
     self._compute_api = None
     self.compute_task_mgr = ComputeTaskManager()
     self.cells_rpcapi = cells_rpcapi.CellsAPI()
     self.additional_endpoints.append(self.compute_task_mgr)
Example #12
 def __init__(self, *args, **kwargs):
     super(ConductorManager, self).__init__(service_name='conductor',
                                            *args, **kwargs)
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self._network_api = None
     self._compute_api = None
     self.compute_task_mgr = ComputeTaskManager()
     self.quotas = quota.QUOTAS
Example #13
 def __init__(self):
     """Initialize view builder."""
     super(ViewBuilder, self).__init__()
     self._address_builder = views_addresses.ViewBuilder()
     self._image_builder = views_images.ViewBuilder()
     self._flavor_builder = views_flavors.ViewBuilder()
     self.compute_api = compute.API()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #14
 def __init__(self):
     """Initialize view builder."""
     super(ViewBuilder, self).__init__()
     self._address_builder = views_addresses.ViewBuilder()
     self._image_builder = views_images.ViewBuilder()
     self._flavor_builder = views_flavors.ViewBuilder()
     self.compute_api = compute.API()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #15
 def __init__(self, *args, **kwargs):
     super(ConductorManager, self).__init__(service_name='conductor',
                                            *args, **kwargs)
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self._network_api = None
     self._compute_api = None
     self.compute_task_mgr = ComputeTaskManager()
     self.cells_rpcapi = cells_rpcapi.CellsAPI()
     self.additional_endpoints.append(self.compute_task_mgr)
Example #16
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver(
             skip_policy_check=True))
Example #17
 def test_caches(self, mock_import):
     sgapi.DRIVER_CACHE = None
     for _ in range(2):
         self.assertIsNotNone(sgapi.get_openstack_security_group_driver())
     mock_import.assert_called_once_with(sgapi.NEUTRON_DRIVER)
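The test above relies on the factory caching whatever driver it loads, which is why the mocked import is expected to run exactly once across repeated calls. A rough sketch of that caching behaviour (an assumption for illustration, not nova's actual module; NEUTRON_DRIVER and DRIVER_CACHE simply follow the names used in the test):

# Hedged sketch of the caching exercised by test_caches; the body is an
# assumption, only the two module-level names come from the test itself.
from oslo_utils import importutils

NEUTRON_DRIVER = 'nova.network.security_group.neutron_driver.SecurityGroupAPI'
DRIVER_CACHE = None


def get_openstack_security_group_driver():
    global DRIVER_CACHE
    if DRIVER_CACHE is None:
        # The import happens only on the first call; later calls hit the cache.
        driver_cls = importutils.import_class(NEUTRON_DRIVER)
        DRIVER_CACHE = driver_cls()
    return DRIVER_CACHE

Resetting DRIVER_CACHE to None, as the test does in its first line, forces the next call to import the driver again.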
Example #18
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self.compute_api = compute.API(
                                security_group_api=self.security_group_api)
Example #19
 def __init__(self, *args, **kwargs):
     super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
     self.compute_api = compute.API()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #20
    def apply(self, context, resource):

        self.security_group_api = \
                    openstack_driver.get_openstack_security_group_driver()

        base_options = {
            'reservation_id': resource.reservation_id,
            'image_ref': resource.image_href,
            'kernel_id': resource.kernel_id or '',
            'ramdisk_id': resource.ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive_id': resource.config_drive_id or '',
            'config_drive': resource.config_drive or '',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ',
                time.gmtime()),
            'instance_type_id': resource.instance_type['id'],
            'memory_mb': resource.instance_type['memory_mb'],
            'vcpus': resource.instance_type['vcpus'],
            'root_gb': resource.instance_type['root_gb'],
            'ephemeral_gb': resource.instance_type['ephemeral_gb'],
            'display_name': resource.display_name,
            'display_description': resource.display_description,
            'user_data': resource.user_data,
            'key_name': resource.key_name,
            'key_data': resource.key_data,
            'locked': False,
            'metadata': resource.metadata,
            'access_ip_v4': resource.access_ip_v4,
            'access_ip_v6': resource.access_ip_v6,
            'availability_zone': resource.availability_zone,
            'root_device_name': resource.root_device_name,
            'progress': 0,
            'system_metadata': resource.system_metadata}

        options_from_image = self._inherit_properties_from_image(
                resource.image, resource.auto_disk_config)

        base_options.update(options_from_image)

        LOG.debug(_("Going to run %s instances..."), resource.num_instances)

        filter_properties = dict(scheduler_hints=resource.scheduler_hints)
        if resource.forced_host:
            _check_policy(context, 'create:forced_host', {})
            filter_properties['force_hosts'] = [resource.forced_host]

        resource.filter_properties = filter_properties

        # Create DB Entry for the instances and initiate a workflow request
        for i in xrange(resource.num_instances):
            options = base_options.copy()
            instance = self._create_db_entry_for_new_instance(context,
                                             resource.image,
                                             options,
                                             resource.security_group,
                                             resource.block_device_mapping,
                                             resource.num_instances, i)
            resource.instances.append(jsonutils.to_primitive(instance))

            # send a state update notification for the initial create to
            # show it going from non-existent to BUILDING
            notifications.send_update_with_states(context, instance, None,
                vm_states.BUILDING, None, None, service="api")

        # Commit the reservations
        QUOTAS.commit(context, resource.quota_reservations)

        # Record the starting of instances in the db
        for instance in resource.instances:
            self._record_action_start(context, instance,
                instance_actions.CREATE)

        return orc_utils.DictableObject(details='created_db_entry',
                                        resource=resource)
Example #21
class API(base.Base):
    """API for interacting with the neutron 2.x API."""

    conductor_api = conductor.API()
    security_group_api = openstack_driver.get_openstack_security_group_driver()

    def __init__(self):
        super(API, self).__init__()
        self.last_neutron_extension_sync = None
        self.extensions = {}

    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures."""

    def _get_available_networks(self, context, project_id,
                                net_ids=None):
        """Return a network list available for the tenant.
        The list contains networks owned by the tenant and public networks.
        If net_ids specified, it searches networks with requested IDs only.
        """
        neutron = neutronv2.get_client(context)

        # If user has specified to attach instance only to specific
        # networks, add them to **search_opts
        # (1) Retrieve non-public network list owned by the tenant.
        search_opts = {"tenant_id": project_id, 'shared': False}
        if net_ids:
            search_opts['id'] = net_ids
        nets = neutron.list_networks(**search_opts).get('networks', [])
        # (2) Retrieve public network list.
        search_opts = {'shared': True}
        if net_ids:
            search_opts['id'] = net_ids
        nets += neutron.list_networks(**search_opts).get('networks', [])

        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)

        return nets

    @refresh_cache
    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate network resources for the instance.

        :param requested_networks: optional value containing
            network_id, fixed_ip, and port_id
        :param security_groups: security groups to allocate for instance
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
            to arbitrary networks, which requires openflow switches to
            function correctly if more than one network is being used with
            the bare metal hypervisor (which is the only one known to limit
            MAC addresses).
        """
        hypervisor_macs = kwargs.get('macs', None)
        available_macs = None
        if hypervisor_macs is not None:
            # Make a copy we can mutate: records macs that have not been used
            # to create a port on a network. If we find a mac with a
            # pre-allocated port we also remove it from this set.
            available_macs = set(hypervisor_macs)
        neutron = neutronv2.get_client(context)
        LOG.debug(_('allocate_for_instance() for %s'),
                  instance['display_name'])
        if not instance['project_id']:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance['display_name'])
        requested_networks = kwargs.get('requested_networks')
        ports = {}
        fixed_ips = {}
        net_ids = []
        if requested_networks:
            for network_id, fixed_ip, port_id in requested_networks:
                if port_id:
                    port = neutron.show_port(port_id)['port']
                    if hypervisor_macs is not None:
                        if port['mac_address'] not in hypervisor_macs:
                            raise exception.PortNotUsable(port_id=port_id,
                                instance=instance['display_name'])
                        else:
                            # Don't try to use this MAC if we need to create a
                            # port on the fly later. Identical MACs may be
                            # configured by users into multiple ports so we
                            # discard rather than popping.
                            available_macs.discard(port['mac_address'])
                    network_id = port['network_id']
                    ports[network_id] = port
                elif fixed_ip and network_id:
                    fixed_ips[network_id] = fixed_ip
                if network_id:
                    net_ids.append(network_id)

        nets = self._get_available_networks(context, instance['project_id'],
                                            net_ids)

        if not nets:
            LOG.warn(_("No network configured!"), instance=instance)
            return []

        security_groups = kwargs.get('security_groups', [])
        security_group_ids = []

        # TODO(arosen) Should optimize more to do direct query for security
        # group if len(security_groups) == 1
        if len(security_groups):
            search_opts = {'tenant_id': instance['project_id']}
            user_security_groups = neutron.list_security_groups(
                **search_opts).get('security_groups')

        for security_group in security_groups:
            name_match = None
            uuid_match = None
            for user_security_group in user_security_groups:
                if user_security_group['name'] == security_group:
                    if name_match:
                        msg = (_("Multiple security groups found matching"
                                 " '%s'. Use an ID to be more specific."),
                                 security_group)
                        raise exception.NoUniqueMatch(msg)
                    name_match = user_security_group['id']
                if user_security_group['id'] == security_group:
                    uuid_match = user_security_group['id']

            # If a user names a security group the same as another
            # security group's uuid, the name takes priority.
            if not name_match and not uuid_match:
                raise exception.SecurityGroupNotFound(
                    security_group_id=security_group)
            elif name_match:
                security_group_ids.append(name_match)
            elif uuid_match:
                security_group_ids.append(uuid_match)

        touched_port_ids = []
        created_port_ids = []
        for network in nets:
            # If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
            # implement the port-security extension which requires
            # 'port_security_enabled' to be True for security groups.
            # That is why True is returned if 'port_security_enabled'
            # is not found.
            if (security_groups and not (
                    network['subnets']
                    and network.get('port_security_enabled', True))):

                raise exception.SecurityGroupCannotBeApplied()
            network_id = network['id']
            zone = 'compute:%s' % instance['availability_zone']
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            try:
                port = ports.get(network_id)
                self._populate_neutron_extension_values(instance,
                                                        port_req_body)
                # Requires admin creds to set port bindings
                port_client = (neutron if not
                               self._has_port_binding_extension() else
                               neutronv2.get_client(context, admin=True))
                if port:
                    port_client.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                else:
                    fixed_ip = fixed_ips.get(network_id)
                    if fixed_ip:
                        port_req_body['port']['fixed_ips'] = [{'ip_address':
                                                               fixed_ip}]
                    port_req_body['port']['network_id'] = network_id
                    port_req_body['port']['admin_state_up'] = True
                    port_req_body['port']['tenant_id'] = instance['project_id']
                    if security_group_ids:
                        port_req_body['port']['security_groups'] = (
                            security_group_ids)
                    if available_macs is not None:
                        if not available_macs:
                            raise exception.PortNotFree(
                                instance=instance['display_name'])
                        mac_address = available_macs.pop()
                        port_req_body['port']['mac_address'] = mac_address
                    created_port_ids.append(
                        port_client.create_port(port_req_body)['port']['id'])
            except Exception:
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        try:
                            port_req_body = {'port': {'device_id': None}}
                            # Requires admin creds to set port bindings
                            if self._has_port_binding_extension():
                                port_req_body['port']['binding:host_id'] = None
                                port_client = neutronv2.get_client(
                                    context, admin=True)
                            else:
                                port_client = neutron
                            port_client.update_port(port_id, port_req_body)
                        except Exception:
                            msg = _("Failed to update port %s")
                            LOG.exception(msg, port_id)

                    for port_id in created_port_ids:
                        try:
                            neutron.delete_port(port_id)
                        except Exception:
                            msg = _("Failed to delete port %s")
                            LOG.exception(msg, port_id)

        nw_info = self._get_instance_nw_info(context, instance, networks=nets)
        # NOTE(danms): Only return info about ports we created in this run.
        # In the initial allocation case, this will be everything we created,
        # and in later runs will only be what was created that time. Thus,
        # this only affects the attach case, not the original use for this
        # method.
        return network_model.NetworkInfo([port for port in nw_info
                                          if port['id'] in created_port_ids +
                                                           touched_port_ids])

    def _refresh_neutron_extensions_cache(self):
        """Refresh the neutron extensions cache when necessary."""
        if (not self.last_neutron_extension_sync or
            ((time.time() - self.last_neutron_extension_sync)
             >= CONF.neutron_extension_sync_interval)):
            neutron = neutronv2.get_client(context.get_admin_context())
            extensions_list = neutron.list_extensions()['extensions']
            self.last_neutron_extension_sync = time.time()
            self.extensions.clear()
            self.extensions = dict((ext['name'], ext)
                                   for ext in extensions_list)

    def _has_port_binding_extension(self, refresh_cache=False):
        if refresh_cache:
            self._refresh_neutron_extensions_cache()
        return constants.PORTBINDING_EXT in self.extensions

    def _populate_neutron_extension_values(self, instance, port_req_body):
        """Populate neutron extension values for the instance.

        If the extension contains nvp-qos then get the rxtx_factor.
        """
        self._refresh_neutron_extensions_cache()
        if 'nvp-qos' in self.extensions:
            instance_type = flavors.extract_flavor(instance)
            rxtx_factor = instance_type.get('rxtx_factor')
            port_req_body['port']['rxtx_factor'] = rxtx_factor
        if self._has_port_binding_extension():
            port_req_body['port']['binding:host_id'] = instance.get('host')

    def deallocate_for_instance(self, context, instance, **kwargs):
        """Deallocate all network resources related to the instance."""
        LOG.debug(_('deallocate_for_instance() for %s'),
                  instance['display_name'])
        search_opts = {'device_id': instance['uuid']}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = [port['id'] for port in data.get('ports', [])]

        requested_networks = kwargs.get('requested_networks') or {}
        ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
        ports = set(ports) - set(ports_to_skip)

        for port in ports:
            try:
                neutronv2.get_client(context).delete_port(port)
            except Exception:
                LOG.exception(_("Failed to delete neutron port %(portid)s")
                              % {'portid': port})

    @refresh_cache
    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None,
                                   conductor_api=None):
        """Allocate a port for the instance."""
        return self.allocate_for_instance(context, instance,
                requested_networks=[(network_id, requested_ip, port_id)],
                conductor_api=conductor_api)

    @refresh_cache
    def deallocate_port_for_instance(self, context, instance, port_id,
                                     conductor_api=None):
        """Remove a specified port from the instance.

        Return network information for the instance
        """
        try:
            neutronv2.get_client(context).delete_port(port_id)
        except Exception as ex:
            LOG.exception(_("Failed to delete neutron port %s") %
                          port_id)

        return self._get_instance_nw_info(context, instance)

    def list_ports(self, context, **search_opts):
        """List ports for the client based on search options."""
        return neutronv2.get_client(context).list_ports(**search_opts)

    def show_port(self, context, port_id):
        """Return the port for the client given the port id."""
        return neutronv2.get_client(context).show_port(port_id)

    def get_instance_nw_info(self, context, instance, networks=None):
        """Return network information for specified instance
           and update cache.
        """
        result = self._get_instance_nw_info(context, instance, networks)
        update_instance_info_cache(self, context, instance, result,
                                   update_cells=False)
        return result

    def _get_instance_nw_info(self, context, instance, networks=None):
        LOG.debug(_('get_instance_nw_info() for %s'),
                  instance['display_name'])
        nw_info = self._build_network_info_model(context, instance, networks)
        return network_model.NetworkInfo.hydrate(nw_info)

    @refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id,
                                 conductor_api=None):
        """Add a fixed ip to the instance from specified network."""
        search_opts = {'network_id': network_id}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        if not ipam_subnets:
            raise exception.NetworkNotFoundForInstance(
                instance_id=instance['uuid'])

        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'network_id': network_id}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            for subnet in ipam_subnets:
                fixed_ips = p['fixed_ips']
                fixed_ips.append({'subnet_id': subnet['id']})
                port_req_body = {'port': {'fixed_ips': fixed_ips}}
                try:
                    neutronv2.get_client(context).update_port(p['id'],
                                                              port_req_body)
                    return
                except Exception as ex:
                    msg = _("Unable to update port %(portid)s on subnet "
                            "%(subnet_id)s with failure: %(exception)s")
                    LOG.debug(msg, {'portid': p['id'],
                                    'subnet_id': subnet['id'],
                                    'exception': ex})

        raise exception.NetworkNotFoundForInstance(
                instance_id=instance['uuid'])

    @refresh_cache
    def remove_fixed_ip_from_instance(self, context, instance, address,
                                      conductor_api=None):
        """Remove a fixed ip from the instance."""
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'fixed_ips': 'ip_address=%s' % address}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            fixed_ips = p['fixed_ips']
            new_fixed_ips = []
            for fixed_ip in fixed_ips:
                if fixed_ip['ip_address'] != address:
                    new_fixed_ips.append(fixed_ip)
            port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
            try:
                neutronv2.get_client(context).update_port(p['id'],
                                                          port_req_body)
            except Exception as ex:
                msg = _("Unable to update port %(portid)s with"
                        " failure: %(exception)s")
                LOG.debug(msg, {'portid': p['id'], 'exception': ex})
            return

        raise exception.FixedIpNotFoundForSpecificInstance(
                instance_uuid=instance['uuid'], ip=address)

    def validate_networks(self, context, requested_networks):
        """Validate that the tenant can use the requested networks."""
        LOG.debug(_('validate_networks() for %s'),
                  requested_networks)

        if not requested_networks:
            nets = self._get_available_networks(context, context.project_id)
            if len(nets) > 1:
                # Attaching to more than one network by default doesn't
                # make sense, as the order will be arbitrary and the guest OS
                # won't know which to configure
                msg = _("Multiple possible networks found, use a Network "
                         "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            return

        net_ids = []

        for (net_id, _i, port_id) in requested_networks:
            if port_id:
                try:
                    port = (neutronv2.get_client(context)
                                     .show_port(port_id)
                                     .get('port'))
                except neutronv2.exceptions.NeutronClientException as e:
                    if e.status_code == 404:
                        port = None
                if not port:
                    raise exception.PortNotFound(port_id=port_id)
                if port.get('device_id', None):
                    raise exception.PortInUse(port_id=port_id)
                net_id = port['network_id']
            if net_id in net_ids:
                raise exception.NetworkDuplicated(network_id=net_id)
            net_ids.append(net_id)

        # Now check to see if all requested networks exist
        nets = self._get_available_networks(context,
                                context.project_id, net_ids)

        if len(nets) != len(net_ids):
            requested_netid_set = set(net_ids)
            returned_netid_set = set([net['id'] for net in nets])
            lostid_set = requested_netid_set - returned_netid_set
            id_str = ''
            for _id in lostid_set:
                id_str = id_str and id_str + ', ' + _id or _id
            raise exception.NetworkNotFound(network_id=id_str)

    def _get_instance_uuids_by_ip(self, context, address):
        """Retrieve instance uuids associated with the given ip address.

        :returns: A list of dicts containing the uuids keyed by 'instance_uuid'
                  e.g. [{'instance_uuid': uuid}, ...]
        """
        search_opts = {"fixed_ips": 'ip_address=%s' % address}
        data = neutronv2.get_client(context).list_ports(**search_opts)
        ports = data.get('ports', [])
        return [{'instance_uuid': port['device_id']} for port in ports
                if port['device_id']]

    def get_instance_uuids_by_ip_filter(self, context, filters):
        """Return a list of dicts in the form of
        [{'instance_uuid': uuid}] that matched the ip filter.
        """
        # filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
        ip = filters.get('ip')
        # strip the ^, $ and backslash escapes from the ip filter
        if ip[0] == '^':
            ip = ip[1:]
        if ip[-1] == '$':
            ip = ip[:-1]
        ip = ip.replace('\\.', '.')
        return self._get_instance_uuids_by_ip(context, ip)

    def _get_port_id_by_fixed_address(self, client,
                                      instance, address):
        """Return port_id from a fixed address."""
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone}
        data = client.list_ports(**search_opts)
        ports = data['ports']
        port_id = None
        for p in ports:
            for ip in p['fixed_ips']:
                if ip['ip_address'] == address:
                    port_id = p['id']
                    break
        if not port_id:
            raise exception.FixedIpNotFoundForAddress(address=address)
        return port_id

    @refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associate a floating ip with a fixed ip."""

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = neutronv2.get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        client.update_floatingip(fip['id'], {'floatingip': param})

        if fip['port_id']:
            port = client.show_port(fip['port_id'])['port']
            orig_instance_uuid = port['device_id']

            msg_dict = dict(address=floating_address,
                            instance_id=orig_instance_uuid)
            LOG.info(_('re-assign floating IP %(address)s from '
                       'instance %(instance_id)s') % msg_dict)
            orig_instance = self.db.instance_get_by_uuid(context,
                                                         orig_instance_uuid)

            # purge cached nw info for the original instance
            update_instance_info_cache(self, context, orig_instance)

    def get_all(self, context):
        """Get all networks for client."""
        client = neutronv2.get_client(context)
        networks = client.list_networks().get('networks')
        for network in networks:
            network['label'] = network['name']
        return networks

    def get(self, context, network_uuid):
        """Get specific network for client."""
        client = neutronv2.get_client(context)
        network = client.show_network(network_uuid).get('network') or {}
        network['label'] = network['name']
        return network

    def delete(self, context, network_uuid):
        """Delete a network for client."""
        raise NotImplementedError()

    def disassociate(self, context, network_uuid):
        """Disassociate a network for client."""
        raise NotImplementedError()

    def get_fixed_ip(self, context, id):
        """Get a fixed ip from the id."""
        raise NotImplementedError()

    def get_fixed_ip_by_address(self, context, address):
        """Return instance uuids given an address."""
        uuid_maps = self._get_instance_uuids_by_ip(context, address)
        if len(uuid_maps) == 1:
            return uuid_maps[0]
        elif not uuid_maps:
            raise exception.FixedIpNotFoundForAddress(address=address)
        else:
            raise exception.FixedIpAssociatedWithMultipleInstances(
                address=address)

    def _setup_net_dict(self, client, network_id):
        if not network_id:
            return {}
        pool = client.show_network(network_id)['network']
        return {pool['id']: pool}

    def _setup_port_dict(self, client, port_id):
        if not port_id:
            return {}
        port = client.show_port(port_id)['port']
        return {port['id']: port}

    def _setup_pools_dict(self, client):
        pools = self._get_floating_ip_pools(client)
        return dict([(i['id'], i) for i in pools])

    def _setup_ports_dict(self, client, project_id=None):
        search_opts = {'tenant_id': project_id} if project_id else {}
        ports = client.list_ports(**search_opts)['ports']
        return dict([(p['id'], p) for p in ports])

    def get_floating_ip(self, context, id):
        """Return floating ip object given the floating ip id."""
        client = neutronv2.get_client(context)
        try:
            fip = client.show_floatingip(id)['floatingip']
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                raise exception.FloatingIpNotFound(id=id)
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def _get_floating_ip_pools(self, client, project_id=None):
        search_opts = {constants.NET_EXTERNAL: True}
        if project_id:
            search_opts.update({'tenant_id': project_id})
        data = client.list_networks(**search_opts)
        return data['networks']

    def get_floating_ip_pools(self, context):
        """Return floating ip pools."""
        client = neutronv2.get_client(context)
        pools = self._get_floating_ip_pools(client)
        return [{'name': n['name'] or n['id']} for n in pools]

    def _format_floating_ip_model(self, fip, pool_dict, port_dict):
        pool = pool_dict[fip['floating_network_id']]
        result = {'id': fip['id'],
                  'address': fip['floating_ip_address'],
                  'pool': pool['name'] or pool['id'],
                  'project_id': fip['tenant_id'],
                  # In Neutron v2, an exact fixed_ip_id does not exist.
                  'fixed_ip_id': fip['port_id'],
                  }
        # In Neutron v2 API fixed_ip_address and instance uuid
        # (= device_id) are known here, so pass it as a result.
        result['fixed_ip'] = {'address': fip['fixed_ip_address']}
        if fip['port_id']:
            instance_uuid = port_dict[fip['port_id']]['device_id']
            result['instance'] = {'uuid': instance_uuid}
        else:
            result['instance'] = None
        return result

    def get_floating_ip_by_address(self, context, address):
        """Return a floating ip given an address."""
        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def get_floating_ips_by_project(self, context):
        client = neutronv2.get_client(context)
        project_id = context.project_id
        fips = client.list_floatingips(tenant_id=project_id)['floatingips']
        pool_dict = self._setup_pools_dict(client)
        port_dict = self._setup_ports_dict(client, project_id)
        return [self._format_floating_ip_model(fip, pool_dict, port_dict)
                for fip in fips]

    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        return []

    def get_instance_id_by_floating_address(self, context, address):
        """Return the instance id a floating ip's fixed ip is allocated to."""
        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if not fip['port_id']:
            return None
        port = client.show_port(fip['port_id'])['port']
        return port['device_id']

    def get_vifs_by_instance(self, context, instance):
        raise NotImplementedError()

    def get_vif_by_mac_address(self, context, mac_address):
        raise NotImplementedError()

    def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
        search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
        if uuidutils.is_uuid_like(name_or_id):
            search_opts.update({'id': name_or_id})
        else:
            search_opts.update({'name': name_or_id})
        data = client.list_networks(**search_opts)
        nets = data['networks']

        if len(nets) == 1:
            return nets[0]['id']
        elif len(nets) == 0:
            raise exception.FloatingIpPoolNotFound()
        else:
            msg = (_("Multiple floating IP pools matches found for name '%s'")
                   % name_or_id)
            raise exception.NovaException(message=msg)

    def allocate_floating_ip(self, context, pool=None):
        """Add a floating ip to a project from a pool."""
        client = neutronv2.get_client(context)
        pool = pool or CONF.default_floating_pool
        pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)

        # TODO(amotoki): handle exception during create_floatingip()
        # At this point it is ensured that a network for the pool exists.
        # A quota error may still be returned.
        param = {'floatingip': {'floating_network_id': pool_id}}
        fip = client.create_floatingip(param)
        return fip['floatingip']['floating_ip_address']

    def _get_floating_ip_by_address(self, client, address):
        """Get floatingip from floating ip address."""
        if not address:
            raise exception.FloatingIpNotFoundForAddress(address=address)
        data = client.list_floatingips(floating_ip_address=address)
        fips = data['floatingips']
        if len(fips) == 0:
            raise exception.FloatingIpNotFoundForAddress(address=address)
        elif len(fips) > 1:
            raise exception.FloatingIpMultipleFoundForAddress(address=address)
        return fips[0]

    def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
        """Get floatingips from fixed ip and port."""
        try:
            data = client.list_floatingips(fixed_ip_address=fixed_ip,
                                           port_id=port)
        # If a neutron plugin does not implement the L3 API a 404 from
        # list_floatingips will be raised.
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                return []
            raise
        return data['floatingips']

    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Remove a floating ip with the given address from a project."""

        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address range. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if fip['port_id']:
            raise exception.FloatingIpAssociated(address=address)
        client.delete_floatingip(fip['id'])

    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating ip from the instance."""

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})

    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so that instance migration does not
        # raise for now.
        pass

    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        if not self._has_port_binding_extension(refresh_cache=True):
            return
        neutron = neutronv2.get_client(context, admin=True)
        search_opts = {'device_id': instance['uuid'],
                       'tenant_id': instance['project_id']}
        data = neutron.list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            port_req_body = {'port': {'binding:host_id': instance.get('host')}}
            try:
                neutron.update_port(p['id'], port_req_body)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    msg = _("Unable to update host of port %s")
                    LOG.exception(msg, p['id'])

    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force add a network to the project."""
        raise NotImplementedError()

    def _nw_info_get_ips(self, client, port):
        network_IPs = []
        for fixed_ip in port['fixed_ips']:
            fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
            floats = self._get_floating_ips_by_fixed_and_port(
                client, fixed_ip['ip_address'], port['id'])
            for ip in floats:
                fip = network_model.IP(address=ip['floating_ip_address'],
                                       type='floating')
                fixed.add_floating_ip(fip)
            network_IPs.append(fixed)
        return network_IPs

    def _nw_info_get_subnets(self, context, port, network_IPs):
        subnets = self._get_subnets_from_port(context, port)
        for subnet in subnets:
            subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
                             if fixed_ip.is_in_subnet(subnet)]
        return subnets

    def _nw_info_build_network(self, port, networks, subnets):
        # NOTE(danms): This loop can't fail to find a network since we
        # filtered ports to only the ones matching networks in our parent
        for net in networks:
            if port['network_id'] == net['id']:
                network_name = net['name']
                break

        bridge = None
        ovs_interfaceid = None
        # Network model metadata
        should_create_bridge = None
        vif_type = port.get('binding:vif_type')
        # TODO(berrange) Neutron should pass the bridge name
        # in another binding metadata field
        if vif_type == network_model.VIF_TYPE_OVS:
            bridge = CONF.neutron_ovs_bridge
            ovs_interfaceid = port['id']
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            bridge = "brq" + port['network_id']
            should_create_bridge = True

        if bridge is not None:
            bridge = bridge[:network_model.NIC_NAME_LEN]

        network = network_model.Network(
            id=port['network_id'],
            bridge=bridge,
            injected=CONF.flat_injected,
            label=network_name,
            tenant_id=net['tenant_id']
            )
        network['subnets'] = subnets
        if should_create_bridge is not None:
            network['should_create_bridge'] = should_create_bridge
        return network, ovs_interfaceid

    def _build_network_info_model(self, context, instance, networks=None):
        search_opts = {'tenant_id': instance['project_id'],
                       'device_id': instance['uuid'], }
        client = neutronv2.get_client(context, admin=True)
        data = client.list_ports(**search_opts)
        ports = data.get('ports', [])
        if networks is None:
            # retrieve networks from info_cache to get correct nic order
            network_cache = self.conductor_api.instance_get_by_uuid(
                context, instance['uuid'])['info_cache']['network_info']
            network_cache = jsonutils.loads(network_cache)
            net_ids = [iface['network']['id'] for iface in network_cache]
            networks = self._get_available_networks(context,
                                                    instance['project_id'])

        # ensure ports are in preferred network order, and filter out
        # those not attached to one of the provided list of networks
        else:
            net_ids = [n['id'] for n in networks]
        ports = [port for port in ports if port['network_id'] in net_ids]
        _ensure_requested_network_ordering(lambda x: x['network_id'],
                                           ports, net_ids)

        nw_info = network_model.NetworkInfo()
        for port in ports:
            network_IPs = self._nw_info_get_ips(client, port)
            subnets = self._nw_info_get_subnets(context, port, network_IPs)

            devname = "tap" + port['id']
            devname = devname[:network_model.NIC_NAME_LEN]

            network, ovs_interfaceid = self._nw_info_build_network(port,
                                                                   networks,
                                                                   subnets)

            nw_info.append(network_model.VIF(
                id=port['id'],
                address=port['mac_address'],
                network=network,
                type=port.get('binding:vif_type'),
                ovs_interfaceid=ovs_interfaceid,
                devname=devname))
        return nw_info

    def _get_subnets_from_port(self, context, port):
        """Return the subnets for a given port."""

        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, the returned subnets may contain subnets that are
        # not related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []

        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                                address=subnet['gateway_ip'],
                                type='gateway'),
            }

            # attempt to populate DHCP server field
            search_opts = {'network_id': subnet['network_id'],
                           'device_owner': 'network:dhcp'}
            data = neutronv2.get_client(context).list_ports(**search_opts)
            dhcp_ports = data.get('ports', [])
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break

            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))

            # TODO(gongysh) get the routes for this subnet
            subnets.append(subnet_object)
        return subnets

    def get_dns_domains(self, context):
        """Return a list of available dns domains.

        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()

    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()

    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()

    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()

    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()

    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()

    def create_public_dns_domain(self, context, domain, project=None):
        """Create a private DNS domain with optional nova project."""
        raise NotImplementedError()
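For orientation only, a hypothetical caller of the API class above might look like the following; ctxt, instance, net_id and port_id are assumed to exist, and the (network_id, fixed_ip, port_id) tuple layout mirrors the parsing loop at the top of allocate_for_instance:

# Hypothetical caller sketch (not from the source project): allocate VIFs on
# one known network plus one pre-created port, then release everything.
api = API()
nw_info = api.allocate_for_instance(
    ctxt, instance,
    requested_networks=[(net_id, None, None),    # create a fresh port on net_id
                        (None, None, port_id)],  # reuse an existing port
    security_groups=['default'])
# ... use the instance ...
api.deallocate_for_instance(ctxt, instance)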
Example #22
 def __init__(self, *args, **kwargs):
     super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
     self.compute_api = compute.API()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #23
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver(
             skip_policy_check=True))
     self.compute_api = compute.API(
         security_group_api=self.security_group_api, skip_policy_check=True)
Example #24
 def test_sg_custom(self):
     self.flags(security_group_api=
                'nova.tests.unit.network.test_config.FileATicket')
     driver = sgapi.get_openstack_security_group_driver()
     self.assertIsInstance(driver, FileATicket)
Example #25
 def __init__(self):
     super(SecurityGroupDefaultRulesController, self).__init__()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #26
 def __init__(self):
     self.security_group_api = openstack_driver.get_openstack_security_group_driver(skip_policy_check=True)
Example #27
    def __init__(self, instance, address=None, content=None, extra_md=None,
                 network_info=None, vd_driver=None, network_metadata=None,
                 request_context=None):
        """Creation of this object should basically cover all time consuming
        collection.  Methods after that should not cause time delays due to
        network operations or lengthy cpu operations.

        The user should then get a single instance and make multiple method
        calls on it.
        """
        if not content:
            content = []

        ctxt = context.get_admin_context()

        # The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
        self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
        self.instance = instance
        self.extra_md = extra_md

        self.availability_zone = az.get_instance_availability_zone(ctxt,
                                                                   instance)

        secgroup_api = openstack_driver.get_openstack_security_group_driver()
        self.security_groups = secgroup_api.get_instance_security_groups(
            ctxt, instance)

        self.mappings = _format_instance_mapping(ctxt, instance)

        if instance.user_data is not None:
            self.userdata_raw = base64.b64decode(instance.user_data)
        else:
            self.userdata_raw = None

        self.address = address

        # expose instance metadata.
        self.launch_metadata = utils.instance_meta(instance)

        self.password = password.extract_password(instance)

        self.uuid = instance.uuid

        self.content = {}
        self.files = []

        # get network info, and the rendered network template
        if network_info is None:
            network_info = instance.info_cache.network_info

        # expose network metadata
        if network_metadata is None:
            self.network_metadata = netutils.get_network_metadata(network_info)
        else:
            self.network_metadata = network_metadata

        self.ip_info = \
                ec2utils.get_ip_info_for_instance_from_nw_info(network_info)

        self.network_config = None
        cfg = netutils.get_injected_network_template(network_info)

        if cfg:
            key = "%04i" % len(self.content)
            self.content[key] = cfg
            self.network_config = {"name": "network_config",
                'content_path': "/%s/%s" % (CONTENT_DIR, key)}

        # 'content' is passed in from the configdrive code in
        # nova/virt/libvirt/driver.py.  That's how we get the injected files
        # (personalities) in. AFAIK they're not stored in the db at all,
        # so are not available later (web service metadata time).
        for (path, contents) in content:
            key = "%04i" % len(self.content)
            self.files.append({'path': path,
                'content_path': "/%s/%s" % (CONTENT_DIR, key)})
            self.content[key] = contents

        if vd_driver is None:
            vdclass = importutils.import_class(CONF.vendordata_driver)
        else:
            vdclass = vd_driver

        self.vddriver = vdclass(instance=instance, address=address,
                                extra_md=extra_md, network_info=network_info)

        self.route_configuration = None

        # NOTE(mikal): the decision to not pass extra_md here like we
        # do to the StaticJSON driver is deliberate. extra_md will
        # contain the admin password for the instance, and we shouldn't
        # pass that to external services.
        self.vendordata_providers = {
            'StaticJSON': vendordata_json.JsonFileVendorData(
                instance=instance, address=address,
                extra_md=extra_md, network_info=network_info),
            'DynamicJSON': vendordata_dynamic.DynamicVendorData(
                instance=instance, address=address,
                network_info=network_info, context=request_context)
        }
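The docstring above spells out the intended usage pattern: build the object once (all the expensive collection happens in __init__) and then serve repeated, cheap reads from it. A hypothetical caller would look roughly like the sketch below; the instance object, the metadata paths and the lookup() call are assumptions about the surrounding class, not something shown in this excerpt.

# Hypothetical usage sketch: construct once, query many times.
md = InstanceMetadata(instance, content=[], extra_md={})
for path in ('/openstack/latest/meta_data.json',
             '/2009-04-04/meta-data/security-groups'):
    # Repeated lookups are meant to be cheap: no further network round-trips.
    print(md.lookup(path))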
Example #28
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self.compute_api = compute.API(
         security_group_api=self.security_group_api)
Example #29
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver(
             skip_policy_check=True))
     self.compute_api = compute.API(
         security_group_api=self.security_group_api, skip_policy_check=True)
Example #30
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #31
 def test_sg_nova(self):
     self.flags(security_group_api='nova')
     driver = sgapi.get_openstack_security_group_driver()
     self.assertIsInstance(
         driver,
         nova.compute.api.SecurityGroupAPI)
Example #32
 def __init__(self):
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
Example #33
 def __init__(self):
     super(SecurityGroupControllerBase, self).__init__()
     self.security_group_api = (
         openstack_driver.get_openstack_security_group_driver())
     self.compute_api = compute.API(
         security_group_api=self.security_group_api)
Example #34
 def test_sg_neutron(self):
     self.flags(security_group_api='neutron')
     driver = sgapi.get_openstack_security_group_driver()
     self.assertIsInstance(
         driver,
         nova.network.security_group.neutron_driver.SecurityGroupAPI)
Example #35
 def test_sg_neutron(self):
     self.flags(security_group_api='neutron')
     driver = sgapi.get_openstack_security_group_driver()
     self.assertIsInstance(
         driver,
         nova.network.security_group.neutron_driver.SecurityGroupAPI)
Example #36
 def test_sg_custom(self):
     self.flags(
         security_group_api='nova.tests.unit.network.test_config.FileATicket'
     )
     driver = sgapi.get_openstack_security_group_driver()
     self.assertIsInstance(driver, FileATicket)
Example #37
 def test_sg_nova(self):
     self.flags(security_group_api='nova')
     driver = sgapi.get_openstack_security_group_driver()
     self.assertIsInstance(driver, nova.compute.api.SecurityGroupAPI)
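Taken together, this test and the 'neutron' and custom-class variants above show that the security_group_api flag decides which driver class the factory returns: short aliases resolve to built-in drivers, while anything else is treated as a full dotted path. A rough, hedged sketch of that selection logic (the alias table and the option registration are assumptions for illustration, not nova's code):

# Hedged sketch of flag-driven driver selection.
from oslo_config import cfg
from oslo_utils import importutils

CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('security_group_api', default='nova'))

_ALIASES = {
    'nova': 'nova.compute.api.SecurityGroupAPI',
    'neutron': 'nova.network.security_group.neutron_driver.SecurityGroupAPI',
}


def resolve_driver_path():
    # Known aliases map to bundled drivers; any other value is assumed to be
    # a full dotted path, which is what the FileATicket custom-driver test uses.
    name = CONF.security_group_api
    return _ALIASES.get(name, name)


def load_driver():
    # Instantiating requires the target package to be importable.
    return importutils.import_object(resolve_driver_path())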
Example #38
 def __init__(self, *args, **kwargs):
     super(API, self).__init__(*args, **kwargs)
     self._secgroup_service = openstack_driver.get_openstack_security_group_driver()
     network_api.API()._register_callback(base_api._callback_reasons.pre_delete, self.delete_network_firewalls)