예제 #1
0
def get_openstack_security_group_driver(skip_policy_check=False):
    """Instantiate and return the configured security group driver.

    The "patron" API name maps to the nova driver, neutron-backed
    security groups map to the neutron driver, and any other value is
    treated as a dotted path to a driver class.

    :param skip_policy_check: forwarded to the driver constructor
    """
    if CONF.security_group_api.lower() == "patron":
        driver_path = NOVA_DRIVER
    elif is_neutron_security_groups():
        driver_path = NEUTRON_DRIVER
    else:
        driver_path = CONF.security_group_api
    return importutils.import_object(driver_path,
                                     skip_policy_check=skip_policy_check)
예제 #2
0
def get_openstack_security_group_driver():
    """Instantiate and return the configured security group driver.

    Neutron-backed security groups win; otherwise 'nova' selects the
    nova driver and anything else is imported as a dotted class path.
    """
    if is_neutron_security_groups():
        path = NEUTRON_DRIVER
    elif CONF.security_group_api.lower() == 'nova':
        path = NOVA_DRIVER
    else:
        path = CONF.security_group_api
    return importutils.import_object(path)
예제 #3
0
파일: api.py 프로젝트: lyarwood/nova
    def instance_for_format(image, mountdir, partition):
        """Get a Mount instance for the image type

        :param image: instance of nova.virt.image.model.Image
        :param mountdir: path to mount the image at
        :param partition: partition number to mount
        """
        LOG.debug(
            "Instance for format image=%(image)s " "mountdir=%(mountdir)s partition=%(partition)s",
            {"image": image, "mountdir": mountdir, "partition": partition},
        )

        if isinstance(image, imgmodel.LocalFileImage):
            # Raw local files can use a loop device; any other file
            # format goes through the qemu network block device.
            if image.format == imgmodel.FORMAT_RAW:
                LOG.debug("Using LoopMount")
                mount_class = "nova.virt.disk.mount.loop.LoopMount"
            else:
                LOG.debug("Using NbdMount")
                mount_class = "nova.virt.disk.mount.nbd.NbdMount"
        elif isinstance(image, imgmodel.LocalBlockImage):
            LOG.debug("Using BlockMount")
            mount_class = "nova.virt.disk.mount.block.BlockMount"
        else:
            # TODO(berrange) We could mount RBDImage directly
            # using kernel RBD block dev support.
            #
            # This is left as an enhancement for future
            # motivated developers todo, since raising
            # an exception is on par with what this
            # code did historically
            raise exception.UnsupportedImageModel(image.__class__.__name__)

        return importutils.import_object(mount_class, image, mountdir, partition)
예제 #4
0
파일: api.py 프로젝트: dtroyer/nova
    def instance_for_image(imgfile, imgfmt, partition):
        """Return a VFS instance for the image, preferring guestfs.

        VFSGuestFS is tried first; if libguestfs cannot be imported the
        code falls back to VFSLocalFS.  A capability-check failure on a
        successfully constructed guestfs VFS is re-raised to the caller.
        """
        LOG.debug("Instance for image imgfile=%(imgfile)s "
                  "imgfmt=%(imgfmt)s partition=%(partition)s",
                  {'imgfile': imgfile, 'imgfmt': imgfmt,
                   'partition': partition})

        vfs = None
        try:
            LOG.debug("Using primary VFSGuestFS")
            vfs = importutils.import_object(
                "nova.virt.disk.vfs.guestfs.VFSGuestFS",
                imgfile, imgfmt, partition)
            if not VFS.guestfs_ready:
                # Inspect for capabilities; remember the result only
                # when inspection succeeds.
                vfs.inspect_capabilities()
                VFS.guestfs_ready = True
            return vfs
        except exception.NovaException:
            if vfs is not None:
                # libguestfs loaded but the capability check failed;
                # surface that error rather than silently falling back.
                raise
            LOG.info(_LI("Unable to import guestfs, "
                         "falling back to VFSLocalFS"))

        return importutils.import_object(
            "nova.virt.disk.vfs.localfs.VFSLocalFS",
            imgfile, imgfmt, partition)
예제 #5
0
 def setUp(self):
     """Create the ML2 core plugin, segments plugin and admin context."""
     super(TestAutoScheduleSegments, self).setUp()
     ml2_path = 'neutron.plugins.ml2.plugin.Ml2Plugin'
     self.plugin = importutils.import_object(ml2_path)
     self.segments_plugin = importutils.import_object(
         'neutron.services.segments.plugin.Plugin')
     self.ctx = context.get_admin_context()
예제 #6
0
    def _init_bigip_managers(self):
        """Load optional binding drivers and the BIG-IP helper objects."""

        if self.conf.vlan_binding_driver:
            # Optional driver: an import failure is logged, not fatal.
            try:
                self.vlan_binding = importutils.import_object(
                    self.conf.vlan_binding_driver, self.conf, self)
            except ImportError:
                LOG.error('Failed to import VLAN binding driver: %s'
                          % self.conf.vlan_binding_driver)

        if self.conf.l3_binding_driver:
            try:
                self.l3_binding = importutils.import_object(
                    self.conf.l3_binding_driver, self.conf, self)
            except ImportError:
                LOG.error('Failed to import L3 binding driver: %s'
                          % self.conf.l3_binding_driver)
        else:
            LOG.debug('No L3 binding driver configured.'
                      ' No L3 binding will be done.')

        self.service_adapter = ServiceModelAdapter(self.conf)
        self.tenant_manager = BigipTenantManager(self.conf, self)
        self.cluster_manager = ClusterManager()
        self.system_helper = SystemHelper()
        self.lbaas_builder = LBaaSBuilder(self.conf, self)

        # Global routed mode needs no L2/L3 network plumbing.
        self.network_builder = (
            None if self.conf.f5_global_routed_mode
            else NetworkServiceBuilder(self.conf,
                                       self,
                                       self.conf.f5_global_routed_mode))
예제 #7
0
파일: api.py 프로젝트: Juniper/nova
    def instance_for_device(image, mountdir, partition, device):
        """Get a Mount instance for the device type

        :param image: instance of nova.virt.image.model.Image
        :param mountdir: path to mount the image at
        :param partition: partition number to mount
        :param device: mounted device path
        """

        LOG.debug("Instance for device image=%(image)s "
                  "mountdir=%(mountdir)s partition=%(partition)s "
                  "device=%(device)s",
                  {'image': image, 'mountdir': mountdir,
                   'partition': partition, 'device': device})

        # Choose the mount helper from the device path itself.
        if "loop" in device:
            LOG.debug("Using LoopMount")
            mount_class = "nova.virt.disk.mount.loop.LoopMount"
        elif "nbd" in device:
            LOG.debug("Using NbdMount")
            mount_class = "nova.virt.disk.mount.nbd.NbdMount"
        else:
            LOG.debug("Using BlockMount")
            mount_class = "nova.virt.disk.mount.block.BlockMount"
        return importutils.import_object(mount_class,
                                         image, mountdir, partition, device)
    def __init__(self, plugin=None, env=None):
        """Driver initialization."""
        if not plugin:
            LOG.error('Required LBaaS Driver and Core Driver Missing')
            sys.exit(1)

        self.plugin = plugin
        self.env = env

        # Per-resource managers handling LBaaS object lifecycles.
        for attr, manager_cls in (
                ('loadbalancer', LoadBalancerManager),
                ('listener', ListenerManager),
                ('pool', PoolManager),
                ('member', MemberManager),
                ('healthmonitor', HealthMonitorManager)):
            setattr(self, attr, manager_cls(self))

        # what scheduler to use for pool selection
        self.scheduler = importutils.import_object(
            cfg.CONF.f5_loadbalancer_pool_scheduler_driver_v2)

        self.service_builder = importutils.import_object(
            cfg.CONF.f5_loadbalancer_service_builder_v2, self)

        self.agent_rpc = agent_rpc.LBaaSv2AgentRPC(self)
        self.plugin_rpc = plugin_rpc.LBaaSv2PluginCallbacksRPC(self)

        # Register this agent RPC with the neutron agent scheduler
        # mixin's agent_notifiers dictionary for its environment.
        self.plugin.agent_notifiers.update(
            {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})
예제 #9
0
    def __init__(self):
        """Initialize Brocade Plugin.

        Specify switch address and db configuration.
        """

        super(BrocadePluginV2, self).__init__()
        self.supported_extension_aliases = [
            "binding",
            "security-group",
            "external-net",
            "router",
            "extraroute",
            "agent",
            "l3_agent_scheduler",
            "dhcp_agent_scheduler",
        ]

        self.physical_interface = (
            cfg.CONF.PHYSICAL_INTERFACE.physical_interface)
        self.base_binding_dict = self._get_base_binding_dict()
        portbindings_base.register_port_dict_function()
        self.ctxt = n_context.get_admin_context()
        self._vlan_bitmap = vbm.VlanBitmap(self.ctxt)
        self._setup_rpc()
        # Schedulers come from configurable dotted driver paths.
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver)
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)
        self.brocade_init()
        self.start_periodic_dhcp_agent_status_check()
예제 #10
0
def load_auth_methods():
    """Populate AUTH_METHODS from the configured auth plugin list.

    Each entry in CONF.auth.methods is either a method name (resolved to
    a class via ``CONF.auth.<name>``) or, when it contains a '.', a
    dotted class path imported directly (deprecated).  Idempotent: does
    nothing once AUTH_PLUGINS_LOADED is set.

    :raises ValueError: if a class-name plugin lacks a ``method``
        attribute, or two plugins register the same method name.
    """
    global AUTH_PLUGINS_LOADED

    if AUTH_PLUGINS_LOADED:
        # Only try and load methods a single time.
        return
    # config.setup_authentication should be idempotent, call it to ensure we
    # have setup all the appropriate configuration options we may need.
    config.setup_authentication()
    for plugin in CONF.auth.methods:
        if '.' in plugin:
            # NOTE(morganfainberg): if '.' is in the plugin name, it should be
            # imported rather than used as a plugin identifier.
            plugin_class = plugin
            driver = importutils.import_object(plugin)
            if not hasattr(driver, 'method'):
                # Bug fix: interpolate the class name into the message
                # with % instead of passing it as a stray second
                # ValueError argument (matching the raise below).
                raise ValueError(_('Cannot load an auth-plugin by class-name '
                                   'without a "method" attribute defined: %s')
                                 % plugin_class)

            LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
            plugin_name = driver.method
        else:
            plugin_name = plugin
            plugin_class = CONF.auth.get(plugin)
            driver = importutils.import_object(plugin_class)
        if plugin_name in AUTH_METHODS:
            raise ValueError(_('Auth plugin %(plugin)s is requesting '
                               'previously registered method %(method)s') %
                             {'plugin': plugin_class, 'method': driver.method})
        AUTH_METHODS[plugin_name] = driver
    AUTH_PLUGINS_LOADED = True
예제 #11
0
 def setUp(self):
     """Configure the weight scheduler on a freshly loaded ML2 plugin."""
     super(DHCPAgentWeightSchedulerTestCase, self).setUp()
     weight_scheduler = "neutron.scheduler.dhcp_agent_scheduler.WeightScheduler"
     cfg.CONF.set_override("network_scheduler_driver", weight_scheduler)
     self.plugin = importutils.import_object(
         "neutron.plugins.ml2.plugin.Ml2Plugin")
     # Let every host access every network during scheduling.
     mock.patch.object(
         self.plugin, "filter_hosts_with_network_access",
         side_effect=lambda context, network_id, hosts: hosts).start()
     self.plugin.network_scheduler = importutils.import_object(weight_scheduler)
     cfg.CONF.set_override("dhcp_load_type", "networks")
     self.ctx = context.get_admin_context()
예제 #12
0
 def __init__(self):
     """Set up a network manager that drives l3 via RPC, not locally."""
     super(LocalManager, self).__init__()
     # NOTE(vish): setting the host to none ensures that the actual
     #             l3driver commands for l3 are done via rpc.
     self.host = None
     self.servicegroup_api = servicegroup.API()
     self.network_rpcapi = network_rpcapi.NetworkAPI()
     # DNS managers are configurable dotted driver paths.
     self.floating_dns_manager = importutils.import_object(
         CONF.floating_ip_dns_manager)
     self.instance_dns_manager = importutils.import_object(
         CONF.instance_dns_manager)
     self.notifier = rpc.get_notifier('network', CONF.host)
예제 #13
0
파일: api.py 프로젝트: dtroyer/nova
 def instance_for_format(imgfile, mountdir, partition, imgfmt):
     """Return a Mount helper appropriate for the image format."""
     LOG.debug(
         "Instance for format imgfile=%(imgfile)s "
         "mountdir=%(mountdir)s partition=%(partition)s "
         "imgfmt=%(imgfmt)s",
         {"imgfile": imgfile, "mountdir": mountdir, "partition": partition, "imgfmt": imgfmt},
     )
     # Raw images can be loop mounted; everything else goes through nbd.
     if imgfmt == "raw":
         LOG.debug("Using LoopMount")
         mount_cls = "nova.virt.disk.mount.loop.LoopMount"
     else:
         LOG.debug("Using NbdMount")
         mount_cls = "nova.virt.disk.mount.nbd.NbdMount"
     return importutils.import_object(mount_cls, imgfile, mountdir, partition)
    def get_connector(self, fabric, sb_connector):
        """Returns Device Connector.

        Factory method to create and return
        correct SB connector object based on the protocol
        """

        fabric_ip = fabric.safe_get('fc_fabric_address')
        client = self.sb_conn_map.get(fabric_ip)

        if not client:
            fabric_user = fabric.safe_get('fc_fabric_user')
            fabric_pwd = fabric.safe_get('fc_fabric_password')
            fabric_port = fabric.safe_get('fc_fabric_port')
            fc_vfid = fabric.safe_get('fc_virtual_fabric_id')
            fabric_ssh_cert_path = fabric.safe_get('fc_fabric_ssh_cert_path')

            LOG.debug("Client not found. Creating connection client for"
                      " %(ip)s with %(connector)s protocol "
                      "for the user %(user)s at port %(port)s.",
                      {'ip': fabric_ip,
                       'connector': sb_connector,
                       'user': fabric_user,
                       'port': fabric_port,
                       'vf_id': fc_vfid})

            # HTTP(S) selects the REST client; anything else falls back
            # to the SSH/CLI client.
            if sb_connector.lower() in (fc_zone_constants.HTTP,
                                        fc_zone_constants.HTTPS):
                client = importutils.import_object(
                    "jacket.storage.zonemanager.drivers.brocade."
                    "brcd_http_fc_zone_client.BrcdHTTPFCZoneClient",
                    ipaddress=fabric_ip,
                    username=fabric_user,
                    password=fabric_pwd,
                    port=fabric_port,
                    vfid=fc_vfid,
                    protocol=sb_connector)
            else:
                client = importutils.import_object(
                    "jacket.storage.zonemanager.drivers.brocade."
                    "brcd_fc_zone_client_cli.BrcdFCZoneClientCLI",
                    ipaddress=fabric_ip,
                    username=fabric_user,
                    password=fabric_pwd,
                    key=fabric_ssh_cert_path,
                    port=fabric_port)
            # Cache so later calls for the same fabric reuse the client.
            self.sb_conn_map[fabric_ip] = client
        return client
 def setUp(self):
     """Install the weight scheduler on a freshly loaded ML2 core plugin."""
     super(DHCPAgentWeightSchedulerTestCase, self).setUp()
     self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
     cfg.CONF.set_override(
         "network_scheduler_driver",
         'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
     self.plugin = importutils.import_object(
         'neutron.plugins.ml2.plugin.Ml2Plugin')
     # Loading the plugin must have started the periodic DHCP check.
     self.assertEqual(1, self.patched_dhcp_periodic.call_count)
     self.plugin.network_scheduler = importutils.import_object(
         'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
     cfg.CONF.set_override('dhcp_agents_per_network', 1)
     cfg.CONF.set_override("dhcp_load_type", "networks")
 def __init__(self):
     """Register Cisco extension paths and load the router schedulers."""
     self.setup_rpc()
     basepath = networking_cisco.plugins.__path__[0]
     ext_paths = [basepath + '/cisco/extensions']
     cp = cfg.CONF.api_extensions_path
     # Append only the extension paths not already configured.
     missing = [path for path in ext_paths if cp.find(path) == -1]
     to_add = ''.join(':' + path for path in missing)
     if to_add != "":
         cfg.CONF.set_override('api_extensions_path', cp + to_add)
     self.router_scheduler = importutils.import_object(
         cfg.CONF.routing.router_type_aware_scheduler_driver)
     self.l3agent_scheduler = importutils.import_object(
         cfg.CONF.router_scheduler_driver)
 def setUp(self):
     """Point the existing driver plugin at the weight scheduler."""
     super(DHCPAgentWeightSchedulerTestCase, self).setUp()
     cfg.CONF.set_override(
         'network_scheduler_driver',
         'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
     self.plugin = self.driver
     # Treat every host as having access to every network.
     mock.patch.object(
         self.plugin, 'filter_hosts_with_network_access',
         side_effect=lambda context, network_id, hosts: hosts).start()
     self.plugin.network_scheduler = importutils.import_object(
         'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
     cfg.CONF.set_override("dhcp_load_type", "networks")
     self.segments_plugin = importutils.import_object(
         'neutron.services.segments.plugin.Plugin')
     self.ctx = context.get_admin_context()
예제 #18
0
파일: api.py 프로젝트: dtroyer/nova
 def instance_for_device(imgfile, mountdir, partition, device):
     """Return a Mount helper chosen from the device path."""
     LOG.debug(
         "Instance for device imgfile=%(imgfile)s "
         "mountdir=%(mountdir)s partition=%(partition)s "
         "device=%(device)s",
         {"imgfile": imgfile, "mountdir": mountdir, "partition": partition, "device": device},
     )
     # Loop devices use LoopMount; anything else goes through nbd.
     if "loop" in device:
         LOG.debug("Using LoopMount")
         cls_path = "nova.virt.disk.mount.loop.LoopMount"
     else:
         LOG.debug("Using NbdMount")
         cls_path = "nova.virt.disk.mount.nbd.NbdMount"
     return importutils.import_object(cls_path, imgfile, mountdir, partition, device)
예제 #19
0
def import_object(conf, driver_info, db):
    """Import a class and return an instance of it."""
    # NOTE(review): LANG=C presumably keeps HORCM CLI output in a
    # parseable locale — confirm against the driver code.
    os.environ['LANG'] = 'C'
    cli = _DRIVERS.get('HORCM')
    driver_path = '.'.join([_DRIVER_DIR, cli[driver_info['proto']]])
    return importutils.import_object(driver_path, conf, driver_info, db)
예제 #20
0
 def __init__(self, scheduler_driver=None, service_name=None,
              *args, **kwargs):
     """Load the given scheduler driver, or the configured default."""
     driver_path = scheduler_driver or CONF.scheduler_driver
     self.driver = importutils.import_object(driver_path)
     super(SchedulerManager, self).__init__(*args, **kwargs)
     self._startup_delay = True
예제 #21
0
    def __init__(self, conf, f5_global_routed_mode):
        """Cache config, load helpers and parse physical network mappings."""
        self.conf = conf
        self.f5_global_routed_mode = f5_global_routed_mode
        self.vlan_binding = None
        self.fdb_connector = None
        self.vcmp_manager = None
        self.interface_mapping = {}
        self.tagging_mapping = {}
        self.system_helper = SystemHelper()
        self.network_helper = NetworkHelper()
        self.service_adapter = ServiceModelAdapter(conf)

        if not f5_global_routed_mode:
            self.fdb_connector = FDBConnectorML2(conf)

        if self.conf.vlan_binding_driver:
            # Optional driver: an import failure is logged, not fatal.
            try:
                self.vlan_binding = importutils.import_object(
                    self.conf.vlan_binding_driver, self.conf, self)
            except ImportError:
                LOG.error('Failed to import VLAN binding driver: %s'
                          % self.conf.vlan_binding_driver)

        # map format is   phynet:interface:tagged
        for entry in self.conf.f5_external_physical_mappings:
            parts = entry.split(':')
            net_key = str(parts[0]).strip()
            if len(parts) > 3:
                # A fourth field extends the lookup key.
                net_key = net_key + ':' + str(parts[3]).strip()
            self.interface_mapping[net_key] = str(parts[1]).strip()
            self.tagging_mapping[net_key] = str(parts[2]).strip()
            LOG.debug('physical_network %s = interface %s, tagged %s'
                      % (net_key, parts[1], parts[2]))
예제 #22
0
    def test_compute_manager(self):
        """Image cache manager pass should query instances via the db API."""
        was = {'called': False}

        def fake_get_all_by_filters(context, *args, **kwargs):
            # Record the call and hand back two fake DB instances.
            was['called'] = True
            return [fake_instance.fake_db_instance(image_ref='1',
                                                   uuid=x,
                                                   name=x,
                                                   vm_state='',
                                                   task_state='')
                    for x in xrange(2)]

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            self.stubs.Set(db, 'instance_get_all_by_filters',
                           fake_get_all_by_filters)
            compute = importutils.import_object(CONF.compute_manager)
            self.flags(use_local=True, group='conductor')
            compute.conductor_api = conductor.API()
            compute._run_image_cache_manager_pass(None)
            self.assertTrue(was['called'])
예제 #23
0
 def _setup_dhcp(self):
     """Initialize components to support DHCP."""
     # Centralized IPv6 DHCP is optional; nothing to do otherwise.
     if not cfg.CONF.df.use_centralized_ipv6_DHCP:
         return
     self.network_scheduler = importutils.import_object(
         cfg.CONF.network_scheduler_driver)
     self.start_periodic_dhcp_agent_status_check()
예제 #24
0
파일: manager.py 프로젝트: aawm/manila
    def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
        """Load the driver from args, or from flags."""
        self.configuration = manila.share.configuration.Configuration(
            share_manager_opts,
            config_group=service_name)
        self._verify_unused_share_server_cleanup_interval()
        super(ShareManager, self).__init__(service_name='share',
                                           *args, **kwargs)

        if not share_driver:
            share_driver = self.configuration.share_driver
        if share_driver in MAPPING:
            # Translate legacy driver paths, warning so deployers
            # update their configuration.
            msg_args = {'old': share_driver, 'new': MAPPING[share_driver]}
            LOG.warning(_LW("Driver path %(old)s is deprecated, update your "
                            "configuration to the new path %(new)s"),
                        msg_args)
            share_driver = MAPPING[share_driver]

        ctxt = context.get_admin_context()
        private_storage = drivers_private_data.DriverPrivateData(
            context=ctxt, backend_host=self.host,
            config_group=self.configuration.config_group)

        self.driver = importutils.import_object(
            share_driver,
            private_storage=private_storage,
            configuration=self.configuration)
 def setUp(self):
     """Load the BGP chance scheduler, with this case acting as plugin."""
     super(TestBgpAgentFilter, self).setUp()
     scheduler_path = ('neutron_dynamic_routing.services.bgp.scheduler.'
                       'bgp_dragent_scheduler.ChanceScheduler')
     self.bgp_drscheduler = importutils.import_object(scheduler_path)
     self.plugin = self
예제 #26
0
 def _setup_volume_drivers(self):
     """Instantiate a volume manager per enabled backend (or one default)."""
     if not CONF.enabled_backends:
         default = importutils.import_object(CONF.volume_manager)
         LOG.debug("Registering default backend %s.", default)
         self.volume_managers["default"] = default
         return
     for backend in CONF.enabled_backends:
         # Each backend gets its own host@backend service identity.
         host = "%s@%s" % (CONF.host, backend)
         mgr = importutils.import_object(
             CONF.volume_manager, host=host, service_name=backend)
         backend_name = mgr.configuration.safe_get("volume_backend_name")
         LOG.debug(
             "Registering backend %(backend)s (host=%(host)s " "backend_name=%(backend_name)s).",
             {"backend": backend, "host": host, "backend_name": backend_name},
         )
         self.volume_managers[backend] = mgr
예제 #27
0
    def setUp(self):
        """Register the flow classifier plugin/extension and the OVS driver."""
        flowclassifier_plugin = (
            test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)

        fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
            flowclassifier.FLOW_CLASSIFIER_EXT]
        fdb.FlowClassifierDbPlugin.path_prefix = (
            flowclassifier.FLOW_CLASSIFIER_PREFIX)
        super(OVSFlowClassifierDriverTestCase, self).setUp(
            ext_mgr=None,
            plugin=None,
            service_plugins={
                flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin})
        self.flowclassifier_plugin = importutils.import_object(
            flowclassifier_plugin)
        ext_mgr = api_ext.PluginAwareExtensionManager(
            test_flowclassifier_db.extensions_path,
            {flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin})
        app = config.load_paste_app('extensions_test_app')
        self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.ctx = context.get_admin_context()
        self.driver = driver.OVSFlowClassifierDriver()
        self.driver.initialize()
 def _load_drivers(self):
     """Loads back end network provision driver from configuration.

     :raises SystemExit: if no provisioning driver is configured.
     """
     driver_obj = self.conf.ml2_hpe.net_provisioning_driver
     if not driver_obj:
         # Bug fix: the adjacent literals previously concatenated to
         # "...drivermust be specified" (missing space).
         raise SystemExit(_('A network provisioning driver '
                            'must be specified'))
     self.np_driver = importutils.import_object(driver_obj)
예제 #29
0
 def setUp(self):
     """Create the context, L2GW mixin and DB plugin used by the tests."""
     super(L2GWTestCase, self).setUp()
     self.ctx = context.get_admin_context()
     self.mixin = l2gateway_db.L2GatewayMixin()
     self.gw_resource = constants.L2_GATEWAYS
     self.con_resource = constants.CONNECTION_RESOURCE_NAME
     self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
    def brocade_init(self):
        """Brocade specific initialization for this class."""

        ml2_conf = cfg.CONF.ml2_brocade
        self._switch = {
            'address': ml2_conf.address,
            'username': ml2_conf.username,
            'password': ml2_conf.password,
            'ostype': ml2_conf.ostype,
            'osversion': ml2_conf.osversion}

        self._driver = importutils.import_object(NOS_DRIVER)

        # Detect version of NOS on the switch when set to autodetect.
        osversion = self._switch['osversion']
        if osversion == "autodetect":
            osversion = self._driver.get_nos_version(
                self._switch['address'],
                self._switch['username'],
                self._switch['password'])

        virtual_fabric_enabled = self._driver.is_virtual_fabric_enabled(
            self._switch['address'],
            self._switch['username'],
            self._switch['password'])

        if virtual_fabric_enabled:
            LOG.debug("Virtual Fabric: enabled")
        else:
            LOG.debug("Virtual Fabric: not enabled")

        self.set_features_enabled(osversion, virtual_fabric_enabled)
        self._driver.close_session()
 def __init__(self):
     """Load the configured network scheduler, then run base init."""
     # NOTE(review): the scheduler is assigned before super().__init__
     # runs — presumably the base constructor relies on it; keep order.
     self.network_scheduler = importutils.import_object(
         cfg.CONF.network_scheduler_driver)
     super(TestAgentSchedCorePlugin, self).__init__()
예제 #32
0
 def __init__(self, console_driver=None, *args, **kwargs):
     """Load the configured console driver and the compute RPC API."""
     # NOTE(review): the console_driver argument is accepted but
     # ignored; the driver always comes from CONF — confirm intent.
     self.driver = importutils.import_object(CONF.console_driver)
     self.compute_rpcapi = compute_rpcapi.ComputeAPI()
     super(ConsoleVMRCManager, self).__init__(*args, **kwargs)
예제 #33
0
 def _setup_dhcp(self):
     """Initialize components to support DHCP."""
     # Only needed when centralized IPv6 DHCP is enabled.
     if not cfg.CONF.df.use_centralized_ipv6_DHCP:
         return
     self.network_scheduler = importutils.import_object(
         cfg.CONF.network_scheduler_driver)
     self.start_periodic_dhcp_agent_status_check()
 def setUp(self):
     """Build two networks and three ports used by the HA-IP binding tests."""
     super(PortToHAIPAddressBindingTestCase, self).setUp()
     self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
     self.context = context.get_admin_context()
     # Two isolated test networks; port1 and port1_2 share net1,
     # port2 lives on net2.
     self.net1_data = {
         'network': {
             'id': 'fake-net1-id',
             'name': 'net1',
             'admin_state_up': True,
             'tenant_id': 'test-tenant',
             'shared': False
         }
     }
     self.net2_data = {
         'network': {
             'id': 'fake-net2-id',
             'name': 'net2',
             'admin_state_up': True,
             'tenant_id': 'test-tenant',
             'shared': False
         }
     }
     self.port1_data = {
         'port': {
             'id': 'fake-port1-id',
             'name': 'port1',
             'network_id': 'fake-net1-id',
             'tenant_id': 'test-tenant',
             'device_id': 'fake_device',
             'device_owner': 'fake_owner',
             'fixed_ips': [],
             'mac_address': 'fake-mac',
             'admin_state_up': True
         }
     }
     # Port that is in the same network as port_1
     self.port1_2_data = {
         'port': {
             'id': 'fake-port1-2-id',
             'name': 'port1',
             'network_id': 'fake-net1-id',
             'tenant_id': 'test-tenant',
             'device_id': 'fake_device',
             'device_owner': 'fake_owner',
             'fixed_ips': [],
             'mac_address': 'fake-mac-2',
             'admin_state_up': True
         }
     }
     # Note: same MAC as port1 but on a different network.
     self.port2_data = {
         'port': {
             'id': 'fake-port2-id',
             'name': 'port2',
             'network_id': 'fake-net2-id',
             'tenant_id': 'test-tenant',
             'device_id': 'fake_device',
             'device_owner': 'fake_owner',
             'fixed_ips': [],
             'mac_address': 'fake-mac',
             'admin_state_up': True
         }
     }
     # HA addresses used by the binding assertions.
     self.ha_ip1 = "ha-ip-1"
     self.ha_ip2 = "ha-ip-2"
     # Persist the fixtures through the plugin under test.
     self.plugin.create_network(self.context, self.net1_data)
     self.plugin.create_network(self.context, self.net2_data)
     self.port1 = self.plugin.create_port(self.context, self.port1_data)
     self.port1_2 = self.plugin.create_port(self.context, self.port1_2_data)
     self.port2 = self.plugin.create_port(self.context, self.port2_data)
     self.port_haip = ha.PortForHAIPAddress()
예제 #35
0
 def test_backup_manager_driver_name(self):
     """Test mapping between backup services and backup drivers."""
     # Configure the legacy "services" path; the manager should expose
     # the corresponding "drivers" path as its driver_name.
     self.override_config('backup_driver', "cinder.backup.services.swift")
     backup_mgr = \
         importutils.import_object(CONF.backup_manager)
     self.assertEqual('cinder.backup.drivers.swift', backup_mgr.driver_name)
예제 #36
0
def get_installer(conf, log):
    """Instantiate the installer matching ``conf.installer.type``."""
    installer_path = _installer_name_class_mapping[conf.installer.type]
    return importutils.import_object(installer_path, conf, log)
 def setUp(self):
     """Create the chance DR-agent scheduler and act as our own plugin."""
     super(TestBgpAgentFilter, self).setUp()
     scheduler_path = ('neutron_dynamic_routing.services.bgp.scheduler.'
                       'bgp_dragent_scheduler.ChanceScheduler')
     self.bgp_drscheduler = importutils.import_object(scheduler_path)
     self.plugin = self
예제 #38
0
 def setup_driver(self, config):
     """Load the Cisco FC zone driver with the supplied configuration."""
     driver_path = ('cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver'
                    '.CiscoFCZoneDriver')
     self.driver = importutils.import_object(driver_path,
                                             configuration=config)
예제 #39
0
 def _load_using_import(plugin_name):
     """Instantiate a plugin from its fully qualified class path."""
     plugin = importutils.import_object(plugin_name)
     return plugin
예제 #40
0
 def __init__(self):
     """Instantiate the transport driver named in libvirt config."""
     transport = CONF.libvirt.remote_filesystem_transport
     # e.g. transport "ssh" resolves to "<this module>.SshDriver".
     driver_path = '%s.%sDriver' % (__name__, transport.capitalize())
     self.driver = importutils.import_object(driver_path)
예제 #41
0
    def __init__(self, host, conf=None):
        """Initialize the L3 NAT agent.

        :param host: hostname used when creating the L3 plugin RPC proxy.
        :param conf: optional config object; falls back to the global
            cfg.CONF when not supplied.
        :raises SystemExit: if the configured interface driver cannot be
            imported.
        """
        if conf:
            self.conf = conf
        else:
            self.conf = cfg.CONF
        # Routers hosted by this agent, keyed by router id.
        self.router_info = {}

        self._check_config_params()

        self.process_monitor = external_process.ProcessMonitor(
            config=self.conf, resource_type='router')

        try:
            self.driver = importutils.import_object(self.conf.interface_driver,
                                                    self.conf)
        except Exception:
            LOG.error(_LE("Error importing interface driver "
                          "'%s'"), self.conf.interface_driver)
            # A usable interface driver is mandatory; abort the agent.
            raise SystemExit(1)

        self.context = n_context.get_admin_context_without_session()
        self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
        # Force a full resync on the first processing cycle.
        self.fullsync = True

        # Get the list of service plugins from Neutron Server
        # This is the first place where we contact neutron-server on startup
        # so retry in case its not ready to respond.
        retry_count = 5
        while True:
            retry_count = retry_count - 1
            try:
                self.neutron_service_plugins = (
                    self.plugin_rpc.get_service_plugin_list(self.context))
            except oslo_messaging.RemoteError as e:
                # Server does not expose this RPC method: record the plugin
                # list as unknown instead of failing startup.
                with excutils.save_and_reraise_exception() as ctx:
                    ctx.reraise = False
                    LOG.warning(
                        _LW('l3-agent cannot check service plugins '
                            'enabled at the neutron server when '
                            'startup due to RPC error. It happens '
                            'when the server does not support this '
                            'RPC API. If the error is '
                            'UnsupportedVersion you can ignore this '
                            'warning. Detail message: %s'), e)
                self.neutron_service_plugins = None
            except oslo_messaging.MessagingTimeout as e:
                # Transient timeout: retry until the budget above runs out,
                # then let save_and_reraise_exception re-raise.
                with excutils.save_and_reraise_exception() as ctx:
                    if retry_count > 0:
                        ctx.reraise = False
                        LOG.warning(
                            _LW('l3-agent cannot check service '
                                'plugins enabled on the neutron '
                                'server. Retrying. '
                                'Detail message: %s'), e)
                        continue
            break

        self.namespaces_manager = namespace_manager.NamespaceManager(
            self.conf, self.driver, self.conf.use_namespaces)

        self._queue = queue.RouterProcessingQueue()
        super(L3NATAgent, self).__init__(conf=self.conf)

        self.target_ex_net_id = None
        self.use_ipv6 = ipv6_utils.is_enabled()

        if self.conf.enable_metadata_proxy:
            self.metadata_driver = metadata_driver.MetadataDriver(self)
예제 #42
0
 def setup_driver(self, config):
     """Load the Brocade FC zone driver with the supplied configuration."""
     driver_path = ('cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
                    '.BrcdFCZoneDriver')
     self.driver = importutils.import_object(driver_path,
                                             configuration=config)
예제 #43
0
    def add_connection(self,
                       fabric,
                       initiator_target_map,
                       host_name=None,
                       storage_system=None):
        """Concrete implementation of add_connection.

        Based on zoning policy and state of each I-T pair, list of zone
        members are created and pushed to the fabric to add zones. The
        new zones created or zones updated are activated based on isActivate
        flag set in cinder.conf returned by volume driver after attach
        operation.

        :param fabric: Fabric name from cinder.conf file
        :param initiator_target_map: Mapping of initiator to list of targets
        :param host_name: optional host name used to build friendly zone names
        :param storage_system: optional storage system name used to build
            friendly zone names
        :raises FCZoneDriverException: on unknown zoning policy or any
            failure pushing the zoning configuration to the fabric
        """

        LOG.debug("Add connection for Fabric: %s", fabric)
        LOG.info("CiscoFCZoneDriver - Add connection "
                 "for I-T map: %s", initiator_target_map)
        fabric_ip = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_address')
        fabric_user = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_user')
        fabric_pwd = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_password')
        fabric_port = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_port')
        zoning_policy = self.configuration.zoning_policy
        zoning_policy_fab = self.fabric_configs[fabric].safe_get(
            'cisco_zoning_policy')
        # Per-fabric policy overrides the global one.
        if zoning_policy_fab:
            zoning_policy = zoning_policy_fab

        zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')

        LOG.info("Zoning policy for Fabric %s", zoning_policy)

        statusmap_from_fabric = self.get_zoning_status(fabric_ip, fabric_user,
                                                       fabric_pwd, fabric_port,
                                                       zoning_vsan)

        if statusmap_from_fabric.get('session') == 'none':

            cfgmap_from_fabric = self.get_active_zone_set(
                fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
            zone_names = []
            if cfgmap_from_fabric.get('zones'):
                zone_names = cfgmap_from_fabric['zones'].keys()
            # based on zoning policy, create zone member list and
            # push changes to fabric.
            for initiator_key in initiator_target_map.keys():
                zone_map = {}
                zone_update_map = {}
                initiator = initiator_key.lower()
                t_list = initiator_target_map[initiator_key]
                if zoning_policy == 'initiator-target':
                    for t in t_list:
                        target = t.lower()
                        zone_members = [
                            zm_utils.get_formatted_wwn(initiator),
                            zm_utils.get_formatted_wwn(target)
                        ]
                        zone_name = (driver_utils.get_friendly_zone_name(
                            zoning_policy, initiator, target, host_name,
                            storage_system,
                            self.configuration.cisco_zone_name_prefix,
                            SUPPORTED_CHARS))
                        if (len(cfgmap_from_fabric) == 0
                                or (zone_name not in zone_names)):
                            zone_map[zone_name] = zone_members
                        else:
                            # This is I-T zoning, skip if zone exists.
                            LOG.info(
                                "Zone exists in I-T mode. "
                                "Skipping zone creation %s", zone_name)
                elif zoning_policy == 'initiator':
                    zone_members = [zm_utils.get_formatted_wwn(initiator)]
                    for t in t_list:
                        target = t.lower()
                        zone_members.append(zm_utils.get_formatted_wwn(target))
                    zone_name = (driver_utils.get_friendly_zone_name(
                        zoning_policy, initiator, target, host_name,
                        storage_system,
                        self.configuration.cisco_zone_name_prefix,
                        SUPPORTED_CHARS))

                    # If zone exists, then perform a update_zone and add
                    # new members into existing zone.
                    if zone_name and (zone_name in zone_names):
                        # NOTE: use a list comprehension instead of
                        # filter(); on Python 3 filter() returns a lazy,
                        # always-truthy iterator, which would make the
                        # emptiness check below always pass.
                        zone_members = [
                            member for member in zone_members
                            if member not in
                            cfgmap_from_fabric['zones'][zone_name]]
                        if zone_members:
                            zone_update_map[zone_name] = zone_members
                    else:
                        zone_map[zone_name] = zone_members
                else:
                    msg = _("Zoning Policy: %s, not"
                            " recognized") % zoning_policy
                    LOG.error(msg)
                    raise exception.FCZoneDriverException(msg)

            LOG.info("Zone map to add: %(zone_map)s", {'zone_map': zone_map})
            LOG.info("Zone map to update add: %(zone_update_map)s",
                     {'zone_update_map': zone_update_map})
            if zone_map or zone_update_map:
                conn = None
                try:
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd,
                        port=fabric_port,
                        vsan=zoning_vsan)
                    if zone_map:
                        conn.add_zones(zone_map,
                                       self.configuration.cisco_zone_activate,
                                       zoning_vsan, cfgmap_from_fabric,
                                       statusmap_from_fabric)
                    if zone_update_map:
                        conn.update_zones(
                            zone_update_map,
                            self.configuration.cisco_zone_activate,
                            zoning_vsan, ZoneConstant.ZONE_ADD,
                            cfgmap_from_fabric, statusmap_from_fabric)
                    conn.cleanup()
                except exception.CiscoZoningCliException as cisco_ex:
                    msg = _("Exception: %s") % six.text_type(cisco_ex)
                    raise exception.FCZoneDriverException(msg)
                except Exception:
                    msg = _("Failed to add zoning configuration.")
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                LOG.debug("Zones added successfully: %s", zone_map)
            else:
                LOG.debug("Zones already exist - Initiator Target Map: %s",
                          initiator_target_map)
        else:
            LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
예제 #44
0
def _setup_logging_from_conf(conf, project, version):
    """Configure the root logger's handlers and levels from *conf*.

    Installs file, stderr, journal, eventlog, and syslog handlers as
    enabled by the config object, applies formatters, and sets the
    per-module default log levels.

    :param conf: the oslo.config object driving logging behavior
    :param project: project name passed to the formatters
    :param version: project version passed to the formatters
    """
    log_root = getLogger(None).logger

    # Remove all handlers
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)
    logpath = _get_log_file_path(conf)
    if logpath:
        # On Windows, in-use files cannot be moved or deleted.
        if conf.watch_log_file and platform.system() == 'Linux':
            from oslo_log import watchers
            file_handler = watchers.FastWatchedFileHandler
            filelog = file_handler(logpath)
        elif conf.log_rotation_type.lower() == "interval":
            file_handler = logging.handlers.TimedRotatingFileHandler
            when = conf.log_rotate_interval_type.lower()
            interval_type = LOG_ROTATE_INTERVAL_MAPPING[when]
            # When weekday is configured, "when" has to be a value between
            # 'w0'-'w6' (w0 for Monday, w1 for Tuesday, and so on)'
            if interval_type == 'w':
                interval_type = interval_type + str(conf.log_rotate_interval)
            filelog = file_handler(logpath,
                                   when=interval_type,
                                   interval=conf.log_rotate_interval,
                                   backupCount=conf.max_logfile_count)
        elif conf.log_rotation_type.lower() == "size":
            file_handler = logging.handlers.RotatingFileHandler
            maxBytes = conf.max_logfile_size_mb * units.Mi
            filelog = file_handler(logpath,
                                   maxBytes=maxBytes,
                                   backupCount=conf.max_logfile_count)
        else:
            file_handler = logging.handlers.WatchedFileHandler
            filelog = file_handler(logpath)

        # NOTE: read the mode from the *conf* argument like every other
        # option in this function, not from the global CONF object.
        mode = int(conf.logfile_mode, 8)
        st = os.stat(logpath)
        if st.st_mode != (stat.S_IFREG | mode):
            os.chmod(logpath, mode)

        log_root.addHandler(filelog)

    if conf.use_stderr:
        streamlog = handlers.ColorHandler()
        log_root.addHandler(streamlog)

    if conf.use_journal:
        journal = handlers.OSJournalHandler()
        log_root.addHandler(journal)

    if conf.use_eventlog:
        if platform.system() == 'Windows':
            eventlog = logging.handlers.NTEventLogHandler(project)
            log_root.addHandler(eventlog)
        else:
            raise RuntimeError(_("Windows Event Log is not available on this "
                                 "platform."))

    # if None of the above are True, then fall back to standard out
    if not logpath and not conf.use_stderr and not conf.use_journal:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = handlers.ColorHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if conf.publish_errors:
        handler = importutils.import_object(
            "oslo_messaging.notify.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    if conf.use_syslog:
        global syslog
        if syslog is None:
            raise RuntimeError("syslog is not available on this platform")
        facility = _find_facility(conf.syslog_log_facility)
        syslog_handler = handlers.OSSysLogHandler(facility=facility)
        log_root.addHandler(syslog_handler)

    datefmt = conf.log_date_format
    if not conf.use_json:
        for handler in log_root.handlers:
            handler.setFormatter(formatters.ContextFormatter(project=project,
                                                             version=version,
                                                             datefmt=datefmt,
                                                             config=conf))
    else:
        for handler in log_root.handlers:
            handler.setFormatter(formatters.JSONFormatter(datefmt=datefmt))
    _refresh_root_level(conf.debug)

    for pair in conf.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        numeric_level = None
        try:
            # NOTE(harlowja): integer's are valid level names, and for some
            # libraries they have a lower level than DEBUG that is typically
            # defined at level 5, so to make that accessible, try to convert
            # this to a integer, and if not keep the original...
            numeric_level = int(level_name)
        except ValueError:  # nosec
            pass
        if numeric_level is not None:
            logger.setLevel(numeric_level)
        else:
            logger.setLevel(level_name)

    if conf.rate_limit_burst >= 1 and conf.rate_limit_interval >= 1:
        from oslo_log import rate_limit
        rate_limit.install_filter(conf.rate_limit_burst,
                                  conf.rate_limit_interval,
                                  conf.rate_limit_except)
예제 #45
0
 def __init__(self):
     """Load the configured host manager and the servicegroup API."""
     manager_path = CONF.scheduler_host_manager
     self.host_manager = importutils.import_object(manager_path)
     self.servicegroup_api = servicegroup.API()
예제 #46
0
 def __init__(self, app, controller):
     """Wrap ``app`` and instantiate ``controller`` from its class path."""
     super(Requestify, self).__init__(app)
     controller_obj = importutils.import_object(controller)
     self.controller = controller_obj
예제 #47
0
 def setUp(self):
     """Prepare the segments service plugin and an admin context."""
     super(TestAutoScheduleSegments, self).setUp()
     self.plugin = self.driver
     seg_plugin_path = 'neutron.services.segments.plugin.Plugin'
     self.segments_plugin = importutils.import_object(seg_plugin_path)
     self.ctx = context.get_admin_context()
def init_leases(network_id):
    """Return the DHCP leases for the hosts on the given network."""
    admin_ctxt = context.get_admin_context()
    net = objects.Network.get_by_id(admin_ctxt, network_id)
    manager = importutils.import_object(CONF.network_manager)
    return manager.get_dhcp_leases(admin_ctxt, net)
예제 #49
0
 def __init__(self):
     """Initialize the BGP plugin: scheduler, RPC, and callbacks."""
     super(BgpPlugin, self).__init__()
     scheduler_path = cfg.CONF.bgp_drscheduler_driver
     self.bgp_drscheduler = importutils.import_object(scheduler_path)
     self._setup_rpc()
     self._register_callbacks()
예제 #50
0
def API(*args, **kwargs):
    """Instantiate the backup API class named in configuration."""
    return importutils.import_object(CONF.backup_api_class, *args, **kwargs)
예제 #51
0
def API(*args, **kwargs):
    """Instantiate the configured compute API implementation."""
    cls_path = _get_compute_api_class_name()
    api_obj = importutils.import_object(cls_path, *args, **kwargs)
    return api_obj
예제 #52
0
def _setup_logging_from_conf(project, version):
    """Configure the root logger's handlers and levels from global CONF.

    Installs file, stderr/stdout, publish-errors, and syslog handlers as
    enabled by CONF, applies formatters, sets the root level from
    debug/verbose, and applies per-module default log levels.

    :param project: project name passed to the context formatter
    :param version: project version passed to the context formatter
    """
    log_root = getLogger(None).logger
    # Iterate over a copy: removing handlers while iterating the live
    # list skips every other handler.
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        # Prefer the bundled handler; fall back to oslo.messaging's.
        try:
            handler = importutils.import_object(
                "openstack_dashboard.openstack.common.log_handler.PublishErrorsHandler",
                logging.ERROR)
        except ImportError:
            handler = importutils.import_object(
                "oslo.messaging.notify.log_handler.PublishErrorsHandler",
                logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently.  This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)

    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            #   after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')
예제 #53
0
파일: manager.py 프로젝트: QthCN/wsgi-basic
 def _load_using_import(driver_name, *args):
     """Instantiate ``driver_name`` with the given positional args."""
     driver = importutils.import_object(driver_name, *args)
     return driver
예제 #54
0
 def _load_vif_driver_class(self):
     """Resolve and instantiate the configured VIF driver."""
     driver_path = get_network_driver()
     self._vif_driver = importutils.import_object(driver_path)