def instance_for_image(imgfile, imgfmt, partition):
    """Return a VFS object for the given image.

    Tries the libguestfs-backed VFSGuestFS first.  If guestfs cannot be
    loaded at all, falls back to VFSLocalFS; if guestfs loads but its
    capability check fails, the error is propagated.

    :param imgfile: path to the image file
    :param imgfmt: image format (e.g. "raw", "qcow2")
    :param partition: partition number to access, or None
    :raises exception.NovaException: when guestfs loaded but capability
        inspection failed
    """
    LOG.debug("Instance for image imgfile=%(imgfile)s "
              "imgfmt=%(imgfmt)s partition=%(partition)s",
              {'imgfile': imgfile, 'imgfmt': imgfmt,
               'partition': partition})
    vfs = None
    try:
        LOG.debug("Using primary VFSGuestFS")
        vfs = importutils.import_object(
            "nova.virt.disk.vfs.guestfs.VFSGuestFS",
            imgfile, imgfmt, partition)
        if not VFS.guestfs_ready:
            # Inspect for capabilities and keep
            # track of the result only if succeeded.
            vfs.inspect_capabilities()
            VFS.guestfs_ready = True
        return vfs
    except exception.NovaException:
        if vfs is not None:
            # We are able to load libguestfs but
            # something wrong happens when trying to
            # check for capabilities.
            raise
        else:
            # BUG FIX: the original concatenated the two literals with no
            # separator, producing "...guestfsfalling back..." in the log.
            LOG.info(_LI("Unable to import guestfs, "
                         "falling back to VFSLocalFS"))
            return importutils.import_object(
                "nova.virt.disk.vfs.localfs.VFSLocalFS",
                imgfile, imgfmt, partition)
def get_openstack_security_group_driver():
    """Instantiate and return the configured security group driver."""
    api_name = CONF.security_group_api.lower()
    if api_name == 'nova':
        driver_path = NOVA_DRIVER
    elif api_name in ('neutron', 'quantum'):
        driver_path = NEUTRON_DRIVER
    else:
        # Treat the config value itself as a driver class path.
        driver_path = CONF.security_group_api
    return importutils.import_object(driver_path)
def __init__(self, virtapi, read_only=False):
    """Load the bare-metal sub-drivers and derive flavor extra specs."""
    super(BareMetalDriver, self).__init__(virtapi)
    self.driver = importutils.import_object(
        CONF.baremetal.driver, virtapi)
    self.vif_driver = importutils.import_object(
        CONF.baremetal.vif_driver)
    self.firewall_driver = firewall.load_driver(
        default=DEFAULT_FIREWALL_DRIVER)
    self.volume_driver = importutils.import_object(
        CONF.baremetal.volume_driver, virtapi)
    self.image_cache_manager = imagecache.ImageCacheManager()

    # Parse "key:value" pairs from config into a dict.
    extra_specs = {"baremetal_driver": CONF.baremetal.driver}
    for pair in CONF.baremetal.flavor_extra_specs:
        key, value = pair.split(':', 1)
        extra_specs[key.strip()] = value.strip()
    if 'cpu_arch' not in extra_specs:
        LOG.warning(
            _('cpu_arch is not found in flavor_extra_specs'))
        extra_specs['cpu_arch'] = ''
    self.extra_specs = extra_specs
    self.supported_instances = [
        (extra_specs['cpu_arch'], 'baremetal', 'baremetal'),
    ]
def setUp(self):
    """Create compute and volume managers plus an admin context."""
    super(VolumeTestCase, self).setUp()
    self.compute = importutils.import_object(FLAGS.compute_manager)
    self.flags(connection_type='fake')
    self.volume = importutils.import_object(FLAGS.volume_manager)
    ctxt = context.get_admin_context()
    self.context = ctxt
    self.instance_id = db.instance_create(ctxt, {})['id']
def get_openstack_security_group_driver():
    """Return an instance of the configured security group driver."""
    name = CONF.security_group_api.lower()
    if name == 'nova':
        return importutils.import_object(NOVA_DRIVER)
    if name == 'quantum':
        return importutils.import_object(QUANTUM_DRIVER)
    # Anything else is interpreted as a driver class path.
    return importutils.import_object(CONF.security_group_api)
def __init__(self, virtapi, read_only=False):
    """Load sub-drivers and compute the supported instance tuple."""
    super(BareMetalDriver, self).__init__(virtapi)
    self.driver = importutils.import_object(
        CONF.baremetal.driver, virtapi)
    self.vif_driver = importutils.import_object(
        CONF.baremetal.vif_driver)
    self.firewall_driver = firewall.load_driver(
        default=DEFAULT_FIREWALL_DRIVER)
    self.volume_driver = importutils.import_object(
        CONF.baremetal.volume_driver, virtapi)
    self.image_cache_manager = imagecache.ImageCacheManager()

    extra_specs = {"baremetal_driver": CONF.baremetal.driver}
    for pair in CONF.baremetal.flavor_extra_specs:
        key, value = pair.split(':', 1)
        extra_specs[key.strip()] = value.strip()
    self.extra_specs = extra_specs

    if 'cpu_arch' in extra_specs:
        self.supported_instances = [(
            arch.canonicalize(extra_specs['cpu_arch']),
            hvtype.BAREMETAL,
            vm_mode.HVM
        )]
    else:
        LOG.info(
            _('cpu_arch is not found in flavor_extra_specs'))
        self.supported_instances = []
def __init__(self, read_only=False):
    """Load bare-metal sub-drivers and build the extra-specs map."""
    super(BareMetalDriver, self).__init__()
    self.baremetal_nodes = importutils.import_object(
        FLAGS.baremetal_driver)
    self._vif_driver = importutils.import_object(
        FLAGS.baremetal_vif_driver)
    self._firewall_driver = firewall.load_driver(
        default=DEFAULT_FIREWALL_DRIVER)
    self._volume_driver = importutils.import_object(
        FLAGS.baremetal_volume_driver)
    self._image_cache_manager = imagecache.ImageCacheManager()

    extra_specs = {}
    extra_specs["baremetal_driver"] = FLAGS.baremetal_driver
    for pair in FLAGS.instance_type_extra_specs:
        keyval = pair.split(':', 1)
        extra_specs[keyval[0].strip()] = keyval[1].strip()
    # PEP 8 idiom fix: "x not in y" instead of "not x in y".
    if 'cpu_arch' not in extra_specs:
        LOG.warning('cpu_arch is not found in instance_type_extra_specs')
        extra_specs['cpu_arch'] = ''
    self._extra_specs = extra_specs
    self._supported_instances = [
        (extra_specs['cpu_arch'], 'baremetal', 'baremetal'),
    ]
def __init__(self):
    """Build the topic -> scheduler driver mapping from config."""
    super(MultiScheduler, self).__init__()
    # Dict values are evaluated in order: compute first, then default.
    self.drivers = {
        'compute': importutils.import_object(
            CONF.compute_scheduler_driver),
        'default': importutils.import_object(
            CONF.default_scheduler_driver),
    }
def __init__(self):
    """Build the topic -> scheduler driver mapping from flags."""
    super(MultiScheduler, self).__init__()
    # Dict values are evaluated in order: compute first, then volume.
    self.drivers = {
        'compute': importutils.import_object(
            FLAGS.compute_scheduler_driver),
        'volume': importutils.import_object(
            FLAGS.volume_scheduler_driver),
    }
def __init__(self):
    """Initialize a manager whose l3 driver work happens over RPC."""
    super(LocalManager, self).__init__()
    # NOTE(vish): setting the host to none ensures that the actual
    #             l3driver commands for l3 are done via rpc.
    self.host = None
    self.servicegroup_api = servicegroup.API()
    self.network_rpcapi = network_rpcapi.NetworkAPI()
    self.floating_dns_manager = importutils.import_object(
        CONF.floating_ip_dns_manager)
    self.instance_dns_manager = importutils.import_object(
        CONF.instance_dns_manager)
    self.notifier = rpc.get_notifier("network", CONF.host)
def setUp(self):
    """Wire fake driver/notifier and create one test instance."""
    super(VolumeTestCase, self).setUp()
    self.compute = importutils.import_object(FLAGS.compute_manager)
    self.flags(compute_driver='nova.virt.fake.FakeDriver')
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.test_notifier')
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    inst = db.instance_create(self.context, {})
    self.instance_id = inst['id']
    self.instance_uuid = inst['uuid']
    test_notifier.NOTIFICATIONS = []
def __init__(self, *args, **kwargs):
    """Set up RPC clients and load the orchestration drivers from config."""
    # Client APIs used to talk to the other nova services.
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
    self.network_api = network.API()
    self.volume_api = volume.API()
    self.conductor_api = conductor.API()
    self.workflow_messaging_driver = importutils.import_object(
        CONF.orchestration.workflow_messaging_driver)
    # NOTE(review): "worfklow" looks like a typo for "workflow" -- confirm
    # against the registered config option name before renaming it here.
    self.backend_driver = importutils.import_object(
        CONF.orchestration.worfklow_persistent_backend_driver)
    self.queue = CONF.orchestration.zookeeper_queue_path
    super(OrchestrationManager, self).__init__(*args, **kwargs)
def instance_for_device(imgfile, mountdir, partition, device):
    """Return the mount helper for *device*: loop devices use LoopMount,
    everything else uses NbdMount.
    """
    LOG.debug(_("Instance for device imgfile=%(imgfile)s "
                "mountdir=%(mountdir)s partition=%(partition)s "
                "device=%(device)s") % locals())
    if "loop" in device:
        LOG.debug(_("Using LoopMount"))
        mount_class = "nova.virt.disk.mount.loop.LoopMount"
    else:
        LOG.debug(_("Using NbdMount"))
        mount_class = "nova.virt.disk.mount.nbd.NbdMount"
    return importutils.import_object(
        mount_class, imgfile, mountdir, partition, device)
def instance_for_format(imgfile, mountdir, partition, imgfmt):
    """Return the mount helper for image format *imgfmt*: raw images use
    LoopMount, everything else uses NbdMount.
    """
    LOG.debug(_("Instance for format imgfile=%(imgfile)s "
                "mountdir=%(mountdir)s partition=%(partition)s "
                "imgfmt=%(imgfmt)s") % locals())
    if imgfmt == "raw":
        LOG.debug(_("Using LoopMount"))
        mount_class = "nova.virt.disk.mount.loop.LoopMount"
    else:
        LOG.debug(_("Using NbdMount"))
        mount_class = "nova.virt.disk.mount.nbd.NbdMount"
    return importutils.import_object(
        mount_class, imgfile, mountdir, partition)
def setUp(self):
    """Point volumes at a temp dir and stub out tgtadm plus notifier."""
    super(VolumeTestCase, self).setUp()
    self.compute = importutils.import_object(FLAGS.compute_manager)
    vol_tmpdir = tempfile.mkdtemp()
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               volumes_dir=vol_tmpdir,
               notification_driver=[test_notifier.__name__])
    self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    inst = db.instance_create(self.context, {})
    self.instance_id = inst['id']
    self.instance_uuid = inst['uuid']
    test_notifier.NOTIFICATIONS = []
def test_compute_manager(self):
    """The image cache pass must query instances via the db layer."""
    was = {'called': False}

    def fake_get_all_by_filters(context, *args, **kwargs):
        was['called'] = True
        return [fake_instance.fake_db_instance(image_ref='1',
                                               uuid=x, name=x,
                                               vm_state='',
                                               task_state='')
                for x in xrange(2)]

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        self.stubs.Set(db, 'instance_get_all_by_filters',
                       fake_get_all_by_filters)
        compute = importutils.import_object(CONF.compute_manager)
        self.flags(use_local=True, group='conductor')
        compute.conductor_api = conductor.API()
        compute._run_image_cache_manager_pass(None)
        self.assertTrue(was['called'])
def setUp(self):
    """Stub out glance/network/compute and build the servers controller."""
    super(ServerActionsControllerTest, self).setUp()
    self.stubs.Set(db, "instance_get_by_uuid",
                   fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                           host="fake_host"))
    self.stubs.Set(db, "instance_update_and_get_original",
                   instance_update_and_get_original)
    fakes.stub_out_glance(self.stubs)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self.service = importutils.import_object(
        "nova.image.glance.GlanceImageService")
    self.sent_to_glance = {}
    fakes.stub_out_glanceclient_create(self.stubs, self.sent_to_glance)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self.uuid = FAKE_UUID
    self.url = "/v2/fake/servers/%s/action" % self.uuid
    self._image_href = "155d900f-4e14-4e4c-a73d-069cbf4541e6"

    class FakeExtManager(object):
        def is_loaded(self, ext):
            return False

    self.controller = servers.Controller(ext_mgr=FakeExtManager())
    self.compute_api = self.controller.compute_api
    self.context = context.RequestContext("fake", "fake")
    self.app = fakes.wsgi_app(init_only=("servers",),
                              fake_auth_context=self.context)
def instance_for_device(imgfile, mountdir, partition, device):
    """Pick LoopMount for loop devices, NbdMount for everything else."""
    LOG.debug("Instance for device imgfile=%(imgfile)s "
              "mountdir=%(mountdir)s partition=%(partition)s "
              "device=%(device)s",
              {'imgfile': imgfile, 'mountdir': mountdir,
               'partition': partition, 'device': device})
    if "loop" in device:
        LOG.debug("Using LoopMount")
        mount_class = "nova.virt.disk.mount.loop.LoopMount"
    else:
        LOG.debug("Using NbdMount")
        mount_class = "nova.virt.disk.mount.nbd.NbdMount"
    return importutils.import_object(
        mount_class, imgfile, mountdir, partition, device)
def __init__(self, console_driver=None, *args, **kwargs):
    """Load the console driver (default from config) and bind its host."""
    driver_name = console_driver or CONF.console_driver
    self.driver = importutils.import_object(driver_name)
    super(ConsoleProxyManager, self).__init__(*args, **kwargs)
    self.driver.host = self.host
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def setUp(self):
    """Stub nova db/glance/network APIs and build a servers controller."""
    super(ServerActionsControllerTest, self).setUp()
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                           host='fake_host'))
    self.stubs.Set(nova.db, 'instance_update_and_get_original',
                   instance_update)
    fakes.stub_out_glance(self.stubs)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_rate_limiting(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    nova.tests.image.fake.stub_out_image_service(self.stubs)
    self.service = importutils.import_object(
        'nova.image.glance.GlanceImageService')
    self.sent_to_glance = {}
    fakes.stub_out_glanceclient_create(self.stubs, self.sent_to_glance)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self.uuid = FAKE_UUID
    self.url = '/v2/fake/servers/%s/action' % self.uuid
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    self.controller = servers.Controller()
def setUp(self):
    """Stub networking/notifier and create a fake-virt compute manager."""
    def fake_get_nw_info(cls, ctxt, instance):
        self.assertTrue(ctxt.is_admin)
        return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

    super(UsageInfoTestCase, self).setUp()
    self.stubs.Set(network_api.API, 'get_instance_nw_info',
                   fake_get_nw_info)
    fake_notifier.stub_notifier(self.stubs)
    self.addCleanup(fake_notifier.reset)
    self.flags(use_local=True, group='conductor')
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               network_manager='nova.network.manager.FlatManager')
    self.compute = importutils.import_object(CONF.compute_manager)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)

    def fake_show(meh, context, id):
        return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

    self.stubs.Set(nova.tests.image.fake._FakeImageService, 'show',
                   fake_show)
    fake_network.set_stub_network_methods(self.stubs)
    fake_instance_actions.stub_out_action_events(self.stubs)
def setUp(self):
    """Stub services and build the v3 servers controller plus WSGI app."""
    super(ServerActionsControllerTest, self).setUp()
    CONF.set_override('glance_host', 'localhost')
    self.stubs.Set(db, 'instance_get_by_uuid',
                   fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                           host='fake_host'))
    self.stubs.Set(db, 'instance_update_and_get_original',
                   instance_update_and_get_original)
    fakes.stub_out_glance(self.stubs)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self.service = importutils.import_object(
        'nova.image.glance.GlanceImageService')
    self.sent_to_glance = {}
    fakes.stub_out_glanceclient_create(self.stubs, self.sent_to_glance)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self.uuid = FAKE_UUID
    self.url = '/servers/%s/action' % self.uuid
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    ext_info = plugins.LoadedExtensionInfo()
    self.controller = servers.ServersController(extension_info=ext_info)
    self.compute_api = self.controller.compute_api
    self.context = context.RequestContext('fake', 'fake')
    self.app = fakes.wsgi_app_v3(init_only=('servers',),
                                 fake_auth_context=self.context)
def __init__(self, scheduler_driver=None, *args, **kwargs):
    """Load the scheduler driver, defaulting to CONF.scheduler_driver."""
    driver_name = scheduler_driver or CONF.scheduler_driver
    self.driver = importutils.import_object(driver_name)
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    super(SchedulerManager, self).__init__(service_name='scheduler',
                                           *args, **kwargs)
def setUp(self):
    """Start a fake-virt compute manager and a local conductor service."""
    super(MultiNodeComputeTestCase, self).setUp()
    self.flags(compute_driver='nova.virt.fake.FakeDriver')
    self.compute = importutils.import_object(CONF.compute_manager)
    self.flags(use_local=True, group='conductor')
    self.conductor = self.start_service(
        'conductor', manager=CONF.conductor.manager)
def test_compute_manager(self):
    """The image cache pass must fetch instances for this host."""
    was = {'called': False}

    def fake_get_all(context, *args, **kwargs):
        was['called'] = True
        common = {'image_ref': '1', 'host': CONF.host,
                  'vm_state': '', 'task_state': ''}
        return [dict(common, name='instance-1', uuid='123'),
                dict(common, name='instance-2', uuid='456')]

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        self.stubs.Set(db, 'instance_get_all', fake_get_all)
        compute = importutils.import_object(CONF.compute_manager)
        self.flags(use_local=True, group='conductor')
        compute.conductor_api = conductor.API()
        compute._run_image_cache_manager_pass(None)
        self.assertTrue(was['called'])
def __new__(cls, *args, **kwargs):
    """Create an instance of the servicegroup API.

    args and kwargs are passed down to the servicegroup driver when it
    gets created.  No args currently exist, though.  Valid kwargs are:

    db_allowed - Boolean. False if direct db access is not allowed and
                 alternative data access (conductor) should be used
                 instead.
    """
    if not cls._driver:
        LOG.debug(_('ServiceGroup driver defined as an instance of %s'),
                  str(CONF.servicegroup_driver))
        driver_name = CONF.servicegroup_driver
        try:
            driver_class = cls._driver_name_class_mapping[driver_name]
        except KeyError:
            raise TypeError(_("unknown ServiceGroup driver name: %s")
                            % driver_name)
        cls._driver = importutils.import_object(driver_class,
                                                *args, **kwargs)
        # check_isinstance raises on the wrong type, so no extra None
        # check of cls._driver is needed afterwards.
        utils.check_isinstance(cls._driver, ServiceGroupDriver)
    return super(API, cls).__new__(cls)
def setUp(self):
    """Instantiate the virt driver under test against a fake virt API."""
    super(_VirtDriverTestCase, self).setUp()
    tmp_path = self.useFixture(fixtures.TempDir()).path
    self.flags(instances_path=tmp_path)
    self.connection = importutils.import_object(
        self.driver_module, fake.FakeVirtAPI())
    self.ctxt = test_utils.get_test_admin_context()
    self.image_service = fake_image.FakeImageService()
def __init__(self, *args, **kwargs):
    """Set up scheduler options, compute RPC API and the host solver."""
    super(ConstraintSolverScheduler, self).__init__(*args, **kwargs)
    self.options = scheduler_options.SchedulerOptions()
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    # The hosts solver to use.
    self.hosts_solver = importutils.import_object(
        CONF.scheduler_host_solver)
def setUp(self):
    """Stub network info and build a fake-virt compute manager."""
    def fake_get_nw_info(cls, ctxt, instance):
        self.assertTrue(ctxt.is_admin)
        return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
                                                      spectacular=True)

    super(UsageInfoTestCase, self).setUp()
    self.stubs.Set(network_api.API, 'get_instance_nw_info',
                   fake_get_nw_info)
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               notification_driver=[test_notifier.__name__],
               network_manager='nova.network.manager.FlatManager')
    self.compute = importutils.import_object(CONF.compute_manager)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []

    def fake_show(meh, context, id):
        return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

    self.stubs.Set(nova.tests.image.fake._FakeImageService, 'show',
                   fake_show)
    fake_network.set_stub_network_methods(self.stubs)
def __init__(self, host, driver, nodename):
    """Initialize resource tracking state for one compute node."""
    self.host = host
    self.nodename = nodename
    self.driver = driver
    self.compute_node = None
    self.tracked_instances = {}
    self.stats = importutils.import_object(FLAGS.compute_stats_class)
def __init__(self, scheduler_driver=None, *args, **kwargs):
    """Load the scheduler driver and register the v3 manager proxy."""
    driver_name = scheduler_driver or CONF.scheduler_driver
    self.driver = importutils.import_object(driver_name)
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    super(SchedulerManager, self).__init__(service_name='scheduler',
                                           *args, **kwargs)
    self.additional_endpoints.append(_SchedulerManagerV3Proxy(self))
def __init__(self, host, driver, nodename):
    """Initialize instance and migration tracking for one compute node."""
    self.host = host
    self.nodename = nodename
    self.driver = driver
    self.compute_node = None
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.stats = importutils.import_object(CONF.compute_stats_class)
def setUp(self):
    """Boot a compute manager backed by a stubbed XenAPI session."""
    super(ComputeXenTestCase, self).setUp()
    self.flags(compute_driver='xenapi.XenAPIDriver',
               xenapi_connection_url='test_url',
               xenapi_connection_password='******')
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    self.compute = importutils.import_object(CONF.compute_manager)
def _load_vif_driver_class(self):
    """Resolve and instantiate the VIF driver matching the configured
    network API class; raise TypeError when no mapping exists.
    """
    try:
        self._vif_driver = importutils.import_object(
            self._vif_driver_class_map[CONF.network_api_class])
    except KeyError:
        raise TypeError(
            _("VIF driver not found for "
              "network_api_class: %s") % CONF.network_api_class)
def __init__(self, host, driver):
    """Initialize claim and resource tracking state for a host."""
    self.host = host
    self.driver = driver
    self.compute_node = None
    self.next_claim_id = 1
    self.claims = {}
    self.tracked_instances = {}
    self.stats = importutils.import_object(FLAGS.compute_stats_class)
def setUp(self):
    """Create the driver under test with a fake virt API."""
    super(_VirtDriverTestCase, self).setUp()
    instances_dir = self.useFixture(fixtures.TempDir()).path
    self.flags(instances_path=instances_dir)
    self.connection = importutils.import_object(
        self.driver_module, fake.FakeVirtAPI())
    self.ctxt = test_utils.get_test_admin_context()
    self.image_service = fake_image.FakeImageService()
def __init__(self, console_driver=None, *args, **kwargs):
    """Load the console driver and register the 'console' service."""
    self.driver = importutils.import_object(
        console_driver or CONF.console_driver)
    super(ConsoleProxyManager, self).__init__(service_name='console',
                                              *args, **kwargs)
    self.driver.host = self.host
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _driver(self):
    """Lazily resolve and cache the quota driver instance."""
    if self.__driver:
        return self.__driver
    if not self._driver_cls:
        self._driver_cls = CONF.quota_driver
    # A string value is a class path that still needs instantiating.
    if isinstance(self._driver_cls, basestring):
        self._driver_cls = importutils.import_object(self._driver_cls)
    self.__driver = self._driver_cls
    return self.__driver
def setUp(self):
    """Create the compute manager plus a fake ACTIVE instance to build."""
    super(ComputeManagerBuildInstanceTestCase, self).setUp()
    self.compute = importutils.import_object(CONF.compute_manager)
    self.context = context.RequestContext('fake', 'fake')
    self.instance = fake_instance.fake_db_instance(
        vm_state=vm_states.ACTIVE)
    self.admin_pass = '******'
    self.injected_files = []
    self.image = {}
def add_lease(mac, ip_address):
    """Set the IP that was assigned by the DHCP server."""
    if CONF.fake_rabbit:
        LOG.debug(_("leasing ip"))
        manager = importutils.import_object(CONF.network_manager)
        manager.lease_fixed_ip(context.get_admin_context(), ip_address)
    else:
        network_rpcapi.NetworkAPI().lease_fixed_ip(
            context.get_admin_context(), ip_address, CONF.host)
def setUp(self):
    """Start a console manager backed by the fake console driver."""
    super(ConsoleTestCase, self).setUp()
    self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
               stub_compute=True)
    self.console = importutils.import_object(CONF.console_manager)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)
    self.host = 'test_compute_host'
def InstanceActionAPI(*args, **kwargs):
    """Return the 'InstanceActionAPI' class from the same module as the
    configured compute api.
    """
    importutils = nova.openstack.common.importutils
    compute_cls = importutils.import_class(_get_compute_api_class_name())
    action_api_name = compute_cls.__module__ + ".InstanceActionAPI"
    return importutils.import_object(action_api_name, *args, **kwargs)
def _setup_logging_from_conf():
    """(Re)build the root logger's handlers and levels from CONF.

    Clears existing handlers, attaches syslog/file/stream/error-publishing
    handlers as configured, sets formatters, then applies the global and
    per-module log levels.
    """
    log_root = getLogger(None).logger
    # BUG FIX: iterate over a snapshot of the handler list. Removing
    # handlers while iterating the live list skips every other handler.
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "nova.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
def HostAPI(*args, **kwargs):
    """Return the 'HostAPI' class from the same module as the configured
    compute api.
    """
    importutils = nova.openstack.common.importutils
    compute_cls = importutils.import_class(
        oslo.config.cfg.CONF.compute_api_class)
    host_api_name = compute_cls.__module__ + ".HostAPI"
    return importutils.import_object(host_api_name, *args, **kwargs)
def __init__(self, host, driver, nodename):
    """Initialize per-node tracking state, PCI tracker and conductor."""
    self.host = host
    self.driver = driver
    self.pci_tracker = pci_manager.PciDevTracker()
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
def setUp(self):
    """Stub a XenAPI session and run power syncing synchronously."""
    super(ComputeXenTestCase, self).setUp()
    self.flags(compute_driver='xenapi.XenAPIDriver')
    self.flags(connection_url='test_url',
               connection_password='******',
               group='xenserver')
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    self.compute = importutils.import_object(CONF.compute_manager)
    # execute power syncing synchronously for testing:
    self.compute._sync_power_pool = eventlet_utils.SyncPool()
def __init__(self, volume_driver=None, *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    self.driver = importutils.import_object(
        volume_driver or FLAGS.volume_driver)
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    # NOTE(vish): Implementation specific db handling is done
    #             by the driver.
    self.driver.db = self.db
    self._last_volume_stats = []
def del_lease(mac, ip_address):
    """Called when a lease expires."""
    if CONF.fake_rabbit:
        LOG.debug(_("releasing ip"))
        manager = importutils.import_object(CONF.network_manager)
        manager.release_fixed_ip(context.get_admin_context(), ip_address)
    else:
        network_rpcapi.NetworkAPI().release_fixed_ip(
            context.get_admin_context(), ip_address, CONF.host)
def __init__(self, driver=None, *args, **kwargs):
    """Initialize the driver from a parameter or from flags.

    __init__ runs every time AuthManager() is called, so the driver is
    reset only when it is unset or a new one is explicitly given.
    """
    self.network_manager = importutils.import_object(FLAGS.network_manager)
    if driver or not getattr(self, 'driver', None):
        self.driver = importutils.import_class(driver or FLAGS.auth_driver)
    # The memcache client is shared class-wide and created only once.
    if AuthManager.mc is None:
        AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def instance_for_device(imgfile, mountdir, partition, device):
    """Return the mount helper matching *device* (loop vs nbd)."""
    LOG.debug(
        _("Instance for device imgfile=%(imgfile)s "
          "mountdir=%(mountdir)s partition=%(partition)s "
          "device=%(device)s"),
        {'imgfile': imgfile, 'mountdir': mountdir,
         'partition': partition, 'device': device})
    if "loop" in device:
        LOG.debug(_("Using LoopMount"))
        mount_class = "nova.virt.disk.mount.loop.LoopMount"
    else:
        LOG.debug(_("Using NbdMount"))
        mount_class = "nova.virt.disk.mount.nbd.NbdMount"
    return importutils.import_object(
        mount_class, imgfile, mountdir, partition, device)
def instance_for_format(imgfile, mountdir, partition, imgfmt):
    """Return the mount helper matching image format *imgfmt*."""
    LOG.debug(
        _("Instance for format imgfile=%(imgfile)s "
          "mountdir=%(mountdir)s partition=%(partition)s "
          "imgfmt=%(imgfmt)s"),
        {'imgfile': imgfile, 'mountdir': mountdir,
         'partition': partition, 'imgfmt': imgfmt})
    if imgfmt == "raw":
        LOG.debug(_("Using LoopMount"))
        mount_class = "nova.virt.disk.mount.loop.LoopMount"
    else:
        LOG.debug(_("Using NbdMount"))
        mount_class = "nova.virt.disk.mount.nbd.NbdMount"
    return importutils.import_object(
        mount_class, imgfile, mountdir, partition)
def __init__(self, quota_driver_class=None):
    """Initialize a Quota object."""
    driver = quota_driver_class or FLAGS.quota_driver
    # A string is a class path that still needs instantiating.
    if isinstance(driver, basestring):
        driver = importutils.import_object(driver)
    self._resources = {}
    self._driver = driver
def __init__(self, host, driver, nodename):
    """Initialize per-node tracking state and resource monitors."""
    self.host = host
    self.driver = driver
    self.pci_tracker = None
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
    # Pick the resource monitors appropriate for this tracker.
    handler = monitors.ResourceMonitorHandler()
    self.monitors = handler.choose_monitors(self)
def _get_matchmaker(*args, **kwargs):
    """Return the global matchmaker, creating it from CONF on first use.

    Transparently rewrites the deprecated ``matchmaker.MatchMakerRing``
    driver path to the ``matchmaker_ring`` module and warns about it.
    """
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # BUG FIX: str.replace returns a new string; the original code
            # discarded the result, so the deprecated path was still used.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(
                _('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                  ' %(new)s instead') % dict(
                      orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
def create(self, label=None, cidr=None, num_networks=None,
           network_size=None, multi_host=None, vlan_start=None,
           vpn_start=None, cidr_v6=None, gateway=None, gateway_v6=None,
           bridge=None, bridge_interface=None, dns1=None, dns2=None,
           project_id=None, priority=None, uuid=None, fixed_cidr=None):
    """Creates fixed ips for host by range."""
    # Snapshot every truthy argument (except self) into kwargs.  This
    # must run before any new local variable is created, because
    # locals() captures the current local namespace.
    kwargs = dict(((k, v) for k, v in locals().iteritems()
                   if v and k != "self"))
    # multi_host arrives as the string 'T'/'F'; convert to a boolean,
    # re-adding the key even when it was falsy ('F') above.
    if multi_host is not None:
        kwargs['multi_host'] = multi_host == 'T'
    net_manager = importutils.import_object(CONF.network_manager)
    net_manager.create_networks(context.get_admin_context(), **kwargs)
def instance_for_image(imgfile, imgfmt, partition):
    """Return a VFS for the image, preferring guestfs when importable."""
    LOG.debug(_("Instance for image imgfile=%(imgfile)s "
                "imgfmt=%(imgfmt)s partition=%(partition)s") % locals())
    try:
        LOG.debug(_("Trying to import guestfs"))
        importutils.import_module("guestfs")
        hasGuestfs = True
    except Exception:
        hasGuestfs = False
    if hasGuestfs:
        LOG.debug(_("Using primary VFSGuestFS"))
        vfs_class = "nova.virt.disk.vfs.guestfs.VFSGuestFS"
    else:
        LOG.debug(_("Falling back to VFSLocalFS"))
        vfs_class = "nova.virt.disk.vfs.localfs.VFSLocalFS"
    return importutils.import_object(vfs_class, imgfile, imgfmt, partition)
def setUp(self):
    """Configure fake compute/notifier and create a volume manager."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               host='fake',
               notification_driver=[test_notifier.__name__])
    fake_network.set_stub_network_methods(self.stubs)
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.snapshot_id = 'fake'
    self.volume_size = 0
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []
def setUp(self):
    """Use a fake connection plus test notifier; build a volume manager."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(connection_type='fake', stub_network=True, host='fake')
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.test_notifier')
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.snapshot_id = 'fake'
    self.volume_size = 0
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []
def setUp(self):
    """Build the driver under test and stub out image format resolution."""
    super(_VirtDriverTestCase, self).setUp()
    tmp_path = self.useFixture(fixtures.TempDir()).path
    self.flags(instances_path=tmp_path)
    self.connection = importutils.import_object(
        self.driver_module, fake.FakeVirtAPI())
    self.ctxt = test_utils.get_test_admin_context()
    self.image_service = fake_image.FakeImageService()
    # NOTE(dripton): resolve_driver_format does some file reading and
    # writing and chowning that complicate testing too much by requiring
    # using real directories with proper permissions. Just stub it out
    # here; we test it in test_imagebackend.py
    self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                   imagebackend.Image._get_driver_format)
def setUp(self):
    """Configure the fake driver/notifier and create a volume manager."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               stub_network=True,
               host='fake')
    self.stubs.Set(flags.FLAGS, 'notification_driver',
                   ['nova.openstack.common.notifier.test_notifier'])
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.snapshot_id = 'fake'
    self.volume_size = 0
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []