def profile_cputime(module, decorator_name, status):
    """Enable or disable CPU-time profiling for every class method and
    function in *module* by wrapping them with the named decorator.

    :param module: dotted path of the module to (un)instrument
    :param decorator_name: dotted path of the decorator factory to apply
    :param status: True to register the module for profiling, False to
        unregister it
    """
    try:
        if status:
            profile_cpu.add_module(module)
        else:
            profile_cpu.delete_module(module)
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz,
                                                       inspect.ismethod):
                    # Skip methods that are already wrapped, otherwise we
                    # would stack decorators on repeated calls.
                    if func.func_code.co_name != 'profile_cputime':
                        setattr(clz, method,
                                decorator("%s.%s.%s"
                                          % (module, key, method), func))
                        LOG.info(_('Decorated method ' + method))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                if func.func_code.co_name != 'profile_cputime':
                    setattr(sys.modules[module], key,
                            decorator("%s.%s" % (module, key), func))
                    LOG.info(_('Decorated method ' + key))
    except Exception:
        # Narrowed from a bare "except:" which would also swallow
        # SystemExit and KeyboardInterrupt.
        LOG.error(_('Invalid module or decorator name '))
        LOG.error(_('Exception occurred %s ') % traceback.format_exc())
def __init__(self):
    """Create the configured Quantum filter connection and empty caches."""
    LOG.debug("QFC = %s", FLAGS.baremetal_quantum_filter_connection)
    conn_cls = importutils.import_class(
        FLAGS.baremetal_quantum_filter_connection)
    self._connection = conn_cls()
    # Per-instance caches, populated lazily elsewhere.
    self._network_infos = {}
    self._basic_filters = {}
    self._filters = {}
def __init__(self):
    """Connect to the database, build the configured DNS manager, and
    start the background IP-polling greenthread.
    """
    self.pending = {}
    # Lazy %-style logging args: the message is only formatted when the
    # INFO level is actually enabled (was eager "%" formatting).
    LOG.info("Connecting to database @ %s", FLAGS.sql_connection)
    self.conn = get_engine()
    dnsmanager_class = importutils.import_class(FLAGS.dns_manager)
    self.dnsmanager = dnsmanager_class()
    # Poll for IP changes in the background.
    self.eventlet = eventlet.spawn(self._pollip)
def API():
    """Instantiate the configured network API, transparently mapping the
    legacy 'quantumv2' class path onto 'neutronv2'.
    """
    importutils = nova.openstack.common.importutils
    cls_path = oslo.config.cfg.CONF.network_api_class
    if 'quantumv2' in cls_path:
        cls_path = cls_path.replace('quantumv2', 'neutronv2')
    return importutils.import_class(cls_path)()
def _cleanup_deploy(self, node, instance, network_info):
    """Remove PXE driver fields from the Ironic node, then unplug VIFs
    and stop the instance firewall.

    Removal is best-effort: retry exhaustion is logged as a warning and
    HTTP 400 responses from Ironic are ignored (presumably the field is
    already absent -- TODO confirm).
    """
    icli = self._get_client()
    if 'pxe' in node.driver:
        # add required fields
        pxe_fields = importutils.import_class(
            'ironic.nova.virt.ironic.ironic_driver_fields.PXE')
        patch = []
        for field in pxe_fields.required:
            path_to_remove = "%s/%s" % (field['ironic_path'],
                                        field['ironic_variable'])
            # NOTE(review): patch is rebound to a single-op list on each
            # iteration; the empty list initialised above is never used.
            patch = [{'op': 'remove', 'path': path_to_remove}]
            try:
                self._retry_if_service_is_unavailable(icli.node.update,
                                                      node.uuid, patch)
            except MaximumRetriesReached:
                LOG.warning(_("Removing the parameter %(param)s on node "
                              "%(node)s failed after %(retries)d retries")
                            % {'param': path_to_remove,
                               'node': node.uuid,
                               'retries': CONF.ironic.api_max_retries})
            except ironic_exception.HTTPBadRequest:
                pass
    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
def _add_driver_fields(self, node, instance, image_meta, flavor=None):
    """Patch each PXE-required driver field onto the Ironic node.

    :raises: exception.NovaException when a field cannot be added after
        the configured number of retries.
    """
    icli = self._get_client()
    if 'pxe' in node.driver:
        # add required fields
        pxe_fields = importutils.import_class(
            'ironic.nova.virt.ironic.ironic_driver_fields.PXE')
        patch = []
        for field in pxe_fields.required:
            path_to_add = "%s/%s" % (field['ironic_path'],
                                     field['ironic_variable'])
            # SECURITY NOTE(review): eval() of field['nova_object']
            # executes arbitrary code -- confirm the PXE field
            # definitions are a trusted, static source.
            patch = [{'op': 'add', 'path': path_to_add,
                      'value': unicode(_get_required_value(
                          eval(field['nova_object']),
                          field['object_field']))}]
            try:
                self._retry_if_service_is_unavailable(icli.node.update,
                                                      node.uuid, patch)
            except MaximumRetriesReached:
                msg = (_("Adding the parameter %(param)s on node %(node)s "
                         "failed after %(retries)d retries")
                       % {'param': path_to_add, 'node': node.uuid,
                          'retries': CONF.ironic.api_max_retries})
                LOG.error(msg)
                raise exception.NovaException(msg)
def __init__(self):
    """Initialise container state, the configured VIF driver, and volume
    ops; paths stay unset until a container is created.
    """
    self.instance_path = None
    self.container_rootfs = None
    driver_cls = importutils.import_class(CONF.lxc.vif_driver)
    self.vif_driver = driver_cls()
    self.volumes = volumes.VolumeOps()
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, *args, **kwargs):
    """Record the service identity, build the configured manager, and
    initialise periodic-task and servicegroup state.
    """
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    mgr_cls = importutils.import_class(self.manager_class_name)
    self.manager = mgr_cls(host=self.host, *args, **kwargs)
    # Periodic-task tuning knobs, stored verbatim.
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    # Keep the originals so the service can be restarted with them.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.timers = []
    self.backdoor_port = None
    self.servicegroup_api = servicegroup.API()
def __init__(self, virtapi):
    """Build the volume-driver registry from 'type=class.path' entries in
    CONF.libvirt_volume_drivers.
    """
    super(LibvirtVolumeDriver, self).__init__(virtapi)
    self.volume_drivers = {}
    for spec in CONF.libvirt_volume_drivers:
        kind, _sep, cls_path = spec.partition('=')
        self.volume_drivers[kind] = importutils.import_class(cls_path)(self)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True,
             *args, **kwargs):
    """Record service identity, create the servicegroup API first (the
    ordering matters -- see NOTE below), instantiate the manager, and
    wait for the conductor to become ready.
    """
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(russellb) We want to make sure to create the servicegroup API
    # instance early, before creating other things such as the manager,
    # that will also create a servicegroup API instance. Internally, the
    # servicegroup only allocates a single instance of the driver API and
    # we want to make sure that our value of db_allowed is there when it
    # gets created. For that to happen, this has to be the first instance
    # of the servicegroup API.
    self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    # Saved so the service can be restarted with identical arguments.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    self.conductor_api = conductor.API(use_local=db_allowed)
    # Blocks until the conductor service is reachable.
    self.conductor_api.wait_until_ready(context.get_admin_context())
def get_remote_image_service(context, image_href): """Create an image_service and parse the id from the given image_href. The image_href param can be an href of the form 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the image_href is a standalone id, then the default image service is returned. :param image_href: href that describes the location of an image :returns: a tuple of the form (image_service, image_id) """ # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() return image_service, image_href try: (image_id, glance_host, glance_port, use_ssl) = \ _parse_image_ref(image_href) glance_client = GlanceClientWrapper(context=context, host=glance_host, port=glance_port, use_ssl=use_ssl) except ValueError: raise exception.InvalidImageRef(image_href=image_href) image_service_class = importutils.import_class('nova.huawei.image.glance.HuaweiGlanceImageService') image_service = image_service_class(client=glance_client) return image_service, image_id
def __init__(self, container, instance, image_meta, network_info):
    """Capture the container context and build the configured VIF driver."""
    self.container = container
    self.instance = instance
    self.image_meta = image_meta
    self.network_info = network_info
    driver_cls = importutils.import_class(CONF.lxc.vif_driver)
    self.vif_driver = driver_cls()
def __init__(self, virtapi):
    """Set up LXC driver state: VIF driver, volume ops, and uid mapping."""
    self.virtapi = virtapi
    self.instance_path = None
    self.container_rootfs = None
    driver_cls = importutils.import_class(CONF.lxc.vif_driver)
    self.vif_driver = driver_cls()
    self.volumes = volumes.VolumeOps()
    self.idmap = container_utils.LXCUserIdMap()
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == ".": relpkg = "" else: relpkg = ".%s" % ".".join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py if ext != ".py" or root == "__init__": continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = "%s%s.%s.%s" % (package, relpkg, root, classname) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s" % classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warn(_("Failed to load extension %(classpath)s: " "%(exc)s") % locals()) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, "__init__.py")): continue # If it has extension(), delegate... ext_name = "%s%s.%s.extension" % (package, relpkg, dname) try: ext = importutils.import_class(ext_name) except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warn(_("Failed to load extension %(ext_name)s: " "%(exc)s") % locals()) # Update the list of directories we'll explore... dirnames[:] = subdirs
def __init__(self, state_manager):
    """Wire up the cells scheduler and the per-message-type handlers."""
    self.state_manager = state_manager
    scheduler_cls = importutils.import_class(CONF.cells.scheduler)
    self.scheduler = scheduler_cls(self)
    self.response_queues = {}
    self.methods_by_type = {}
    self.our_name = CONF.cells.name
    # One handler instance per message type.
    for msg_type, handler_cls in \
            _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
        self.methods_by_type[msg_type] = handler_cls(self)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
    """Build a {driver_type: driver instance} map from a list of
    'type=class.path' strings, instantiating each class with the given
    arguments.
    """
    registry = {}
    for entry in named_driver_config:
        kind, _sep, cls_path = entry.partition('=')
        registry[kind] = importutils.import_class(cls_path)(*args, **kwargs)
    return registry
def __init__(self, app, conf, **local_conf):
    """Register middleware options and select the request-context class
    (the default RequestContext, or an importable override from
    local_conf['context_class']).
    """
    cfg.CONF.register_opts(self.opts)
    # Determine the context class to use
    key = 'context_class'
    if key in local_conf:
        self.ctxcls = importutils.import_class(local_conf[key])
    else:
        self.ctxcls = RequestContext
    super(ContextMiddleware, self).__init__(app)
def InstanceActionAPI(*args, **kwargs):
    """Returns the 'InstanceActionAPI' class from the same module as the
    configured compute api.
    """
    importutils = nova.openstack.common.importutils
    compute_cls = importutils.import_class(_get_compute_api_class_name())
    target = "%s.InstanceActionAPI" % compute_cls.__module__
    return importutils.import_object(target, *args, **kwargs)
def _get_sample(cls, name, suffix=''):
    """Build the path of an api_samples fixture file for this test class."""
    parts = [os.path.dirname(os.path.abspath(__file__)), 'api_samples']
    if cls.all_extensions:
        parts.append('all_extensions')
    if cls.extension_name:
        # Use the extension's alias as the subdirectory name.
        parts.append(importutils.import_class(cls.extension_name).alias)
    parts.append("%s.%s%s" % (name, cls.ctype, suffix))
    return os.path.join(*parts)
def _get_sample_path(cls, name, dirname, suffix=""):
    """Compose the path to an api_samples file under *dirname*."""
    parts = [dirname, "api_samples"]
    if cls.all_extensions:
        parts.append("all_extensions")
    if cls.extension_name:
        # Extension samples live under the extension's alias.
        parts.append(importutils.import_class(cls.extension_name).alias)
    parts.append("%s.%s%s" % (name, cls.ctype, suffix))
    return os.path.join(*parts)
def HostAPI(*args, **kwargs):
    """Returns the 'HostAPI' class from the same module as the configured
    compute api.
    """
    importutils = nova.openstack.common.importutils
    compute_cls = importutils.import_class(
        oslo.config.cfg.CONF.compute_api_class)
    target = "%s.HostAPI" % compute_cls.__module__
    return importutils.import_object(target, *args, **kwargs)
def __init__(self):
    """Capture rabbit connection parameters and instantiate the
    configured DNS listener; the connection itself is made lazily.
    """
    self.params = dict(hostname=FLAGS.rabbit_host,
                       port=FLAGS.rabbit_port,
                       userid=FLAGS.rabbit_userid,
                       password=FLAGS.rabbit_password,
                       virtual_host=FLAGS.rabbit_virtual_host)
    self.connection = None
    self.eventlet = None
    listener_cls = importutils.import_class(FLAGS.dns_listener)
    self.listener = listener_cls()
def profile_memory(method, decorator_name, status, setref):
    """Enable or disable memory profiling for a single method by wrapping
    it with the named decorator.

    :param method: dotted path 'pkg.Class.method' to (un)instrument
    :param decorator_name: dotted path of the decorator factory
    :param status: profiling-enabled flag recorded in profile_mem
    :param setref: forwarded to profile_mem.setref
    """
    try:
        profile_mem.modules_profiling_status[method] = status
        profile_mem.setref = setref
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        class_str, _sep, method_str = method.rpartition('.')
        clz = importutils.import_class(class_str)
        # set the decorator for the function, unless it is already
        # wrapped (avoids stacking decorators on repeated calls)
        func = getattr(clz, method_str)
        if func.func_code.co_name != 'profile_memory':
            setattr(clz, method_str, decorator(method, func))
            LOG.info(_('Decorated method ' + method_str))
    except Exception:
        # Narrowed from a bare "except:" which would also swallow
        # SystemExit and KeyboardInterrupt.
        LOG.error(_('Invalid method or decorator name '))
        LOG.error(_('Exception occurred %s ') % traceback.format_exc())
def __init__(self, driver=None, *args, **kwargs):
    """Inits the driver from parameter or flag

    __init__ is run every time AuthManager() is called, so we only
    reset the driver if it is not set or a new driver is specified.
    """
    self.network_manager = importutils.import_object(FLAGS.network_manager)
    if driver or not getattr(self, 'driver', None):
        # NOTE(review): import_class returns the class object itself, not
        # an instance -- presumably self.driver is instantiated (or used
        # as a context manager class) by callers; confirm.
        self.driver = importutils.import_class(driver or FLAGS.auth_driver)
    if AuthManager.mc is None:
        # Shared class-level memcache client, created once per process.
        AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def placement_module():
    """Return a network-placement implementation instance matching the
    configured host type ('powervm' or 'kvm').

    :raises: ValueError if CONF.host_type matches neither supported
        platform (previously this fell through and raised an obscure
        NameError on the unbound local).
    """
    importutils = nova.openstack.common.importutils
    host_type = oslo.config.cfg.CONF.host_type.lower()
    if 'powervm' in host_type:
        placement_class = ('paxes_nova.network.ibmpowervm.placement_pvm.'
                           'IBMPowerVMNetworkPlacementPVM')
    elif 'kvm' in host_type:
        placement_class = ('paxes_nova.network.powerkvm.placement_kvm.'
                           'IBMPowerVMNetworkPlacementKVM')
    else:
        raise ValueError('Unsupported host_type for network placement: %s'
                         % host_type)
    cls = importutils.import_class(placement_class)
    return cls()
def __init__(self, *args, **kwargs):
    """Set up cell state, the message runner, and the configured
    inter-cell communications driver.
    """
    # Mostly for tests.
    state_mgr_cls = kwargs.pop("cell_state_manager", None)
    super(CellsManager, self).__init__(*args, **kwargs)
    if state_mgr_cls is None:
        state_mgr_cls = cells_state.CellStateManager
    self.state_manager = state_mgr_cls()
    self.msg_runner = messaging.MessageRunner(self.state_manager)
    self.driver = importutils.import_class(CONF.cells.driver)()
    self.instances_to_heal = iter([])
def __init__(self, plugin_class_name=None):
    """Wrap the local conductor manager and load the configured plugin."""
    # TODO(danms): This needs to be something more generic for
    # other/future users of this sort of functionality.
    self._manager = utils.ExceptionHelper(manager.ConductorManager())
    if plugin_class_name is None:
        plugin_class_name = ('%s.PluginAPI'
                             % CONF.conductor.conductor_plugin_class_name)
    plugin_cls = importutils.import_class(plugin_class_name)
    self.plugin = plugin_cls(manager=self._manager)
    LOG.debug(_('Load Plugin at Conductor LocalAPI %(plugin_class_name)s'),
              {'plugin_class_name': plugin_class_name})
def __init__(self, plugin_class_name=None):
    """Create RPC-backed conductor access and load the configured plugin."""
    self._manager = rpcapi.ConductorAPI()
    self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
    if plugin_class_name is None:
        plugin_class_name = ('%s.PluginAPI'
                             % CONF.conductor.conductor_plugin_class_name)
    plugin_cls = importutils.import_class(plugin_class_name)
    self.plugin = plugin_cls(manager=self._manager)
    LOG.debug(_('Load Plugin at Conductor API %(plugin_class_name)s'),
              {'plugin_class_name': plugin_class_name})
def _load_driver(self, driver_provider):
    """Import and instantiate *driver_provider*, raising a helpful error
    when the class cannot be found.
    """
    LOG.debug("Driver location:%s", driver_provider)
    # If the plugin can't be found let them know gracefully
    try:
        LOG.info("Loading Driver: %s" % driver_provider)
        klass = importutils.import_class(driver_provider)
    except ClassNotFound:
        LOG.exception("Error loading driver")
        raise Exception("driver_provider not found. You can install a "
                        "Driver with: pip install <plugin-name>\n"
                        "Example: pip install quantum-sample-driver")
    return klass()
def __init__(self, plugin_class_name=None):
    """Initialise the conductor RPC client and load the RPC-side plugin."""
    super(ConductorAPI, self).__init__(
        topic=CONF.conductor.topic,
        default_version=self.BASE_RPC_API_VERSION,
        serializer=objects_base.NovaObjectSerializer())
    self.client = self.get_client()
    if plugin_class_name is None:
        plugin_class_name = ('%s.RpcApiPlugin'
                             % CONF.conductor.conductor_plugin_class_name)
    self.plugin = importutils.import_class(plugin_class_name)()
def __init__(self, *args, **kwargs):
    """Set up cell state, messaging, and the configured comms driver for
    the 'cells' service.
    """
    # Mostly for tests.
    state_mgr_cls = kwargs.pop('cell_state_manager', None)
    super(CellsManager, self).__init__(service_name='cells', *args,
                                       **kwargs)
    if state_mgr_cls is None:
        state_mgr_cls = cells_state.CellStateManager
    self.state_manager = state_mgr_cls()
    self.msg_runner = messaging.MessageRunner(self.state_manager)
    self.driver = importutils.import_class(CONF.cells.driver)()
    self.instances_to_heal = iter([])
def monkey_patch():
    """If the CONF.monkey_patch set as True, this function patches a
    decorator for all functions in specified modules.

    You can set decorators for each modules using
    CONF.monkey_patch_modules. The format is "Module path:Decorator
    function". Example:
    'nova.api.ec2.cloud:nova.notifications.notify_decorator'

    Parameters of the decorator are as follows (see
    nova.notifications.notify_decorator):
    name - name of the function
    function - object of the function
    """
    # If CONF.monkey_patch is not True, this function do nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            member = module_data[key]
            if isinstance(member, pyclbr.Class):
                # set the decorator for the class methods
                clz = importutils.import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz,
                                                       inspect.ismethod):
                    setattr(clz, method,
                            decorator("%s.%s.%s" % (module, key, method),
                                      func))
            if isinstance(member, pyclbr.Function):
                # set the decorator for the function
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_interval=None, periodic_fuzzy_delay=None,
             *args, **kwargs):
    """Record the service identity, instantiate the configured manager,
    and initialise periodic-task state.
    """
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    mgr_cls = importutils.import_class(self.manager_class_name)
    self.manager = mgr_cls(host=self.host, *args, **kwargs)
    # Periodic-task settings, stored verbatim.
    self.report_interval = report_interval
    self.periodic_interval = periodic_interval
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    # Saved so the service can be restarted with the same arguments.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.timers = []
def get_filter_classes(filter_class_names):
    """Get filter classes from class names.

    Each name may resolve to a filter class (kept as-is) or to a
    function that returns a list of filter classes (its results are
    appended).

    :raises: exception.ClassNotFound for anything else.
    """
    classes = []
    for cls_name in filter_class_names:
        obj = importutils.import_class(cls_name)
        if _is_filter_class(obj):
            classes.append(obj)
        elif isinstance(obj, types.FunctionType):
            # isinstance() is the idiomatic type test (was
            # "type(obj) is ..."); behavior is identical here.
            # Get list of classes from a function
            classes.extend(obj())
        else:
            raise exception.ClassNotFound(
                class_name=cls_name,
                exception='Not a valid scheduler filter')
    return classes
def _cleanup_deploy(self, node, instance, network_info): icli = self._get_client() # remove the instance uuid if node.instance_uuid and node.instance_uuid == instance['uuid']: try: patch = [{'op': 'remove', 'path': '/instance_uuid'}] self._retry_if_service_is_unavailable(icli.node.update, node.uuid, patch) except MaximumRetriesReached: LOG.warning( _("Failed to unassociate the instance " "%(instance)s with node %(node)s") % { 'instance': instance['uuid'], 'node': node.uuid }) except ironic_exception.HTTPBadRequest: pass if 'pxe' in node.driver: # add required fields pxe_fields = importutils.import_class( 'ironic.nova.virt.ironic.ironic_driver_fields.PXE') patch = [] for field in pxe_fields.required: path_to_remove = "%s/%s" % (field['ironic_path'], field['ironic_variable']) patch = [{'op': 'remove', 'path': path_to_remove}] try: self._retry_if_service_is_unavailable( icli.node.update, node.uuid, patch) except MaximumRetriesReached: LOG.warning( _("Removing the parameter %(param)s on node " "%(node)s failed after %(retries)d retries") % { 'param': path_to_remove, 'node': node.uuid, 'retries': CONF.ironic.api_max_retries }) except ironic_exception.HTTPBadRequest: pass self._unplug_vifs(node, instance, network_info) self._stop_firewall(instance, network_info)
def load_extension(self, ext_factory):
    """Execute an extension factory.

    Loads an extension. The 'ext_factory' is the name of a callable
    that will be imported and called with one argument--the extension
    manager. The factory callable is expected to call the register()
    method at least once.
    """
    LOG.debug(_("Loading extension %s"), ext_factory)
    # Import the factory, then hand it this manager so it can register.
    factory = importutils.import_class(ext_factory)
    LOG.debug(_("Calling extension factory %s"), ext_factory)
    factory(self)
def is_neutron():
    """Return True if the configured network API class is neutron-based.

    The answer is computed once and cached in the module-level
    _IS_NEUTRON global; an ImportError (neutron code not installed)
    caches False.
    """
    global _IS_NEUTRON
    if _IS_NEUTRON is not None:
        return _IS_NEUTRON
    try:
        # compatibility with Folsom/Grizzly configs
        cls_name = CONF.network_api_class
        if cls_name == 'nova.network.quantumv2.api.API':
            cls_name = 'nova.network.neutronv2.api.API'
        from nova.network.neutronv2 import api as neutron_api
        _IS_NEUTRON = issubclass(importutils.import_class(cls_name),
                                 neutron_api.API)
    except ImportError:
        _IS_NEUTRON = False
    return _IS_NEUTRON
def __init__(self, *args, **kwargs): LOG.warn( _('The cells feature of Nova is considered experimental ' 'by the OpenStack project because it receives much ' 'less testing than the rest of Nova. This may change ' 'in the future, but current deployers should be aware ' 'that the use of it in production right now may be ' 'risky.')) # Mostly for tests. cell_state_manager = kwargs.pop('cell_state_manager', None) super(CellsManager, self).__init__(service_name='cells', *args, **kwargs) if cell_state_manager is None: cell_state_manager = cells_state.CellStateManager self.state_manager = cell_state_manager() self.msg_runner = messaging.MessageRunner(self.state_manager) cells_driver_cls = importutils.import_class(CONF.cells.driver) self.driver = cells_driver_cls() self.instances_to_heal = iter([])
def _get_manager(self):
    """Initialize a Manager object appropriate for this service.

    Use the service name to look up a Manager subclass from the
    configuration and initialize an instance. If no class name is
    configured, just return None.

    :returns: a Manager instance, or None.
    """
    opt_name = '%s_manager' % self.name
    if opt_name not in CONF:
        return None
    cls_path = CONF.get(opt_name, None)
    if not cls_path:
        return None
    return importutils.import_class(cls_path)()
def get_matching_classes(self, loadable_class_names):
    """Get loadable classes from a list of names.

    Each name can be a full module path or the full path to a method
    that returns classes to use. The latter behavior is useful to
    specify a method that returns a list of classes to use in a
    default case.
    """
    matches = []
    for cls_name in loadable_class_names:
        obj = importutils.import_class(cls_name)
        if self._is_correct_class(obj):
            matches.append(obj)
        elif inspect.isfunction(obj):
            # Get list of classes from a function
            matches.extend(obj())
        else:
            raise exception.ClassNotFound(
                class_name=cls_name,
                exception='Not a class of the correct type')
    return matches
def get_cost_functions(self, topic=None):
    """Returns a list of tuples containing weights and cost functions to
    use for weighing hosts.

    Results are cached per topic; each function's weight is read from a
    '<function name>_weight' flag.
    """
    if topic is None:
        # Schedulers only support compute right now.
        topic = "compute"
    if topic in self.cost_function_cache:
        return self.cost_function_cache[topic]
    cost_fns = []
    for cost_fn_str in FLAGS.least_cost_functions:
        if '.' in cost_fn_str:
            short_name = cost_fn_str.split('.')[-1]
        else:
            # Bare names are resolved relative to this class.
            short_name = cost_fn_str
            cost_fn_str = "%s.%s.%s" % (
                __name__, self.__class__.__name__, short_name)
        # Only keep functions for this topic (or the noop ones).
        if not (short_name.startswith('%s_' % topic) or
                short_name.startswith('noop')):
            continue
        try:
            # NOTE: import_class is somewhat misnamed since
            # the weighing function can be any non-class callable
            # (i.e., no 'self')
            cost_fn = importutils.import_class(cost_fn_str)
        except ImportError:
            raise exception.SchedulerCostFunctionNotFound(
                cost_fn_str=cost_fn_str)
        try:
            flag_name = "%s_weight" % cost_fn.__name__
            weight = getattr(FLAGS, flag_name)
        except AttributeError:
            raise exception.SchedulerWeightFlagNotFound(
                flag_name=flag_name)
        cost_fns.append((weight, cost_fn))
    self.cost_function_cache[topic] = cost_fns
    return cost_fns
def __init__(self, **kwargs):
    """Set up the virtual power driver command set (cached in the
    module-level _cmds global) and per-node identification state.
    """
    global _conn
    global _virtual_power_settings
    global _cmds
    if _cmds is None:
        # Import the command settings class once per process.
        LOG.debug("Setting up %s commands."
                  % CONF.baremetal.virtual_power_type)
        _vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
            CONF.baremetal.virtual_power_type
        _cmds = importutils.import_class(_vpc)
    self._vp_cmd = _cmds()
    self.connection_data = _conn
    node = kwargs.pop('node', {})
    instance = kwargs.pop('instance', {})
    self._node_name = instance.get('hostname', "")
    # Normalise the MAC by stripping colon separators.
    self._mac_address = node.get('prov_mac_address', "")
    self._mac_address = self._mac_address.replace(':', '')
    self._connection = None
    self._matched_name = ''
    self.state = None
def __init__(self, **kwargs):
    """Set up the virtual power driver command set (cached in the
    module-level _cmds global) and collect the node's MAC addresses
    from the baremetal database.
    """
    global _conn
    global _cmds
    if _cmds is None:
        # Import the command settings class once per process.
        LOG.debug(_("Setting up %s commands."),
                  CONF.baremetal.virtual_power_type)
        _vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
            CONF.baremetal.virtual_power_type
        _cmds = importutils.import_class(_vpc)
    self._vp_cmd = _cmds()
    self.connection_data = _conn
    node = kwargs.pop('node', {})
    instance = kwargs.pop('instance', {})
    self._node_name = instance.get('hostname', "")
    context = nova_context.get_admin_context()
    # All interfaces registered for this baremetal node, MACs normalised.
    ifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
    self._mac_addresses = [_normalize_mac(i['address']) for i in ifs]
    self._connection = None
    self._matched_name = ''
    self.state = None
def _get_cost_functions():
    """Returns a list of tuples containing weights and cost functions to
    use for weighing hosts.

    Handles legacy configuration: a missing flag gets the old default
    function, and old 'nova.scheduler.least_cost.*' paths are rewritten
    to their new 'nova.scheduler.weights.least_cost.*' location.
    """
    cost_fns_conf = CONF.least_cost_functions
    if cost_fns_conf is None:
        # The old default. This will get fixed up below.
        fn_str = 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
        cost_fns_conf = [fn_str]
    cost_fns = []
    for cost_fn_str in cost_fns_conf:
        short_name = cost_fn_str.split('.')[-1]
        if not (short_name.startswith('compute_') or
                short_name.startswith('noop')):
            continue
        # Fix up any old paths to the new paths
        # (index 25 keeps the trailing '.' of the old prefix).
        if cost_fn_str.startswith('nova.scheduler.least_cost.'):
            cost_fn_str = ('nova.scheduler.weights.least_cost' +
                           cost_fn_str[25:])
        try:
            # NOTE: import_class is somewhat misnamed since
            # the weighing function can be any non-class callable
            # (i.e., no 'self')
            cost_fn = importutils.import_class(cost_fn_str)
        except ImportError:
            raise exception.SchedulerCostFunctionNotFound(
                cost_fn_str=cost_fn_str)
        try:
            flag_name = "%s_weight" % cost_fn.__name__
            weight = getattr(CONF, flag_name)
        except AttributeError:
            raise exception.SchedulerWeightFlagNotFound(
                flag_name=flag_name)
        # Set the original default.
        if (flag_name == 'compute_fill_first_cost_fn_weight' and
                weight is None):
            weight = -1.0
        cost_fns.append((weight, cost_fn))
    return cost_fns
def __init__(self, application, limits=None, limiter=None, **kwargs):
    """Initialize new `RateLimitingMiddleware`, which wraps the given
    WSGI application and sets up the given limits.

    @param application: WSGI application to wrap
    @param limits: String describing limits
    @param limiter: String identifying class for representing limits

    Other parameters are passed to the constructor for the limiter.
    """
    base_wsgi.Middleware.__init__(self, application)
    # Select the limiter class: default, or an importable override.
    if limiter is None:
        limiter = Limiter
    else:
        limiter = importutils.import_class(limiter)
    # Parse the limits, if any are provided
    parsed = limiter.parse_limits(limits) if limits is not None else None
    self._limiter = limiter(parsed or DEFAULT_LIMITS, **kwargs)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True,
             *args, **kwargs):
    """Record service identity, create the servicegroup API first (the
    ordering matters -- see NOTE below), instantiate the manager, and
    wait for the conductor to become ready.
    """
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(russellb) We want to make sure to create the servicegroup API
    # instance early, before creating other things such as the manager,
    # that will also create a servicegroup API instance. Internally, the
    # servicegroup only allocates a single instance of the driver API and
    # we want to make sure that our value of db_allowed is there when it
    # gets created. For that to happen, this has to be the first instance
    # of the servicegroup API.
    self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    # RPC server is created later, at service start.
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    # Saved so the service can be restarted with identical arguments.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    self.conductor_api = conductor.API(use_local=db_allowed)
    # Blocks until the conductor service is reachable.
    self.conductor_api.wait_until_ready(context.get_admin_context())
def __init__(self, instance, address=None, content=None, extra_md=None,
             conductor_api=None, network_info=None, vd_driver=None):
    """Creation of this object should basically cover all time consuming
    collection.  Methods after that should not cause time delays due to
    network operations or lengthy cpu operations.

    The user should then get a single instance and make multiple method
    calls on it.
    """
    if not content:
        content = []
    ctxt = context.get_admin_context()
    # NOTE(danms): This should be removed after bp:compute-manager-objects
    if not isinstance(instance, instance_obj.Instance):
        instance = instance_obj.Instance._from_db_object(
            ctxt, instance_obj.Instance(), instance,
            expected_attrs=['metadata', 'system_metadata'])
    self.instance = instance
    self.extra_md = extra_md
    # Allow tests/callers to inject a conductor API; default to a new one.
    if conductor_api:
        capi = conductor_api
    else:
        capi = conductor.API()
    self.availability_zone = ec2utils.get_availability_zone_by_host(
        instance['host'], capi)
    self.security_groups = secgroup_obj.SecurityGroupList.get_by_instance(
        ctxt, instance)
    self.mappings = _format_instance_mapping(capi, ctxt, instance)
    # user_data is stored base64-encoded; decode it up front.
    if instance.get('user_data', None) is not None:
        self.userdata_raw = base64.b64decode(instance['user_data'])
    else:
        self.userdata_raw = None
    self.ec2_ids = capi.get_ec2_ids(ctxt,
                                    obj_base.obj_to_primitive(instance))
    self.address = address
    # expose instance metadata.
    self.launch_metadata = utils.instance_meta(instance)
    self.password = password.extract_password(instance)
    self.uuid = instance.get('uuid')
    self.content = {}
    self.files = []
    # get network info, and the rendered network template
    if network_info is None:
        network_info = network.API().get_instance_nw_info(ctxt, instance)
    self.ip_info = \
        ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
    self.network_config = None
    cfg = netutils.get_injected_network_template(network_info)
    if cfg:
        # Content keys are zero-padded sequence numbers.
        key = "%04i" % len(self.content)
        self.content[key] = cfg
        self.network_config = {"name": "network_config",
                               'content_path': "/%s/%s" % (CONTENT_DIR,
                                                           key)}
    # 'content' is passed in from the configdrive code in
    # nova/virt/libvirt/driver.py. Thats how we get the injected files
    # (personalities) in. AFAIK they're not stored in the db at all,
    # so are not available later (web service metadata time).
    for (path, contents) in content:
        key = "%04i" % len(self.content)
        self.files.append({'path': path,
                           'content_path': "/%s/%s" % (CONTENT_DIR, key)})
        self.content[key] = contents
    # Vendor-data driver: configured class unless explicitly overridden.
    if vd_driver is None:
        vdclass = importutils.import_class(CONF.vendordata_driver)
    else:
        vdclass = vd_driver
    self.vddriver = vdclass(instance=instance, address=address,
                            extra_md=extra_md, network_info=network_info)
def API():
    """Instantiate the volume API class named by FLAGS.volume_api_class."""
    loader = nova.openstack.common.importutils
    cls_path = nova.flags.FLAGS.volume_api_class
    return loader.import_class(cls_path)()
def API():
    """Build and return the configured network API object."""
    loader = nova.openstack.common.importutils
    cls_path = nova.openstack.common.cfg.CONF.network_api_class
    return loader.import_class(cls_path)()
def __init__(self, virtapi, read_only=False):
    """Initialize the fake test driver.

    :param virtapi: handed straight to the parent driver's constructor
    :param read_only: accepted for signature compatibility; unused here
    """
    super(FakeTestDriver, self).__init__(virtapi)
    vif_path = CONF.libvirt_vif_driver
    driver_cls = importutils.import_class(vif_path)
    # NOTE(review): the original passes None as the driver's first
    # argument — presumably a connection handle the fake does not need.
    self.vif_driver = driver_cls(None)
def get_power_manager(**kwargs):
    """Instantiate the configured baremetal power manager.

    All keyword arguments are forwarded to the manager's constructor.
    """
    driver_path = CONF.baremetal.power_manager
    driver_cls = importutils.import_class(driver_path)
    return driver_cls(**kwargs)
def API(*args, **kwargs):
    """Instantiate the configured compute API implementation.

    Positional and keyword arguments are forwarded unchanged to the
    constructor of the class named by ``compute_api_class``.
    """
    loader = nova.openstack.common.importutils
    cls_path = nova.openstack.common.cfg.CONF.compute_api_class
    return loader.import_class(cls_path)(*args, **kwargs)
def load_driver(default, *args, **kwargs):
    """Instantiate the configured firewall driver.

    :param default: import path used when CONF.firewall_driver is unset
    :param args: forwarded to the driver constructor
    :param kwargs: forwarded to the driver constructor
    """
    driver_path = CONF.firewall_driver
    if not driver_path:
        driver_path = default
    driver_cls = importutils.import_class(driver_path)
    return driver_cls(*args, **kwargs)
def __init__(self, virtapi):
    """Set up the Docker driver and its configured VIF driver.

    :param virtapi: virt API handle forwarded to the base class
    """
    super(DockerDriver, self).__init__(virtapi)
    # NOTE(review): _docker starts as None — presumably the client is
    # created lazily elsewhere; not visible in this block.
    self._docker = None
    vif_path = CONF.docker.vif_driver
    self.vif_driver = importutils.import_class(vif_path)()
def API():
    """Return a fresh instance of the configured key manager API class."""
    return importutils.import_class(CONF.keymgr.api_class)()
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions.

    Walks the directory tree rooted at ``path[0]``; every ``*.py`` file
    (except ``__init__``) is loaded as ``<package><relpkg>.<module>.<Class>``
    where the class name is the module name with its first letter
    upper-cased.  Subdirectories containing ``__init__.py`` are either
    delegated to via their ``extension()`` callable or descended into.

    :param ext_mgr: extension manager; ``load_extension(classpath)`` is
        called for each discovered extension
    :param logger: logger used for skip/failure messages
    :param path: list whose first element is the directory to walk
    :param package: dotted package prefix for constructed class paths
    :param ext_list: optional whitelist of class names; others are skipped
    """
    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))
        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)
            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue
            # Try loading it
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname))
            if ext_list is not None and classname not in ext_list:
                # Lazy %-args (consistent with the warn calls below):
                # the message is only formatted if debug is enabled.
                logger.debug("Skipping extension: %s", classpath)
                continue
            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warn(_('Failed to load extension %(classpath)s: '
                              '%(exc)s'),
                            {'classpath': classpath, 'exc': exc})
        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue
            # If it has extension(), delegate...
            ext_name = "%s%s.%s.extension" % (package, relpkg, dname)
            try:
                ext = importutils.import_class(ext_name)
            except ImportError:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warn(_('Failed to load extension %(ext_name)s:'
                                  '%(exc)s'),
                                {'ext_name': ext_name, 'exc': exc})
        # Update the list of directories we'll explore...
        # (in-place assignment prunes os.walk's descent)
        dirnames[:] = subdirs
def API():
    """Instantiate the configured volume API implementation."""
    loader = nova.openstack.common.importutils
    cls_path = nova.openstack.common.cfg.CONF.volume_api_class
    return loader.import_class(cls_path)()
def get_default_image_service():
    """Instantiate the image service named by the ``image_service`` flag."""
    service_cls = importutils.import_class(FLAGS.image_service)
    return service_cls()
def get_power_manager(node, **kwargs):
    """Build the configured power manager for *node*.

    :param node: baremetal node handed to the manager constructor
    :param kwargs: extra constructor arguments, forwarded unchanged
    """
    manager_cls = importutils.import_class(FLAGS.power_manager)
    return manager_cls(node, **kwargs)
import time
import urlparse

from glance.common import exception as glance_exception

from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import importutils
from nova import utils


LOG = logging.getLogger(__name__)

FLAGS = flags.FLAGS

# Resolved lazily by import path so glance is only a soft dependency of
# this module's import graph.
GlanceClient = importutils.import_class('glance.client.Client')


def _parse_image_ref(image_href):
    """Parse an image href into composite parts.

    :param image_href: href of an image
    :returns: a tuple of the form (image_id, host, port)
    :raises ValueError

    """
    o = urlparse.urlparse(image_href)
    # Default to port 80 when the href carries no explicit port.
    port = o.port or 80
    # netloc may be "host:port"; keep only the host part.
    host = o.netloc.split(':', 1)[0]
    # The image id is the last path segment of the href.
    image_id = o.path.split('/')[-1]
    return (image_id, host, port)