def __init__(self):
    """Set up DNS service state: pending map, DB engine, manager, poller."""
    self.pending = {}
    # pool_recycle keeps idle connections from going stale on the server.
    self.conn = sqlalchemy.engine.create_engine(
        FLAGS.sql_connection,
        pool_recycle=FLAGS.sql_idle_timeout,
        echo=False)
    manager_cls = utils.import_class(FLAGS.dns_manager)
    self.dnsmanager = manager_cls()
    # Background greenthread that polls for IP changes.
    self.eventlet = eventlet.spawn(self._pollip)
def start(self):
    """Start the service.

    Rebuilds the manager from the saved constructor arguments, registers
    this service in the database (creating the row if missing), attaches
    RPC consumers, and starts the heartbeat / periodic-task timers.
    """
    manager_class = utils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *self.saved_args,
                                 **self.saved_kwargs)
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        # Reuse an existing service record for this host/binary pair.
        service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
        self.service_id = service_ref["id"]
    except exception.NotFound:
        self._create_service_ref(ctxt)
    conn1 = rpc.Connection.instance(new=True)
    conn2 = rpc.Connection.instance(new=True)
    if self.report_interval:
        # One consumer for the shared topic, one for the host-scoped topic.
        consumer_all = rpc.AdapterConsumer(connection=conn1,
                                           topic=self.topic,
                                           proxy=self)
        consumer_node = rpc.AdapterConsumer(connection=conn2,
                                            topic="%s.%s" % (self.topic,
                                                             self.host),
                                            proxy=self)
        self.timers.append(consumer_all.attach_to_eventlet())
        self.timers.append(consumer_node.attach_to_eventlet())
        # Heartbeat so the rest of the deployment sees this service as up.
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval, now=False)
        self.timers.append(pulse)
    if self.periodic_interval:
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval, now=False)
        self.timers.append(periodic)
def __init__(self):
    """Initialize DNS service state using the shared DB engine factory."""
    self.pending = {}
    LOG.info("Connecting to database @ %s" % (FLAGS.sql_connection))
    self.conn = get_engine()
    manager_cls = utils.import_class(FLAGS.dns_manager)
    self.dnsmanager = manager_cls()
    # Greenthread polling for IP updates in the background.
    self.eventlet = eventlet.spawn(self._pollip)
def get_cost_fns(self, topic):
    """Returns a list of tuples containing weights and cost functions to
    use for weighing hosts.

    :param topic: service topic (e.g. 'compute'); only cost functions whose
        short name starts with '<topic>_' or 'noop' are included.
    :raises exception.SchedulerCostFunctionNotFound: when a configured name
        cannot be imported.
    :raises exception.SchedulerWeightFlagNotFound: when the matching
        '<fn>_weight' flag is missing.
    """
    if topic in self.cost_fns_cache:
        return self.cost_fns_cache[topic]
    cost_fns = []
    for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
        if "." in cost_fn_str:
            short_name = cost_fn_str.split(".")[-1]
        else:
            # Bare names refer to methods on this scheduler class.
            short_name = cost_fn_str
            cost_fn_str = "%s.%s.%s" % (
                __name__, self.__class__.__name__, short_name)
        if not (short_name.startswith("%s_" % topic) or
                short_name.startswith("noop")):
            continue
        try:
            # NOTE(sirp): import_class is somewhat misnamed since it can
            # import any callable from a module
            cost_fn = utils.import_class(cost_fn_str)
        except exception.ClassNotFound:
            raise exception.SchedulerCostFunctionNotFound(
                cost_fn_str=cost_fn_str)
        # BUG FIX: flag_name was referenced in the except handler below
        # without ever being bound, so a missing weight flag raised
        # NameError instead of SchedulerWeightFlagNotFound. Bind it first.
        flag_name = "%s_weight" % cost_fn.__name__
        try:
            weight = getattr(FLAGS, flag_name)
        except AttributeError:
            raise exception.SchedulerWeightFlagNotFound(
                flag_name=flag_name)
        cost_fns.append((weight, cost_fn))
    self.cost_fns_cache[topic] = cost_fns
    return cost_fns
def __init__(self, driver=None, *args, **kwargs):
    """Inits the driver from parameter or flag.

    __init__ runs on every AuthManager() call, so the driver is only
    replaced when none is set yet or a new one is explicitly requested.
    """
    self.network_manager = utils.import_object(FLAGS.network_manager)
    existing = getattr(self, 'driver', None)
    if driver or not existing:
        self.driver = utils.import_class(driver or FLAGS.auth_driver)
def __init__(self, driver=None, *args, **kwargs):
    """Inits the driver from parameter or flag.

    Called on every AuthManager() construction; keeps the currently set
    driver unless a new one is passed in.
    """
    self.network_manager = utils.import_object(FLAGS.network_manager)
    if not driver and getattr(self, 'driver', None):
        # An existing driver stays in place unless a new one is requested.
        return
    self.driver = utils.import_class(driver or FLAGS.auth_driver)
def load_standard_extensions(ext_mgr, logger, path, package): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py if ext != '.py' or root == '__init__': continue # Try loading it classname = ("%s%s.%s.%s%s" % (package, relpkg, root, root[0].upper(), root[1:])) try: ext_mgr.load_extension(classname) except Exception as exc: logger.warn(_('Failed to load extension %(classname)s: ' '%(exc)s') % locals()) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = utils.import_class(ext_name) except exception.ClassNotFound: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warn(_('Failed to load extension %(ext_name)s: ' '%(exc)s') % locals()) # Update the list of directories we'll explore... dirnames[:] = subdirs
def __init__(self):
    """Prepare the DNS service: state dict, DB engine, manager, poller."""
    self.pending = {}
    # Recycle idle DB connections so they do not go stale server-side.
    engine_kwargs = {'pool_recycle': FLAGS.sql_idle_timeout, 'echo': False}
    self.conn = sqlalchemy.engine.create_engine(FLAGS.sql_connection,
                                                **engine_kwargs)
    self.dnsmanager = utils.import_class(FLAGS.dns_manager)()
    self.eventlet = eventlet.spawn(self._pollip)
def load_standard_extensions(ext_mgr, logger, path, package): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py if ext != '.py' or root == '__init__': continue # Try loading it classname = ("%s%s.%s.%s%s" % (package, relpkg, root, root[0].upper(), root[1:])) try: ext_mgr.load_extension(classname) except Exception as exc: logger.warn(_('Failed to load extension %(classname)s: ' '%(exc)s') % locals()) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = utils.import_class(ext_name) except exception.ClassNotFound: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warn(_('Failed to load extension %(ext_name)s: ' '%(exc)s') % locals()) # Update the list of directories we'll explore... dirnames[:] = subdirs
def __init__(self):
    """Capture AMQP connection parameters and build the DNS listener."""
    self.params = {
        'hostname': FLAGS.rabbit_host,
        'port': FLAGS.rabbit_port,
        'userid': FLAGS.rabbit_userid,
        'password': FLAGS.rabbit_password,
        'virtual_host': FLAGS.rabbit_virtual_host,
    }
    # Connection and poller are created lazily, not here.
    self.connection = None
    self.eventlet = None
    listener_cls = utils.import_class(FLAGS.dns_listener)
    self.listener = listener_cls()
def __init__(self, driver=None, *args, **kwargs):
    """Inits the driver from parameter or flag.

    Runs on every AuthManager() call; the driver is only reset when none
    is set or a new one is specified. The memcache client is shared
    class-wide and created on first construction.
    """
    self.network_manager = utils.import_object(FLAGS.network_manager)
    existing = getattr(self, "driver", None)
    if driver or not existing:
        self.driver = utils.import_class(driver or FLAGS.auth_driver)
    if AuthManager.mc is None:
        AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_interval=None, *args, **kwargs):
    """Record the service identity, build its manager, and stash the
    constructor arguments so start() can rebuild the manager later.
    """
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    self.manager = utils.import_class(self.manager_class_name)(
        host=self.host, *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_interval = periodic_interval
    super(Service, self).__init__(*args, **kwargs)
    # Saved so start() can re-instantiate the manager identically.
    self.saved_args, self.saved_kwargs = args, kwargs
    self.timers = []
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_interval=None, *args, **kwargs):
    """Store service identity and instantiate the configured manager.

    The positional/keyword args are kept around for start(), which
    constructs a fresh manager with the same arguments.
    """
    self.host, self.binary, self.topic = host, binary, topic
    self.manager_class_name = manager
    manager_cls = utils.import_class(self.manager_class_name)
    self.manager = manager_cls(host=self.host, *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_interval = periodic_interval
    super(Service, self).__init__(*args, **kwargs)
    self.saved_args, self.saved_kwargs = args, kwargs
    self.timers = []
def get_filter_classes(filter_class_names):
    """Get filter classes from class names.

    :param filter_class_names: iterable of dotted names; each must resolve
        to either a filter class or a function returning a list of classes.
    :returns: flat list of filter classes.
    :raises exception.ClassNotFound: when a name resolves to something that
        is neither a filter class nor a function.
    """
    classes = []
    for cls_name in filter_class_names:
        obj = utils.import_class(cls_name)
        if _is_filter_class(obj):
            classes.append(obj)
        elif isinstance(obj, types.FunctionType):
            # Idiom fix: isinstance() instead of `type(obj) is ...`.
            # A function is expected to return a list of filter classes.
            classes.extend(obj())
        else:
            raise exception.ClassNotFound(
                class_name=cls_name,
                exception='Not a valid scheduler filter')
    return classes
def __init__(self, driver=None, *args, **kwargs):
    """Inits the driver from parameter or flag.

    Runs on every AuthManager() call; keeps an existing driver unless a
    new one is specified, and wires up a per-instance memcache client.
    """
    self.network_manager = utils.import_object(FLAGS.network_manager)
    current = getattr(self, 'driver', None)
    if driver or not current:
        self.driver = utils.import_class(driver or FLAGS.auth_driver)
    # Fall back to the in-process fake when no memcached is configured.
    if FLAGS.memcached_servers:
        import memcache
    else:
        from nova import fakememcache as memcache
    self.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def load_extension(self, ext_factory):
    """Execute an extension factory.

    ``ext_factory`` names a callable that is imported and then invoked
    with this extension manager as its single argument; the factory is
    expected to call register() at least once.
    """
    LOG.debug(_("Loading extension %s"), ext_factory)
    # Resolve the dotted name to the factory callable, then invoke it.
    factory = utils.import_class(ext_factory)
    LOG.debug(_("Calling extension factory %s"), ext_factory)
    factory(self)
def test_10_should_raise_IOError_if_format_fails(self):
    """Verify the client notices when the driver's _format is a no-op.

    If the low-level _format fails to do its job, the client's format
    path should assert on the result and raise.
    """
    base_driver = utils.import_class(FLAGS.volume_driver)

    class BadFormatter(base_driver):
        def _format(self, device_path):
            pass  # deliberately broken: formats nothing

    bad_client = volume.Client(volume_driver=BadFormatter())
    bad_client._format(self.story.device_path)
def load_extension(self, ext_factory):
    """Import and run an extension factory.

    The 'ext_factory' is the name of a callable that will be imported
    and called with one argument--the extension manager. The factory
    callable is expected to call the register() method at least once.
    """
    LOG.debug(_("Loading extension %s"), ext_factory)
    ext_callable = utils.import_class(ext_factory)
    LOG.debug(_("Calling extension factory %s"), ext_factory)
    ext_callable(self)
def test_10_should_raise_IOError_if_format_fails(self):
    """
    Tests that if the driver's _format method fails, its public format
    method will perform an assertion properly, discover it failed, and
    raise an exception.
    """
    # Subclass the configured driver with a _format that does nothing.
    volume_driver_cls = utils.import_class(FLAGS.volume_driver)

    class BadFormatter(volume_driver_cls):
        def _format(self, device_path):
            pass

    bad_client = volume.Client(volume_driver=BadFormatter())
    # NOTE(review): the docstring describes the *public* format method,
    # but the call below is to the private _format -- confirm intentional.
    bad_client._format(self.story.device_path)
def __init__(self):
    """Wire up the Ryu-backed OVS interface driver.

    Imports nova.network.linux_net if not already set, delegates plugging
    to the stock LinuxOVSInterfaceDriver, and connects to the Ryu
    OpenFlow controller's REST API.
    """
    if self.nova_linux_net is None:
        self.nova_linux_net = utils.import_object('nova.network.linux_net')

    # Delegate standard OVS plumbing to the stock driver.
    cls = utils.import_class(
        'nova.network.linux_net.LinuxOVSInterfaceDriver')
    self.parent = cls()
    LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
    self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
    self.datapath_id = ovs_utils.get_datapath_id(
        FLAGS.linuxnet_ovs_integration_bridge)

    # Only nova-network manages gateway interfaces: drop gateway-to-gateway
    # forwarding for both IPv4 and IPv6, then flush the rules.
    if self.nova_linux_net.binary_name == 'nova-network':
        for tables in [self.nova_linux_net.iptables_manager.ipv4,
                       self.nova_linux_net.iptables_manager.ipv6]:
            tables['filter'].add_rule(
                'FORWARD',
                '--in-interface gw-+ --out-interface gw-+ -j DROP')
        self.nova_linux_net.iptables_manager.apply()
def _get_manager(self):
    """Initialize a Manager object appropriate for this service.

    Use the service name to look up a Manager subclass from the
    configuration and initialize an instance. If no class name is
    configured, just return None.

    :returns: a Manager instance, or None.
    """
    fl = '%s_manager' % self.name
    # Idiom fix (PEP 8 E713): `x not in y` instead of `not x in y`.
    if fl not in FLAGS:
        return None

    manager_class_name = FLAGS.get(fl, None)
    if not manager_class_name:
        return None

    manager_class = utils.import_class(manager_class_name)
    return manager_class()
def _get_manager(self):
    """Build the Manager configured for this service, if any.

    Looks up the '<name>_manager' flag; returns an instance of the
    configured Manager subclass, or None when nothing is configured.

    :returns: a Manager instance, or None.
    """
    flag = '%s_manager' % self.name
    if flag in FLAGS:
        manager_class_name = FLAGS.get(flag, None)
        if manager_class_name:
            return utils.import_class(manager_class_name)()
    return None
def get_cost_functions(self, topic=None):
    """Returns a list of tuples containing weights and cost functions to
    use for weighing hosts
    """
    if topic is None:
        # Schedulers only support compute right now.
        topic = "compute"
    if topic in self.cost_function_cache:
        return self.cost_function_cache[topic]

    cost_fns = []
    for cost_fn_str in FLAGS.least_cost_functions:
        if '.' in cost_fn_str:
            short_name = cost_fn_str.split('.')[-1]
        else:
            # Bare names refer to methods on this scheduler class.
            short_name = cost_fn_str
            cost_fn_str = "%s.%s.%s" % (
                __name__, self.__class__.__name__, short_name)
        # Keep only functions for this topic (or topic-agnostic noops).
        if not (short_name.startswith('%s_' % topic) or
                short_name.startswith('noop')):
            continue

        try:
            # NOTE: import_class is somewhat misnamed since
            # the weighing function can be any non-class callable
            # (i.e., no 'self')
            cost_fn = utils.import_class(cost_fn_str)
        except exception.ClassNotFound:
            raise exception.SchedulerCostFunctionNotFound(
                cost_fn_str=cost_fn_str)

        try:
            # Each cost function pairs with a '<name>_weight' flag.
            flag_name = "%s_weight" % cost_fn.__name__
            weight = getattr(FLAGS, flag_name)
        except AttributeError:
            raise exception.SchedulerWeightFlagNotFound(
                flag_name=flag_name)
        cost_fns.append((weight, cost_fn))

    self.cost_function_cache[topic] = cost_fns
    return cost_fns
def get_cost_fns(self, topic):
    """Returns a list of tuples containing weights and cost functions to
    use for weighing hosts.

    :param topic: service topic; only cost functions whose short name
        starts with '<topic>_' or 'noop' are included.
    :raises exception.SchedulerCostFunctionNotFound: when a configured
        name cannot be imported.
    :raises exception.SchedulerWeightFlagNotFound: when the matching
        '<fn>_weight' flag is missing.
    """
    if topic in self.cost_fns_cache:
        return self.cost_fns_cache[topic]
    cost_fns = []
    for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
        if '.' in cost_fn_str:
            short_name = cost_fn_str.split('.')[-1]
        else:
            # Bare names refer to methods on this scheduler class.
            short_name = cost_fn_str
            cost_fn_str = "%s.%s.%s" % (__name__,
                                        self.__class__.__name__,
                                        short_name)
        if not (short_name.startswith('%s_' % topic) or
                short_name.startswith('noop')):
            continue
        try:
            # NOTE(sirp): import_class is somewhat misnamed since it can
            # import any callable from a module
            cost_fn = utils.import_class(cost_fn_str)
        except exception.ClassNotFound:
            raise exception.SchedulerCostFunctionNotFound(
                cost_fn_str=cost_fn_str)
        # BUG FIX: flag_name was used in the except handler below without
        # ever being bound, turning a missing weight flag into a NameError
        # instead of SchedulerWeightFlagNotFound. Bind it before getattr.
        flag_name = "%s_weight" % cost_fn.__name__
        try:
            weight = getattr(FLAGS, flag_name)
        except AttributeError:
            raise exception.SchedulerWeightFlagNotFound(
                flag_name=flag_name)
        cost_fns.append((weight, cost_fn))
    self.cost_fns_cache[topic] = cost_fns
    return cost_fns
def __init__(self, application, limits=None, limiter=None, **kwargs):
    """Initialize a new `RateLimitingMiddleware` wrapping `application`.

    @param application: WSGI application to wrap
    @param limits: String describing limits
    @param limiter: String identifying class for representing limits

    Other parameters are passed to the constructor for the limiter.
    """
    base_wsgi.Middleware.__init__(self, application)

    # Resolve the limiter class: the default, or a dotted-name override.
    limiter_cls = (Limiter if limiter is None
                   else utils.import_class(limiter))

    # Parse the limit description string, when one is provided.
    parsed = (limiter_cls.parse_limits(limits)
              if limits is not None else None)

    self._limiter = limiter_cls(parsed or DEFAULT_LIMITS, **kwargs)
def __init__(self, application, limits=None, limiter=None, **kwargs):
    """Set up rate limiting around the given WSGI application.

    @param application: WSGI application to wrap
    @param limits: String describing limits
    @param limiter: String identifying class for representing limits

    Other parameters are passed to the constructor for the limiter.
    """
    base_wsgi.Middleware.__init__(self, application)
    if limiter is None:
        limiter_cls = Limiter
    else:
        # A dotted class name selects a custom limiter implementation.
        limiter_cls = utils.import_class(limiter)
    parsed = None
    if limits is not None:
        parsed = limiter_cls.parse_limits(limits)
    self._limiter = limiter_cls(parsed or DEFAULT_LIMITS, **kwargs)
def __init__(self):
    """Instantiate the configured DNS manager."""
    manager_cls = utils.import_class(FLAGS.dns_manager)
    self.manager = manager_cls()
def __init__(self, app, controller):
    """Wrap `app`, instantiating the controller named by `controller`."""
    super(Requestify, self).__init__(app)
    controller_cls = utils.import_class(controller)
    self.controller = controller_cls()
def __init__(self):
    """Build self.manager from the FLAGS.dns_manager dotted name."""
    self.manager = utils.import_class(FLAGS.dns_manager)()
from glance.common import exception as glance_exception

from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.image import service

LOG = logging.getLogger('nova.image.glance')

FLAGS = flags.FLAGS

# Resolve the Glance client class from its dotted name.
GlanceClient = utils.import_class('glance.client.Client')


def _parse_image_ref(image_href):
    """Parse an image href into composite parts.

    :param image_href: href of an image
    :returns: a tuple of the form (image_id, host, port)
    :raises ValueError
    """
    o = urlparse(image_href)
    # Default to plain HTTP when no explicit port is present.
    port = o.port or 80
    host = o.netloc.split(':', 1)[0]
    # NOTE(review): image ids are assumed numeric here -- a non-integer
    # last path segment (e.g. a UUID) raises ValueError.
    image_id = int(o.path.split('/')[-1])
    return (image_id, host, port)
import datetime from glance.common import exception as glance_exception from nova import exception from nova import flags from nova import log as logging from nova import utils from nova.image import service LOG = logging.getLogger('nova.image.glance') FLAGS = flags.FLAGS GlanceClient = utils.import_class('glance.client.Client') class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format', 'container_format'] # NOTE(sirp): Overriding to use _translate_to_service provided by # BaseImageService SERVICE_IMAGE_ATTRS = service.BaseImageService.BASE_IMAGE_ATTRS +\ GLANCE_ONLY_ATTRS def __init__(self, client=None): # FIXME(sirp): can we avoid dependency-injection here by using # stubbing out a fake?
def get_default_image_service():
    """Return an instance of the image service configured in FLAGS."""
    service_cls = utils.import_class(FLAGS.image_service)
    return service_cls()
def __init__(self):
    """Connect to the configured physical quantum filter backend."""
    LOG.debug("QFC = %s", FLAGS.physical_quantum_filter_connection)
    connection_cls = utils.import_class(
        FLAGS.physical_quantum_filter_connection)
    self._connection = connection_cls()
def init_guestfs(cls):
    """Return a new GuestFS handle; the class is resolved lazily and cached."""
    guestfs_cls = cls.__guestfs_cls
    if guestfs_cls is None:
        guestfs_cls = utils.import_class('guestfs.GuestFS')
        cls.__guestfs_cls = guestfs_cls
    return guestfs_cls()
def get_default_image_service():
    """Instantiate the image service implementation named by FLAGS."""
    return utils.import_class(FLAGS.image_service)()
def __init__(self, app, controller):
    """Middleware init: resolve the dotted controller name and build it."""
    super(Requestify, self).__init__(app)
    ctrl_cls = utils.import_class(controller)
    self.controller = ctrl_cls()
from glance.common import exception as glance_exception

from nova import exception
from nova import flags
from nova import log as logging
from nova import utils

LOG = logging.getLogger("nova.image.glance")

FLAGS = flags.FLAGS

# Resolve the Glance client class from its dotted name.
GlanceClient = utils.import_class("glance.client.Client")


def _parse_image_ref(image_href):
    """Parse an image href into composite parts.

    :param image_href: href of an image
    :returns: a tuple of the form (image_id, host, port)
    :raises ValueError
    """
    parsed = urlparse(image_href)
    # Default to plain HTTP when no explicit port is given.
    port = parsed.port or 80
    host = parsed.netloc.split(":", 1)[0]
    image_id = parsed.path.split("/")[-1]
    return image_id, host, port
def init_guestfs(cls):
    """Return a fresh guestfs.GuestFS instance.

    The class is imported on first use and cached at class level.
    """
    if cls.__guestfs_cls is None:
        # Resolve guestfs.GuestFS lazily and cache it on the class.
        cls.__guestfs_cls = utils.import_class('guestfs.GuestFS')
    return cls.__guestfs_cls()