def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        # Passing DeprecationWarning as a logging argument was a bug: the
        # message has no format placeholder, so interpolation would fail.
        LOG.warning(_LW('Use of empty request context is deprecated'))
        raise Exception('Empty request context')
    return context.is_admin
def kill(self):
    """Destroy the service object in the datastore."""
    self.stop()
    try:
        service_ref = objects.Service.get_by_id(
            context.get_admin_context(), self.service_id)
        service_ref.destroy()
    except exception.NotFound:
        LOG.warning(_LW('Service killed that has no database entry'))
def resource_fetch(self, ctxt, cloud):
    """Fetches the list of resources from the given cloud."""
    if cloud.type != "source":
        LOG.warning(_LW('Cloud %(cloud_name)s is not a source cloud.'),
                    {'cloud_name': cloud.name})
        # Bail out: fetching only makes sense for source clouds (matching
        # the guard in prop_update below, which returns after warning).
        return
    LOG.info(_LI("Fetching resources from the cloud %(cloud_name)s."),
             {'cloud_name': cloud.name})
    self._resource_fetch(ctxt, cloud)
def properties_update(self, context, clouds):
    cloud_objs = []
    for cloud in clouds:
        cloud_obj = objects.Cloud.get_by_id(context, cloud)
        if cloud_obj.type != "destination":
            # Lazy logging: pass the argument to LOG.warning instead of
            # eagerly %-formatting the translated string. The message also
            # now matches what this method actually does.
            LOG.warning(_LW('Cannot update properties of the '
                            'cloud: %s'), cloud)
            continue
        cloud_objs.append(cloud_obj)
    self.scheduler_rpcapi.cloud_prop_update(context, clouds=cloud_objs)
def prop_update(self, ctxt, cloud):
    """Updates properties of the cloud."""
    if cloud.type != "destination":
        LOG.warning(_LW('Cloud %(cloud)s is not a destination cloud.'),
                    {'cloud': cloud.name})
        return
    LOG.info(_LI("Updating properties of the cloud %(cloud)s."),
             {'cloud': cloud.name})
    self._prop_update(ctxt, cloud)
def _schedule(self, context, request_spec, filter_properties=None,
              filter_class_names=None):
    weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                  filter_properties,
                                                  filter_class_names)
    if not weighed_hosts:
        LOG.warning(_LW("No weighed hosts found for resource fetching."))
        return None
    return self._choose_top_host(weighed_hosts, request_spec)
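# A hedged sketch of the _choose_top_host helper called above, following the
# usual OpenStack filter-scheduler pattern: weighed hosts arrive sorted
# best-first, so the top candidate is the first element. The
# consume_from_migration bookkeeping call is an assumption by analogy with
# update_from_migration_capability; it is not code shown in this file.
def _choose_top_host(self, weighed_hosts, request_spec):
    top_host = weighed_hosts[0]
    host_state = top_host.obj
    LOG.debug("Choosing host %(host)s.", {'host': host_state.host})
    host_state.consume_from_migration(request_spec)
    return top_host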
def resource_fetch(self, context, clouds):
    cloud_objs = []
    for cloud in clouds:
        cloud_obj = objects.Cloud.get_by_id(context, cloud)
        if cloud_obj.type != "source":
            # Lazy logging: pass the argument instead of %-formatting the
            # translated string eagerly.
            LOG.warning(_LW('Cannot fetch resources from the '
                            'cloud: %s'), cloud)
            continue
        cloud_objs.append(cloud_obj)
    self.scheduler_rpcapi.resource_fetch(context, clouds=cloud_objs)
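# A hedged sketch of the scheduler RPC client method invoked above, assuming
# the conventional oslo.messaging RPCAPI layout (the '1.0' version cap and
# the client attribute are illustrative). The call is a cast, i.e.
# fire-and-forget: the API service does not wait for the scheduler's result.
def resource_fetch(self, ctxt, clouds):
    cctxt = self.client.prepare(version='1.0')
    cctxt.cast(ctxt, 'resource_fetch', clouds=clouds)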
def _load_extensions(self):
    """Load extensions specified on the command line."""
    extensions = list(self.cls_list)
    for ext_factory in extensions:
        try:
            self.load_extension(ext_factory)
        except Exception as exc:
            LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
                            '%(exc)s'),
                        {'ext_factory': ext_factory, 'exc': exc})
def wrapped(*args, **kwargs):
    while True:
        try:
            return f(*args, **kwargs)
        except db_exc.DBDeadlock:
            LOG.warning(_LW("Deadlock detected when running "
                            "'%(func_name)s': Retrying..."),
                        dict(func_name=f.__name__))
            # Retry!
            time.sleep(0.5)
            continue
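# The function above is the inner wrapper of a retry decorator. A minimal
# sketch of the enclosing decorator, assuming the name _retry_on_deadlock;
# functools.wraps preserves the wrapped function's metadata so f.__name__
# in the log message stays meaningful.
import functools


def _retry_on_deadlock(f):
    """Retry the wrapped DB API call when a deadlock is detected."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        while True:
            try:
                return f(*args, **kwargs)
            except db_exc.DBDeadlock:
                LOG.warning(_LW("Deadlock detected when running "
                                "'%(func_name)s': Retrying..."),
                            dict(func_name=f.__name__))
                time.sleep(0.5)
    return wrapped


# Hypothetical usage on a DB API function:
# @_retry_on_deadlock
# def service_update(context, service_id, values):
#     ...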
def basic_config_check(self):
    """Perform basic config checks before starting the service."""
    # Make sure report interval is less than service down time
    if self.report_interval:
        if CONF.service_down_time <= self.report_interval:
            new_down_time = int(self.report_interval * 2.5)
            LOG.warning(
                _LW("Report interval must be less than service down "
                    "time. Current config: service_down_time is "
                    "%(service_down_time)s, report_interval for this "
                    "service is %(report_interval)s. Setting global "
                    "service_down_time to %(new_down_time)s."),
                {'service_down_time': CONF.service_down_time,
                 'report_interval': self.report_interval,
                 'new_down_time': new_down_time})
            CONF.set_override('service_down_time', new_down_time)
def setup_profiler(binary, host):
    if CONF.profiler.profiler_enabled:
        _notifier = osprofiler.notifier.create(
            "Messaging", messaging, context.get_admin_context().to_dict(),
            rpc.TRANSPORT, "guts", binary, host)
        osprofiler.notifier.set(_notifier)
        LOG.warning(
            _LW("OSProfiler is enabled.\nThis means that anyone who knows "
                "any of the hmac_keys specified in "
                "/etc/guts/api-paste.ini can trace their requests.\n"
                "In real life only an operator can read this file, so "
                "there is no security issue. Note that even if someone "
                "can trigger the profiler, only an admin user can "
                "retrieve trace information.\n"
                "To disable OSProfiler set in guts.conf:\n"
                "[profiler]\nprofiler_enabled=false"))
    else:
        osprofiler.web.disable()
def get_internal_tenant_context():
    """Build and return the Guts internal tenant context object.

    This request context will only work for internal Guts operations. It
    will not be able to make requests to remote services. To do so it will
    need to use the keystone client to get an auth_token.
    """
    project_id = CONF.guts_internal_tenant_project_id
    user_id = CONF.guts_internal_tenant_user_id
    if project_id and user_id:
        return RequestContext(user_id=user_id,
                              project_id=project_id,
                              is_admin=True)
    else:
        LOG.warning(_LW('Unable to get internal tenant context: Missing '
                        'required config parameters.'))
        return None
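# A minimal sketch of registering the two config options read above,
# assuming oslo.config (the help strings are illustrative):
from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts([
    cfg.StrOpt('guts_internal_tenant_project_id',
               help='ID of the project which will be used as the Guts '
                    'internal tenant.'),
    cfg.StrOpt('guts_internal_tenant_user_id',
               help='ID of the user to be used in internal tenant '
                    'requests.'),
])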
def _setup_extensions(self, ext_mgr):
    for extension in ext_mgr.get_controller_extensions():
        collection = extension.collection
        controller = extension.controller
        if collection not in self.resources:
            LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                            'resource %(collection)s: No such resource'),
                        {'ext_name': extension.extension.name,
                         'collection': collection})
            continue
        LOG.debug('Extension %(ext_name)s extending resource: '
                  '%(collection)s',
                  {'ext_name': extension.extension.name,
                   'collection': collection})
        resource = self.resources[collection]
        resource.register_actions(controller)
        resource.register_extensions(controller)
def _update_host_state_map(self, context):
    # Get resource usage across the available nodes:
    sources = objects.ServiceList.get_all_by_topic(context,
                                                   CONF.source_topic,
                                                   disabled=False)
    dests = objects.ServiceList.get_all_by_topic(context,
                                                 CONF.destination_topic,
                                                 disabled=False)
    active_hosts = set()
    no_capabilities_hosts = set()
    for service in sources.objects + dests.objects:
        host = service.host
        if not utils.service_is_up(service):
            LOG.warning(_LW("Service is down. (host: %s)"), host)
            continue
        capabilities = self.service_states.get(host, None)
        if capabilities is None:
            no_capabilities_hosts.add(host)
            continue
        host_state = self.host_state_map.get(host)
        if not host_state:
            host_state = self.host_state_cls(host,
                                             capabilities=capabilities,
                                             service=dict(service))
            self.host_state_map[host] = host_state
        # Update capabilities and attributes in host_state
        host_state.update_from_migration_capability(capabilities,
                                                    service=dict(service))
        active_hosts.add(host)
    self._no_capabilities_hosts = no_capabilities_hosts
    # Remove non-active hosts from host_state_map
    nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
    for host in nonactive_hosts:
        LOG.info(_LI("Removing non-active host: %(host)s from "
                     "scheduler cache."), {'host': host})
        del self.host_state_map[host]
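# A hedged sketch of the utils.service_is_up helper used above, following
# the common OpenStack heartbeat pattern (this helper is assumed, not shown
# here): a service counts as up if its last heartbeat is within
# CONF.service_down_time seconds of now.
from oslo_utils import timeutils


def service_is_up(service):
    """Check whether a service is up based on its last heartbeat."""
    last_heartbeat = service.updated_at or service.created_at
    elapsed = (timeutils.utcnow(with_timezone=True) -
               last_heartbeat).total_seconds()
    return abs(elapsed) <= CONF.service_down_time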
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions."""
    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))

        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)

            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue

            # Try loading it
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname))

            if ext_list is not None and classname not in ext_list:
                logger.debug("Skipping extension: %s" % classpath)
                continue

            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warning(_LW('Failed to load extension '
                                   '%(classpath)s: %(exc)s'),
                               {'classpath': classpath, 'exc': exc})

        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue

            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" % (package, relpkg, dname))
            try:
                ext = importutils.import_class(ext_name)
            except ImportError:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warning(_LW('Failed to load extension '
                                       '%(ext_name)s: %(exc)s'),
                                   {'ext_name': ext_name, 'exc': exc})

        # Update the list of directories we'll explore...
        dirnames[:] = subdirs
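# A hedged usage sketch: the conventional entry point that hands this loader
# a contrib package (the guts.api.contrib package name is an assumption):
def standard_extensions(ext_mgr):
    import guts.api.contrib
    load_standard_extensions(ext_mgr, LOG,
                             guts.api.contrib.__path__,
                             guts.api.contrib.__package__)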