def test_bound_method(self):
    """Bound methods should resolve to their defining class's name."""
    instance = Class()
    # Fully-qualified (default) form includes the module path.
    qualified = reflection.get_class_name(instance.method)
    self.assertEqual('%s.Class' % __name__, qualified)
    # test with fully_qualified=False: just the bare class name.
    bare = reflection.get_class_name(instance.method, fully_qualified=False)
    self.assertEqual('Class', bare)
def _apply_region_proxy(self, proxy_list):
    """Wrap the dogpile region with each configured ProxyBackend.

    Entries may be import strings or classes; anything that is not a
    ``proxy.ProxyBackend`` subclass is skipped with a warning.
    """
    if not isinstance(proxy_list, list):
        return
    accepted = []
    for entry in proxy_list:
        if isinstance(entry, str):
            LOG.debug('Importing class %s as KVS proxy.', entry)
            cls = importutils.import_class(entry)
        else:
            cls = entry
        if issubclass(cls, proxy.ProxyBackend):
            accepted.append(cls)
        else:
            LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
                        reflection.get_class_name(
                            cls, fully_qualified=False))
    # Wrap in reverse so the first proxy listed ends up outermost.
    for cls in reversed(accepted):
        LOG.info(_LI("Adding proxy '%(proxy)s' to KVS %(name)s."),
                 {'proxy': reflection.get_class_name(
                     cls, fully_qualified=False),
                  'name': self._region.name})
        self._region.wrap(cls)
def __repr__(self):
    """Debug representation showing liveness and any details filter."""
    cls_name = reflection.get_class_name(self, fully_qualified=False)
    cb = self.callback
    if cb is None:
        body = "%s object at 0x%x; dead" % (cls_name, id(self))
    else:
        body = "%s object at 0x%x calling into '%r'" % (
            cls_name, id(self), cb)
    if self._details_filter is not None:
        body += " using details filter '%r'" % self._details_filter
    return "<%s>" % body
def wrapper(f, instance, args, kwargs):
    # Emit a deprecation warning describing *f* (class, function, or
    # method), then call through to it unchanged.
    qualified, f_name = _get_qualified_name(f)
    if qualified:
        # Name already carries its owner; only choose display affixes.
        if inspect.isclass(f):
            prefix_pre = "Using class"
            thing_post = ''
        else:
            prefix_pre = "Using function/method"
            thing_post = '()'
    if not qualified:
        prefix_pre = "Using function/method"
        base_name = None
        if instance is None:
            # Decorator was used on a class
            if inspect.isclass(f):
                prefix_pre = "Using class"
                thing_post = ''
                module_name = _get_module_name(inspect.getmodule(f))
                # Classes defined in __main__ keep the short name since
                # the module path would be meaningless to the reader.
                if module_name == '__main__':
                    f_name = reflection.get_class_name(
                        f, fully_qualified=False)
                else:
                    f_name = reflection.get_class_name(
                        f, fully_qualified=True)
            # Decorator was used on a function
            else:
                thing_post = '()'
                module_name = _get_module_name(inspect.getmodule(f))
                if module_name != '__main__':
                    f_name = reflection.get_callable_name(f)
        # Decorator was used on a classmethod or instancemethod
        else:
            thing_post = '()'
            base_name = reflection.get_class_name(instance,
                                                  fully_qualified=False)
        if base_name:
            thing_name = ".".join([base_name, f_name])
        else:
            thing_name = f_name
    else:
        thing_name = f_name
    if thing_post:
        thing_name += thing_post
    prefix = prefix_pre + " '%s' is deprecated" % (thing_name)
    out_message = _utils.generate_message(
        prefix, version=version,
        removal_version=removal_version,
        message=message)
    _utils.deprecation(out_message, stacklevel)
    return f(*args, **kwargs)
def create_legacy_driver(driver_class):
    """Helper function to deprecate the original driver classes.

    The keystone.{subsystem}.Driver classes are deprecated in favor of the
    new versioned classes. This function creates a new class based on a
    versioned class and adds a deprecation message when it is used.

    This will allow existing custom drivers to work when the Driver class
    is renamed to include a version.

    Example usage:

        Driver = create_legacy_driver(CatalogDriverV8)

    """
    module_name = driver_class.__module__
    versioned_name = reflection.get_class_name(driver_class)

    class Driver(driver_class):

        @versionutils.deprecated(
            as_of=versionutils.deprecated.LIBERTY,
            what='%s.Driver' % module_name,
            in_favor_of=versioned_name,
            remove_in=+2)
        def __init__(self, *args, **kwargs):
            super(Driver, self).__init__(*args, **kwargs)

    return Driver
def _infer_entity_data(self, entity):
    """Fill status and object fields from *entity*, keyed by class name."""
    if self.status is None:
        self.status = entity.status
    if self.status_reason is None:
        self.status_reason = entity.status_reason
    kind = reflection.get_class_name(entity, fully_qualified=False).upper()
    if kind == 'CLUSTER':
        self.obj_id = entity.id
        self.cluster_id = entity.id
        self.obj_name = entity.name
        self.obj_type = 'CLUSTER'
    elif kind == 'NODE':
        self.obj_id = entity.id
        self.cluster_id = entity.cluster_id
        self.obj_name = entity.name
        self.obj_type = 'NODE'
    elif kind == 'CLUSTERACTION':
        # Actions point at their target cluster.
        self.obj_id = entity.target
        self.cluster_id = entity.target
        self.obj_name = entity.cluster.name
        self.obj_type = 'CLUSTER'
    elif kind == 'NODEACTION':
        self.obj_id = entity.target
        self.cluster_id = entity.node.cluster_id
        self.obj_name = entity.node.name
        self.obj_type = 'NODE'
def __repr__(self):
    """Debug representation naming the wrapped callback."""
    text = "%s object at 0x%x calling into '%r'" % (
        reflection.get_class_name(self, fully_qualified=False),
        id(self), self._callback)
    if self._details_filter is not None:
        text += " using details filter '%r'" % self._details_filter
    return "<%s>" % text
def __repr__(self):
    """Show public, non-manager attributes as key=value pairs."""
    public = sorted(k for k in self.__dict__.keys()
                    if k[0] != '_' and k != 'manager')
    pairs = ", ".join("%s=%s" % (k, getattr(self, k)) for k in public)
    return "<%s %s>" % (
        reflection.get_class_name(self, fully_qualified=False), pairs)
def wrapper(*args, **kwargs):
    # NOTE(tovin07): Workaround for this issue
    # F823 local variable 'info'
    # (defined in enclosing scope on line xxx)
    # referenced before assignment
    info_ = info
    if "name" not in info_["function"]:
        # Get this once (as it should **not** be changing in
        # subsequent calls).
        info_["function"]["name"] = reflection.get_callable_name(f)
    if not hide_args:
        info_["function"]["args"] = str(args)
        info_["function"]["kwargs"] = str(kwargs)
    stop_info = None
    try:
        start(name, info=info_)
        result = f(*args, **kwargs)
    except Exception as ex:
        # Record the failure's type and message so the trace span
        # shows what went wrong, then let the exception propagate.
        stop_info = {
            "etype": reflection.get_class_name(ex),
            "message": str(ex)
        }
        raise
    else:
        if not hide_result:
            stop_info = {"function": {"result": repr(result)}}
        return result
    finally:
        # Always close the span, attaching failure/result info if any.
        if stop_info:
            stop(info=stop_info)
        else:
            stop()
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval.

    :param context: context passed to each task invocation.
    :param raise_on_error: when True, a task exception propagates
        instead of being logged and swallowed.
    :returns: number of seconds the caller may idle before any task
        is next due.
    """
    cls_name = reflection.get_class_name(self, fully_qualified=False)
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([cls_name, task_name])
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]
        # Check if due, if not skip
        idle_for = min(idle_for, spacing)
        if last_run is not None:
            delta = last_run + spacing - time.time()
            if delta > 0:
                idle_for = min(idle_for, delta)
                continue
        LOG.debug("Running periodic task %(full_task_name)s",
                  {"full_task_name": full_task_name})
        # Align the recorded run time to the spacing boundary so drift
        # does not accumulate across runs.
        self._periodic_last_run[task_name] = _nearest_boundary(
            last_run, spacing)
        try:
            task(self, context)
        except Exception:
            if raise_on_error:
                raise
            LOG.exception(_LE("Error during %(full_task_name)s"),
                          {"full_task_name": full_task_name})
        # Yield to other (green)threads between tasks.
        time.sleep(0)
    return idle_for
def _process_broadcast_message(self, channel, message):
    """Dispatch *message* to every handler class that matches it.

    :param channel: channel the message arrived on.
    :param message: the message to dispatch.
    :raises excp.Dying: if the bot is shutting down (before or between
        handlers).

    Updates each handler class's run statistics (ran/failed counts and
    total run time) as a side effect; handler exceptions are logged and
    swallowed so one failing handler does not block the rest.
    """
    if self.dead.is_set() or self.quiescing:
        raise excp.Dying
    LOG.debug("Processing %s message: %s", channel.name.lower(), message)
    self._capture_occurrence(channel, message)
    for h_cls in list(self.handlers):
        if self.dead.is_set():
            raise excp.Dying
        h_match = h_cls.handles(message, channel, h_cls.fetch_config(self))
        if not h_match:
            continue
        h = h_cls(self, message)
        with self._capture_for_record(channel, message, h):
            h_cls_stats = h_cls.stats
            h_cls_stats.ran += 1
            try:
                h.run(h_match)
            except Exception:
                LOG.exception("Processing %s with '%s' failed",
                              message, reflection.get_class_name(h_cls))
                h_cls_stats.failed += 1
            finally:
                # This accounting was previously duplicated in both the
                # except and else branches; a finally keeps it in one
                # place with identical behavior.
                try:
                    h_cls_stats.total_run_time += h.watch.elapsed()
                except RuntimeError:
                    # Watch was never started; nothing to record.
                    pass
def _insert_periodics(self, scheduler):
    """Register the danger-zone detector as a cron job, if configured."""
    try:
        danger_period = self.config.danger_period
    except AttributeError:
        # No period configured; nothing to schedule.
        return
    runner = periodics.DangerZoneDetector(self)
    if not runner.is_enabled(self):
        return
    runner_name = reflection.get_class_name(runner)
    runner_description = periodics.DangerZoneDetector.__doc__
    trigger = cron.CronTrigger.from_crontab(danger_period,
                                            timezone=self.config.tz)
    # Stable id derived from the runner identity and schedule.
    job_id = utils.hash_pieces(
        [runner_name, danger_period, runner_description], max_len=8)
    scheduler.add_job(runner, trigger=trigger, jobstore='memory',
                      name="\n".join([runner_name, runner_description]),
                      id=job_id, coalesce=True)
def __str__(self):
    """Render as ``ClassName(strategy=<name>)`` ('???' when unset)."""
    if self.strategy is not None:
        strategy_name = self.strategy.name
    else:
        strategy_name = "???"
    cls_name = reflection.get_class_name(self, fully_qualified=False)
    return "%s(strategy=%s)" % (cls_name, strategy_name)
def create_legacy_driver(driver_class):
    """Helper function to deprecate the original driver classes.

    The keystone.{subsystem}.Driver classes are deprecated in favor of the
    new versioned classes. This function creates a new class based on a
    versioned class and adds a deprecation message when it is used.

    This will allow existing custom drivers to work when the Driver class
    is renamed to include a version.

    Example usage:

        Driver = create_legacy_driver(CatalogDriverV8)

    """
    legacy_module = driver_class.__module__
    new_class_name = reflection.get_class_name(driver_class)

    class Driver(driver_class):

        # Warn on construction, pointing users at the versioned class.
        @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
                                 what='%s.Driver' % legacy_module,
                                 in_favor_of=new_class_name,
                                 remove_in=+2)
        def __init__(self, *args, **kwargs):
            super(Driver, self).__init__(*args, **kwargs)

    return Driver
def _make_html_response(self, results, healthy):
    """Render the health-check results as an HTML page.

    :param results: iterable of health-check result objects.
    :param healthy: overall health flag passed through to the template.
    :returns: tuple of (body, content-type).
    """
    try:
        hostname = socket.gethostname()
    except socket.error:
        hostname = None
    translated_results = []
    for result in results:
        translated_results.append({
            'details': result.details or '',
            'reason': result.reason,
            'class': reflection.get_class_name(result,
                                               fully_qualified=False),
        })
    params = {
        'healthy': healthy,
        'hostname': hostname,
        'results': translated_results,
        'detailed': self._show_details,
        'now': str(timeutils.utcnow()),
        'python_version': sys.version,
        'platform': platform.platform(),
        'gc': {
            'counts': gc.get_count(),
            'threshold': gc.get_threshold(),
        },
        'threads': self._get_threadstacks(),
        # BUG FIX: this previously called _get_threadstacks() again, so
        # the greenthreads section showed OS-thread stacks. Use the
        # greenlet stacks, matching the JSON response handler.
        'greenthreads': self._get_greenstacks(),
    }
    body = _expand_template(self.HTML_RESPONSE_TEMPLATE, params)
    return (body.strip(), 'text/html')
def local_error_name(self, error):
    """Returns the name of the error with any _Remote postfix removed.

    :param error: Remote raised error to derive the name from.
    """
    full_name = reflection.get_class_name(error, fully_qualified=False)
    # Everything before the '_Remote' marker is the local class name.
    return full_name.partition('_Remote')[0]
def _get_from_cache(self, target_self, *args, skip_cache=False, **kwargs):
    """Return the memoized result for this call, computing it on a miss.

    When ``skip_cache`` is true the cached value is ignored but the
    freshly-computed result is still stored. Unhashable arguments
    disable caching for the call (logged at debug).
    """
    cls_name = reflection.get_class_name(target_self,
                                         fully_qualified=False)
    func_name = "%(module)s.%(class)s.%(func_name)s" % {
        'module': target_self.__module__,
        'class': cls_name,
        'func_name': self.func.__name__,
    }
    cache_key = (func_name,) + args
    if kwargs:
        cache_key += helpers.dict2tuple(kwargs)
    # oslo.cache expects a string or a buffer
    cache_key = str(cache_key)
    if skip_cache:
        LOG.debug('Skipping getting result from cache for %s.', func_name)
        cached = self._not_cached
    else:
        try:
            cached = target_self._cache.get(cache_key)
        except TypeError:
            LOG.debug("Method %(func_name)s cannot be cached due to "
                      "unhashable parameters: args: %(args)s, kwargs: "
                      "%(kwargs)s",
                      {'func_name': func_name,
                       'args': args,
                       'kwargs': kwargs})
            return self.func(target_self, *args, **kwargs)
    if cached is self._not_cached:
        cached = self.func(target_self, *args, **kwargs)
        target_self._cache.set(cache_key, cached)
    return cached
def __init__(self, timestamp, level, entity=None, **kwargs):
    """Build an event record from keyword data and an optional entity.

    :param timestamp: time the event occurred.
    :param level: severity level of the event.
    :param entity: optional object whose id/name seed ``obj_id`` and
        ``obj_name``; its class name (upper-cased) becomes ``obj_type``.
    """
    self.timestamp = timestamp
    self.level = level
    self.id = kwargs.get('id')
    self.user_id = kwargs.get('user_id')
    self.action = kwargs.get('action')
    self.status = kwargs.get('status')
    self.status_reason = kwargs.get('status_reason')
    self.obj_id = kwargs.get('obj_id')
    self.obj_type = kwargs.get('obj_type')
    self.obj_name = kwargs.get('obj_name')
    self.metadata = kwargs.get('metadata')
    cntx = kwargs.get('context')
    if cntx is not None:
        # NOTE(review): this overwrites user_id with the context's
        # *project* attribute, which looks like a mismatch — confirm it
        # is intentional before relying on user_id's meaning here.
        self.user_id = cntx.project
    if entity is not None:
        # Entity-derived fields take precedence over kwargs values.
        self.obj_id = entity.id
        self.obj_name = entity.name
        e_type = reflection.get_class_name(entity, fully_qualified=False)
        self.obj_type = e_type.upper()
def _get_from_cache(self, target_self, *args, **kwargs):
    """Memoize the wrapped method's result on ``target_self._cache``."""
    cls_name = reflection.get_class_name(target_self,
                                         fully_qualified=False)
    func_name = "%(module)s.%(class)s.%(func_name)s" % {
        'module': target_self.__module__,
        'class': cls_name,
        'func_name': self.func.__name__,
    }
    cache_key = (func_name,) + args
    if kwargs:
        cache_key += dict2tuple(kwargs)
    try:
        cached = target_self._cache.get(cache_key, self._not_cached)
    except TypeError:
        # Unhashable arguments cannot form a cache key; just call through.
        LOG.debug("Method %(func_name)s cannot be cached due to "
                  "unhashable parameters: args: %(args)s, kwargs: "
                  "%(kwargs)s",
                  {'func_name': func_name,
                   'args': args,
                   'kwargs': kwargs})
        return self.func(target_self, *args, **kwargs)
    if cached is self._not_cached:
        cached = self.func(target_self, *args, **kwargs)
        target_self._cache.set(cache_key, cached, None)
    return cached
def moved_class(new_class, old_class_name, old_module_name, message=None,
                version=None, removal_version=None, stacklevel=3):
    """Deprecates a class that was moved to another location.

    This creates a 'new-old' type that can be used for a deprecation
    period that can be inherited from. This will emit warnings when the
    old locations class is initialized, telling where the new and
    improved location for the old class now is.
    """
    old_name = ".".join((old_module_name, old_class_name))
    new_name = reflection.get_class_name(new_class)
    prefix = _CLASS_MOVED_PREFIX_TPL % (old_name, new_name)
    out_message = _utils.generate_message(
        prefix, message=message, version=version,
        removal_version=removal_version)

    def decorator(f):
        # Use the older functools until the following is available:
        #
        # https://bitbucket.org/gutworth/six/issue/105
        @functools.wraps(f, assigned=("__name__", "__doc__"))
        def wrapper(self, *args, **kwargs):
            _utils.deprecation(out_message, stacklevel=stacklevel)
            return f(self, *args, **kwargs)

        return wrapper

    # Build the shim type under the old name/module and wrap its
    # constructor so instantiation emits the deprecation warning.
    shim = type(old_class_name, (new_class,), {})
    shim.__module__ = old_module_name
    shim.__init__ = decorator(shim.__init__)
    return shim
def _infer_entity_data(self, entity):
    """Derive oid/oname/otype/cluster_id from *entity*'s class name."""
    if self.status is None:
        self.status = entity.status
    if self.status_reason is None:
        self.status_reason = entity.status_reason
    kind = reflection.get_class_name(entity, fully_qualified=False).upper()
    if kind == 'CLUSTER':
        self.oid, self.cluster_id = entity.id, entity.id
        self.oname, self.otype = entity.name, 'CLUSTER'
    elif kind == 'NODE':
        self.oid, self.cluster_id = entity.id, entity.cluster_id
        self.oname, self.otype = entity.name, 'NODE'
    elif kind == 'CLUSTERACTION':
        self.oid, self.cluster_id = entity.target, entity.target
        self.oname, self.otype = entity.cluster.name, 'CLUSTER'
    elif kind == 'NODEACTION':
        self.oid, self.cluster_id = entity.target, entity.node.cluster_id
        self.oname, self.otype = entity.node.name, 'NODE'
    else:
        # Unknown entity kind: record the target id only.
        self.oid, self.cluster_id = entity.target, ''
        self.oname, self.otype = '', ''
def _make_json_response(self, results, healthy):
    """Render the health-check results as a JSON document."""
    if not self._show_details:
        body = {
            'reasons': [result.reason for result in results],
            'detailed': False,
        }
        return (self._pretty_json_dumps(body), 'application/json')
    body = {
        'detailed': True,
        'python_version': sys.version,
        'now': str(timeutils.utcnow()),
        'platform': platform.platform(),
        'gc': {
            'counts': gc.get_count(),
            'threshold': gc.get_threshold(),
        },
    }
    body['reasons'] = [{
        'reason': result.reason,
        'details': result.details or '',
        'class': reflection.get_class_name(result, fully_qualified=False),
    } for result in results]
    body['greenthreads'] = self._get_greenstacks()
    body['threads'] = self._get_threadstacks()
    return (self._pretty_json_dumps(body), 'application/json')
def to_dict(self):
    """Return json-serializable request.

    To convert requests that have failed due to some exception this will
    convert all `failure.Failure` objects into dictionaries (which will
    then be reconstituted by the receiver).
    """
    request = {
        'task_cls': reflection.get_class_name(self._task),
        'task_name': self._task.name,
        'task_version': self._task.version,
        'action': self._action,
        'arguments': self._arguments,
    }
    if 'result' in self._kwargs:
        result = self._kwargs['result']
        if isinstance(result, ft.Failure):
            request['result'] = ('failure', failure_to_dict(result))
        else:
            request['result'] = ('success', result)
    if 'failures' in self._kwargs:
        request['failures'] = dict(
            (task, failure_to_dict(fail))
            for task, fail in six.iteritems(self._kwargs['failures']))
    return request
def setup(self):
    """Create the WSGI server and executor, then log routed patterns."""
    bind_port = self.port
    bind_addr = '0.0.0.0' if self.exposed else 'localhost'
    # SSL material is optional; missing config attributes mean plain HTTP.
    try:
        keyfile = self.ssl_config.private_key.path
    except AttributeError:
        keyfile = None
    try:
        certfile = self.ssl_config.cert.path
    except AttributeError:
        certfile = None
    executor = futurist.ThreadPoolExecutor(max_workers=self.max_workers)
    server = make_server(bind_addr, bind_port, self.wsgi_app,
                         executor, certfile=certfile, keyfile=keyfile)
    server_base = 'https' if (keyfile or certfile) else 'http'
    server_host, server_port = server.server_address
    for pat, ok_methods, _maybe_handler in getattr(self.wsgi_app,
                                                   'urls', []):
        LOG.info("Will match %s requests that match pattern"
                 " '%s' on port %s on %s://%s for app: %s (dispatching"
                 " into a worker pool/executor of size %s)",
                 ", ".join(sorted(ok_methods)), pat.pattern,
                 server_port, server_base, server_host,
                 reflection.get_class_name(self.wsgi_app),
                 self.max_workers)
    self.server = server
    self.executor = executor
    self._server_base = server_base
    self._server_port = server_port
def main():
    # NOTE(dmitryme): since we do not read stderr in the main process,
    # we need to flush it somewhere, otherwise both processes might
    # hang because of i/o buffer overflow.
    with open('/dev/null', 'w') as sys.stderr:
        # Serve forever: read a pickled (func, args, kwargs) triple from
        # stdin, invoke it, and write a pickled result dict to stdout.
        while True:
            result = dict()
            try:
                # TODO(elmiko) these pickle usages should be
                # reinvestigated to determine a more secure manner to
                # deploy remote commands.
                if isinstance(sys.stdin, _io.TextIOWrapper):
                    # Python 3: pickles must come from the binary buffer.
                    func = pickle.load(sys.stdin.buffer)  # nosec
                    args = pickle.load(sys.stdin.buffer)  # nosec
                    kwargs = pickle.load(sys.stdin.buffer)  # nosec
                else:
                    func = pickle.load(sys.stdin)  # nosec
                    args = pickle.load(sys.stdin)  # nosec
                    kwargs = pickle.load(sys.stdin)  # nosec
                result['output'] = func(*args, **kwargs)
            except BaseException as e:
                # Ship any failure back to the parent instead of dying.
                cls_name = reflection.get_class_name(e,
                                                     fully_qualified=False)
                result['exception'] = cls_name + ': ' + str(e)
                result['traceback'] = traceback.format_exc()
            # Protocol 2 keeps compatibility with Python 2 readers.
            if isinstance(sys.stdin, _io.TextIOWrapper):
                pickle.dump(result, sys.stdout.buffer, protocol=2)  # nosec
            else:
                pickle.dump(result, sys.stdout, protocol=2)  # nosec
            sys.stdout.flush()
def __repr__(self):
    """Represent the object by its public (non-manager) attributes."""
    keys = sorted(k for k in self.__dict__.keys()
                  if k[0] != '_' and k != 'manager')
    details = ", ".join("%s=%s" % (k, getattr(self, k)) for k in keys)
    name = reflection.get_class_name(self, fully_qualified=False)
    return "<%s %s>" % (name, details)
def wrapper(*args, **kwargs):
    # NOTE(tovin07): Workaround for this issue
    # F823 local variable 'info'
    # (defined in enclosing scope on line xxx)
    # referenced before assignment
    info_ = info
    if "name" not in info_["function"]:
        # Get this once (as it should **not** be changing in
        # subsequent calls).
        info_["function"]["name"] = reflection.get_callable_name(f)
    if not hide_args:
        info_["function"]["args"] = str(args)
        info_["function"]["kwargs"] = str(kwargs)
    stop_info = None
    try:
        start(name, info=info_)
        result = f(*args, **kwargs)
    except Exception as ex:
        # Record the failure's type and message for the trace span,
        # then let the exception propagate.
        stop_info = {
            "etype": reflection.get_class_name(ex),
            "message": six.text_type(ex)
        }
        raise
    else:
        if not hide_result:
            stop_info = {"function": {"result": repr(result)}}
        return result
    finally:
        # Always close the span, attaching failure/result info if any.
        if stop_info:
            stop(info=stop_info)
        else:
            stop()
def __str__(self):
    """Render the error as '<Name> (HTTP <code>) <details>'."""
    # BUG FIX: the adjacent string literals were missing a separating
    # space and rendered "is notavailable.".
    self.details = _("Requested version of Heat API is not "
                     "available.")
    return (_("%(name)s (HTTP %(code)s) %(details)s") % {
        'name': reflection.get_class_name(self, fully_qualified=False),
        'code': self.code,
        'details': self.details
    })
def to_dict(self):
    """Return json-serializable request.

    To convert requests that have failed due to some exception this will
    convert all `failure.Failure` objects into dictionaries (which will
    then be reconstituted by the receiver).
    """
    request = {
        'task_cls': reflection.get_class_name(self._task),
        'task_name': self._task.name,
        'task_version': self._task.version,
        'action': self._action,
        'arguments': self._arguments,
    }
    if 'result' in self._kwargs:
        result = self._kwargs['result']
        if isinstance(result, ft.Failure):
            request['result'] = ('failure', result.to_dict())
        else:
            request['result'] = ('success', result)
    if 'failures' in self._kwargs:
        request['failures'] = dict(
            (task, fail.to_dict())
            for task, fail in six.iteritems(self._kwargs['failures']))
    return request
def handler(*args, **kwargs):
    """Verify the target object exists before invoking *func*.

    Responds with 404 when the lookup function finds nothing instead of
    letting the wrapped view fail later; non-"not found" lookup errors
    propagate unchanged.
    """
    if id_prop and not get_args:
        get_args['id'] = id_prop[0]
    if 'marker' in id_prop:
        if 'marker' not in u.get_request_args():
            return func(*args, **kwargs)
        kwargs['marker'] = u.get_request_args()['marker']
    get_kwargs = {}
    for get_arg in get_args:
        get_kwargs[get_arg] = kwargs[get_args[get_arg]]
    obj = None
    try:
        obj = get_func(**get_kwargs)
    except Exception as e:
        cls_name = reflection.get_class_name(e, fully_qualified=False)
        if 'notfound' not in cls_name.lower():
            # BUG FIX: re-raise preserving the original traceback
            # (bare 'raise') instead of 'raise e', which resets it.
            raise
    if obj is None:
        e = ex.NotFoundException(get_kwargs,
                                 _('Object with %s not found'))
        return u.not_found(e)
    # 'marker' was only needed for the existence check above.
    kwargs.pop('marker', None)
    return func(*args, **kwargs)
def __repr__(self):
    """Show identity (or '*' when anonymous), tasks and topic."""
    cls_name = reflection.get_class_name(self, fully_qualified=False)
    if self.identity is self._NO_IDENTITY:
        suffix = "(identity=*, tasks=%s, topic=%s)" % (self.tasks,
                                                       self.topic)
    else:
        suffix = "(identity=%s, tasks=%s, topic=%s)" % (
            self.identity, self.tasks, self.topic)
    return cls_name + suffix
def setUp(self):
    """Prepare a TaskOneReturn endpoint plus canned task metadata."""
    super(TestEndpoint, self).setUp()
    self.task_cls = utils.TaskOneReturn
    self.task_cls_name = reflection.get_class_name(self.task_cls)
    self.task_ep = ep.Endpoint(self.task_cls)
    self.task_uuid = 'task-uuid'
    self.task_args = {'context': 'context'}
    self.task_result = 1
def check_skip(self):
    """Skip this test when disabled via configuration."""
    cls_name = reflection.get_class_name(self, fully_qualified=False)
    method_name = '.'.join([cls_name, self._testMethodName])
    skip_list = self.conf.skip_scenario_test_list
    listed = bool(skip_list) and (cls_name in skip_list or
                                  method_name in skip_list)
    if self.conf.skip_scenario_tests or listed:
        self.skipTest('Test disabled in conf, skipping')
def __init__(self, topic, tasks, identity=_NO_IDENTITY):
    """Store topic/identity and normalize *tasks* to name strings."""
    self.tasks = [
        task if isinstance(task, six.string_types)
        else reflection.get_class_name(task)
        for task in tasks
    ]
    self.topic = topic
    self.identity = identity
def test_local_error_name(self):
    """Remote errors should map back to their local exception name."""
    err = exception.NotFound()
    self.assertEqual('NotFound', self.rpcapi.local_error_name(err))
    remote_err = self._to_remote_error(err)
    self.assertEqual('NotFound_Remote',
                     reflection.get_class_name(remote_err,
                                               fully_qualified=False))
    self.assertEqual('NotFound',
                     self.rpcapi.local_error_name(remote_err))
def __str__(self):
    """Render the error as '<Name> (HTTP <code>) <details>'."""
    # BUG FIX: the adjacent string literals were missing a separating
    # space and rendered "is notavailable.".
    self.details = _("Requested version of Heat API is not "
                     "available.")
    return (_("%(name)s (HTTP %(code)s) %(details)s") % {
        'name': reflection.get_class_name(self, fully_qualified=False),
        'code': self.code,
        'details': self.details})
def __init__(self, object_class, db_exception):
    """Wrap a DB duplicate-entry error with the object's type info."""
    object_type = reflection.get_class_name(object_class,
                                            fully_qualified=False)
    super(NeutronDbObjectDuplicateEntry, self).__init__(
        object_type=object_type,
        attributes=db_exception.columns,
        values=db_exception.value)
    self.columns = db_exception.columns
    self.value = db_exception.value
def get_unix_group(group=None):
    """Get the gid and group name.

    This is a convenience utility which accepts a variety of input which
    might represent a unix group. If successful it returns the gid and
    name. Valid input is:

    string
        A string is first considered to be a group name and a lookup is
        attempted under that name. If no name is found then an attempt
        is made to convert the string to an integer and perform a lookup
        as a gid.

    int
        An integer is interpreted as a gid.

    None
        None is interpreted to mean use the current process's effective
        group.

    If the input is a valid type but no group is found a KeyError is
    raised. If the input is not a valid type a TypeError is raised.

    :param object group: string, int or None specifying the group to
                         lookup.

    :returns: tuple of (gid, name)
    """
    if isinstance(group, six.string_types):
        try:
            group_info = grp.getgrnam(group)
        except KeyError:
            # Was an int passed as a string?
            # Try converting to int and lookup by id instead.
            try:
                gid = int(group)
            except ValueError:
                raise KeyError("group name '%s' not found" % group)
            try:
                group_info = grp.getgrgid(gid)
            except KeyError:
                raise KeyError("group id %d not found" % gid)
    elif isinstance(group, int):
        try:
            group_info = grp.getgrgid(group)
        except KeyError:
            raise KeyError("group id %d not found" % group)
    elif group is None:
        group_info = grp.getgrgid(os.getegid())
    else:
        bad_cls = reflection.get_class_name(group, fully_qualified=False)
        raise TypeError('group must be string, int or None; not %s (%r)'
                        % (bad_cls, group))
    return group_info.gr_gid, group_info.gr_name
def __repr__(self):
    """Return string representation of TokenModel."""
    template = ('<%(type)s (audit_id=%(audit_id)s, '
                'audit_chain_id=%(audit_ids)s) at %(loc)s>')
    return template % {
        'type': reflection.get_class_name(self, fully_qualified=False),
        'audit_id': self.audit_id,
        'audit_ids': self.audit_ids,
        'loc': hex(id(self)),
    }
def __repr__(self):
    """Show class name, audit ids and memory location."""
    template = ('<%(type)s (audit_id=%(audit_id)s, '
                'audit_chain_id=%(audit_chain_id)s) at %(loc)s>')
    return template % {
        'type': reflection.get_class_name(self, fully_qualified=False),
        'audit_id': self.audit_id,
        'audit_chain_id': self.audit_chain_id,
        'loc': hex(id(self)),
    }
def __exit__(self, etype, value, traceback):
    """Close the trace span, recording exception info when one occurred."""
    if not etype:
        stop()
        return
    message = value.args[0] if value.args else None
    stop(info={
        "etype": reflection.get_class_name(etype),
        "message": message,
    })
def _extract_policy_data(self, policy_data):
    """Return this policy's saved data if present and version-matched."""
    key = reflection.get_class_name(self, fully_qualified=False)
    if key not in policy_data:
        return None
    entry = policy_data.get(key)
    # Reject entries written by a different policy version.
    if 'version' not in entry or entry['version'] != self.VERSION:
        return None
    return entry.get('data', None)
def _default(obj): if isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() elif inspect.isclass(obj): return reflection.get_class_name(obj) elif isinstance(obj, (set, frozenset)): return list(sorted(obj)) else: raise TypeError("Type '%r' is not JSON serializable" % (obj, ))
def prettify_failures(failures, limit=-1):
    """Prettifies a checked commits failures (ignores sensitive data...).

    Example input and output:

    >>> from taskflow.utils import kazoo_utils
    >>> conf = {"hosts": ['localhost:2181']}
    >>> c = kazoo_utils.make_client(conf)
    >>> c.start(timeout=1)
    >>> txn = c.transaction()
    >>> txn.create("/test")
    >>> txn.check("/test", 2)
    >>> txn.delete("/test")
    >>> try:
    ...     kazoo_utils.checked_commit(txn)
    ... except kazoo_utils.KazooTransactionException as e:
    ...     print(kazoo_utils.prettify_failures(e.failures, limit=1))
    ...
    RolledBackError@Create(path='/test') and 2 more...
    >>> c.stop()
    >>> c.close()
    """
    pretty = []
    for op, cause in failures:
        op_text = reflection.get_class_name(op, fully_qualified=False)
        # Pick off a few attributes that are meaningful (but one that
        # don't show actual data, which might not be desired to show...).
        shown_attrs = ["path=%r" % op.path]
        try:
            if op.version != -1:
                shown_attrs.append("version=%s" % op.version)
        except AttributeError:
            pass
        op_text += "(%s)" % ", ".join(shown_attrs)
        cause_text = reflection.get_class_name(cause,
                                               fully_qualified=False)
        pretty.append("%s@%s" % (cause_text, op_text))
    if limit <= 0 or len(pretty) <= limit:
        return ", ".join(pretty)
    shown, hidden = pretty[:limit], pretty[limit:]
    return ", ".join(shown) + " and %s more..." % len(hidden)
def _error(self, ex):
    """Translate exception *ex* into a JSON-serializable error dict.

    Handles disguised HTTP exceptions, remote (RPC) '_Remote' name
    suffixes and embedded tracebacks; the message is only exposed when
    the exception is marked safe for end users.
    """
    trace = None
    traceback_marker = 'Traceback (most recent call last)'
    webob_exc = None
    safe = getattr(ex, 'safe', False)
    if isinstance(ex, exception.HTTPExceptionDisguise):
        # An HTTP exception was disguised so it could make it here
        # let's remove the disguise and set the original HTTP exception
        if cfg.CONF.debug:
            trace = ''.join(traceback.format_tb(ex.tb))
        ex = ex.exc
        webob_exc = ex
    ex_type = reflection.get_class_name(ex, fully_qualified=False)
    # RPC-forwarded exceptions arrive with a '_Remote' suffix on the
    # class name; strip it for reporting.
    is_remote = ex_type.endswith('_Remote')
    if is_remote:
        ex_type = ex_type[:-len('_Remote')]
    full_message = six.text_type(ex)
    if '\n' in full_message and is_remote:
        # Remote exceptions carry the traceback after the first newline.
        message, msg_trace = full_message.split('\n', 1)
    elif traceback_marker in full_message:
        # The traceback was embedded directly in the message text.
        message, msg_trace = full_message.split(traceback_marker, 1)
        message = message.rstrip('\n')
        msg_trace = traceback_marker + msg_trace
    else:
        msg_trace = 'None\n'
        if sys.exc_info() != (None, None, None):
            msg_trace = traceback.format_exc()
        message = full_message
    if isinstance(ex, exception.HeatException):
        message = ex.message
    if cfg.CONF.debug and not trace:
        trace = msg_trace
    if not webob_exc:
        webob_exc = self._map_exception_to_error(ex.__class__)
    error = {
        'code': webob_exc.code,
        'title': webob_exc.title,
        'explanation': webob_exc.explanation,
        'error': {
            'type': ex_type,
            'traceback': trace,
        }
    }
    # Only expose the message for exceptions flagged safe for users.
    if safe:
        error['error']['message'] = message
    return error