def LockCheck():
    # Watchdog daemon (Python 2 / tasklet-style runtime: uses sys.exc_clear(),
    # StandardError and thread objects exposing .frame/.next).  Every five
    # minutes it scans all known semaphores and logs stack dumps for any that
    # appear deadlocked.  Never returns.
    global semaphores
    while 1:
        each = None
        Sleep(5 * 60)  # NOTE(review): presumably a cooperative sleep helper -- confirm
        now = time.time()
        try:
            for each in semaphores.keys():
                BeNice()  # NOTE(review): presumably yields to other tasklets -- confirm
                # Suspicious: fully acquired, has queued waiters, and has
                # been held for at least 5*MIN.
                if (each.count<=0) and (each.waiting.balance < 0) and (each.lockedWhen and (now - each.lockedWhen)>=(5*MIN)):
                    logger.error("Semaphore %s appears to have threads in a locking conflict."%id(each))
                    logger.error("holding thread:")
                    try:
                        # Dump up to 40 frames of the holder's stack.
                        for s in traceback.format_list(traceback.extract_stack(each.thread.frame,40)):
                            logger.error(s)
                    except:
                        sys.exc_clear()  # best effort: holder may have vanished mid-dump
                    # Walk the circular queue of waiting threads.
                    first = each.waiting.queue
                    t = first
                    while t:
                        logger.error("waiting thread %s:"%id(t),4)
                        try:
                            for s in traceback.format_list(traceback.extract_stack(t.frame,40)):
                                logger.error(s,4)
                        except:
                            sys.exc_clear()
                        t = t.next
                        if t is first:
                            # wrapped around the circular list -- stop
                            break
                    logger.error("End of locking conflict log")
        except StandardError:
            # Keep the watchdog alive no matter what the scan hits.
            StackTrace()
            sys.exc_clear()
def TriggerEvent(self, function, args):
    """Triggers an event for the plugins.

    Since events and notifications are precisely the same except for how n+
    responds to them, both can be triggered by this function.

    :param function: name of the handler attribute on each plugin's PLUGIN
        object (may be a dotted path).
    :param args: argument tuple passed to each handler; a handler may return
        a replacement tuple that is forwarded to subsequent plugins.
    :returns: the (possibly rewritten) argument tuple, or None when a plugin
        requested 'zap'.
    """
    hotpotato = args
    for module, plugin in list(self.enabled_plugins.items()):
        try:
            # BUGFIX: resolve the handler with getattr instead of
            # eval("plugin.PLUGIN." + function) -- eval on a constructed
            # string executes arbitrary code if `function` is attacker-
            # influenced, and is slower besides.
            func = plugin.PLUGIN
            for part in function.split('.'):
                func = getattr(func, part)
            ret = func(*hotpotato)
            if ret is not None and type(ret) != tupletype:
                if ret == returncode['zap']:
                    return None
                elif ret == returncode['break']:
                    return hotpotato
                elif ret == returncode['pass']:
                    pass
                else:
                    log.add(_("Plugin %(module)s returned something weird, '%(value)s', ignoring") % {'module': module, 'value': ret})
            if ret is not None:
                hotpotato = ret
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; any plugin failure is logged and skipped.
            log.add(_("Plugin %(module)s failed with error %(errortype)s: %(error)s.\nTrace: %(trace)s\nProblem area:%(area)s") % {
                'module': module,
                'errortype': sys.exc_info()[0],
                'error': sys.exc_info()[1],
                'trace': ''.join(format_list(extract_stack())),
                'area': ''.join(format_list(extract_tb(sys.exc_info()[2])))
            })
    return hotpotato
def format_stack_report(details, exc_info):
    # Build a plain-text crash report (Python 2 era code: catches
    # StandardError).  `exc_info` is an (etype, value, tb) triple or falsy;
    # `details` is only used when there is no exception to format.
    header = ''
    header += "Exception\n---------\n"
    if exc_info:
        header += ''.join(traceback.format_exception(*exc_info))
        header += "\n"
        # Print out a stack trace too.  The exception stack only contains
        # calls between the try and the exception.
        try:
            stack = util.get_nice_stack()
        except StandardError:
            # fall back to the raw stack if the prettifier itself fails
            stack = traceback.extract_stack()
        header += "Call Stack\n---------\n"
        header += ''.join(traceback.format_list(stack))
        header += "\n"
    else:
        # fake an exception with our call stack
        try:
            stack = util.get_nice_stack()
        except StandardError:
            stack = traceback.extract_stack()
        header += ''.join(traceback.format_list(stack))
        header += 'UnknownError: %s\n' % details
        header += "\n"
    return header
def __reportLocalError(self, tb_list, class_, func, error):
    '''
    Create a report of an error while calling a __post_init__ method and the
    error is thrown within this module. (class)

    Errors are probably argument type errors, so we want to point to the
    declaration of the __post_init__ function that generated the exception

    :param tb_list: traceback list of the error
    :param class_: visited class that produced the error
    :param func: method of the class that produced the error
    :param error: error (exception) thrown while calling func
    '''
    # Drop the last three frames: they are this module's own call machinery,
    # not the user's code.
    tb_list = traceback.format_list(tb_list[:-3])
    msg = '\nTraceback: \n'
    msg += ''.join(tb_list)
    msg += '%s' % error.message  # Python 2: exceptions expose .message
    # create traceback friendly report pointing at func's declaration
    filename = inspect.getsourcefile(func)
    lines, lineno = inspect.getsourcelines(func)
    line = lines[0]
    name = func.__name__
    # Synthesize a single traceback entry for the function definition site.
    extracted_list = [(filename, lineno, name, line)]
    # Print the report
    func_hint = ''.join(traceback.format_list(extracted_list))
    msg += '\n\n%s' % (func_hint)
    msg += ' Remember that %r \n only accepts keywords arguments in' \
           ' the constructor.' % class_
    return msg
def log_except(*args, **kw):
    """Log the exception currently being handled, prefixing the traceback
    with the call stack that led into the ``try`` block.

    Positional args: an optional %-style message followed by its arguments.
    Keyword args: ``logger`` (defaults to the root logger) and ``_better``
    (truthy to format via the third-party better_exchook instead).
    Must be called from inside an ``except`` block.
    """
    target = kw.get('logger', None) or logging.root
    use_better = kw.pop('_better', 0)
    if args:
        msg, extra = args[0], list(args[1:])
    else:
        msg, extra = None, []
    pieces = ['Traceback (most recent call last):\n']
    if use_better:
        import better_exchook
        better_exchook.better_exchook(*sys.exc_info(), output=lambda s: pieces.append('%s\n' % s))
    else:
        etype, evalue, tb = sys.exc_info()
        # Frames above the try block, then the traceback's own frames.
        outer = traceback.extract_stack(f=tb.tb_frame.f_back)
        inner = traceback.extract_tb(tb)
        pieces += traceback.format_list(outer)
        pieces.append(' ****** Traceback ****** \n')
        pieces += traceback.format_list(inner)
        pieces += traceback.format_exception_only(etype, evalue)
    text = ''.join(pieces)
    if msg:
        target.error(msg + ':\n%s', *(extra + [text]))
    else:
        target.error(text)
def make_hivemap(hm, __subcontext__=None, *args, **kwargs):
    # Build a hivemap from `hm` and return a fresh instance.  The exception
    # wrapping below is currently disabled: each handler re-raises
    # immediately (``raise  # ##TODO``), leaving the wrapping code dead
    # until the TODO is resolved.
    maphive = build_hivemap(hm)
    try:
        return maphive(*args, **kwargs).getinstance()
    except TypeError as e:
        raise # ##TODO
        # NOTE: unreachable until the TODO above is removed -- intended to
        # wrap the TypeError with the subcontext and formatted traceback.
        if __subcontext__ is None:
            raise
        import sys, traceback
        tb = sys.exc_info()[2]
        tbstack = traceback.extract_tb(tb)
        tblist = traceback.format_list(tbstack)
        raise HivemapTypeError(__subcontext__, tblist, e.args)
    except ValueError as e:
        raise # ##TODO
        # NOTE: unreachable -- same pattern for ValueError.
        if __subcontext__ is None:
            raise
        import sys, traceback
        tb = sys.exc_info()[2]
        tbstack = traceback.extract_tb(tb)
        tblist = traceback.format_list(tbstack)
        raise HivemapValueError(__subcontext__, tblist, e.args)
def _stack_function_(message='', limit=None):
    """Emit the current call stack as a single debug log line.

    When ``limit`` is given, only the last ``limit`` caller frames are
    included; in both cases the frame of this helper itself is excluded.
    """
    import traceback
    frames = traceback.extract_stack()
    if not frames:
        return
    if limit:
        formatted = traceback.format_list(frames[-limit - 1:-1])
    else:
        formatted = traceback.format_list(frames)[0:-1]
    logging.debug('%s\n*** %s' % (message, '*** '.join(formatted)))
def initiate_guest(kickstart_host, cobbler_system_name, virt_type, name, mem_kb, vcpus, disk_gb, virt_bridge, disk_path, extra_append, log_notify_handler=None):
    # Python 2 / koan helper: drive a virtualized kickstart install through
    # Koan, then refresh server-side virtualization state.  Returns a
    # (code, message, error_messages) tuple only on failure; on success it
    # falls off the end and returns None -- NOTE(review): confirm callers
    # rely on that.  mem_kb/vcpus/disk_gb/extra_append/log_notify_handler
    # are accepted but unused here.
    error_messages = {}
    success = 0  # NOTE(review): never updated; dead variable
    try:
        # Sanity-check the target disk before handing off to koan.
        if disk_path.startswith('/dev/'):
            if not os.path.exists(disk_path):
                raise BlockDeviceNonexistentError(disk_path)
        else:
            if os.path.exists(disk_path):
                raise VirtDiskPathExistsError(disk_path)
        k = Koan()
        k.list_items = 0
        k.server = kickstart_host
        k.is_virt = 1
        k.is_replace = 0
        k.is_display = 0
        k.port = 443
        k.profile = None
        k.system = cobbler_system_name
        k.should_poll = 1
        k.image = None
        k.live_cd = None
        k.virt_name = name
        k.virt_path = disk_path
        k.virt_type = virt_type
        k.virt_bridge = virt_bridge
        k.no_gfx = False
        k.add_reinstall_entry = None
        k.kopts_override = None
        k.virt_auto_boot = None
        if hasattr(k, 'no_copy_default'):
            k.no_copy_default = 1
        else:
            # older koan
            k.grubby_copy_default = 0
        if hasattr(k, 'virtinstall_wait'):
            k.virtinstall_wait = 0
        k.run()
        # refresh current virtualization state on the server
        import virtualization.support
        virtualization.support.refresh()
    except Exception, e:
        (xa, xb, tb) = sys.exc_info()
        if str(xb).startswith("The MAC address you entered is already in use"):
            # I really wish there was a better way to check for this
            error_messages['koan'] = str(xb)
            print str(xb)
        elif hasattr(e,"from_koan") and len(str(e)) > 1:
            error_messages['koan'] = str(e)[1:-1]
            print str(e)[1:-1] # nice exception, no traceback needed
        else:
            print xa
            print xb
            print string.join(traceback.format_list(traceback.extract_tb(tb)))
            error_messages['koan'] = str(xb) + ' ' + string.join(traceback.format_list(traceback.extract_tb(tb)))
        return (1, "Virtual kickstart failed. Koan error.", error_messages)
def initiate(kickstart_host, base, extra_append, static_device=None, system_record="", preserve_files=[]):
    # Python 2 / koan helper: kick off a bare-metal reinstall via Koan.
    # NOTE(review): mutable default for preserve_files -- shared between
    # calls if anything ever mutates it.  `base`/`extra_append`/
    # `preserve_files` are accepted but unused here.
    error_messages = {}
    success = 0  # NOTE(review): never updated; dead variable
    # cleanup previous attempt
    rm_rf(SHADOW)
    os.mkdir(SHADOW)
    print "Preserve files! : %s" % preserve_files
    try:
        if static_device:
            update_static_device_records(kickstart_host, static_device)
        k = Koan()
        k.list_items = 0
        k.server = kickstart_host
        k.is_virt = 0
        k.is_replace = 1
        k.is_display = 0
        k.profile = None
        if system_record != "":
            k.system = system_record
        else:
            k.system = None
        k.port = 443
        k.image = None
        k.live_cd = None
        k.virt_path = None
        k.virt_type = None
        k.virt_bridge = None
        k.no_gfx = 1
        k.add_reinstall_entry = None
        k.kopts_override = None
        k.use_kexec = None
        k.embed_kickstart = None
        if hasattr(k, 'no_copy_default'):
            k.no_copy_default = 1
        else:
            # older koan
            k.grubby_copy_default = 0
        if static_device:
            # embed the kickstart so static network config survives reboot
            k.embed_kickstart = 1
        k.run()
    except Exception, e:
        (xa, xb, tb) = sys.exc_info()
        try:
            # koan exceptions carry a from_koan marker; probe for it
            getattr(e,"from_koan")
            error_messages['koan'] = str(e)[1:-1]
            print str(e)[1:-1] # nice exception, no traceback needed
        except:
            print xa
            print xb
            print string.join(traceback.format_list(traceback.extract_tb(tb)))
            error_messages['koan'] = string.join(traceback.format_list(traceback.extract_tb(tb)))
        return (1, "Kickstart failed. Koan error.", error_messages)
def _limited_traceback(excinfo):
    """Return a formatted traceback with all the stack from this frame
    (i.e __file__) up removed.

    If no entry originates from this file, the whole traceback is kept.
    """
    frames = extract_tb(excinfo.tb)
    for pos, frame in enumerate(frames):
        if __file__ in frame:
            return format_list(frames[pos + 1:])
    return format_list(frames)
def configure(self, beedict):
    # Resolve this bee's configuration target inside `beedict` and replay
    # the recorded attribute/item accesses and calls against it.
    if not self.bound_target:
        from .drone import drone
        if isinstance(self.target.instance, drone) and self.target.instance in beedict.values():
            # Map a drone instance back to its key in beedict.
            self.target = [v for v in beedict if beedict[v] is self.target.instance][0]
        else:
            self.bind()
    n = beedict[self.target]
    n = resolve(n, parameters=self.parameters)
    if n is self:
        raise Exception("bee.configure target '%s' is self" % self.target)
    from .worker import workerframe
    # Unwrap wrappers/frames down to the actual bee object.
    if isinstance(n, beewrapper):
        assert n.instance is not None
        n = n.instance
    if isinstance(n, workerframe):
        assert n.built
        n = n.bee
    for attrs, stack, args, kargs in self.configuration:
        # Resolve parameter placeholders in the recorded call arguments.
        args = tuple([resolve(a, parameters=self.parameters) for a in args])
        kargs = dict((a, resolve(kargs[a], parameters=self.parameters)) for a in kargs)
        try:
            nn = n
            setitem = False
            # Replay the recorded access chain; a terminal setitem means
            # there is no call to make afterwards.
            for mode, attr in attrs:
                if mode == "getattr":
                    nn = getattr(nn, attr)
                elif mode == "getitem":
                    nn = nn[attr]
                elif mode == "setitem":
                    attr, value = attr
                    nn[attr] = value
                    setitem = True
                else:
                    raise Exception(mode)  # should never happen
            if not setitem:
                nn(*args, **kargs)
        except Exception as e:
            # Stitch the recorded configuration stack onto the runtime
            # traceback so the error points at the configuring source line.
            s1 = traceback.format_list(stack[:-1])
            tbstack = traceback.extract_tb(sys.exc_info()[2])
            s2 = traceback.format_list(tbstack[1:])
            s3 = traceback.format_exception_only(type(e), e)
            s = "\n" + "".join(s1 + s2 + s3)
            raise ConfigureBeeException(s)
    if isinstance(n, configure_base):
        # Cascade configuration to nested configurable targets.
        n.configure()
def __init__(self, msg, cause=None):
    """Constructor.

    :param msg: human-readable message for this exception.
    :param cause: optional underlying exception being chained.  When it is
        a foreign (non-ChainedException) exception, the traceback of the
        exception currently being handled is attached to it as
        ``stackTrace`` so the chain keeps the original location.
    """
    if (cause is not None) and (not isinstance(cause, ChainedException)):
        # store stack trace in other Exceptions.  BUGFIX: sys.exc_info()
        # always returns a 3-tuple (never None), so the old ``exi != None``
        # check was always true; test the traceback slot instead so we do
        # not attach an empty trace outside an except block.
        exi = sys.exc_info()
        if exi[2] is not None:
            cause.__dict__["stackTrace"] = traceback.format_list(traceback.extract_tb(exi[2]))
    Exception.__init__(self, msg)
    self.msg = msg
    self.cause = cause
    # Stack at construction time, excluding this constructor's own frame.
    self.stackTrace = traceback.format_list(traceback.extract_stack())[0:-1]
def __init__(self, msg, cause=None):
    """Constructor.

    :param msg: message text.
    :param cause: optional chained exception; for foreign (non-
        PycbioException) types the in-flight traceback is recorded on the
        cause as ``stackTrace`` so the original location is preserved.
    """
    if (cause is not None) and (not isinstance(cause, PycbioException)):
        # store stack trace in other Exception types.  BUGFIX:
        # sys.exc_info() always returns a 3-tuple (never None), so the old
        # ``exi is not None`` check was always true; test the traceback
        # slot instead so nothing is attached outside an except block.
        exi = sys.exc_info()
        if exi[2] is not None:
            setattr(cause, "stackTrace", traceback.format_list(traceback.extract_tb(exi[2])))
    Exception.__init__(self, msg)
    self.msg = msg
    self.cause = cause
    # Call stack at construction time, minus this constructor's frame.
    self.stackTrace = traceback.format_list(traceback.extract_stack())[0:-1]
def _limited_traceback(excinfo):
    """Return a formatted traceback with this frame up removed.

    The function removes all the stack from this frame up (i.e from
    __file__ and up); when no frame matches, the full traceback is
    returned unchanged.
    """
    frames = extract_tb(excinfo.tb)
    matches = [__file__ in frame for frame in frames]
    if True in matches:
        start = matches.index(True) + 1
        return format_list(frames[start:])
    return format_list(frames)
def get_trace(): trace = sys.exc_info()[2] if trace: stack = traceback.extract_tb(trace) traceback_list = traceback.format_list(stack) return "".join(traceback_list) else: stack = traceback.extract_stack() traceback_list = traceback.format_list(stack) return "".join(traceback_list[:-1])
def default_exception_handler(self, context):
    """Default exception handler.

    This is called when an exception occurs and no exception
    handler is set, and can be called by a custom exception
    handler that wants to defer to the default behavior.

    The context parameter has the same meaning as in
    `call_exception_handler()`.
    """
    message = context.get('message')
    if not message:
        message = 'Unhandled exception in event loop'
    exception = context.get('exception')
    if exception is not None:
        if hasattr(exception, '__traceback__'):
            # Python 3
            tb = exception.__traceback__
        else:
            # call_exception_handler() is usually called indirectly
            # from an except block. If it's not the case, the traceback
            # is undefined...
            tb = sys.exc_info()[2]
        exc_info = (type(exception), exception, tb)
    else:
        exc_info = False
    # Record where the currently-running handle was created, unless the
    # context already carries its own source traceback.
    if ('source_traceback' not in context and self._current_handle is not None and self._current_handle._source_traceback):
        context['handle_traceback'] = self._current_handle._source_traceback
    log_lines = [message]
    for key in sorted(context):
        if key in ('message', 'exception'):
            continue
        value = context[key]
        if key == 'source_traceback':
            tb = ''.join(traceback.format_list(value))
            value = 'Object created at (most recent call last):\n'
            value += tb.rstrip()
        elif key == 'handle_traceback':
            tb = ''.join(traceback.format_list(value))
            value = 'Handle created at (most recent call last):\n'
            value += tb.rstrip()
        else:
            value = repr(value)
        log_lines.append('{0}: {1}'.format(key, value))
    logger.error('\n'.join(log_lines), exc_info=exc_info)
def __init__(self, field, config, msg):
    # Build a ValueError describing a failed Field validation.  The error
    # text points at both the Field's and the Config's definition sites,
    # each stored as a single pre-extracted traceback entry on the
    # field/config objects.
    self.fieldType = type(field)
    self.fieldName = field.name
    self.fullname = _joinNamePath(config._name, field.name)
    # Per-field assignment history (created lazily on first failure).
    self.history = config.history.setdefault(field.name, [])
    self.fieldSource = field.source
    self.configSource = config._source
    error="%s '%s' failed validation: %s\n"\
        "For more information read the Field definition at:\n%s"\
        "And the Config definition at:\n%s"%\
        (self.fieldType.__name__, self.fullname, msg,
         traceback.format_list([self.fieldSource])[0],
         traceback.format_list([self.configSource])[0])
    ValueError.__init__(self, error)
def __init__ (self, msg, parent=None) : ptype = type(parent).__name__ # exception type for parent stype = type(self).__name__ # exception type for self, useful for # inherited exceptions # did we get a parent exception? if parent : # if so, then this exception is likely created in some 'except' # clause, as a reaction on a previously catched exception (the # parent). Thus we append the message of the parent to our own # message, but keep the parent's traceback (after all, the original # exception location is what we are interested in). # if isinstance (parent, MyEx) : # that all works nicely when parent is our own exception type... self.traceback = parent.traceback frame = traceback.extract_stack ()[-2] line = "%s +%s (%s) : %s" % frame self.msg = " %-20s: %s (%s)\n%s" % (stype, msg, line, parent.msg) else : # ... but if parent is a native (or any other) exception type, # we don't have a traceback really -- so we dig it out of # sys.exc_info. trace = sys.exc_info ()[2] stack = traceback.extract_tb (trace) traceback_list = traceback.format_list (stack) self.traceback = "".join (traceback_list) # the message composition is very similar -- we just inject the # parent exception type inconspicuously somewhere (above that # was part of 'parent.msg' already). frame = traceback.extract_stack ()[-2] line = "%s +%s (%s) : %s" % frame self.msg = " %-20s: %s (%s)\n %-20s: %s" % (stype, msg, line, ptype, parent) else : # if we don't have a parent, we are a 1st principle exception, # i.e. a reaction to some genuine code error. Thus we extract the # traceback from exactly where we are in the code (the last stack # frame will be the call to this exception constructor), and we # create the original exception message from 'stype' and 'msg'. stack = traceback.extract_stack () traceback_list = traceback.format_list (stack) self.traceback = "".join (traceback_list[:-1]) self.msg = " %-20s: %s" % (stype, msg)
def __str__(self):
    """Render the error message plus the defining traceback of the op that
    caused it, walking the chain of ``_original_op`` ancestors and eliding
    runs of traceback lines shared with the previously printed traceback.
    """
    if self._op is None:
        return self.message
    output = ["%s\n\nCaused by op %r, defined at:\n" % (self.message, self._op.name,)]
    curr_traceback_list = traceback.format_list(self._op.traceback)
    output.extend(curr_traceback_list)
    # pylint: disable=protected-access
    original_op = self._op._original_op
    # pylint: enable=protected-access
    while original_op is not None:
        output.append(
            "\n...which was originally created as op %r, defined at:\n"
            % (original_op.name,))
        prev_traceback_list = curr_traceback_list
        curr_traceback_list = traceback.format_list(original_op.traceback)
        # Attempt to elide large common subsequences of the subsequent
        # stack traces.
        #
        # TODO(mrry): Consider computing the actual longest common subsequence.
        is_eliding = False
        elide_count = 0
        last_elided_line = None
        for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
            if line == line_in_prev:
                if is_eliding:
                    elide_count += 1
                    last_elided_line = line
                else:
                    output.append(line)
                    is_eliding = True
                    elide_count = 0
            else:
                if is_eliding:
                    if elide_count > 0:
                        output.extend(
                            ["[elided %d identical lines from previous traceback]\n"
                             % (elide_count - 1,), last_elided_line])
                    is_eliding = False
                # BUGFIX: was ``output.extend(line)``, which spread the
                # string into the list one character at a time.
                output.append(line)
        # BUGFIX: flush a pending elision so a common run that reaches the
        # end of the zipped traces is not silently dropped.
        if is_eliding and elide_count > 0:
            output.extend(
                ["[elided %d identical lines from previous traceback]\n"
                 % (elide_count - 1,), last_elided_line])
        # pylint: disable=protected-access
        original_op = original_op._original_op
        # pylint: enable=protected-access
    output.append("\n%s (see above for traceback): %s\n"
                  % (type(self).__name__, self.message))
    return "".join(output)
def start_game(self, gamekey):
    """Apply mods, save settings, and launch the game executable.

    On Windows the exe is started directly; on Linux it is run through
    wine.  If launching fails with OSError (typically because the exe is
    flagged 'run as administrator' on Windows), fall back to launching
    the copied file recorded by apply_mods().
    """
    def _log_active_exception(prefix):
        # Log the exception currently being handled, with its traceback.
        # BUGFIX: the original called logging.warning("...error:", exc) --
        # the stray argument has no %-placeholder, which breaks the log
        # record formatting.  Use a proper %s placeholder instead.
        logging.warning("%s %s", prefix, sys.exc_info()[0])
        logging.warning(sys.exc_info()[1])
        for line in traceback.format_list(traceback.extract_tb(sys.exc_info()[2])):
            logging.warning(line)

    dict_files_to_copy = self.apply_mods(False)
    logging.info("start the game")
    if self.save_settings() == -1:
        return
    # Check to see if folderlocation is valid.
    if not path.isdir(self.config[SET].get(WF_KEY, "\n\n\n\n\n\n\n\n\n\n\n")):
        logging.warning("Invalid directory location")
        return
    try:
        try:
            if(sys.platform=='win32'):
                subprocess.Popen(path.join(self.config[SET][WF_KEY],gamekey), cwd=self.config[SET][WF_KEY])
            if sys.platform.startswith('linux'):
                subprocess.Popen("wine " + path.join(self.config[SET][WF_KEY],gamekey), cwd=self.config[SET][WF_KEY])
        except OSError:
            # ICK. User is on windows an the executable is set to run as
            # administrator -- launch the workaround copy instead.
            logging.warning(sys.exc_info()[1])
            for line in traceback.format_list(traceback.extract_tb(sys.exc_info()[2])):
                logging.warning(line)
            logging.info("Using workaround.")
            # Use workaround.
            game_location = dict_files_to_copy.get(gamekey)
            try:
                subprocess.Popen(game_location, cwd=self.config[SET][WF_KEY])
            except OSError:
                _log_active_exception("OSError error:")
                messagebox.showerror(message="OSError: Exe is set to run as administrator. Please uncheck the 'run as admin' in the exe properties.", title='OSError')
            except Exception:
                # Narrowed from a bare except so SystemExit still propagates.
                _log_active_exception("Unexpected error:")
                messagebox.showerror(message="Unknown Error", title='Error')
        else:
            logging.info("No.")
    except Exception:
        _log_active_exception("Unexpected error:")
        messagebox.showerror(message="Unknown Error", title='Error')
def handleTraceback(self, index):
    # Render a stored traceback as a clickable link at the end of the Qt
    # text widget; the link target encodes the traceback index and the
    # hover tooltip shows the fully formatted traceback.
    with self.writeLock:
        tb = self._termWidget.traceback(index)
        self.moveCursor(QtGui.QTextCursor.End)
        # Last formatted entry = the innermost frame, used as the label.
        linkName = "Error: {path}".format(path=traceback.format_list(tb)[-1])
        hoverText = ' '.join(traceback.format_list(tb))
        # For some reason the </a> doesn't get closed unless we have something after it, hence the space.
        self.insertHtml("""<a href='traceback:///{index}' title='{hoverText}'>{linkName} </a> """.format(
            linkName=linkName,
            index=index,
            hoverText=hoverText,
        )
        )
        self.insertPlainText("\n")
def __call__(self, environ, start_response):
    # WSGI entry point (Python 2 code: basestring, sys.exc_traceback).
    # Match the request path against the routing table, invoke the matched
    # controller, and stream the response body, turning any exception into
    # an error response.
    request = Request.createFromEnviron(environ)
    response = Router.notfound('Path not found')
    for name in self.router.routes:
        regex, controller, vars = self.router.routes[name]
        match = regex.match(request.get_path_info())
        if match:
            # Merge route defaults with captured path groups.
            urlvars = vars
            urlvars.update(match.groupdict())
            for k in urlvars:
                urlvars[k] = urlvars[k].decode('utf-8')
            try:
                # Lazily import string-named controllers.
                if isinstance(controller, basestring):
                    controller = self.router.\
                        load_controller(controller)
                template = has_template(controller, self.template_env, request, **urlvars)
                if template:
                    response = controller(request, template, **urlvars)
                else:
                    response = controller(request, **urlvars)
                # Bare strings/lists are wrapped into a Response object.
                if isinstance(response, (basestring, list)):
                    response = Response(response)
            except Exception as e:
                tb = traceback.format_list(\
                    traceback.extract_tb(sys.exc_traceback))
                response = self.error(e, tb)
            break
    try:
        output = response.getContent()
        status = response.getStatusCode()
        headers = response.getHeaders()
    except Exception as e:
        # Rendering the response itself failed: fall back to an error page.
        tb = traceback.format_list(\
            traceback.extract_tb(sys.exc_traceback))
        response = self.error(e, tb)
        output = response.getContent()
        status = response.getStatusCode()
        headers = response.getHeaders()
    start_response(status, headers)
    # Encode on the way out when the response declares an encoding.
    if isinstance(output, basestring):
        yield output if not response.encoding\
            else output.encode(response.encoding)
    elif isinstance(output, list):
        for ou in output:
            if isinstance(ou, basestring):
                yield ou if not response.encoding\
                    else ou.encode(response.encoding)
def traceback(self, offset=None):
    """Return nicely formatted most recent traceback.

    Returns a ``(text, line)`` tuple: the joined traceback text and the
    line number of the innermost reported entry.  Must be called while an
    exception is being handled.
    """
    etype, value, tb = sys.exc_info()
    # Skip the outermost frame (the caller's exec frame) when there is one.
    if tb.tb_next is not None:
        _tb = tb.tb_next
    else:
        _tb = tb
    try:
        if offset is not None:
            # Shift the first entry's line number by `offset` (compensates
            # for lines prepended to the user's source before compiling).
            lines = traceback.extract_tb(_tb)
            line = lines[0][1] + offset
            lines[0] = (lines[0][0], line) + lines[0][2:]
            text = [self._header]
            text = text + traceback.format_list(lines)
            text = text + traceback.format_exception_only(etype, value)
            line = lines[0][1]
        else:
            text = traceback.format_exception(etype, value, _tb)
            line = _tb.tb_lineno
    finally:
        # Break the frame reference cycle promptly.
        del tb, _tb
    return ''.join(text), line
def retry_task_cb(self, task, reason=""):
    # Re-queue a failed task unless it was already answered, exceeded its
    # retry budget, timed out, or the dispatcher has been stopped.  The
    # guard order matters: a responded task must never be re-queued.
    self.fail_num += 1
    self.continue_fail_num += 1
    self.last_fail_time = time.time()
    self.logger.warn("retry_task_cb: %s", task.url)
    if task.responsed:
        # A response was already delivered; retrying would double-answer.
        # Log the current call stack to help find the offending caller.
        self.logger.warn("retry but responsed. %s", task.url)
        st = traceback.extract_stack()
        stl = traceback.format_list(st)
        self.logger.warn("stack:%r", repr(stl))
        task.finish()
        return
    if task.retry_count > 10:
        task.response_fail("retry time exceed 10")
        return
    if time.time() - task.start_time > task.timeout:
        task.response_fail("retry timeout:%d" % (time.time() - task.start_time))
        return
    if not self.running:
        task.response_fail("retry but stopped.")
        return
    task.set_state("retry(%s)" % reason)
    task.retry_count += 1
    self.request_queue.put(task)
def format_exception(filename, should_remove_self):
    """Print the exception currently being handled, formatted for display.

    SyntaxErrors are rewritten so they report *filename* instead of the
    compile buffer.  When ``should_remove_self`` is true, the first
    traceback entry (this tool's own exec frame) is stripped and a full
    traceback is printed; otherwise only the exception line is shown.
    Also updates sys.last_type / sys.last_value / sys.last_traceback.
    """
    # Renamed from type/value so the builtins are not shadowed.
    etype, evalue, tb = sys.exc_info()
    sys.last_type = etype
    sys.last_value = evalue
    sys.last_traceback = tb
    if etype is SyntaxError:
        try:
            # parse the error message
            msg, (dummy_filename, lineno, offset, line) = evalue
        except Exception:
            pass  # Not the format we expect; leave it alone
        else:
            # Stuff in the right filename
            evalue = SyntaxError(msg, (filename, lineno, offset, line))
            sys.last_value = evalue
    res = traceback.format_exception_only(etype, evalue)
    # There are some compilation errors which do not provide traceback so we
    # should not massage it.
    if should_remove_self:
        tblist = traceback.extract_tb(tb)
        del tblist[:1]
        res = traceback.format_list(tblist)
        if res:
            res.insert(0, "Traceback (most recent call last):\n")
        res[len(res):] = traceback.format_exception_only(etype, evalue)
    # traceback.print_exception(etype, evalue, tb)
    for line in res:
        print(line, end=' ')
def showtraceback(self):
    """This needs to override the default traceback thing
    so it can put it into a pretty colour and maybe other
    stuff, I don't know"""
    try:
        t, v, tb = sys.exc_info()
        sys.last_type = t
        sys.last_value = v
        sys.last_traceback = tb
        tblist = traceback.extract_tb(tb)
        # Drop the interpreter's own exec frame.
        del tblist[:1]

        # Set the right lineno (encoding header adds an extra line)
        if not py3:
            for i, (filename, lineno, module, something) in enumerate(tblist):
                if filename == '<input>':
                    tblist[i] = (filename, lineno - 1, module, something)

        l = traceback.format_list(tblist)
        if l:
            l.insert(0, "Traceback (most recent call last):\n")
        l[len(l):] = traceback.format_exception_only(t, v)
    finally:
        # Drop traceback references so frames can be collected promptly.
        tblist = tb = None
    self.writetb(l)
def execute(code):
    # Run user code in a captured environment: stdout/stderr/stdin are
    # swapped for recorders, opened files are tracked through FileTracker,
    # and tracebacks are filtered so this runner's own frames are hidden.
    # Returns a result dict describing everything the code did.
    if code == 'test_error':
        # Sentinel input used to exercise the runner's own error path.
        raise ValueError
    tempdir = gettempdir()
    script_py = os.path.join(tempdir, 'my_script.py')
    write_to_path(script_py, code)
    stdin = AccessRecorder()
    stdout = StringIO()
    stderr = StringIO()
    file_tracker = FileTracker(open)
    with temp_vars([sys, 'stdout', stdout], [sys, 'stderr', stderr], [sys, 'stdin', stdin]):
        # noinspection PyBroadException
        try:
            exec (
                compile(code, script_py, 'exec', dont_inherit=True),
                {'open': file_tracker, '__name__': '__main__'},
                {})
        except QuietExit:
            pass
        except:
            # Hide frames originating from this runner module and strip the
            # temp dir prefix so reported paths look local to the user.
            tb_list = [e for e in tb.extract_tb(sys.exc_info()[2]) if os.path.dirname(__file__) not in e[0]]
            print('Traceback (most recent call last):\n' + ''.join(tb.format_list(tb_list) + tb.format_exception_only(*sys.exc_info()[:2])).replace(tempdir, ''), file=sys.stderr)
    return {'stdout': stdout.getvalue(), 'stderr': stderr.getvalue(), 'files': file_tracker.result(), 'stdin_used': stdin.accessed, 'non_ascii_files': file_tracker.non_ascii_files, 'version': platform.python_version()}
def _sendErrorEmail(self, e):
    # Mail the support address a report about a failed offline-website
    # task, including the traceback of the exception currently being
    # handled (must be called from inside the except block).
    ty, ex, tb = sys.exc_info()
    tracebackList = traceback.format_list(traceback.extract_tb(tb))
    text = (
        _(
            """ Offline website creation for the [event:%s] had caused an error while running the task. - Request from user: %s <%s> - Details of the exception: %s - Traceback: %s -- <Indico support> indico-project @ cern.ch """
        )
        % (self._conf.getId(), self._toUser.getFullName(), self._toUser.getEmail(), e, "\n".join(tracebackList))
    )
    maildata = {
        "fromAddr": Config.getInstance().getSupportEmail(),
        "toList": [Config.getInstance().getSupportEmail()],
        "subject": _("[Indico] Error in task: Offline website creation"),
        "body": text,
    }
    GenericMailer.send(GenericNotification(maildata))
def default_exception_handler(self, context):
    """Default exception handler.

    This is called when an exception occurs and no exception
    handler is set, and can be called by a custom exception
    handler that wants to defer to the default behavior.

    The context parameter has the same meaning as in
    `call_exception_handler()`.
    """
    message = context.get('message') or 'Unhandled exception in event loop'
    exception = context.get('exception')
    exc_info = ((type(exception), exception, exception.__traceback__)
                if exception is not None else False)
    log_lines = [message]
    for key in sorted(context):
        if key in {'message', 'exception'}:
            continue
        if key == 'source_traceback':
            formatted = ''.join(traceback.format_list(context[key]))
            rendered = ('Object created at (most recent call last):\n'
                        + formatted.rstrip())
        else:
            rendered = repr(context[key])
        log_lines.append('{}: {}'.format(key, rendered))
    logger.error('\n'.join(log_lines), exc_info=exc_info)
def trouble(self, message=None, tb=None):
    """Determine action to take when a download problem appears.

    Depending on if the downloader has been configured to ignore
    download errors or not, this method may throw an exception or
    not when errors are found, after printing the message.

    tb, if given, is additional traceback information.
    """
    if message is not None:
        self.to_stderr(message)
    if self.params.get('verbose'):
        if tb is None:
            if sys.exc_info()[0]:
                # if .trouble has been called from an except block
                tb = u''
                # Some wrapped errors carry the original exc_info; show it
                # before the current traceback.
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += compat_str(traceback.format_exc())
            else:
                # Not inside an except block: show the call stack instead.
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = u''.join(tb_data)
        self.to_stderr(tb)
    if not self.params.get('ignoreerrors', False):
        # Prefer the wrapped original exc_info when the active exception
        # carries one, so DownloadError points at the root cause.
        if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
            exc_info = sys.exc_info()[1].exc_info
        else:
            exc_info = sys.exc_info()
        raise DownloadError(message, exc_info)
    self._download_retcode = 1
def report_internal_error(
    err: Exception,
    file: Optional[str],
    line: int,
    errors: Errors,
    options: Options,
    stdout: Optional[TextIO] = None,
    stderr: Optional[TextIO] = None,
) -> None:
    """Report internal error and exit.

    This optionally starts pdb or shows a traceback.
    """
    stdout = (stdout or sys.stdout)
    stderr = (stderr or sys.stderr)
    # Dump out errors so far, they often provide a clue.
    # But catch unexpected errors rendering them.
    try:
        for msg in errors.new_messages():
            print(msg)
    except Exception as e:
        print("Failed to dump errors:", repr(e), file=stderr)
    # Compute file:line prefix for official-looking error messages.
    if file:
        if line:
            prefix = '{}:{}: '.format(file, line)
        else:
            prefix = '{}: '.format(file)
    else:
        prefix = ''
    # Print "INTERNAL ERROR" message.
    print('{}error: INTERNAL ERROR --'.format(prefix),
          'Please try using mypy master on Github:\n'
          'https://mypy.readthedocs.io/en/stable/common_issues.html'
          '#using-a-development-mypy-build',
          file=stderr)
    if options.show_traceback:
        print('Please report a bug at https://github.com/python/mypy/issues',
              file=stderr)
    else:
        print(
            'If this issue continues with mypy master, '
            'please report a bug at https://github.com/python/mypy/issues',
            file=stderr)
    print('version: {}'.format(mypy_version), file=stderr)
    # If requested, drop into pdb. This overrides show_tb.
    if options.pdb:
        print('Dropping into pdb', file=stderr)
        import pdb
        pdb.post_mortem(sys.exc_info()[2])
    # If requested, print traceback, else print note explaining how to get one.
    if options.raise_exceptions:
        raise err
    if not options.show_traceback:
        if not options.pdb:
            print('{}: note: please use --show-traceback to print a traceback '
                  'when reporting a bug'.format(prefix),
                  file=stderr)
    else:
        # Stitch the current call stack (minus these reporting frames) onto
        # the exception's own traceback for a complete picture.
        tb = traceback.extract_stack()[:-2]
        tb2 = traceback.extract_tb(sys.exc_info()[2])
        print('Traceback (most recent call last):')
        for s in traceback.format_list(tb + tb2):
            print(s.rstrip('\n'))
        print('{}: {}'.format(type(err).__name__, err), file=stdout)
        print('{}: note: use --pdb to drop into pdb'.format(prefix), file=stderr)
    # Exit.  The caller has nothing more to say.
    # We use exit code 2 to signal that this is no ordinary error.
    raise SystemExit(2)
def getFormattedList(self): '''Get a list of strings as returned by the traceback module's format_list() and format_exception_only() functions.''' tmp = traceback.format_list(self.traceback) tmp.extend(traceback.format_exception_only(self.ExcClass, self.excArg)) return tmp
# Drain progress updates from the worker process until a result message
# arrives: each 1-second select tick forwards up to `size` bytes of the
# shared-memory buffer `mm` to stdout and acks the worker over the socket.
while msg_q.empty():
    rlist, _, _ = select.select([psock], [], [], 1)
    if rlist:
        size = int(psock.recv(10))
        sys.stdout.write(mm[:size])
        psock.send("1")
ret = msg_q.get()
if ret[0] == 0 and ret[1] is True:
    # Success: the worker will keep seeding in the background.
    logger.info('downloaded, upload %s second(s) background' % args.upload_time)
    retcode = 0
elif ret[0] == 1:
    # Failure: the worker forwarded an (exc_type, exc_obj, extracted_tb)
    # triple; log it and map the exception class name to an exit code.
    logger.error("Traceback (most recent call last):")
    exc_type, exc_obj, exc_trace = ret[1]
    logger.error("".join(traceback.format_list(exc_trace)) + exc_type.__name__ + ": " + str(exc_obj))
    retcode = 1
    if exc_type.__name__ == 'ChecksumMissMatch':
        logger.error(exc_obj.message)
        retcode = RETCODE.checksum_missmatch
    elif exc_type.__name__ == 'TrackerUnavailable':
        logger.error("Tracker Server: %s Unavaliable" % args.tracker)
        retcode = RETCODE.tracker_unavaliable
    elif exc_type.__name__ == 'gaierror':
        # socket.gaierror: name resolution failure
        logger.error("Domain name resolve failed, check DNS")
        retcode = RETCODE.dns_fail
    elif exc_type.__name__ == 'OriginURLConnectError':
        logger.error("Get data from origin url fail")
        retcode = RETCODE.origin_fail
def __str__(self):
    """Render the recorded stack as a header line followed by the
    formatted traceback entries."""
    header = "Debug stack trace of %s (back %s frames):\n" % (
        self.label, len(self.trace),)
    return header + "".join(traceback.format_list(self.trace))
def _exception_to_string(excp):
    """Format *excp* as a string: the caller's stack (minus this helper's
    own frames) joined with the exception's traceback, followed by the
    exception class and value."""
    combined = traceback.extract_stack()[:-3]
    combined = combined + traceback.extract_tb(excp.__traceback__)
    rendered = ''.join(traceback.format_list(combined))
    return rendered + '\n {} {}'.format(excp.__class__, excp)
def __init__(self, msg, parent=None, api_object=None, from_log=False):
    """
    Create a new exception object.
    :param msg: The exception message.
    :param parent: Original exception
    :param api_object: The object that has caused the exception, default is None.
    :param from_log: Exception c'tor originates from the static log_ member method (ignore in exception stack!)
    """
    Exception.__init__(self, msg)
    self._plain_message = msg
    self._exceptions = [self]
    self._top_exception = self
    self._ptype = type(parent).__name__  # parent exception type
    self._stype = type(self).__name__  # own exception type
    # Number of stack frames to skip so the reported location is the caller
    # of this constructor, not the constructor (or log_ helper) itself.
    ignore_stack = 2
    if from_log:
        ignore_stack += 1
    if api_object:
        # Weak reference only: do not keep the failing API object alive.
        self._object = weakref.ref(api_object)
    else:
        self._object = None
    # did we get a parent exception?
    if parent:
        # if so, then this exception is likely created in some 'except'
        # clause, as a reaction on a previously catched exception (the
        # parent). Thus we append the message of the parent to our own
        # message, but keep the parent's traceback (after all, the original
        # exception location is what we are interested in).
        #
        if isinstance(parent, SagaException):
            # that all works nicely when parent is our own exception type...
            self._traceback = parent.traceback
            frame = traceback.extract_stack()[-ignore_stack]
            line = "%s +%s (%s) : %s" % frame
            self._message = " %-20s: %s (%s)\n%s" \
                % (self._stype, msg, line, parent.msg)
        else:
            if self._stype != "NoneType":
                # ... but if parent is a native (or any other) exception
                # type, we don't have a traceback really -- so we dig it
                # out of sys.exc_info.
                trace = sys.exc_info()[2]
                stack = traceback.extract_tb(trace)
                traceback_list = traceback.format_list(stack)
                self._traceback = "".join(traceback_list)
                # the message composition is very similar -- we just inject
                # the parent exception type inconspicuously somewhere (above
                # that was part of 'parent.message' already).
                frame = traceback.extract_stack()[-ignore_stack]
                line = "%s +%s (%s) : %s" % frame
                self._message = " %-20s: %s (%s)\n %-20s: %s" \
                    % (self._stype, msg, line, self._ptype, parent)
    else:
        # if we don't have a parent, we are a 1st principle exception,
        # i.e. a reaction to some genuine code error. Thus we extract the
        # traceback from exactly where we are in the code (the last stack
        # frame will be the call to this exception constructor), and we
        # create the original exception message from 'stype' and 'message'.
        stack = traceback.extract_stack()
        traceback_list = traceback.format_list(stack)
        # Drop the last frame (this constructor) from the rendered trace.
        self._traceback = "".join(traceback_list[:-1])
        frame = traceback.extract_stack()[-ignore_stack - 1]
        line = "%s +%s (%s) : %s" % frame
        self._message = "%s (%s)" % (msg, line)
    # we can't do that earlier as _msg was not set up before
    self._messages = [self._message]
def run(self):
    """Worker-thread main loop: owns the sqlite connection and serially
    executes requests pulled from self.reqs.

    Each queued item is (sql, params, result_queue_or_None, caller_stack).
    Two sentinel requests control the loop: '--close--' ends it and
    '--commit--' forces a commit.  All sqlite access happens on this one
    thread, which is why check_same_thread=False is safe here.
    """
    if self.autocommit:
        # isolation_level=None puts sqlite in autocommit mode.
        conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False)
    else:
        conn = sqlite3.connect(self.filename, check_same_thread=False)
    conn.execute('PRAGMA journal_mode = %s' % self.journal_mode)
    conn.text_factory = str
    cursor = conn.cursor()
    conn.commit()
    # Trade durability for speed: no fsync per transaction.
    cursor.execute('PRAGMA synchronous=OFF')
    res = None
    while True:
        req, arg, res, outer_stack = self.reqs.get()
        if req == '--close--':
            # '--close--' must carry a return queue so the caller can be
            # told (below, after conn.close()) that shutdown completed.
            assert res, ('--close-- without return queue', res)
            break
        elif req == '--commit--':
            conn.commit()
            if res:
                res.put('--no more--')
        else:
            try:
                cursor.execute(req, arg)
            except Exception as err:
                # Stash exc_info so the calling thread can re-raise it on
                # its next interaction with this object.
                self.exception = (e_type, e_value, e_tb) = sys.exc_info()
                inner_stack = traceback.extract_stack()
                # An exception occurred in our thread, but we may not
                # immediately able to throw it in our calling thread, if it has
                # no return `res` queue: log as level ERROR both the inner and
                # outer exception immediately.
                #
                # Any iteration of res.get() or any next call will detect the
                # inner exception and re-raise it in the calling Thread; though
                # it may be confusing to see an exception for an unrelated
                # statement, an ERROR log statement from the 'sqlitedict.*'
                # namespace contains the original outer stack location.
                self.log.error('Inner exception:')
                for item in traceback.format_list(inner_stack):
                    self.log.error(item)
                self.log.error('')  # deliniate traceback & exception w/blank line
                for item in traceback.format_exception_only(e_type, e_value):
                    self.log.error(item)
                self.log.error('')  # exception & outer stack w/blank line
                self.log.error('Outer stack:')
                for item in traceback.format_list(outer_stack):
                    self.log.error(item)
                self.log.error('Exception will be re-raised at next call.')
            if res:
                # Stream all result rows back to the caller, then a sentinel.
                for rec in cursor:
                    res.put(rec)
                res.put('--no more--')
            if self.autocommit:
                conn.commit()
            self.log.debug('received: %s, send: --no more--', req)
    conn.close()
    # Acknowledge '--close--' to the caller waiting on its queue.
    res.put('--no more--')
""" JDSL Object, where the program begins and receives user input """ class JDSL: __instance = None def __new__(cls): if cls.__instance is None: filename, trace = cls.get_user_input() cls.__instance = object.__new__(cls) cls.__instance.program = JVP(filename,trace) return cls.__instance @classmethod def get_user_input(cls): filename, trace = '',None while filename[-5:] != '.jdsl' or not op.isfile(filename): filename = input('enter a valid .jdsl filename: ') print('\ndo you want to enable tracing?') while trace not in ['Y','y','N','n']: trace = input('\nenter Y or N: ') return filename, trace if __name__ == '__main__': try: print('\nSimple Java Program Builder\n') JDSL() print('\nGoodbye!\n') except Exception as e: print(str(e)) _, _, tb = sys.exc_info() print(traceback.format_list(traceback.extract_tb(tb)[-1:])[-1])
def _print_exception(t, value, tb, realfile, text, context):
    """Build and log a human-readable report for an exception raised while
    executing a compiled-from-string python function.

    t/value/tb are the exc_info triple; realfile names the recipe/file the
    function came from; text is the function's source; context is the
    execution namespace (may contain the datastore under key "d").
    Always logs via logger.error, even if report construction itself fails.
    """
    error = []
    try:
        exception = traceback.format_exception_only(t, value)
        error.append('Error executing a python function in %s:\n' % realfile)
        # Strip 'us' from the stack (better_exec call)
        tb = tb.tb_next
        textarray = text.split('\n')
        linefailed = tb.tb_lineno
        tbextract = traceback.extract_tb(tb)
        tbformat = traceback.format_list(tbextract)
        error.append("The stack trace of python calls that resulted in this exception/failure was:")
        error.append("File: '%s', lineno: %s, function: %s" % (tbextract[0][0], tbextract[0][1], tbextract[0][2]))
        error.extend(_print_trace(textarray, linefailed))
        # See if this is a function we constructed and has calls back into other functions in
        # "text". If so, try and improve the context of the error by diving down the trace
        level = 0
        nexttb = tb.tb_next
        while nexttb is not None and (level + 1) < len(tbextract):
            error.append("File: '%s', lineno: %s, function: %s" % (tbextract[level + 1][0], tbextract[level + 1][1], tbextract[level + 1][2]))
            # NOTE(review): this compares the next frame's *function name*
            # (index 2) against the current frame's *file name* (index 0) --
            # looks like it may have been meant to compare function names;
            # confirm against upstream before relying on this branch.
            if tbextract[level][0] == tbextract[level + 1][0] and tbextract[level + 1][2] == tbextract[level][0]:
                # The code was possibly in the string we compiled ourselves
                error.extend(_print_trace(textarray, tbextract[level + 1][1]))
            elif tbextract[level + 1][0].startswith("/"):
                # The code looks like it might be in a file, try and load it
                try:
                    with open(tbextract[level + 1][0], "r") as f:
                        text = f.readlines()
                        error.extend(_print_trace(text, tbextract[level + 1][1]))
                except:
                    # Unreadable file: fall back to the plain formatted frame.
                    error.append(tbformat[level + 1])
            elif "d" in context and tbextract[level + 1][2]:
                # Try and find the code in the datastore based on the functionname
                d = context["d"]
                functionname = tbextract[level + 1][2]
                text = d.getVar(functionname, True)
                if text:
                    error.extend(_print_trace(text.split('\n'), tbextract[level + 1][1]))
                else:
                    error.append(tbformat[level + 1])
            else:
                error.append(tbformat[level + 1])
            nexttb = tb.tb_next
            level = level + 1
        error.append("Exception: %s" % ''.join(exception))
    finally:
        # Emit whatever was collected, even on partial failure above.
        logger.error("\n".join(error))
# NOTE(review): Python 2 code (`raise Exc, msg`, cmp-style list.sort(),
# xrange, e.message) -- keep a py2 interpreter in mind when editing.
def test_learners(description, group, max_tests, needed_wins, row_limits,
                  col_limits, seed, grader):
    """Test data generation methods beat given learner. Requires test
    description, test case group, and a grader fixture."""
    points_earned = 0.0  # initialize points for this test case
    incorrect = True
    msgs = []
    try:
        dataX, dataY = None, None
        same_dataX, same_dataY = None, None
        diff_dataX, diff_dataY = None, None
        betterLearner, worseLearner = None, None
        if group == 'author':
            # Author check: the student must report a GT id that is neither
            # empty nor the template's placeholder 'tb34'.
            try:
                from gen_data import author
                auth_string = run_with_timeout(author, seconds_per_test_case, (), {})
                if auth_string == 'tb34':
                    incorrect = True
                    msgs.append(" Incorrect author name (tb34)")
                    points_earned = -10
                elif auth_string == '':
                    incorrect = True
                    msgs.append(" Empty author name")
                    points_earned = -10
                else:
                    incorrect = False
            except Exception as e:
                incorrect = True
                msgs.append(" Exception occured when calling author() method: {}".format(e))
                points_earned = -10
        else:
            # Generate the dataset three times: twice with the same seed
            # (must match) and once with a different seed (must differ).
            if group == "best4dt":
                from gen_data import best4DT
                dataX, dataY = run_with_timeout(best4DT, seconds_per_test_case, (), {'seed': seed})
                same_dataX, same_dataY = run_with_timeout(best4DT, seconds_per_test_case, (), {'seed': seed})
                diff_dataX, diff_dataY = run_with_timeout(best4DT, seconds_per_test_case, (), {'seed': seed + 1})
                betterLearner = DTLearner
                worseLearner = LinRegLearner
            elif group == 'best4lr':
                from gen_data import best4LinReg
                dataX, dataY = run_with_timeout(best4LinReg, seconds_per_test_case, (), {'seed': seed})
                same_dataX, same_dataY = run_with_timeout(best4LinReg, seconds_per_test_case, (), {'seed': seed})
                diff_dataX, diff_dataY = run_with_timeout(best4LinReg, seconds_per_test_case, (), {'seed': seed + 1})
                betterLearner = LinRegLearner
                worseLearner = DTLearner
            # Repeated random 60/40 train/test splits; record both learners'
            # test-set error norms for each run.
            num_samples = dataX.shape[0]
            cutoff = int(num_samples * 0.6)
            worse_better_err = []
            for run in range(max_tests):
                permutation = np.random.permutation(num_samples)
                train_X, train_Y = dataX[permutation[:cutoff]], dataY[permutation[:cutoff]]
                test_X, test_Y = dataX[permutation[cutoff:]], dataY[permutation[cutoff:]]
                better = betterLearner()
                worse = worseLearner()
                better.addEvidence(train_X, train_Y)
                worse.addEvidence(train_X, train_Y)
                better_pred = better.query(test_X)
                worse_pred = worse.query(test_X)
                better_err = np.linalg.norm(test_Y - better_pred)
                worse_err = np.linalg.norm(test_Y - worse_pred)
                worse_better_err.append((worse_err, better_err))
            # Sort by margin (worse - better), biggest wins first (py2 cmp).
            worse_better_err.sort(lambda a, b: int((b[0] - b[1]) - (a[0] - a[1])))
            # Count wins: 'better' must beat 'worse' by at least 10%.
            better_wins_count = 0
            for worse_err, better_err in worse_better_err:
                if better_err < 0.9 * worse_err:
                    better_wins_count = better_wins_count + 1
                    points_earned += 5.0
                if better_wins_count >= needed_wins:
                    break
            incorrect = False
            # Shape / determinism / seed-sensitivity checks, each -20 capped at 0.
            if (dataX.shape[0] < row_limits[0]) or (dataX.shape[0] > row_limits[1]):
                incorrect = True
                msgs.append(" Invalid number of rows. Should be between {}, found {}".format(row_limits, dataX.shape[0]))
                points_earned = max(0, points_earned - 20)
            if (dataX.shape[1] < col_limits[0]) or (dataX.shape[1] > col_limits[1]):
                incorrect = True
                msgs.append(" Invalid number of columns. Should be between {}, found {}".format(col_limits, dataX.shape[1]))
                points_earned = max(0, points_earned - 20)
            if better_wins_count < needed_wins:
                incorrect = True
                msgs.append(" Better learner did not exceed worse learner. Expected {}, found {}".format(needed_wins, better_wins_count))
            if not (np.array_equal(same_dataY, dataY)) or not (np.array_equal(same_dataX, dataX)):
                incorrect = True
                msgs.append(" Did not produce the same data with the same seed.\n"+\
                            " First dataX:\n{}\n".format(dataX)+\
                            " Second dataX:\n{}\n".format(same_dataX)+\
                            " First dataY:\n{}\n".format(dataY)+\
                            " Second dataY:\n{}\n".format(same_dataY))
                points_earned = max(0, points_earned - 20)
            if np.array_equal(diff_dataY, dataY) and np.array_equal(diff_dataX, dataX):
                incorrect = True
                msgs.append(" Did not produce different data with different seeds.\n"+\
                            " First dataX:\n{}\n".format(dataX)+\
                            " Second dataX:\n{}\n".format(diff_dataX)+\
                            " First dataY:\n{}\n".format(dataY)+\
                            " Second dataY:\n{}\n".format(diff_dataY))
                points_earned = max(0, points_earned - 20)
        if incorrect:
            if group == 'author':
                raise IncorrectOutput, "Test failed on one or more criteria.\n {}".format('\n'.join(msgs))
            else:
                inputs_str = " Residuals: {}".format(worse_better_err)
                raise IncorrectOutput, "Test failed on one or more output criteria.\n Inputs:\n{}\n Failures:\n{}".format(inputs_str, "\n".join(msgs))
        else:
            if group != 'author':
                # Performance metric: mean margin over the 10 best runs.
                avg_ratio = 0.0
                worse_better_err.sort(lambda a, b: int(np.sign((b[0] - b[1]) - (a[0] - a[1]))))
                for we, be in worse_better_err[:10]:
                    avg_ratio += (float(we) - float(be))
                avg_ratio = avg_ratio / 10.0
                if group == "best4dt":
                    grader.add_performance(np.array([avg_ratio, 0]))
                else:
                    grader.add_performance(np.array([0, avg_ratio]))
    except Exception as e:
        # Test result: failed
        msg = "Description: {} (group: {})\n".format(description, group)
        # Generate a filtered stacktrace, only showing erroneous lines in student file(s)
        tb_list = tb.extract_tb(sys.exc_info()[2])
        for i in xrange(len(tb_list)):
            row = tb_list[i]
            tb_list[i] = (os.path.basename(row[0]), row[1], row[2], row[3])  # show only filename instead of long absolute path
        tb_list = [row for row in tb_list if (row[0] == 'gen_data.py')]
        if tb_list:
            msg += "Traceback:\n"
            msg += ''.join(tb.format_list(tb_list))  # contains newlines
        elif 'grading_traceback' in dir(e):
            msg += "Traceback:\n"
            msg += ''.join(tb.format_list(e.grading_traceback))
        msg += "{}: {}".format(e.__class__.__name__, e.message)
        # Report failure result to grader, with stacktrace
        grader.add_result(GradeResult(outcome='failed', points=points_earned, msg=msg))
        raise
    else:
        # Test result: passed (no exceptions)
        grader.add_result(GradeResult(outcome='passed', points=points_earned, msg=None))
def initiate_guest(kickstart_host, cobbler_system_name, virt_type, name,
                   mem_kb, vcpus, disk_gb, virt_bridge, disk_path,
                   extra_append, log_notify_handler=None):
    """Kick off installation of a virtual guest via koan against a cobbler
    system record.

    Returns a (status, message, error_messages) triple where status is 0 on
    success and 1 on any failure; error_messages maps 'koan' to the failure
    text.  NOTE(review): mem_kb, vcpus, disk_gb, extra_append and
    log_notify_handler are accepted but never used in this body -- presumably
    consumed elsewhere or kept for interface compatibility; confirm.
    """
    error_messages = {}
    success = 0  # NOTE(review): assigned but never used below
    try:
        # A /dev/... disk path must already exist (block device); any other
        # path must NOT exist yet (koan will create the disk image).
        if disk_path.startswith('/dev/'):
            if not os.path.exists(disk_path):
                raise BlockDeviceNonexistentError(disk_path)
        else:
            if os.path.exists(disk_path):
                raise VirtDiskPathExistsError(disk_path)
        # Switch to KVM if possible
        if virt_type == "qemu":
            if os.path.exists("/dev/kvm"):
                virt_type = "kvm"
            else:
                print("Warning: KVM not available, using QEMU.")
        # Configure the koan run entirely through attributes (koan's CLI-less
        # API): install a virtual system from the given cobbler record.
        k = Koan()
        k.list_items = 0
        k.server = kickstart_host
        k.is_virt = 1
        k.is_replace = 0
        k.is_display = 0
        k.port = 443
        k.profile = None
        k.system = cobbler_system_name
        k.should_poll = 1
        k.image = None
        k.live_cd = None
        k.virt_name = name
        k.virt_path = disk_path
        k.virt_type = virt_type
        k.virt_bridge = virt_bridge
        k.no_gfx = False
        k.add_reinstall_entry = None
        k.kopts_override = None
        k.virt_auto_boot = None
        # Attribute names changed across koan versions; feature-detect.
        if hasattr(k, 'no_copy_default'):
            k.no_copy_default = 1
        else:  # older koan
            k.grubby_copy_default = 0
        if hasattr(k, 'virtinstall_wait'):
            k.virtinstall_wait = 0
        k.run()
        # refresh current virtualization state on the server
        import virtualization.support
        virtualization.support.refresh()
    except Exception:
        (xa, xb, tb) = sys.exc_info()
        if str(xb).startswith("The MAC address you entered is already in use"):
            # I really wish there was a better way to check for this
            error_messages['koan'] = str(xb)
            print(str(xb))
        elif hasattr(xb, "from_koan") and len(str(xb)) > 1:
            # Koan-raised exceptions wrap their text in quotes; strip them.
            error_messages['koan'] = str(xb)[1:-1]
            print(str(xb)[1:-1])  # nice exception, no traceback needed
        else:
            print(xa)
            print(xb)
            print(" ".join(traceback.format_list(traceback.extract_tb(tb))))
            error_messages['koan'] = str(xb) + ' ' + " ".join(traceback.format_list(traceback.extract_tb(tb)))
        return (1, "Virtual kickstart failed. Koan error.", error_messages)
    return (0, "Virtual kickstart initiate succeeded", error_messages)
async def _run_consumer(topic_queue):
    """Drive an AIOKafkaConsumer from a command queue.

    Commands arriving on *topic_queue* add/remove topic subscriptions,
    pull or push record batches, and track partition assignment.  Relies on
    closure variables from the enclosing scope (feed_mode, source_type,
    loop, server, group, max_partition_fetch_bytes, source_observer, the
    Cmd/context classes, rx, functools, ...).

    Fixes vs. previous revision:
    * on_revoked: ``inactive_topics[tp.topic] == True`` was a no-op
      comparison; it is now an assignment, so RevokedCmd is actually
      emitted once every topic has lost all its partitions.
    * on_revoked indexed ``partitions`` by ``tp.partition`` while every
      other site (on_assigned, tp_is_completed, process_next_batch) keys
      the dict by the full TopicPartition; it now uses ``tp`` consistently.
    """
    control = {}              # observer -> pending regulation delay
    control_disposables = {}  # observer -> control-stream subscription
    topics = {}               # context of each subscribed topic

    def on_next_control(obv, i):
        # Record the latest throttling value for this topic's observer.
        nonlocal control
        control[obv] = i

    def on_partition_subscribe(tp_context, observer, scheduler):
        tp_context.observer = observer
        if feed_mode is DataFeedMode.PULL:
            # In pull mode hand the subscriber a callable to request data.
            observer.on_next(
                functools.partial(on_partition_back, tp_context.tp))

    def on_revoked(tps):
        # Complete and drop every revoked partition; when ALL topics end up
        # with no partitions, queue a RevokedCmd.
        inactive_topics = {}
        for topic in topics:
            inactive_topics[topic] = False
        for tp in tps:
            # partitions is keyed by the full TopicPartition (see on_assigned).
            topics[tp.topic].partitions[tp].observer.on_completed()
            del topics[tp.topic].partitions[tp]
            if len(topics[tp.topic].partitions) == 0:
                inactive_topics[tp.topic] = True  # was '==' (no-op) before
        all_inactive = [inactive_topics[s] for s in inactive_topics]
        if all(all_inactive):
            topic_queue.put_nowait(RevokedCmd())

    def on_assigned(tps):
        # Create a context per assigned partition and surface it as a new
        # partition observable on the topic's observer.
        for tp in tps:
            context = TopicPartitionContext()
            context.tp = tp
            topics[tp.topic].partitions[tp] = context
            topics[tp.topic].observer.on_next(
                rx.create(
                    functools.partial(on_partition_subscribe, context)))
        topic_queue.put_nowait(AssignedCmd())

    async def tp_is_completed(topic_partition):
        # In BATCH mode a partition is done when our position reached the
        # highwater mark (no more lag).
        if source_type is DataSourceType.BATCH:
            highwater = client.highwater(topic_partition)
            if highwater:
                position = await client.position(topic_partition)
                if highwater == position:
                    print("no more lag on {}-{}".format(
                        topic_partition.topic, topic_partition.partition))
                    topics[topic_partition.topic].partitions[
                        topic_partition].completed = True
                    return True
        return False

    async def process_next_batch(topic_partition, count):
        """Read up to *count* records (from one partition, or any if None)
        and forward the decoded values; returns how many were emitted."""
        tp = [topic_partition] if topic_partition else []
        read_count = 0
        if count == 1:
            msg = await client.getone(*tp)
            if topic_partition is None:
                topic_partition = TopicPartition(msg.topic, msg.partition)
            topic = topics[topic_partition.topic]
            decoded_msg = topic.decode(msg.value)
            topic.partitions[topic_partition].observer.on_next(decoded_msg)
            read_count += 1
        else:
            data = await client.getmany(*tp, timeout_ms=5000, max_records=count)
            if len(data) > 0:
                msgs = data[topic_partition]
                topic = topics[topic_partition.topic]
                for msg in msgs:
                    decoded_msg = topic.decode(msg.value)
                    topic.partitions[topic_partition].observer.on_next(
                        decoded_msg)
                    read_count += 1
        return read_count

    try:
        client = AIOKafkaConsumer(
            loop=loop,
            bootstrap_servers=server,
            group_id=group,
            auto_offset_reset='latest',
            enable_auto_commit=True,
            max_partition_fetch_bytes=max_partition_fetch_bytes,
        )
        print("start kafka consumer")
        await client.start()
        partition_assigned = False
        yield_countdown = 5000
        prev_partition = None
        pcount = 0
        while True:
            # Prefer a pending command; otherwise await the next one.
            try:
                cmd = topic_queue.get_nowait()
            except asyncio.QueueEmpty as e:
                print("queue empty")
                cmd = await topic_queue.get()
            #if len(topics) == 0 or not topic_queue.empty():
            #cmd = await topic_queue.get()
            if type(cmd) is AddConsumerCmd:
                print('run consumer: add {}'.format(cmd.consumer.topic))
                if cmd.consumer.topic in topics:
                    source_observer.on_error(
                        ValueError(
                            "topic already subscribed for this consumer: {}"
                            .format(cmd.consumer.decode)))
                    break
                if cmd.consumer.control is not None:
                    # Subscribe to the caller's throttle stream.
                    control_disposables[
                        cmd.observer] = cmd.consumer.control.subscribe(
                            on_next=functools.partial(
                                on_next_control, cmd.observer),
                            on_error=source_observer.on_error,
                        )
                topics[cmd.consumer.topic] = TopicContext(
                    observer=cmd.observer,
                    topic=cmd.consumer.topic,
                    decode=cmd.consumer.decode,
                    start_from=cmd.consumer.start_from,
                    partitions={})
                # Re-subscribe with the full current topic set.
                sub_start_positions = {}
                sub_topics = []
                for k, c in topics.items():
                    sub_topics.append(c.topic)
                    sub_start_positions[c.topic] = c.start_from
                sub_topics = set(sub_topics)
                client.subscribe(topics=sub_topics,
                                 listener=ConsumerRebalancer(
                                     client,
                                     sub_start_positions,
                                     on_revoked=on_revoked,
                                     on_assigned=on_assigned,
                                 ))
            elif type(cmd) is DelConsumerCmd:
                print('run consumer: del {}'.format(cmd))
                topic = topics[cmd.topic]
                disposable = control_disposables.pop(topic.observer, None)
                if disposable is not None:
                    disposable.dispose()
                topics.pop(cmd.topic)
                sub_start_positions = {}
                sub_topics = []
                for k, c in topics.items():
                    sub_topics.append(c.topic)
                    sub_start_positions[c.topic] = c.start_from
                sub_topics = set(sub_topics)
                if len(sub_topics) > 0:
                    client.subscribe(topics=sub_topics,
                                     listener=ConsumerRebalancer(
                                         client,
                                         sub_start_positions,
                                         on_revoked=on_revoked,
                                         on_assigned=on_assigned,
                                     ))
                topic.observer.on_completed()
            elif type(cmd) is PullTopicPartitionCmd:
                no_lag = await tp_is_completed(cmd.topic_partition)
                if source_type is DataSourceType.BATCH and no_lag == True:
                    # Batch source fully drained: complete the partition, and
                    # the topic once every partition is done.
                    topic = topics[cmd.topic_partition.topic]
                    topic.partitions[
                        cmd.topic_partition].observer.on_completed()
                    if all([i.completed for _, i in topic.partitions.items()]):
                        print("completed processing topic {}".format(
                            cmd.topic_partition.topic))
                        topic.observer.on_completed()
                else:
                    await process_next_batch(cmd.topic_partition, cmd.count)
            elif type(cmd) is PushRecordCmd:
                read_count = await process_next_batch(None, 1)
                if read_count > 0:
                    # Keep pushing while records keep coming.
                    topic_queue.put_nowait(PushRecordCmd())
            elif type(cmd) is AssignedCmd:
                if partition_assigned is False:
                    partition_assigned = True
                    if feed_mode is DataFeedMode.PUSH:
                        topic_queue.put_nowait(PushRecordCmd())
            elif type(cmd) is RevokedCmd:
                partition_assigned = False
            else:
                source_observer.on_error(
                    TypeError(
                        "invalid type for queue command: {}".format(cmd)))
            if len(topics) == 0:
                print("no more topic subscribed, ending consumer task")
                break
            # Apply any requested throttling, yielding to the loop regularly.
            regulated = False
            for topic, consumer in topics.items():
                regulation_time = control.get(consumer.observer, None)
                if regulation_time is not None and regulation_time > 0:
                    await asyncio.sleep(regulation_time)
                    regulated = True
                    yield_countdown = 5000
                    control[consumer.observer] = None
                    break  # limitation only one controllable topic for now
            yield_countdown -= 1
            if yield_countdown == 0 and regulated is False:
                await asyncio.sleep(0)
                yield_countdown = 5000
        await client.stop()
    except asyncio.CancelledError as e:
        print("cancelled {}".format(e))
    except Exception as e:
        print("consummer exception: {}:{}".format(type(e), e))
        print(traceback.format_list(traceback.extract_tb(e.__traceback__)))
        raise e
def pyzo_excepthook(type, value, tb):
    """Top-level excepthook: write the uncaught exception and its formatted
    traceback to stderr, followed by a blank line."""
    frames = traceback.extract_tb(tb)
    parts = ['Uncaught Python exception: ' + str(value) + '\n']
    parts.extend(traceback.format_list(frames))
    parts.append('\n')
    sys.stderr.write(''.join(parts))
def _format_traceback(self, tb):
    """Render a pre-extracted traceback (a list of frame summaries, as
    produced by traceback.extract_tb/extract_stack) as one string."""
    lines = traceback.format_list(tb)
    return "".join(lines)
def new(*args, **kwargs):
    """Deprecation wrapper: warn on stderr, print the caller's stack (minus
    this frame) via the closure's stderr(), then delegate to the wrapped
    function `old`."""
    print('Function %s is deprecated.' % old.__name__, file=sys.stderr)
    stack = traceback.extract_stack()
    for entry in traceback.format_list(stack[:-1]):
        # Drop the trailing newline; stderr() adds its own line handling.
        stderr(entry[:-1])
    return old(*args, **kwargs)
def getVars(self):
    """Populate the template variables for the error-report page from the
    currently-handled exception (sys.exc_info()) and the active request.

    Full details (traceback, params, headers, user) are only rendered when
    debug mode is on or the session user is an admin.
    """
    vars = WTemplated.getVars(self)
    ex = sys.exc_info()[1]
    vars["msg"] = self.htmlText(str(ex))
    vars["area"] = ""
    ty, ex, tb = sys.exc_info()
    tracebackList = traceback.format_list(traceback.extract_tb(tb))
    rh = self._rh.__class__
    url = request.url.encode('utf-8')
    # Request parameters, with passwords redacted.
    params = []
    for (k, v) in self._rh.getRequestParams().items():
        if k.strip() != "password":
            params.append("""%s = %s""" % (self.htmlText(k), self.htmlText(v)))
    headers = []
    for k, v in request.headers.iteritems():
        headers.append("""%s: %s""" % (self.htmlText(k), self.htmlText(v)))
    userHTML = """-- none --"""
    vars["userEmail"] = ""
    av = self._rh.getAW().getUser()
    if av:
        userHTML = self.htmlText("%s <%s>" % (av.getFullName(), av.getEmail()))
        vars["userEmail"] = quoteattr(av.getEmail())
    vars["reportURL"] = quoteattr(str(urlHandlers.UHErrorReporting.getURL()))
    details = ""
    show_details = Config.getInstance().getDebug()
    if not show_details:
        try:
            show_details = session.user and session.user.is_admin
        except Exception:
            # We are handling some error so we cannot know if accessing the session user works
            # If it fails we simply don't show details...
            pass
    if show_details:
        details = """
            <table class="errorDetailsBox">
                <tr>
                    <td>ERROR DETAILS</td>
                </tr>
                <tr>
                    <td><br></td>
                </tr>
                <tr>
                    <td nowrap align="right"><b>Exception type:</b></td>
                    <td>%s</td>
                </tr>
                <tr>
                    <td nowrap align="right" valign="top"><b>Exception message:</b></td>
                    <td>%s</td>
                </tr>
            """ % (self.htmlText(str(ty)), self.htmlText(str(ex)))
        # Template-engine errors carry per-template tracebacks; show each.
        if hasattr(ex, 'problematic_templates') and hasattr(ex, 'template_tracebacks'):
            for i in range(len(ex.problematic_templates)):
                details += """
                <tr>
                    <td nowrap align="right" valign="top"><b>Traceback for<br>%s.tpl:</b></td>
                    <td>%s</td>
                </tr>
                """ % (ex.problematic_templates[i], "<br>".join(ex.template_tracebacks[i]))
        details += """
                <tr>
                    <td valign="top" nowrap align="right"><b>Traceback:</b></td>
                    <td><pre>%s</pre></td>
                </tr>
                <tr>
                    <td nowrap align="right"><b>Request handler:</b></td>
                    <td>%s</td>
                </tr>
                <tr>
                    <td nowrap align="right"><b>URL:</b></td>
                    <td>%s</td>
                </tr>
                <tr>
                    <td nowrap align="right" valign="top"><b>Params:</b></td>
                    <td>%s</td>
                </tr>
                <tr>
                    <td valign="top" nowrap align="right"><b>HTTP headers:</b></td>
                    <td><pre>%s</pre></td>
                </tr>
                <tr>
                    <td nowrap align="right"><b>Logged user:</b></td>
                    <td>%s</td>
                </tr>
            </table>
            """ % ("\n".join(tracebackList), rh.__name__, url, "<br>".join(params), \
                   "\n".join(headers), userHTML)
    vars["errorDetails"] = details
    vars["reportMsg"] = quoteattr(
        json.dumps({
            'request_info': get_request_info(),
            'traceback': traceback.format_exc()
        }))
    return vars
# NOTE(review): Python 2 code (print statements, u"" literals).
def Handler(errtype, value, tback):
    """User friendly error handling

    Global excepthook-style handler: formats the exception, saves a debug
    log, deduplicates repeated identical errors, derives a stable traceback
    id from the last in-Wammu frame, and shows a report dialog (falling back
    to printing when the dialog itself fails).
    """
    # prepare traceback text
    trace = traceback.extract_tb(tback)
    linetrace = traceback.format_list(trace)
    texttrace = ''.join(linetrace)
    textexc = ''.join(traceback.format_exception_only(errtype, value))
    # debug log information
    logtext = ''
    outf, logname = Wammu.ErrorLog.SaveLog()
    if outf is not None:
        print 'Created debug log copy in %s for error reporting.' % logname
        # Double %-formatting: the translated string itself contains %s.
        logtext = '\n%s\n' % _(
            'Debug log was saved for phone communication, if this error appeared during communicating with phone, you are strongly encouraged to include it in bugreport. Debug log is saved in file %s.'
        ) % logname
    # detection of same errors
    tracehash = md5('%s,%s' % (textexc, texttrace)).hexdigest()
    if tracehash in ERROR_HISTORY:
        print 'Same error already detected, not showing dialog!'
        print texttrace
        print 'Exception: %s' % textexc
        return
    ERROR_HISTORY.append(tracehash)
    # traceback id (md5 sum of last topmost traceback item inside Wammu - file(function):code)
    try:
        for trace_line in trace:
            if trace_line[0].rfind('Wammu') > -1:
                lasttrace = trace_line
        traceidtext = '%s(%s):%s' % (
            lasttrace[0][lasttrace[0].rfind('Wammu'):],
            lasttrace[2], lasttrace[3])
        traceid = md5(traceidtext).hexdigest()
        tracetext = '\n%s\n' % (
            _('Before submiting please try searching for simmilar bugs on %s')
            % ('https://github.com/search?l=&q=%s+%%40gammu&ref=advsearch&type=Issues' % traceid))
    except:
        # No Wammu frame found (or md5/formatting failed): fall back.
        traceid = 'N/A'
        tracetext = ''
    # unicode warning
    if errtype == UnicodeEncodeError or errtype == UnicodeDecodeError:
        unicodewarning = ('\n%s\n' % _(
            'Unicode encoding error appeared, see question 1 in FAQ, how to solve this.'
        ))
    else:
        unicodewarning = ''
    # prepare message
    text = u"""%s
%s
%s%s%s
%s
------------------ Traceback ID -------------------
%s
-------------------- Traceback --------------------
%s-------------------- Exception --------------------
%s---------------------------------------------------
""" % (_('Unhandled exception appeared.'),
       _('If you want to help improving this program, please submit following infomation and description how did it happen to %s. Please report in english, otherwise you will be most likely told to translate you report to english later.'
         ) % 'http://bugs.wammu.eu/',
       logtext, tracetext, unicodewarning,
       Wammu.ErrorLog.GetSystemInfo(),
       traceid,
       StrConv(texttrace),
       StrConv(textexc))
    # Include exception info in crash file
    if outf is not None:
        outf.write(text.encode('utf-8'))
        outf.close()
    # display error
    try:
        Wammu.ErrorMessage.ErrorMessage(
            HANDLER_PARENT,
            _('Unhandled exception appeared. If you want to help improving this program, please report this together with description how this situation has happened. Please report in english, otherwise you will be most likely told to translate you report to english later.'
              ),
            _('Unhandled exception'),
            traceid=traceid,
            autolog=logname,
            exception=_('Traceback:\n%(traceback)s\nException: %(exception)s')
            % {
                'traceback': StrConv(texttrace),
                'exception': StrConv(textexc)
            }).ShowModal()
    except:
        # GUI unavailable/broken: at least dump the report to stdout.
        print text
def pytest_pyfunc_call(pyfuncitem):
    """Inspects and consumes certain exceptions

    The guts of this function are explained above in the module
    documentation.

    Args:
        pyfuncitem: A pytest test item.

    NOTE(review): this is written as a hookwrapper generator (it yields the
    test outcome); the @pytest.hookimpl(hookwrapper=True) decorator is
    presumably applied outside this view -- confirm.  Also note that when
    enable_rbac is set but 'rbac_role' is not a fixture, no yield executes
    on that path -- verify this cannot happen in practice.
    """
    # do whatever you want before the next hook executes
    if not enable_rbac:
        yield
        return
    # Login as the "new" user to run the test under
    if 'rbac_role' in pyfuncitem.fixturenames:
        user = pyfuncitem._request.getfuncargvalue('rbac_role')
        really_logout()
        logger.info("setting user to {}".format(user))
        user_obj = current_appliance.collections.users.instantiate(
            username=conf.credentials[user]['username'],
            password=conf.credentials[user]['password'])
        # Actually perform the test. outcome is set to be a result object from the test
        with user_obj:
            outcome = yield
        screenshot, screenshot_error = take_screenshot()
        # Handle the Exception
        logger.error(pyfuncitem.location[0])
        loc = "{}/{}".format(pyfuncitem.location[0], pyfuncitem.location[2])
        # loc = loc[:min([loc.rfind('['), len(loc)])]
        logger.error(loc)
        # errors = [v for k, v in tests.items() if loc.startswith(k)]
        # Expected-failure map comes from the test's docstring metadata:
        # role name -> expected exception repr prefix (falsy = should pass).
        errors = pyfuncitem.function.meta.kwargs['from_docs']['rbac']['roles']
        if errors:
            # errors = errors[0]
            user = pyfuncitem.funcargs['rbac_role']
            if errors[user]:
                # This role is expected to FAIL with a specific exception.
                if not outcome.excinfo:
                    logger.error("RBAC: Test should fail!")
                    raise Exception("RBAC: You should fail!")
                else:
                    if outcome.excinfo[1].__repr__().startswith(errors[user]):
                        # Expected failure: swallow it and mark the test ok.
                        logger.info("RBAC: Test failed as expected")
                        outcome.force_result(True)
                    else:
                        # Wrong exception type: record artifacts and re-raise.
                        contents = "".join(
                            traceback.format_list(
                                traceback.extract_tb(outcome.excinfo[2])))
                        save_traceback_file(pyfuncitem, contents)
                        save_screenshot(pyfuncitem, screenshot, screenshot_error)
                        logger.error("RBAC: You blithering idiot, "
                                     "you failed with the wrong exception")
                        raise Exception(
                            "RBAC: You should fail with {}!".format(
                                errors[user]))
            else:
                # This role is expected to PASS.
                if not outcome.excinfo:
                    logger.info("RBAC: Test passed as expected")
                else:
                    logger.error("RBAC: Test should have passed!")
                    contents = "".join(
                        traceback.format_list(
                            traceback.extract_tb(outcome.excinfo[2])))
                    save_traceback_file(pyfuncitem, contents)
                    save_screenshot(pyfuncitem, screenshot, screenshot_error)
                    raise Exception("RBAC: Test should have passed!")
def log_test_failure_data(test, test_logpath, driver, browser, url=None):
    """Write a failure-summary file (browser/driver versions, last page,
    timestamp, traceback and exception) into *test_logpath*.

    Works for unittest-style tests (test._outcome.errors), behave runs
    (sb_config.behave_*), and falls back to sys.exc_info()/sys.last_traceback
    when neither carries a usable traceback.
    """
    import traceback
    browser_displayed = browser
    driver_displayed = None
    browser_version = None
    driver_version = None
    driver_name = None
    # Version lookups are best-effort; failures just leave fields unknown.
    try:
        browser_version = get_browser_version(driver)
    except Exception:
        pass
    try:
        driver_name, driver_version = get_driver_name_and_version(
            driver, browser
        )
    except Exception:
        pass
    if browser_version:
        headless = ""
        if test.headless and browser in ["chrome", "edge", "firefox"]:
            headless = " / headless"
        browser_displayed = "%s (%s%s)" % (browser, browser_version, headless)
        if driver_name and driver_version:
            driver_displayed = "%s (%s)" % (driver_name, driver_version)
    if not browser_version:
        browser_displayed = browser
        driver_displayed = "(Unknown Driver)"
    if not driver_displayed:
        driver_displayed = "(Unknown Driver)"
    basic_info_name = settings.BASIC_INFO_NAME
    basic_file_path = "%s/%s" % (test_logpath, basic_info_name)
    if url:
        last_page = url
    else:
        last_page = get_last_page(driver)
    timestamp, the_date, the_time = get_master_time()
    test_id = get_test_id(test)  # pytest runnable display_id (with the "::")
    data_to_save = []
    data_to_save.append("%s" % test_id)
    data_to_save.append(
        "--------------------------------------------------------------------"
    )
    data_to_save.append("Last Page: %s" % last_page)
    data_to_save.append(" Browser: %s" % browser_displayed)
    data_to_save.append(" Driver: %s" % driver_displayed)
    data_to_save.append("Timestamp: %s" % timestamp)
    data_to_save.append(" Date: %s" % the_date)
    data_to_save.append(" Time: %s" % the_time)
    data_to_save.append(
        "--------------------------------------------------------------------"
    )
    if (
        sys.version_info[0] >= 3
        and hasattr(test, "_outcome")
        and (hasattr(test._outcome, "errors") and test._outcome.errors)
    ):
        # unittest path: errors[0][1] is an exc_info triple; skip the first
        # (framework) frame of the traceback.
        try:
            exc_message = test._outcome.errors[0][1][1]
            traceback_address = test._outcome.errors[0][1][2]
            traceback_list = traceback.format_list(
                traceback.extract_tb(traceback_address)[1:]
            )
            traceback_message = "".join(traceback_list).strip()
        except Exception:
            exc_message = "(Unknown Exception)"
            traceback_message = "(Unknown Traceback)"
        data_to_save.append("Traceback: " + traceback_message)
        data_to_save.append("Exception: " + str(exc_message))
    else:
        the_traceback = None
        if hasattr(test, "is_behave") and test.is_behave:
            # behave path: take the failed step's error message if any.
            if sb_config.behave_scenario.status.name == "failed":
                if sb_config.behave_step.error_message:
                    the_traceback = sb_config.behave_step.error_message
        else:
            the_traceback = "".join(
                traceback.format_exception(
                    sys.exc_info()[0],
                    sys.exc_info()[1],
                    sys.exc_info()[2],
                )
            )
        if (
            not the_traceback
            or len(str(the_traceback)) < 30
            or the_traceback.endswith("StopIteration\n")
        ):
            # Traceback missing or useless: rebuild from sys.last_traceback,
            # filtering out pytest/pluggy framework frames.
            good_stack = []
            the_stacks = []
            if hasattr(sys, "last_traceback"):
                the_stacks = traceback.format_list(
                    traceback.extract_tb(sys.last_traceback)
                )
            else:
                message = None
                if hasattr(test, "is_behave") and test.is_behave:
                    message = "Behave step was not implemented or skipped!"
                else:
                    message = "Traceback not found!"
                the_stacks = [message]
            for stack in the_stacks:
                if "/site-packages/pluggy/" not in stack:
                    if "/site-packages/_pytest/" not in stack:
                        good_stack.append(stack)
            the_traceback = "".join(good_stack)
            data_to_save.append("Traceback: " + the_traceback)
            if hasattr(sys, "last_value"):
                last_value = sys.last_value
                if last_value:
                    data_to_save.append("Exception: " + str(last_value))
        else:
            data_to_save.append("Traceback: " + the_traceback)
    # Windows-style line endings for the saved report.
    log_file = codecs.open(basic_file_path, "w+", "utf-8")
    log_file.writelines("\r\n".join(data_to_save))
    log_file.close()
def printException(self):
    """Report the current exception's traceback via self.fail, one frame per
    call, skipping frames that belong to the grader itself."""
    all_frames = traceback.extract_tb(sys.exc_info()[2])
    student_frames = []
    for frame in all_frames:
        if not isTracebackItemGrader(frame):
            student_frames.append(frame)
    for formatted in traceback.format_list(student_frames):
        self.fail('%s' % formatted)
def get_traceback(stacklevel=1):
    """Return the current call stack as a formatted string.

    The innermost ``stacklevel + 1`` frames (this function's own frame plus
    ``stacklevel`` of its callers) are omitted from the output.
    """
    frames = traceback.extract_stack()
    trimmed = frames[:-(stacklevel + 1)]
    return ''.join(traceback.format_list(trimmed))
def _BuildFromOther(self, site, exc_type, value, tb):
    """Build self.description (and lineno/linetext) from a Python exception.

    Frames above the first "<Script" frame and below any restricted-exec
    helper are hidden unless `debugging` is set.
    """
    self.colno = -1
    self.lineno = 0
    if debugging:
        # Full traceback if debugging.
        list = traceback.format_exception(exc_type, value, tb)
        self.description = ExpandTabs(''.join(list))
        return
    # Run down the traceback list, looking for the first "<Script..>"
    # Hide traceback above this. In addition, keep going down
    # looking for a "_*_" attribute, and below hide these also.
    # hide from these functions down in the traceback.
    hide_names = ["r_import", "r_reload", "r_open"]
    depth = None
    tb_top = tb
    while tb_top:
        filename, lineno, name, line = self.ExtractTracebackInfo(
            tb_top, site)
        if filename[:7] == "<Script":
            break
        tb_top = tb_top.tb_next
    format_items = []
    if tb_top:  # found one.
        depth = 0
        tb_look = tb_top
        # Look down for our bottom
        while tb_look:
            filename, lineno, name, line = self.ExtractTracebackInfo(
                tb_look, site)
            if name in hide_names:
                break
            # We can report a line-number, but not a filename. Therefore,
            # we return the last line-number we find in one of our script
            # blocks.
            if filename.startswith("<Script"):
                self.lineno = lineno
                self.linetext = line
            format_items.append((filename, lineno, name, line))
            depth = depth + 1
            tb_look = tb_look.tb_next
    else:
        depth = None
        tb_top = tb
    bits = ['Traceback (most recent call last):\n']
    bits.extend(traceback.format_list(format_items))
    if exc_type == pythoncom.com_error:
        # COM errors get a friendlier description than the raw hresult.
        desc = "%s (0x%x)" % (value.strerror, value.hresult)
        if value.hresult == winerror.DISP_E_EXCEPTION and value.excepinfo and value.excepinfo[
                2]:
            desc = value.excepinfo[2]
        bits.append("COM Error: " + desc)
    else:
        bits.extend(traceback.format_exception_only(exc_type, value))
    # XXX - this utf8 encoding seems bogus. From well before py3k,
    # we had the comment:
    # > all items in the list are utf8 courtesy of Python magically
    # > converting unicode to utf8 before compilation.
    # but that is likely just confusion from early unicode days;
    # Python isn't doing it, pywin32 probably was, so 'mbcs' would
    # be the default encoding. We should never hit this these days
    # anyway, but on py3k, we *never* will, and str objects there
    # don't have a decode method...
    if sys.version_info < (3, ):
        for i in range(len(bits)):
            if isinstance(bits[i], str):
                #assert type(bits[i]) is str, type(bits[i])
                bits[i] = bits[i].decode('utf8')
    self.description = ExpandTabs(''.join(bits))
    # Clear tracebacks etc. (break reference cycles with frame objects).
    tb = tb_top = tb_look = None
ret = operation.run() except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() tb_list = traceback.extract_tb(exc_traceback) try: # TODO: Build an ExceptionCapsule that can return the traceback # to RemoteOperation as well. See Pyro. # logger.exception(e) # logging 'e' could be necessary for traceback pickled_ret = pickle.dumps( e) # Pickle exception for stdout transmission except Exception as f: # logger.exception(f) # 'f' is not important to us, except for debugging perhaps # No hope of pickling a precise Exception back to RemoteOperation. # So, provide meaningful trace as text and provide a non-zero return code # to signal to RemoteOperation that its Command invocation of gpoperation.py has failed. pretty_trace = str(e) + "\n" pretty_trace += 'Traceback (most recent call last):\n' pretty_trace += ''.join(traceback.format_list(tb_list)) logger.critical(pretty_trace) print(pretty_trace, file=sys.stderr) sys.exit(2) # signal that gpoperation.py has hit unexpected error else: pickled_ret = pickle.dumps(ret) sys.stdout = old_stdout sys.stdout.buffer.write(pickled_ret) sys.exit(0)
def testFormatStack(self):
    """format_list must render the captured stack identically to the
    expected one."""
    captured, expected = extract_stack()
    rendered_captured = traceback.format_list(captured)
    rendered_expected = traceback.format_list(expected)
    self.assertEqual(rendered_captured, rendered_expected)
def updateDatabase(self, sources=None, sourceOptions=None, cacheOnly=False, forceUpdate=False):
    """Download and load data from the given sources into the database.

    Each source runs inside its own SQLite SAVEPOINT and is rolled back
    individually on error; the whole run is wrapped in an outer SAVEPOINT.
    After all sources are processed, genome-build (GRCh/UCSChg) mappings are
    refreshed, stale builds are lifted over, and derived tables are
    post-processed and re-indexed.

    Args:
        sources: iterable of source names to update (None = all attached).
        sourceOptions: dict of per-source option dicts.
        cacheOnly: if True, skip downloads and use already-cached files.
        forceUpdate: if True, reprocess even when versions/files are unchanged.

    Returns:
        True if every source updated cleanly, False if any source failed.
    """
    if self._updating:
        raise Exception("_updating set before updateDatabase()")
    self._loki.testDatabaseWriteable()
    if self._loki.getDatabaseSetting('finalized', int):
        raise Exception("cannot update a finalized database")
    # check for extraneous options
    self.logPush("preparing for update ...\n")
    srcSet = self.attachSourceModules(sources)
    srcOpts = sourceOptions or {}
    for srcName in srcOpts.keys():
        if srcName not in srcSet:
            self.log(
                "WARNING: not updating from source '%s' for which options were supplied\n"
                % srcName)
    logIndent = self.logPop("... OK\n")
    # update all specified sources
    iwd = os.path.abspath(os.getcwd())
    self._updating = True
    self._tablesUpdated = set()
    self._tablesDeindexed = set()
    srcErrors = set()
    cursor = self._db.cursor()
    cursor.execute("SAVEPOINT 'updateDatabase'")
    try:
        for srcName in sorted(srcSet):
            # per-source savepoint so one failure doesn't poison the rest
            cursor.execute("SAVEPOINT 'updateDatabase_%s'" % (srcName, ))
            try:
                srcObj = self._sourceObjects[srcName]
                srcID = srcObj.getSourceID()
                # validate options, if any
                options = srcOpts.get(srcName, {})
                if options:
                    self.logPush("validating %s options ...\n" % srcName)
                    msg = srcObj.validateOptions(options)
                    if msg != True:
                        raise Exception(msg)
                    for opt, val in options.iteritems():
                        self.log("%s = %s\n" % (opt, val))
                    self.logPop("... OK\n")
                # switch to a temp subdirectory for this source
                path = os.path.join(iwd, srcName)
                if not os.path.exists(path):
                    os.makedirs(path)
                os.chdir(path)
                # download files into a local cache
                if not cacheOnly:
                    self.logPush("downloading %s data ...\n" % srcName)
                    srcObj.download(options)
                    self.logPop("... OK\n")
                # calculate source file metadata
                # all timestamps are assumed to be in UTC, but if a source
                # provides file timestamps with no TZ (like via FTP) we use them
                # as-is and assume they're supposed to be UTC
                self.log("analyzing %s data files ..." % srcName)
                filehash = dict()
                for filename in os.listdir('.'):
                    stat = os.stat(filename)
                    md5 = hashlib.md5()
                    # hash in 8MB chunks to bound memory use
                    with open(filename, 'rb') as f:
                        chunk = f.read(8 * 1024 * 1024)
                        while chunk:
                            md5.update(chunk)
                            chunk = f.read(8 * 1024 * 1024)
                    filehash[filename] = (filename, long(stat.st_size),
                                          long(stat.st_mtime),
                                          md5.hexdigest())
                self.log(" OK\n")
                # compare current loader version, options and file metadata to the last update
                skip = not forceUpdate
                last = '?'
                if skip:
                    for row in cursor.execute(
                            "SELECT version, DATETIME(updated,'localtime') FROM `db`.`source` WHERE source_id = ?",
                            (srcID, )):
                        skip = skip and (row[0] == srcObj.getVersionString())
                        last = row[1]
                if skip:
                    n = 0
                    for row in cursor.execute(
                            "SELECT option, value FROM `db`.`source_option` WHERE source_id = ?",
                            (srcID, )):
                        n += 1
                        skip = skip and (row[0] in options) and (
                            row[1] == options[row[0]])
                    skip = skip and (n == len(options))
                if skip:
                    n = 0
                    for row in cursor.execute(
                            "SELECT filename, size, md5 FROM `db`.`source_file` WHERE source_id = ?",
                            (srcID, )):
                        n += 1
                        skip = skip and (row[0] in filehash) and (
                            row[1] == filehash[row[0]][1]) and (
                                row[2] == filehash[row[0]][3])
                    skip = skip and (n == len(filehash))
                # skip the update if the current loader and all source file versions match the last update
                if skip:
                    self.log(
                        "skipping %s update, no data or software changes since %s\n"
                        % (srcName, last))
                else:
                    # process new files (or old files with a new loader)
                    self.logPush("processing %s data ...\n" % srcName)
                    cursor.execute(
                        "DELETE FROM `db`.`warning` WHERE source_id = ?",
                        (srcID, ))
                    srcObj.update(options)
                    cursor.execute(
                        "UPDATE `db`.`source` SET updated = DATETIME('now'), version = ? WHERE source_id = ?",
                        (srcObj.getVersionString(), srcID))
                    cursor.execute(
                        "DELETE FROM `db`.`source_option` WHERE source_id = ?",
                        (srcID, ))
                    sql = "INSERT INTO `db`.`source_option` (source_id, option, value) VALUES (%d,?,?)" % srcID
                    cursor.executemany(sql, options.iteritems())
                    cursor.execute(
                        "DELETE FROM `db`.`source_file` WHERE source_id = ?",
                        (srcID, ))
                    sql = "INSERT INTO `db`.`source_file` (source_id, filename, size, modified, md5) VALUES (%d,?,?,DATETIME(?,'unixepoch'),?)" % srcID
                    cursor.executemany(sql, filehash.values())
                    self.logPop("... OK\n")
                #if skip
            except:
                # record the failure, unwind nested log indents, log the last
                # traceback frame plus the exception, then roll back this source
                srcErrors.add(srcName)
                excType, excVal, excTrace = sys.exc_info()
                while self.logPop() > logIndent:
                    pass
                self.logPush("ERROR: failed to update %s\n" % (srcName, ))
                if excTrace:
                    for line in traceback.format_list(
                            traceback.extract_tb(excTrace)[-1:]):
                        self.log(line)
                for line in traceback.format_exception_only(excType, excVal):
                    self.log(line)
                self.logPop()
                cursor.execute(
                    "ROLLBACK TRANSACTION TO SAVEPOINT 'updateDatabase_%s'" %
                    (srcName, ))
            finally:
                cursor.execute("RELEASE SAVEPOINT 'updateDatabase_%s'" %
                               (srcName, ))
            #try/except/finally
        #foreach source
        # pull the latest GRCh/UCSChg conversions
        #   http://genome.ucsc.edu/FAQ/FAQreleases.html
        #   http://genome.ucsc.edu/goldenPath/releaseLog.html
        # TODO: find a better machine-readable source for this data
        if not cacheOnly:
            self.log("updating GRCh:UCSChg genome build identities ...")
            import urllib2
            import re
            response = urllib2.urlopen(
                'http://genome.ucsc.edu/FAQ/FAQreleases.html')
            page = ""
            while True:
                data = response.read()
                if not data:
                    break
                page += data
            # scrape the release table rows; blank first column means the row
            # continues the previous species ("human") section
            rowHuman = False
            for tablerow in re.finditer(r'<tr>.*?</tr>', page,
                                        re.IGNORECASE | re.DOTALL):
                cols = tuple(
                    match.group()[4:-5].strip().lower()
                    for match in re.finditer(r'<td>.*?</td>', tablerow.group(),
                                             re.IGNORECASE | re.DOTALL))
                if cols and ((cols[0] == 'human') or
                             (rowHuman and (cols[0] in ('', ' ')))):
                    rowHuman = True
                    grch = ucschg = None
                    try:
                        if cols[1].startswith('hg'):
                            ucschg = int(cols[1][2:])
                        if cols[3].startswith(
                                'genome reference consortium grch'):
                            grch = int(cols[3][32:])
                        if cols[3].startswith('ncbi build '):
                            grch = int(cols[3][11:])
                    except:
                        pass
                    if grch and ucschg:
                        cursor.execute(
                            "INSERT OR REPLACE INTO `db`.`grch_ucschg` (grch,ucschg) VALUES (?,?)",
                            (grch, ucschg))
                else:
                    rowHuman = False
            #foreach tablerow
            self.log(" OK\n")
        #if not cacheOnly
        # cross-map GRCh/UCSChg build versions for all sources
        ucscGRC = collections.defaultdict(int)
        for row in self._db.cursor().execute(
                "SELECT grch,ucschg FROM `db`.`grch_ucschg`"):
            ucscGRC[row[1]] = max(row[0], ucscGRC[row[1]])
            cursor.execute(
                "UPDATE `db`.`source` SET grch = ? WHERE grch IS NULL AND ucschg = ?",
                (row[0], row[1]))
            cursor.execute(
                "UPDATE `db`.`source` SET ucschg = ? WHERE ucschg IS NULL AND grch = ?",
                (row[1], row[0]))
        cursor.execute(
            "UPDATE `db`.`source` SET current_ucschg = ucschg WHERE current_ucschg IS NULL"
        )
        # check for any source with an unrecognized GRCh build
        mismatch = False
        for row in cursor.execute(
                "SELECT source, grch, ucschg FROM `db`.`source` WHERE (grch IS NULL) != (ucschg IS NULL)"
        ):
            self.log(
                "WARNING: unrecognized genome build for '%s' (NCBI GRCh%s, UCSC hg%s)\n"
                % (row[0], (row[1] or "?"), (row[2] or "?")))
            mismatch = True
        if mismatch:
            self.log(
                "WARNING: database may contain incomparable genome positions!\n"
            )
        # check all sources' UCSChg build versions and set the latest as the target
        hgSources = collections.defaultdict(set)
        for row in cursor.execute(
                "SELECT source_id, current_ucschg FROM `db`.`source` WHERE current_ucschg IS NOT NULL"
        ):
            hgSources[row[1]].add(row[0])
        if hgSources:
            targetHG = max(hgSources)
            self.log("database genome build: GRCh%s / UCSChg%s\n" %
                     (ucscGRC.get(targetHG, '?'), targetHG))
            targetUpdated = (self._loki.getDatabaseSetting('ucschg', int) !=
                             targetHG)
            self._loki.setDatabaseSetting('ucschg', targetHG)
        # liftOver sources with old build versions, if there are any
        if len(hgSources) > 1:
            locusSources = set(row[0] for row in cursor.execute(
                "SELECT DISTINCT source_id FROM `db`.`snp_locus`"))
            regionSources = set(row[0] for row in cursor.execute(
                "SELECT DISTINCT source_id FROM `db`.`biopolymer_region`"))
            chainsUpdated = ('grch_ucschg' in self._tablesUpdated
                             or 'chain' in self._tablesUpdated
                             or 'chain_data' in self._tablesUpdated)
            for oldHG in sorted(hgSources):
                if oldHG == targetHG:
                    continue
                if not self._loki.hasLiftOverChains(oldHG, targetHG):
                    self.log(
                        "ERROR: no chains available to lift hg%d to hg%d\n" %
                        (oldHG, targetHG))
                    continue
                if targetUpdated or chainsUpdated or 'snp_locus' in self._tablesUpdated:
                    sourceIDs = hgSources[oldHG] & locusSources
                    if sourceIDs:
                        self.liftOverSNPLoci(oldHG, targetHG, sourceIDs)
                if targetUpdated or chainsUpdated or 'biopolymer_region' in self._tablesUpdated:
                    sourceIDs = hgSources[oldHG] & regionSources
                    if sourceIDs:
                        self.liftOverRegions(oldHG, targetHG, sourceIDs)
                sql = "UPDATE `db`.`source` SET current_ucschg = %d WHERE source_id = ?" % targetHG
                cursor.executemany(sql, ((sourceID, )
                                         for sourceID in hgSources[oldHG]))
            #foreach old build
        #if any old builds
        # post-process as needed (each step keyed off which tables changed)
        if 'snp_merge' in self._tablesUpdated:
            self.cleanupSNPMerges()
        if 'snp_merge' in self._tablesUpdated or 'snp_locus' in self._tablesUpdated:
            self.updateMergedSNPLoci()
        if 'snp_locus' in self._tablesUpdated:
            self.cleanupSNPLoci()
        if 'snp_merge' in self._tablesUpdated or 'snp_entrez_role' in self._tablesUpdated:
            self.updateMergedSNPEntrezRoles()
        if 'snp_entrez_role' in self._tablesUpdated:
            self.cleanupSNPEntrezRoles()
        if 'snp_merge' in self._tablesUpdated or 'gwas' in self._tablesUpdated:
            self.updateMergedGWASAnnotations()
        if 'biopolymer_name' in self._tablesUpdated or 'biopolymer_name_name' in self._tablesUpdated:
            self.resolveBiopolymerNames()
        if 'biopolymer_name' in self._tablesUpdated or 'snp_entrez_role' in self._tablesUpdated:
            self.resolveSNPBiopolymerRoles()
        if 'biopolymer_name' in self._tablesUpdated or 'group_member_name' in self._tablesUpdated:
            self.resolveGroupMembers()
        if 'biopolymer_region' in self._tablesUpdated:
            self.updateBiopolymerZones()
        # reindex all remaining tables
        self.log("finishing update ...")
        if self._tablesDeindexed:
            self._loki.createDatabaseIndecies(None, 'db',
                                              self._tablesDeindexed)
        if self._tablesUpdated:
            self._loki.setDatabaseSetting('optimized', 0)
        self.log(" OK\n")
    except:
        # any failure outside a per-source savepoint rolls back everything
        excType, excVal, excTrace = sys.exc_info()
        while self.logPop() > logIndent:
            pass
        self.logPush("ERROR: failed to update the database\n")
        if excTrace:
            for line in traceback.format_list(
                    traceback.extract_tb(excTrace)[-1:]):
                self.log(line)
        for line in traceback.format_exception_only(excType, excVal):
            self.log(line)
        self.logPop()
        cursor.execute("ROLLBACK TRANSACTION TO SAVEPOINT 'updateDatabase'")
    finally:
        cursor.execute("RELEASE SAVEPOINT 'updateDatabase'")
        self._updating = False
        self._tablesUpdated = None
        self._tablesDeindexed = None
        os.chdir(iwd)
    #try/except/finally
    # report and return
    if srcErrors:
        self.logPush("WARNING: data from these sources was not updated:\n")
        for srcName in sorted(srcErrors):
            self.log("%s\n" % srcName)
        self.logPop()
        return False
    return True
# Python Traceback Stack # traceback — Print or retrieve a stack traceback. # This module provides a standard interface to extract, format and print stack traces of Python programs. # It exactly mimics the behavior of the Python interpreter when it prints a stack trace. # This is useful when you want to print stack traces under program control, such as in a “wrapper” around the interpreter. # The module uses traceback objects — this is the object type that is stored in the sys.last_traceback variable and returned as the third item from # sys.exc_info(). # # This last example demonstrates the final few formatting functions: # import traceback traceback.format_list([('spam.py', 3, '<module>', 'spam.eggs()'), ('eggs.py', 42, 'eggs', 'return "bacon"')]) # # OUTPUT: # # [' File "spam.py", line 3, in <module>\n spam.eggs()\n', # ' File "eggs.py", line 42, in eggs\n return "bacon"\n'] an_error = IndexError('tuple index out of range') traceback.format_exception_only(type(an_error), an_error) # OUTPUT: '['IndexError: tuple index out of range\n']'
    # Wrap set_default_executor so callers get a warning instead of silently
    # replacing the loop's executor.
    loop.set_default_executor = warn_use(  # type: ignore
        loop.set_default_executor, "sets default executor on the event loop"
    )
    return loop


@callback
def _async_loop_exception_handler(_: Any, context: dict[str, Any]) -> None:
    """Handle all exception inside the core loop."""
    kwargs = {}
    if exception := context.get("exception"):
        # Pass full exc_info so the log record carries the traceback.
        kwargs["exc_info"] = (type(exception), exception, exception.__traceback__)

    logger = logging.getLogger(__package__)
    if source_traceback := context.get("source_traceback"):
        # When asyncio debug captured where the task was created, include it.
        stack_summary = "".join(traceback.format_list(source_traceback))
        logger.error(
            "Error doing job: %s: %s",
            context["message"],
            stack_summary,
            **kwargs  # type: ignore
        )
        return

    logger.error("Error doing job: %s", context["message"], **kwargs)  # type: ignore


async def setup_and_run_hass(runtime_config: RuntimeConfig) -> int:
    """Set up Home Assistant and run."""
    hass = await bootstrap.async_setup_hass(runtime_config)

    if hass is None:
        # Setup failed before a core instance existed; exit with error code.
        return 1
def CheckInputFromValidContext(op, input_op):
    """Returns whether `input_op` can be used from `op`s context.

    Conceptually, only inputs from op's while context or any ancestor while
    context (including outside of any context) are valid. In practice, there are
    many other edge cases as well.

    Args:
      op: Operation
      input_op: Operation

    Raises:
      ValueError: if input_op is from an invalid context.
    """
    op_ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
    input_ctxt = GetOutputContext(input_op)
    # Assume invalid until one of the allow-cases below proves otherwise.
    valid = False
    if not input_ctxt:
        # input_op isn't in a control flow context.
        valid = True
    elif op_ctxt is input_ctxt:
        # input_op is in the same context as op.
        valid = True
    else:
        while_ctxt = GetContainingWhileContext(op_ctxt)
        input_while_ctxt = GetContainingWhileContext(input_ctxt)
        if while_ctxt is None:
            if input_while_ctxt is None:
                # Neither op nor input_op is in a while loop, but one or both are in
                # conds. We allow this, although execution will fail if the branch
                # corresponding to input_op's cond context isn't taken.
                valid = True
            # Invalid if op isn't in a while loop and input_op is. Unless...
            if IsLoopEnter(op):
                # WhileContext._BuildLoop clears context for Enter nodes.
                valid = True
            if IsSwitch(op):
                # CondContext.AddValue clears context for Switch nodes.
                valid = True
        elif IsContainingContext(while_ctxt, input_while_ctxt):
            # input_op is in a while loop which contains op's while loop (or not in a
            # while loop at all).
            valid = True
        elif (while_ctxt.grad_state and
              IsContainingContext(while_ctxt.grad_state.forward_context,
                                  input_while_ctxt)):
            # op is in a gradient context and input_op is in the associated forward
            # pass context or an ancestor thereof. This case is need to build while
            # loop gradients.
            # NOTE(skyewm): we theoretically also need this case for custom gradient
            # functions that close over tensors from ancestor contexts, but I haven't
            # verified this.
            valid = True
        elif (while_ctxt.grad_state and
              while_ctxt.grad_state.forward_context is
              input_while_ctxt._outer_context):  # pylint: disable=protected-access
            # op is in a gradient context and input_op is in a child of the associated
            # forward pass context. This case is needed for the gradients of while
            # loops with conds.
            valid = True
        elif (input_while_ctxt.grad_state and
              input_while_ctxt.grad_state.forward_context is while_ctxt):
            # input_op is in the gradient context of op's context. This case is needed
            # when the gradient of a while loop gradient is requested (this will
            # eventually fail unless there is a stop_gradient() or similar).
            valid = True
        elif (input_while_ctxt.grad_state and
              input_ctxt.grad_state.forward_context.grad_state and
              input_ctxt.grad_state.forward_context.grad_state.forward_context
              is while_ctxt):
            # input_op is in the grad grad context of op's context. This case is
            # needed when the gradient of a while loop gradient is requested (this
            # will eventually fail unless there is a stop_gradient() or similar).
            valid = True
    if not valid:
        if while_ctxt:
            error_msg = (
                "Cannot use '%s' as input to '%s' because they are in different while"
                " loops." % (op.name, input_op.name))
        else:
            error_msg = (
                "Cannot use '%s' as input to '%s' because '%s' is in a while loop."
                % (input_op.name, op.name, input_op.name))
        # Log the error message plus the relevant stack traces. The stacks may be
        # useful for debugging this error, but we don't want to raise an
        # unreadable exception.
        log_msg = error_msg
        log_msg += "\n\n%s while context: %s" % (op.name, while_ctxt)
        log_msg += "\n%s while context: %s" % (input_op.name, input_while_ctxt)
        log_msg += "\n\nTraceback for %s:\n%s\nTraceback for %s:\n%s\n" % (
            op.name, "".join(traceback.format_list(op.traceback)),
            input_op.name, "".join(traceback.format_list(input_op.traceback)))
        logging.info(log_msg)
        raise ValueError(error_msg + " See info log for more details.")
def initiate(kickstart_host, base, extra_append, static_device=None,
             system_record="", preserve_files=None):
    """Kick off a koan-driven reinstall from `kickstart_host`.

    Args:
        kickstart_host: server providing the kickstart/profile.
        base: unused in this function; kept for caller compatibility.
        extra_append: unused in this function; kept for caller compatibility.
        static_device: if set, update static network device records and embed
            the kickstart/autoinst into the initrd.
        system_record: cobbler system record name ("" means use no record).
        preserve_files: optional list of files to merge into the new initrd.

    Returns:
        Tuple (status, message, error_messages); status 0 on success, or the
        error tuple from create_new_rd() on initrd-merge failure.
    """
    error_messages = {}
    # Fix: the default used to be a shared mutable list ([]); use None as the
    # sentinel so each call gets a fresh list. Behavior is unchanged.
    if preserve_files is None:
        preserve_files = []

    # cleanup previous attempt
    rm_rf(SHADOW)
    os.mkdir(SHADOW)

    print("Preserve files! : %s" % preserve_files)
    try:
        if static_device:
            update_static_device_records(kickstart_host, static_device)

        k = Koan()
        k.list_items = 0
        k.server = kickstart_host
        k.is_virt = 0
        k.is_replace = 1
        k.is_display = 0
        k.profile = None
        if system_record != "":
            k.system = system_record
        else:
            k.system = None
        k.port = 443
        k.image = None
        k.live_cd = None
        k.virt_path = None
        k.virt_type = None
        k.virt_bridge = None
        k.no_gfx = 1
        k.add_reinstall_entry = None
        k.kopts_override = None
        k.use_kexec = None
        k.embed_kickstart = k.embed_autoinst = None
        if hasattr(k, 'no_copy_default'):
            k.no_copy_default = 1
        else:
            # older koan
            k.grubby_copy_default = 0
        if static_device:
            k.embed_kickstart = k.embed_autoinst = 1
        k.run()
    except Exception:
        (xa, xb, tb) = sys.exc_info()
        # Koan raises "nice" exceptions flagged with a from_koan attribute;
        # those need no traceback. (Was a bare except around getattr().)
        if hasattr(xb, "from_koan"):
            error_messages['koan'] = str(xb)[1:-1]
            print(str(xb)[1:-1])  # nice exception, no traceback needed
        else:
            print(xa)
            print(xb)
            # Format the traceback once instead of twice.
            trace = " ".join(traceback.format_list(traceback.extract_tb(tb)))
            print(trace)
            error_messages['koan'] = trace
        return (1, "Kickstart failed. Koan error.", error_messages)

    # Now process preserve_files if there are any
    initrd = getInitrdPath()
    if preserve_files:
        ret = create_new_rd(initrd, preserve_files)
        if ret:
            # Error
            return ret
        initrd = initrd + ".merged"

    return (0, "Kickstart initiate succeeded", error_messages)
def get_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=True, collections=None, caching_device=None): """Gets an existing variable with these parameters or create a new one. If a variable with the given name is already stored, we return the stored variable. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to `False` when you only want to create new Variables. If `reuse` is `None` (the default), both new and existing variables are returned. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `UniformUnitScalingInitializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. Args: name: the name of the new or existing variable. shape: shape of the new or existing variable. dtype: type of the new or existing variable (defaults to `DT_FLOAT`). initializer: initializer for the variable. regularizer: a (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean or `None`. Controls reuse or creation of variables. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). collections: List of graph collections keys to add the Variable to. Defaults to `[GraphKeys.VARIABLES]` (see tf.Variable). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. Returns: The created or existing variable. 
Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, or when violating reuse during variable creation. """ # Set to true if initializer is a constant. initializing_from_value = False if initializer is not None and isinstance(initializer, ops.Tensor): initializing_from_value = True if shape is not None and initializing_from_value: raise ValueError("If initializer is a constant, do not specify shape.") should_check = reuse is not None dtype = dtypes.as_dtype(dtype) shape = tensor_shape.as_shape(shape) if name in self._vars: # Here we handle the case when returning an existing variable. if should_check and not reuse: tb = self._vars[name].op.traceback[::-1] # Throw away internal tf entries and only take a few lines. tb = [x for x in tb if "tensorflow/python" not in x[0]][:3] raise ValueError("Variable %s already exists, disallowed." " Did you mean to set reuse=True in VarScope? " "Originally defined at:\n\n%s" % ( name, "".join(traceback.format_list(tb)))) found_var = self._vars[name] if not shape.is_compatible_with(found_var.get_shape()): raise ValueError("Trying to share variable %s, but specified shape %s" " and found shape %s." % (name, shape, found_var.get_shape())) if not dtype.is_compatible_with(found_var.dtype): dtype_str = dtype.name found_type_str = found_var.dtype.name raise ValueError("Trying to share variable %s, but specified dtype %s" " and found dtype %s." % (name, dtype_str, found_type_str)) return found_var # The code below handles only the case of creating a new variable. if should_check and reuse: raise ValueError("Variable %s does not exist, disallowed." " Did you mean to set reuse=None in VarScope?" % name) if not shape.is_fully_defined() and not initializing_from_value: raise ValueError("Shape of a new variable (%s) must be fully defined, " "but instead was %s." % (name, shape)) # Create the tensor to initialize the variable. 
if initializer is None: initializer = init_ops.uniform_unit_scaling_initializer() # Clear control dependencies while creating the initializer. with ops.control_dependencies(None): if initializing_from_value: init_val = initializer variable_dtype = None else: init_val = lambda: initializer(shape.as_list(), dtype=dtype) variable_dtype = dtype.base_dtype # Create the variable. v = variables.Variable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype) self._vars[name] = v logging.info("Created variable %s with shape %s and init %s", v.name, format(shape), initializer) # Run the regularizer if requested and save the resulting loss. if regularizer: with ops.name_scope(name + "/Regularizer/"): loss = regularizer(v) if loss is not None: logging.info("Applied regularizer to %s and added the result %s to " "REGULARIZATION_LOSSES.", v.name, loss.name) ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss) return v