def go(name, feeds, start, end=None, logLevel=logging.INFO):
    """
    The main entry point to run feeds.

    'feeds' is a list of :class:`Feed` objects.
    'start' is the datetime to fetch from
    'end' is the datetime to fetch to
    """
    end = end or datetime.date.today()
    failures = []
    stats = dict([(x.name, {}) for x in feeds])
    site_series_count = 0
    for feed_obj in feeds:
        feed = feed_obj.name
        try:
            feed_obj.get_logger().setLevel(logLevel)
            feed_obj.parser.get_logger().setLevel(logLevel)
            count = feed_obj.go(start, end)
            if count == 0:
                msg = 'no data updates for feed=%s' % feed
                # todo: would be nice to know how many cache_files were downloaded
                log.warn(msg)
                failures.append(msg)
                stats[feed]['errors'] = msg
            elif feed_obj.expected_series_count and \
                    count < feed_obj.expected_series_count:
                msg = 'expected feed item count is %s but only processed ' \
                      '%s.' % (feed_obj.expected_series_count, count)
                log.warn(msg)
                stats[feed]['warning'] = msg
            stats[feed]['count'] = count
            stats[feed]['obj'] = feed_obj
            site_series_count += count
        except ErrorForAllRequests, fetch_ex:
            _, t, v, tbinfo = compact_traceback()
            msg = 'feed failure for {0}, errors are {1}. traceback is: ({2}:{3} {4})'.format(
                feed, str(fetch_ex), t, v, tbinfo)
            log.warn(msg)
            failures.append(msg)
            stats[feed]['errors'] = msg
        except Exception, ex:
            _, t, v, tbinfo = compact_traceback()
            msg = 'feed failure for {0}, errors are {1}. traceback is: ({2}:{3} {4})'.format(
                feed, str(ex), t, v, tbinfo)
            log.warn(msg)
            failures.append(msg)
            stats[feed]['errors'] = msg

def open_browser(uri):
    ''' Open browser on Windows NT '''

    #
    # We use the startfile() function here because we want to be dead
    # sure that the command is nonblocking and webbrowser focus more
    # on flexibility than on being nonblocking.
    # The only case where startfile() is going to fail is the one in
    # which a handler for HTML is not installed.
    #

    #
    # The check whether the URI actually looks like a URI is
    # performed in browser.py.  As an extra check, ensure that
    # we don't call startfile() when a file with that name exists
    # on the system.  I don't know the internals of startfile()
    # and I prefer to be paranoid.
    #
    if os.path.exists(uri):
        sys.stderr.write('ERROR: there is a file named like the URI\n')
        return False

    try:
        os.startfile(uri)
    except WindowsError:
        error = str(asyncore.compact_traceback())
        logging.warning('browser_nt: startfile() failed: %s', error)
        return False

    return True

def handle_error(self):
    nil, t, v, tbinfo = compact_traceback()

    # sometimes a user repr method will crash.
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    try:
        v = str(v)
    except:
        v = ''

    if not 'The handshake operation timed out' in v and \
            not 'http request' in v and \
            not 'EOF occurred in violation of protocol' in v and \
            not "[Errno 0] Error" in v:
        logging.critical(
            'uncaptured python exception, closing channel %s (%s:%s %s)',
            self_repr, t, v, tbinfo)
        logging.info("Trying to send error information")
        stat_server = StatusSocket(ADEHOST, ADEPORT, False)
        if stat_server.send_error():
            logging.info("Error information sent successful")
        else:
            logging.info("Error information sent unsuccessful")
        logging.info("StorADE closed")
    self.handle_close()

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()
    if t == socket.error:
        self._error = "SOCKET ERROR"
    else:
        self._error = str(t) + " " + str(v)
    self.handle_close()

def _poll(self, timeout):
    """ Poll for readability and writability """

    # Immediately break out of the loop if requested to do so
    if not self.again:
        raise StopPoller("self.again is False")

    # Monitor streams readability/writability
    elif self.readset or self.writeset:

        # Get list of readable/writable streams
        try:
            res = select.select(list(self.readset.keys()),
                                list(self.writeset.keys()),
                                [], timeout)
        except select.error:
            code = sys.exc_info()[1][0]
            if code != errno.EINTR:
                logging.error(str(asyncore.compact_traceback()))
                raise
            else:
                # Take care of EINTR
                return

        # No error? Fire readable and writable events
        for fileno in res[0]:
            self._call_handle_read(fileno)
        for fileno in res[1]:
            self._call_handle_write(fileno)

    # No I/O pending? Break out of the loop.
    else:
        raise StopPoller("No I/O pending")

def main():
    ''' Wrapper for the real main '''
    try:
        realmain()
    except:
        sys.stderr.write('%s\n' % str(asyncore.compact_traceback()))
        sys.exit(1)

def handle_read(self):
    data = self.in_buffer + self.recv(8192)
    pos = data.rfind("\r")
    if pos == -1:
        pos = data.rfind("\n")
    if pos != -1:
        self.in_buffer = data[pos:]
        data = data[:pos]
        data = data.replace("\r\n", "\n").replace("\r", "\n")
        if len(data) > 0 and data[0] == "\n":
            data = data[1:]
        passthrough = True
        for s, lf in self.stolf.items():
            if data.find(s) != -1:
                for f in lf:
                    try:
                        (nextfunction, passthrough) = f(data)
                        if not nextfunction:
                            break
                    except:
                        nil, t, v, tbinfo = asyncore.compact_traceback()
                        try:
                            self_repr = repr(self)  # sometimes a user repr method will crash.
                        except:
                            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
                        log('uncaptured python exception, ignoring %s (%s:%s %s)' %
                            (self_repr, t, v, tbinfo))
        if passthrough and mcStdin:
            mcStdin.send(data + "\n")

def handle(self):
    try:
        while True:
            packetlen = int(struct.unpack('!I',
                                          self.request.recv(MILTER_LEN_BYTES))[0])
            inbuf = []
            read = 0
            while read < packetlen:
                partial_data = self.request.recv(packetlen - read)
                inbuf.append(partial_data)
                read += len(partial_data)
            data = "".join(inbuf)
            logger.debug(' <<< %s', binascii.b2a_qp(data))
            try:
                response = self.__milter_dispatcher.Dispatch(data)
                if type(response) == list:
                    for r in response:
                        self.__send_response(r)
                elif response:
                    self.__send_response(response)
            except ppymilterbase.PpyMilterCloseConnection, e:
                logger.info('Closing connection ("%s")', str(e))
                break
    except Exception:
        # use similar error production as asyncore as they already make
        # good 1 line errors - similar to handle_error in asyncore.py
        # proper cleanup happens regardless even if we catch this exception
        (nil, t, v, tbinfo) = asyncore.compact_traceback()
        logger.error('uncaptured python exception, closing channel %s '
                     '(%s:%s %s)' % (repr(self), t, v, tbinfo))

def handle(self):
    try:
        while True:
            packetlen = int(
                struct.unpack('!I', self.request.recv(MILTER_LEN_BYTES))[0])
            inbuf = []
            read = 0
            while read < packetlen:
                partial_data = self.request.recv(packetlen - read)
                inbuf.append(partial_data)
                read += len(partial_data)
            data = b"".join(inbuf)
            logger.debug(' <<< %s', binascii.b2a_qp(data))
            try:
                response = self.__milter_dispatcher.Dispatch(data)
                if type(response) == list:
                    for r in response:
                        self.__send_response(r)
                elif response:
                    self.__send_response(response)
            except ppymilterbase.PpyMilterCloseConnection as e:
                logger.info('Closing connection ("%s")', str(e))
                break
    except Exception:
        # use similar error production as asyncore as they already make
        # good 1 line errors - similar to handle_error in asyncore.py
        # proper cleanup happens regardless even if we catch this exception
        (nil, t, v, tbinfo) = asyncore.compact_traceback()
        logger.error('uncaptured python exception, closing channel %s '
                     '(%s:%s %s)' % (repr(self), t, v, tbinfo))

def found_terminator (self):
    self.buffer, data = [], string.join (self.buffer, '')
    if self.pstate is self.STATE_LENGTH:
        packet_length = string.atoi (data, 16)
        self.set_terminator (packet_length)
        self.pstate = self.STATE_PACKET
    else:
        self.set_terminator (8)
        self.pstate = self.STATE_LENGTH
        (path, params) = marshal.loads (data)
        o = self.root
        e = None
        try:
            for p in path:
                o = getattr (o, p)
            result = apply (o, params)
        except:
            e = repr(asyncore.compact_traceback())
            result = None
        rb = marshal.dumps ((e, result))
        self.push (('%08x' % len(rb)) + rb)

def migrate(cls, connection):
    ''' Migrate: 4.2 -> 4.3 '''
    try:
        logging.info('migrate2: fix reordering column bug of v4.2')

        instance = cls()
        operations = []

        for tbl in ('bittorrent', 'speedtest'):
            logging.info('migrate2: build operations for %s...', tbl)
            result = instance.build_operations(connection, tbl, operations)
            logging.info('migrate2: built operations for %s: %s', tbl,
                         str(result))

        cursor = connection.cursor()
        for operation in operations:
            cursor.execute(operation[0], operation[1])
        cursor.close()

    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        exc = asyncore.compact_traceback()
        logging.error('migrate2: cannot recover from reordering column '
                      'bug, please contact Neubot developers and report '
                      'this problem.')
        logging.error('migrate2: error details: %s', str(exc))

    logging.info('migrate2: from schema version 4.2 to 4.3')
    connection.execute('''UPDATE config SET value='4.3' WHERE name='version';''')
    connection.commit()

def __notify_adjust_privacy():
    ''' Notify the user she should adjust privacy settings
        via the web user interface '''
    try:
        pynotify.init('Neubot 0.4.12-rc2')
        notification = pynotify.Notification(PRIVACY_TITLE,
                                             PRIVACY_EXPLANATION,
                                             NEUBOT_ICON)
        notification.set_urgency(pynotify.URGENCY_CRITICAL)
        notification.set_timeout(15)
        notification.show()
    except:
        syslog.syslog(syslog.LOG_ERR, '%s' %
                      str(asyncore.compact_traceback()))

        #
        # Reraise the exception because each login spawns a new instance
        # of this script.  Old instances will fail because pynotify cannot
        # connect to the session dbus.  So, reraising the exception here
        # is a cheap and dirty way to enforce the singleton pattern.
        #
        raise

def found_terminator(self):
    self.buffer, data = [], string.join(self.buffer, '')
    if self.pstate is self.STATE_LENGTH:
        packet_length = string.atoi(data, 16)
        self.set_terminator(packet_length)
        self.pstate = self.STATE_PACKET
    else:
        self.set_terminator(8)
        self.pstate = self.STATE_LENGTH
        (path, params) = marshal.loads(data)
        o = self.root
        e = None
        try:
            for p in path:
                o = getattr(o, p)
            result = apply(o, params)
        except:
            e = repr(asyncore.compact_traceback())
            result = None
        rb = marshal.dumps((e, result))
        self.push(('%08x' % len(rb)) + rb)

def run_queue(self):
    ''' If possible run the first test in queue '''

    # Adapted from neubot/rendezvous/client.py

    if not self.queue:
        return
    if self.running:
        return

    #
    # Subscribe BEFORE starting the test, otherwise we
    # may miss the 'testdone' event if the connection
    # to the negotiator service fails, and we will stay
    # stuck forever.
    #
    NOTIFIER.subscribe('testdone', self.test_done)

    # Prevent concurrent tests
    self.running = True

    # Safely run first element in queue
    try:
        self._do_run_queue()
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        exc = asyncore.compact_traceback()
        error = str(exc)
        LOG.error('runner_core: catched exception: %s' % error)
        NOTIFIER.publish('testdone')

def found_terminator(self):
    # note("--- found_terminator(%s), collector is %s", repr(self), repr(self.collector))
    if self.collector:
        if self.collector.found_terminator():
            self.collector = None
    else:
        header = self.in_buffer
        self.in_buffer = ""
        lines = string.split(header, "\r\n")

        # --------------------------------------------------
        # crack the request header
        # --------------------------------------------------

        # note("--- lines are <%s>", lines)
        if not lines:
            self.close_when_done()
            return

        request = lines[0].strip()
        note("c%d: request is <%s>", self.channel_number, request)

        r = self.parse_request(request)
        # note("--- r is %s", repr(r))

        self.request_counter.increment()
        self.server.total_requests.increment()

        if not r.valid():
            note("c%d: invalid request %s", self.channel_number, r)
            self.log_info("Bad IMAP request: %s" % repr(request), "error")
            r.error("Bad request: %s" % repr(request))
            return

        try:
            self.current_request = r
            if self.collector:
                note(3, "c%d: waiting for more data (%s) %s...",
                     self.channel_number,
                     self.collector.waiting_for_what(),
                     self.collector)
                return
            # note("--- about to handle request")
            self.do_request()
            note(3, "-------- c%d: handled request", self.channel_number)
        except:
            self.server.exceptions.increment()
            (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
            self.log_info("c%d: server Error: %s, %s: file: %s line: %s" %
                          (self.channel_number, t, v, file, line), "error")
            try:
                r.error("server exception: %s, %s: file: %s line: %s" %
                        (t, v, file, line))
            except:
                pass

def _run_task(task):
    """ Safely run task """
    try:
        task.func()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        logging.error(str(asyncore.compact_traceback()))

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()
    self.close()
    if InvalidToken == t or InvalidSignature == t:
        raise WazuhException(
            3010, "Could not decrypt message from {0}".format(self.addr))
    else:
        raise WazuhException(3010, str(v))

def _fireq(self, event, queue):
    for func, context in queue:
        try:
            func(event, context)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            logging.error(str(asyncore.compact_traceback()))

def handle_error(self):
    logger.exception('Handle response error')
    error = asyncore.compact_traceback()[2]
    try:
        if self.response.context:
            self.response.context.on_error(error)
    finally:
        if self.response.will_close:
            self.response.close()

def main(args):
    ''' Monitor Neubot state via command line '''

    try:
        options, arguments = getopt.getopt(args[1:], 'D:v')
    except getopt.error:
        sys.exit('Usage: neubot api.client [-v] [-D property=value]')
    if arguments:
        sys.exit('Usage: neubot api.client [-v] [-D property=value]')

    address, port, verbosity = '127.0.0.1', '9774', 0

    for name, value in options:
        if name == '-D':
            name, value = value.split('=', 1)
            if name == 'address':
                address = value
            elif name == 'port':
                port = value
        elif name == '-v':
            verbosity += 1

    timestamp = 0
    while True:
        try:
            connection = lib_http.HTTPConnection(address, port)
            connection.set_debuglevel(verbosity)
            connection.request('GET', '/api/state?t=%d' % timestamp)

            response = connection.getresponse()
            if response.status != 200:
                raise RuntimeError('Bad HTTP status: %d' % response.status)
            if response.getheader("content-type") != "application/json":
                raise RuntimeError("Unexpected content type")

            octets = response.read()
            dictionary = json.loads(octets)

            logging.info("APIStateTracker: received JSON: %s",
                         json.dumps(dictionary, ensure_ascii=True))

            if not "events" in dictionary:
                continue
            if not "current" in dictionary:
                raise RuntimeError("Incomplete dictionary")

            timestamp = max(0, int(dictionary["t"]))
            json.dump(dictionary, sys.stdout)

        except KeyboardInterrupt:
            break
        except:
            error = asyncore.compact_traceback()
            logging.error('Exception: %s', str(error))
            time.sleep(5)

def close(self, stream):
    self.unset_readable(stream)
    self.unset_writable(stream)
    try:
        stream.handle_close()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        logging.error(str(asyncore.compact_traceback()))

def handle_error(self):
    #self.handle_close()
    exc, why, _traceback = sys.exc_info()
    self.connected = False
    if exc == exceptions.KeyboardInterrupt:  # FIX: works?
        error("received keyboard interrupt")
    if exc == socket.error:
        if why[0] == errno.ECONNREFUSED:
            debug("no chaser at %s:%s (connection refused)",
                  self.host, self.port)
        else:
            info("network error on connection %s:%s (%s %s)",
                 self.host, self.port, exc, why)
            debug("  traceback: %s", asyncore.compact_traceback()[3])
    else:
        info("unexpected error on connection %s:%s (%s %s)",
             self.host, self.port, exc, why)
        info("  traceback: %s", asyncore.compact_traceback()[3])

def main():
    ''' Wrapper for the real main '''
    try:
        serversmain()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        sys.stderr.write('%s\n' % str(asyncore.compact_traceback()))
        sys.exit(1)

def loop(self):
    ''' Event loop '''
    while self.again:
        try:
            self.run()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            logging.error(asyncore.compact_traceback())

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    logger.error("{} Error: '{}'.".format(self.tag, v))
    logger.debug("{} Error: '{}' - '{}'.".format(self.tag, t, tbinfo))

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()
    if t == InvalidToken or t == InvalidSignature:
        error = "Could not decrypt message from {0}".format(self.addr)
    else:
        error = str(v)

    logging.error("Error handling request: {0}".format(error))
    self.data = json.dumps({'error': 1, 'data': error})
    self.handle_write()

def _call_handle_write(self, fileno):
    if self.writeset.has_key(fileno):
        stream = self.writeset[fileno]
        try:
            stream.handle_write()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            logging.error(str(asyncore.compact_traceback()))
            self.close(stream)

def _call_handle_write(self, fileno):
    """ Safely dispatch write event """
    if fileno in self.writeset:
        stream = self.writeset[fileno]
        try:
            stream.handle_write()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            logging.error(str(asyncore.compact_traceback()))
            self.close(stream)

def __parse(self, cache_files):
    parser = self.parser
    for file_path in cache_files:
        try:
            self.get_logger().info('Parsing file %s' % file_path)
            yield parser.parse(file_path)
        except Exception, e:
            _, t, v, tbinfo = compact_traceback()
            msg = 'error parsing in {0}:, error={1}, traceback is: ' \
                  '({2}: {3} {4})'.format(file_path, e, t, v, tbinfo)
            self.get_logger().warn(msg)

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    self.handle_close()
    logger.error("[Transport-Handler] Error: '{}'.".format(v))
    logger.debug("[Transport-Handler] Error: '{}' - '{}'.".format(t, tbinfo))

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()
    self.close()
    if InvalidToken == t:
        raise InvalidToken("Could not decrypt message from {0}".format(
            self.addr[0]))
    elif InvalidSignature == t:
        raise InvalidSignature("Could not decrypt message from {0}".format(
            self.addr[0]))
    else:
        raise t(v)

def reap (self):
    # find DNS requests that have timed out
    now = int(time.time())
    if now - self.last_reap_time > 180:        # reap every 3 minutes
        self.last_reap_time = now              # update before we forget
        for k, (host, unpack, callback, when) in self.request_map.items():
            if now - when > 180:               # over 3 minutes old
                del self.request_map[k]
                try:
                    # same code as in handle_read
                    callback (host, 0, None)   # timeout val is (0,None)
                except:
                    (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                    self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

def main(args):
    ''' Wrapper for the real main '''
    try:
        __main(args)
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        sys.stderr.write('ERROR: unhandled exception: %s\n' %
                         str(asyncore.compact_traceback()))
        sys.exit(1)

def main():
    ''' Wrapper around the real __main() '''
    try:
        __main()
    except SystemExit:
        raise
    except:
        try:
            why = asyncore.compact_traceback()
            syslog.syslog(syslog.LOG_ERR, 'Unhandled exception: %s' % str(why))
        except:
            pass
        sys.exit(1)

def reap(self):
    # find DNS requests that have timed out
    now = int(time.time())
    if now - self.last_reap_time > 180:        # reap every 3 minutes
        self.last_reap_time = now              # update before we forget
        for k, (host, unpack, callback, when) in self.request_map.items():
            if now - when > 180:               # over 3 minutes old
                del self.request_map[k]
                try:
                    # same code as in handle_read
                    callback(host, 0, None)    # timeout val is (0,None)
                except:
                    (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                    self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

def handle_read (self):
    self.recv (8192)
    try:
        self.lock.acquire()
        for thunk in self.thunks:
            try:
                thunk()
            except:
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
        self.thunks = []
    finally:
        self.lock.release()

def _download_and_verify_update(server='releases.neubot.org'):
    '''
     Wrapper around __download_and_verify_update() that catches
     and handles exceptions.
    '''
    try:
        return __download_and_verify_update(server)
    except:
        why = asyncore.compact_traceback()
        syslog.syslog(syslog.LOG_ERR,
                      '_download_and_verify_update: %s' % str(why))
        return None

def loop(self):
    """ Poller loop """
    while True:
        try:
            self.run()
        except (KeyboardInterrupt, SystemExit):
            raise
        except select.error:
            raise
        except StopPoller:
            break
        except:
            logging.error(str(asyncore.compact_traceback()))

def handle_error_no_close(self):
    """See asyncore.dispatcher.handle_error()"""
    nil, t, v, tbinfo = compact_traceback()

    # sometimes a user repr method will crash.
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    self.log_info(
        'uncaptured python exception, closing channel %s (%s:%s %s)' % (
            self_repr, t, v, tbinfo),
        'error')

def found_terminator(self):
    line = self.in_buffer

    if not len(line):
        return

    sp = string.find(line, ' ')
    if sp != -1:
        line = [line[:sp], line[sp + 1:]]
    else:
        line = [line]

    command = string.lower(line[0])

    # watch especially for 'urgent' abort commands.
    if string.find(command, 'abor') != -1:
        # strip off telnet sync chars and the like...
        while command and command[0] not in string.letters:
            command = command[1:]

    fun_name = 'cmd_%s' % command
    if command != 'pass':
        self.log('<== %s' % repr(self.in_buffer)[1:-1])
    else:
        self.log('<== %s' % line[0] + ' <password>')
    self.in_buffer = ''

    if not hasattr(self, fun_name):
        self.command_not_understood(line[0])
        return

    fun = getattr(self, fun_name)
    if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
        self.respond('530 Please log in with USER and PASS')
    elif (not self.check_command_authorization(command)):
        self.command_not_authorized(command)
    else:
        try:
            result = apply(fun, (line, ))
        except:
            self.server.total_exceptions.increment()
            (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
            if self.client_dc:
                try:
                    self.client_dc.close()
                except:
                    pass
            self.respond('451 Server Error: %s, %s: file: %s line: %s' % (
                t, v, file, line,
            ))

def handle_read(self):
    reply, whence = self.socket.recvfrom(512)
    # for security reasons we may want to double-check
    # that <whence> is the server we sent the request to.
    id = (ord(reply[0]) << 8) + ord(reply[1])
    if id in self.request_map:
        host, unpack, callback, when = self.request_map[id]
        del self.request_map[id]
        ttl, answer = unpack(reply)
        try:
            callback(host, ttl, answer)
        except Exception:
            (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
            self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

def handle_read (self):
    reply, whence = self.socket.recvfrom (512)
    # for security reasons we may want to double-check
    # that <whence> is the server we sent the request to.
    id = (ord(reply[0])<<8) + ord(reply[1])
    if self.request_map.has_key (id):
        host, unpack, callback, when = self.request_map[id]
        del self.request_map[id]
        ttl, answer = unpack (reply)
        try:
            callback (host, ttl, answer)
        except:
            (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
            self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()

    # sometimes a user repr method will crash.
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    self.logger.error('CiaoThread - python exception %s (%s:%s %s)' % (
        self_repr, t, v, tbinfo))
    self.logger.debug("Handle ERROR")
    return

def handle_read(self):
    try:
        self.recv(8192)
    except (OSError, socket.error):
        return

    with self.lock:
        for thunk in self.thunks:
            try:
                thunk()
            except:
                nil, t, v, tbinfo = asyncore.compact_traceback()
                self.log_info('exception in trigger thunk: (%s:%s %s)' %
                              (t, v, tbinfo))
        self.thunks = []

def handle_read(self):
    try:
        self.recv(8192)
    except socket.error:
        return

    with self.lock:
        for thunk in self.thunks:
            try:
                thunk()
            except:
                _nil, t, v, tbinfo = asyncore.compact_traceback()
                print('exception in trigger thunk:'
                      ' (%s:%s %s)' % (t, v, tbinfo))
        self.thunks = []

def test_compact_traceback(self):
    try:
        raise Exception("I don't like spam!")
    except:
        real_t, real_v, real_tb = sys.exc_info()
        r = asyncore.compact_traceback()
    else:
        self.fail('Expected exception')

    (f, function, line), t, v, info = r
    self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
    self.assertEqual(function, 'test_compact_traceback')
    self.assertEqual(t, real_t)
    self.assertEqual(v, real_v)
    self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))

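The test above pins down the shape of compact_traceback()'s return value: a ((file, function, line), type, value, info) tuple describing the innermost frame plus a compact one-line trace. Since asyncore is deprecated and removed in Python 3.12, a minimal stand-in built on sys.exc_info() can help when porting the examples in this collection; the helper name compact_traceback_compat is hypothetical and the sketch only assumes it is called from inside an except block, like the original.

import sys

def compact_traceback_compat():
    """Minimal sketch of asyncore.compact_traceback() behaviour.

    Assumption: called from inside an 'except' block, exactly like the
    original helper; otherwise there is no traceback to walk.
    """
    t, v, tb = sys.exc_info()
    if tb is None:
        raise AssertionError("traceback does not exist")
    entries = []
    while tb is not None:
        code = tb.tb_frame.f_code
        entries.append((code.co_filename, code.co_name, str(tb.tb_lineno)))
        tb = tb.tb_next
    # report the innermost frame, plus a compact trace of all frames
    file, function, line = entries[-1]
    info = ' '.join('[%s|%s|%s]' % entry for entry in entries)
    return (file, function, line), t, v, info
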
def do_command (self, cmd, data):
    if self.check_command_name (cmd):
        try:
            method = getattr (self, 'cmd_%s' % cmd,)
        except AttributeError:
            print 'no support for "%s" command' % (cmd,)
        else:
            try:
                method (data)
            except:
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                print 'caesure error: %s, %s: file: %s line: %s' % (t, v, file, line)
                print ' ********** problem processing %r command: packet=%r' % (cmd, data)
    else:
        print 'bad command: "%r", ignoring' % (cmd,)

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()

    # sometimes a user repr method will crash.
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    self.logger.error('Uncaptured python exception %s (%s:%s %s)' %
                      (self_repr, t, v, tbinfo))
    self.logger.error('Closing channel %s' % self.name)
    self.close()
    # self.close should trigger handle_close but it seems it doesn't
    # (then we call it manually)
    self.handle_close()

def handle_error (self, i):
    (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
    # Sometimes a user repr method will crash.
    try:
        i_repr = repr(i)
    except:
        i_repr = '<__repr__(i) failed for object at %0x>' % id(i)
    print (
        'uncaptured python exception in scheduled item %s (%s:%s %s)' % (
            i_repr, t, v, tbinfo
        )
    )

def handle_read(self):
    self.recv(8192)
    try:
        self.lock.acquire()
        for thunk in self.thunks:
            try:
                thunk()
            except:
                (file, fun, line), t, v, tbinfo = \
                    asyncore.compact_traceback()
                print('exception in trigger thunk: (%s:%s %s)' %
                      (t, v, tbinfo))
        self.thunks = []
    finally:
        self.lock.release()

def onecmd(self, line):
    line = self._preprocess_line(line)
    result = None
    try:
        result = cmd.Cmd.onecmd(self, line)
    except KeyboardInterrupt:
        self._output("Interrupt")
    except Exception as e:
        (file, fun, line), t, v, tbinfo = compact_traceback()
        error = 'Error: %s, %s: file: %s line: %s' % (t, v, file, line)
        self._output(error)

    self._update_prompt()
    return result

def handle_error(self):
    ''' Method for handling uncaptured errors. '''
    t, err, tbinfo = asyncore.compact_traceback()[1:]
    self.onError(Exception("%s:%s\n%s" % (
        t, err, tbinfo[1:-1].replace("] [", "\n"))))
    if isinstance(err, socket.error):
        if connection.closingSocketError(err):
            pass
        elif connection.minorSocketError(err):
            return
        else:
            self.onError(connection.ConnectionError(
                connection.processSocketError(err)))
    self.disconnect()

def handle_error(self):
    _, t, v, tbinfo = asyncore.compact_traceback()
    self_msg = "%s failed for object at %0x" % (self._get_classname(), id(self))
    Log.error("Uncaptured python exception, closing channel %s (%s:%s %s)" %
              (self_msg, t, v, tbinfo))

    if self._connecting:
        # Error when trying to connect
        # first cleanup by handle_close(), and tells a subclass about this error.
        # the subclass can then call start_connect() again, if appropriate
        self.handle_close()
        self.on_connect(StatusCode.CONNECT_ERROR)
    else:
        self.handle_close()
        self.on_error()

def handle_error(self, *info):
    # The prototype for handle_error is different for 2.x and 1.5:
    #   2.x: def handle_error (self)
    #   1.5: def handle_error (self, *info):
    # with exception info in info
    if not info:
        # python 2.x
        _, exc_type, exc_value, exc_traceback = asyncore.compact_traceback()
    else:
        # python 1.5
        (exc_type, exc_value, exc_traceback) = info
    self.err_ = (exc_type, exc_value)
    prodlib.log("error encountered: %s-%s" % self.err_)  # stderr logging!
    del exc_traceback
    self.close()

def handle_read(self):
    data = self.in_buffer + self.recv(8192)
    pos = data.rfind("\r")
    if pos == -1:
        pos = data.rfind("\n")
    if pos != -1:
        self.in_buffer = data[pos:]
        data = data[:pos]
        data = data.replace("\r\n", "\n").replace("\r", "\n")
        if len(data) > 0 and data[0] == "\n":
            data = data[1:]
        print data
        if (len(self.login) > 0) and (
                # MinecraftForge v4.0.0.247 Initialized
                # (not self.foundDone and (data.find("MinecraftForge v4.0.0.247 Initialized") != -1)) or
                # (not self.foundDone and (data.find(" Done ") != -1)) or
                (not self.foundDone and (data.find(" achievements") != -1)) or
                (self.foundDone and (data.find(" logged in ") != -1))):
            while len(self.login) > 0:
                obj = self.login.pop(0)
                if not obj.connected and not obj.connecting:
                    log("Attempting ' Done ' or ' logged in ' connect (%d) with %d remaining in queue" %
                        (len(obj.out_buffer), len(self.login)))
                    obj.do_connect()
                    break
                else:
                    log("Already connecting or connected")
            self.foundDone = True
        for s, lf in self.stolf.items():
            if data.find(s) != -1:
                for f in lf:
                    try:
                        nextfunction = f(data)
                        if not nextfunction:
                            break
                    except:
                        nil, t, v, tbinfo = asyncore.compact_traceback()
                        try:
                            self_repr = repr(self)  # sometimes a user repr method will crash.
                        except:
                            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
                        log('uncaptured python exception, ignoring %s (%s:%s %s)' %
                            (self_repr, t, v, tbinfo))

def handle_read(self):
    try:
        self.recv(8192)
    except socket.error:
        return

    self.lock.acquire()
    try:
        for thunk in self.thunks:
            try:
                thunk()
            except:
                nil, t, v, tbinfo = asyncore.compact_traceback()
                print('exception in trigger thunk:'
                      ' (%s:%s %s)' % (t, v, tbinfo))
        self.thunks = []
    finally:
        self.lock.release()

def handle_error(self):
    _, t, v, tbinfo = asyncore.compact_traceback()
    self_msg = f"{self._get_classname()} failed for object at {id(self):x}"
    Log.error(
        f"Uncaptured python exception, closing channel {self_msg} ({t}:{v} {tbinfo})"
    )

    if self._connecting:
        # Error when trying to connect
        # first cleanup by handle_close(), and tells a subclass about this error.
        # the subclass can then call start_connect() again, if appropriate
        self._handle_close()
        self.on_connect(StatusCode.CONNECT_ERROR)
    else:
        self._handle_close()
        self.on_error()

def onecmd(self, line):
    line = self._preprocess_line(line)
    result = None
    try:
        result = cmd.Cmd.onecmd(self, line)
    except KeyboardInterrupt:
        self._output("Interrupt")
    except Exception:
        (file, fun, line), t, v, tbinfo = compact_traceback()
        error = 'Error: %s, %s: file: %s line: %s' % (t, v, file, line)
        self._output(error)

    if not line.startswith("quit"):
        self._output_mpu_status()
    return result

def handle_error(self):
    nil, t, v, tbinfo = asyncore.compact_traceback()

    # sometimes a user repr method will crash.
    try:
        self_repr = repr(self)
    except:
        self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

    self.d_error(
        'Error: uncaptured python exception, closing channel %s (%s:%s %s)',
        self_repr, t, v, tbinfo)

    # self.close()
    if self.is_connected:
        self.gate.on_disconnect(self, True)
    if not self.is_child:
        self.gate.on_reconnect(self)