def test_log_uncaught_errors(config, log):
    """log_uncaught_errors() must log the exception and its traceback.

    The wrapped function should still propagate the original exception
    to the caller (hence the pytest.raises context).
    """
    with pytest.raises(ValueError):
        def error_throwing_function():
            raise ValueError
        log_uncaught_errors(error_throwing_function, log)()
    # Use a context manager so the log file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open(os.path.join(config['LOGDIR'], 'server.log'), 'r') as f:
        log_contents = f.read()
    assert 'ValueError' in log_contents
    # Check that the traceback is logged. The traceback stored in
    # sys.exc_info() contains an extra entry for the test_log_uncaught_errors
    # frame, so just look for the rest of the traceback.
    tb = sys.exc_info()[2]
    for call in traceback.format_tb(tb)[1:]:
        assert call in log_contents
def test_log_uncaught_errors(config, log):
    """Errors and tracebacks are logged, but GreenletExit is not logged as
    an error.
    """
    with pytest.raises(ValueError):
        log_uncaught_errors(error_throwing_function)()
    # NOTE(review): this builds the wrapper but never invokes it, so the
    # GreenletExit assertion below may pass vacuously — should this be
    # called (or spawned in a greenlet)? TODO confirm against
    # simulate_greenlet_exit's semantics.
    log_uncaught_errors(simulate_greenlet_exit)
    # Use a context manager so the log file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open(os.path.join(config['LOGDIR'], 'server.log'), 'r') as f:
        log_contents = f.read()
    assert 'ValueError' in log_contents
    assert 'GreenletExit' not in log_contents
    # Check that the traceback is logged. The traceback stored in
    # sys.exc_info() contains an extra entry for the test_log_uncaught_errors
    # frame, so just look for the rest of the traceback.
    tb = sys.exc_info()[2]
    for call in traceback.format_tb(tb)[1:]:
        assert call in log_contents
def make_zerorpc(cls, location):
    """Return a greenlet that serves an instance of `cls` over ZeroRPC.

    The server binds to `location`. Uncaught errors inside the greenlet
    are logged to disk via log_uncaught_errors rather than only being
    printed to stderr (gevent's default) before the greenlet dies.
    """
    assert location, "Location to bind for %s cannot be none!" % cls

    def serve():
        # Instantiate the service class and run it as a ZeroRPC server
        # bound at the given address+port.
        server = zerorpc.Server(cls())
        server.bind(location)
        log.info("ZeroRPC: Starting %s at %s" % (cls.__name__, location))
        server.run()

    return gevent.Greenlet.spawn(log_uncaught_errors(serve))
def commit_uids(db_session, log, new_uids):
    """Persist new UIDs, saving their messages' part blobs first.

    Blob saves happen in parallel greenlets and are joined fatally: if
    any save fails, we abort so this chunk is retried when the sync
    restarts. On a DataError during commit we roll back, dump diagnostic
    state for every UID, and re-raise.
    """
    new_messages = [item.message for item in new_uids]

    # Save message part blobs before committing changes to db.
    for msg in new_messages:
        threads = [
            Greenlet.spawn(log_uncaught_errors(part.save, log), part._data)
            for part in msg.parts if hasattr(part, '_data')
        ]
        # Fatally abort if part saves error out. Messages in this
        # chunk will be retried when the sync is restarted.
        gevent_check_join(log, threads,
                          "Could not save message parts to blob store!")
        # Clear data to save memory.
        for part in msg.parts:
            part._data = None

    garbage_collect()

    try:
        log.info("Committing {0} UIDs".format(len(new_uids)))
        db_session.add_all(new_uids)
        db_session.commit()
    except DataError:
        db_session.rollback()
        log.error("Issue inserting new UIDs into database. "
                  "This probably means that an object's property is "
                  "malformed or way too long, etc.")
        # Diagnostic-only imports; hoisted out of the loop so they run
        # once instead of once per UID.
        import inspect
        from pprint import pformat
        for uid in new_uids:
            log.error(uid)
            log.error(inspect.getmembers(uid))
            try:
                log.error(pformat(uid.__dict__, indent=2))
            except AttributeError:
                pass
            for part in uid.message.parts:
                log.error(inspect.getmembers(part))
                try:
                    log.error(pformat(part.__dict__, indent=2))
                except AttributeError:
                    pass
        # Bare `raise` re-raises with the original traceback intact
        # (unlike `raise e`, which discards it on Python 2).
        raise
def commit_uids(db_session, log, new_uids):
    """Commit new UIDs to the database after saving part blobs.

    Each message's part data is saved to the blob store in parallel
    greenlets; any failure aborts fatally so the chunk is retried on
    sync restart. DataError on commit triggers a rollback plus verbose
    diagnostics before the exception is re-raised.
    """
    new_messages = [item.message for item in new_uids]

    # Save message part blobs before committing changes to db.
    for msg in new_messages:
        threads = [Greenlet.spawn(log_uncaught_errors(part.save, log),
                                  part._data)
                   for part in msg.parts if hasattr(part, '_data')]
        # Fatally abort if part saves error out. Messages in this
        # chunk will be retried when the sync is restarted.
        gevent_check_join(log, threads,
                          "Could not save message parts to blob store!")
        # Clear data to save memory.
        for part in msg.parts:
            part._data = None

    garbage_collect()

    try:
        log.info("Committing {0} UIDs".format(len(new_uids)))
        db_session.add_all(new_uids)
        db_session.commit()
    except DataError:
        db_session.rollback()
        log.error("Issue inserting new UIDs into database. "
                  "This probably means that an object's property is "
                  "malformed or way too long, etc.")
        # Diagnostic-only imports; hoisted out of the loop so they run
        # once instead of once per UID.
        import inspect
        from pprint import pformat
        for uid in new_uids:
            log.error(uid)
            log.error(inspect.getmembers(uid))
            try:
                log.error(pformat(uid.__dict__, indent=2))
            except AttributeError:
                pass
            for part in uid.message.parts:
                log.error(inspect.getmembers(part))
                try:
                    log.error(pformat(part.__dict__, indent=2))
                except AttributeError:
                    pass
        # Bare `raise` re-raises with the original traceback intact
        # (unlike `raise e`, which discards it on Python 2).
        raise
def _run_impl(self):
    """Poll the command inbox while the sync greenlet runs.

    Returns after a stop command (killing all folder monitors and the
    sync greenlet). If the sync greenlet ever finishes on its own, that
    is an error: it should run forever, so re-raise its exception.
    """
    sync = Greenlet.spawn(log_uncaught_errors(self.sync, self.log))
    while not sync.ready():
        try:
            command = self.inbox.get_nowait()
        except Empty:
            sleep(self.heartbeat)
            continue
        if self.process_command(command):
            continue
        # ctrl-c, basically!
        self.log.info("Stopping sync for {0}".format(
            self.email_address))
        for monitor in self.folder_monitors:
            monitor.kill(block=True)
        sync.kill(block=True)
        return
    assert not sync.successful(), "mail sync should run forever!"
    raise sync.exception
def _run_impl(self):
    """Service the command inbox while the sync greenlet is alive.

    A stop command kills the sync greenlet before the folder monitors
    and returns. If the sync greenlet exits by itself that is a bug
    (it should run forever), so its exception is re-raised.
    """
    sync = Greenlet.spawn(log_uncaught_errors(self.sync, self.log))
    while not sync.ready():
        try:
            command = self.inbox.get_nowait()
        except Empty:
            sleep(self.heartbeat)
            continue
        if self.process_command(command):
            continue
        # ctrl-c, basically!
        self.log.info("Stopping sync for {0}".format(
            self.email_address))
        # make sure the parent can't start/stop any folder monitors
        # first
        sync.kill(block=True)
        killall(self.folder_monitors)
        return
    assert not sync.successful(), \
        "mail sync for {} account {} should run forever!"\
        .format(self.provider, self.account_id)
    raise sync.exception
def _run(self):
    """Start the failure-retry greenlet, then run the main loop here.

    Both the spawned greenlet and the main loop log uncaught errors
    via log_uncaught_errors.
    """
    gevent.spawn(log_uncaught_errors(self.retry_failed, self.log))
    main_loop = log_uncaught_errors(self._run_impl, self.log)
    main_loop()
def _run(self):
    """Log startup, then run the main loop with uncaught-error logging."""
    self.log.info('Running the webhook service')
    main_loop = log_uncaught_errors(self._run_impl, self.log)
    main_loop()
def fork_and_perform_job(self, job):
    """Spawns a gevent greenlet to perform the actual work.

    The job runner is wrapped so that uncaught errors in the greenlet
    are logged instead of only printed to stderr.
    """
    wrapped_perform = log_uncaught_errors(self.perform_job, self.log)
    self.gevent_pool.spawn(wrapped_perform, job)
def _run(self):
    """Run the main loop, logging any uncaught exception first."""
    wrapped_run = log_uncaught_errors(self._run_impl, self.log)
    return wrapped_run()