def _run(self):
    """Execute one queued syncback action inside a DB session.

    On success, marks the ActionLog row executed and removes the action
    from the scheduled set; on failure, logs the error, sleeps for the
    retry interval, removes the action so it can be retried, and re-raises.
    """
    # Not ignoring soft-deleted objects here because if you, say, delete a
    # draft, we still need to access the object to delete it on the remote.
    with session_scope(ignore_soft_deletes=False) as db_session:
        try:
            # Build the positional arguments once instead of branching on
            # two near-identical call sites.
            call_args = [self.account_id, self.record_id, db_session]
            if self.extra_args:
                call_args.append(self.extra_args)
            self.func(*call_args)
        except Exception:
            log_uncaught_errors(self.log)
            # Wait for a bit, then remove the log id from the scheduled set
            # so that it can be retried.
            gevent.sleep(self.retry_interval)
            self.syncback_service.remove_from_schedule(self.action_log_id)
            raise
        else:
            entry = db_session.query(ActionLog).get(self.action_log_id)
            entry.executed = True
            db_session.commit()
            self.log.info('syncback action completed',
                          action_id=self.action_log_id)
            self.syncback_service.remove_from_schedule(self.action_log_id)
def test_configurable_retry_count_resets(monkeypatch):
    """retry() honors a caller-supplied max_count instead of a fixed default.

    For a function that fails exactly max_count times, retry() should
    invoke it max_count times and log every failure but the last (the
    final failure propagates through the fail path, not the exc path).
    """
    for max_count in (3, 5):
        logger = MockLogger()
        flaky = FailingFunction(ValueError, max_executions=max_count,
                                delay=.1)
        retry(flaky,
              exc_callback=lambda: log_uncaught_errors(logger),
              max_count=max_count,
              backoff_delay=0)()
        assert logger.call_count == max_count - 1
        assert flaky.call_count == max_count
def test_configurable_retry_count_resets(monkeypatch):
    """retry() honors a caller-supplied max_count instead of a fixed default.

    A function failing exactly max_count times is invoked max_count times,
    with every failure but the last going through the exception callback.
    """
    for max_count in (3, 5):
        logger = MockLogger()
        flaky = FailingFunction(ValueError, max_executions=max_count,
                                delay=.1)
        retry(flaky,
              exc_callback=lambda: log_uncaught_errors(logger),
              max_count=max_count,
              backoff_delay=0)()
        assert logger.call_count == max_count - 1
        assert flaky.call_count == max_count
def _run(self):
    """Run a single syncback action within a database session.

    Success path: flag the ActionLog entry as executed, commit, and
    unschedule the action. Failure path: log, back off, unschedule so the
    action can be retried, then propagate the exception.
    """
    # Not ignoring soft-deleted objects here because if you, say, delete a
    # draft, we still need to access the object to delete it on the remote.
    with session_scope(ignore_soft_deletes=False) as db_session:
        try:
            # Collect the arguments up front; extra_args is only appended
            # when present, which mirrors the original two-way branch.
            args = [self.account_id, self.record_id, db_session]
            if self.extra_args:
                args.append(self.extra_args)
            self.func(*args)
        except Exception:
            log_uncaught_errors(self.log)
            # Wait for a bit, then remove the log id from the scheduled set
            # so that it can be retried.
            gevent.sleep(self.retry_interval)
            self.syncback_service.remove_from_schedule(self.action_log_id)
            raise
        else:
            log_entry = db_session.query(ActionLog).get(self.action_log_id)
            log_entry.executed = True
            db_session.commit()
            self.log.info('syncback action completed',
                          action_id=self.action_log_id)
            self.syncback_service.remove_from_schedule(self.action_log_id)
def test_log_uncaught_errors(config, log):
    """log_uncaught_errors() writes the active exception and its traceback
    to the server log, without dragging in unrelated greenlet frames.
    """
    try:
        error_throwing_function()
    # Fix: was a bare `except:`, which would also swallow SystemExit,
    # KeyboardInterrupt and GreenletExit. The function under test raises a
    # ValueError (asserted below), so Exception is the right scope.
    except Exception:
        log_uncaught_errors()

    # Fix: the file handle was previously leaked (open(...).read() with no
    # close); use a context manager instead.
    log_path = os.path.join(config['LOGDIR'], 'server.log')
    with open(log_path, 'r') as f:
        log_contents = f.read()

    assert 'ValueError' in log_contents
    assert 'GreenletExit' not in log_contents

    # Check that the traceback is logged. The traceback stored in
    # sys.exc_info() contains an extra entry for the test_log_uncaught_errors
    # frame, so just look for the rest of the traceback.
    tb = sys.exc_info()[2]
    for call in traceback.format_tb(tb)[1:]:
        assert call in log_contents
def retry_and_report_killed(func, account_id, folder_name=None, logger=None,
                            retry_classes=None, fail_classes=None):
    """Run func under retry(), logging each failure and reporting the
    account/folder as killed if the retries are exhausted.
    """
    # Named inner functions instead of lambda assignments; behavior is the
    # same, the callbacks simply have readable names.
    def log_failure():
        log_uncaught_errors(logger)

    def report_failure():
        report_killed(account_id, folder_name)

    return retry(func, exc_callback=log_failure,
                 fail_callback=report_failure,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes)()
def retry_and_report_killed(func, account_id, folder_name=None, logger=None,
                            retry_classes=None, fail_classes=None):
    """Run func under retry(); log failures (tagged with the account id)
    and report the account/folder as killed when retries are exhausted.
    """
    # def-based callbacks in place of assigned lambdas — identical calls.
    def log_failure():
        log_uncaught_errors(logger=logger, account_id=account_id)

    def report_failure():
        report_killed(account_id, folder_name)

    return retry(func, exc_callback=log_failure,
                 fail_callback=report_failure,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes)()
def _run(self):
    """Execute one queued syncback action, then mark it for rescheduling.

    On success the ActionLog row is flagged executed and committed; on
    failure the error is logged and we back off for retry_interval before
    rescheduling.
    """
    # Not ignoring soft-deleted objects here because if you, say, delete a
    # draft, we still need to access the object to delete it on the remote.
    with session_scope(ignore_soft_deletes=False) as db_session:
        try:
            self.func(self.account_id, self.record_id, db_session)
        except Exception:
            # Best-effort logging; the exception is NOT re-raised, so the
            # action is rescheduled below rather than aborted.
            log_uncaught_errors(self.log)
            # Wait for a bit before rescheduling.
            gevent.sleep(self.retry_interval)
        else:
            action_log_entry = db_session.query(ActionLog).get(
                self.action_log_id)
            action_log_entry.executed = True
            db_session.commit()
            self.log.info('syncback action completed',
                          action_id=self.action_log_id)
        # NOTE(review): original indentation was lost; placing this call on
        # both the success and failure paths, which the "before
        # rescheduling" comment in the except branch implies — confirm.
        self.syncback_service.mark_for_rescheduling(self.action_log_id)
def _run(self):
    """Execute one queued syncback action, then mark it for rescheduling.

    Success: flag the ActionLog row executed and commit. Failure: log the
    error and sleep retry_interval so the retry is delayed.
    """
    # Not ignoring soft-deleted objects here because if you, say, delete a
    # draft, we still need to access the object to delete it on the remote.
    with session_scope(ignore_soft_deletes=False) as db_session:
        try:
            self.func(self.account_id, self.record_id, db_session)
        except Exception:
            # No re-raise here: a failed action falls through to the
            # rescheduling call below after the backoff sleep.
            log_uncaught_errors(self.log)
            # Wait for a bit before rescheduling.
            gevent.sleep(self.retry_interval)
        else:
            action_log_entry = db_session.query(ActionLog).get(
                self.action_log_id)
            action_log_entry.executed = True
            db_session.commit()
            self.log.info('syncback action completed',
                          action_id=self.action_log_id)
        # NOTE(review): indentation was lost in this chunk; this call is
        # placed so it runs on both success and failure, as the "before
        # rescheduling" comment in the except branch suggests — confirm.
        self.syncback_service.mark_for_rescheduling(self.action_log_id)
def test_log_uncaught_errors(config, log):
    """log_uncaught_errors() attaches the active exception and traceback to
    the structured (JSON) log entry, excluding unrelated greenlet frames.
    """
    try:
        error_throwing_function()
    # Fix: was a bare `except:`, which would also swallow SystemExit,
    # KeyboardInterrupt and GreenletExit. The function under test raises a
    # ValueError (asserted below), so Exception is the right scope.
    except Exception:
        log_uncaught_errors()

    with open(config.get_required('TEST_LOGFILE'), 'r') as f:
        last_log_entry = json.loads(f.readlines()[-1])

    assert 'exception' in last_log_entry
    exc_info = last_log_entry['exception']
    assert 'ValueError' in exc_info
    assert 'GreenletExit' not in exc_info

    # Check that the traceback is logged. The traceback stored in
    # sys.exc_info() contains an extra entry for the test_log_uncaught_errors
    # frame, so just look for the rest of the traceback.
    tb = sys.exc_info()[2]
    for call in traceback.format_tb(tb)[1:]:
        assert call in exc_info
def test_log_uncaught_errors(config, log):
    """The last JSON log entry carries the exception text and full
    traceback after log_uncaught_errors(), minus greenlet noise.
    """
    try:
        error_throwing_function()
    # Fix: narrowed from a bare `except:` so BaseException subclasses such
    # as SystemExit/KeyboardInterrupt/GreenletExit are not silently caught;
    # the expected exception here is a ValueError (asserted below).
    except Exception:
        log_uncaught_errors()

    with open(config.get_required('TEST_LOGFILE'), 'r') as f:
        last_log_entry = json.loads(f.readlines()[-1])

    assert 'exception' in last_log_entry
    exc_info = last_log_entry['exception']
    assert 'ValueError' in exc_info
    assert 'GreenletExit' not in exc_info

    # Check that the traceback is logged. The traceback stored in
    # sys.exc_info() contains an extra entry for the test_log_uncaught_errors
    # frame, so just look for the rest of the traceback.
    tb = sys.exc_info()[2]
    for call in traceback.format_tb(tb)[1:]:
        assert call in exc_info
def retry_with_logging(func, logger=None, retry_classes=None,
                       fail_classes=None, account_id=None, **reset_params):
    """Run func under retry(), logging every uncaught exception (tagged
    with account_id when provided). reset_params are forwarded to retry().
    """
    # Named callback instead of an assigned lambda; same call, same args.
    def log_failure():
        log_uncaught_errors(logger, account_id)

    return retry(func, exc_callback=log_failure,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes,
                 **reset_params)()
def retry_and_report_killed(func, account_id, folder_name=None, logger=None,
                            retry_classes=None, fail_classes=None,
                            exc_callback=None, fail_callback=None,
                            **reset_params):
    """Run func under retry() with overridable callbacks.

    When the caller does not supply exc_callback/fail_callback, default to
    logging failures (tagged with account_id) and reporting the
    account/folder as killed when retries are exhausted.
    """
    # Fill in defaults with named inner functions rather than lambdas;
    # the truthiness checks mirror the original (`if not callback`).
    if not exc_callback:
        def exc_callback():
            log_uncaught_errors(logger=logger, account_id=account_id)

    if not fail_callback:
        def fail_callback():
            report_killed(account_id, folder_name)

    return retry(func, exc_callback=exc_callback,
                 fail_callback=fail_callback,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes,
                 **reset_params)()
def test_retry_count_resets(monkeypatch):
    """With a zero reset interval, the retry counter resets between
    failures, so a function failing 6 times is still retried to completion.
    """
    monkeypatch.setattr('inbox.util.concurrency.resettable_counter',
                        lambda: resettable_counter(reset_interval=0))

    logger = MockLogger()
    flaky = FailingFunction(ValueError, max_executions=6, delay=.1)

    retry(flaky, exc_callback=lambda: log_uncaught_errors(logger))()

    # 6 executions total; all but the final (successful) run are logged.
    assert logger.call_count == 5
    assert flaky.call_count == 6
def test_retry_count_resets(monkeypatch):
    """With reset_interval=0 the failure counter resets between attempts,
    so 6 failures never exhaust a max_count of 3.
    """
    monkeypatch.setattr(
        'inbox.util.concurrency._resettable_counter',
        lambda x, y: _resettable_counter(max_count=3, reset_interval=0))

    logger = MockLogger()
    flaky = FailingFunction(ValueError, max_executions=6, delay=.001)

    retry(flaky,
          exc_callback=lambda: log_uncaught_errors(logger),
          max_count=3,
          backoff_delay=0)()

    # 6 executions total; all but the final (successful) run are logged.
    assert logger.call_count == 5
    assert flaky.call_count == 6
def retry_with_logging(func, logger=None):
    """Run func under retry(), logging each uncaught exception."""
    # Named callback instead of an assigned lambda; identical behavior.
    def log_failure():
        log_uncaught_errors(logger)

    return retry(func, exc_callback=log_failure)()
def retry_with_logging(func, logger=None, retry_classes=None,
                       fail_classes=None):
    """Run func under retry(), logging each uncaught exception; the
    retry/fail exception class lists are forwarded to retry().
    """
    def log_failure():
        log_uncaught_errors(logger)

    return retry(func, exc_callback=log_failure,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes)()
def wrapped(*args, **kwargs):
    # Delegate to the wrapped callable; when the watched exception type is
    # raised, log it and drop into the post-mortem debugger.
    # NOTE(review): `func` and `exception_type` are free variables from an
    # enclosing decorator scope that is outside this chunk — confirm there.
    try:
        return func(*args, **kwargs)
    except exception_type:
        log_uncaught_errors()
        pdb.post_mortem()
def retry_with_logging(func, logger=None, retry_classes=None,
                       fail_classes=None):
    """Invoke func via retry(), logging every uncaught exception and
    passing the retry/fail class filters straight through.
    """
    def on_exception():
        log_uncaught_errors(logger)

    return retry(func,
                 exc_callback=on_exception,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes)()
def retry_with_logging(func, logger=None, retry_classes=None,
                       fail_classes=None, account_id=None, **reset_params):
    """Run func under retry(), logging uncaught exceptions.

    The same logging callback is used for both per-failure logging
    (exc_callback) and final-failure logging (fail_callback), as in the
    original; reset_params are forwarded to retry().
    """
    def log_failure():
        log_uncaught_errors(logger, account_id)

    return retry(func,
                 exc_callback=log_failure,
                 fail_callback=log_failure,
                 retry_classes=retry_classes,
                 fail_classes=fail_classes,
                 **reset_params)()
def wrapped(*args, **kwargs):
    # Call through to the wrapped function; on the configured exception
    # type, log the error and start a post-mortem debugging session.
    # NOTE(review): `func` and `exception_type` come from an enclosing
    # decorator scope not visible in this chunk — confirm at the definition.
    try:
        return func(*args, **kwargs)
    except exception_type:
        log_uncaught_errors()
        pdb.post_mortem()
def retry_and_report_killed(func, logger, account_id, folder_name=None):
    """Run func under retry(); log each failure and report a 'killed' exit
    for the account/folder when retries are exhausted.
    """
    # Named inner callbacks replace the assigned lambdas; same calls.
    def log_failure():
        log_uncaught_errors(logger)

    def on_fail():
        report_exit('killed', account_id, folder_name)

    return retry(func, exc_callback=log_failure, fail_callback=on_fail)()