Example 1
 def _run(self):
     self.log.info('Starting syncback service',
                   process_num=self.process_number,
                   total_processes=self.total_processes,
                   keys=self.keys)
     while self.keep_running:
         retry_with_logging(self._run_impl, self.log)
Example 2
def test_no_logging_on_greenlet_exit():
    logger = MockLogger()
    failing_function = FailingFunction(GreenletExit)
    with pytest.raises(GreenletExit):
        retry_with_logging(failing_function, logger=logger)
    assert logger.call_count == 0
    assert failing_function.call_count == 1
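The MockLogger and FailingFunction helpers used throughout these tests are not shown on this page. The sketch below is a minimal, hypothetical reconstruction inferred from how the tests use them (a call_count counter on the logger, and a callable that raises the given exception until it has been invoked max_executions times); it is not necessarily the sync engine's actual test fixture.

class MockLogger(object):
    # Stand-in logger: only counts how many times an error was reported.
    def __init__(self):
        self.call_count = 0

    def error(self, *args, **kwargs):
        self.call_count += 1


class FailingFunction(object):
    # Raises exc on every call until it has run max_executions times.
    def __init__(self, exc, max_executions=3):
        self.exc = exc
        self.max_executions = max_executions
        self.call_count = 0

    def __call__(self):
        self.call_count += 1
        if self.call_count < self.max_executions:
            raise self.exc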
Example 3
def test_logging_on_critical_error():
    critical = [
        TypeError("Example TypeError"),
        StatementError(message="?", statement="SELECT *", params={},
                       orig=None),
        StatementError(
            message="?",
            statement="SELECT *",
            params={},
            orig=_mysql_exceptions.OperationalError(
                "(_mysql_exceptions.OperationalError) Incorrect string value "
                "'\\xE7\\x(a\\x84\\xE5'"),
        ),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Incorrect string value "
            "'\\xE7\\x(a\\x84\\xE5'"),
        _mysql_exceptions.IntegrityError(
            "(_mysql_exceptions.IntegrityError) Column not found"),
    ]

    for critical_exc in critical:
        logger = MockLogger()
        failing_function = FailingFunction(critical_exc, max_executions=2)
        retry_with_logging(failing_function, logger=logger)

        assert logger.call_count == 1, "{} should be logged".format(
            critical_exc)
        assert failing_function.call_count == 2
Example 4
    def _run(self):
        # Bind greenlet-local logging context.
        self.log = log.new(account_id=self.account_id,
                           folder=self.folder_name,
                           provider=self.provider_name)
        # eagerly signal the sync status
        self.heartbeat_status.publish()

        try:
            self.update_folder_sync_status(lambda s: s.start_sync())
        except IntegrityError:
            # The state insert failed because the folder ID ForeignKey
            # was no longer valid, ie. the folder for this engine was deleted
            # while we were starting up.
            # Exit the sync and let the monitor sort things out.
            log.info("Folder state loading failed due to IntegrityError",
                     folder_id=self.folder_id,
                     account_id=self.account_id)
            raise MailsyncDone()

        # NOTE: The parent ImapSyncMonitor handler could kill us at any
        # time if it receives a shutdown command. The shutdown command is
        # equivalent to ctrl-c.
        while True:
            retry_with_logging(self._run_impl,
                               account_id=self.account_id,
                               provider=self.provider_name,
                               logger=log)
Example 5
def test_retry_with_logging():
    logger = MockLogger()
    failing_function = FailingFunction(ValueError)
    with pytest.raises(ValueError):
        retry_with_logging(failing_function, logger=logger)
    assert logger.call_count == 3
    assert failing_function.call_count == 3
Example 6
def test_retry_with_logging():
    logger = MockLogger()
    failing_function = FailingFunction(ValueError)
    with pytest.raises(ValueError):
        retry_with_logging(failing_function, logger=logger, max_count=3,
                           backoff_delay=0)
    assert logger.call_count == 4
    assert failing_function.call_count == 3
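Across these call sites, retry_with_logging takes the function to retry plus an optional logger (positionally in some examples, as logger= in others), retry tuning parameters (max_count, backoff_delay), a fail_classes list of exceptions that should not be retried, and extra keyword arguments (account_id, provider) that appear to be passed through as logging context. The loop below is a simplified, hypothetical sketch of a helper with that shape; it omits behaviour the real helper evidently has, such as special-casing gevent's GreenletExit and staying quiet on transient database errors, and the exact logging counts differ between versions of the tests.

import time

def retry_with_logging(func, logger=None, fail_classes=None,
                       max_count=None, backoff_delay=0, **log_context):
    # Illustrative only: call func until it returns, logging each failure
    # with whatever extra context (account_id, provider, ...) was supplied.
    attempts = 0
    while True:
        try:
            return func()
        except Exception as exc:
            # Exceptions listed in fail_classes propagate immediately.
            if fail_classes and isinstance(exc, tuple(fail_classes)):
                raise
            attempts += 1
            if logger is not None:
                logger.error('Call failed, retrying', exc_info=True,
                             **log_context)
            if max_count is not None and attempts >= max_count:
                raise
            time.sleep(backoff_delay)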
Example 7
def test_selective_retry():
    logger = MockLogger()
    failing_function = FailingFunction(ValueError)
    with pytest.raises(ValueError):
        retry_with_logging(failing_function, logger=logger,
                           fail_classes=[ValueError])
    assert logger.call_count == 1
    assert failing_function.call_count == 1
Example 8
 def run(self):
     if config.get('DEBUG_PROFILING_ON'):
         # If config flag is set, get live top-level profiling output on
         # stdout by doing kill -SIGTRAP <sync_process>.
         # This slows things down so you probably don't want to do it
         # normally.
         attach_profiler()
     setproctitle('inbox-sync-{}'.format(self.cpu_id))
     retry_with_logging(self._run_impl, self.log)
Example 9
def test_selective_retry():
    logger = MockLogger()
    failing_function = FailingFunction(ValueError)
    with pytest.raises(ValueError):
        retry_with_logging(failing_function,
                           logger=logger,
                           fail_classes=[ValueError])
    assert logger.call_count == 0
    assert failing_function.call_count == 1
Example 10
    def run(self):
        if config.get('DEBUG_CONSOLE_ON'):
            # Enable the debugging console if this flag is set. Connect to
            # localhost on the port shown in the logs to get access to a REPL
            port = None
            start_port = config.get('DEBUG_START_PORT')
            if start_port:
                port = start_port + self.cpu_id

            gevent.spawn(break_to_interpreter, port=port)

        setproctitle('inbox-sync-{}'.format(self.cpu_id))
        retry_with_logging(self._run_impl, self.log)
Example 11
    def _run(self):
        # Bind greenlet-local logging context.
        self.log = log.new(
            account_id=self.account_id,
            folder=self.folder_name,
            provider=self.provider_name,
        )
        # eagerly signal the sync status
        self.heartbeat_status.publish()

        def start_sync(saved_folder_status):
            # Ensure we don't cause an error if the folder was deleted.
            sync_end_time = (saved_folder_status.folder and
                             saved_folder_status.metrics.get("sync_end_time"))
            if sync_end_time:
                sync_delay = datetime.utcnow() - sync_end_time
                if sync_delay > timedelta(days=1):
                    saved_folder_status.state = "initial"
                    log.info(
                        "switching to initial sync due to delay",
                        folder_id=self.folder_id,
                        account_id=self.account_id,
                        sync_delay=sync_delay.total_seconds(),
                    )

            saved_folder_status.start_sync()

        try:
            self.update_folder_sync_status(start_sync)
        except IntegrityError:
            # The state insert failed because the folder ID ForeignKey
            # was no longer valid, ie. the folder for this engine was deleted
            # while we were starting up.
            # Exit the sync and let the monitor sort things out.
            log.info(
                "Folder state loading failed due to IntegrityError",
                folder_id=self.folder_id,
                account_id=self.account_id,
            )
            raise MailsyncDone()

        # NOTE: The parent ImapSyncMonitor handler could kill us at any
        # time if it receives a shutdown command. The shutdown command is
        # equivalent to ctrl-c.
        while self.state != "finish":
            retry_with_logging(
                self._run_impl,
                account_id=self.account_id,
                provider=self.provider_name,
                logger=log,
            )
Example 12
def test_no_logging_until_many_transient_error():
    logger = MockLogger()

    failing_function = FailingFunction(socket.error, max_executions=2)
    retry_with_logging(failing_function, logger=logger)

    assert logger.call_count == 0
    assert failing_function.call_count == 2

    failing_function = FailingFunction(socket.error, max_executions=21)
    retry_with_logging(failing_function, logger=logger)

    assert logger.call_count == 1
    assert failing_function.call_count == 21
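Together with the critical-error test in Example 3 and the longer variants of this test further down, these assertions suggest the retry helper classifies failures: "critical" errors (TypeError, MySQL IntegrityError, bad string values) are logged on the first failure, while "transient" ones (socket errors, deadlocks, lost or unreachable MySQL servers) are retried quietly and only logged after many consecutive failures. The classifier below is a hypothetical illustration built from the error messages in the test data, not the sync engine's actual logic.

import socket

# Message fragments treated as transient in this sketch, taken from the
# error strings exercised by the tests; the real list may differ.
TRANSIENT_MYSQL_MESSAGES = (
    "try restarting transaction",
    "Lost connection to MySQL server",
    "MySQL server has gone away",
    "Can't connect to MySQL server",
    "Max connect timeout reached",
)


def is_transient_error(exc):
    # Transient failures are retried without logging; anything else is
    # reported on the first occurrence.
    if isinstance(exc, (socket.timeout, socket.error)):
        return True
    return any(fragment in str(exc) for fragment in TRANSIENT_MYSQL_MESSAGES)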
Example 13
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = self.log.new(account_id=self.account_id)
     try:
         while True:
             retry_with_logging(self._run_impl, account_id=self.account_id,
                                fail_classes=[ValidationError],
                                provider=self.provider_name, logger=self.log)
     except ValidationError:
         # Bad account credentials; exit.
         self.log.error('Credential validation error; exiting',
                        exc_info=True, logstash_tag='mark_invalid')
         with session_scope(self.namespace_id) as db_session:
             account = db_session.query(Account).get(self.account_id)
             account.mark_invalid(scope=self.scope)
Example 14
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = log.new(account_id=self.account_id, folder=self.folder_name,
                        provider=self.provider_name, program='s3_sync')
     # eagerly signal the sync status
     return retry_with_logging(self._run_impl, account_id=self.account_id,
                               provider=self.provider_name, logger=log)
Example 15
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = log.new(account_id=self.account_id, folder=self.folder_name)
     # eagerly signal the sync status
     self.heartbeat_status.publish()
     return retry_with_logging(self._run_impl, account_id=self.account_id,
                               logger=log)
Example 16
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = log.new(account_id=self.account_id, folder=self.folder_name,
                        provider=self.provider_name)
     # eagerly signal the sync status
     self.heartbeat_status.publish()
     return retry_with_logging(self._run_impl, account_id=self.account_id,
                               provider=self.provider_name, logger=log)
Example 17
def test_no_logging_until_many_transient_error():
    transient = [
        socket.timeout,
        socket.error,
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) (1213, 'Deadlock "
            "found when trying to get lock; try restarting transaction')"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Lost connection to MySQL "
            "server during query"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) MySQL server has gone away."
        ),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Can't connect to MySQL "
            "server on 127.0.0.1"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Max connect timeout reached "
            "while reaching hostgroup 71"),
        StatementError(
            message="?",
            statement="SELECT *",
            params={},
            orig=_mysql_exceptions.OperationalError(
                "(_mysql_exceptions.OperationalError) MySQL server has gone away."
            ),
        ),
    ]

    for transient_exc in transient:
        logger = MockLogger()
        failing_function = FailingFunction(transient_exc, max_executions=2)
        retry_with_logging(failing_function, logger=logger)

        assert logger.call_count == 0, "{} should not be logged".format(
            transient_exc)
        assert failing_function.call_count == 2

        failing_function = FailingFunction(socket.error, max_executions=21)
        retry_with_logging(failing_function, logger=logger)

        assert logger.call_count == 1
        assert failing_function.call_count == 21

        failing_function = FailingFunction(socket.error, max_executions=2)
Example 18
 def _run(self):
     try:
         return retry_with_logging(self._run_impl,
                                   account_id=self.account_id,
                                   provider=self.provider_name,
                                   logger=self.log)
     except GreenletExit:
         self._cleanup()
         raise
Example 19
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = self.log.new(account_id=self.account_id)
     try:
         while True:
             retry_with_logging(self._run_impl,
                                account_id=self.account_id,
                                fail_classes=[ValidationError],
                                provider=self.provider_name,
                                logger=self.log)
     except ValidationError:
         # Bad account credentials; exit.
         self.log.error('Credential validation error; exiting',
                        exc_info=True,
                        logstash_tag='mark_invalid')
         with session_scope(self.namespace_id) as db_session:
             account = db_session.query(Account).get(self.account_id)
             account.mark_invalid(scope=self.scope)
Example 20
    def run(self):
        if config.get('DEBUG_PROFILING_ON'):
            # If config flag is set, get live top-level profiling output on
            # stdout by doing kill -SIGTRAP <sync_process>.
            # This slows things down so you probably don't want to do it
            # normally.
            attach_profiler()

        if config.get('DEBUG_CONSOLE_ON'):
            # Enable the debugging console if this flag is set. Connect to
            # localhost on port $PID to get access to a REPL
            port = None
            start_port = config.get('DEBUG_START_PORT')
            if start_port:
                port = start_port + self.cpu_id

            gevent.spawn(break_to_interpreter, port=port)

        setproctitle('inbox-sync-{}'.format(self.cpu_id))
        retry_with_logging(self._run_impl, self.log)
Example 21
    def run(self):
        if config.get('DEBUG_PROFILING_ON'):
            # If config flag is set, get live top-level profiling output on
            # stdout by doing kill -SIGTRAP <sync_process>.
            # This slows things down so you probably don't want to do it
            # normally.
            attach_profiler()

        if config.get('DEBUG_CONSOLE_ON'):
            # Enable the debugging console if this flag is set. Connect to
            # localhost on the port shown in the logs to get access to a REPL
            port = None
            start_port = config.get('DEBUG_START_PORT')
            if start_port:
                port = start_port + self.cpu_id

            gevent.spawn(break_to_interpreter, port=port)

        setproctitle('inbox-sync-{}'.format(self.cpu_id))
        retry_with_logging(self._run_impl, self.log)
Example 22
def test_no_logging_until_many_transient_error():
    transient = [
        socket.timeout,
        socket.error,
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) (1213, 'Deadlock "
            "found when trying to get lock; try restarting transaction')"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Lost connection to MySQL "
            "server during query"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) MySQL server has gone away."),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Can't connect to MySQL "
            "server on 127.0.0.1"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Max connect timeout reached "
            "while reaching hostgroup 71"),
        StatementError(
            message="?", statement="SELECT *", params={},
            orig=_mysql_exceptions.OperationalError(
                "(_mysql_exceptions.OperationalError) MySQL server has gone away.")),
    ]

    for transient_exc in transient:
        logger = MockLogger()
        failing_function = FailingFunction(transient_exc, max_executions=2)
        retry_with_logging(failing_function, logger=logger)

        assert logger.call_count == 0, '{} should not be logged'.format(transient_exc)
        assert failing_function.call_count == 2

        failing_function = FailingFunction(socket.error, max_executions=21)
        retry_with_logging(failing_function, logger=logger)

        assert logger.call_count == 1
        assert failing_function.call_count == 21

        failing_function = FailingFunction(socket.error, max_executions=2)
Example 23
    def _run(self):
        # Bind greenlet-local logging context.
        self.log = log.new(account_id=self.account_id, folder=self.folder_name,
                           provider=self.provider_name)
        # eagerly signal the sync status
        self.heartbeat_status.publish()

        try:
            self.update_folder_sync_status(lambda s: s.start_sync())
        except IntegrityError:
            # The state insert failed because the folder ID ForeignKey
            # was no longer valid, ie. the folder for this engine was deleted
            # while we were starting up.
            # Exit the sync and let the monitor sort things out.
            log.info("Folder state loading failed due to IntegrityError",
                     folder_id=self.folder_id, account_id=self.account_id)
            raise MailsyncDone()

        # NOTE: The parent ImapSyncMonitor handler could kill us at any
        # time if it receives a shutdown command. The shutdown command is
        # equivalent to ctrl-c.
        while True:
            retry_with_logging(self._run_impl, account_id=self.account_id,
                               provider=self.provider_name, logger=log)
Example 24
def test_logging_on_critical_error():
    critical = [
        TypeError("Example TypeError"),
        StatementError(
            message="?", statement="SELECT *", params={}, orig=None),
        StatementError(
            message="?", statement="SELECT *", params={},
            orig=_mysql_exceptions.OperationalError(
                "(_mysql_exceptions.OperationalError) Incorrect string value "
                "'\\xE7\\x(a\\x84\\xE5'")),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Incorrect string value "
            "'\\xE7\\x(a\\x84\\xE5'"),
        _mysql_exceptions.IntegrityError(
            "(_mysql_exceptions.IntegrityError) Column not found"),
    ]

    for critical_exc in critical:
        logger = MockLogger()
        failing_function = FailingFunction(critical_exc, max_executions=2)
        retry_with_logging(failing_function, logger=logger)

        assert logger.call_count == 1, '{} should be logged'.format(critical_exc)
        assert failing_function.call_count == 2
Example 25
 def _run(self):
     retry_with_logging(self._run_impl, self.log)
Example 26
 def _run(self):
     return retry_with_logging(self._run_impl, self.log)
Example 27
def test_retry_with_logging():
    logger = MockLogger()
    failing_function = FailingFunction(ValueError)
    retry_with_logging(failing_function, logger=logger, backoff_delay=0)
    assert logger.call_count == failing_function.max_executions - 1
    assert failing_function.call_count == failing_function.max_executions
Example 28
 def _run(self):
     while True:
         retry_with_logging(self._run_impl, account_id=self.account_id,
                            provider=self.provider_name)
Example 29
 def run(self):
     log.info('Queueing accounts', zone=self.zone, shards=self.shards)
     while True:
         retry_with_logging(self._run_impl)
Example 30
 def run(self):
     return retry_with_logging(self._run_impl)
Example 31
 def _run(self):
     while True:
         retry_with_logging(self._run_impl)
Example 32
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = self.log.new(account_id=self.account_id)
     return retry_with_logging(self._run_impl, account_id=self.account_id,
                               logger=self.log)
Example 33
 def _run(self):
     gevent.spawn(retry_with_logging, self.retry_failed, self.log)
     retry_with_logging(self._run_impl, self.log)
Example 34
 def _monitoring_thread(self):
     # Logger needs to be instantiated in new thread.
     self.log = get_logger()
     while True:
         retry_with_logging(self._run_impl, self.log)
Example 35
 def _run(self):
     return retry_with_logging(self._run_impl, account_id=self.account_id,
                               provider=self.provider_name, logger=self.log)
Example 36
 def run(self):
     while self.keep_running:
         retry_with_logging(self._run_impl, self.log)
Example 37
 def run(self):
     log.info("Queueing accounts", zone=self.zone, shards=self.shards)
     while True:
         retry_with_logging(self._run_impl)
Example 38
 def _run(self):
     return retry_with_logging(self._run_impl, account_id=self.account_id)
Example 39
 def _run(self):
     while True:
         retry_with_logging(self._run_impl,
                            account_id=self.account_id,
                            provider=self.provider_name)
Example 40
 def _run(self):
     # Bind greenlet-local logging context.
     self.log = self.log.new(account_id=self.account_id)
     return retry_with_logging(self._run_impl,
                               account_id=self.account_id,
                               logger=self.log)
Example 41
 def run(self):
     while True:
         retry_with_logging(self._run_impl, self.log)