Ejemplo n.º 1
0
def syncback_worker(semaphore, action, action_log_id, record_id, account_id,
                    syncback_service, retry_interval=30, extra_args=None):
    """Execute one queued syncback action and record its outcome.

    Looks up the callable for *action* in ACTION_FUNCTION_MAP, runs it
    against the record, and marks the ActionLog entry 'successful' on
    completion. On failure the retry counter is incremented (the entry is
    marked 'failed' once the retry limit is hit), the worker sleeps for
    *retry_interval* seconds, and the entry is removed from the scheduled
    set so it can be rescheduled or given up on.

    Arguments:
        semaphore: gevent semaphore bounding worker concurrency.
        action: key into ACTION_FUNCTION_MAP naming the action to run.
        action_log_id: primary key of the ActionLog row to update.
        record_id: id of the object the action operates on.
        account_id: id of the owning account.
        syncback_service: service whose schedule the entry is removed from.
        retry_interval: seconds to sleep before the entry may be retried.
        extra_args: optional extra payload forwarded to the action function.

    Raises:
        Re-raises any exception from the action, except
        ProviderSpecificException which is handled by provider code.
    """
    func = ACTION_FUNCTION_MAP[action]

    with semaphore:
        log = logger.new(record_id=record_id, action_log_id=action_log_id,
                         action=func, account_id=account_id,
                         extra_args=extra_args)
        # Not ignoring soft-deleted objects here because if you, say,
        # delete a draft, we still need to access the object to delete it
        # on the remote.
        try:
            with session_scope(ignore_soft_deletes=False) as db_session:
                if extra_args:
                    func(account_id, record_id, db_session, extra_args)
                else:
                    func(account_id, record_id, db_session)
                action_log_entry = db_session.query(ActionLog).get(
                    action_log_id)
                action_log_entry.status = 'successful'
                db_session.commit()
                latency = round((datetime.utcnow() -
                                 action_log_entry.created_at).
                                total_seconds(), 2)
                log.info('syncback action completed',
                         action_id=action_log_id,
                         latency=latency)
                syncback_service.remove_from_schedule(action_log_id)
        except Exception as e:
            # To reduce error-reporting noise, don't ship to Sentry
            # if not actionable.
            if isinstance(e, ProviderSpecificException):
                log.warning('Uncaught error', exc_info=True)
            else:
                log_uncaught_errors(log, account_id=account_id)

            with session_scope() as db_session:
                action_log_entry = db_session.query(ActionLog).get(
                    action_log_id)
                action_log_entry.retries += 1

                # Use >= rather than == so the entry is still marked
                # 'failed' even if the counter ever overshoots the limit
                # (e.g. a concurrent worker incremented it as well).
                if action_log_entry.retries >= ACTION_MAX_NR_OF_RETRIES:
                    log.critical('Max retries reached, giving up.',
                                 action_id=action_log_id,
                                 account_id=account_id, exc_info=True)
                    action_log_entry.status = 'failed'
                db_session.commit()

            # Wait for a bit before retrying
            gevent.sleep(retry_interval)

            # Remove the entry from the scheduled set so that it can be
            # retried or given up on.
            syncback_service.remove_from_schedule(action_log_id)

            # Again, don't raise on exceptions that require
            # provider-specific handling e.g. EAS
            if not isinstance(e, ProviderSpecificException):
                raise
Ejemplo n.º 2
0
def syncback_worker(semaphore,
                    func,
                    action_log_id,
                    record_id,
                    account_id,
                    syncback_service,
                    retry_interval=30,
                    extra_args=None):
    """Run one syncback action and mark its ActionLog entry executed.

    On success the entry is flagged ``executed`` and removed from the
    schedule. On failure the error is logged, the worker backs off for
    *retry_interval* seconds, the entry is unscheduled so it can be
    retried, and the exception is re-raised unless it is a
    ProviderSpecificException (handled by provider-specific code).
    """
    with semaphore:
        log = logger.new(record_id=record_id,
                         action_log_id=action_log_id,
                         action=func,
                         account_id=account_id,
                         extra_args=extra_args)
        try:
            # Soft-deleted objects must stay visible here: deleting e.g.
            # a draft locally still requires loading it so the delete can
            # be issued on the remote.
            with session_scope(ignore_soft_deletes=False) as db_session:
                call_args = [account_id, record_id, db_session]
                if extra_args:
                    call_args.append(extra_args)
                func(*call_args)
                entry = db_session.query(ActionLog).get(action_log_id)
                entry.executed = True
                db_session.commit()
                log.info('syncback action completed', action_id=action_log_id)
                syncback_service.remove_from_schedule(action_log_id)
        except Exception as exc:
            provider_specific = isinstance(exc, ProviderSpecificException)
            # Keep Sentry quiet for errors that are not actionable.
            if provider_specific:
                log.warning('Uncaught error', exc_info=True)
            else:
                log_uncaught_errors(log)

            # Back off, then drop the id from the scheduled set so the
            # action gets another chance.
            gevent.sleep(retry_interval)
            syncback_service.remove_from_schedule(action_log_id)

            # Provider-specific failures (e.g. EAS) are handled elsewhere;
            # everything else propagates.
            if not provider_specific:
                raise
Ejemplo n.º 3
0
    def _run(self):
        """Attempt the syncback action up to ACTION_MAX_NR_OF_RETRIES times.

        Each attempt runs in its own session; on success the ActionLog
        entry is marked 'successful', latency is logged/statsd'd and the
        method returns. On failure the retry counter is bumped in a fresh
        session (the entry is marked 'failed' once the limit is reached)
        and the worker sleeps for ``self.retry_interval`` before the next
        attempt.
        """
        with self.semaphore:
            log = logger.new(record_id=self.record_id,
                             action_log_id=self.action_log_id,
                             action=self.action_name,
                             account_id=self.account_id,
                             extra_args=self.extra_args)

            for _ in range(ACTION_MAX_NR_OF_RETRIES):
                with session_scope() as db_session:
                    try:
                        action_log_entry = db_session.query(ActionLog).get(
                            self.action_log_id)
                        if self.extra_args:
                            self.func(self.account_id, self.record_id,
                                      db_session, self.extra_args)
                        else:
                            self.func(self.account_id, self.record_id,
                                      db_session)
                        action_log_entry.status = 'successful'
                        db_session.commit()
                        latency = round(
                            (datetime.utcnow() -
                             action_log_entry.created_at).total_seconds(), 2)
                        log.info('syncback action completed',
                                 action_id=self.action_log_id,
                                 latency=latency)
                        self._log_to_statsd(action_log_entry.status, latency)
                        return

                    except Exception:
                        log_uncaught_errors(log, account_id=self.account_id)
                        # Record the failure in a *fresh* session and
                        # re-query the entry there: the outer session may
                        # be in an aborted state after the exception, and
                        # mutating the stale object bound to it while
                        # committing the inner session would silently drop
                        # the retry/failure update.
                        with session_scope() as retry_session:
                            retry_entry = retry_session.query(ActionLog).get(
                                self.action_log_id)
                            retry_entry.retries += 1
                            # >= rather than == so overshoot still fails.
                            if retry_entry.retries >= ACTION_MAX_NR_OF_RETRIES:
                                log.critical('Max retries reached, giving up.',
                                             exc_info=True)
                                retry_entry.status = 'failed'
                                self._log_to_statsd(retry_entry.status)
                            retry_session.commit()

                # Wait before retrying
                gevent.sleep(self.retry_interval)
Ejemplo n.º 4
0
    def _run(self):
        """Attempt the syncback action up to ACTION_MAX_NR_OF_RETRIES times.

        Each attempt runs in its own session; on success the ActionLog
        entry is marked 'successful', latency is logged/statsd'd and the
        method returns. On failure the retry counter is bumped in a fresh
        session (the entry is marked 'failed' once the limit is reached)
        and the worker sleeps for ``self.retry_interval`` before the next
        attempt.
        """
        with self.semaphore:
            log = logger.new(
                record_id=self.record_id, action_log_id=self.action_log_id,
                action=self.action_name, account_id=self.account_id,
                extra_args=self.extra_args)

            for _ in range(ACTION_MAX_NR_OF_RETRIES):
                with session_scope() as db_session:
                    try:
                        action_log_entry = db_session.query(ActionLog).get(
                            self.action_log_id)
                        if self.extra_args:
                            self.func(self.account_id, self.record_id,
                                      db_session, self.extra_args)
                        else:
                            self.func(self.account_id, self.record_id,
                                      db_session)
                        action_log_entry.status = 'successful'
                        db_session.commit()
                        latency = round((datetime.utcnow() -
                                         action_log_entry.created_at).
                                        total_seconds(), 2)
                        log.info('syncback action completed',
                                 action_id=self.action_log_id,
                                 latency=latency)
                        self._log_to_statsd(action_log_entry.status, latency)
                        return

                    except Exception:
                        log_uncaught_errors(log, account_id=self.account_id)
                        # Record the failure in a *fresh* session and
                        # re-query the entry there: the outer session may
                        # be in an aborted state after the exception, and
                        # mutating the stale object bound to it while
                        # committing the inner session would silently drop
                        # the retry/failure update.
                        with session_scope() as retry_session:
                            retry_entry = retry_session.query(ActionLog).get(
                                self.action_log_id)
                            retry_entry.retries += 1
                            # >= rather than == so overshoot still fails.
                            if retry_entry.retries >= ACTION_MAX_NR_OF_RETRIES:
                                log.critical('Max retries reached, giving up.',
                                             exc_info=True)
                                retry_entry.status = 'failed'
                                self._log_to_statsd(retry_entry.status)
                            retry_session.commit()

                # Wait before retrying
                gevent.sleep(self.retry_interval)
Ejemplo n.º 5
0
def syncback_worker(semaphore, func, action_log_id, record_id, account_id,
                    syncback_service, retry_interval=30, extra_args=None):
    """Run one syncback action; mark its ActionLog entry executed on success.

    On failure the error is logged, the worker sleeps *retry_interval*
    seconds, the entry is removed from the scheduled set so it can be
    retried, and the exception is re-raised unless it is a
    ProviderSpecificException (those are handled by provider code).
    """
    with semaphore:
        log = logger.new(record_id=record_id, action_log_id=action_log_id,
                         action=func, account_id=account_id,
                         extra_args=extra_args)
        try:
            # Keep soft-deleted rows visible: deleting a draft locally
            # still needs the object loaded to delete it on the remote.
            with session_scope(ignore_soft_deletes=False) as db_session:
                args = (account_id, record_id, db_session)
                if extra_args:
                    args = args + (extra_args,)
                func(*args)
                entry = db_session.query(ActionLog).get(action_log_id)
                entry.executed = True
                db_session.commit()
                log.info('syncback action completed',
                         action_id=action_log_id)
                syncback_service.remove_from_schedule(action_log_id)
        except Exception as err:
            is_provider_specific = isinstance(err, ProviderSpecificException)
            # Skip Sentry reporting for errors we cannot act on.
            if is_provider_specific:
                log.warning('Uncaught error', exc_info=True)
            else:
                log_uncaught_errors(log)

            # Back off, then unschedule the entry so the action gets
            # another chance.
            gevent.sleep(retry_interval)
            syncback_service.remove_from_schedule(action_log_id)

            # Provider-specific errors (e.g. EAS) are handled upstream;
            # everything else propagates.
            if not is_provider_specific:
                raise
Ejemplo n.º 6
0
def syncback_worker(semaphore,
                    action,
                    action_log_id,
                    record_id,
                    account_id,
                    syncback_service,
                    retry_interval=30,
                    extra_args=None):
    """Execute one queued syncback action and record its outcome.

    Looks up the callable for *action* in ACTION_FUNCTION_MAP, runs it
    against the record, and marks the ActionLog entry 'successful' on
    completion. On failure the retry counter is incremented (the entry is
    marked 'failed' once the retry limit is hit), the worker sleeps for
    *retry_interval* seconds, and the entry is removed from the scheduled
    set so it can be rescheduled or given up on.

    Raises:
        Re-raises any exception from the action, except
        ProviderSpecificException which is handled by provider code.
    """
    func = ACTION_FUNCTION_MAP[action]

    with semaphore:
        log = logger.new(record_id=record_id,
                         action_log_id=action_log_id,
                         action=func,
                         account_id=account_id,
                         extra_args=extra_args)
        # Not ignoring soft-deleted objects here because if you, say,
        # delete a draft, we still need to access the object to delete it
        # on the remote.
        try:
            with session_scope(ignore_soft_deletes=False) as db_session:
                if extra_args:
                    func(account_id, record_id, db_session, extra_args)
                else:
                    func(account_id, record_id, db_session)
                action_log_entry = db_session.query(ActionLog).get(
                    action_log_id)
                action_log_entry.status = 'successful'
                db_session.commit()
                latency = round((datetime.utcnow() -
                                 action_log_entry.created_at).total_seconds(),
                                2)
                log.info('syncback action completed',
                         action_id=action_log_id,
                         latency=latency)
                syncback_service.remove_from_schedule(action_log_id)
        except Exception as e:
            # To reduce error-reporting noise, don't ship to Sentry
            # if not actionable.
            if isinstance(e, ProviderSpecificException):
                log.warning('Uncaught error', exc_info=True)
            else:
                log_uncaught_errors(log, account_id=account_id)

            with session_scope() as db_session:
                action_log_entry = db_session.query(ActionLog).get(
                    action_log_id)
                action_log_entry.retries += 1

                # Use >= rather than == so the entry is still marked
                # 'failed' even if the counter ever overshoots the limit
                # (e.g. a concurrent worker incremented it as well).
                if action_log_entry.retries >= ACTION_MAX_NR_OF_RETRIES:
                    log.critical('Max retries reached, giving up.',
                                 action_id=action_log_id,
                                 account_id=account_id,
                                 exc_info=True)
                    action_log_entry.status = 'failed'
                db_session.commit()

            # Wait for a bit before retrying
            gevent.sleep(retry_interval)

            # Remove the entry from the scheduled set so that it can be
            # retried or given up on.
            syncback_service.remove_from_schedule(action_log_id)

            # Again, don't raise on exceptions that require
            # provider-specific handling e.g. EAS
            if not isinstance(e, ProviderSpecificException):
                raise