Code Example #1
File: slave.py  Project: sofian86/indico-gh-test
    def run(self):
        self._prepare()
        self._logger.info('Running task {}.. (delay: {})'.format(self._task.id, self._executionDelay))

        try:
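            # attempt the task up to task_max_tries times, retrying on conflicts and disconnects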
            for i, retry in enumerate(transaction.attempts(self._config.task_max_tries)):
                with retry:
                    self._logger.info('Task attempt #{}'.format(i))
                    if i > 0:
                        self._prepare_retry()
                    try:
                        self._process_task()
                        break
                    except ConflictError:
                        transaction.abort()
                    except ClientDisconnected:
                        self._logger.warning("Retrying for the {}th time in {} secs..".format(i + 1, seconds))
                        transaction.abort()
                        time.sleep(i * 10)
                    except TaskDelayed, e:
                        self._logger.info("{} delayed by {} seconds".format(self._task, e.delaySeconds))
                        self._delayed = True
                        self._executionDelay = 0
                        time.sleep(e.delaySeconds)
            flush_after_commit_queue(True)  # run after-commit functions
            GenericMailer.flushQueue(True)  # send emails
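
These examples share a single pattern: queued after-commit actions and emails are discarded with flushQueue(False) before every attempt, and only executed with flushQueue(True) once the transaction has committed, so mail queued during a failed attempt is never sent. The sketch below is a minimal illustration of that pattern, not code from any of the projects listed on this page; run_with_retries, do_work and flush_queues are invented names, and the Indico helpers (flush_after_commit_queue, GenericMailer.flushQueue) are passed in as a callable rather than imported.

import time

import transaction
from ZEO.Exceptions import ClientDisconnected
from ZODB.POSException import ConflictError


def run_with_retries(dbi, do_work, flush_queues, max_tries=10):
    # dbi          -- a DBMgr-style connection wrapper exposing abort() and sync()
    # do_work      -- the function that performs the actual database changes
    # flush_queues -- flush_queues(send) is expected to call
    #                 flush_after_commit_queue(send) and GenericMailer.flushQueue(send)
    for i, retry in enumerate(transaction.attempts(max_tries)):
        with retry:
            if i > 0:
                dbi.abort()          # throw away the failed attempt
            flush_queues(False)      # drop queued after-commit jobs and emails
            dbi.sync()               # start from the latest committed state
            try:
                do_work()
                transaction.commit()
                flush_queues(True)   # only now run after-commit jobs and send mail
                break
            except ConflictError:
                transaction.abort()  # concurrent write conflict, retry
            except ClientDisconnected:
                transaction.abort()
                time.sleep(i * 5)    # back off before reconnecting
    else:
        raise RuntimeError('unresolvable database conflict')

For instance, a caller could pass lambda send: (flush_after_commit_queue(send), GenericMailer.flushQueue(send)) as flush_queues and DBMgr.getInstance() as dbi.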
Code Example #2
File: process.py  Project: iason-andr/indico
    def invokeMethod(self, method, params, req):

        MAX_RETRIES = 10

        # clear the context
        ContextManager.destroy()

        DBMgr.getInstance().startRequest()

        # room booking database
        _startRequestSpecific2RH()

        # notify components that the request has started
        self._notify('requestStarted', req)

        forcedConflicts = Config.getInstance().getForceConflicts()
        retry = MAX_RETRIES
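        # keep retrying the request while database conflicts occur, up to MAX_RETRIES times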
        try:
            while retry > 0:
                if retry < MAX_RETRIES:
                    # notify components that the request is being retried
                    self._notify('requestRetry', req, MAX_RETRIES - retry)

                try:
                    # delete all queued emails
                    GenericMailer.flushQueue(False)

                    DBMgr.getInstance().sync()

                    try:
                        result = processRequest(method, copy.deepcopy(params), req)
                    except MaKaC.errors.NoReportError, e:
                        raise NoReportError(e.getMsg())
                    rh = ContextManager.get('currentRH')

                    # notify components that the request has ended
                    self._notify('requestFinished', req)
                    # Raise a conflict error if enabled. This allows detecting conflict-related issues easily.
                    if retry > (MAX_RETRIES - forcedConflicts):
                        raise ConflictError
                    _endRequestSpecific2RH(True)
                    DBMgr.getInstance().endRequest(True)
                    GenericMailer.flushQueue(True) # send emails
                    if rh._redisPipeline:
                        try:
                            rh._redisPipeline.execute()
                        except RedisError:
                            Logger.get('redis').exception('Could not execute pipeline')
                    break
                except ConflictError:
                    _abortSpecific2RH()
                    DBMgr.getInstance().abort()
                    retry -= 1
                    continue
                except ClientDisconnected:
                    _abortSpecific2RH()
                    DBMgr.getInstance().abort()
                    retry -= 1
                    time.sleep(MAX_RETRIES - retry)
                    continue
Code Example #3
File: slave.py  Project: pferreir/indico-backup
    def run(self):
        self._prepare()
        self._logger.info('Running task {}.. (delay: {})'.format(self._task.id, self._executionDelay))

        try:
            for i, retry in enumerate(transaction.attempts(self._config.task_max_tries)):
                with retry:
                    self._logger.info('Task attempt #{}'.format(i))
                    if i > 0:
                        self._prepare_retry()
                    try:
                        self._process_task()
                        break
                    except ConflictError:
                        transaction.abort()
                    except ClientDisconnected:
                        self._logger.warning("Retrying for the {}th time in {} secs..".format(i + 1, seconds))
                        transaction.abort()
                        time.sleep(i * 10)
                    except TaskDelayed, e:
                        self._logger.info("{} delayed by {} seconds".format(self._task, e.delaySeconds))
                        self._delayed = True
                        self._executionDelay = 0
                        time.sleep(e.delaySeconds)
            flush_after_commit_queue(True)
            GenericMailer.flushQueue(True)
Code Example #4
File: process.py  Project: belokop/indico_bare
 def _invokeMethodRetryBefore(self):
     # clear/init fossil cache
     fossilize.clearCache()
     # clear after-commit queue
     flush_after_commit_queue(False)
     # delete all queued emails
     GenericMailer.flushQueue(False)
     DBMgr.getInstance().sync()
Code Example #6
File: process.py  Project: belokop/indico_bare
    def _invokeMethodSuccess(self):
        rh = ContextManager.get('currentRH')

        flush_after_commit_queue(True)  # run after-commit functions
        GenericMailer.flushQueue(True)  # send emails
        if rh._redisPipeline:
            try:
                rh._redisPipeline.execute()
            except RedisError:
                Logger.get('redis').exception('Could not execute pipeline')
Code Example #8
    def invokeMethod(self, method, params, req):

        MAX_RETRIES = 10

        # clear the context
        ContextManager.destroy()

        DBMgr.getInstance().startRequest()

        # room booking database
        _startRequestSpecific2RH()

        # notify components that the request has started
        self._notify('requestStarted', req)

        forcedConflicts = Config.getInstance().getForceConflicts()
        retry = MAX_RETRIES
        try:
            while retry > 0:
                if retry < MAX_RETRIES:
                    # notify components that the request is being retried
                    self._notify('requestRetry', req, MAX_RETRIES - retry)

                try:
                    # delete all queued emails
                    GenericMailer.flushQueue(False)

                    DBMgr.getInstance().sync()

                    try:
                        result = processRequest(method, copy.deepcopy(params),
                                                req)
                    except MaKaC.errors.NoReportError, e:
                        raise NoReportError(e.getMsg())

                    # notify components that the request has ended
                    self._notify('requestFinished', req)
                    # Raise a conflict error if enabled. This allows detecting conflict-related issues easily.
                    if retry > (MAX_RETRIES - forcedConflicts):
                        raise ConflictError
                    _endRequestSpecific2RH(True)
                    DBMgr.getInstance().endRequest(True)
                    GenericMailer.flushQueue(True)  # send emails
                    break
                except ConflictError:
                    _abortSpecific2RH()
                    DBMgr.getInstance().abort()
                    retry -= 1
                    continue
                except ClientDisconnected:
                    _abortSpecific2RH()
                    DBMgr.getInstance().abort()
                    retry -= 1
                    time.sleep(MAX_RETRIES - retry)
                    continue
Code Example #9
File: base.py  Project: pmart123/indico
    def __call__(self, aw):
        """Perform the actual exporting"""
        if self.HTTP_POST != (request.method == 'POST'):
            raise HTTPAPIError(
                'This action requires %s' %
                ('POST' if self.HTTP_POST else 'GET'), 405)
        if not self.GUEST_ALLOWED and not aw.getUser():
            raise HTTPAPIError('Guest access to this resource is forbidden.',
                               403)

        method_name = self._getMethodName()
        func = getattr(self, method_name, None)
        extra_func = getattr(self, method_name + '_extra', None)
        if not func:
            raise NotImplementedError(method_name)

        if not self.COMMIT:
            is_response, resultList, complete, extra = self._perform(
                aw, func, extra_func)
        else:
            dbi = DBMgr.getInstance()
            try:
                for i, retry in enumerate(transaction.attempts(10)):
                    with retry:
                        if i > 0:
                            dbi.abort()
                        flush_after_commit_queue(False)
                        GenericMailer.flushQueue(False)
                        dbi.sync()
                        try:
                            is_response, resultList, complete, extra = self._perform(
                                aw, func, extra_func)
                            transaction.commit()
                            flush_after_commit_queue(True)
                            GenericMailer.flushQueue(True)
                            break
                        except ConflictError:
                            transaction.abort()
                        except ClientDisconnected:
                            transaction.abort()
                            time.sleep(i * 5)
                else:
                    raise HTTPAPIError(
                        'An unresolvable database conflict has occurred', 500)
            except Exception:
                transaction.abort()
                raise
        if is_response:
            return resultList
        return resultList, extra, complete, self.SERIALIZER_TYPE_MAP
Code Example #10
File: base.py  Project: OmeGak/indico
    def __call__(self, aw):
        """Perform the actual exporting"""
        if self.HTTP_POST != (request.method == 'POST'):
            raise HTTPAPIError('This action requires %s' % ('POST' if self.HTTP_POST else 'GET'), 405)
        if not self.GUEST_ALLOWED and not aw.getUser():
            raise HTTPAPIError('Guest access to this resource is forbidden.', 403)

        method_name = self._getMethodName()
        func = getattr(self, method_name, None)
        extra_func = getattr(self, method_name + '_extra', None)
        if not func:
            raise NotImplementedError(method_name)

        if not self.COMMIT:
            is_response, resultList, complete, extra = self._perform(aw, func, extra_func)
        else:
            dbi = DBMgr.getInstance()
            try:
                for i, retry in enumerate(transaction.attempts(10)):
                    with retry:
                        if i > 0:
                            dbi.abort()
                        flush_after_commit_queue(False)
                        GenericMailer.flushQueue(False)
                        dbi.sync()
                        try:
                            is_response, resultList, complete, extra = self._perform(aw, func, extra_func)
                            transaction.commit()
                            flush_after_commit_queue(True)
                            GenericMailer.flushQueue(True)
                            break
                        except ConflictError:
                            transaction.abort()
                        except ClientDisconnected:
                            transaction.abort()
                            time.sleep(i * 5)
                else:
                    raise HTTPAPIError('An unresolvable database conflict has occurred', 500)
            except Exception:
                transaction.abort()
                raise
        if is_response:
            return resultList
        return resultList, extra, complete, self.SERIALIZER_TYPE_MAP
Code Example #11
File: slave.py  Project: sofian86/indico-gh-test
 def _prepare_retry(self):
     self._dbi.abort()
     self._dbi.sync()
     flush_after_commit_queue(False)
     GenericMailer.flushQueue(False)
     self._task.plugLogger(self._logger)
Code Example #12
File: slave.py  Project: ferhatelmas/indico
    def run(self):

        self._prepare()

        self._logger.info('Running task %s.. (delay: %s)' % (self._task.id, self._executionDelay))

        # We will try to run the task TASK_MAX_RETRIES
        # times and if it continues failing we abort it
        i = 0

        # RoomBooking forces us to connect to its own DB if needed
        # Maybe we should add some extension point here that lets plugins
        # define their own actions on DB connect/disconnect/commit/abort

        # potentially conflict-prone (!)
        with self._dbi.transaction(sync=True):
            with self._rbdbi.transaction():
                self._task.prepare()

        delayed = False
        while i < self._config.task_max_tries:
            # Otherwise objects modified in indico itself are not updated here
            if hasattr(self._rbdbi, 'sync'):
                self._rbdbi.sync()

            try:
                if i > 0:
                    self._dbi.abort()
                    # delete all queued emails
                    GenericMailer.flushQueue(False)
                    # restore logger
                    self._task.plugLogger(self._logger)

                with self._dbi.transaction():
                    with self._rbdbi.transaction():

                        self._logger.info('Task cycle %d' % i)
                        i = i + 1

                        # clear the fossil cache at the start of each task
                        fossilize.clearCache()

                        self._task.start(self._executionDelay)
                        break

            except TaskDelayed, e:
                nextRunIn = e.delaySeconds
                self._executionDelay = 0
                delayed = True
                self._logger.info("%s delayed by %d seconds" % (self._task, e.delaySeconds))
                base.TimeSource.get().sleep(nextRunIn)

            except Exception, e:
                self._logger.exception("%s failed with exception '%s'. " % \
                                       (self._task, e))

                if i < self._config.task_max_tries:
                    nextRunIn = i * 10  # secs

                    self._logger.warning("Retrying for the %dth time in %d secs.." % \
                                         (i + 1, nextRunIn))

                    # if i is still low enough, we sleep progressively more
                    # so that if the error is caused by concurrency we don't make
                    # the problem worse by hammering the server.
                    base.TimeSource.get().sleep(nextRunIn)
Code Example #13
File: slave.py  Project: ferhatelmas/indico
            except Exception, e:
                self._logger.exception("%s failed with exception '%s'. " % \
                                       (self._task, e))

                if i < self._config.task_max_tries:
                    nextRunIn = i * 10  # secs

                    self._logger.warning("Retrying for the %dth time in %d secs.." % \
                                         (i + 1, nextRunIn))

                    # if i is still low enough, we sleep progressively more
                    # so that if the error is caused by concurrency we don't make
                    # the problem worse by hammering the server.
                    base.TimeSource.get().sleep(nextRunIn)

        GenericMailer.flushQueue(True)

        self._logger.info('Ended on: %s' % self._task.endedOn)

        # task successfully finished
        if self._task.endedOn:
            with self._dbi.transaction():
                self._setResult(True)
            if i > (1 + int(delayed)):
                self._logger.warning("%s failed %d times before "
                                     "finishing correctly" % (self._task, i - int(delayed) - 1))
        else:
            with self._dbi.transaction():
                self._setResult(False)
            self._logger.error("%s failed too many (%d) times. "
                               "Aborting its execution.." % (self._task, i))
Code Example #14
File: slave.py  Project: pferreir/indico-backup
 def _prepare_retry(self):
     self._dbi.abort()
     self._dbi.sync()
     flush_after_commit_queue(False)
     GenericMailer.flushQueue(False)
     self._task.plugLogger(self._logger)
Code Example #15
File: slave.py  Project: shirabe/indico
    def run(self):

        self._prepare()

        self._logger.info('Running task %s.. (delay: %s)' %
                          (self._task.id, self._executionDelay))

        # We will try to run the task TASK_MAX_RETRIES
        # times and if it continues failing we abort it
        i = 0

        # RoomBooking forces us to connect to its own DB if needed
        # Maybe we should add some extension point here that lets plugins
        # define their own actions on DB connect/disconnect/commit/abort

        # potentially conflict-prone (!)
        with self._dbi.transaction(sync=True):
            with self._rbdbi.transaction():
                self._task.prepare()

        delayed = False
        while i < self._config.task_max_tries:
            # Otherwise objects modified in indico itself are not updated here
            if hasattr(self._rbdbi, 'sync'):
                self._rbdbi.sync()

            try:
                if i > 0:
                    self._dbi.abort()
                    GenericMailer.flushQueue(False)
                    # restore logger
                    self._task.plugLogger(self._logger)

                with self._dbi.transaction():
                    with self._rbdbi.transaction():

                        self._logger.info('Task cycle %d' % i)
                        i = i + 1

                        self._task.start(self._executionDelay)
                        break

            except TaskDelayed, e:
                nextRunIn = e.delaySeconds
                self._executionDelay = 0
                delayed = True
                self._logger.info("%s delayed by %d seconds" %
                                  (self._task, e.delaySeconds))
                base.TimeSource.get().sleep(nextRunIn)

            except Exception, e:
                self._logger.exception("%s failed with exception '%s'. " % \
                                       (self._task, e))

                if i < self._config.task_max_tries:
                    nextRunIn = i * 10  # secs

                    self._logger.warning("Retrying for the %dth time in %d secs.." % \
                                         (i + 1, nextRunIn))

                    # if i is still low enough, we sleep progressively more
                    # so that if the error is caused by concurrency we don't make
                    # the problem worse by hammering the server.
                    base.TimeSource.get().sleep(nextRunIn)
Code Example #16
File: slave.py  Project: shirabe/indico
            except Exception, e:
                self._logger.exception("%s failed with exception '%s'. " % \
                                       (self._task, e))

                if i < self._config.task_max_tries:
                    nextRunIn = i * 10  # secs

                    self._logger.warning("Retrying for the %dth time in %d secs.." % \
                                         (i + 1, nextRunIn))

                    # if i is still low enough, we sleep progressively more
                    # so that if the error is caused by concurrency we don't make
                    # the problem worse by hammering the server.
                    base.TimeSource.get().sleep(nextRunIn)

        GenericMailer.flushQueue(True)

        self._logger.info('Ended on: %s' % self._task.endedOn)

        # task successfully finished
        if self._task.endedOn:
            with self._dbi.transaction():
                self._setResult(True)
            if i > (1 + int(delayed)):
                self._logger.warning("%s failed %d times before "
                                     "finishing correctly" %
                                     (self._task, i - int(delayed) - 1))
        else:
            with self._dbi.transaction():
                self._setResult(False)
            self._logger.error("%s failed too many (%d) times. "