Example #1
    def job_delayed(self, job, queue):
        """
        Called if a job has the "delayed" status before trying to run it, or
        if its status was set to "delayed" after the run.
        If delayed_until was not set, or is invalid, set it to 60 seconds in
        the future.
        delayed_until = job.delayed_until.hget()
        if delayed_until:
            try:
                delayed_until = compute_delayed_until(delayed_until=parse(delayed_until))
            except (ValueError, TypeError):
                delayed_until = None

        if not delayed_until:
            # by default delay it for 60 seconds
            delayed_until = compute_delayed_until(delayed_for=60)

        job.enqueue_or_delay(
            queue_name=queue._cached_name,
            delayed_until=delayed_until,
            queue_model=queue.__class__,
        )

        self.log(self.job_delayed_message(job, queue), level='warning')

        if hasattr(job, 'on_delayed'):
            job.on_delayed(queue)
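
Both branches above delegate the date arithmetic to compute_delayed_until. A minimal sketch of what such a helper can look like, assuming it only has to turn a delayed_for duration (seconds or timedelta) or an explicit delayed_until datetime into the target datetime (an illustrative sketch, not the library's actual implementation):

    from datetime import datetime, timedelta

    def compute_delayed_until(delayed_for=None, delayed_until=None):
        """Sketch: return the datetime until which a job should stay delayed."""
        if delayed_for is not None and delayed_until is not None:
            raise ValueError('Use either delayed_for or delayed_until, not both')
        if delayed_for is not None:
            if not isinstance(delayed_for, timedelta):
                delayed_for = timedelta(seconds=delayed_for)
            return datetime.utcnow() + delayed_for
        return delayed_until  # may be None if no delay was requested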
Example #2
    def run(self, queue):
        super(SearchReferenceCommitForEvent, self).run(queue)

        event = self.event

        try:
            # try to find the matching commit
            event.related_object = Commit.objects.filter(
                authored_at__lte=event.created_at,
                sha=event.commit_sha,
                author=event.user
            ).order_by('-authored_at')[0]
        except IndexError:
            # the commit was not found

            tries = int(self.nb_tries.hget() or 0)

            if tries >= 5:
                # enough tries, stop now
                self.status.hset(STATUSES.CANCELED)
                return None
            else:
                # we'll try again...
                self.status.hset(STATUSES.DELAYED)
                self.delayed_until.hset(compute_delayed_until(delayed_for=60*tries))
                self.nb_tries.hincrby(1)
            return False

        # commit found, save the event
        event.save()

        return True
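
The retry bookkeeping above relies on a nb_tries counter that the base job model does not provide on its own. A hypothetical outline of the job class, assuming limpyd's InstanceHashField and the Job base model from limpyd_jobs (the queue name is illustrative, not taken from the original code):

    from limpyd import fields
    from limpyd_jobs.models import Job

    class SearchReferenceCommitForEvent(Job):
        # illustrative queue name
        queue_name = 'search-reference-commit-for-event'

        # counter read and incremented by run() to give up after 5 attempts
        nb_tries = fields.InstanceHashField()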
Example #3
    def run(self, queue):
        super(SearchReferenceCommitForComment, self).run(queue)

        repository_id, commit_sha = self.hmget('repository_id', 'commit_sha')

        try:
            # try to find the matching commit
            Commit.objects.filter(
                repository_id=repository_id,
                sha__startswith=commit_sha,
            ).order_by('-authored_at')[0]
        except IndexError:
            # the commit was not found

            tries = int(self.nb_tries.hget() or 0)

            if tries >= 5:
                # enough tries, stop now
                self.status.hset(STATUSES.CANCELED)
                return None
            else:
                # we'll try again...
                self.status.hset(STATUSES.DELAYED)
                self.delayed_until.hset(compute_delayed_until(delayed_for=60*tries))
                self.nb_tries.hincrby(1)
            return False

        # commit found, save the comment
        self.object.save()

        return True
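
This run() and the one in the previous example share the exact same delay-and-retry logic, so it could be factored out. A hypothetical mixin sketch (the import paths for STATUSES and compute_delayed_until are assumed):

    from limpyd_jobs import STATUSES
    from limpyd_jobs.utils import compute_delayed_until

    class DelayedRetryMixin(object):
        """Hypothetical helper: delay the job with a linear backoff, and
        cancel it once max_tries is reached."""
        max_tries = 5
        backoff_seconds = 60

        def delay_or_cancel(self):
            tries = int(self.nb_tries.hget() or 0)
            if tries >= self.max_tries:
                # enough tries, stop now
                self.status.hset(STATUSES.CANCELED)
                return None
            # we'll try again later, waiting a bit longer each time
            self.status.hset(STATUSES.DELAYED)
            self.delayed_until.hset(
                compute_delayed_until(delayed_for=self.backoff_seconds * tries))
            self.nb_tries.hincrby(1)
            return False

Both run() methods could then simply return self.delay_or_cancel() from their except IndexError blocks.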
Example #4
    def requeue(self, queue_name=None, priority=None, delayed_for=None,
                delayed_until=None, queue_model=None):
        """
        Requeue the job in the given queue if it has previously failed
        """
        queue_name = self._get_queue_name(queue_name)

        # we can only requeue a job that raised an error
        if self.status.hget() != STATUSES.ERROR:
            raise LimpydJobsException('Job cannot be requeued if not in ERROR status')

        self.hdel('start', 'end')

        if priority is None:
            priority = self.priority.hget()

        delayed_until = compute_delayed_until(delayed_for, delayed_until)

        self.enqueue_or_delay(queue_name, priority, delayed_until, queue_model=queue_model)
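
A hedged usage sketch of requeue(), assuming `job` is an existing job instance whose last run ended in error and that STATUSES is importable from the limpyd_jobs package:

    from limpyd_jobs import STATUSES

    # 'job' is assumed to be a job instance retrieved elsewhere
    if job.status.hget() == STATUSES.ERROR:
        # push it back in its default queue with a bumped priority,
        # delayed for 5 minutes (values are illustrative)
        job.requeue(priority=2, delayed_for=5 * 60)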
Example #5
    def add_job(cls, identifier, queue_name=None, priority=0, queue_model=None,
                prepend=False, delayed_for=None, delayed_until=None,
                **fields_if_new):
        """
        Add a job to a queue.
        If this job already exists, check its current priority. If it's higher
        than the new one, don't touch it, else move the job to the wanted queue.
        Before setting/moving the job to the queue, check for a `delayed_for`
        (int/float/timedelta) or `delayed_until` (datetime) argument to see if
        it must be delayed instead of queued.
        If the job is created, fields in fields_if_new will be set for the new
        job.
        Finally return the job.
        """

        # check for delayed_for/delayed_until arguments
        delayed_until = compute_delayed_until(delayed_for, delayed_until)

        # create the job or get an existing one
        job_kwargs = {'identifier': identifier, 'queued': '1'}
        retries = 0
        while retries < 10:
            retries += 1
            try:
                job, created = cls.get_or_connect(**job_kwargs)
            except IndexError:
                # Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
                # => retry
                continue
            except ValueError:
                # more than one already in the queue!
                try:
                    job = cls.collection(**job_kwargs).instances()[0]
                except IndexError:
                    # but not anymore now?!
                    # => retry
                    continue
                else:
                    created = False

            # ok we have our job, stop now
            break

        try:
            # check queue_name
            queue_name = cls._get_queue_name(queue_name)

            # if the job already exists and we want a higher priority or to
            # move it, start by updating it
            if not created:
                current_priority = int(job.priority.hget() or 0)
                # if the job already has a higher priority, or doesn't need to
                # be moved, don't move it
                if not prepend and current_priority >= priority:
                    return job

                # cancel it temporarily, we'll set it as waiting later
                job.status.hset(STATUSES.CANCELED)

                # remove it from the current queue, we'll add it to the new one later
                if queue_model is None:
                    queue_model = cls.queue_model
                current_queue = queue_model.get_queue(queue_name, current_priority)
                current_queue.waiting.lrem(0, job.ident)

            else:
                job.set_fields(added=str(datetime.utcnow()), **(fields_if_new or {}))

            # add the job to the queue
            job.enqueue_or_delay(queue_name, priority, delayed_until, prepend, queue_model)

            return job
        except Exception:
            job.queued.delete()
            raise
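
A hedged usage sketch of add_job(), with a hypothetical FetchRepository job subclass and an identifier scheme based on the repository's primary key (both are illustrative, not taken from the original code):

    from datetime import timedelta

    repository_pk = 123  # illustrative value

    # delayed_for accepts seconds or a timedelta; delayed_until takes a datetime
    job = FetchRepository.add_job(
        identifier='fetch-repository:%s' % repository_pk,
        priority=1,
        delayed_for=timedelta(minutes=2),
    )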
Example #6
    def _get_gh(self):
        """
        Return a Connection object based on arguments saved in the job, or,
        depending on the type of permission, get one from the Token model.
        """
        from core.limpyd_models import Token

        args = self.gh_args.hgetall()
        if 'access_token' not in args:
            args = None

        permission = getattr(self, 'permission', 'read')

        token = None

        # we have connection args: get the token if available
        if args:
            try:
                token_kwargs = {'token': args['access_token']}
                # ignore the available flag for "self"
                if permission != 'self':
                    token_kwargs['available'] = 1
                try:
                    token = Token.get(**token_kwargs)
                except IndexError:
                    # changed during the "get"... retry once
                    # explanation: the get first checks the length of the result
                    # and, if it's 1, retrieves the first entry, but in the
                    # meantime the data may have changed and there is no result
                    # anymore...
                    token = Token.get(**token_kwargs)
            except (Token.DoesNotExist, KeyError):
                pass
            else:
                # final check on remaining api calls
                if int(token.rate_limit_remaining.get() or 0):
                    return token.gh

        # no token, try to get one...
        repository = None

        if permission == 'self':
            # forced to use the current one, but not available...
            pass
        else:

            # if we have a repository, get a token for it matching the permission
            repository = getattr(self, 'repository')
            if repository:
                if repository.private and permission not in ('admin', 'push'):
                    # force correct permission if repository is private
                    permission = 'push'
                token = Token.get_one_for_repository(repository.pk, permission)

            # no repository, not "self", but a token is still wanted? don't know why, but ok...
            else:
                token = Token.get_one()

        # if we don't have a token, it means none is available: we delay
        # the job
        if not token:
            self.status.hset(STATUSES.DELAYED)

            if hasattr(self, 'delay_for_gh'):
                # use the "delay_for_gh" attribute if any to delay the job for X seconds
                self.delayed_until.hset(compute_delayed_until(delayed_for=self.delay_for_gh))

            else:
                # find the gh that will become available the soonest
                if permission == 'self':
                    if args:
                        token = Token.get(token=args['access_token'])
                elif repository:
                    token = Token.get_one_for_repository(repository.pk, permission, available=False, sort_by='rate_limit_reset')
                else:
                    token = Token.get_one(available=False, sort_by='rate_limit_reset')

                # if we have a token, get its delay before availability, and
                # set it on the job for future use
                if token:

                    remaining = token.get_remaining_seconds()
                    if remaining is not None and remaining >= 0:
                        self.delayed_until.hset(compute_delayed_until(remaining))
                    else:
                        self.delayed_until.delete()

                    self.gh = token.gh

                else:
                    # no token at all? we may have none for this permission!
                    # so retry in 15 minutes
                    self.delayed_until.hset(compute_delayed_until(delayed_for=60 * 15))

            return None

        # save it in the job, useful when cloning to avoid searching for a new
        # gh (will only happen if it is not available anymore)
        self.gh = token.gh

        # and ok, return it
        return token.gh
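
The delay computation above leans on Token.get_remaining_seconds(); a minimal sketch of what such a method could look like, assuming the token stores its GitHub rate_limit_reset as a unix timestamp in a simple field (an assumption, not the model's actual code):

    import calendar
    from datetime import datetime

    def get_remaining_seconds(token):
        """Sketch of Token.get_remaining_seconds: seconds until the GitHub
        rate limit resets, or None if the reset timestamp is unknown."""
        reset = token.rate_limit_reset.get()  # assumed: unix timestamp, stored as a string
        if not reset:
            return None
        now = calendar.timegm(datetime.utcnow().timetuple())
        return int(reset) - now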