def update_from_gh(self, gh, api_error, method, path, request_headers, response_headers, kw):
    """Update this token object from a `gh` connection object and an optional error.

    Concretely, after each call made with this token:
    - save the date of the last call, and of the last successful/failed one
    - save the rate-limit limit and remaining count, expiring the remaining
      counter at the reset time given by GitHub
    - save the OAuth scopes and mark whether they are valid
    - save the api_error details (as JSON in a sorted set) if any

    If too few calls remain (below ``self.LIMIT``; the wording "< 10% of the
    limit" is the intent — TODO confirm LIMIT matches), mark the token as
    unavailable and ask for a reset flag when calls become available again.

    Parameters
    ----------
    gh : connection object; read for ``_connection_args``, ``x_oauth_scopes``,
        ``x_ratelimit_remaining``, ``x_ratelimit_reset``, ``x_ratelimit_limit``
    api_error : exception or None; may carry ``code`` and ``response``
    method, path, request_headers, response_headers, kw :
        request/response details, logged as JSON on error or unavailability
    """
    self.username.hset(gh._connection_args['username'])

    # save last calls
    now = datetime.utcnow()
    str_now = str(now)
    self.last_call.hset(str_now)

    log_unavailability = False

    # An "error" object with a 304 (Not Modified) or any 2xx code is not
    # treated as a real failure.
    is_error = False
    if api_error:
        is_error = True
        if hasattr(api_error, 'code'):
            if api_error.code == 304 or (200 <= api_error.code < 300):
                is_error = False

    if not is_error:
        self.last_call_ok.hset(str_now)
    else:
        self.last_call_ko.hset(str_now)

    # reset scopes (only when the X-OAuth-Scopes header was present at all)
    if gh.x_oauth_scopes is not None:
        self.scopes.delete()
        if gh.x_oauth_scopes:
            self.scopes.sadd(*gh.x_oauth_scopes)
        self.valid_scopes.hset(int(bool(gh.x_oauth_scopes)))
        # without the 'repo' scope the token is unusable for our purposes
        if not gh.x_oauth_scopes or 'repo' not in gh.x_oauth_scopes:
            self.available.hset(0)
            log_unavailability = True
            self.ask_for_reset_flags(3600)  # check again in an hour

    # -1 means the rate-limit headers were absent from the response
    if gh.x_ratelimit_remaining != -1:
        # add rate limit remaining, clear it after reset time
        self.rate_limit_remaining.set(gh.x_ratelimit_remaining)
        if gh.x_ratelimit_reset != -1:
            # expire the counter exactly at GitHub's announced reset timestamp
            self.connection.expireat(self.rate_limit_remaining.key, gh.x_ratelimit_reset)
            self.rate_limit_reset.hset(gh.x_ratelimit_reset)
        else:
            # no reset timestamp given: fall back to a one-hour window
            self.connection.expire(self.rate_limit_remaining.key, 3600)
            self.rate_limit_reset.hset(datetime_to_score(datetime.utcnow()+timedelta(seconds=3600)))

        # if too few requests remaining, consider the token as not available
        # for public queries (5000 is GitHub's default hourly limit, used
        # when the limit header was absent)
        limit = 5000 if gh.x_ratelimit_limit == -1 else gh.x_ratelimit_limit
        self.rate_limit_limit.hset(limit)
        if not gh.x_ratelimit_remaining or gh.x_ratelimit_remaining < self.LIMIT:
            self.available.hset(0)
            log_unavailability = True
            self.ask_for_reset_flags()
        else:
            self.available.hset(1)

    # log the full request/response context on real errors and on
    # transitions to unavailability, as JSON entries in sorted sets
    if is_error or log_unavailability:
        json_data = {
            'request': {
                'path': path,
                'method': method,
                'headers': request_headers,
                'args': kw,
            },
            'response': {
                'headers': response_headers,
            },
        }
        if api_error:
            if hasattr(api_error, 'code'):
                json_data['response']['code'] = api_error.code
            if api_error.response and api_error.response.json:
                json_data['response']['content'] = api_error.response.json
        json_data = json.dumps(json_data)
        # score both zset entries by the timestamp of this call
        when = datetime_to_score(now)
        if is_error:
            self.errors.zadd(when, json_data)
        if log_unavailability:
            self.unavailabilities.zadd(when, json_data)
def delay_job(self, job, delayed_until):
    """
    Register the given job in the queue's delayed zset, scored by the
    timestamp at which it will become ready.
    """
    self.delayed.zadd(datetime_to_score(delayed_until), job.ident)
def delay_job(self, job, delayed_until):
    """
    Register the given job in the queue's delayed zset, scored by the
    timestamp at which it will become ready.
    """
    self.delayed.zadd({job.ident: datetime_to_score(delayed_until)})
def requeue_delayed_jobs(self):
    """Put all delayed jobs that are now ready back in the queue waiting list.

    Guarded by a Redis lock so only one worker does the requeuing at a time;
    if the lock is already held, return immediately without waiting.

    Returns
    -------
    list of (job_ident, error_message) tuples for jobs that failed to requeue.
    """
    lock_key = self.make_key(
        self._name,
        self.pk.get(),
        "requeue_all_delayed_ready_jobs",
    )

    connection = self.get_connection()

    if connection.exists(lock_key):
        # if locked, a worker is already on it, don't wait and exit
        return []

    with Lock(connection, lock_key, timeout=60):
        # stop here if we know we have nothing
        first_delayed_time = self.first_delayed_time
        if not first_delayed_time:
            return []

        # get when we are :)
        now_timestamp = datetime_to_score(datetime.utcnow())

        # the first job will be ready later, and so will the other ones too,
        # so abort (the zset is sorted by readiness date)
        # NOTE(review): float() suggests first_delayed_time comes back as a
        # string/bytes score from Redis — confirm against first_delayed_time's
        # implementation
        if float(first_delayed_time) > now_timestamp:
            return []

        failures = []

        while True:
            # get the first entry
            first_entry = self.first_delayed

            # no first entry, another worker took all from us!
            if not first_entry:
                break

            # split into vars for readability
            job_ident, delayed_until = first_entry

            # if the date of the job is in the future, another worker took the
            # job we wanted, so we let this job here and stop the loop as we
            # know (it's a zset sorted by date) that no other jobs are ready
            if delayed_until > now_timestamp:
                break

            # remove the entry we just got from the delayed ones
            self.delayed.zrem(job_ident)

            # and add it to the waiting queue; any failure is collected
            # instead of aborting the whole requeue run
            try:
                job = Job.get_from_ident(job_ident)
                # only requeue jobs still marked DELAYED — presumably jobs
                # cancelled or picked up meanwhile must not be re-enqueued;
                # TODO confirm enqueue_job belongs inside this status check
                if job.status.hget() == STATUSES.DELAYED:
                    job.status.hset(STATUSES.WAITING)
                    self.enqueue_job(job)
            except Exception as e:
                failures.append((job_ident, '%s' % e))

    return failures