def can_run(self, limit_key, limit):
    """Try to reserve one concurrency slot under ``limit``.

    Atomically increments the Redis counter at ``limit_key``. If the
    incremented value exceeds ``limit``, the reservation is rolled back
    and ``False`` is returned; otherwise the counter's TTL is refreshed
    and ``True`` is returned.
    """
    reserved = redis.incr(limit_key)
    if reserved > limit:
        # Over the limit: undo our own increment and refuse the run.
        redis.decr(limit_key)
        return False
    # Within the limit: refresh the TTL so a stale counter cannot
    # block runs forever.
    redis.expire(limit_key, CODEBOX_COUNTER_TIMEOUT)
    return True
def process(self, instance_pk, incentive_pk, **kwargs):
    """Validate an incentive run request and dispatch it for execution.

    Looks up the instance and incentive, runs a series of guard checks
    (owner standing, legacy-codebox policy, per-runner queue limit) and
    then either hands the run to the gRPC path or enqueues its spec on a
    per-instance Redis queue and wakes a runner task.  Returns ``None``
    in every case; blocked runs are reported via ``self.block_run``.
    """
    logger = self.get_logger()
    instance = _get_instance(instance_pk)
    if instance is None:
        # Instance vanished between scheduling and processing — nothing to do.
        logger.warning(
            "%s[pk=%s] for %s cannot be run, because instance was not found.",
            self.incentive_class.__name__, incentive_pk, instance)
        return
    incentive = self.get_incentive(instance, incentive_pk)
    # Bail out silently when the incentive is gone or no longer valid
    # for this run (no block_run — this is not an enforcement decision).
    if not incentive or not self.is_incentive_valid(
            instance, incentive, **kwargs):
        return
    spec = self.create_spec(instance, incentive, **kwargs)
    # Guard: the instance owner must be in good standing to start new runs.
    if not OwnerInGoodStanding.is_admin_in_good_standing(
            instance.owner_id):
        self.block_run(
            'Blocked %s for %s, instance owner cannot run new codeboxes.',
            incentive, instance, spec)
        return
    # If legacy codeboxes are disabled, only allow new socket format.
    if not settings.LEGACY_CODEBOX_ENABLED and (
            incentive.socket is None or not incentive.socket.is_new_format):
        self.block_run('Blocked %s for %s, legacy codeboxes are disabled.',
                       incentive, instance, spec)
        return
    logger.info('Running %s for %s.', incentive, instance)
    # New-format sockets are executed over gRPC and never touch the
    # Redis queue below.
    if incentive.socket is not None and incentive.socket.is_new_format:
        self.process_grpc(instance, incentive, spec)
        return
    # Legacy path: persist the spec and pick the (priority) queue for
    # this instance.
    spec_key = self.publish_codebox_spec(instance_pk, incentive_pk, spec)
    if self.is_incentive_priority(instance, incentive):
        queue = QUEUE_PRIORITY_TEMPLATE.format(instance=instance_pk)
    else:
        queue = QUEUE_TEMPLATE.format(instance=instance_pk)
    concurrency_limit = spec['run']['concurrency_limit']
    # Backpressure: refuse to enqueue when the per-runner queue limit
    # (scaled by the run's concurrency limit) is already reached.
    if redis.llen(
            queue
    ) >= settings.CODEBOX_QUEUE_LIMIT_PER_RUNNER * concurrency_limit:
        self.block_run('Blocked %s for %s, queue limit exceeded.',
                       incentive, instance, spec)
        return
    redis.rpush(queue, spec_key)
    # Keep the queue key from leaking if no runner ever drains it.
    redis.expire(queue, QUEUE_TIMEOUT)
    # Wake up codebox runner
    CodeBoxRunTask.delay(instance_pk=instance.pk,
                         concurrency_limit=concurrency_limit)
def cleanup(self, limit_key):
    """Release one concurrency slot previously reserved at ``limit_key``.

    Decrements the Redis counter. A non-negative result keeps the
    counter alive with a refreshed TTL; a negative result means the
    counter is stale (e.g. it expired mid-run), so the key is removed
    entirely rather than left below zero.
    """
    remaining = redis.decr(limit_key)
    if remaining >= 0:
        redis.expire(limit_key, CODEBOX_COUNTER_TIMEOUT)
    else:
        # Counter underflowed — drop the key instead of keeping a
        # negative value around.
        redis.delete(limit_key)
def run(self, **kwargs):
    """Process the oldest matching object with bounded retry accounting.

    Picks the least-recently-updated object matching ``self.query``,
    tracks per-instance attempt counts in Redis, and invokes
    ``self.process_object`` / ``self.save_object``.  Expected
    ``ObjectProcessingError``s may retry (up to ``self.max_attempts``)
    or be handed to ``self.handle_exception``; unexpected exceptions
    schedule a delayed retry via ``self.countdown`` until the attempt
    budget is exhausted.  The attempt counter is deleted on any path
    that does not end in an early ``return``.
    """
    logger = self.get_logger()
    instance_pk = self.instance.pk
    # Reset retry delay; only set when an unexpected error wants a retry.
    self.countdown = None
    obj = self.model_class.objects.filter(
        **self.query).order_by('updated_at').first()
    if not obj:
        return
    # Increase attempt key for an object and check if we haven't exceeded max attempts to process it
    # NOTE(review): the attempt key is derived from instance_pk only, not
    # obj.pk — attempts appear to be counted per instance; confirm intended.
    attempt_key = self.get_attempt_key(instance_pk=instance_pk)
    attempt = redis.incr(attempt_key)
    redis.expire(attempt_key, self.lock_expire)
    logger.info('Processing of %s[pk=%s] in Instance[pk=%s]. Attempt #%d.',
                self.model_class.__name__, obj.pk, instance_pk, attempt)
    try:
        # process_object may return False to explicitly skip saving.
        if self.process_object(obj, **kwargs) is not False:
            self.save_object(obj)
    except ObjectProcessingError as exc:
        if attempt < self.max_attempts and exc.retry:
            # Retryable processing error with budget left: keep the
            # attempt counter (early return skips the delete below).
            logger.warning(
                'ProcessingError during processing of %s[pk=%s] in Instance[pk=%s]. Retrying.',
                self.model_class.__name__, obj.pk, instance_pk, exc_info=1)
            return
        # Non-retryable or out of attempts: delegate to the handler,
        # then fall through to reset the attempt counter.
        logger.warning(
            'ProcessingError during processing of %s[pk=%s] in Instance[pk=%s].',
            self.model_class.__name__, obj.pk, instance_pk, exc_info=1)
        self.handle_exception(obj, exc)
    except Exception as exc:
        # Return if encountered unexpected error. We will retry in after lock handler.
        if attempt < self.max_attempts:
            logger.warning(
                'Unhandled error during processing of %s[pk=%s] in Instance[pk=%s]. Retrying.',
                self.model_class.__name__, obj.pk, instance_pk, exc_info=1)
            # Linear backoff: delay grows with the attempt number.
            self.countdown = attempt * self.default_retry_delay
            return
        # Otherwise if we reached max attempts - log it
        logger.error(
            'Unhandled error during processing of %s[pk=%s] in Instance[pk=%s].',
            self.model_class.__name__, obj.pk, instance_pk, exc_info=1)
        self.handle_exception(obj, exc)
    # No unexpected error encountered - we're done, reset attempts
    redis.delete(attempt_key)