Example #1
0
    def invalidate(self, request, *args, **kwargs):
        """Drop the cached response for this endpoint and return 204 No Content."""
        endpoint = self.get_object()
        parent_socket = Cached(Socket, kwargs={'pk': endpoint.socket_id}).get()

        # Cache key is scoped by instance schema, endpoint name and the
        # socket's current hash.
        key = ENDPOINT_CACHE_KEY_TEMPLATE.format(
            schema=request.instance.pk,
            name=endpoint.name,
            hash=parent_socket.get_hash(),
        )
        redis.delete(key)
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
Example #2
0
    def test_list_handling_expired_objects(self):
        """list() must skip objects whose backing key has expired."""
        # Object present in both the list index and the object store.
        instance = MyModel.create(char='cba')
        self.assertEqual(len(MyModel.list()), 1)

        # Simulate TTL expiry of the object itself, leaving the list entry.
        redis.delete(instance.get_object_key(pk=instance.pk))

        self.assertRaises(ObjectDoesNotExist, MyModel.get, instance.pk)
        # The stale pk remains in the sorted-set index...
        self.assertEqual(redis.zcard(MyModel.get_list_key()), 1)
        # ...but list() filters the expired object out.
        self.assertEqual(len(MyModel.list()), 0)
Example #3
0
    def aggregate(self, left_boundary, right_boundary):
        """Drain the redis bucket for ``left_boundary`` into unsaved model rows.

        Hash fields are `admin:instance_id:instance_name:source` keys mapped
        to counter values; empty id segments are stored as None.  The bucket
        is deleted once read.  (``right_boundary`` is accepted for interface
        compatibility but not used here.)
        """
        model_cls = self.model
        bucket = model_cls.bucket_name(left_boundary)

        results = []
        for raw_key, raw_value in redis.hscan_iter(bucket):
            admin_id, instance_id, instance_name, source = \
                raw_key.decode().split(':')
            results.append(model_cls(
                timestamp=left_boundary,
                source=source,
                admin_id=admin_id or None,
                instance_id=instance_id or None,
                instance_name=instance_name or None,
                value=raw_value.decode(),
            ))

        redis.delete(bucket)
        return results
Example #4
0
    def process_spec(self, spec_key, queue=None):
        """Run the codebox spec stored under ``spec_key``.

        Looks the spec up in redis; if the key is already gone, or the spec's
        ``expire_at`` deadline has passed, nothing is executed (a timed-out
        spec reports QUEUE_TIMEOUT on its trace, if one is attached).  On a
        container/IO failure the key is pushed back to the front of ``queue``
        (when given) for a retry; only a successful run deletes the spec key.
        """
        logger = self.get_logger()
        runner = CodeBoxRunner(logger=logger)

        codebox_spec = get_codebox_spec(spec_key)
        if codebox_spec is None:
            logger.warning("CodeBox spec has expired. Nothing to do here.")
            return

        expire_at = codebox_spec.get('expire_at')
        if expire_at:
            now = timezone.now()
            expire_at = parse_datetime(expire_at)

            if now > expire_at:
                # Surface the timeout on the trace so the caller can see why
                # the script never ran.
                if 'trace' in codebox_spec:
                    SaveTraceTask.delay(
                        codebox_spec['trace'], {
                            'status': Trace.STATUS_CHOICES.QUEUE_TIMEOUT,
                            'executed_at': now.strftime(
                                settings.DATETIME_FORMAT),
                            'result': {
                                'stdout': '',
                                'stderr': 'Internal queue timeout.'
                            }
                        })
                logger.warning("CodeBox spec runtime has expired.")
                return

        try:
            runner.run(codebox_spec)
        except (ContainerException, IOError, FileNotFoundError) as exc:
            # Put it at the beginning of queue, still log the error as we should try to fix it.
            if isinstance(exc, ContainerException):
                # Fix: reuse the local logger instead of calling
                # self.get_logger() a second time (inconsistent with the rest
                # of this method).
                logger.exception(exc)
            if queue is not None:
                redis.lpush(queue, spec_key)
        except Exception as exc:
            logger.exception(exc)
        else:
            # Successful run - the spec is consumed, drop it from redis.
            redis.delete(spec_key)
Example #5
0
    def run(self,
            incentive_pk,
            instance_pk,
            payload_key,
            meta_key,
            trace_pk,
            expire_at=None,
            result_key=None,
            template_name=None,
            script_pk=None):
        """Load the payload and meta blobs from redis and hand them to process()."""
        additional_args = redis.get(payload_key)
        meta_blob = redis.get(meta_key)

        self.process(instance_pk=instance_pk,
                     incentive_pk=incentive_pk,
                     script_pk=script_pk,
                     additional_args=additional_args,
                     result_key=result_key,
                     template_name=template_name,
                     trace_pk=trace_pk,
                     expire_at=expire_at,
                     meta=meta_blob)
        # Payload is one-shot: drop it once processed (skipped if process()
        # raised, so a retry can still read it).
        # NOTE(review): meta_key is never deleted here - presumably it expires
        # via TTL or is shared by other consumers; confirm this is not a leak.
        redis.delete(payload_key)
Example #6
0
 def cleanup(self, limit_key):
     """Release one slot on the concurrency counter stored at ``limit_key``."""
     remaining = redis.decr(limit_key)
     if remaining < 0:
         # Counter underflowed - remove the key entirely.
         redis.delete(limit_key)
     else:
         # Still in use: keep the counter alive for the configured window.
         redis.expire(limit_key, CODEBOX_COUNTER_TIMEOUT)
Example #7
0
    def run(self, **kwargs):
        """Pick the oldest object matching ``self.query`` and try to process it.

        Attempt counts are tracked in redis per instance.  Retriable failures
        return early, leaving the attempt counter in place so the next run
        sees an incremented count; final failures are delegated to
        ``handle_exception``.  The counter is cleared once the object is
        either processed or given up on.
        """
        logger = self.get_logger()
        instance_pk = self.instance.pk
        # Retry delay for the scheduler; set only on the unexpected-error
        # retry path below.
        self.countdown = None

        # Oldest object first so nothing starves behind newer updates.
        obj = self.model_class.objects.filter(
            **self.query).order_by('updated_at').first()
        if not obj:
            return

        # Increase attempt key for an object and check if we haven't exceeded max attempts to process it
        attempt_key = self.get_attempt_key(instance_pk=instance_pk)
        attempt = redis.incr(attempt_key)
        # Counter shares the lock TTL so stale attempts eventually reset.
        redis.expire(attempt_key, self.lock_expire)

        logger.info('Processing of %s[pk=%s] in Instance[pk=%s]. Attempt #%d.',
                    self.model_class.__name__, obj.pk, instance_pk, attempt)

        try:
            # process_object may return False to mean "do not persist".
            if self.process_object(obj, **kwargs) is not False:
                self.save_object(obj)
        except ObjectProcessingError as exc:
            # Retriable domain error: bail out WITHOUT clearing the attempt
            # counter so the next run counts against max_attempts.
            if attempt < self.max_attempts and exc.retry:
                logger.warning(
                    'ProcessingError during processing of %s[pk=%s] in Instance[pk=%s]. Retrying.',
                    self.model_class.__name__,
                    obj.pk,
                    instance_pk,
                    exc_info=1)
                return

            logger.warning(
                'ProcessingError during processing of %s[pk=%s] in Instance[pk=%s].',
                self.model_class.__name__,
                obj.pk,
                instance_pk,
                exc_info=1)
            # Final failure: hand off, then fall through to reset the counter.
            self.handle_exception(obj, exc)
        except Exception as exc:
            # Return if encountered unexpected error. We will retry in after lock handler.
            if attempt < self.max_attempts:
                logger.warning(
                    'Unhandled error during processing of %s[pk=%s] in Instance[pk=%s]. Retrying.',
                    self.model_class.__name__,
                    obj.pk,
                    instance_pk,
                    exc_info=1)
                # Linear backoff: delay grows with the attempt number.
                self.countdown = attempt * self.default_retry_delay
                return
            # Otherwise if we reached max attempts - log it
            logger.error(
                'Unhandled error during processing of %s[pk=%s] in Instance[pk=%s].',
                self.model_class.__name__,
                obj.pk,
                instance_pk,
                exc_info=1)
            self.handle_exception(obj, exc)

        # Reached on success AND after handle_exception (the retry paths
        # returned earlier) - either way we're done with this object, so
        # reset the attempt counter.
        redis.delete(attempt_key)