Example 1
def create_gs_thumbnail_mapstory_tx_aware(instance, overwrite):
    # if this is a map (i.e. multiple layers), hand off to the original implementation
    if instance.class_name == 'Map':
        return create_gs_thumbnail_geonode(instance, overwrite)
    # because the layer hasn't actually been committed yet, we don't create the thumbnail until the transaction commits
    # if the task were to run now, it wouldn't be able to retrieve the layer from the database
    connection.on_commit(lambda: run_task(instance.pk, overwrite))
Example 2
    def test_db_query_in_hook(self, track):
        with atomic():
            Thing.objects.create(num=1)
            connection.on_commit(
                lambda: [track.notify(t.num) for t in Thing.objects.all()])

        track.assert_done([1])
Example 3
def create_gs_thumbnail_mapstory_tx_aware(instance, overwrite):
    if instance.class_name == "Map":
        return create_gs_thumbnail_geonode(instance, overwrite)

    # because the layer hasn't actually been committed yet, we don't create the thumbnail until the transaction commits
    # if the task were to run now, it wouldn't be able to retrieve the layer from the database
    connection.on_commit(lambda: run_task(instance.pk, overwrite))
Example 4
def enqueue_task(action, instance, **kwargs):
    """
    Common utility for enqueuing a task for the given action and
    model instance.
    """
    identifier = get_identifier(instance)
    options = {}
    if settings.CELERY_HAYSTACK_QUEUE:
        options['queue'] = settings.CELERY_HAYSTACK_QUEUE
    if settings.CELERY_HAYSTACK_COUNTDOWN:
        options['countdown'] = settings.CELERY_HAYSTACK_COUNTDOWN

    task = get_update_task()

    def task_func():
        return task.apply_async((action, identifier), kwargs, **options)

    if hasattr(transaction, 'on_commit'):
        # Django 1.9 on_commit hook
        transaction.on_commit(
            task_func
        )
    elif hasattr(connection, 'on_commit'):
        # Django-transaction-hooks
        connection.on_commit(
            task_func
        )
    else:
        task_func()
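
Example 4 above feature-detects the commit hook so the same helper works everywhere: on Django 1.9+ it uses transaction.on_commit, on older versions patched with django-transaction-hooks it uses connection.on_commit, and with neither it dispatches the task immediately. On current Django the shim is unnecessary; the following is a minimal sketch of the plain call (the callback is just a stand-in for a real task dispatch):

from django.db import transaction

def announce_commit():
    # stand-in for dispatching a Celery task or other side effect
    print("transaction committed; safe to run side effects")

# Django >= 1.9: the callback runs after the surrounding transaction commits;
# in autocommit mode (no open transaction) it runs immediately.
transaction.on_commit(announce_commit)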
Example 5
File: forms.py Project: arky/pootle
 def save(self, response_url=None, commit=True):
     tp = self.instance
     initialize_from_templates = False
     if tp.id is None:
         initialize_from_templates = tp.can_be_inited_from_templates()
     tp = super(TranslationProjectForm, self).save(commit)
     project = tp.project
     config = ObjectConfig(project)
     mappings = config.get("pootle.core.lang_mapping", {})
     mappings = dict((v, k) for k, v in mappings.iteritems())
     if not self.cleaned_data["fs_code"]:
         if tp.language.code in mappings:
             del mappings[tp.language.code]
     else:
         mappings[tp.language.code] = self.cleaned_data["fs_code"]
     config["pootle.core.lang_mapping"] = dict(
         (v, k) for k, v in mappings.iteritems())
     if initialize_from_templates:
         def _enqueue_job():
             queue = get_queue('default')
             queue.enqueue(
                 update_translation_project,
                 tp,
                 response_url)
         connection.on_commit(_enqueue_job)
     return tp
Example 6
def on_new_history_entry(sender, instance, created, **kwargs):
    if not settings.WEBHOOKS_ENABLED:
        return None

    if instance.is_hidden:
        return None

    model = history_service.get_model_from_key(instance.key)
    pk = history_service.get_pk_from_key(instance.key)
    obj = model.objects.get(pk=pk)

    webhooks = _get_project_webhooks(obj.project)

    if instance.type == HistoryType.create:
        task = tasks.create_webhook
        extra_args = []
    elif instance.type == HistoryType.change:
        task = tasks.change_webhook
        extra_args = [instance]
    elif instance.type == HistoryType.delete:
        task = tasks.delete_webhook
        extra_args = [timezone.now()]

    for webhook in webhooks:
        args = [webhook["id"], webhook["url"], webhook["key"], obj] + extra_args

        if settings.CELERY_ENABLED:
            connection.on_commit(lambda: task.delay(*args))
        else:
            connection.on_commit(lambda: task(*args))
Example 7
def _push_to_timelines(project, user, obj, event_type, created_datetime, extra_data={}):
    project_id = None if project is None else project.id

    ct = ContentType.objects.get_for_model(obj)
    if settings.CELERY_ENABLED:
        connection.on_commit(lambda: push_to_timelines.delay(project_id, user.id, ct.app_label, ct.model, obj.id, event_type, created_datetime, extra_data=extra_data))
    else:
        push_to_timelines(project_id, user.id, ct.app_label, ct.model, obj.id, event_type, created_datetime, extra_data=extra_data)
Example 8
def create_update_cache_job_wrapper(instance, keys, decrement=1):
    queue = get_queue('default')
    if queue._async:

        def _create_update_cache_job():
            create_update_cache_job(queue, instance, keys, decrement=decrement)
        connection.on_commit(_create_update_cache_job)
    else:
        instance._update_cache_job(keys, decrement=decrement)
Example 9
    def test_save_object_in_hook(self, track):
        with atomic():
            def on_commit():
                t = Thing(num=1)
                t.save()
                track.notify(t.num)
                
            connection.on_commit(on_commit)

        track.assert_done([1])
Example 10
    def test_transaction_in_hook(self, track):
        def on_commit():
            with atomic():
                t = Thing.objects.create(num=1)
                track.notify(t.num)

        with atomic():
            connection.on_commit(on_commit)

        track.assert_done([1])
Example 11
    def test_error_in_hook_doesnt_prevent_clearing_hooks(self, track):
        try:
            with atomic():
                connection.on_commit(lambda: track.notify('error'))
        except ForcedError:
            pass

        with atomic():
            track.do(1)

        track.assert_done([1])
Example 12
def on_delete_any_model(sender, instance, **kwargs):
    # Ignore any object that can not have project_id
    content_type = get_typename_for_model_instance(instance)

    # Ignore any other changes
    if content_type not in events.watched_types:
        return

    sesionid = mw.get_current_session_id()
    emit_event = lambda: events.emit_event_for_model(instance, sessionid=sesionid, type="delete")
    connection.on_commit(emit_event)
Example 13
 def post_save(self, instance, **kwargs):
     #print('<<<<<<<<<<<<')
     #print(kwargs.get('update_fields', None))
     kwargs = {key:getattr(instance, key) for key in self.keys}
     key = self.make_key(**kwargs)
     def update_cache():
         new_obj = self.compute_obj(**kwargs)
         self.cache.set(key, new_obj, timeout=self.timeout)
         #if set(kwargs.get('update_fields', set())).intersect(self.keys):
             
     connection.on_commit(update_cache)
Example 14
def action_post_save_fanout(sender, instance, created, **kwargs):
    """
    Fanout action if new instance saved
    """
    if created:
        # Fanout action (populate streams)
        if hasattr(connection, 'on_commit'):
            # Use django-transaction-hooks to trigger the task after the transaction commits
            connection.on_commit(lambda: fanout_action.delay(instance.pk))
        else:
            fanout_action.delay(instance.pk)
Example 15
def _delete_file(file_obj):
    def delete_from_storage():
        try:
            cleanup_pre_delete.send(sender=None, file=file_obj)
            storage.delete(file_obj.name)
            cleanup_post_delete.send(sender=None, file=file_obj)
        except Exception:
            logger.exception("Unexpected exception while attempting "
                             "to delete old file '%s'".format(file_obj.name))

    storage = file_obj.storage
    if storage and storage.exists(file_obj.name):
        connection.on_commit(delete_from_storage)
Example 16
    def test_call_cacheops_cbs_before_on_commit_cbs(self):
        calls = []

        with atomic():
            def django_commit_handler():
                calls.append('django')
            connection.on_commit(django_commit_handler)

            @queue_when_in_transaction
            def cacheops_commit_handler(using):
                calls.append('cacheops')
            cacheops_commit_handler('default')

        self.assertEqual(calls, ['cacheops', 'django'])
Example 17
    def save(self, response_url, commit=True):
        tp = self.instance
        initialize_from_templates = False
        if tp.id is None:
            initialize_from_templates = tp.can_be_inited_from_templates()
        tp = super(TranslationProjectForm, self).save(commit)

        def _enqueue_job():
            queue = get_queue('default')
            queue.enqueue(update_translation_project,
                          tp, initialize_from_templates,
                          response_url)
        connection.on_commit(_enqueue_job)
        return tp
Example 18
    def apply_async(self, *args, **kwargs):
        # Delay the task unless the client requested otherwise or transactions
        # aren't being managed (i.e. the signal handlers won't send the task).
        using = kwargs['using'] if 'using' in kwargs else None
        con = transaction.get_connection(using)

        if con.get_autocommit() or con.in_atomic_block:
            if not transaction.is_dirty():
                # Always mark the transaction as dirty
                # because we push task in queue that must be fired or discarded
                transaction.set_dirty(using=using)

            task = lambda: self.original_apply_async(*args, **kwargs)
            connection.on_commit(task)
        else:
            return self.original_apply_async(*args, **kwargs)
Example 19
def create_subsequent_tasks(project):
    """
    Create tasks for a given project whose dependencies have been
    completed.

    Args:
        project (orchestra.models.Project):
            The project for which to create tasks.

    Returns:
        project (orchestra.models.Project):
            The modified project object.
    """
    workflow_version = project.workflow_version
    all_steps = workflow_version.steps.all()

    # get all completed tasks associated with a given project
    completed_tasks = Task.objects.filter(status=Task.Status.COMPLETE,
                                          project=project)
    completed_step_slugs = set(completed_tasks.values_list('step__slug',
                                                           flat=True))

    machine_tasks_to_schedule = []
    for step in all_steps:
        if step.slug in completed_step_slugs or Task.objects.filter(
                project=project, step=step).exists():
            continue

        if _are_desired_steps_completed_on_project(
                step.creation_depends_on, completed_tasks=completed_tasks):
            if _check_creation_policy(step, project):
                # create new task and task_assignment
                task = Task(step=step,
                            project=project,
                            status=Task.Status.AWAITING_PROCESSING)
                task.save()

                _preassign_workers(task, AssignmentPolicyType.ENTRY_LEVEL)

                if not step.is_human:
                    machine_tasks_to_schedule.append(step)

    if len(machine_tasks_to_schedule) > 0:
        connection.on_commit(lambda: schedule_machine_tasks(
            project, machine_tasks_to_schedule))
Example 20
def enqueue_task(action, instance):
    """
    Common utility for enqueuing a task for the given action and
    model instance.
    """
    identifier = get_identifier(instance)
    kwargs = {}
    if settings.CELERY_HAYSTACK_QUEUE:
        kwargs['queue'] = settings.CELERY_HAYSTACK_QUEUE
    if settings.CELERY_HAYSTACK_COUNTDOWN:
        kwargs['countdown'] = settings.CELERY_HAYSTACK_COUNTDOWN
    task = get_update_task()
    if hasattr(connection, 'on_commit'):
        connection.on_commit(
            lambda: task.apply_async((action, identifier), {}, **kwargs)
        )
    else:
        task.apply_async((action, identifier), {}, **kwargs)
Example 21
def on_save_any_model(sender, instance, created, **kwargs):
    # Ignore any object that can not have project_id
    if not hasattr(instance, "project_id"):
        return
    content_type = get_typename_for_model_instance(instance)

    # Ignore any other events
    if content_type not in events.watched_types:
        return

    sesionid = mw.get_current_session_id()

    type = "change"
    if created:
        type = "create"

    emit_event = lambda: events.emit_event_for_model(instance, sessionid=sesionid, type=type)
    connection.on_commit(emit_event)
Example 22
def emit_event(data:dict, routing_key:str, *,
               sessionid:str=None, channel:str="events",
               on_commit:bool=True):
    if not sessionid:
        sessionid = mw.get_current_session_id()

    data = {"session_id": sessionid,
            "data": data}

    backend = backends.get_events_backend()

    def backend_emit_event():
        backend.emit_event(message=json.dumps(data), routing_key=routing_key, channel=channel)

    if on_commit:
        connection.on_commit(backend_emit_event)
    else:
        backend_emit_event()
Example 23
def emit_event(data: dict,
               routing_key: str,
               *,
               sessionid: str = None,
               channel: str = "events",
               on_commit: bool = True):
    if not sessionid:
        sessionid = mw.get_current_session_id()

    data = {"session_id": sessionid, "data": data}

    backend = backends.get_events_backend()

    def backend_emit_event():
        backend.emit_event(message=json.dumps(data),
                           routing_key=routing_key,
                           channel=channel)

    if on_commit:
        connection.on_commit(backend_emit_event)
    else:
        backend_emit_event()
Example 24
 def process_finished_workflow_jobs(self, workflow_jobs):
     result = []
     for workflow_job in workflow_jobs:
         dag = WorkflowDAG(workflow_job)
         if workflow_job.cancel_flag:
             workflow_job.status = 'canceled'
             workflow_job.save()
             dag.cancel_node_jobs()
             connection.on_commit(
                 lambda: workflow_job.websocket_emit_status(workflow_job.
                                                            status))
         else:
             is_done, has_failed = dag.is_workflow_done()
             if not is_done:
                 continue
             result.append(workflow_job.id)
             workflow_job.status = 'failed' if has_failed else 'successful'
             workflow_job.save()
             connection.on_commit(
                 lambda: workflow_job.websocket_emit_status(workflow_job.
                                                            status))
     return result
Example 25
def study_update_ice(sender, instance, created, raw, using, **kwargs):
    """
    Checks whether the study has been renamed by comparing its current name with the one set in
    study_name_change_check. If it has, and if the study is associated with any ICE strains,
    updates the corresponding ICE entry(ies) to label links to this study with its new name.
    """
    if check_ice_cannot_proceed(raw):
        return
    if hasattr(instance,
               'pre_save_name') and instance.name == instance.pre_save_name:
        return
    eligible = Q(line__study_id=instance.pk,
                 registry_url__isnull=False,
                 registry_id__isnull=False)
    with transaction.atomic(savepoint=False):
        strains = edd_models.Strain.objects.filter(eligible).distinct()
        strains_to_link = set(strains.values_list('id', flat=True))
    if strains_to_link:
        partial = functools.partial(submit_ice_link, instance, strains_to_link)
        connection.on_commit(partial)
        logger.info("Save to study %d updating %d strains in ICE", instance.pk,
                    len(strains_to_link))
Example 26
    def signal_start(self, **kwargs):
        """Notify the task runner system to begin work on this task."""

        # Sanity check: Are we able to start the job? If not, do not attempt
        # to do so.
        if not self.can_start:
            return False

        # Get any passwords or other data that are prerequisites to running
        # the job.
        needed = self.get_passwords_needed_to_start()
        opts = dict([(field, kwargs.get(field, '')) for field in needed])
        if not all(opts.values()):
            return False

        # Sanity check: If we are running unit tests, then run synchronously.
        if getattr(settings, 'CELERY_UNIT_TEST', False):
            return self.start(None, None, **kwargs)

        # Save the pending status, and inform the SocketIO listener.
        self.update_fields(start_args=json.dumps(kwargs), status='pending')
        self.websocket_emit_status("pending")

        from awx.main.scheduler.tasks import run_job_launch
        connection.on_commit(lambda: run_job_launch.delay(self.id))

        # Each type of unified job has a different Task class; get the
        # appropriate one.
        # task_type = get_type_for_model(self)

        # Actually tell the task runner to run this task.
        # FIXME: This will deadlock the task runner
        #from awx.main.tasks import notify_task_runner
        #notify_task_runner.delay({'id': self.id, 'metadata': kwargs,
        #                          'task_type': task_type})

        # Done!
        return True
Example 27
    def start_task(self, task, dependent_tasks=[]):
        from cyborgbackup.main.tasks import handle_work_error, handle_work_success

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
        success_handler = handle_work_success.s(task_actual=task_actual)

        task.status = 'waiting'
        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            logger.info('Submitting %s to instance group cyborgbackup.', task.log_format)
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            self.consume_capacity(task, 'cyborgbackup')

        def post_commit():
            task.websocket_emit_status(task.status)
            if task.status != 'failed':
                task.start_celery_task(opts,
                                       error_callback=error_handler,
                                       success_callback=success_handler,
                                       queue='cyborgbackup')

        connection.on_commit(post_commit)
Example 28
    def put(self):
        profile = self.profile
        ser = self.serializer(self.request,
                              profile,
                              data=self.data,
                              partial=True)

        if not ser.is_valid():
            return FailureTaskResponse(self.request,
                                       ser.errors,
                                       obj=profile,
                                       dc_bound=False)

        ser.save()
        connection.on_commit(lambda: user_relationship_changed.send(
            user_name=ser.object.user.username))
        return SuccessTaskResponse(self.request,
                                   ser.data,
                                   obj=self.user,
                                   detail_dict=ser.detail_dict(),
                                   owner=ser.object.user,
                                   msg=LOG_PROFILE_UPDATE,
                                   dc_bound=False)
Example 29
def on_new_history_entry(sender, instance, created, **kwargs):
    if not settings.WEBHOOKS_ENABLED:
        return None

    if instance.is_hidden:
        return None

    model = history_service.get_model_from_key(instance.key)
    pk = history_service.get_pk_from_key(instance.key)
    try:
        obj = model.objects.get(pk=pk)
    except model.DoesNotExist:
        # Catch simultaneous DELETE request
        return None

    webhooks = _get_project_webhooks(obj.project)

    if instance.type == HistoryType.create:
        task = tasks.create_webhook
        extra_args = []
    elif instance.type == HistoryType.change:
        task = tasks.change_webhook
        extra_args = [instance]
    elif instance.type == HistoryType.delete:
        task = tasks.delete_webhook
        extra_args = []

    by = instance.owner
    date = timezone.now()

    webhooks_args = []
    for webhook in webhooks:
        args = [webhook["id"], webhook["url"], webhook["key"], by, date, obj
                ] + extra_args
        webhooks_args.append(args)

    connection.on_commit(lambda: _execute_task(task, webhooks_args))
Example 30
 def process_finished_workflow_jobs(self, workflow_jobs):
     result = []
     for workflow_job in workflow_jobs:
         dag = WorkflowDAG(workflow_job)
         if workflow_job.cancel_flag:
             logger.debug(
                 'Canceling spawned jobs of %s due to cancel flag.',
                 workflow_job.log_format)
             cancel_finished = dag.cancel_node_jobs()
             if cancel_finished:
                 logger.info(
                     'Marking %s as canceled, all spawned jobs have concluded.',
                     workflow_job.log_format)
                 workflow_job.status = 'canceled'
                 workflow_job.start_args = ''  # blank field to remove encrypted passwords
                 workflow_job.save(update_fields=['status', 'start_args'])
                 connection.on_commit(
                     lambda: workflow_job.websocket_emit_status(workflow_job
                                                                .status))
         else:
             is_done, has_failed = dag.is_workflow_done()
             if not is_done:
                 continue
             logger.info('Marking %s as %s.', workflow_job.log_format,
                         'failed' if has_failed else 'successful')
             result.append(workflow_job.id)
             new_status = 'failed' if has_failed else 'successful'
             logger.debug(
                 six.text_type("Transitioning {} to {} status.").format(
                     workflow_job.log_format, new_status))
             workflow_job.status = new_status
             workflow_job.start_args = ''  # blank field to remove encrypted passwords
             workflow_job.save(update_fields=['status', 'start_args'])
             connection.on_commit(
                 lambda: workflow_job.websocket_emit_status(workflow_job.
                                                            status))
     return result
Example 31
def on_new_history_entry(sender, instance, created, **kwargs):
    if not settings.WEBHOOKS_ENABLED:
        return None

    if instance.is_hidden:
        return None

    model = history_service.get_model_from_key(instance.key)
    pk = history_service.get_pk_from_key(instance.key)
    try:
        obj = model.objects.get(pk=pk)
    except model.DoesNotExist:
        # Catch simultaneous DELETE request
        return None

    webhooks = _get_project_webhooks(obj.project)

    if instance.type == HistoryType.create:
        task = tasks.create_webhook
        extra_args = []
    elif instance.type == HistoryType.change:
        task = tasks.change_webhook
        extra_args = [instance]
    elif instance.type == HistoryType.delete:
        task = tasks.delete_webhook
        extra_args = []

    by = instance.owner
    date = timezone.now()

    for webhook in webhooks:
        args = [webhook["id"], webhook["url"], webhook["key"], by, date, obj] + extra_args

        if settings.CELERY_ENABLED:
            connection.on_commit(lambda: task.delay(*args))
        else:
            connection.on_commit(lambda: task(*args))
Example 32
File: core.py Project: JBEI/edd
def study_update_ice(sender, instance, created, raw, using, **kwargs):
    """
    Checks whether the study has been renamed. If it has, and if the study
    is associated with any ICE strains, updates those ICE entries to label
    links to this study with its new name.
    """
    if check_ice_cannot_proceed():
        # abort when no ICE configured
        return
    if raw:
        # cannot access database when doing raw signal
        return
    if getattr(instance, "_pre_save_name", instance.name) == instance.name:
        # abort if no change detected in name
        return
    eligible = Q(
        line__study_id=instance.pk,
        registry_url__isnull=False,
        registry_id__isnull=False,
    )
    queryset = models.Strain.objects.filter(eligible).distinct()
    to_link = set(queryset.values_list("id", flat=True))
    partial = functools.partial(submit_ice_link, instance.pk, to_link)
    connection.on_commit(partial)
Example 33
    def send_notification_templates(self, status):
        from awx.main.tasks import send_notifications  # avoid circular import
        if status not in ['running', 'succeeded', 'failed']:
            raise ValueError(_("status must be either running, succeeded or failed"))
        try:
            notification_templates = self.get_notification_templates()
        except Exception:
            logger.warn("No notification template defined for emitting notification")
            return

        if not notification_templates:
            return

        for nt in set(notification_templates.get(self.STATUS_TO_TEMPLATE_TYPE[status], [])):
            (msg, body) = self.build_notification_message(nt, status)

            # Use kwargs to force late-binding
            # https://stackoverflow.com/a/3431699/10669572
            def send_it(local_nt=nt, local_msg=msg, local_body=body):
                def _func():
                    send_notifications.delay([local_nt.generate_notification(local_msg, local_body).id],
                                             job_id=self.id)
                return _func
            connection.on_commit(send_it())
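
The send_it wrapper in Example 33 exists because Python closures capture variables by reference, not by value: a plain lambda registered inside a loop (as in Examples 5, 24, 30, 31 and 38) sees whatever the loop variable holds when the commit finally happens, so every deferred callback fires with the last iteration's values. Binding through default arguments, or through functools.partial as Examples 25, 32, 56 and 57 do, freezes the values at registration time. A minimal sketch of the difference, independent of Django:

import functools

hooks = []
for n in range(3):
    hooks.append(lambda: print("late-bound:", n))           # all three print 2
    hooks.append(lambda n=n: print("default-arg:", n))      # prints 0, 1, 2
    hooks.append(functools.partial(print, "partial:", n))   # prints 0, 1, 2

# simulate the commit phase running the registered callbacks
for hook in hooks:
    hook()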
Example 34
def enqueue_task(action, instance, **kwargs):
    """
    Common utility for enqueuing a task for the given action and
    model instance.
    """
    identifier = get_identifier(instance)
    options = {}
    if settings.CELERY_HAYSTACK_QUEUE:
        options['queue'] = settings.CELERY_HAYSTACK_QUEUE
    if settings.CELERY_HAYSTACK_COUNTDOWN:
        options['countdown'] = settings.CELERY_HAYSTACK_COUNTDOWN

    task = get_update_task()
    task_func = lambda: task.apply_async(
        (action, identifier), kwargs, **options)

    if hasattr(transaction, 'on_commit'):
        # Django 1.9 on_commit hook
        transaction.on_commit(task_func)
    elif hasattr(connection, 'on_commit'):
        # Django-transaction-hooks
        connection.on_commit(task_func)
    else:
        task_func()
Example 35
 def perform_update(self, serializer):
     settings_qs = self.get_queryset()
     user = self.request.user if self.category_slug == 'user' else None
     settings_change_list = []
     for key, value in serializer.validated_data.items():
         if key == 'LICENSE' or settings_registry.is_setting_read_only(key):
             continue
         if settings_registry.is_setting_encrypted(key) and isinstance(
                 value, str) and value.startswith('$encrypted$'):
             continue
         setattr(serializer.instance, key, value)
         setting = settings_qs.filter(key=key).order_by('pk').first()
         if not setting:
             setting = Setting.objects.create(key=key,
                                              user=user,
                                              value=value)
             settings_change_list.append(key)
         elif setting.value != value:
             setting.value = value
             setting.save(update_fields=['value'])
             settings_change_list.append(key)
     if settings_change_list:
         connection.on_commit(
             lambda: handle_setting_changes.delay(settings_change_list))
Example 36
def activity_stream_create(sender, instance, created, **kwargs):
    if created and activity_stream_enabled:
        _type = type(instance)
        if getattr(_type, '_deferred', False):
            return
        object1 = camelcase_to_underscore(instance.__class__.__name__)
        changes = model_to_dict(instance, model_serializer_mapping())
        # Special case where Job survey password variables need to be hidden
        if type(instance) == Job:
            changes['credentials'] = [
                '{} ({})'.format(c.name, c.id)
                for c in instance.credentials.iterator()
            ]
            changes['labels'] = [
                label.name for label in instance.labels.iterator()
            ]
            if 'extra_vars' in changes:
                changes['extra_vars'] = instance.display_extra_vars()
        if type(instance) == OAuth2AccessToken:
            changes['token'] = CENSOR_VALUE
        activity_entry = get_activity_stream_class()(
            operation='create',
            object1=object1,
            changes=json.dumps(changes),
            actor=get_current_user_or_none())
        #TODO: Weird situation where cascade SETNULL doesn't work
        #      it might actually be a good idea to remove all of these FK references since
        #      we don't really use them anyway.
        if instance._meta.model_name != 'setting':  # Is not conf.Setting instance
            activity_entry.save()
            getattr(activity_entry, object1).add(instance.pk)
        else:
            activity_entry.setting = conf_to_dict(instance)
            activity_entry.save()
        connection.on_commit(
            lambda: emit_activity_stream_change(activity_entry))
Example 37
    def post(self):
        dc, group = self.dc, self.role

        if group.dc_set.filter(id=dc.id).exists():
            raise ObjectAlreadyExists(model=Role)

        ser = self.serializer(self.request, group)
        group.dc_set.add(dc)
        res = SuccessTaskResponse(self.request,
                                  ser.data,
                                  obj=group,
                                  status=status.HTTP_201_CREATED,
                                  detail_dict=ser.detail_dict(),
                                  msg=LOG_GROUP_ATTACH)
        task_id = res.data.get('task_id')
        connection.on_commit(lambda: group_relationship_changed.send(
            task_id,
            group_name=group.name,  # Signal!
            dc_name=dc.name))
        self._remove_dc_binding(task_id)
        self._remove_user_dc_binding(task_id)
        self._update_affected_users()

        return res
Example 38
 def spawn_workflow_graph_jobs(self, workflow_jobs):
     for workflow_job in workflow_jobs:
         dag = WorkflowDAG(workflow_job)
         spawn_nodes = dag.bfs_nodes_to_run()
         for spawn_node in spawn_nodes:
             if spawn_node.unified_job_template is None:
                 continue
             kv = spawn_node.get_job_kwargs()
             job = spawn_node.unified_job_template.create_unified_job(**kv)
             spawn_node.job = job
             spawn_node.save()
             if job._resources_sufficient_for_launch():
                 can_start = job.signal_start(**kv)
                 if not can_start:
                     job.job_explanation = _("Job spawned from workflow could not start because it "
                                             "was not in the right state or required manual credentials")
             else:
                 can_start = False
                 job.job_explanation = _("Job spawned from workflow could not start because it "
                                         "was missing a related resource such as project or inventory")
             if not can_start:
                 job.status = 'failed'
                 job.save(update_fields=['status', 'job_explanation'])
                 connection.on_commit(lambda: job.websocket_emit_status('failed'))
Example 39
def create_mapstory_thumbnail_tx_aware(instance, overwrite):
    connection.on_commit(lambda: run_task_story(instance.pk, overwrite))
Example 40
def defer(f, *args, **kwargs):
    with atomic():
        connection.on_commit(lambda: f(*args, **kwargs))
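
defer in Example 40 registers the callback from inside its own atomic() block: when an outer transaction is already open the callback still waits for the outermost commit, and in autocommit mode the short-lived atomic block commits on exit so the callback fires right away. A minimal usage sketch, assuming the same defer helper and a configured Django connection (notify is a hypothetical placeholder):

def notify(user_id, message):
    # placeholder side effect; in practice this might enqueue a task
    print("notify user %s: %s" % (user_id, message))

# runs after the enclosing transaction commits, or right away in autocommit mode
defer(notify, 42, message="profile saved")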
Example 41
 def websocket_emit_status(self, status):
     connection.on_commit(lambda: self._websocket_emit_status(status))
Example 42
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
            # schedule another run immediately after this task manager
            schedule_task_manager()
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.', task.log_format)
                schedule_task_manager()
            elif rampart_group.is_container_group:
                task.instance_group = rampart_group
                if task.capacity_type == 'execution':
                    # find one real, non-containerized instance with capacity to
                    # act as the controller for k8s API interaction
                    try:
                        task.controller_node = Instance.choose_online_control_plane_node()
                        task.log_lifecycle("controller_node_chosen")
                    except IndexError:
                        logger.warning("No control plane nodes available to run containerized job {}".format(task.log_format))
                        return
                else:
                    # project updates and system jobs don't *actually* run in pods, so
                    # just pick *any* non-containerized host and use it as the execution node
                    task.execution_node = Instance.choose_online_control_plane_node()
                    task.log_lifecycle("execution_node_chosen")
                    logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
            else:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.log_lifecycle("execution_node_chosen")
                if instance.node_type == 'execution':
                    try:
                        task.controller_node = Instance.choose_online_control_plane_node()
                        task.log_lifecycle("controller_node_chosen")
                    except IndexError:
                        logger.warning("No control plane nodes available to manage {}".format(task.log_format))
                        return
                else:
                    # control plane nodes will manage jobs locally for performance and resilience
                    task.controller_node = task.execution_node
                    task.log_lifecycle("controller_node_chosen")
                logger.debug('Submitting job {} to queue {} controlled by {}.'.format(task.log_format, task.execution_node, task.controller_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()
                task.log_lifecycle("waiting")

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name, instance=instance)

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                # Before task is dispatched, ensure that job_event partitions exist
                create_partition(task.event_class._meta.db_table, start=task.created)
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
                    errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
                )

        task.websocket_emit_status(task.status)  # adds to on_commit
        connection.on_commit(post_commit)
Example 43
def activity_stream_associate(sender, instance, **kwargs):
    if not activity_stream_enabled:
        return
    if kwargs['action'] in ['pre_add', 'pre_remove']:
        if kwargs['action'] == 'pre_add':
            action = 'associate'
        elif kwargs['action'] == 'pre_remove':
            action = 'disassociate'
        else:
            return
        obj1 = instance
        _type = type(instance)
        if getattr(_type, '_deferred', False):
            return
        object1 = camelcase_to_underscore(obj1.__class__.__name__)
        obj_rel = sender.__module__ + "." + sender.__name__

        for entity_acted in kwargs['pk_set']:
            obj2 = kwargs['model']
            obj2_id = entity_acted
            obj2_actual = obj2.objects.filter(id=obj2_id)
            if not obj2_actual.exists():
                continue
            obj2_actual = obj2_actual[0]
            _type = type(obj2_actual)
            if getattr(_type, '_deferred', False):
                return
            if isinstance(obj2_actual,
                          Role) and obj2_actual.content_object is not None:
                obj2_actual = obj2_actual.content_object
                object2 = camelcase_to_underscore(
                    obj2_actual.__class__.__name__)
            else:
                object2 = camelcase_to_underscore(obj2.__name__)
            # Skip recording any inventory source, or system job template changes here.
            if isinstance(obj1, InventorySource) or isinstance(
                    obj2_actual, InventorySource):
                continue
            if isinstance(obj1, SystemJobTemplate) or isinstance(
                    obj2_actual, SystemJobTemplate):
                continue
            if isinstance(obj1, SystemJob) or isinstance(
                    obj2_actual, SystemJob):
                continue
            activity_entry = get_activity_stream_class()(
                changes=json.dumps(
                    dict(object1=object1,
                         object1_pk=obj1.pk,
                         object2=object2,
                         object2_pk=obj2_id,
                         action=action,
                         relationship=obj_rel)),
                operation=action,
                object1=object1,
                object2=object2,
                object_relationship_type=obj_rel,
                actor=get_current_user_or_none())
            activity_entry.save()
            getattr(activity_entry, object1).add(obj1.pk)
            getattr(activity_entry, object2).add(obj2_actual.pk)

            # Record the role for RBAC changes
            if 'role' in kwargs:
                role = kwargs['role']
                if role.content_object is not None:
                    obj_rel = '.'.join([
                        role.content_object.__module__,
                        role.content_object.__class__.__name__, role.role_field
                    ])

                # If the m2m is from the User side we need to
                # set the content_object of the Role for our entry.
                if type(instance) == User and role.content_object is not None:
                    getattr(activity_entry,
                            role.content_type.name.replace(' ', '_')).add(
                                role.content_object)

                activity_entry.role.add(role)
                activity_entry.object_relationship_type = obj_rel
                activity_entry.save()
            connection.on_commit(
                lambda: emit_activity_stream_change(activity_entry))
Example 44
def schedule_policy_task():
    from awx.main.tasks import apply_cluster_membership_policies
    connection.on_commit(
        lambda: apply_cluster_membership_policies.apply_async())
Example 45
    def group_modify(self, update=False):
        group = self.group
        request = self.request

        if update:
            # Users who are no longer assigned to the group will be removed, so store the current members first;
            # we need them later to update each removed user's task log so they can see they were removed from the group
            original_group_users = set(
                group.user_set.select_related('dc_bound', 'default_dc').all())
        else:
            group.alias = group.name  # just a default
            original_group_users = set()

        ser = self.serializer(request, group, data=self.data, partial=update)

        if not ser.is_valid():
            return FailureTaskResponse(request,
                                       ser.errors,
                                       obj=group,
                                       dc_bound=False)

        ser.save()
        if update:
            msg = LOG_GROUP_UPDATE
            status = HTTP_200_OK
        else:
            msg = LOG_GROUP_CREATE
            status = HTTP_201_CREATED

        connection.on_commit(lambda: group_relationship_changed.send(
            group_name=ser.object.name))
        res = SuccessTaskResponse(request,
                                  ser.data,
                                  status=status,
                                  obj=group,
                                  msg=msg,
                                  detail_dict=ser.detail_dict(),
                                  dc_bound=False)

        # let's get the task_id so we use the same one for each log message
        task_id = res.data.get('task_id')
        removed_users = None

        if group.dc_bound and not update:
            attach_dc_virt_object(res.data.get('task_id'),
                                  LOG_GROUP_ATTACH,
                                  group,
                                  group.dc_bound,
                                  user=request.user)

        if ser.object._users_to_save is not None:
            # Update Users log that are attached to group
            current_users = set(ser.object._users_to_save)
            added_users = current_users - original_group_users
            removed_users = original_group_users - current_users
            affected_users = current_users.symmetric_difference(
                original_group_users)

            # Remove user.dc_bound flag for newly added users if group is attached to multiple DCs or
            #                                                          to one DC that is different from user.dc_bound
            if added_users:
                group_dcs_count = group.dc_set.count()

                if group_dcs_count >= 1:
                    if group_dcs_count == 1:
                        dc = group.dc_set.get()
                    else:
                        dc = None

                    for user in added_users:
                        remove_user_dc_binding(task_id, user, dc=dc)

            # Update Users that were removed from group or added to group
            for user in affected_users:
                detail = "groups='%s'" % ','.join(user.roles.all().values_list(
                    'name', flat=True))
                task_log_success(task_id,
                                 LOG_USER_UPDATE,
                                 obj=user,
                                 owner=user,
                                 update_user_tasks=False,
                                 detail=detail)

        # Permission or users for this group were changed, which may affect the cached list of DC admins for DCs which
        # are attached to this group. So we need to clear the list of admins cached for each affected DC
        if ser.object._permissions_to_save is not None or ser.object._users_to_save is not None:
            for dc in group.dc_set.all():
                User.clear_dc_admin_ids(dc)

            # Users removed from this group may lose access to DCs attached to it,
            # so reset each affected user's current_dc to their default_dc
            if removed_users:
                for user in removed_users:
                    if not user.is_staff:
                        user.reset_current_dc()

        return res
Example 46
def trigger_delayed_deep_copy(*args, **kwargs):
    from awx.main.tasks import deep_copy_model_obj
    connection.on_commit(lambda: deep_copy_model_obj.delay(*args, **kwargs))
Example 47
 def enqueue_save(self, sender, instance, created, **kwargs):
     def inner():
         if created:
             return self.enqueue('create', instance, sender, **kwargs)
         return self.enqueue('save', instance, sender, **kwargs)
     return connection.on_commit(inner)
Example 48
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        controller_node = None
        if task.supports_isolation() and rampart_group.controller_id:
            try:
                controller_node = rampart_group.choose_online_controller_node()
            except IndexError:
                logger.debug("No controllers available in group {} to run {}".format(
                             rampart_group.name, task.log_format))
                return

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                task.send_notification_templates('running')
                logger.debug('Transitioning %s to running status.', task.log_format)
                schedule_task_manager()
            elif not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
                logger.debug('Submitting isolated {} to queue {} on node {}.'.format(
                             task.log_format, task.instance_group.name, task.execution_node))
            elif controller_node:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.controller_node = controller_node
                logger.debug('Submitting isolated {} to queue {} controlled by {}.'.format(
                             task.log_format, task.execution_node, controller_node))
            elif rampart_group.is_containerized:
                # find one real, non-containerized instance with capacity to
                # act as the controller for k8s API interaction
                match = None
                for group in InstanceGroup.objects.all():
                    if group.is_containerized or group.controller_id:
                        continue
                    match = group.fit_task_to_most_remaining_capacity_instance(task)
                    if match:
                        break
                task.instance_group = rampart_group
                if match is None:
                    logger.warn(
                        'No available capacity to run containerized <{}>.'.format(task.log_format)
                    )
                else:
                    if task.supports_isolation():
                        task.controller_node = match.hostname
                    else:
                        # project updates and inventory updates don't *actually* run in pods,
                        # so just pick *any* non-isolated, non-containerized host and use it
                        # as the execution node
                        task.execution_node = match.hostname
                        logger.debug('Submitting containerized {} to queue {}.'.format(
                                     task.log_format, task.execution_node))
            else:
                task.instance_group = rampart_group
                if instance is not None:
                    task.execution_node = instance.hostname
                logger.debug('Submitting {} to <instance group, instance> <{},{}>.'.format(
                             task.log_format, task.instance_group_id, task.execution_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name)

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{
                        'task': handle_work_success.name,
                        'kwargs': {'task_actual': task_actual}
                    }],
                    errbacks=[{
                        'task': handle_work_error.name,
                        'args': [task.celery_task_id],
                        'kwargs': {'subtasks': [task_actual] + dependencies}
                    }],
                )

        task.websocket_emit_status(task.status)  # adds to on_commit
        connection.on_commit(post_commit)
Example 49
    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        controller_node = None
        if task.supports_isolation() and rampart_group.controller_id:
            try:
                controller_node = rampart_group.choose_online_controller_node()
            except IndexError:
                logger.debug(six.text_type("No controllers available in group {} to run {}").format(
                             rampart_group.name, task.log_format))
                return

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
        if not start_status:
            task.status = 'failed'
            if task.job_explanation:
                task.job_explanation += ' '
            task.job_explanation += 'Task failed pre-start check.'
            task.save()
            # TODO: run error handler to fail sub-tasks and send notifications
        else:
            if type(task) is WorkflowJob:
                task.status = 'running'
                logger.info('Transitioning %s to running status.', task.log_format)
            elif not task.supports_isolation() and rampart_group.controller_id:
                # non-Ansible jobs on isolated instances run on controller
                task.instance_group = rampart_group.controller
                task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
                logger.info(six.text_type('Submitting isolated {} to queue {}.').format(
                            task.log_format, task.instance_group.name, task.execution_node))
            elif controller_node:
                task.instance_group = rampart_group
                task.execution_node = instance.hostname
                task.controller_node = controller_node
                logger.info(six.text_type('Submitting isolated {} to queue {} controlled by {}.').format(
                            task.log_format, task.execution_node, controller_node))
            else:
                task.instance_group = rampart_group
                if instance is not None:
                    task.execution_node = instance.hostname
                logger.info(six.text_type('Submitting {} to <instance group, instance> <{},{}>.').format(
                            task.log_format, task.instance_group_id, task.execution_node))
            with disable_activity_stream():
                task.celery_task_id = str(uuid.uuid4())
                task.save()

            if rampart_group is not None:
                self.consume_capacity(task, rampart_group.name)

        def post_commit():
            task.websocket_emit_status(task.status)
            if task.status != 'failed' and type(task) is not WorkflowJob:
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
                    opts,
                    queue=task.get_queue_name(),
                    uuid=task.celery_task_id,
                    callbacks=[{
                        'task': handle_work_success.name,
                        'kwargs': {'task_actual': task_actual}
                    }],
                    errbacks=[{
                        'task': handle_work_error.name,
                        'args': [task.celery_task_id],
                        'kwargs': {'subtasks': [task_actual] + dependencies}
                    }],
                )

        connection.on_commit(post_commit)
Example 50
File: ha.py Project: wikivoks/awx
def on_instance_deleted(sender, instance, using, **kwargs):
    from awx.main.tasks import apply_cluster_membership_policies
    connection.on_commit(
        lambda: apply_cluster_membership_policies.apply_async())
Example 51
File: ha.py Project: wikivoks/awx
def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
    if created:
        from awx.main.tasks import apply_cluster_membership_policies
        connection.on_commit(
            lambda: apply_cluster_membership_policies.apply_async())
Example 52
def _schedule_task_manager():
    from awx.main.scheduler.tasks import run_task_manager
    from django.db import connection
    # runs right away if not in transaction
    connection.on_commit(lambda: run_task_manager.delay())
Example 53
 def post_delete(self, instance, **kwargs):
     kwargs = {key:getattr(instance, key) for key in self.keys}
     key = self.make_key(**kwargs)
     connection.on_commit(lambda: self.cache.set(key, self.DNE, timeout=self.timeout))
Example 54
 def do(self, num):
     """Create a Thing instance and notify about it."""
     Thing.objects.create(num=num)
     connection.on_commit(lambda: self.notify(num))
Example 55
def create_mapstory_thumbnail_tx_aware(instance, overwrite):
    connection.on_commit(lambda: run_task_story(instance.pk, overwrite))
Example 56
def index_type(sender, measurement_type, using, **kwargs):
    # only submit for indexing when the database key has a matching solr key
    if using in settings.EDD_MAIN_SOLR:
        # schedule the work for after the commit (or immediately if there's no transaction)
        connection.on_commit(
            functools.partial(index_update, type_index, [measurement_type]))
Example 57
def remove_user(sender, doc, using, **kwargs):
    # only submit for removal when the database key has a matching solr key
    if using in settings.EDD_MAIN_SOLR:
        # schedule the work for after the commit (or immediately if there's no transaction)
        connection.on_commit(
            functools.partial(index_remove, users_index, [doc]))
Example 58
    def _update_from_event_data(self):
        # Update event model fields from event data.
        event_data = self.event_data
        res = event_data.get('res', None)
        if self.event in self.FAILED_EVENTS and not event_data.get(
                'ignore_errors', False):
            self.failed = True
        if isinstance(res, dict):
            if res.get('changed', False):
                self.changed = True
        if self.event == 'playbook_on_stats':
            try:
                failures_dict = event_data.get('failures', {})
                dark_dict = event_data.get('dark', {})
                self.failed = bool(
                    sum(failures_dict.values()) + sum(dark_dict.values()))
                changed_dict = event_data.get('changed', {})
                self.changed = bool(sum(changed_dict.values()))
            except (AttributeError, TypeError):
                pass

            if isinstance(self, JobEvent):
                try:
                    job = self.job
                except ObjectDoesNotExist:
                    job = None
                if job:
                    hostnames = self._hostnames()
                    self._update_host_summary_from_stats(set(hostnames))
                    if job.inventory:
                        try:
                            job.inventory.update_computed_fields()
                        except DatabaseError:
                            logger.exception(
                                'Computed fields database error saving event {}'
                                .format(self.pk))

                    # find parent links and propagate changed=True and failed=True
                    changed = (
                        job.get_event_queryset().filter(changed=True).exclude(
                            parent_uuid=None).only('parent_uuid').values_list(
                                'parent_uuid', flat=True).distinct())  # noqa
                    failed = (
                        job.get_event_queryset().filter(failed=True).exclude(
                            parent_uuid=None).only('parent_uuid').values_list(
                                'parent_uuid', flat=True).distinct())  # noqa

                    job.get_event_queryset().filter(uuid__in=changed).update(
                        changed=True)
                    job.get_event_queryset().filter(uuid__in=failed).update(
                        failed=True)

                    # send success/failure notifications when we've finished handling the playbook_on_stats event
                    from awx.main.tasks.system import handle_success_and_failure_notifications  # circular import

                    def _send_notifications():
                        handle_success_and_failure_notifications.apply_async(
                            [job.id])

                    connection.on_commit(_send_notifications)

        for field in ('playbook', 'play', 'task', 'role'):
            value = force_text(event_data.get(field, '')).strip()
            if value != getattr(self, field):
                setattr(self, field, value)
        if settings.LOG_AGGREGATOR_ENABLED:
            analytics_logger.info(
                'Event data saved.',
                extra=dict(python_objects=dict(job_event=self)))
Example 59
 def delete(self, instance):
     kwargs = {key:getattr(instance, key) for key in self.keys}
     key = self.make_key(**kwargs)
     connection.on_commit(lambda: self.cache.delete(key))