Example #1
    def delete(self, request: HttpRequest, workflow_id: int):
        try:
            with Workflow.authorized_lookup_and_cooperative_lock(
                    "owner", request.user, request.session,
                    pk=workflow_id) as workflow:
                workflow.delete()

                if workflow.owner_id:
                    # We lock after delete, but it's still correct. DB commits
                    # are atomic: nothing is written yet.
                    lock_user_by_id(workflow.owner_id, for_write=True)
                    user_update = clientside.UserUpdate(
                        usage=query_user_usage(workflow.owner_id))
        except Workflow.DoesNotExist as err:
            if err.args[0] == "owner access denied":
                return JsonResponse(
                    {
                        "message": str(err),
                        "status_code": 403
                    },
                    status=status.FORBIDDEN,
                )
            else:
                return JsonResponse(
                    {
                        "message": "Workflow not found",
                        "status_code": 404
                    },
                    status=status.NOT_FOUND,
                )

        if workflow.owner_id:
            async_to_sync(rabbitmq.send_user_update_to_user_clients)(
                workflow.owner_id, user_update)
        return HttpResponse(status=status.NO_CONTENT)
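The helper `lock_user_by_id` is what makes the delete-then-recount safe under concurrency. A minimal sketch of such a helper, assuming a standard Django user model and row-level locking via `SELECT ... FOR UPDATE` (this implementation is an assumption, not the project's actual code):

from django.contrib.auth import get_user_model

def lock_user_by_id(user_id: int, for_write: bool = False) -> None:
    # Hypothetical sketch. Inside the surrounding transaction, evaluating a
    # select_for_update() queryset makes the database take a row lock on the
    # user, so concurrent usage recalculations serialize on that row.
    qs = get_user_model().objects.filter(id=user_id)
    if for_write:
        qs = qs.select_for_update()
    qs.get()  # force evaluation; blocks until the lock is granted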
Example #2
    def test_sum_fetches(self):
        user = create_user()
        Workflow.create_and_init(owner=user, fetches_per_day=1)
        Workflow.create_and_init(owner=user, fetches_per_day=0)
        Workflow.create_and_init(owner=user, fetches_per_day=2.12)
        usage = query_user_usage(user.id)
        self.assertEqual(usage, UserUsage(fetches_per_day=3.12))
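The tests here pin down `query_user_usage`'s contract: sum `fetches_per_day` over the workflows the user owns, and return 0 when there are none. A sketch of an implementation consistent with that contract (assumed, not the project's actual code):

from django.db.models import Sum

def query_user_usage(user_id: int) -> UserUsage:
    # Aggregate over all workflows owned by the user. Sum() over an empty
    # queryset yields None, so coerce that to 0 (see test_no_workflows).
    total = Workflow.objects.filter(owner_id=user_id).aggregate(
        total=Sum("fetches_per_day")
    )["total"]
    return UserUsage(fetches_per_day=total or 0)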
Example #3
def _do_try_set_autofetch(
    scope,
    workflow: Workflow,
    step_slug: str,
    auto_update_data: bool,
    update_interval: int,
) -> Tuple[Step, UserUsage]:
    with lock_workflow_for_role(workflow, scope, role="owner"):
        step = _load_step_by_slug_sync(workflow, step_slug)  # or raise HandlerError

        check_quota = (
            auto_update_data
            and step.auto_update_data
            and update_interval < step.update_interval
        ) or (auto_update_data and not step.auto_update_data)

        step.auto_update_data = auto_update_data
        step.update_interval = update_interval
        if auto_update_data:
            step.next_update = datetime.datetime.now() + datetime.timedelta(
                seconds=update_interval
            )
        else:
            step.next_update = None
        step.save(update_fields=["auto_update_data", "update_interval", "next_update"])

        workflow.recalculate_fetches_per_day()
        workflow.save(update_fields=["fetches_per_day"])

        # Locking after write is fine, because we haven't called COMMIT
        # yet so nobody else can tell the difference.
        lock_user_by_id(
            workflow.owner_id, for_write=True
        )  # we're overwriting user's (calculated) fetches_per_day
        usage = query_user_usage(workflow.owner_id)

        # Now before we commit, let's see if we've surpassed the user's limit;
        # roll back if we have.
        #
        # Only rollback if we're _increasing_ our fetch count. If we're
        # lowering it, allow that -- even if the user is over limit, we still
        # want to commit because it's an improvement.
        if check_quota:
            limit = workflow.owner.user_profile.effective_limits.max_fetches_per_day
            if usage.fetches_per_day > limit:
                raise AutofetchQuotaExceeded

    return step, usage
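Because `AutofetchQuotaExceeded` is raised inside the `with lock_workflow_for_role(...)` block, the surrounding transaction rolls back and the `step.save()` / `workflow.save()` above never commit. A hypothetical async caller (the name and parameters are assumptions; `HandlerError` is the error type mentioned in the snippet above) might look like:

from channels.db import database_sync_to_async

async def try_set_autofetch(scope, workflow, step_slug, is_autofetch, fetch_interval):
    # Run the sync, transaction-bound helper off the event loop; translate a
    # quota overrun into the handler-level error the client understands.
    try:
        step, usage = await database_sync_to_async(_do_try_set_autofetch)(
            scope, workflow, step_slug, bool(is_autofetch), int(fetch_interval)
        )
    except AutofetchQuotaExceeded:
        raise HandlerError("AutofetchQuotaExceeded")
    return step, usage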
Example #4
    def test_no_fetches(self):
        user = create_user()
        Workflow.create_and_init(owner=user, fetches_per_day=0)
        usage = query_user_usage(user.id)
        self.assertEqual(usage, UserUsage(fetches_per_day=0))
Example #5
    def test_no_workflows(self):
        user = create_user()
        usage = query_user_usage(user.id)
        self.assertEqual(usage, UserUsage(fetches_per_day=0))
Example #6
def _first_forward_and_save_returning_clientside_updates(
    cls, workflow_id: int, **kwargs
) -> Tuple[Optional[Delta], Optional[clientside.Update],
           Optional[PendingOwnerUpdate], Optional[int]]:
    """
    Create and execute `cls` command; return `(Delta, WebSocket data, render?)`.

    If `amend_create_kwargs()` returns `None`, return `(None, None)` here.

    All this, in a cooperative lock.

    Return `(None, None, None)` if `cls.amend_create_kwargs()` returns `None`.
    This is how `cls.amend_create_kwargs()` suggests the Delta should not be
    created at all.
    """
    now = datetime.datetime.now()
    command = NAME_TO_COMMAND[cls.__name__]
    try:
        # raise Workflow.DoesNotExist
        with Workflow.lookup_and_cooperative_lock(id=workflow_id) as workflow:
            create_kwargs = command.amend_create_kwargs(workflow=workflow,
                                                        **kwargs)
            if not create_kwargs:
                return None, None, None, None

            # Look up unapplied deltas to delete: that's the linked list that
            # comes _after_ `workflow.last_delta_id`.
            n_deltas_deleted, _ = workflow.deltas.filter(
                id__gt=workflow.last_delta_id).delete()

            # prev_delta is None when we're at the start of the undo stack
            prev_delta = workflow.deltas.filter(
                id=workflow.last_delta_id).first()

            # Delta.objects.create() and command.forward() may raise unexpected errors
            # Defer delete_orphan_soft_deleted_models(), to reduce the risk of this
            # race: 1. Delete DB objects; 2. Delete S3 files; 3. ROLLBACK. (We aren't
            # avoiding the race _entirely_ here, but we're at least avoiding causing
            # the race through errors in Delta or Command.)
            delta = Delta.objects.create(
                command_name=cls.__name__,
                prev_delta=prev_delta,
                last_applied_at=now,
                **create_kwargs,
            )
            command.forward(delta)

            # Point workflow to us
            workflow.last_delta_id = delta.id
            workflow.updated_at = datetime.datetime.now()
            workflow.save(update_fields=["last_delta_id", "updated_at"])

            if n_deltas_deleted:
                # We just deleted deltas; now we can garbage-collect Tabs and
                # Steps that are soft-deleted and have no deltas referring
                # to them.
                workflow.delete_orphan_soft_deleted_models()

            if cls.modifies_owner_usage and workflow.owner_id:
                # We lock after running the command, but it's still correct. DB
                # commits are atomic: nothing is written yet.
                lock_user_by_id(workflow.owner_id, for_write=True)
                pending_owner_update = PendingOwnerUpdate(
                    user_id=workflow.owner_id,
                    user_update=clientside.UserUpdate(
                        usage=query_user_usage(workflow.owner_id)),
                )
            else:
                pending_owner_update = None

            return (
                delta,
                command.load_clientside_update(delta),
                pending_owner_update,
                delta.id
                if command.get_modifies_render_output(delta) else None,
            )
    except Workflow.DoesNotExist:
        return None, None, None, None
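The `PendingOwnerUpdate` exists because WebSocket messages must not go out until the transaction has committed; the function computes them under the lock and hands them back for the caller to deliver. A hypothetical caller (the `rabbitmq.*` names mirror Example #1 but are otherwise assumptions):

def create_and_notify(cls, workflow_id: int, **kwargs) -> None:
    # Everything DB-side happened inside the cooperative lock; by the time we
    # are here the transaction has committed, so it's safe to notify clients.
    delta, update, pending_owner_update, render_delta_id = (
        _first_forward_and_save_returning_clientside_updates(
            cls, workflow_id, **kwargs
        )
    )
    if delta is None:
        return  # amend_create_kwargs() vetoed the Delta; nothing to send
    async_to_sync(rabbitmq.send_update_to_workflow_clients)(workflow_id, update)
    if pending_owner_update is not None:
        async_to_sync(rabbitmq.send_user_update_to_user_clients)(
            pending_owner_update.user_id, pending_owner_update.user_update
        )
    if render_delta_id is not None:
        async_to_sync(rabbitmq.queue_render)(workflow_id, render_delta_id)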