Example #1
    def delete(self, request: HttpRequest, workflow_id: int):
        try:
            with Workflow.authorized_lookup_and_cooperative_lock(
                    "owner", request.user, request.session,
                    pk=workflow_id) as workflow:
                workflow.delete()

                if workflow.owner_id:
                    # We lock after delete, but it's still correct. DB commits
                    # are atomic: nothing is written yet.
                    lock_user_by_id(workflow.owner_id, for_write=True)
                    user_update = clientside.UserUpdate(
                        usage=query_user_usage(workflow.owner_id))
        except Workflow.DoesNotExist as err:
            if err.args[0] == "owner access denied":
                return JsonResponse(
                    {
                        "message": str(err),
                        "status_code": 403
                    },
                    status=status.FORBIDDEN,
                )
            else:
                return JsonResponse(
                    {
                        "message": "Workflow not found",
                        "status_code": 404
                    },
                    status=status.NOT_FOUND,
                )

        if workflow.owner_id:
            async_to_sync(rabbitmq.send_user_update_to_user_clients)(
                workflow.owner_id, user_update)
        return HttpResponse(status=status.NO_CONTENT)
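
Example #1's comment about locking after the delete relies on everything running in one uncommitted transaction. `lock_user_by_id` is not shown in this listing; a minimal sketch of what it plausibly does (an assumption, Django ORM flavored):

from django.contrib.auth import get_user_model

def lock_user_by_id(user_id: int, for_write: bool = False) -> None:
    # Hypothetical sketch: take a row-level lock (SELECT ... FOR UPDATE) so
    # concurrent usage recalculations for one user serialize. Must run inside
    # a transaction -- presumably the one the cooperative lock opened.
    qs = get_user_model().objects.filter(id=user_id)
    if for_write:
        qs = qs.select_for_update()
    qs.get()  # evaluating the queryset acquires the lock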
Example #2
    def test_workflow_delete_send_user_update(self, send_update):
        send_update.side_effect = async_noop

        to_delete = Workflow.create_and_init(owner_id=self.user.id, fetches_per_day=2)
        Workflow.create_and_init(owner_id=self.user.id, fetches_per_day=3)
        self.client.force_login(self.user)
        response = self.client.delete("/api/workflows/%d/" % to_delete.id)
        self.assertEqual(response.status_code, status.NO_CONTENT)

        send_update.assert_called_with(
            self.user.id, clientside.UserUpdate(usage=UserUsage(fetches_per_day=3))
        )
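
`async_noop`, the side effect given to the mocked sender here and in the following examples, is presumably just an awaitable that does nothing (an assumption; it is not defined in this listing):

async def async_noop(*args, **kwargs):
    # Lets `await send_update(...)` succeed in tests without RabbitMQ.
    pass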
Example #3
    def test_try_set_autofetch_disable_autofetch(self, update_user,
                                                 update_workflow):
        update_user.side_effect = async_noop
        update_workflow.side_effect = async_noop

        user = User.objects.create(username="******", email="*****@*****.**")
        UserProfile.objects.create(user=user)
        workflow = Workflow.create_and_init(owner=user, fetches_per_day=72.0)
        step = workflow.tabs.first().steps.create(
            order=0,
            slug="step-1",
            auto_update_data=True,
            update_interval=1200,
            next_update=datetime.datetime.now(),
        )

        response = self.run_handler(
            try_set_autofetch,
            user=user,
            workflow=workflow,
            stepSlug="step-1",
            isAutofetch=False,
            fetchInterval=300,
        )
        self.assertResponse(response, data=None)
        step.refresh_from_db()
        self.assertEqual(step.auto_update_data, False)
        self.assertEqual(step.update_interval, 300)
        self.assertIsNone(step.next_update)
        workflow.refresh_from_db()
        self.assertEqual(workflow.fetches_per_day, 0.0)

        update_workflow.assert_called_with(
            workflow.id,
            clientside.Update(
                workflow=clientside.WorkflowUpdate(fetches_per_day=0.0),
                steps={
                    step.id: clientside.StepUpdate(
                        is_auto_fetch=False, fetch_interval=300
                    )
                },
            ),
        )
        update_user.assert_called_with(
            user.id,
            clientside.UserUpdate(usage=UserUsage(fetches_per_day=0.0)))
Example #4
    def test_try_set_autofetch_happy_path(self, update_user, update_workflow):
        update_user.side_effect = async_noop
        update_workflow.side_effect = async_noop

        user = User.objects.create(username="******", email="*****@*****.**")
        UserProfile.objects.create(user=user)
        workflow = Workflow.create_and_init(owner=user)
        step = workflow.tabs.first().steps.create(order=0, slug="step-1")

        response = self.run_handler(
            try_set_autofetch,
            user=user,
            workflow=workflow,
            stepSlug="step-1",
            isAutofetch=True,
            fetchInterval=19200,
        )
        self.assertResponse(response, data=None)
        step.refresh_from_db()
        self.assertEqual(step.auto_update_data, True)
        self.assertEqual(step.update_interval, 19200)
        self.assertLess(
            step.next_update,
            datetime.datetime.now() + datetime.timedelta(seconds=19202),
        )
        self.assertGreater(
            step.next_update,
            datetime.datetime.now() + datetime.timedelta(seconds=19198),
        )
        workflow.refresh_from_db()
        self.assertEqual(workflow.fetches_per_day, 4.5)

        update_user.assert_called_with(
            user.id,
            clientside.UserUpdate(usage=UserUsage(fetches_per_day=4.5)))
        update_workflow.assert_called_with(
            workflow.id,
            clientside.Update(
                workflow=clientside.WorkflowUpdate(fetches_per_day=4.5),
                steps={
                    step.id: clientside.StepUpdate(
                        is_auto_fetch=True, fetch_interval=19200
                    )
                },
            ),
        )
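
The usage figures in Examples #3 and #4 are consistent with one rule, summed over autofetching steps (an assumption, but it matches every number in these tests): fetches_per_day = 86400 / update_interval.

SECONDS_PER_DAY = 86400
assert SECONDS_PER_DAY / 1200 == 72.0   # Example #3's starting usage
assert SECONDS_PER_DAY / 19200 == 4.5   # Example #4's expected usage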
Example #5
    def test_update_workflow_fetches_per_day(self, send_user_update):
        send_user_update.side_effect = async_noop

        user = User.objects.create(email="*****@*****.**")
        workflow = Workflow.create_and_init(owner_id=user.id,
                                            fetches_per_day=3.0)
        tab = workflow.tabs.first()
        tab.steps.create(
            slug="step-1",
            order=0,
            auto_update_data=True,
            update_interval=86400,
            next_update=datetime.datetime.now(),
        )
        step2 = tab.steps.create(
            slug="step-2",
            order=1,
            auto_update_data=True,
            update_interval=43200,
            next_update=datetime.datetime.now(),
        )

        self.run_with_async_db(
            commands.do(DeleteStep, workflow_id=workflow.id, step=step2))
        workflow.refresh_from_db()
        self.assertEqual(workflow.fetches_per_day, 1.0)

        send_user_update.assert_called_with(
            user.id, clientside.UserUpdate(usage=UserUsage(1.0)))
        send_user_update.reset_mock()

        # Undo doesn't increase usage (user might not expect it to)
        self.run_with_async_db(commands.undo(workflow.id))
        send_user_update.assert_not_called()
        workflow.refresh_from_db()
        self.assertEqual(workflow.fetches_per_day, 1.0)
        step2.refresh_from_db()
        self.assertEqual(step2.auto_update_data, False)
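
Example #5's figures follow the same 86400 / update_interval rule:

assert 86400 / 86400 + 86400 / 43200 == 3.0  # step-1 (daily) + step-2 (twice daily)
assert 86400 / 86400 == 1.0                  # usage after DeleteStep removes step-2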
Example #6
    async def test_two_clients_get_messages_on_same_user(self, communicate):
        comm1 = communicate(self.application, f"/workflows/{self.workflow.id}")
        workflow2 = await database_sync_to_async(Workflow.create_and_init)(
            owner=self.user
        )
        comm2 = communicate(self.application, f"/workflows/{workflow2.id}")
        connected1, _ = await comm1.connect()
        self.assertTrue(connected1)
        await comm1.receive_from()  # ignore initial workflow delta
        connected2, _ = await comm2.connect()
        self.assertTrue(connected2)
        await comm2.receive_from()  # ignore initial workflow delta
        async with self.global_rabbitmq_connection():
            await send_user_update_to_user_clients(
                self.user.id, clientside.UserUpdate(display_name="George")
            )
        response1 = await comm1.receive_from()
        self.assertEqual(
            json.loads(response1),
            {
                "type": "apply-delta",
                "data": {"updateUser": {"display_name": "George"}},
            },
        )
        response2 = await comm2.receive_from()
        self.assertEqual(
            json.loads(response2),
            {
                "type": "apply-delta",
                "data": {"updateUser": {"display_name": "George"}},
            },
        )
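
`communicate` is injected into the test; presumably it is a thin wrapper over Channels' WebsocketCommunicator (an assumption; it is not defined in this listing), roughly:

from channels.testing import WebsocketCommunicator

def communicate(application, path: str) -> WebsocketCommunicator:
    # Open a test WebSocket against the ASGI application at `path`.
    return WebsocketCommunicator(application, path)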
Example #7
async def try_set_autofetch(
    workflow: Workflow,
    stepSlug: str,
    isAutofetch: bool,
    fetchInterval: int,
    scope,
    **kwargs,
):
    """Set step's autofetch settings, or not; respond with temporary data.

    Client-side, the amalgam of races looks like:

        1. Submit form with these `try_set_autofetch()` parameters.
        2. Server sends three pieces of data in parallel:
            a. Update the client state's step
            b. Update the client state's user usage
            c. Respond "ok"
        3. Client waits for all three messages, and shows "busy" until then.
        4. Client resets the form (because the state holds correct data now).

    Unfortunately, our client/server mechanism doesn't have a way to wait for
    _both_ 2a and 2b. (We have a "mutation" mechanism, but it can only wait
    for 2a, not both 2a and 2b.) [2021-06-17] this problem occurs nowhere else
    in our codebase, so we aren't inspired to build a big solution.

    Our hack: we assume that in practice, the client will usually receive
    2a+2b+2c nearly at the same time (since RabbitMQ is fast and the Internet
    is slow). So the client (3) waits for 2c and then waits a fixed duration;
    then (4) assumes 2a and 2b have arrived and resets the form.
    """
    step_slug = str(stepSlug)
    auto_update_data = bool(isAutofetch)
    try:
        update_interval = max(settings.MIN_AUTOFETCH_INTERVAL, int(fetchInterval))
    except (ValueError, TypeError):
        return HandlerError("BadRequest: fetchInterval must be an integer")

    try:
        step, usage = await _do_try_set_autofetch(
            scope, workflow, step_slug, auto_update_data, update_interval
        )  # updates workflow, step
    except AutofetchQuotaExceeded:
        raise HandlerError("AutofetchQuotaExceeded")

    await rabbitmq.send_user_update_to_user_clients(
        workflow.owner_id, clientside.UserUpdate(usage=usage)
    )
    await rabbitmq.send_update_to_workflow_clients(
        workflow.id,
        clientside.Update(
            workflow=clientside.WorkflowUpdate(
                fetches_per_day=workflow.fetches_per_day
            ),
            steps={
                step.id: clientside.StepUpdate(
                    is_auto_fetch=step.auto_update_data,
                    fetch_interval=step.update_interval,
                )
            },
        ),
    )
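
The assertions in Examples #3 and #4 pin down what `_do_try_set_autofetch()` must do to `next_update`: enabling autofetch schedules the first fetch one interval from now; disabling clears it. A sketch of that rule (hypothetical helper name; the real logic lives in `_do_try_set_autofetch()`, which is not shown here):

import datetime
from typing import Optional

def _next_update(
    auto_update_data: bool, update_interval: int
) -> Optional[datetime.datetime]:
    if auto_update_data:
        return datetime.datetime.now() + datetime.timedelta(seconds=update_interval)
    return None  # disabling autofetch clears the schedule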
Example #8
def _first_forward_and_save_returning_clientside_updates(
    cls, workflow_id: int, **kwargs
) -> Tuple[
    Optional[Delta],
    Optional[clientside.Update],
    Optional[PendingOwnerUpdate],
    Optional[int],
]:
    """
    Create and execute `cls` command; return `(Delta, WebSocket data, render?)`.

    If `amend_create_kwargs()` returns `None`, return `(None, None)` here.

    All this, in a cooperative lock.

    Return `(None, None, None)` if `cls.amend_create_kwargs()` returns `None`.
    This is how `cls.amend_create_kwargs()` suggests the Delta should not be
    created at all.
    """
    now = datetime.datetime.now()
    command = NAME_TO_COMMAND[cls.__name__]
    try:
        # raise Workflow.DoesNotExist
        with Workflow.lookup_and_cooperative_lock(id=workflow_id) as workflow:
            create_kwargs = command.amend_create_kwargs(workflow=workflow,
                                                        **kwargs)
            if not create_kwargs:
                return None, None, None, None

            # Look up unapplied deltas to delete: the linked list that comes
            # _after_ `workflow.last_delta_id`.
            n_deltas_deleted, _ = workflow.deltas.filter(
                id__gt=workflow.last_delta_id).delete()

            # prev_delta is None when we're at the start of the undo stack
            prev_delta = workflow.deltas.filter(
                id=workflow.last_delta_id).first()

            # Delta.objects.create() and command.forward() may raise unexpected errors
            # Defer delete_orphan_soft_deleted_models(), to reduce the risk of this
            # race: 1. Delete DB objects; 2. Delete S3 files; 3. ROLLBACK. (We aren't
            # avoiding the race _entirely_ here, but we're at least avoiding causing
            # the race through errors in Delta or Command.)
            delta = Delta.objects.create(
                command_name=cls.__name__,
                prev_delta=prev_delta,
                last_applied_at=now,
                **create_kwargs,
            )
            command.forward(delta)

            # Point workflow to us
            workflow.last_delta_id = delta.id
            workflow.updated_at = datetime.datetime.now()
            workflow.save(update_fields=["last_delta_id", "updated_at"])

            if n_deltas_deleted:
                # We just deleted deltas; now we can garbage-collect Tabs and
                # Steps that are soft-deleted and have no deltas referring
                # to them.
                workflow.delete_orphan_soft_deleted_models()

            if cls.modifies_owner_usage and workflow.owner_id:
                # We lock after running the command, but it's still correct. DB
                # commits are atomic: nothing is written yet.
                lock_user_by_id(workflow.owner_id, for_write=True)
                pending_owner_update = PendingOwnerUpdate(
                    user_id=workflow.owner_id,
                    user_update=clientside.UserUpdate(
                        usage=query_user_usage(workflow.owner_id)),
                )
            else:
                pending_owner_update = None

            return (
                delta,
                command.load_clientside_update(delta),
                pending_owner_update,
                delta.id if command.get_modifies_render_output(delta) else None,
            )
    except Workflow.DoesNotExist:
        return None, None, None, None
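
A sketch of how an async caller might consume the 4-tuple (assumptions: `database_sync_to_async` as in Example #6, the `rabbitmq` helpers from Example #7, and a hypothetical `rabbitmq.queue_render`):

async def do_command(cls, workflow_id: int, **kwargs):
    delta, update, pending_owner_update, render_delta_id = await database_sync_to_async(
        _first_forward_and_save_returning_clientside_updates
    )(cls, workflow_id, **kwargs)
    if delta is None:
        return  # amend_create_kwargs() vetoed the command
    await rabbitmq.send_update_to_workflow_clients(workflow_id, update)
    if pending_owner_update is not None:
        await rabbitmq.send_user_update_to_user_clients(
            pending_owner_update.user_id, pending_owner_update.user_update
        )
    if render_delta_id is not None:
        await rabbitmq.queue_render(workflow_id, render_delta_id)  # hypothetical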