def delete(self, request: HttpRequest, workflow_id: int):
    """Delete a workflow; only its owner may do so.

    Respond 403 when the requester is not the owner, 404 when the
    workflow does not exist, 204 on success. On success, push the
    owner's recomputed usage to their connected clients.
    """
    owner_update = None
    try:
        with Workflow.authorized_lookup_and_cooperative_lock(
            "owner", request.user, request.session, pk=workflow_id
        ) as workflow:
            workflow.delete()
            if workflow.owner_id:
                # Locking after the delete is still correct: DB commits
                # are atomic, so nothing has been written yet.
                lock_user_by_id(workflow.owner_id, for_write=True)
                owner_update = clientside.UserUpdate(
                    usage=query_user_usage(workflow.owner_id)
                )
    except Workflow.DoesNotExist as exc:
        if exc.args[0] == "owner access denied":
            return JsonResponse(
                {"message": str(exc), "status_code": 403},
                status=status.FORBIDDEN,
            )
        return JsonResponse(
            {"message": "Workflow not found", "status_code": 404},
            status=status.NOT_FOUND,
        )
    if workflow.owner_id:
        # Notify outside the lock: the transaction has committed by now.
        async_to_sync(rabbitmq.send_user_update_to_user_clients)(
            workflow.owner_id, owner_update
        )
    return HttpResponse(status=status.NO_CONTENT)
def _get_workflow_as_clientside_update(self) -> WorkflowUpdateData:
    """Return (clientside.Update, delta_id).

    Raise Workflow.DoesNotExist if a race deletes the Workflow.
    """
    with self._lookup_requested_workflow_with_auth_and_cooperative_lock() as workflow:
        user = None
        if not self.scope["user"].is_anonymous:
            user_id = self.scope["user"].id
            # Read-lock the user row so the serialized user data is
            # consistent with this transaction.
            lock_user_by_id(user_id, for_write=False)
            user = query_clientside_user(user_id)
        tabs = {tab.slug: tab.to_clientside() for tab in workflow.live_tabs}
        steps = {
            step.id: step.to_clientside()
            for step in Step.live_in_workflow(workflow)
        }
        update = clientside.Update(
            user=user,
            workflow=workflow.to_clientside(),
            tabs=tabs,
            steps=steps,
        )
        return WorkflowUpdateData(update, workflow.last_delta_id)
def _render_workflows(request: HttpRequest, **kwargs) -> TemplateResponse:
    """Render the workflow-list page, filtering Workflows by **kwargs.

    Lesson workflows are excluded; results are newest-first.
    """
    ctx = JsonizeContext(request.locale_id, {})
    not_a_lesson = Q(lesson_slug__isnull=True) | Q(lesson_slug="")
    workflows = (
        Workflow.objects.filter(**kwargs)
        .filter(not_a_lesson)
        .prefetch_related("acl", "owner")
        .order_by("-updated_at")
    )
    json_workflows = [
        jsonize_clientside_workflow(
            workflow.to_clientside(
                include_tab_slugs=False, include_block_slugs=False
            ),
            ctx,
            is_init=True,
        )
        for workflow in workflows
    ]
    if request.user.is_anonymous:
        json_user = None
    else:
        with transaction.atomic():
            # Read-lock so the serialized user is transaction-consistent.
            lock_user_by_id(request.user.id, for_write=False)
            json_user = jsonize_clientside_user(
                query_clientside_user(request.user.id)
            )
    init_state = {
        "loggedInUser": json_user,
        "workflows": json_workflows,
    }
    return TemplateResponse(request, "workflows.html", {"initState": init_state})
def create_billing_portal_session(
    user_id: int, billing_url: str
) -> stripe.billing_portal.Session:
    """Create a Stripe BillingPortalSession, suitable for a JsonResponse.

    Ref: https://stripe.com/docs/api/customer_portal/object

    Raise User.DoesNotExist or UserProfile.DoesNotExist if the user does
    not have a Stripe customer ID.

    Re-raise Stripe error if creating a BillingPortalSession fails.
    """
    with transaction.atomic():
        lock_user_by_id(user_id, for_write=False)
        with connection.cursor() as cursor:
            cursor.execute(
                """
                SELECT stripe_customer_id
                FROM cjworkbench_userprofile
                WHERE user_id = %s AND stripe_customer_id IS NOT NULL
                """,
                [user_id],
            )
            row = cursor.fetchone()
        if row is None:
            # No profile, or profile has no Stripe customer.
            raise UserProfile.DoesNotExist
        stripe_customer_id = row[0]
    return stripe.billing_portal.Session.create(
        customer=stripe_customer_id,
        return_url=billing_url,
        api_key=settings.STRIPE_API_KEY,
    )
def make_init_state(request, workflow: Workflow,
                    modules: Dict[str, ModuleZipfile]) -> Dict[str, Any]:
    """Build a dict to embed as JSON in `window.initState` in HTML.

    Raise Http404 if the workflow disappeared.

    Side-effect: update workflow.last_viewed_at.
    """
    try:
        with workflow.cooperative_lock():  # raise DoesNotExist on race
            if request.user.is_anonymous:
                user = None
            else:
                # Read-lock the user row so the serialized user data is
                # consistent with this transaction.
                lock_user_by_id(request.user.id, for_write=False)
                user = query_clientside_user(request.user.id)
            # NOTE(review): naive local datetime; presumably the project
            # convention (it appears elsewhere in this codebase) — confirm.
            workflow.last_viewed_at = datetime.datetime.now()
            workflow.save(update_fields=["last_viewed_at"])
            state = clientside.Init(
                user=user,
                workflow=workflow.to_clientside(),
                tabs={tab.slug: tab.to_clientside()
                      for tab in workflow.live_tabs},
                steps={
                    # Missing module zipfiles become force_module_zipfile=None.
                    step.id: step.to_clientside(
                        force_module_zipfile=modules.get(step.module_id_name))
                    for step in Step.live_in_workflow(
                        workflow).prefetch_related("tab")
                },
                modules={
                    module_id: clientside.Module(
                        spec=module.get_spec(),
                        js_module=module.get_optional_js_module(),
                    )
                    for module_id, module in modules.items()
                },
                blocks={block.slug: block.to_clientside()
                        for block in workflow.blocks.all()},
                settings={
                    "bigTableRowsPerTile": settings.BIG_TABLE_ROWS_PER_TILE,
                    "bigTableColumnsPerTile": settings.BIG_TABLE_COLUMNS_PER_TILE,
                },
            )
    except Workflow.DoesNotExist:
        raise Http404("Workflow was recently deleted")
    # Serialize outside the lock: jsonize needs no DB access.
    ctx = JsonizeContext(request.locale_id, modules)
    return jsonize_clientside_init(state, ctx)
def _do_try_set_autofetch(
    scope,
    workflow: Workflow,
    step_slug: str,
    auto_update_data: bool,
    update_interval: int,
) -> Dict[str, Any]:
    # NOTE(review): the return annotation looks wrong — this returns a
    # `(step, usage)` tuple, not a dict. Confirm callers and fix.
    """Set a step's autofetch settings, enforcing the owner's fetch quota.

    Raise AutofetchQuotaExceeded (rolling back every write) if enabling or
    speeding up autofetch would push the owner past
    `max_fetches_per_day`. Slowing down or disabling autofetch is always
    allowed, even for an over-quota owner, because it is an improvement.
    """
    with lock_workflow_for_role(workflow, scope, role="owner"):
        step = _load_step_by_slug_sync(workflow, step_slug)  # or raise HandlerError
        # Quota only matters when fetch load would *increase*: turning
        # autofetch on, or shortening the interval while it is on.
        check_quota = (
            auto_update_data
            and step.auto_update_data
            and update_interval < step.update_interval
        ) or (auto_update_data and not step.auto_update_data)

        step.auto_update_data = auto_update_data
        step.update_interval = update_interval
        if auto_update_data:
            step.next_update = datetime.datetime.now() + datetime.timedelta(
                seconds=update_interval
            )
        else:
            step.next_update = None
        step.save(update_fields=["auto_update_data", "update_interval", "next_update"])

        workflow.recalculate_fetches_per_day()
        workflow.save(update_fields=["fetches_per_day"])

        # Locking after write is fine, because we haven't called COMMIT
        # yet so nobody else can tell the difference.
        lock_user_by_id(
            workflow.owner_id, for_write=True
        )  # we're overwriting user's (calculated) fetches_per_day
        usage = query_user_usage(workflow.owner_id)

        # Now before we commit, let's see if we've surpassed the user's limit;
        # roll back if we have.
        #
        # Only rollback if we're _increasing_ our fetch count. If we're
        # lowering it, allow that -- even if the user is over limit, we still
        # want to commit because it's an improvement.
        if check_quota:
            limit = workflow.owner.user_profile.effective_limits.max_fetches_per_day
            if usage.fetches_per_day > limit:
                # Raising inside the lock rolls back all writes above.
                raise AutofetchQuotaExceeded

    return step, usage
def handle_checkout_session_completed(
    checkout_session: stripe.api_resources.checkout.Session,
) -> None:
    """Create a Subscription based on a Stripe Webhook call.

    Assumes the Subscription is for a Stripe Customer we already created.
    See UserProfile.stripe_customer_id.

    1. Request Subscription and Customer details from Stripe.
    2. Find UserProfile with stripe_customer_id == Customer.id.
    3. Upsert Subscription based on stripe_subscription_id.

    Raise error (intended to cause 500 Server Error) on any problem.

    Tested:
    [✓] ValueError if Stripe data does not match expectations.
    [✓] Price.DoesNotExist if subscribed to a non-price.
    [✓] UserProfile.DoesNotExist if customer cannot be found in our database.
    """
    stripe_subscription_id = checkout_session.subscription
    stripe_customer_id = checkout_session.customer
    # Network call to Stripe — kept outside the DB transaction below.
    stripe_subscription = stripe.Subscription.retrieve(
        stripe_subscription_id,
        api_key=settings.STRIPE_API_KEY,
    )  # Raise all sorts of errors
    items = stripe_subscription["items"].data  # not ".items": stripe-python is weird
    # We only support single-item subscriptions.
    if len(items) != 1:
        raise ValueError("len(items) != 1")
    item = items[0]
    price = Price.objects.get(stripe_price_id=item.price.id)  # raise Price.DoesNotExist
    with transaction.atomic():
        user_profile = UserProfile.objects.get(
            stripe_customer_id=stripe_customer_id
        )  # raise UserProfile.DoesNotExist
        # Write-lock the user: a Subscription change affects their limits.
        lock_user_by_id(user_profile.user_id, for_write=True)  # raise User.DoesNotExist
        # Upsert: webhook deliveries may repeat, so this must be idempotent.
        Subscription.objects.update_or_create(
            user_id=user_profile.user_id,
            stripe_subscription_id=stripe_subscription_id,
            defaults=dict(
                price=price,
                stripe_status=stripe_subscription.status,
                created_at=_unix_timestamp_to_datetime(stripe_subscription.created),
                renewed_at=_unix_timestamp_to_datetime(
                    stripe_subscription.current_period_start
                ),
            ),
        )
def _render_course(request, course, lesson_url_prefix):
    """Render a course page as SEO-friendly server-side HTML.

    We render using HTML, not React, to make this page SEO-friendly.
    """
    logged_in_user = None
    if request.user and request.user.is_authenticated:
        with transaction.atomic():
            lock_user_by_id(request.user.id, for_write=False)
            logged_in_user = jsonize_clientside_user(
                query_clientside_user(request.user.id)
            )
    # Courses in the course's own locale; empty when the locale is unknown.
    courses = AllCoursesByLocale.get(course.locale_id, [])
    init_state = json.dumps({
        "loggedInUser": logged_in_user,
        "courses": [
            dict(href=c.href, title=c.title)
            for c in AllCoursesByLocale.get(request.locale_id, [])
        ],
    })
    return TemplateResponse(
        request,
        "course.html",
        {
            "initState": init_state,
            "course": course,
            "courses": courses,
            "lessons": list(course.lessons.values()),
            "lesson_url_prefix": lesson_url_prefix,
        },
    )
def create_checkout_session(
    user_id: int, price: Price, billing_url: str
) -> stripe.checkout.Session:
    """Create a Stripe CheckoutSession, suitable for a JsonResponse.

    Ref: https://stripe.com/docs/billing/subscriptions/checkout/fixed-price#create-a-checkout-session

    Create a Stripe Customer for the given User if the User does not already
    have one.

    Re-raise Stripe error if creating a Customer or CheckoutSession fails.
    """
    with transaction.atomic():
        # Write-lock: we may assign the user's stripe_customer_id below.
        lock_user_by_id(user_id, for_write=True)
        user = User.objects.select_related("user_profile").get(id=user_id)
        user_profile = user.user_profile
        if user_profile.stripe_customer_id is None:
            # Network call to Stripe while holding the lock: this keeps a
            # concurrent request from creating a second Customer.
            stripe_customer = stripe.Customer.create(
                email=user.email,
                name=user.get_full_name(),
                preferred_locales=[user_profile.locale_id],
                api_key=settings.STRIPE_API_KEY,
            )
            user_profile.stripe_customer_id = stripe_customer.id
            user_profile.save(update_fields=["stripe_customer_id"])
    # COMMIT before creating checkout session. Otherwise, if we fail to create a
    # Session the user_profile.stripe_customer_id will be reset.
    return stripe.checkout.Session.create(
        customer=user_profile.stripe_customer_id,
        payment_method_types=["card"],
        line_items=[{"price": price.stripe_price_id, "quantity": 1}],
        locale=user_profile.locale_id,
        success_url=billing_url,
        cancel_url=billing_url,
        mode="subscription",
        api_key=settings.STRIPE_API_KEY,
    )
def _first_forward_and_save_returning_clientside_updates(
    cls, workflow_id: int, **kwargs
) -> Tuple[Optional[Delta],
           Optional[clientside.Update],
           Optional[PendingOwnerUpdate],
           Optional[int],
           ]:
    """
    Create and execute `cls` command; return
    `(Delta, WebSocket data, pending owner update, render delta_id)`.

    All this, in a cooperative lock.

    Return `(None, None, None, None)` if `cls.amend_create_kwargs()` returns
    `None` (its way of suggesting the Delta should not be created at all) or
    if the Workflow does not exist.
    """
    now = datetime.datetime.now()
    command = NAME_TO_COMMAND[cls.__name__]
    try:
        # raise Workflow.DoesNotExist
        with Workflow.lookup_and_cooperative_lock(id=workflow_id) as workflow:
            create_kwargs = command.amend_create_kwargs(workflow=workflow, **kwargs)
            if not create_kwargs:
                return None, None, None, None

            # Lookup unapplied deltas to delete. That's the linked list that comes
            # _after_ `workflow.last_delta_id`.
            n_deltas_deleted, _ = workflow.deltas.filter(
                id__gt=workflow.last_delta_id).delete()

            # prev_delta is none when we're at the start of the undo stack
            prev_delta = workflow.deltas.filter(
                id=workflow.last_delta_id).first()

            # Delta.objects.create() and command.forward() may raise unexpected errors
            # Defer delete_orphan_soft_deleted_models(), to reduce the risk of this
            # race: 1. Delete DB objects; 2. Delete S3 files; 3. ROLLBACK. (We aren't
            # avoiding the race _entirely_ here, but we're at least avoiding causing
            # the race through errors in Delta or Command.)
            delta = Delta.objects.create(
                command_name=cls.__name__,
                prev_delta=prev_delta,
                last_applied_at=now,
                **create_kwargs,
            )
            command.forward(delta)

            # Point workflow to us
            workflow.last_delta_id = delta.id
            workflow.updated_at = datetime.datetime.now()
            workflow.save(update_fields=["last_delta_id", "updated_at"])

            if n_deltas_deleted:
                # We just deleted deltas; now we can garbage-collect Tabs and
                # Steps that are soft-deleted and have no deltas referring
                # to them.
                workflow.delete_orphan_soft_deleted_models()

            if cls.modifies_owner_usage and workflow.owner_id:
                # We lock after running the command, but it's still correct. DB
                # commits are atomic: nothing is written yet.
                lock_user_by_id(workflow.owner_id, for_write=True)
                pending_owner_update = PendingOwnerUpdate(
                    user_id=workflow.owner_id,
                    user_update=clientside.UserUpdate(
                        usage=query_user_usage(workflow.owner_id)),
                )
            else:
                pending_owner_update = None

            return (
                delta,
                command.load_clientside_update(delta),
                pending_owner_update,
                # Fourth element is non-None only when a re-render is needed.
                delta.id if command.get_modifies_render_output(delta) else None,
            )
    except Workflow.DoesNotExist:
        return None, None, None, None