def create(self, request, *args, **kwargs):
    document_type = request.path.split('/')[2]
    document_id = get_document_id_from_path(request)
    document = RELATED_DISCUSSION_MODELS[document_type].objects.get(
        id=document_id
    )
    unified_document = document.unified_document
    unified_doc_id = unified_document.id

    if request.query_params.get('created_location') == 'progress':
        request.data['created_location'] = (
            BaseComment.CREATED_LOCATION_PROGRESS
        )

    response = super().create(request, *args, **kwargs)
    hubs = list(unified_document.hubs.all().values_list('id', flat=True))
    self.sift_track_create_content_comment(request, response, Reply)

    discussion_id = response.data['id']
    create_contribution.apply_async(
        (
            Contribution.COMMENTER,
            {'app_label': 'discussion', 'model': 'reply'},
            request.user.id,
            unified_doc_id,
            discussion_id,
        ),
        priority=3,
        countdown=10,
    )

    doc_type = get_doc_type_key(unified_document)
    reset_unified_document_cache(
        hub_ids=hubs,
        document_type=[doc_type, 'all'],
        filters=[DISCUSSED, TRENDING],
    )

    return self.get_self_upvote_response(request, response, Reply)
def handle_spam_user_task(user_id):
    User = apps.get_model('user.User')
    user = User.objects.filter(id=user_id).first()
    if user:
        user.papers.update(is_removed=True)
        user.paper_votes.update(is_removed=True)
        hub_ids = list(
            Hub.objects.filter(
                papers__in=list(user.papers.values_list('id', flat=True))
            ).values_list(
                'id', flat=True
            ).distinct()
        )

        # Update discussions
        for thr in Thread.objects.filter(created_by=user):
            thr.remove_nested()
            thr.update_discussion_count()
        for com in Comment.objects.filter(created_by=user):
            com.remove_nested()
            com.update_discussion_count()
        for rep in Reply.objects.filter(created_by=user):
            rep.remove_nested()
            rep.update_discussion_count()

        reset_unified_document_cache(hub_ids)
def create(self, request, *args, **kwargs):
    user = request.user
    data = request.data
    authors = data.get('authors', [])
    renderable_text = data.get('renderable_text', '')
    src = data.get('full_src', '')
    title = data.get('title', '')
    note_id = data.get('note_id', None)

    unified_doc = self._create_unified_doc(request)
    file_name, file = self._create_src_content_file(unified_doc, src, user)
    hypo = Hypothesis.objects.create(
        created_by=user,
        note_id=note_id,
        renderable_text=renderable_text,
        title=title,
        unified_document=unified_doc,
    )
    hypo.src.save(file_name, file)
    hypo.authors.set(authors)

    serializer = HypothesisSerializer(hypo)
    data = serializer.data

    hub_ids = unified_doc.hubs.values_list('id', flat=True)
    reset_unified_document_cache(
        hub_ids,
        document_type=['all', 'hypothesis'],
        filters=[NEWEST],
        with_default_hub=True,
    )
    return Response(data, status=200)
def update_existing_researchhub_posts(self, request):
    data = request.data
    created_by = request.user
    authors = data.pop('authors', None)
    hubs = data.pop('hubs', None)
    title = data.get('title', '')
    assign_doi = data.get('assign_doi', False)
    doi = generate_doi() if assign_doi else None

    if assign_doi and created_by.get_balance() - CROSSREF_DOI_RSC_FEE < 0:
        return Response('Insufficient Funds', status=402)

    rh_post = ResearchhubPost.objects.get(id=data.get('post_id'))
    rh_post.doi = doi or rh_post.doi
    rh_post.save(update_fields=['doi'])

    serializer = ResearchhubPostSerializer(
        rh_post, data=request.data, partial=True
    )
    serializer.is_valid(raise_exception=True)
    serializer.save()
    post = serializer.instance

    file_name = (
        f'RH-POST-{request.data.get("document_type")}'
        f'-USER-{request.user.id}.txt'
    )
    full_src_file = ContentFile(request.data['full_src'].encode())
    post.discussion_src.save(file_name, full_src_file)

    if isinstance(authors, list):
        rh_post.authors.set(authors)
    if isinstance(hubs, list):
        unified_doc = post.unified_document
        unified_doc.hubs.set(hubs)

    hub_ids = list(
        rh_post.unified_document.hubs.values_list('id', flat=True)
    )
    reset_unified_document_cache(
        hub_ids,
        document_type=['all', 'posts'],
        filters=[NEWEST, DISCUSSED, TOP, TRENDING],
    )

    if assign_doi:
        crossref_response = register_doi(created_by, title, doi, rh_post)
        if crossref_response.status_code != 200:
            return Response('Crossref API Failure', status=400)
        charge_doi_fee(created_by, rh_post)

    return Response(serializer.data, status=200)
def preload_hub_feeds():
    from researchhub_document.utils import (
        reset_unified_document_cache,
    )
    from hub.views import HubViewSet

    view = HubViewSet()
    qs = view.get_ordered_queryset('score')
    ids = qs.values_list('id', flat=True)
    reset_unified_document_cache(hub_ids=ids, document_type=["all"])
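# Every snippet in this file funnels cache invalidation through
# reset_unified_document_cache. Its implementation is not shown here; the
# sketch below is only an assumption reconstructed from how the call sites
# use it (hub_ids plus optional document_type, filters, date_ranges,
# with_default_hub). The cache-key layout and default values are
# hypothetical, not the real researchhub_document.utils code.
from django.core.cache import cache


def reset_unified_document_cache_sketch(
    hub_ids=None,
    document_type=('all',),
    filters=('hot',),
    date_ranges=('today',),
    with_default_hub=False,
):
    hub_ids = list(hub_ids or [])
    if with_default_hub and 0 not in hub_ids:
        hub_ids.append(0)  # hub id 0 presumably denotes the default/all-hubs feed
    for hub_id in hub_ids:
        for doc_type in document_type:
            for feed_filter in filters:
                for date_range in date_ranges:
                    # Hypothetical key scheme; the real code may enqueue a
                    # celery task that rebuilds the cached page instead of
                    # merely deleting it.
                    key = f'{doc_type}_{hub_id}_{feed_filter}_{date_range}'
                    cache.delete(key)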
def get_unified_documents(self, request):
    is_anonymous = request.user.is_anonymous
    query_params = request.query_params
    subscribed_hubs = query_params.get('subscribed_hubs', 'false')
    time_scope = query_params.get('time', 'today')

    if subscribed_hubs == 'true' and not is_anonymous:
        return self._get_subscribed_unified_documents(request)

    document_request_type = query_params.get('type', 'all')
    hub_id = query_params.get('hub_id', 0)
    page_number = int(query_params.get('page', 1))
    filtering = self._get_document_filtering(query_params)
    cache_hit = self._get_unifed_document_cache_hit(
        document_request_type,
        filtering,
        hub_id,
        page_number,
        time_scope,
    )

    if cache_hit and page_number == 1:
        cache_hit = self._cache_hit_with_latest_metadata(cache_hit)
        return Response(cache_hit)
    elif not cache_hit and page_number == 1:
        with_default_hub = hub_id == 0
        reset_unified_document_cache(
            hub_ids=[hub_id],
            document_type=[document_request_type],
            filters=[filtering],
            date_ranges=[time_scope],
            with_default_hub=with_default_hub,
        )

    documents = self.get_filtered_queryset(
        document_request_type,
        filtering,
        hub_id,
        time_scope,
    )
    context = self._get_serializer_context()
    page = self.paginate_queryset(documents)
    serializer = self.dynamic_serializer_class(
        page,
        _include_fields=[
            'documents',
            'document_type',
            'hot_score',
            'hot_score_v2',
            'reviews',
            'score',
        ],
        many=True,
        context=context,
    )
    serializer_data = serializer.data
    return self.get_paginated_response(serializer_data)
def update_purchases():
    PAPER_CONTENT_TYPE = ContentType.objects.get(
        app_label='paper', model='paper'
    )
    purchases = Purchase.objects.filter(boost_time__gt=0)
    for purchase in purchases:
        purchase.boost_time = purchase.get_boost_time()
        purchase.save()
        if purchase.content_type == PAPER_CONTENT_TYPE:
            paper = PAPER_CONTENT_TYPE.get_object_for_this_type(
                id=purchase.object_id
            )
            paper.calculate_hot_score()
    reset_unified_document_cache(with_default_hub=True)
def restore(self, request, pk=None):
    doc = self.get_object()
    doc.is_removed = False
    doc.save()

    doc_type = get_doc_type_key(doc)
    hub_ids = doc.hubs.values_list('id', flat=True)
    reset_unified_document_cache(
        hub_ids,
        document_type=[doc_type, 'all'],
        filters=[NEWEST, TOP, TRENDING, DISCUSSED],
    )
    return Response(self.get_serializer(instance=doc).data, status=200)
def update_or_create_vote(request, user, item, vote_type):
    cache_filters_to_reset = [TOP, TRENDING]
    if isinstance(item, (Thread, Comment, Reply)):
        cache_filters_to_reset = [TRENDING]

    hub_ids = [0]
    # NOTE: Hypothesis citations do not have a unified document attached
    has_unified_doc = hasattr(item, "unified_document")
    if has_unified_doc:
        hub_ids += list(
            item.unified_document.hubs.values_list("id", flat=True)
        )

    vote = retrieve_vote(user, item)
    # TODO: calvinhlee - figure out how to handle contributions
    if vote is not None:
        vote.vote_type = vote_type
        vote.save(update_fields=["updated_date", "vote_type"])
        if has_unified_doc:
            doc_type = get_doc_type_key(item.unified_document)
            reset_unified_document_cache(
                hub_ids,
                document_type=[doc_type, "all"],
                filters=cache_filters_to_reset,
            )
        # events_api.track_content_vote(user, vote, request)
        return get_vote_response(vote, 200)

    vote = create_vote(user, item, vote_type)
    if has_unified_doc:
        doc_type = get_doc_type_key(item.unified_document)
        reset_unified_document_cache(
            hub_ids,
            document_type=[doc_type, "all"],
            filters=cache_filters_to_reset,
        )

    app_label = item._meta.app_label
    model = item._meta.model.__name__.lower()
    # events_api.track_content_vote(user, vote, request)
    create_contribution.apply_async(
        (
            Contribution.UPVOTER,
            {"app_label": app_label, "model": model},
            request.user.id,
            vote.unified_document.id,
            vote.id,
        ),
        priority=2,
        countdown=10,
    )
    return get_vote_response(vote, 201)
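# retrieve_vote and create_vote are called above but not defined in this
# file. Minimal sketches of what they plausibly do, assuming a
# generic-relation Vote model keyed by (content_type, object_id,
# created_by); the model and field names here are assumptions, not the
# real helpers.
from django.contrib.contenttypes.models import ContentType


def retrieve_vote_sketch(user, item):
    # Return the user's existing vote on this item, or None.
    try:
        return Vote.objects.get(
            content_type=ContentType.objects.get_for_model(item),
            object_id=item.id,
            created_by=user,
        )
    except Vote.DoesNotExist:
        return None


def create_vote_sketch(user, item, vote_type):
    # Create a fresh vote row for this (user, item) pair.
    return Vote.objects.create(
        content_type=ContentType.objects.get_for_model(item),
        object_id=item.id,
        created_by=user,
        vote_type=vote_type,
    )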
def create(self, request, *args, **kwargs):
    model = request.path.split('/')[2]
    model_id = get_document_id_from_path(request)
    instance = RELATED_DISCUSSION_MODELS[model].objects.get(id=model_id)

    if model == 'citation':
        unified_document = instance.source
    else:
        unified_document = instance.unified_document

    if request.query_params.get('created_location') == 'progress':
        request.data['created_location'] = (
            BaseComment.CREATED_LOCATION_PROGRESS
        )

    response = super().create(request, *args, **kwargs)
    response = self.get_self_upvote_response(request, response, Thread)

    created_thread = Thread.objects.get(id=response.data['id'])
    if request.data.get('review'):
        created_thread.review_id = request.data.get('review')
        created_thread.save()

    hubs = list(unified_document.hubs.all().values_list('id', flat=True))
    discussion_id = response.data['id']
    self.sift_track_create_content_comment(
        request, response, Thread, is_thread=True
    )
    create_contribution.apply_async(
        (
            Contribution.COMMENTER,
            {'app_label': 'discussion', 'model': 'thread'},
            request.user.id,
            unified_document.id,
            discussion_id,
        ),
        priority=1,
        countdown=10,
    )

    doc_type = get_doc_type_key(unified_document)
    reset_unified_document_cache(
        hub_ids=hubs,
        document_type=[doc_type, 'all'],
        filters=[DISCUSSED, TRENDING],
    )

    return Response(
        self.serializer_class(created_thread).data,
        status=status.HTTP_201_CREATED,
    )
def handle(self, *args, **options):
    three_days_ago = timezone.now().date() - timedelta(days=3)
    papers = Paper.objects.filter(
        doi__isnull=True,
        uploaded_date__gte=three_days_ago,
        is_removed=False,
    )
    count = papers.count()
    for i, paper in enumerate(papers):
        if paper.id == 832969:
            continue
        print(f'Paper: {paper.id} - {i + 1}/{count}')
        if not paper.doi:
            censored_paper_cleanup(paper.id)

    hub_ids = list(
        Hub.objects.filter(
            papers__in=list(papers.values_list('id', flat=True))
        ).values_list(
            'id', flat=True
        ).distinct()
    )
    print(hub_ids)
    reset_unified_document_cache(hub_ids)
def censor(self, request, pk=None):
    item = self.get_object()
    item.remove_nested()
    item.update_discussion_count()

    content_id = f"{type(item).__name__}_{item.id}"
    user = request.user
    content_creator = item.created_by
    events_api.track_flag_content(content_creator, content_id, user.id)
    decisions_api.apply_bad_content_decision(
        content_creator, content_id, "MANUAL_REVIEW", user
    )

    content_type = get_content_type_for_model(item)
    Contribution.objects.filter(
        content_type=content_type, object_id=item.id
    ).delete()

    try:
        if item.review:
            item.review.is_removed = True
            item.review.save()
        doc = item.unified_document
        doc_type = get_doc_type_key(doc)
        hubs = list(doc.hubs.all().values_list('id', flat=True))
        reset_unified_document_cache(
            hub_ids=hubs,
            document_type=[doc_type, 'all'],
            filters=[DISCUSSED, TRENDING],
        )
    except Exception:
        pass

    try:
        if item.paper:
            item.paper.reset_cache()
    except Exception:
        pass

    return Response(self.get_serializer(instance=item).data, status=200)
def update(self, request, *args, **kwargs):
    update_response = super().update(request, *args, **kwargs)

    hub_ids = list(self.get_object().hubs.values_list('pk', flat=True))
    hub_ids.append(0)
    reset_latest_acitvity_cache(
        ','.join([str(hub_id) for hub_id in hub_ids])
    )

    doc = self.get_object()
    doc_type = get_doc_type_key(doc)
    reset_unified_document_cache(
        hub_ids,
        document_type=[doc_type, 'all'],
        filters=[NEWEST, TOP, TRENDING, DISCUSSED],
    )
    return update_response
def update(self, request, *args, **kwargs):
    response = super().update(request, *args, **kwargs)

    try:
        thread = Thread.objects.get(review_id=response.data['id'])
        doc = thread.unified_document
        doc_type = get_doc_type_key(doc)
        hubs = list(doc.hubs.all().values_list('id', flat=True))
        reset_unified_document_cache(
            hub_ids=hubs,
            document_type=[doc_type, 'all'],
            filters=[DISCUSSED, TRENDING],
        )
    except Exception:
        pass

    return response
def restore_paper(self, request, pk=None):
    try:
        paper = self.get_object()
    except Exception:
        paper = Paper.objects.get(id=request.data["id"])

    paper.is_removed = False
    paper.save()
    paper.reset_cache(use_celery=False)

    hub_ids = paper.hubs.values_list("id", flat=True)
    reset_unified_document_cache(
        hub_ids,
        filters=[TRENDING, TOP, DISCUSSED, NEWEST],
        document_type=["all", "paper"],
    )
    return Response(self.get_serializer(instance=paper).data, status=200)
def censor(self, request, pk=None):
    hub = self.get_object()

    # Remove Papers with no other hubs
    Paper.objects.annotate(
        cnt=Count('hubs', filter=Q(hubs__is_removed=False))
    ).filter(
        cnt__lte=1, hubs__id=hub.id
    ).update(is_removed=True)

    # Update Hub
    hub.is_removed = True
    hub.paper_count = hub.get_paper_count()
    hub.discussion_count = hub.get_discussion_count()
    hub.save(
        update_fields=['is_removed', 'paper_count', 'discussion_count']
    )

    reset_unified_document_cache(with_default_hub=True)
    return Response(self.get_serializer(instance=hub).data, status=200)
def downvote(self, request, pk=None):
    paper = self.get_object()
    hub_ids = list(paper.hubs.values_list("id", flat=True))
    user = request.user

    vote_exists = find_vote(user, paper, Vote.DOWNVOTE)
    if vote_exists:
        return Response(
            "This vote already exists",
            status=status.HTTP_400_BAD_REQUEST,
        )

    response = update_or_create_vote(request, user, paper, Vote.DOWNVOTE)
    reset_unified_document_cache(
        hub_ids,
        filters=[TRENDING, TOP],
        document_type=["all", "paper"],
    )
    paper.reset_cache()
    return response
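# find_vote is referenced above but not defined here. A minimal sketch of
# the duplicate-vote check it appears to perform, assuming paper votes are
# a plain FK model as in the Vote.objects.create(paper=..., ...) call used
# elsewhere in this file; the exact field names are assumptions.
def find_vote_sketch(user, paper, vote_type):
    # True if this user already cast this vote type on this paper.
    return Vote.objects.filter(
        paper=paper,
        created_by=user,
        vote_type=vote_type,
    ).exists()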
def reinstate_user_task(user_id):
    User = apps.get_model('user.User')
    ResearchhubUnifiedDocument = apps.get_model(
        'researchhub_document.ResearchhubUnifiedDocument'
    )
    user = User.objects.get(id=user_id)

    papers = Paper.objects.filter(uploaded_by=user)
    papers.update(is_removed=False)
    user.paper_votes.update(is_removed=False)
    ResearchhubUnifiedDocument.objects.filter(paper__in=papers).update(
        is_removed=False
    )

    hub_ids = list(
        Hub.objects.filter(
            papers__in=list(user.papers.values_list('id', flat=True))
        ).values_list('id', flat=True).distinct()
    )

    # Update discussions
    for thr in Thread.objects.filter(created_by=user):
        thr.update_discussion_count()
        thr.is_removed = False
        thr.save()
    for com in Comment.objects.filter(created_by=user):
        com.update_discussion_count()
        com.is_removed = False
        com.save()
    for rep in Reply.objects.filter(created_by=user):
        rep.update_discussion_count()
        rep.is_removed = False
        rep.save()

    reset_unified_document_cache(hub_ids)
def censor(self, request, pk=None):
    paper = self.get_object()
    paper_id = paper.id
    unified_doc = paper.unified_document

    cache_key = get_cache_key("paper", paper_id)
    cache.delete(cache_key)
    hub_ids = list(paper.hubs.values_list("id", flat=True))

    content_id = f"{type(paper).__name__}_{paper_id}"
    user = request.user
    content_creator = paper.uploaded_by
    if content_creator:
        events_api.track_flag_content(content_creator, content_id, user.id)
        decisions_api.apply_bad_content_decision(
            content_creator, content_id, "MANUAL_REVIEW", user
        )
        decisions_api.apply_bad_user_decision(
            content_creator, "MANUAL_REVIEW", user
        )

    Contribution.objects.filter(unified_document=unified_doc).delete()

    paper.is_removed = True
    paper.save()
    censored_paper_cleanup.apply_async((paper_id,), priority=3)

    unified_document = paper.unified_document
    unified_document.is_removed = True
    unified_document.save()

    reset_unified_document_cache(
        hub_ids,
        filters=[TRENDING, TOP, DISCUSSED, NEWEST],
        document_type=["all", "paper"],
        with_default_hub=True,
    )
    return Response("Paper was deleted.", status=200)
def create_researchhub_post(self, request):
    try:
        data = request.data
        created_by = request.user
        document_type = data.get('document_type')
        editor_type = data.get('editor_type')
        authors = data.get('authors', [])
        note_id = data.get('note_id', None)
        title = data.get('title', '')
        assign_doi = data.get('assign_doi', False)
        peer_review_is_requested = data.get('request_peer_review', False)
        is_discussion = document_type == DISCUSSION
        doi = generate_doi() if assign_doi else None

        if assign_doi and created_by.get_balance() - CROSSREF_DOI_RSC_FEE < 0:
            return Response('Insufficient Funds', status=402)

        # logical ordering & not using signals to avoid race-conditions
        access_group = self.create_access_group(request)
        unified_document = self.create_unified_doc(request)
        if access_group is not None:
            unified_document.access_groups = access_group
            unified_document.save()

        rh_post = ResearchhubPost.objects.create(
            created_by=created_by,
            document_type=document_type,
            doi=doi,
            editor_type=CK_EDITOR if editor_type is None else editor_type,
            note_id=note_id,
            prev_version=None,
            preview_img=data.get('preview_img'),
            renderable_text=data.get('renderable_text'),
            title=title,
            unified_document=unified_document,
        )
        file_name = f'RH-POST-{document_type}-USER-{created_by.id}.txt'
        full_src_file = ContentFile(data['full_src'].encode())
        rh_post.authors.set(authors)

        if not TESTING:
            if is_discussion:
                rh_post.discussion_src.save(file_name, full_src_file)
            else:
                rh_post.eln_src.save(file_name, full_src_file)

        hub_ids = list(unified_document.hubs.values_list('id', flat=True))
        reset_unified_document_cache(
            hub_ids,
            document_type=['all', 'posts'],
            filters=[NEWEST],
            with_default_hub=True,
        )

        if assign_doi:
            crossref_response = register_doi(created_by, title, doi, rh_post)
            if crossref_response.status_code != 200:
                return Response('Crossref API Failure', status=400)
            charge_doi_fee(created_by, rh_post)

        if peer_review_is_requested and note_id:
            request_peer_review(
                request=request,
                requested_by=request.user,
                post=rh_post,
            )

        return Response(ResearchhubPostSerializer(rh_post).data, status=200)
    except (KeyError, TypeError) as exception:
        log_error(exception)
        return Response(str(exception), status=400)
def create(self, validated_data):
    request = self.context.get("request", None)
    if request:
        user = request.user
    else:
        user = None
    validated_data["uploaded_by"] = user

    if "url" in validated_data or "pdf_url" in validated_data:
        error = Exception("URL uploading is deprecated")
        sentry.log_error(error)
        raise error

    # Prepare validated_data by removing m2m
    authors = validated_data.pop("authors")
    hubs = validated_data.pop("hubs")
    hypothesis_id = validated_data.pop("hypothesis_id", None)
    citation_type = validated_data.pop("citation_type", None)
    file = validated_data.get("file")

    try:
        with transaction.atomic():
            # Temporary fix for updating read only fields
            # Not including file, pdf_url, and url because
            # those fields are processed
            for read_only_field in self.Meta.read_only_fields:
                if read_only_field in validated_data:
                    validated_data.pop(read_only_field, None)

            # valid_doi = self._check_valid_doi(validated_data)
            # if not valid_doi:
            #     raise IntegrityError('DETAIL: Invalid DOI')

            self._add_url(file, validated_data)
            self._clean_abstract(validated_data)
            self._add_raw_authors(validated_data)

            # It is important to note that paper signals
            # are ran after call to super
            paper = super(PaperSerializer, self).create(validated_data)
            paper.full_clean(exclude=["paper_type"])

            unified_doc = paper.unified_document
            unified_doc_id = paper.unified_document.id
            if hypothesis_id:
                self._add_citation(
                    user, hypothesis_id, unified_doc, citation_type
                )

            paper_id = paper.id
            paper_title = paper.paper_title or ""
            file = paper.file
            self._check_pdf_title(paper, paper_title, file)

            # NOTE: calvinhlee - This is an antipattern. Look into changing
            Vote.objects.create(
                paper=paper, created_by=user, vote_type=Vote.UPVOTE
            )

            # Now add m2m values properly
            if validated_data["paper_type"] == Paper.PRE_REGISTRATION:
                paper.authors.add(user.author_profile)

            # TODO: Do we still need add authors from the request content?
            paper.authors.add(*authors)
            self._add_orcid_authors(paper)

            paper.hubs.add(*hubs)
            for hub in hubs:
                hub.paper_count = hub.get_paper_count()
                hub.save(update_fields=["paper_count"])

            try:
                self._add_file(paper, file)
            except Exception as e:
                sentry.log_error(e)

            paper.set_paper_completeness()

            # Fix adding references
            # self._add_references(paper)

            paper.pdf_license = paper.get_license(save=False)
            update_unified_document_to_paper(paper)

            tracked_paper = events_api.track_content_paper(
                user, paper, request
            )
            update_user_risk_score(user, tracked_paper)

            create_contribution.apply_async(
                (
                    Contribution.SUBMITTER,
                    {"app_label": "paper", "model": "paper"},
                    user.id,
                    unified_doc_id,
                    paper_id,
                ),
                priority=3,
                countdown=10,
            )
            celery_calculate_paper_twitter_score.apply_async(
                (paper_id,), priority=5, countdown=10
            )

            hub_ids = unified_doc.hubs.values_list("id", flat=True)
            if hub_ids.exists():
                reset_unified_document_cache(
                    hub_ids,
                    document_type=["paper", "all"],
                    filters=[NEWEST],
                    with_default_hub=True,
                )
            return paper
    except IntegrityError as e:
        sentry.log_error(e)
        raise e
    except Exception as e:
        error = PaperSerializerError(e, "Failed to create paper")
        sentry.log_error(error, base_error=error.trigger)
        raise error
def preload_homepage_feed():
    from researchhub_document.utils import (
        reset_unified_document_cache,
    )

    reset_unified_document_cache(with_default_hub=True)
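# preload_homepage_feed and preload_hub_feeds (above) only pay off if they
# run on a schedule so users hit a warm cache. How they are wired up is not
# shown in this file; the beat entry below is a sketch under the assumption
# that both are registered as celery tasks. The module path and cadences
# are hypothetical.
CELERY_BEAT_SCHEDULE = {
    'preload-homepage-feed': {
        'task': 'researchhub_document.tasks.preload_homepage_feed',
        'schedule': 60 * 10,  # every 10 minutes (assumed cadence)
    },
    'preload-hub-feeds': {
        'task': 'researchhub_document.tasks.preload_hub_feeds',
        'schedule': 60 * 30,  # every 30 minutes (assumed cadence)
    },
}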
def update(self, instance, validated_data):
    request = self.context.get("request", None)
    authors = validated_data.pop("authors", [None])
    hubs = validated_data.pop("hubs", [None])
    raw_authors = validated_data.pop("raw_authors", [])
    file = validated_data.get("file", None)

    try:
        with transaction.atomic():
            # Temporary fix for updating read only fields
            # Not including file, pdf_url, and url because
            # those fields are processed
            read_only_fields = (
                self.Meta.read_only_fields
                + self.Meta.patch_read_only_fields
            )
            for read_only_field in read_only_fields:
                if read_only_field in validated_data:
                    validated_data.pop(read_only_field, None)

            self._add_url(file, validated_data)
            self._clean_abstract(validated_data)

            paper = super(PaperSerializer, self).update(
                instance, validated_data
            )
            paper.full_clean(exclude=["paper_type"])

            unified_doc = paper.unified_document
            paper_title = paper.paper_title or ""
            file = paper.file
            self._check_pdf_title(paper, paper_title, file)

            new_hubs = []
            remove_hubs = []
            if hubs:
                current_hubs = paper.hubs.all()
                for current_hub in current_hubs:
                    if current_hub not in hubs:
                        remove_hubs.append(current_hub)
                for hub in hubs:
                    if hub not in current_hubs:
                        new_hubs.append(hub)
                paper.hubs.remove(*remove_hubs)
                paper.hubs.add(*hubs)
                unified_doc.hubs.remove(*remove_hubs)
                unified_doc.hubs.add(*hubs)
                for hub in remove_hubs:
                    hub.paper_count = hub.get_paper_count()
                    hub.save(update_fields=["paper_count"])
                for hub in new_hubs:
                    hub.paper_count = hub.get_paper_count()
                    hub.save(update_fields=["paper_count"])

            if authors:
                current_authors = paper.authors.all()
                remove_authors = []
                for author in current_authors:
                    if author not in authors:
                        remove_authors.append(author)
                new_authors = []
                for author in authors:
                    if author not in current_authors:
                        new_authors.append(author)
                paper.authors.remove(*remove_authors)
                paper.authors.add(*new_authors)

            paper.set_paper_completeness()
            if file:
                self._add_file(paper, file)

            updated_hub_ids = list(
                map(lambda hub: hub.id, remove_hubs + new_hubs)
            )
            if len(updated_hub_ids) > 0:
                reset_unified_document_cache(
                    hub_ids=updated_hub_ids,
                    document_type=["paper", "all"],
                    filters=[NEWEST, TOP, TRENDING, DISCUSSED],
                    with_default_hub=True,
                )

            if request:
                tracked_paper = events_api.track_content_paper(
                    request.user, paper, request, update=True
                )
                update_user_risk_score(request.user, tracked_paper)

            return paper
    except Exception as e:
        error = PaperSerializerError(e, "Failed to update paper")
        sentry.log_error(e, base_error=error.trigger)
        raise error
def create(self, request):
    user = request.user
    data = request.data
    amount = data['amount']
    purchase_method = data['purchase_method']
    purchase_type = data['purchase_type']
    content_type_str = data['content_type']
    object_id = data['object_id']
    transfer_rsc = False
    recipient = None

    if content_type_str not in self.ALLOWED_CONTENT_TYPES:
        return Response(status=400)

    if purchase_method not in (Purchase.OFF_CHAIN, Purchase.ON_CHAIN):
        return Response(status=400)

    decimal_amount = decimal.Decimal(amount)
    if decimal_amount <= 0:
        return Response(status=400)

    content_type = ContentType.objects.get(model=content_type_str)
    with transaction.atomic():
        if purchase_method == Purchase.ON_CHAIN:
            purchase = Purchase.objects.create(
                user=user,
                content_type=content_type,
                object_id=object_id,
                purchase_method=purchase_method,
                purchase_type=purchase_type,
                amount=amount,
            )
        else:
            user_balance = user.get_balance()
            if user_balance - decimal_amount < 0:
                return Response('Insufficient Funds', status=402)

            purchase = Purchase.objects.create(
                user=user,
                content_type=content_type,
                object_id=object_id,
                purchase_method=purchase_method,
                purchase_type=purchase_type,
                amount=amount,
                paid_status=Purchase.PAID,
            )
            source_type = ContentType.objects.get_for_model(purchase)
            Balance.objects.create(
                user=user,
                content_type=source_type,
                object_id=purchase.id,
                amount=f'-{amount}',
            )

        purchase_hash = purchase.hash()
        purchase.purchase_hash = purchase_hash
        purchase_boost_time = purchase.get_boost_time(amount)
        purchase.boost_time = purchase_boost_time
        purchase.group = purchase.get_aggregate_group()
        purchase.save()

        item = purchase.item
        context = {
            'purchase_minimal_serialization': True,
            'exclude_stats': True,
        }

        # transfer_rsc is set each time just in case we want
        # to disable rsc transfer for a specific item
        if content_type_str == 'paper':
            paper = Paper.objects.get(id=object_id)
            unified_doc = paper.unified_document
            paper.calculate_hot_score()
            recipient = paper.uploaded_by
            cache_key = get_cache_key('paper', object_id)
            cache.delete(cache_key)
            transfer_rsc = True

            hub_ids = paper.hubs.values_list('id', flat=True)
            reset_unified_document_cache(
                hub_ids,
                document_type=['all', 'paper'],
                filters=[TRENDING],
            )
        elif content_type_str == 'thread':
            transfer_rsc = True
            recipient = item.created_by
            unified_doc = item.unified_document
        elif content_type_str == 'comment':
            transfer_rsc = True
            unified_doc = item.unified_document
            recipient = item.created_by
        elif content_type_str == 'reply':
            transfer_rsc = True
            unified_doc = item.unified_document
            recipient = item.created_by
        elif content_type_str == 'summary':
            transfer_rsc = True
            recipient = item.proposed_by
            unified_doc = item.paper.unified_document
        elif content_type_str == 'bulletpoint':
            transfer_rsc = True
            recipient = item.created_by
            unified_doc = item.paper.unified_document
        elif content_type_str == 'researchhubpost':
            transfer_rsc = True
            recipient = item.created_by
            unified_doc = item.unified_document

            hub_ids = unified_doc.hubs.values_list('id', flat=True)
            reset_unified_document_cache(
                hub_ids,
                document_type=['all', 'posts'],
                filters=[TRENDING],
            )

        if unified_doc.is_removed:
            return Response('Content is removed', status=403)

        if transfer_rsc and recipient and recipient != user:
            distribution = create_purchase_distribution(amount)
            distributor = Distributor(
                distribution, recipient, purchase, time.time()
            )
            distributor.distribute()

    serializer = self.serializer_class(purchase, context=context)
    serializer_data = serializer.data

    if recipient and user:
        self.send_purchase_notification(purchase, unified_doc, recipient)
        self.send_purchase_email(purchase, recipient, unified_doc)

    create_contribution.apply_async(
        (
            Contribution.SUPPORTER,
            {'app_label': 'purchase', 'model': 'purchase'},
            user.id,
            unified_doc.id,
            purchase.id,
        ),
        priority=2,
        countdown=10,
    )
    return Response(serializer_data, status=201)
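# Example payload for the purchase view above, as a sketch: the route name
# and the concrete values for purchase_method/purchase_type are assumptions
# based only on the fields the view reads, not a documented API contract.
#
#   POST /api/purchase/
#   {
#       "amount": "50",
#       "purchase_method": "OFF_CHAIN",
#       "purchase_type": "BOOST",
#       "content_type": "paper",
#       "object_id": 12345
#   }
#
# An off-chain purchase like this debits the buyer's RSC balance, boosts the
# target item, and resets the relevant feed caches so the boost is visible.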