def notify_paper_submission_status(self, event):
        # Push the submission status update to the WebSocket client (frontend)
        extra_metadata = {}
        submission_id = event["id"]

        if "duplicate_ids" in event:
            duplicate_ids = event["duplicate_ids"]
            extra_metadata["duplicate_papers"] = self._get_duplicate_paper_data(
                duplicate_ids
            )

        submission = PaperSubmission.objects.get(id=submission_id)
        serialized_data = PaperSubmissionSerializer(submission).data
        current_paper_data = DynamicPaperSerializer(
            submission.paper, _include_fields=["id", "paper_title"]).data

        if "id" not in current_paper_data:
            current_paper_data["id"] = ""

        data = {
            "data": serialized_data,
            "current_paper": current_paper_data,
            **extra_metadata,
        }
        self.send(text_data=json.dumps(data))
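This handler has the shape of a Django Channels consumer method: the channel layer routes any event whose "type" is "notify_paper_submission_status" to it. A minimal sketch of how such an event might be dispatched, assuming a group name and helper function that are not part of the snippet above:

# Hypothetical sender; the group name "paper_submissions" and this
# helper function are assumptions, not code from the examples above.
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer


def broadcast_submission_status(submission_id, duplicate_ids=None):
    event = {
        # Routed to notify_paper_submission_status on subscribed consumers
        "type": "notify_paper_submission_status",
        "id": submission_id,
    }
    if duplicate_ids:
        event["duplicate_ids"] = duplicate_ids
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)("paper_submissions", event)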
Example #2
 def get_authored_papers(self, request, pk=None):
     author = self.get_object()
     prefetch_lookups = PaperViewSet.prefetch_lookups(self)
     authored_papers = (
         author.authored_papers.filter(is_removed=False)
         .prefetch_related(*prefetch_lookups)
         .order_by("-score")
     )
     context = self._get_authored_papers_context()
     page = self.paginate_queryset(authored_papers)
     serializer = DynamicPaperSerializer(
         page,
         _include_fields=[
             "id",
             "abstract",
             "authors",
             "boost_amount",
             "file",
             "first_preview",
             "hubs",
             "paper_title",
             "score",
             "title",
             "uploaded_by",
             "uploaded_date",
             "url",
             "paper_publish_date",
             "slug",
         ],
         many=True,
         context=context,
     )
     response = self.get_paginated_response(serializer.data)
     return response
Example #3
    def get_user_contributions(self, request, pk=None):
        author = self.get_object()
        user = author.user

        if user:
            prefetch_lookups = PaperViewSet.prefetch_lookups(self)
            user_paper_uploads = user.papers.filter(
                is_removed=False).prefetch_related(*prefetch_lookups)
        else:
            user_paper_uploads = self.queryset.none()

        context = self._get_user_contributions_context()
        page = self.paginate_queryset(user_paper_uploads)
        serializer = DynamicPaperSerializer(
            page,
            _include_fields=[
                "id",
                "abstract",
                "boost_amount",
                "file",
                "hubs",
                "paper_title",
                "score",
                "title",
                "slug",
                "uploaded_by",
                "uploaded_date",
            ],
            many=True,
            context=context,
        )
        response = self.get_paginated_response(serializer.data)

        return response
Example #4
    def create(self, *args, **kwargs):
        data = self.request.data
        url = data.get("url", "")

        # Prepend http:// when the URL has no scheme
        parsed_url = urlparse(url)
        if not parsed_url.scheme:
            url = f"http://{parsed_url.geturl()}"
            data["url"] = url

        duplicate_papers = Paper.objects.filter(
            Q(url__icontains=url) | Q(pdf_url__icontains=url))
        if duplicate_papers:
            serializer = DynamicPaperSerializer(
                duplicate_papers,
                _include_fields=["doi", "id", "title", "url"],
                many=True,
            )
            duplicate_data = {"data": serializer.data}
            return Response(duplicate_data, status=status.HTTP_403_FORBIDDEN)

        data["uploaded_by"] = self.request.user.id
        response = super().create(*args, **kwargs)
        if response.status_code == 201:
            data = response.data
            celery_process_paper.apply_async(
                (data["id"], ),
                priority=1,
                countdown=3,
            )
        return response
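Both create endpoints queue `celery_process_paper` with the new paper id, a priority, and a short countdown. A minimal sketch of the task signature that the `apply_async((data["id"],), ...)` call implies, assuming a standard Celery app; the body is a placeholder:

# Hypothetical task definition; the Celery app instance and the
# processing steps in the body are assumptions.
from celery import Celery

app = Celery("researchhub")


@app.task
def celery_process_paper(paper_id):
    # e.g. fetch metadata, build previews, index the paper for search
    ...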
Example #5
    def get_paper(self, thread):
        from paper.serializers import DynamicPaperSerializer

        paper = thread.paper
        if not paper:
            return None

        context = self.context
        _context_fields = context.get('dis_dts_get_paper', {})

        serializer = DynamicPaperSerializer(paper,
                                            context=context,
                                            **_context_fields)
        return serializer.data
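The per-call serializer kwargs are read from the context under the 'dis_dts_get_paper' key, so whoever instantiates the enclosing thread serializer decides which paper fields come back. An illustrative context, with the field list chosen here as an assumption:

# Illustrative only; the enclosing serializer and field list are assumptions.
context = {
    "dis_dts_get_paper": {
        "_include_fields": ["id", "paper_title", "slug"],
    }
}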
Example #6
    def create_from_doi(self, request):
        data = request.data
        # TODO: Sanitize?
        doi = data.get("doi", None)

        # DOI validity check
        doi_url = urlparse(doi)
        doi_res = requests.post("https://dx.doi.org/",
                                data={"hdl": doi},
                                allow_redirects=False,
                                timeout=5)
        invalid_doi_res = Response(
            {"data": "Invalid DOI - Ensure it is in the form of '10.1000/abc123'"},
            status=status.HTTP_404_NOT_FOUND,
        )
        if doi_url.scheme or "doi.org" in doi:
            # Reject values that come in as a URL or as a full doi.org URL
            return invalid_doi_res
        elif doi_res.status_code == status.HTTP_404_NOT_FOUND:
            return invalid_doi_res

        # Duplicate DOI check
        duplicate_papers = Paper.objects.filter(doi__contains=doi)
        if duplicate_papers:
            serializer = DynamicPaperSerializer(
                duplicate_papers,
                _include_fields=["doi", "id", "title", "url"],
                many=True,
            )
            duplicate_data = {"data": serializer.data}
            return Response(duplicate_data, status=status.HTTP_403_FORBIDDEN)

        data["uploaded_by"] = request.user.id
        response = super().create(request)
        if response.status_code == 201:
            data = response.data
            celery_process_paper.apply_async(
                (data["id"], ),
                priority=1,
                countdown=3,
            )
        return response
Example #7
 def create(self, request, *args, **kwargs):
     try:
         doi = request.data.get("doi", "")
         duplicate_papers = Paper.objects.filter(doi=doi)
         if duplicate_papers:
             serializer = DynamicPaperSerializer(
                 duplicate_papers[:1],
                 _include_fields=["doi", "id", "title", "url"],
                 many=True,
             )
             duplicate_data = {"data": serializer.data}
             return Response(duplicate_data,
                             status=status.HTTP_403_FORBIDDEN)
         response = super().create(request, *args, **kwargs)
         return response
     except IntegrityError as e:
         return self._get_integrity_error_response(e)
     except PaperSerializerError as e:
         return Response(str(e), status=status.HTTP_400_BAD_REQUEST)

 def get_documents(self, unified_doc):
     context = self.context
     _context_fields = context.get('doc_duds_get_documents', {})
     doc_type = unified_doc.document_type
     if doc_type in [DISCUSSION, ELN]:
         return DynamicPostSerializer(
             unified_doc.posts,
             many=True,
             context=context,
             **_context_fields
         ).data
     elif doc_type == HYPOTHESIS:
         from hypothesis.serializers import DynamicHypothesisSerializer
         return DynamicHypothesisSerializer(
             unified_doc.hypothesis,
             context=context,
             **_context_fields
         ).data
     else:
         return DynamicPaperSerializer(
             unified_doc.paper,
             context=context,
             **_context_fields
         ).data

 def _get_duplicate_paper_data(self, ids):
     papers = Paper.objects.filter(id__in=ids)
     serializer = DynamicPaperSerializer(
         papers, many=True, _include_fields=["doi", "id", "title"])
     return serializer.data
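Every example trims the output with `_include_fields`, but the serializer itself is not shown. A sketch of how that kind of include-field whitelisting is commonly layered on DRF's ModelSerializer; the mixin name, import path, and Meta are illustrative, and the real DynamicPaperSerializer may differ:

# Sketch of a dynamic-fields pattern, not the actual implementation.
from rest_framework import serializers

from paper.models import Paper  # assumed module path


class DynamicFieldsMixin:
    def __init__(self, *args, _include_fields=None, **kwargs):
        super().__init__(*args, **kwargs)
        if _include_fields is not None:
            # Drop every declared field that is not explicitly whitelisted
            for field_name in set(self.fields) - set(_include_fields):
                self.fields.pop(field_name)


class DynamicPaperSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    class Meta:
        model = Paper
        fields = "__all__"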