def test_nested_empty_json_object(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name='name',
         nested_json_object=JSONObject(),
     )).first()
     self.assertEqual(obj.json_object, {
         'name': 'Ivan Ivanov',
         'nested_json_object': {},
     })
 def test_nested_empty_json_object(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name="name",
         nested_json_object=JSONObject(),
     )).first()
     self.assertEqual(
         obj.json_object,
         {
             "name": "Ivan Ivanov",
             "nested_json_object": {},
         },
     )
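The Author model and fixture data behind these assertions are not shown on this page. A minimal sketch that would satisfy them, with field names and values inferred from the assertions themselves (treat them as assumptions), might look like this:

# Hypothetical model and fixture inferred from the assertions above.
from django.db import models

class Author(models.Model):
    name = models.CharField(max_length=50)
    alias = models.CharField(max_length=50, null=True, blank=True)
    goes_by = models.CharField(max_length=50, null=True, blank=True)
    age = models.PositiveSmallIntegerField()

# In setUpTestData / setUp:
Author.objects.create(name='Ivan Ivanov', alias='iivanov', age=30)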
Example #3
 def test_annotated_array_subquery_with_json_objects(self):
     inner_qs = NullableIntegerArrayModel.objects.exclude(
         pk=models.OuterRef('pk')).values(
             json=JSONObject(order='order', field='field'))
     siblings_json = NullableIntegerArrayModel.objects.annotate(
         siblings_json=ArraySubquery(inner_qs), ).values_list(
             'siblings_json', flat=True).get(order=1)
     self.assertSequenceEqual(
         siblings_json,
         [
             {
                 'field': [2],
                 'order': 2
             },
             {
                 'field': [2, 3],
                 'order': 3
             },
             {
                 'field': [20, 30, 40],
                 'order': 4
             },
             {
                 'field': None,
                 'order': 5
             },
         ],
     )
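ArraySubquery (from django.contrib.postgres.expressions) aggregates the single JSON column produced by the inner queryset into a PostgreSQL array, so each sibling row becomes one JSON object in the result. The test model is not shown here; a sketch of fields consistent with the expected output (an assumption, not taken from this page) would be:

# Hypothetical model consistent with the assertion above.
from django.contrib.postgres.fields import ArrayField
from django.db import models

class NullableIntegerArrayModel(models.Model):
    order = models.IntegerField(null=True)
    field = ArrayField(models.IntegerField(), null=True, blank=True)

# The expected output implies rows with order 2..5 holding the array values
# shown above, plus the order=1 row that receives the annotation.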
 def test_nested_json_object(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name='name',
         nested_json_object=JSONObject(
             alias='alias',
             age='age',
         ),
     )).first()
     self.assertEqual(
         obj.json_object, {
             'name': 'Ivan Ivanov',
             'nested_json_object': {
                 'alias': 'iivanov',
                 'age': 30,
             },
         })
Example #5
 def get_for_gq(cls, user, only_member=False):
     """
      Used by the GraphQL schema
     """
     current_user_role_subquery = models.Subquery(
         ProjectMembership.objects.filter(
             project=models.OuterRef('pk'),
             member=user,
         ).order_by('role__level').values('role__type')[:1],
         output_field=models.CharField(),
     )
     current_user_membership_data_subquery = JSONObject(
         user_id=models.Value(user.id),
         role=models.F('current_user_role'),
         badges=models.Subquery(
             ProjectMembership.objects.filter(
                 project=models.OuterRef('pk'),
                 member=user,
             ).order_by('badges').values('badges')[:1],
             output_field=ArrayField(models.CharField()),
         ),
     )
     visible_projects = cls.objects\
         .annotate(
              # For use within query filters
              current_user_role=current_user_role_subquery,
          ).annotate(
              # NOTE: This is used by the permission module
              current_user_membership_data=current_user_membership_data_subquery,
              # NOTE: Exclude if the project is private and the user is not a member
         ).exclude(is_private=True, current_user_role__isnull=True)
     if only_member:
         return visible_projects.filter(current_user_role__isnull=False)
     return visible_projects
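A brief usage sketch; the Project model name and the GraphQL resolver are assumptions and not part of the snippet above:

# Hypothetical resolver that relies on get_for_gq() for access control.
def resolve_projects(root, info, only_member=False):
    return Project.get_for_gq(info.context.user, only_member=only_member)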
 def test_textfield(self):
     Article.objects.create(
         title="The Title",
         text="x" * 4000,
         written=timezone.now(),
     )
     obj = Article.objects.annotate(json_object=JSONObject(
         text=F("text"))).first()
     self.assertEqual(obj.json_object, {"text": "x" * 4000})
 def test_nested_json_object(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name="name",
         nested_json_object=JSONObject(
             alias="alias",
             age="age",
         ),
     )).first()
     self.assertEqual(
         obj.json_object,
         {
             "name": "Ivan Ivanov",
             "nested_json_object": {
                 "alias": "iivanov",
                 "age": 30,
             },
         },
     )
 def test_textfield(self):
     Article.objects.create(
         title='The Title',
         text='x' * 4000,
         written=timezone.now(),
     )
     obj = Article.objects.annotate(json_object=JSONObject(
         text=F('text'))).first()
     self.assertEqual(obj.json_object, {'text': 'x' * 4000})
Example #9
    def geography(self, request):
        """
        SIG-3988

        To speed up the generation of the GeoJSON we now skip DRF serializers and construct the response
        in the database, using the same type of query as the PublicSignalMapViewSet.list method.

        However, we are not writing a raw query; instead we build the query with the Django ORM and
        annotate/aggregate the result in the database. This way we keep all the benefits of the SignalFilterSet and
        the 'filter_for_user' functionality AND gain all the performance of skipping DRF and letting the database
        generate the GeoJSON.
        """
        # Annotate Signal queryset with GeoJSON features and filter it according
        # to "Signalen" project access rules:
        features_qs = self.filter_queryset(
            self.geography_queryset.annotate(
                feature=JSONObject(
                    type=Value('Feature', output_field=CharField()),
                    geometry=AsGeoJSON('location__geometrie'),
                    properties=JSONObject(
                        id='id',
                        created_at='created_at',
                    ),
                )
            ).filter_for_user(
                user=request.user
            )
        )

        # Paginate our queryset and turn it into a GeoJSON feature collection:
        headers = []
        feature_collection = {'type': 'FeatureCollection', 'features': []}
        paginator = LinkHeaderPaginationForQuerysets(page_query_param='geopage', page_size=SIGNALS_API_GEO_PAGINATE_BY)
        page_qs = paginator.paginate_queryset(features_qs, self.request, view=self)

        if page_qs is not None:
            features = page_qs.aggregate(features=JSONAgg('feature'))
            feature_collection.update(features)
            headers = paginator.get_pagination_headers()

        return Response(feature_collection, headers=headers)
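For reference, the aggregated response is an ordinary GeoJSON FeatureCollection. Below is an illustrative sketch of the shape this endpoint is intended to return; the values are made up, and whether geometry arrives as a nested object or a JSON-encoded string depends on how AsGeoJSON and JSONObject combine on the backend.

# Illustrative response shape; coordinates, id and timestamp are fabricated.
feature_collection = {
    'type': 'FeatureCollection',
    'features': [
        {
            'type': 'Feature',
            'geometry': {'type': 'Point', 'coordinates': [4.9, 52.37]},
            'properties': {'id': 1, 'created_at': '2021-01-01T00:00:00+00:00'},
        },
    ],
}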
 def for_activity_stream(self):
     """
     Convert a queryset into a form suitable for serialisation in the activity stream feed.
     """
     fields = self.model._meta.get_fields()
     # Find all fields that are foreign keys or one-to-one keys, as their values will be adjusted on serialisation
     # to match the `id` values required by Activity Stream.
     # NOTE: check other relations, such as ManyToMany, just in case any show up in future models.
     foreign_key_fields = [
         [field.name, field.related_model.__name__] for field in fields
         if field.is_relation and (field.many_to_one or field.one_to_one)
     ]
      # Get the names of all non-relation fields plus foreign-key/one-to-one fields so they can be serialised by the database.
     field_names = [
         field.name for field in fields
         if not field.is_relation or (field.many_to_one or field.one_to_one)
     ]
     # `JSONObject` expects a dict of JSON names mapped to field names;
     # we just want all fields to have the same name in JSON as they do in the DB.
     json_object_kwargs = dict(zip(field_names, field_names))
     # Add the raw ID value for use in Data Flow,
     # as we modify it when serialising to match Activity Stream's definition of ID
     json_object_kwargs["pk"] = json_object_kwargs["id"]
     return (
         self
          # Get the DB to serialise all of the selected fields to JSON;
         .annotate(json=JSONObject(**json_object_kwargs))
         # Add the list of foreign key field names for later handling as noted above;
         .annotate(foreign_keys=JSONObject(keys=Value(foreign_key_fields)))
         # Add the model class name, as we need that info at feed serialisation time
         # so we can make sense of the foreign key field names
         .annotate(object_type=Value(self.model.__name__))
         # Finally, select the values we want to have in the dict that will ultimately be serialised.
         # This ensures that all querysets present the same model structure,
          # which is a requirement for constructing a union of all the querysets,
         # which in turn is necessary for delivering a feed containing all objects
         # ordered by their `last_modified` timestamps
         # without pulling everything into memory and processing it all there.
         .values("id", "last_modified", "json", "foreign_keys", "object_type")
     )
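Each row of the resulting .values() queryset is a plain dict. An illustrative sketch of one row, using a hypothetical model with a single foreign key (nothing here is taken from the page):

# Illustrative row shape; the model, the values and the 'owner' FK are made up.
from datetime import datetime, timezone

row = {
    'id': 42,
    'last_modified': datetime(2021, 1, 1, tzinfo=timezone.utc),   # native datetime from .values()
    'json': {'id': 42, 'pk': 42, 'name': 'Example',
             'last_modified': '2021-01-01T00:00:00+00:00'},       # JSON built by the database
    'foreign_keys': {'keys': [['owner', 'User']]},
    'object_type': 'ExampleModel',
}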
Example #11
 def test_expressions(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name=Lower('name'),
         alias='alias',
         goes_by='goes_by',
         salary=Value(30000.15),
         age=F('age') * 2,
     )).first()
     self.assertEqual(
         obj.json_object, {
             'name': 'ivan ivanov',
             'alias': 'iivanov',
             'goes_by': None,
             'salary': 30000.15,
             'age': 60,
         })
Example #12
    def annotate_config_context_data(self):
        """
        Attach the subquery annotation to the base queryset.

        The ORDER BY clause in the subquery is not guaranteed to be respected within the aggregated JSON array, which is
        why we include "weight" and "name" in the result so that we can sort it in Python to ensure correctness.
        """
        from nautobot.extras.models import ConfigContext

        return self.annotate(
            config_context_data=Subquery(
                ConfigContext.objects.filter(
                    self._get_config_context_filters()
                ).order_by("weight", "name").annotate(
                    _data=EmptyGroupByJSONBAgg(
                        JSONObject(
                            data=F("data"),
                            name=F("name"),
                            weight=F("weight"),
                        )
                    )
                ).values("_data")
            )
        ).distinct()
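As the docstring notes, the ordering inside the aggregated JSONB array is not guaranteed, so callers are expected to re-sort in Python. A minimal sketch of that post-processing (the queryset variable and the merge step are assumptions, not taken from this page):

# Hypothetical consumer: re-sort the aggregated contexts by (weight, name),
# then merge their data so that later (heavier) contexts win.
obj = queryset.annotate_config_context_data().first()
ordered = sorted(obj.config_context_data or [], key=lambda c: (c["weight"], c["name"]))
merged = {}
for context in ordered:
    merged.update(context["data"])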
 def test_expressions(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name=Lower("name"),
         alias="alias",
         goes_by="goes_by",
         salary=Value(30000.15),
         age=F("age") * 2,
     )).first()
     self.assertEqual(
         obj.json_object,
         {
             "name": "ivan ivanov",
             "alias": "iivanov",
             "goes_by": None,
             "salary": 30000.15,
             "age": 60,
         },
     )
Example #14
 def test_annotated_array_subquery_with_json_objects(self):
     inner_qs = NullableIntegerArrayModel.objects.exclude(
         pk=models.OuterRef("pk")
     ).values(json=JSONObject(order="order", field="field"))
     siblings_json = (
         NullableIntegerArrayModel.objects.annotate(
             siblings_json=ArraySubquery(inner_qs),
         )
         .values_list("siblings_json", flat=True)
         .get(order=1)
     )
     self.assertSequenceEqual(
         siblings_json,
         [
             {"field": [2], "order": 2},
             {"field": [2, 3], "order": 3},
             {"field": [20, 30, 40], "order": 4},
             {"field": None, "order": 5},
         ],
     )
 def test_basic(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name="name")).first()
     self.assertEqual(obj.json_object, {"name": "Ivan Ivanov"})
Example #16
 def test_empty(self):
     obj = Author.objects.annotate(json_object=JSONObject()).first()
     self.assertEqual(obj.json_object, {})
Example #17
    def geography(self, request) -> Response:
        """
        Returns GeoJSON for all Signals that are in an "Open" state and in a publicly accessible category.
        Additional filtering can be done by adding query parameters.
        """
        qs = self.get_queryset()

        if request.query_params.get('group_by', '').lower() == 'category':
            # Group by category and return the oldest signal created_at date
            qs = qs.values('category_assignment__category_id').annotate(
                created_at=Min('created_at'))

        queryset = self.filter_queryset(
            qs.annotate(
                # Transform the output of the query to GeoJSON in the database.
                # This is much faster than using a DRF Serializer.
                feature=JSONObject(
                    type=Value('Feature', output_field=CharField()),
                    geometry=AsGeoJSON('location__geometrie'),
                    properties=JSONObject(
                        category=JSONObject(
                            # Return the category public_name. If the public_name is empty, return the category name
                            # name=Coalesce('category_assignment__category__public_name',
                            #               'category_assignment__category__name'),
                            name=Case(
                                When(
                                    category_assignment__category__public_name__exact='',
                                    then='category_assignment__category__name',
                                ),
                                When(
                                    category_assignment__category__public_name__isnull=True,
                                    then='category_assignment__category__name',
                                ),
                                default='category_assignment__category__public_name',
                                output_field=CharField(),
                            )),
                        # Creation date of the Signal
                        created_at='created_at',
                    ),
                ))
        ).exclude(
            # Only signals that are in an "Open" state
            status__state__in=[
                AFGEHANDELD, AFGEHANDELD_EXTERN, GEANNULEERD,
                VERZOEK_TOT_HEROPENEN
            ],

            # Only Signals that are in publicly accessible categories
            category_assignment__category__is_public_accessible=False,
        )

        # Paginate our queryset and turn it into a GeoJSON feature collection:
        headers = []
        feature_collection = {'type': 'FeatureCollection', 'features': []}
        paginator = LinkHeaderPaginationForQuerysets(
            page_query_param='geopage', page_size=SIGNALS_API_GEO_PAGINATE_BY)
        page_qs = paginator.paginate_queryset(queryset,
                                              self.request,
                                              view=self)

        if page_qs is not None:
            features = page_qs.aggregate(features=JSONAgg('feature'))
            feature_collection.update(features)
            headers = paginator.get_pagination_headers()

        return Response(feature_collection, headers=headers)
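The commented-out Coalesce in the category block above only covers NULL public names, which is presumably why the snippet uses Case/When to also handle the empty string. An equivalent, more compact expression (a sketch, not taken from this page) combines NullIf with Coalesce:

# Sketch: treat '' as missing via NullIf, then fall back with Coalesce.
from django.db.models import Value
from django.db.models.functions import Coalesce, NullIf

category_name = Coalesce(
    NullIf('category_assignment__category__public_name', Value('')),
    'category_assignment__category__name',
)
# ...and inside the feature: category=JSONObject(name=category_name)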
Example #18
    def annotate_for_analysis_summary(cls, project_id, queryset, user):
        """
        This is used by AnalysisSummarySerializer and AnalysisViewSet.get_summary
        """
        # NOTE: Use the entries and leads in the project for the total entry and lead counts at the analysis level
        total_sources = Lead.objects\
            .filter(project=project_id)\
            .annotate(entries_count=models.Count('entry'))\
            .filter(entries_count__gt=0)\
            .count()
        total_entries = Entry.objects.filter(project=project_id).count()

        # Prefetch for AnalysisSummaryPillarSerializer.
        analysispillar_prefetch = models.Prefetch(
            'analysispillar_set',
            queryset=(AnalysisPillar.objects.select_related(
                'assignee',
                'assignee__profile',
          ).annotate(
              dragged_entries=models.functions.Coalesce(
                  models.Subquery(
                      AnalyticalStatement.objects.filter(
                          analysis_pillar=models.OuterRef('pk'),
                      ).order_by().values('analysis_pillar').annotate(
                          count=models.Count(
                              'entries',
                              distinct=True,
                              filter=models.Q(
                                  entries__lead__published_on__lte=models.OuterRef('analysis__end_date'),
                              ),
                          ),
                      ).values('count')[:1],
                      output_field=models.IntegerField(),
                  ),
                  0,
              ),
              discarded_entries=models.functions.Coalesce(
                  models.Subquery(
                      DiscardedEntry.objects.filter(
                          analysis_pillar=models.OuterRef('pk'),
                      ).order_by().values('analysis_pillar').annotate(
                          count=models.Count(
                              'entry',
                              distinct=True,
                              filter=models.Q(
                                  entry__lead__published_on__lte=models.OuterRef('analysis__end_date'),
                              ),
                          ),
                      ).values('count')[:1],
                      output_field=models.IntegerField(),
                  ),
                  0,
              ),
              analyzed_entries=models.F('dragged_entries') + models.F('discarded_entries'),
          )),
        )

      publication_date_subquery = models.Subquery(
          AnalyticalStatementEntry.objects.filter(
              analytical_statement__analysis_pillar__analysis=models.OuterRef('pk'),
          ).order_by().values(
              'analytical_statement__analysis_pillar__analysis',
          ).annotate(
              published_on_min=models.Min('entry__lead__published_on'),
              published_on_max=models.Max('entry__lead__published_on'),
          ).annotate(
              publication_date=JSONObject(
                  start_date=models.F('published_on_min'),
                  end_date=models.F('published_on_max'),
              ),
          ).values('publication_date')[:1],
          output_field=models.JSONField(),
      )

      return queryset.select_related(
          'team_lead',
          'team_lead__profile',
      ).prefetch_related(
          analysispillar_prefetch,
      ).annotate(
          team_lead_name=models.F('team_lead__username'),
          total_entries=models.Value(total_entries, output_field=models.IntegerField()),
          total_sources=models.Value(total_sources, output_field=models.IntegerField()),
          publication_date=publication_date_subquery,
      )
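A usage sketch, assuming the classmethod lives on an Analysis model; the call site and field list are assumptions:

# Hypothetical call site, e.g. in a viewset's get_queryset()/get_summary():
qs = Analysis.annotate_for_analysis_summary(
    project_id=project.id,
    queryset=Analysis.objects.filter(project=project),
    user=request.user,
)
row = qs.values('id', 'team_lead_name', 'total_entries', 'total_sources', 'publication_date').first()
# 'publication_date' is a JSON object like {'start_date': ..., 'end_date': ...}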
Example #19
 def test_not_supported(self):
     msg = 'JSONObject() is not supported on this database backend.'
     with self.assertRaisesMessage(NotSupportedError, msg):
         Author.objects.annotate(json_object=JSONObject()).get()
Example #20
 def test_basic(self):
     obj = Author.objects.annotate(json_object=JSONObject(
         name='name')).first()
     self.assertEqual(obj.json_object, {'name': 'Ivan Ivanov'})