Example #1
0
def get_path_fragment_loopback(source_device, address, all_links):
    """Resolve the device owning loopback ``lo0`` with ``address`` and
    describe every link connecting ``source_device`` to it.

    Returns a dict carrying a "&"-joined fragment id (one
    ``<connection_id>:<direction>`` token per link) and a human-readable
    details string, plus the destination device value — or ``None`` when
    the loopback cannot be resolved uniquely or no connecting links exist.
    """
    try:
        destination_device = Device.objects.get(
            physicalinterface__name="lo0",
            physicalinterface__logicalinterface__address=address)

        # Links living on aggregate (LAG) interfaces are grouped per
        # (device, aggregate) combination; member link ids are "_"-joined.
        aggregated = (all_links.filter(
            local_interface__aggregate_interface__isnull=False).values(
                "local_interface__device",
                "remote_interface__device",
                "local_interface__aggregate_interface",
                "remote_interface__aggregate_interface",
            ).annotate(connection_id=StringAgg(
                Cast("id", TextField()),
                delimiter="_",
                ordering="id",
                output_field=TextField(),
            )))

        # Plain links keep their own id as connection_id.
        standalone = all_links.filter(
            local_interface__aggregate_interface__isnull=True)

        standalone_same = standalone.filter(
            local_interface__device=source_device,
            remote_interface__device=destination_device).values(
                connection_id=Cast("id", TextField()),
                source_device_name=F("local_interface__device__name"),
                destination_device=F("remote_interface__device"),
                destination_device_name=F("remote_interface__device__name"),
                direction=Value("same", TextField()),
            )

        standalone_opposite = standalone.filter(
            remote_interface__device=source_device,
            local_interface__device=destination_device).values(
                connection_id=Cast("id", TextField()),
                source_device_name=F("remote_interface__device__name"),
                destination_device=F("local_interface__device"),
                destination_device_name=F("local_interface__device__name"),
                direction=Value("opposite", TextField()),
            )

        aggregated_same = aggregated.filter(
            local_interface__device=source_device,
            remote_interface__device=destination_device).values(
                "connection_id",
                source_device_name=F("local_interface__device__name"),
                destination_device=F("remote_interface__device"),
                destination_device_name=F("remote_interface__device__name"),
                direction=Value("same", TextField()),
            )

        aggregated_opposite = aggregated.filter(
            remote_interface__device=source_device,
            local_interface__device=destination_device).values(
                "connection_id",
                source_device_name=F("remote_interface__device__name"),
                destination_device=F("local_interface__device"),
                destination_device_name=F("local_interface__device__name"),
                direction=Value("opposite", TextField()),
            )

        candidates = list(
            standalone_same.union(standalone_opposite, aggregated_same,
                                  aggregated_opposite).order_by(
                                      "connection_id"))
        if not candidates:
            return None

        first = candidates[0]
        fragment_id = "&".join(
            f"{link['connection_id']}:{link['direction']}"
            for link in candidates)
        return {
            "information": {
                "id":
                fragment_id,
                "details":
                f"{first['source_device_name']} - {first['destination_device_name']}",
            },
            "destination_device": first["destination_device"],
        }

    except (Device.DoesNotExist, Device.MultipleObjectsReturned):
        # No unique lo0 owner for this address — nothing to build.
        return None
Example #2
0
 def test_string_agg_distinct_true(self):
     """With ``distinct=True`` each duplicated value appears exactly once."""
     result = AggregateTestModel.objects.aggregate(
         stringagg=StringAgg('char_field', delimiter=' ', distinct=True))
     self.assertEqual(result['stringagg'].count('Foo'), 1)
     self.assertEqual(result['stringagg'].count('Bar'), 1)
Example #3
0
    def sort_by_attribute(self,
                          attribute_pk: Union[int, str],
                          descending: bool = False):
        """Sort a query set by the values of the given product attribute.

        :param attribute_pk: The database ID (must be a numeric) of the attribute
                             to sort by.
        :param descending: The sorting direction.
        """
        from ..attribute.models import AttributeProduct, AttributeValue

        qs: models.QuerySet = self

        # Null annotations used whenever there is nothing to sort by; they keep
        # the final order_by() valid while pushing every product into a single
        # bucket.  Built once instead of duplicating the dict in both fallback
        # branches below.
        null_sort_annotations = {
            "concatenated_values_order":
            Value(None, output_field=models.IntegerField()),
            "concatenated_values":
            Value(None, output_field=models.CharField()),
        }

        # If the passed attribute ID is not numeric, skip the real sorting
        # (original comment claimed the opposite condition).
        if not (isinstance(attribute_pk, int) or attribute_pk.isnumeric()):
            return qs.annotate(**null_sort_annotations)

        # Retrieve all the products' attribute data IDs (assignments) and
        # product types that have the given attribute associated to them
        associated_values = tuple(
            AttributeProduct.objects.filter(
                attribute_id=attribute_pk).values_list("pk",
                                                       "product_type_id"))

        if not associated_values:
            # The attribute exists but is attached to no product type.
            qs = qs.annotate(**null_sort_annotations)

        else:
            attribute_associations, product_types_associated_to_attribute = zip(
                *associated_values)

            qs = qs.annotate(
                # Contains to retrieve the attribute data (singular) of each product
                # Refer to `AttributeProduct`.
                filtered_attribute=FilteredRelation(
                    relation_name="attributes",
                    condition=Q(
                        attributes__assignment_id__in=attribute_associations),
                ),
                # Implicit `GROUP BY` required for the `StringAgg` aggregation
                grouped_ids=Count("id"),
                # String aggregation of the attribute's values to efficiently sort them
                concatenated_values=Case(
                    # If the product has no association data but has
                    # the given attribute associated to its product type,
                    # then consider the concatenated values as empty (non-null).
                    When(
                        Q(product_type_id__in=
                          product_types_associated_to_attribute)
                        & Q(filtered_attribute=None),
                        then=models.Value(""),
                    ),
                    default=StringAgg(
                        F("filtered_attribute__values__name"),
                        delimiter=",",
                        ordering=([
                            f"filtered_attribute__values__{field_name}" for
                            field_name in AttributeValue._meta.ordering or []
                        ]),
                    ),
                    output_field=models.CharField(),
                ),
                concatenated_values_order=Case(
                    # Make the products having no such attribute be last in the sorting
                    When(concatenated_values=None, then=2),
                    # Put the products having an empty attribute value at the bottom of
                    # the other products.
                    When(concatenated_values="", then=1),
                    # Put the products having an attribute value to be always at the top
                    default=0,
                    output_field=models.IntegerField(),
                ),
            )

        # Sort by concatenated_values_order then
        # Sort each group of products (0, 1, 2, ...) per attribute values
        # Sort each group of products by name,
        # if they have the same values or not values
        ordering = "-" if descending else ""
        return qs.order_by(
            f"{ordering}concatenated_values_order",
            f"{ordering}concatenated_values",
            f"{ordering}name",
        )
Example #4
0
 def test_string_agg_requires_delimiter(self):
     """StringAgg must be given a ``delimiter``; omitting it raises
     TypeError at construction time (inside the context manager)."""
     with self.assertRaises(TypeError):
         AggregateTestModel.objects.aggregate(
             stringagg=StringAgg('char_field'))
Example #5
0
 def test_string_agg_charfield(self):
     """StringAgg joins char_field values with the requested delimiter."""
     result = AggregateTestModel.objects.aggregate(
         stringagg=StringAgg('char_field', delimiter=';'))
     self.assertEqual(result, {'stringagg': 'Foo1;Foo2;Foo4;Foo3'})
def transaction_search_annotations():
    """Return the annotation expressions used for transaction search
    downloads.

    Covers the derived fiscal year, ";"-aggregated funding account labels,
    the USAspending permalink, and COVID-19 columns that only apply to
    transactions dated on or after 2020-04-01.
    """
    annotation_fields = {
        "action_date_fiscal_year":
        FiscalYear("action_date"),
        # Distinct TAS labels of all treasury accounts funding the award,
        # ";"-joined.
        "treasury_accounts_funding_this_award":
        Subquery(
            Award.objects.filter(id=OuterRef("award_id")).annotate(
                value=StringAgg(
                    "financial_set__treasury_account__tas_rendering_label",
                    ";",
                    distinct=True)).values("value"),
            output_field=TextField(),
        ),
        # Same aggregation, but over federal account codes.
        "federal_accounts_funding_this_award":
        Subquery(
            Award.objects.filter(id=OuterRef("award_id")).
            annotate(value=StringAgg(
                "financial_set__treasury_account__federal_account__federal_account_code",
                ";",
                distinct=True)).values("value"),
            output_field=TextField(),
        ),
        # AWARD_URL + url-encoded generated award id + trailing slash.
        "usaspending_permalink":
        ConcatAll(
            Value(AWARD_URL),
            Func(F("transaction__award__generated_unique_award_id"),
                 function="urlencode"), Value("/")),
        # Distinct "<DEFC code>: <public law>" pairs, only for transactions
        # from the COVID-19 reporting period onward; NULL otherwise.
        "disaster_emergency_fund_codes_for_overall_award":
        Case(
            When(
                transaction__action_date__gte=datetime.date(2020, 4, 1),
                then=Subquery(
                    FinancialAccountsByAwards.objects.filter(
                        filter_limit_to_closed_periods(),
                        award_id=OuterRef("award_id")).
                    annotate(value=ExpressionWrapper(
                        Case(
                            When(
                                disaster_emergency_fund__code__isnull=False,
                                then=ConcatAll(
                                    F("disaster_emergency_fund__code"),
                                    Value(": "),
                                    F("disaster_emergency_fund__public_law"),
                                ),
                            ),
                            default=Value(None, output_field=TextField()),
                            output_field=TextField(),
                        ),
                        output_field=TextField(),
                    )).values("award_id").annotate(total=StringAgg(
                        "value", ";", distinct=True)).values("total"),
                    output_field=TextField(),
                ),
            )),
        # COVID outlay/obligation subqueries share the same date gate.
        "outlayed_amount_funded_by_COVID-19_supplementals_for_overall_award":
        Case(
            When(
                transaction__action_date__gte=datetime.date(2020, 4, 1),
                then=_covid_outlay_subquery(),
            ), ),
        "obligated_amount_funded_by_COVID-19_supplementals_for_overall_award":
        Case(
            When(
                transaction__action_date__gte=datetime.date(2020, 4, 1),
                then=_covid_obligation_subquery(),
            ), ),
        # Distinct "<object class>: <name>" pairs, ";"-joined per award.
        "object_classes_funding_this_award":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_limit_to_closed_periods(),
                award_id=OuterRef("award_id"),
                object_class_id__isnull=False).annotate(
                    value=ExpressionWrapper(
                        ConcatAll(F("object_class__object_class"), Value(": "),
                                  F("object_class__object_class_name")),
                        output_field=TextField(),
                    )).values("award_id").annotate(total=StringAgg(
                        "value", ";", distinct=True)).values("total"),
            output_field=TextField(),
        ),
        # Distinct "<program activity code>: <name>" pairs, ";"-joined.
        "program_activities_funding_this_award":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_limit_to_closed_periods(),
                award_id=OuterRef("award_id"),
                program_activity_id__isnull=False).annotate(
                    value=ExpressionWrapper(
                        ConcatAll(
                            F("program_activity__program_activity_code"),
                            Value(": "),
                            F("program_activity__program_activity_name"),
                        ),
                        output_field=TextField(),
                    )).values("award_id").annotate(total=StringAgg(
                        "value", ";", distinct=True)).values("total"),
            output_field=TextField(),
        ),
    }
    return annotation_fields
Example #7
0
    def get_queryset(self):
        """Annotate the queryset with protocol-independent display fields.

        A row may belong to one of five survey protocols (benthic LIT,
        benthic PIT, habitat complexity, bleaching quadrat collection,
        belt fish).  Each Case below picks the site / management /
        sample-unit / depth / date / size value from whichever protocol
        relation is populated.
        """
        qs = self.queryset

        # Protocol label derived from which one-to-one relation is present.
        protocol_condition = Case(
            When(benthiclit__id__isnull=False,
                 then=Value(mermaid.BENTHICLIT_PROTOCOL)),
            When(benthicpit__id__isnull=False,
                 then=Value(mermaid.BENTHICPIT_PROTOCOL)),
            When(
                habitatcomplexity__id__isnull=False,
                then=Value(mermaid.HABITATCOMPLEXITY_PROTOCOL),
            ),
            When(
                bleachingquadratcollection__id__isnull=False,
                then=Value(mermaid.BLEACHINGQC_PROTOCOL),
            ),
            When(beltfish__id__isnull=False,
                 then=Value(mermaid.FISHBELT_PROTOCOL)),
            output_field=CharField(),
        )

        # Site name from the sample event of whichever protocol applies.
        # Note bleaching uses a `quadrat` path where others use `transect`.
        site_name_condition = Case(
            When(
                benthiclit__transect__sample_event__site__name__isnull=False,
                then="benthiclit__transect__sample_event__site__name",
            ),
            When(
                benthicpit__transect__sample_event__site__name__isnull=False,
                then="benthicpit__transect__sample_event__site__name",
            ),
            When(
                habitatcomplexity__transect__sample_event__site__name__isnull=
                False,
                then="habitatcomplexity__transect__sample_event__site__name",
            ),
            When(
                bleachingquadratcollection__quadrat__sample_event__site__name__isnull
                =False,
                then=
                "bleachingquadratcollection__quadrat__sample_event__site__name",
            ),
            When(
                beltfish__transect__sample_event__site__name__isnull=False,
                then="beltfish__transect__sample_event__site__name",
            ),
        )

        # Management regime name, same per-protocol dispatch as above.
        management_name_condition = Case(
            When(
                benthiclit__transect__sample_event__management__name__isnull=
                False,
                then="benthiclit__transect__sample_event__management__name",
            ),
            When(
                benthicpit__transect__sample_event__management__name__isnull=
                False,
                then="benthicpit__transect__sample_event__management__name",
            ),
            When(
                habitatcomplexity__transect__sample_event__management__name__isnull
                =False,
                then=
                "habitatcomplexity__transect__sample_event__management__name",
            ),
            When(
                bleachingquadratcollection__quadrat__sample_event__management__name__isnull
                =False,
                then=
                "bleachingquadratcollection__quadrat__sample_event__management__name",
            ),
            When(
                beltfish__transect__sample_event__management__name__isnull=
                False,
                then="beltfish__transect__sample_event__management__name",
            ),
        )

        # Transect number; no bleaching branch here — bleaching quadrat
        # collections have no transect number (presumably intentional,
        # NOTE(review): confirm).
        sample_unit_number_condition = Case(
            When(
                benthiclit__transect__number__isnull=False,
                then="benthiclit__transect__number",
            ),
            When(
                benthicpit__transect__number__isnull=False,
                then="benthicpit__transect__number",
            ),
            When(
                habitatcomplexity__transect__number__isnull=False,
                then="habitatcomplexity__transect__number",
            ),
            When(
                beltfish__transect__number__isnull=False,
                then="beltfish__transect__number",
            ),
        )

        # Depth of the sample event, per protocol.
        depth_condition = Case(
            When(
                benthiclit__transect__sample_event__depth__isnull=False,
                then="benthiclit__transect__sample_event__depth",
            ),
            When(
                benthicpit__transect__sample_event__depth__isnull=False,
                then="benthicpit__transect__sample_event__depth",
            ),
            When(
                habitatcomplexity__transect__sample_event__depth__isnull=False,
                then="habitatcomplexity__transect__sample_event__depth",
            ),
            When(
                bleachingquadratcollection__quadrat__sample_event__depth__isnull
                =False,
                then="bleachingquadratcollection__quadrat__sample_event__depth",
            ),
            When(
                beltfish__transect__sample_event__depth__isnull=False,
                then="beltfish__transect__sample_event__depth",
            ),
        )

        # Sample date of the sample event, per protocol.
        sample_date_condition = Case(
            When(
                benthiclit__transect__sample_event__sample_date__isnull=False,
                then="benthiclit__transect__sample_event__sample_date",
            ),
            When(
                benthicpit__transect__sample_event__sample_date__isnull=False,
                then="benthicpit__transect__sample_event__sample_date",
            ),
            When(
                habitatcomplexity__transect__sample_event__sample_date__isnull=
                False,
                then="habitatcomplexity__transect__sample_event__sample_date",
            ),
            When(
                bleachingquadratcollection__quadrat__sample_event__sample_date__isnull
                =False,
                then=
                "bleachingquadratcollection__quadrat__sample_event__sample_date",
            ),
            When(
                beltfish__transect__sample_event__sample_date__isnull=False,
                then="beltfish__transect__sample_event__sample_date",
            ),
        )

        # Human-readable size string, e.g. "50m" or "50m x 5m" for belt fish.
        size_condition = Case(
            When(
                benthiclit__id__isnull=False,
                then=Concat(
                    Cast("benthiclit__transect__len_surveyed", CharField()),
                    Value("m")),
            ),
            When(
                benthicpit__id__isnull=False,
                then=Concat(
                    Cast("benthicpit__transect__len_surveyed", CharField()),
                    Value("m")),
            ),
            When(
                habitatcomplexity__id__isnull=False,
                then=Concat(
                    Cast("habitatcomplexity__transect__len_surveyed",
                         CharField()),
                    Value("m"),
                ),
            ),
            When(
                beltfish__id__isnull=False,
                then=Concat(
                    Cast("beltfish__transect__len_surveyed", CharField()),
                    Value("m x "),
                    Cast("beltfish__transect__width__name", CharField()),
                ),
            ),
            When(
                bleachingquadratcollection__id__isnull=False,
                then=Concat(
                    Cast("bleachingquadratcollection__quadrat__quadrat_size",
                         CharField()),
                    Value("m"),
                ),
            ),
        )

        # Distinct "First Last" observer names, comma-joined.
        observers = StringAgg(
            Concat(
                Cast("observers__profile__first_name", CharField()),
                Value(" "),
                Cast("observers__profile__last_name", CharField()),
            ),
            delimiter=", ",
            distinct=True,
        )

        qs = qs.annotate(
            protocol_name=protocol_condition,
            site_name=site_name_condition,
            management_name=management_name_condition,
            sample_unit_number=sample_unit_number_condition,
            depth=depth_condition,
            sample_date=sample_date_condition,
            size_display=size_condition,
            observers_display=observers,
        )

        return qs
Example #8
0
    def get(self, request):
        """Return filtered stocking events as flat dicts, capped at
        ``MAX_FILTERED_EVENT_COUNT`` rows, with model fields renamed via
        ``field_aliases`` for the export format.
        """
        field_aliases = {
            "agency_code":
            F("agency__abbrev"),
            "species_code":
            F("species__abbrev"),
            "strain":
            F("strain_raw__strain__strain_code"),
            "grid10":
            F("grid_10__slug"),
            "lifestage_code":
            F("lifestage__abbrev"),
            "stockingMethod":
            F("stocking_method__stk_meth"),
            "jurisdiction_code":
            F("jurisdiction__slug"),
            "man_unit":
            F("management_unit__slug"),
            "lake":
            F("jurisdiction__lake__abbrev"),
            "stateProv":
            F("jurisdiction__stateprov__abbrev"),
            "latitude":
            F("geom_lat"),
            "longitude":
            F("geom_lon"),
            "clip":
            F("clip_code__clip_code"),
            "_tags":
            StringAgg("fish_tags__tag_code",
                      delimiter=";",
                      ordering="fish_tags__tag_code"),
            "_marks":
            F("physchem_marks__mark_code"),
            # "_marks": StringAgg(
            #     "physchem_marks__mark_code",
            #     delimiter=";",
            #     ordering="physchem_marks__mark_code",
            # ),
        }

        fields = [
            "stock_id",
            "lake",
            "jurisdiction_code",
            "man_unit",
            "stateProv",
            "grid10",
            "latitude",
            "longitude",
            "st_site",
            "year",
            "date",
            "month",
            "clip",
            "_tags",
            "_marks",
            "year_class",
            "agency_code",
            "species_code",
            "strain",
            "lifestage_code",
            "stockingMethod",
            "no_stocked",
            "yreq_stocked",
        ]

        # Geometry columns are expensive and unused here, so defer them.
        # BUGFIX: the defer() paths previously used the misspelled
        # "mangement_unit__*" while select_related()/F() use
        # "management_unit" — Django raises FieldError for unknown defer
        # fields when the queryset is evaluated.
        queryset = (StockingEvent.objects.defer(
            "jurisdiction__geom",
            "jurisdiction__lake__geom",
            "management_unit__geom",
            "management_unit__lake__geom",
            "management_unit__jurisdiction__lake__geom",
            "grid_10__geom",
            "grid_10__lake__geom",
        ).select_related(
            "agency",
            "species",
            "strain",
            "lifestage",
            "grid_10",
            "stocking_method",
            "management_unit",
            "jurisdiction",
            "jurisdiction__lake",
            "jurisdiction__stateprov",
        ).annotate(**field_aliases))

        filtered = StockingEventFilter(request.GET,
                                       queryset=queryset).qs.values(*fields)

        # Cap the payload size for the response.
        maxEvents = settings.MAX_FILTERED_EVENT_COUNT

        return Response(filtered[:maxEvents])
Example #9
0
    def sort_by_attribute(self,
                          attribute_pk: Union[int, str],
                          ascending: bool = True):
        """Sort a query set by the values of the given product attribute.

        :param attribute_pk: The database ID (must be a number) of the attribute
                             to sort by.
        :param ascending: The sorting direction.
        """
        qs: models.QuerySet = self

        # Retrieve all the products' attribute data IDs (assignments) and
        # product types that have the given attribute associated to them
        associated_values = tuple(
            AttributeProduct.objects.filter(attribute_id=attribute_pk).
            values_list("pk", "product_type_id"))

        if not associated_values:
            # BUGFIX: zip(*associated_values) raised ValueError ("not enough
            # values to unpack") when no product type carried the attribute.
            # Annotate a null instead so the ordering Case below still works
            # and every product lands in the "no such attribute" bucket.
            qs = qs.annotate(concatenated_values=models.Value(
                None, output_field=models.CharField()))
        else:
            attribute_associations, product_types_associated_to_attribute = zip(
                *associated_values)

            qs = qs.annotate(
                # Contains to retrieve the attribute data (singular) of each product
                # Refer to `AttributeProduct`.
                filtered_attribute=FilteredRelation(
                    relation_name="attributes",
                    condition=Q(
                        attributes__assignment_id__in=attribute_associations),
                ),
                # Implicit `GROUP BY` required for the `StringAgg` aggregation
                grouped_ids=Count("id"),
                # String aggregation of the attribute's values to efficiently sort them
                concatenated_values=Case(
                    # If the product has no association data but has the given attribute
                    # associated to its product type, then consider the concatenated values
                    # as empty (non-null).
                    When(
                        Q(product_type_id__in=product_types_associated_to_attribute
                          )
                        & Q(filtered_attribute=None),
                        then=models.Value(""),
                    ),
                    default=StringAgg(
                        F("filtered_attribute__values__name"),
                        delimiter=",",
                        ordering=([
                            f"filtered_attribute__values__{field_name}"
                            for field_name in AttributeValue._meta.ordering
                        ]),
                    ),
                    output_field=models.CharField(),
                ),
            )

        qs = qs.extra(order_by=[
            Case(
                # Make the products having no such attribute be last in the sorting
                When(concatenated_values=None, then=2),
                # Put the products having an empty attribute value at the bottom of
                # the other products.
                When(concatenated_values="", then=1),
                # Put the products having an attribute value to be always at the top
                default=0,
                output_field=models.IntegerField(),
            ),
            # Sort each group of products (0, 1, 2, ...) per attribute values
            "concatenated_values",
            # Sort each group of products by name,
            # if they have the same values or not values
            "name",
        ])

        # Descending sorting
        if not ascending:
            return qs.reverse()
        return qs
Example #10
0
 def with_documents(self):
     """Annotate each row with a weighted full-text search ``document``:
     text and aggregated hashtag values at weight A, expanded URLs at B."""
     text_part = SearchVector('text', weight='A')
     hashtag_part = SearchVector(
         StringAgg('hashtags__text_lower', delimiter=' '), weight='A')
     url_part = SearchVector(
         StringAgg('urls__expanded_url', delimiter=' '), weight='B')
     return self.get_queryset().annotate(
         document=text_part + hashtag_part + url_part)
Example #11
0
    def get_queryset(self):
        """Build the filtered, flattened stocking-event export queryset,
        renaming model fields to export column names via ``field_aliases``.
        """
        # Maps export column name -> source model field/expression.
        field_aliases = {
            "glfsd_stock_id": F("stock_id"),
            "agency_code": F("agency__abbrev"),
            "_lake": F("jurisdiction__lake__abbrev"),
            "state_prov": F("jurisdiction__stateprov__abbrev"),
            "manUnit": F("management_unit__label"),
            "grid_10min": F("grid_10__grid"),
            "location_primary": F("st_site"),
            "location_secondary": F("site"),
            "latitude": F("dd_lat"),
            "longitude": F("dd_lon"),
            "stock_method": F("stocking_method__stk_meth"),
            "species_code": F("species__abbrev"),
            "_strain": F("strain_raw__strain__strain_code"),
            "yearclass": F("year_class"),
            "life_stage": F("lifestage__abbrev"),
            "age_months": F("agemonth"),
            "_clip": F("clip_code__clip_code"),
            "phys_chem_mark": F("physchem_marks"),
            "cwt_number": F("tag_no"),
            "tag_retention": F("tag_ret"),
            "mean_length_mm": F("length"),
            "total_weight_kg": F("weight"),
            "stocking_mortality": F("condition__condition"),
            "lot_code": F("lotcode"),
            "hatchery_abbrev": F("hatchery__abbrev"),
            "number_stocked": F("no_stocked"),
            # Aggregated with an empty delimiter, so codes are concatenated
            # back-to-back.
            "tag_type": StringAgg("fish_tags__tag_code", ""),
        }

        # Column order of the export.
        fields = [
            "glfsd_stock_id",
            "agency_stock_id",
            "agency_code",
            "_lake",
            "state_prov",
            "manUnit",
            "grid_10min",
            "location_primary",
            "location_secondary",
            "latitude",
            "longitude",
            "year",
            "month",
            "day",
            "stock_method",
            "species_code",
            "_strain",
            "yearclass",
            "life_stage",
            "age_months",
            "_clip",
            "clip_efficiency",
            "phys_chem_mark",
            "tag_type",
            "cwt_number",
            "tag_retention",
            "mean_length_mm",
            "total_weight_kg",
            "stocking_mortality",
            "lot_code",
            "hatchery_abbrev",
            "number_stocked",
            "notes",
        ]

        # NOTE(review): "fish_tags" is StringAgg-aggregated above, which
        # suggests a to-many relation; select_related() only supports
        # FK/one-to-one paths — confirm this entry is valid.
        queryset = (StockingEvent.objects.select_related(
            "jurisdiction",
            "agency",
            "species",
            "strain_raw__strain",
            "lifestage",
            "condition",
            "grid_10",
            "stocking_method",
            "hatchery",
            "fish_tags",
            "jurisdiction__lake",
            "jurisdiction__stateprov",
        ).defer(
            "jurisdiction__geom",
            "jurisdiction__lake__geom",
            "grid_10__geom",
            "grid_10__lake__geom",
        ).order_by("-year",
                   "stock_id").annotate(**field_aliases).values(*fields))

        # NOTE(review): .values(*fields) is applied here a second time
        # (already applied above) — redundant but harmless.
        filtered = StockingEventFilter(self.request.GET,
                                       queryset=queryset).qs.values(*fields)

        return filtered
Example #12
0
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.search import (
    SearchQuery,
    SearchRank,
    SearchVector,
    TrigramSimilarity,
)
from django.db import models

# Weighted search document: band nickname (A) outranks the aggregated genre
# names (B), which in turn outrank the free-text description (D).
search_vectors = (SearchVector("nickname", weight="A", config="english") +
                  SearchVector(StringAgg("genres__name", delimiter=" "),
                               weight="B",
                               config="english") +
                  SearchVector("description", weight="D", config="english"))


class BandManager(models.Manager):
    """Manager exposing a ranked full-text search over bands."""

    def search(self, text):
        """Return bands matching *text*, ordered by combined search rank
        and trigram similarity of the nickname."""
        query = SearchQuery(text, config="english")
        rank_expr = SearchRank(search_vectors, query)
        similarity = TrigramSimilarity("nickname", text)
        matches = self.get_queryset().annotate(
            search=search_vectors).filter(search=query)
        return matches.annotate(
            rank=rank_expr + similarity).order_by("-rank")
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import (SearchQuery, SearchRank,
                                            SearchVector, SearchVectorField,
                                            TrigramSimilarity)
from django.db import models
from django.urls import reverse

# Create your models here.


# Weighted full-text search expression: title (A) outranks content (B).
# NOTE(review): the third vector aggregates 'content' again (weight D);
# presumably this was meant to aggregate a related field (e.g. tags) —
# confirm against the model definition.
search_vectors = (
    SearchVector('title', weight='A')
    + SearchVector('content', weight='B')
    + SearchVector(StringAgg('content', delimiter=' '), weight='D')
)


class PageManager(models.Manager):
    """Manager adding weighted full-text search over pages."""

    def search(self, text):
        """Return pages matching *text*, ordered by descending relevance.

        Relevance is the weighted full-text ``SearchRank`` over
        ``search_vectors`` plus a trigram similarity on ``content`` so that
        near-miss queries still match.
        """
        # Removed leftover debug statement: print("using this one")
        search_query = SearchQuery(text)
        search_rank = SearchRank(search_vectors, search_query)
        trigram_similarity = TrigramSimilarity('content', text)
        return (
            self.get_queryset()
            .annotate(search=search_vectors)
            .filter(search=search_query)
            .annotate(rank=search_rank + trigram_similarity)
            .order_by('-rank')
        )
Exemple #14
0
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.search import (
    SearchQuery, SearchRank, SearchVector, TrigramSimilarity,
)
from django.db import models

# Weighted, English-configured full-text search expression:
# short_name (A, highest), aggregated shortDescrip values (B),
# long_name (D, lowest).
search_vectors = (
    SearchVector('short_name', weight='A', config='english') +
    SearchVector(StringAgg('shortDescrip', delimiter=' '), weight='B', config='english') +
    SearchVector('long_name', weight='D', config='english')
)
class ProduceSearchManager(models.Manager):
    """Manager adding weighted full-text search over produce records."""

    def search(self, text):
        """Return records matching *text*, highest-ranked first.

        Rank combines the weighted ``SearchRank`` over ``search_vectors``
        with a trigram similarity on ``short_name``.
        """
        query = SearchQuery(text, config='english')
        rank = SearchRank(search_vectors, query)
        similarity = TrigramSimilarity('short_name', text)
        qs = self.get_queryset().annotate(search=search_vectors)
        qs = qs.filter(search=query)
        qs = qs.annotate(rank=rank + similarity)
        return qs.order_by('-rank')
Exemple #15
0
    def duty_sentence(
        self,
        component_parent: Union["measures.Measure",
                                "measures.MeasureCondition"],
    ):
        """
        Returns the human-readable "duty sentence" for a Measure or
        MeasureCondition instance as a string. The returned string value is a
        (Postgresql) string aggregation of all the measure's "current"
        components.

        This operation relies on the `prefix` and `abbreviation` fields being
        filled in on duty expressions and units, which are not supplied by the
        TARIC3 XML by default.

        Strings output by this aggregation should be valid input to the
        :class:`~measures.parsers.DutySentenceParser`.

        The string aggregation will be generated using SQL equivalent to:

        .. code:: SQL

            STRING_AGG(
              TRIM(
                CONCAT(
                  CASE
                    WHEN (
                      "measures_dutyexpression"."prefix" IS NULL
                      OR "measures_dutyexpression"."prefix" = ''
                    ) THEN ''
                    ELSE CONCAT("measures_dutyexpression"."prefix",' ')
                  END,
                  CONCAT(
                    "measures_measureconditioncomponent"."duty_amount",
                    CONCAT(
                      CASE
                        WHEN (
                          "measures_measureconditioncomponent"."duty_amount" IS NOT NULL
                          AND "measures_measureconditioncomponent"."monetary_unit_id" IS NULL
                        ) THEN '%'
                        WHEN "measures_measureconditioncomponent"."duty_amount" IS NULL THEN ''
                        ELSE CONCAT(' ', "measures_monetaryunit"."code")
                      END,
                      CONCAT(
                        CASE
                          WHEN "measures_measurementunit"."abbreviation" IS NULL THEN ''
                          WHEN "measures_measureconditioncomponent"."monetary_unit_id" IS NULL THEN "measures_measurementunit"."abbreviation"
                          ELSE CONCAT(' / ', "measures_measurementunit"."abbreviation")
                        END,
                        CASE
                          WHEN "measures_measurementunitqualifier"."abbreviation" IS NULL THEN ''
                          ELSE CONCAT(
                            ' / ',
                            "measures_measurementunitqualifier"."abbreviation"
                          )
                        END
                      )
                    )
                  )
                )
              ),
              ' '
            ) AS "duty_sentence"

        Returns "" when the parent has no approved components.
        """

        # Components with the greatest transaction_id that is less than
        # or equal to component_parent's transaction_id, are considered 'current'.
        component_qs = component_parent.components.approved_up_to_transaction(
            component_parent.transaction, )
        if not component_qs:
            return ""
        # Keep only the components from the single most recent transaction.
        latest_transaction_id = component_qs.aggregate(
            latest_transaction_id=Max(
                "transaction_id", ), ).get("latest_transaction_id")
        component_qs = component_qs.filter(
            transaction_id=latest_transaction_id)

        # Aggregate all the current Components for component_parent to form its
        # duty sentence.
        duty_sentence = component_qs.aggregate(duty_sentence=StringAgg(
            expression=Trim(
                Concat(
                    # Duty-expression prefix, followed by a space when present.
                    Case(
                        When(
                            Q(duty_expression__prefix__isnull=True)
                            | Q(duty_expression__prefix=""),
                            then=Value(""),
                        ),
                        default=Concat(
                            F("duty_expression__prefix"),
                            Value(" "),
                        ),
                    ),
                    "duty_amount",
                    # A bare amount with no monetary unit is a percentage;
                    # otherwise append the monetary unit code.
                    Case(
                        When(
                            monetary_unit=None,
                            duty_amount__isnull=False,
                            then=Value("%"),
                        ),
                        When(
                            duty_amount__isnull=True,
                            then=Value(""),
                        ),
                        default=Concat(
                            Value(" "),
                            F("monetary_unit__code"),
                        ),
                    ),
                    # Measurement-unit abbreviation, " / "-separated when a
                    # monetary unit precedes it.
                    Case(
                        When(
                            Q(component_measurement=None)
                            | Q(component_measurement__measurement_unit=None)
                            |
                            Q(component_measurement__measurement_unit__abbreviation
                              =None, ),
                            then=Value(""),
                        ),
                        When(
                            monetary_unit__isnull=True,
                            then=F(
                                "component_measurement__measurement_unit__abbreviation",
                            ),
                        ),
                        default=Concat(
                            Value(" / "),
                            F(
                                "component_measurement__measurement_unit__abbreviation",
                            ),
                        ),
                    ),
                    # Optional measurement-unit qualifier, always " / "-separated.
                    Case(
                        When(
                            component_measurement__measurement_unit_qualifier__abbreviation
                            =None,
                            then=Value(""),
                        ),
                        default=Concat(
                            Value(" / "),
                            F(
                                "component_measurement__measurement_unit_qualifier__abbreviation",
                            ),
                        ),
                    ),
                    output_field=CharField(),
                ), ),
            delimiter=" ",
            ordering="duty_expression__sid",
        ), )
        return duty_sentence.get("duty_sentence", "")
Exemple #16
0

class Condition(TimeStampedModel):
    """A named condition record (timestamps come from TimeStampedModel)."""

    # Human-readable condition name; also used as the string representation.
    name = models.CharField(max_length=512)

    def __str__(self):
        return self.name


# Weighted, English-configured full-text search expression over product
# fields: sku (A) > title (B) > brand name (C) > size, condition and the
# space-joined aggregation of colour names (all D).
search_vectors = (
    SearchVector('sku', weight='A', config='english') +
    SearchVector('title', weight='B', config='english') +
    SearchVector('brand__name', weight='C', config='english') +
    SearchVector('size__name', weight='D', config='english') +
    SearchVector('condition__name', weight='D', config='english') +
    SearchVector(StringAgg('colors__name', delimiter=' '),
                 weight='D',
                 config='english'))


class Category(MPTTModel):
    """A node in the hierarchical category tree (django-mptt)."""

    # Optional display name; also drives sibling ordering (see MPTTMeta).
    name = models.TextField(blank=True, null=True)
    # Optional link to the parent category; PROTECT prevents deleting a
    # category that still has children.
    parent = TreeForeignKey(
        'Category',
        blank=True,
        null=True,
        related_name="children",
        db_index=True,
        on_delete=models.PROTECT,
    )

    class MPTTMeta:
        # Keep siblings sorted alphabetically when inserting new nodes.
        order_insertion_by = ['name']
Exemple #17
0
def get_datasets_data_for_user_matching_query(datasets: QuerySet,
                                              query,
                                              use=None,
                                              user=None,
                                              id_field='id'):
    """
    Filters the dataset queryset for:
        1) visibility (whether the user can know if the dataset exists)
        2) matches the search terms

    Annotates the dataset queryset with:
        1) `has_access`, if the user can use the dataset's data.

    Returns a values queryset of dataset rows (id, name, slug, tags,
    `search_rank`, `purpose`, `has_access`, ...).

    NOTE(review): the `use` parameter is never read in this function —
    confirm with callers whether it can be removed.
    """
    is_reference_query = datasets.model is ReferenceDataset

    # Filter out datasets that the user is not allowed to even know about.
    # Unpublished datasets are visible only to users holding the matching
    # "manage unpublished" permission for that dataset type.
    visibility_filter = Q(published=True)

    if user:
        if is_reference_query:
            reference_type = DataSetType.REFERENCE.value
            reference_perm = dataset_type_to_manage_unpublished_permission_codename(
                reference_type)

            if user.has_perm(reference_perm):
                visibility_filter |= Q(published=False)

        if datasets.model is DataSet:
            master_type, datacut_type = (
                DataSetType.MASTER.value,
                DataSetType.DATACUT.value,
            )
            master_perm = dataset_type_to_manage_unpublished_permission_codename(
                master_type)
            datacut_perm = dataset_type_to_manage_unpublished_permission_codename(
                datacut_type)

            if user.has_perm(master_perm):
                visibility_filter |= Q(published=False, type=master_type)

            if user.has_perm(datacut_perm):
                visibility_filter |= Q(published=False, type=datacut_type)

    datasets = datasets.filter(visibility_filter)

    # Filter out datasets that don't match the search terms.
    # Name is weighted highest (A); description and aggregated tag names
    # are both weight B.
    search = (SearchVector('name', weight='A', config='english') +
              SearchVector('short_description', weight='B', config='english') +
              SearchVector(StringAgg('tags__name', delimiter='\n'),
                           weight='B',
                           config='english'))
    search_query = SearchQuery(query, config='english')

    # Rank is always annotated; the match filter is only applied when a
    # non-empty query was given.
    datasets = datasets.annotate(search=search,
                                 search_rank=SearchRank(search, search_query))

    if query:
        datasets = datasets.filter(search=search_query)

    # Mark up whether the user can access the data in the dataset.
    access_filter = Q()

    if user and datasets.model is not ReferenceDataset:
        access_filter &= (Q(user_access_type='REQUIRES_AUTHENTICATION')
                          & (Q(datasetuserpermission__user=user)
                             | Q(datasetuserpermission__isnull=True))) | Q(
                                 user_access_type='REQUIRES_AUTHORIZATION',
                                 datasetuserpermission__user=user)

    # Reference datasets (and anonymous queries) are treated as accessible.
    datasets = datasets.annotate(_has_access=Case(
        When(access_filter, then=True),
        default=False,
        output_field=BooleanField(),
    ) if access_filter else Value(True, BooleanField()), )

    # Pull in the source tag IDs for the dataset
    datasets = datasets.annotate(source_tag_ids=ArrayAgg(
        'tags', filter=Q(tags__type=TagType.SOURCE.value), distinct=True))
    datasets = datasets.annotate(source_tag_names=ArrayAgg(
        'tags__name', filter=Q(
            tags__type=TagType.SOURCE.value), distinct=True))

    # Pull in the topic tag IDs for the dataset
    datasets = datasets.annotate(topic_tag_ids=ArrayAgg(
        'tags', filter=Q(tags__type=TagType.TOPIC.value), distinct=True))
    datasets = datasets.annotate(topic_tag_names=ArrayAgg(
        'tags__name', filter=Q(tags__type=TagType.TOPIC.value), distinct=True))

    # Define a `purpose` column denoting the dataset type.
    if is_reference_query:
        datasets = datasets.annotate(
            purpose=Value(DataSetType.REFERENCE.value, IntegerField()))
    else:
        datasets = datasets.annotate(purpose=F('type'))

    # We are joining on the user permissions table to determine `_has_access`` to the dataset, so we need to
    # group them and remove duplicates. We aggregate all the `_has_access` fields together and return true if any
    # of the records say that access is available.
    datasets = datasets.values(
        id_field,
        'name',
        'slug',
        'short_description',
        'search_rank',
        'source_tag_names',
        'source_tag_ids',
        'topic_tag_names',
        'topic_tag_ids',
        'purpose',
        'published',
        'published_at',
    ).annotate(has_access=BoolOr('_has_access'))

    # NOTE(review): this field list duplicates the one above (plus
    # `has_access`); consider a shared constant to keep them in sync.
    return datasets.values(
        id_field,
        'name',
        'slug',
        'short_description',
        'search_rank',
        'source_tag_names',
        'source_tag_ids',
        'topic_tag_names',
        'topic_tag_ids',
        'purpose',
        'published',
        'published_at',
        'has_access',
    )
    def get(self, request):
        """Return the partnership search configuration as a single payload.

        Gathers every option list the search UI needs (continents/countries,
        partners, UCL entities, education levels, offers, fundings, tags and
        partnership types), serializes each, and returns them in one
        ``Response`` dict.
        """
        config = PartnershipConfiguration.get_configuration()
        current_year = config.get_current_academic_year_for_api()

        # Countries are prefetched per continent, each annotated with a
        # ';'-joined list of its distinct address cities.
        continents = Continent.objects.prefetch_related(
            Prefetch(
                'country_set',
                queryset=Country.objects.annotate(
                    cities=StringAgg('entityversionaddress__city', ';', distinct=True)
                )
            )
        )
        partners = (
            Partner.objects
            .having_partnerships()
            .distinct('pk')
            .order_by('pk')
            .values('uuid', 'organization__name')
        )

        # Get UCL entity parents which children have a partnership
        ucl_universities = (
            EntityProxy.objects
            .ucl_entities_parents()
            .with_title()
            .with_acronym_path()
            .distinct()
            .order_by('acronym_path')
        )

        # label = 'title_fr' if get_language() == settings.LANGUAGE_CODE_FR else 'title_en'
        # education_fields = (
        #     DomainIsced.objects
        #     .filter(partnershipyear__academic_year=current_year)
        #     .distinct()
        #     .values('uuid', label)
        # )
        education_levels = (
            PartnershipYearEducationLevel.objects
                .filter(partnerships_years__academic_year=current_year,
                        partnerships_years__partnership__isnull=False)
                .distinct('pk')
                .order_by('pk')
                .values('code', 'label')
        )

        # Offer titles are language-dependent: French title vs English title.
        label = 'title' if get_language() == settings.LANGUAGE_CODE_FR else 'title_english'
        year_offers = EducationGroupYear.objects.filter(
            partnerships__isnull=False,
        ).values('uuid', 'acronym', label).distinct().order_by('acronym', label)

        # Reuse the autocomplete view with an empty query to list all fundings.
        view = FundingAutocompleteView()
        view.q = ''
        fundings = view.get_list()

        tags = (
             PartnershipTag.objects
             .filter(partnerships__isnull=False)
             .values_list('value', flat=True)
             .distinct('value')
             .order_by('value')
        )
        partner_tags = (
             PartnerTag.objects
             .of_partners_having_partnerships()
             .values_list('value', flat=True)
             .distinct('value')
             .order_by('value')
        )

        data = {
            'continents': ContinentConfigurationSerializer(continents, many=True).data,
            'partners': PartnerConfigurationSerializer(partners, many=True).data,
            'ucl_universities': UCLUniversityConfigurationSerializer(ucl_universities, many=True).data,
            # 'education_fields': EducationFieldConfigurationSerializer(education_fields, many=True).data,
            'education_levels': EducationLevelSerializer(education_levels, many=True).data,
            'fundings': list(fundings),
            'partnership_types': PartnershipTypeSerializer(PartnershipType.all(), many=True).data,
            'tags': list(tags),
            'partner_tags': list(partner_tags),
            'offers': OfferSerializer(year_offers, many=True).data,
        }
        return Response(data)
Exemple #19
0
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.search import (
    SearchQuery,
    SearchRank,
    SearchVector,
    TrigramSimilarity,
)
from django.db import models

# Weighted, English-configured search expression for bands: nickname (A),
# aggregated genre names (B), description (D).
search_vectors = (SearchVector('nickname', weight='A', config='english') +
                  SearchVector(StringAgg('genres__name', delimiter=' '),
                               weight='B',
                               config='english') +
                  SearchVector('description', weight='D', config='english'))


class BandManager(models.Manager):
    """Manager exposing ranked full-text search for bands."""

    def search(self, text):
        """Return bands matching *text*, highest-ranked first.

        Rank = weighted ``SearchRank`` over ``search_vectors`` plus a
        trigram similarity on the nickname.
        """
        query = SearchQuery(text, config='english')
        rank_expr = SearchRank(search_vectors, query) + TrigramSimilarity(
            'nickname', text)
        qs = self.get_queryset().annotate(search=search_vectors)
        qs = qs.filter(search=query)
        return qs.annotate(rank=rank_expr).order_by('-rank')
Exemple #20
0
 def get_search_vector(self):
     """Build the weighted search vector: title (A), tags (B), description (C)."""
     title_vec = SearchVector('title', weight='A')
     description_vec = SearchVector('description', weight='C')
     tags_vec = SearchVector(StringAgg('tags__name', delimiter=' '), weight='B')
     return title_vec + description_vec + tags_vec
Exemple #21
0
 def test_string_agg_empty_result(self):
     """StringAgg over an empty queryset aggregates to '' (not None)."""
     AggregateTestModel.objects.all().delete()
     result = AggregateTestModel.objects.aggregate(
         stringagg=StringAgg('char_field', delimiter=';'))
     self.assertEqual(result, {'stringagg': ''})
Exemple #22
0
 def get_queryset(self, request):
     """Annotate the admin changelist with a comma-joined operator list."""
     qs = super().get_queryset(request)
     if 'changelist' not in request.resolver_match.view_name:
         return qs
     # Only the changelist view needs the aggregated "noc" column.
     return qs.annotate(noc=StringAgg('operators', ', ', distinct=True))
Exemple #23
0
 def test_string_agg_delimiter_escaping(self):
     """A single-quote delimiter must be escaped correctly in the SQL."""
     aggregated = AggregateTestModel.objects.aggregate(
         stringagg=StringAgg('char_field', delimiter="'"))
     self.assertEqual(aggregated, {'stringagg': "Foo1'Foo2'Foo4'Foo3"})
Exemple #24
0
 def get_queryset(self, request, **kwargs):
     """Annotate each row with a distinct, comma-joined list of operators."""
     qs = super().get_queryset(request, **kwargs)
     return qs.annotate(operators=StringAgg(
         'route__service__operator', ', ', distinct=True))
Exemple #25
0
 def test_string_agg_default_output_field(self):
     """StringAgg over a text field works without an explicit output_field."""
     result = AggregateTestModel.objects.aggregate(
         stringagg=StringAgg('text_field', delimiter=';'))
     self.assertEqual(result, {'stringagg': 'Text1;Text2;Text4;Text3'})
def generate_federal_account_query(queryset, account_type, tas_id, filters):
    """Group by federal account (and budget function/subfunction) and SUM all
    other summable fields.

    Annotates derived columns (agency names, budget function titles,
    submission period, gross outlays, ...), groups by every non-summable
    column of the account_type's federal_account file, then sums the rest.
    Returns the grouped, annotated queryset.
    """
    derived_fields = {
        "reporting_agency_name": StringAgg("submission__reporting_agency_name", "; ", distinct=True),
        "budget_function": StringAgg(f"{tas_id}__budget_function_title", "; ", distinct=True),
        "budget_subfunction": StringAgg(f"{tas_id}__budget_subfunction_title", "; ", distinct=True),
        "submission_period": get_fyp_or_q_notation("submission"),
        "agency_identifier_name": get_agency_name_annotation(tas_id, "agency_id"),
        # NAMING_CONFLICT_DISCRIMINATOR avoids clashing with a same-named
        # model field in the download column mapping.
        "last_modified_date"
        + NAMING_CONFLICT_DISCRIMINATOR: Cast(Max("submission__published_date"), output_field=DateField()),
        # NOTE(review): these two outlay columns are computed identically —
        # confirm whether "fyb_to_period_end" should use a different filter.
        "gross_outlay_amount": Sum(generate_gross_outlay_amount_derived_field(filters, account_type)),
        "gross_outlay_amount_fyb_to_period_end": Sum(generate_gross_outlay_amount_derived_field(filters, account_type)),
    }

    # Downward adjustment columns only exist outside the A-file (balances).
    if account_type != "account_balances":
        derived_fields.update(
            {
                "downward_adj_prior_yr_ppaid_undeliv_orders_oblig_refunds_cpe": Sum(
                    generate_ussgl487200_derived_field(filters)
                ),
                "downward_adj_prior_yr_paid_delivered_orders_oblig_refunds_cpe": Sum(
                    generate_ussgl497200_derived_field(filters)
                ),
            }
        )
    if account_type == "award_financial":
        derived_fields = award_financial_derivations(derived_fields)

    queryset = queryset.annotate(**derived_fields)

    # List of all columns that may appear in A, B, or C files that can be summed
    all_summed_cols = [
        "budget_authority_unobligated_balance_brought_forward",
        "adjustments_to_unobligated_balance_brought_forward",
        "budget_authority_appropriated_amount",
        "borrowing_authority_amount",
        "contract_authority_amount",
        "spending_authority_from_offsetting_collections_amount",
        "total_other_budgetary_resources_amount",
        "total_budgetary_resources",
        "obligations_incurred",
        "deobligations_or_recoveries_or_refunds_from_prior_year",
        "unobligated_balance",
        "status_of_budgetary_resources_total",
        "transaction_obligated_amount",
    ]

    # Group by all columns within the file that can't be summed
    fed_acct_values_dict = query_paths[account_type]["federal_account"]
    grouped_cols = [fed_acct_values_dict[val] for val in fed_acct_values_dict if val not in all_summed_cols]
    queryset = queryset.values(*grouped_cols)

    # Sum all fields from all_summed_cols that appear in this file
    values_dict = query_paths[account_type]
    summed_cols = {
        val: Sum(values_dict["treasury_account"].get(val, None))
        for val in values_dict["federal_account"]
        if val in all_summed_cols
    }

    return queryset.annotate(**summed_cols)
Exemple #27
0
 def get_queryset(self, request):
     """On the changelist only, annotate a distinct operator list."""
     qs = super().get_queryset(request)
     if 'changelist' not in request.resolver_match.view_name:
         return qs
     return qs.annotate(operators=StringAgg(
         'trip__route__service__operator', ', ', distinct=True))
Exemple #28
0
    def get_xls_data(self):
        """Yield one spreadsheet row (a list of cells) per partnership
        relation matching the current filterset, for the export of
        ``self.academic_year``.

        Prefetches the relevant PartnershipYear data up front, annotates
        financing/validity information, then builds each row from the
        relation, its partnership and the selected year.
        """
        queryset = self.filterset.qs
        # Years restricted to the exported academic year, with their related
        # funding objects and entities prefetched.
        year_qs = PartnershipYear.objects.select_related(
            'academic_year',
            'funding_source',
            'funding_program',
            'funding_type',
        ).prefetch_related(
            Prefetch('entities', queryset=EntityProxy.objects.with_acronym()),
            'education_levels',
        ).filter(academic_year=self.academic_year)
        queryset = (queryset.annotate_financing(
            self.academic_year
        ).annotate_partner_address('country__continent__name').annotate(
            tags_list=StringAgg('partnership__tags__value', ', '),
            # PROJECT partnerships are always valid; others need a VALIDATED
            # agreement covering the exported academic year.
            is_valid_for_year=Case(
                When(
                    partnership__partnership_type=PartnershipType.PROJECT.name,
                    then=True,
                ),
                default=Exists(
                    PartnershipAgreement.objects.filter(
                        partnership=OuterRef('partnership_id'),
                        status=AgreementStatus.VALIDATED.name,
                        start_academic_year__year__lte=self.academic_year.year,
                        end_academic_year__year__gte=self.academic_year.year,
                    ))),
        ).prefetch_related(
            Prefetch(
                'partnership__years',
                queryset=year_qs,
                to_attr='selected_year',
            ),
            Prefetch(
                'partnership__years',
                queryset=PartnershipYear.objects.select_related(
                    'academic_year'),
            ),
            Prefetch(
                'partnership__years',
                queryset=PartnershipYear.objects.select_related(
                    'academic_year').reverse(),
                to_attr='reverse_years',
            ),
            'entity__entityversion_set',
        ).select_related('entity__partnerentity', ))
        for rel in queryset.distinct():
            partnership = rel.partnership
            year = partnership.selected_year[
                0] if partnership.selected_year else ''
            last_agreement = partnership.last_valid_agreements[
                0] if partnership.last_valid_agreements else None

            # Replace funding values if financing is eligible for mobility and not overridden in year
            if partnership.is_mobility and year and year.eligible and rel.financing_source and not year.funding_source:
                funding_source = rel.financing_source
                funding_program = rel.financing_program
                funding_type = rel.financing_type
            else:
                funding_source = year and year.funding_source
                funding_program = year and year.funding_program
                funding_type = year and year.funding_type

            # Drop the root element of the acronym path; up to three levels
            # are exported below.
            parts = partnership.acronym_path[
                1:] if partnership.acronym_path else []

            partner = rel.entity.organization.partner
            yield [
                partnership.pk,
                partnership.get_partnership_type_display(),
                str(partnership.subtype or ''),
                str(funding_source or ''),
                str(funding_program or ''),
                str(funding_type or ''),
                str(rel.country_continent_name),
                str(rel.country_name),
                str(rel.entity.organization.name),
                str(rel.entity.organization.code or ''),
                str(partner.erasmus_code or ''),
                str(partner.pic_code or ''),
                hasattr(rel.entity, 'partnerentity')
                and rel.entity.partnerentity.name or '',
                str(parts[0] if len(parts) > 0 else ''),
                str(parts[1] if len(parts) > 1 else ''),
                str(parts[2] if len(parts) > 2 else ''),
                str(partnership.supervisor or ''),
                year and ', '.join(
                    map(lambda x: x.acronym or '', year.entities.all())),
                year and ', '.join(map(str, year.education_levels.all())),
                rel.tags_list,
                partnership.created.strftime('%Y-%m-%d'),
                partnership.modified.strftime('%Y-%m-%d'),
                year and year.is_sms,
                year and year.is_smp,
                year and year.is_sta,
                year and year.is_stt,
                # Mobility partnerships span academic years; others use dates.
                str(partnership.years.first().academic_year)
                if partnership.is_mobility else partnership.start_date,
                str(partnership.reverse_years[0].academic_year)
                if partnership.is_mobility else partnership.end_date,
                getattr(
                    last_agreement,
                    'end_academic_year'
                    if partnership.is_mobility else 'end_date',
                    '',
                ),
                rel.is_valid_for_year,  # from annotation
                partnership.external_id,
                partnership.project_acronym,
                year and year.eligible,
            ]
def idv_order_annotations():
    """Build the annotation map used for IDV order download columns.

    Returns a dict of column name -> Django ORM expression. Most columns are
    correlated ``Subquery`` aggregations over the award's financial records;
    string-list columns are ';'-joined via ``StringAgg``.
    """
    annotation_fields = {
        "award_base_action_date_fiscal_year":
        FiscalYear("date_signed"),
        # ';'-joined distinct TAS labels of accounts funding this award.
        "treasury_accounts_funding_this_award":
        Subquery(
            Award.objects.filter(id=OuterRef("id")).annotate(value=StringAgg(
                "financial_set__treasury_account__tas_rendering_label",
                ";",
                distinct=True)).values("value"),
            output_field=TextField(),
        ),
        "federal_accounts_funding_this_award":
        Subquery(
            Award.objects.filter(id=OuterRef("id")).annotate(value=StringAgg(
                "financial_set__treasury_account__federal_account__federal_account_code",
                ";",
                distinct=True)).values("value"),
            output_field=TextField(),
        ),
        "usaspending_permalink":
        Concat(Value(AWARD_URL),
               Func(F("generated_unique_award_id"), function="urlencode"),
               Value("/")),
        # "CODE: public law" pairs, ';'-joined, for closed periods only.
        "disaster_emergency_fund_codes":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_limit_to_closed_periods(),
                award_id=OuterRef("id")).annotate(value=ExpressionWrapper(
                    Case(
                        When(
                            disaster_emergency_fund__code__isnull=False,
                            then=Concat(
                                F("disaster_emergency_fund__code"),
                                Value(": "),
                                F("disaster_emergency_fund__public_law"),
                            ),
                        ),
                        default=Value(None, output_field=TextField()),
                        output_field=TextField(),
                    ),
                    output_field=TextField(),
                )).values("award_id").annotate(total=StringAgg(
                    "value", ";", distinct=True)).values("total"),
            output_field=TextField(),
        ),
        # COVID-19 supplemental outlays/obligations: periods from 2020-04-01.
        "outlayed_amount_funded_by_COVID-19_supplementals":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_by_latest_closed_periods(),
                award_id=OuterRef("id"),
                disaster_emergency_fund__group_name="covid_19",
                submission__reporting_period_start__gte=str(
                    datetime.date(2020, 4, 1)),
            ).values("award_id").annotate(
                sum=Sum("gross_outlay_amount_by_award_cpe")).values("sum"),
            output_field=DecimalField(),
        ),
        "obligated_amount_funded_by_COVID-19_supplementals":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_limit_to_closed_periods(),
                award_id=OuterRef("id"),
                disaster_emergency_fund__group_name="covid_19",
                submission__reporting_period_start__gte=str(
                    datetime.date(2020, 4, 1)),
            ).values("award_id").annotate(
                sum=Sum("transaction_obligated_amount")).values("sum"),
            output_field=DecimalField(),
        ),
        "award_latest_action_date_fiscal_year":
        FiscalYear(F("latest_transaction__action_date")),
        # "code: name" pairs of object classes, ';'-joined.
        "object_classes_funding_this_award":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_limit_to_closed_periods(),
                award_id=OuterRef("id")).annotate(value=ExpressionWrapper(
                    Concat(F("object_class__object_class"), Value(": "),
                           F("object_class__object_class_name")),
                    output_field=TextField(),
                )).values("award_id").annotate(total=StringAgg(
                    "value", ";", distinct=True)).values("total"),
            output_field=TextField(),
        ),
        # "code: name" pairs of program activities, ';'-joined.
        "program_activities_funding_this_award":
        Subquery(
            FinancialAccountsByAwards.objects.filter(
                filter_limit_to_closed_periods(),
                award_id=OuterRef("id"),
                program_activity_id__isnull=False).annotate(
                    value=ExpressionWrapper(
                        Concat(
                            F("program_activity__program_activity_code"),
                            Value(": "),
                            F("program_activity__program_activity_name"),
                        ),
                        output_field=TextField(),
                    )).values("award_id").annotate(total=StringAgg(
                        "value", ";", distinct=True)).values("total"),
            output_field=TextField(),
        ),
    }
    return annotation_fields
Exemple #30
0
def get_path_fragment_interface_address(device, neighbour_interface_address,
                                        all_links):
    """Return one LSP path fragment for the hop from ``device`` towards the
    neighbour whose interface carries ``neighbour_interface_address``.

    The connecting link is searched in four mutually exclusive ways:
    non-aggregated ("single") links stored in the same or the opposite
    direction as the traversal, and LAG-member ("multi") links in the same
    or the opposite direction.  The four querysets are combined with
    ``union()`` and exactly one matching row is expected.

    Args:
        device: Source Device instance for this hop.
        neighbour_interface_address: IP address configured on the neighbour's
            interface (or aggregate interface for LAG links).
        all_links: Base Link queryset to search.

    Returns:
        dict with ``information`` (display id/details of the connection) and
        ``destination_device`` (pk of the neighbour device), or ``None`` when
        no unique link could be determined (a warning is logged instead).
    """
    # Aggregated links: collapse all member links of the same LAG pair into
    # one row, joining their ids into a deterministic "id1_id2_..." key.
    multi_links = (all_links.filter(
        local_interface__aggregate_interface__isnull=False).values(
            "local_interface__device",
            "remote_interface__device",
            "local_interface__aggregate_interface",
            "remote_interface__aggregate_interface",
        ).annotate(connection_id=StringAgg(
            Cast("id", output_field=TextField()),
            delimiter="_",
            ordering="id",
            output_field=TextField(),
        )))

    single_links = all_links.filter(
        local_interface__aggregate_interface__isnull=True)

    # Non-aggregated link stored in the same direction as the traversal.
    single_links_same_direction = single_links.filter(
        local_interface__device=device,
        remote_interface__logicalinterface__address=neighbour_interface_address
    ).values(
        connection_id=Cast("id", output_field=TextField()),
        source_device_name=F("local_interface__device__name"),
        source_interface_name=F("local_interface__name"),
        destination_device=F("remote_interface__device"),
        destination_device_name=F("remote_interface__device__name"),
        destination_interface_name=F("remote_interface__name"),
        direction=Value("same", TextField()),
    )

    # Non-aggregated link stored in the opposite direction; local/remote
    # columns are swapped so the fragment still reads source -> destination.
    single_links_opposite_direction = single_links.filter(
        remote_interface__device=device,
        local_interface__logicalinterface__address=neighbour_interface_address
    ).values(
        connection_id=Cast("id", output_field=TextField()),
        source_device_name=F("remote_interface__device__name"),
        source_interface_name=F("remote_interface__name"),
        destination_device=F("local_interface__device"),
        destination_device_name=F("local_interface__device__name"),
        destination_interface_name=F("local_interface__name"),
        direction=Value("opposite", TextField()),
    )

    # Aggregated link, same direction: the address lives on the neighbour's
    # aggregate (LAG) interface, and interface names come from the aggregates.
    multi_links_same_direction = multi_links.filter(
        local_interface__device=device,
        remote_interface__aggregate_interface__logicalinterface__address=
        neighbour_interface_address,
    ).values(
        "connection_id",
        source_device_name=F("local_interface__device__name"),
        source_interface_name=F("local_interface__aggregate_interface__name"),
        destination_device=F("remote_interface__device"),
        destination_device_name=F("remote_interface__device__name"),
        destination_interface_name=F(
            "remote_interface__aggregate_interface__name"),
        direction=Value("same", TextField()),
    )

    # Aggregated link, opposite direction: local/remote columns swapped.
    multi_links_opposite_direction = multi_links.filter(
        remote_interface__device=device,
        local_interface__aggregate_interface__logicalinterface__address=
        neighbour_interface_address,
    ).values(
        "connection_id",
        source_device_name=F("remote_interface__device__name"),
        source_interface_name=F("remote_interface__aggregate_interface__name"),
        destination_device=F("local_interface__device"),
        destination_device_name=F("local_interface__device__name"),
        destination_interface_name=F(
            "local_interface__aggregate_interface__name"),
        direction=Value("opposite", TextField()),
    )
    try:
        # Exactly one of the four variants must match; .get() raises
        # DoesNotExist / MultipleObjectsReturned otherwise.
        connection = single_links_same_direction.union(
            single_links_opposite_direction, multi_links_same_direction,
            multi_links_opposite_direction).get()
        return {
            "information": {
                "id":
                f"{connection['connection_id']}:{connection['direction']}",
                "details":
                f"{connection['source_device_name']} ({connection['source_interface_name']}) - "
                f"{connection['destination_device_name']} ({connection['destination_interface_name']})",
            },
            "destination_device": connection["destination_device"],
        }
    except Link.DoesNotExist:
        # Message fixed: balanced parentheses and single spacing.
        logger.warning(
            f"LSP PATH: Interface with address {neighbour_interface_address} was not found "
            f"in neighbours of {device.ip_address} (pk: {device.pk})")
        return None
    except Link.MultipleObjectsReturned:
        # Message fixed: trailing space added so the two f-string halves do
        # not run together ("...{address}in neighbours...").
        logger.warning(
            f"LSP PATH: Multiple interfaces with address {neighbour_interface_address} "
            f"in neighbours of {device.ip_address} (pk: {device.pk})")
        return None