コード例 #1
0
ファイル: tests.py プロジェクト: romina660/nuevo
 def test_range_unbound(self):
     """A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
     # With both frame ends unbounded, Sum('salary') covers the whole
     # age partition regardless of the ordering clause.
     window = Window(
         expression=Sum('salary'),
         partition_by='age',
         order_by=[F('age').asc()],
         frame=ValueRange(start=None, end=None),
     )
     queryset = Employee.objects.annotate(sum=window).order_by(
         'department', 'hire_date', 'name')
     self.assertIn('RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING', str(queryset.query))
     expected = [
         ('Jones', 'Accounting', 45000, datetime.date(2005, 11, 1), 165000),
         ('Jenson', 'Accounting', 45000, datetime.date(2008, 4, 1), 165000),
         ('Williams', 'Accounting', 37000, datetime.date(2009, 6, 1), 165000),
         ('Adams', 'Accounting', 50000, datetime.date(2013, 7, 1), 130000),
         ('Wilkinson', 'IT', 60000, datetime.date(2011, 3, 1), 194000),
         ('Moore', 'IT', 34000, datetime.date(2013, 8, 1), 194000),
         ('Miller', 'Management', 100000, datetime.date(2005, 6, 1), 194000),
         ('Johnson', 'Management', 80000, datetime.date(2005, 7, 1), 130000),
         ('Smith', 'Marketing', 38000, datetime.date(2009, 10, 1), 165000),
         ('Johnson', 'Marketing', 40000, datetime.date(2012, 3, 1), 148000),
         ('Smith', 'Sales', 55000, datetime.date(2007, 6, 1), 148000),
         ('Brown', 'Sales', 53000, datetime.date(2009, 9, 1), 148000),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         transform=lambda employee: (
             employee.name, employee.department, employee.salary,
             employee.hire_date, employee.sum))
コード例 #2
0
ファイル: tests.py プロジェクト: romina660/nuevo
 def test_row_number_no_ordering(self):
     """
     The row number window function computes the number based on the order
     in which the tuples were inserted.
     """
     # Order by pk (insertion order) so results are stable across databases.
     queryset = Employee.objects.annotate(
         row_number=Window(expression=RowNumber()),
     ).order_by('pk')
     expected = [
         ('Jones', 'Accounting', 1),
         ('Williams', 'Accounting', 2),
         ('Jenson', 'Accounting', 3),
         ('Adams', 'Accounting', 4),
         ('Smith', 'Sales', 5),
         ('Brown', 'Sales', 6),
         ('Johnson', 'Marketing', 7),
         ('Smith', 'Marketing', 8),
         ('Wilkinson', 'IT', 9),
         ('Moore', 'IT', 10),
         ('Miller', 'Management', 11),
         ('Johnson', 'Management', 12),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         lambda employee: (employee.name, employee.department, employee.row_number))
コード例 #3
0
ファイル: tests.py プロジェクト: romina660/nuevo
 def test_multiple_partitioning(self):
     """
     Find the maximum salary for each department for people hired in the
     same year.
     """
     # Partitioning on both department and hire year computes Max('salary')
     # per (department, year) group instead of per department.
     window = Window(
         expression=Max('salary'),
         partition_by=[F('department'), ExtractYear(F('hire_date'))],
     )
     queryset = Employee.objects.annotate(max=window).order_by(
         'department', 'hire_date', 'name')
     expected = [
         ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
         ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
         ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
         ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
         ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
         ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
         ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
         ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
         ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
         ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
         ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
         ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         transform=lambda row: (
             row.name, row.salary, row.department, row.hire_date, row.max))
コード例 #4
0
ファイル: tests.py プロジェクト: guyi2020/blog
 def test_min_department(self):
     """An alternative way to specify a query for FirstValue."""
     # Min over a salary-ordered department partition annotates every row
     # with its department's lowest salary.
     queryset = Employee.objects.annotate(
         min_salary=Window(
             expression=Min('salary'),
             partition_by=F('department'),
             order_by=[F('salary').asc(), F('name').asc()],
         ),
     ).order_by('department', 'salary', 'name')
     expected = [
         ('Williams', 'Accounting', 37000, 37000),
         ('Jenson', 'Accounting', 45000, 37000),
         ('Jones', 'Accounting', 45000, 37000),
         ('Adams', 'Accounting', 50000, 37000),
         ('Moore', 'IT', 34000, 34000),
         ('Wilkinson', 'IT', 60000, 34000),
         ('Johnson', 'Management', 80000, 80000),
         ('Miller', 'Management', 100000, 80000),
         ('Smith', 'Marketing', 38000, 38000),
         ('Johnson', 'Marketing', 40000, 38000),
         ('Brown', 'Sales', 53000, 53000),
         ('Smith', 'Sales', 55000, 53000),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         lambda row: (row.name, row.department, row.salary, row.min_salary))
コード例 #5
0
ファイル: tests.py プロジェクト: guyi2020/blog
 def test_dense_rank(self):
     """DenseRank() ranks employees by hire year without gaps between rank values."""
     queryset = Employee.objects.annotate(rank=Window(
         expression=DenseRank(),
         order_by=ExtractYear(F('hire_date')).asc(),
     ))
     expected = [
         ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
         ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
         ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
         ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 2),
         ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 3),
         ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 4),
         ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 4),
         ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 4),
         ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 5),
         ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 6),
         ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 7),
         ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 7),
     ]
     # No explicit queryset ordering, so compare unordered.
     self.assertQuerysetEqual(
         queryset, expected,
         lambda entry: (entry.name, entry.salary, entry.department,
                        entry.hire_date, entry.rank),
         ordered=False)
コード例 #6
0
ファイル: tests.py プロジェクト: guyi2020/blog
 def test_function_list_of_values(self):
     """A window annotation in a values_list() query must not add GROUP BY."""
     window = Window(
         expression=Lead(expression='salary'),
         order_by=[F('hire_date').asc(), F('name').desc()],
         partition_by='department',
     )
     queryset = Employee.objects.annotate(lead=window).values_list(
         'name', 'salary', 'department', 'hire_date', 'lead')
     self.assertNotIn('GROUP BY', str(queryset.query))
     # Lead yields the next row's salary within the partition, None at the end.
     self.assertSequenceEqual(queryset, [
         ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
         ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
         ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
         ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
         ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
         ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
         ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
         ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
         ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
         ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
         ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
         ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
     ])
コード例 #7
0
ファイル: tests.py プロジェクト: guyi2020/blog
 def test_avg_salary_department(self):
     """Annotate every employee with their department's average salary."""
     queryset = Employee.objects.annotate(avg_salary=Window(
         expression=Avg('salary'),
         order_by=F('department').asc(),
         partition_by='department',
     )).order_by('department', '-salary', 'name')
     expected = [
         ('Adams', 50000, 'Accounting', 44250.00),
         ('Jenson', 45000, 'Accounting', 44250.00),
         ('Jones', 45000, 'Accounting', 44250.00),
         ('Williams', 37000, 'Accounting', 44250.00),
         ('Wilkinson', 60000, 'IT', 47000.00),
         ('Moore', 34000, 'IT', 47000.00),
         ('Miller', 100000, 'Management', 90000.00),
         ('Johnson', 80000, 'Management', 90000.00),
         ('Johnson', 40000, 'Marketing', 39000.00),
         ('Smith', 38000, 'Marketing', 39000.00),
         ('Smith', 55000, 'Sales', 54000.00),
         ('Brown', 53000, 'Sales', 54000.00),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         transform=lambda employee: (
             employee.name, employee.salary, employee.department,
             employee.avg_salary))
コード例 #8
0
ファイル: tests.py プロジェクト: niraj070/django-1
 def test_window_expression_within_subquery(self):
     """A window-annotated queryset can serve as a pk__in subquery."""
     # FirstValue('id') over a salary-descending partition picks the id of
     # each department's highest-paid employee.
     subquery_qs = Employee.objects.annotate(
         highest=Window(
             FirstValue('id'),
             partition_by=F('department'),
             order_by=F('salary').desc(),
         ),
     ).values('highest')
     highest_salary = Employee.objects.filter(pk__in=subquery_qs)
     self.assertSequenceEqual(highest_salary.values('department', 'salary'), [
         {'department': 'Accounting', 'salary': 50000},
         {'department': 'Sales', 'salary': 55000},
         {'department': 'Marketing', 'salary': 40000},
         {'department': 'IT', 'salary': 60000},
         {'department': 'Management', 'salary': 100000},
     ])
コード例 #9
0
ファイル: service.py プロジェクト: manikya/leagueAssignment
def filter_query_with_percentile(percentile, queryset):
    """
    Annotate rows with the percentile rank of their average player score and
    drop rows below the requested percentile.

    :param percentile: threshold for filtering; falsy values (None, '', 0)
        disable filtering entirely. PercentRank() yields values in [0, 1],
        so the threshold is compared on that scale — assumes callers pass a
        0..1 fraction, not a 0..100 percentage; TODO confirm.
    :param queryset: queryset with a ``player`` relation exposing ``score``.
    :return: the queryset unchanged when no percentile is requested,
        otherwise a plain list of the surviving rows (a QuerySet cannot be
        filtered on a window annotation, so filtering happens in Python).
    """
    # Guard clause: nothing requested, nothing to do.
    if not percentile:
        return queryset

    annotated = queryset.annotate(Avg('player__score')).annotate(
        percentile=Window(
            expression=PercentRank(),
            # Avg() above created the default 'player__score__avg' alias.
            order_by=F('player__score__avg').asc(),
        ))

    # Hoist the conversion out of the loop and keep rows at or above the
    # threshold. Note: this returns a list, not a QuerySet.
    threshold = float(percentile)
    return [row for row in annotated if row.percentile >= threshold]
コード例 #10
0
 def test_multiple_ordering(self):
     """
     Accumulate the salaries over the departments based on hire_date.
     If two people were hired on the same date in the same department, the
     ordering clause will render a different result for those people.
     """
     queryset = Employee.objects.annotate(sum=Window(
         expression=Sum('salary'),
         partition_by='department',
         order_by=[F('hire_date').asc(), F('name').asc()],
     )).order_by('department', 'sum')
     expected = [
         ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
         ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
         ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 127000),
         ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 177000),
         ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
         ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 94000),
         ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
         ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 180000),
         ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
         ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 78000),
         ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
         ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 108000),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         transform=lambda row: (
             row.name, row.salary, row.department, row.hire_date, row.sum))
コード例 #11
0
 def test_last_value(self):
     """
     LastValue('hire_date') with the default frame (ending at the current
     row) returns each row's own hire_date.
     """
     queryset = Employee.objects.annotate(last_value=Window(
         expression=LastValue('hire_date'),
         partition_by=F('department'),
         order_by=F('hire_date').asc(),
     ))
     expected = [
         ('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, datetime.date(2013, 7, 1)),
         ('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, datetime.date(2008, 4, 1)),
         ('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, datetime.date(2005, 11, 1)),
         ('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, datetime.date(2009, 6, 1)),
         ('Moore', 'IT', datetime.date(2013, 8, 1), 34000, datetime.date(2013, 8, 1)),
         ('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, datetime.date(2011, 3, 1)),
         ('Miller', 'Management', datetime.date(2005, 6, 1), 100000, datetime.date(2005, 6, 1)),
         ('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, datetime.date(2005, 7, 1)),
         ('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, datetime.date(2012, 3, 1)),
         ('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, datetime.date(2009, 10, 1)),
         ('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, datetime.date(2009, 9, 1)),
         ('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, datetime.date(2007, 6, 1)),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         transform=lambda row: (
             row.name, row.department, row.hire_date, row.salary,
             row.last_value),
         ordered=False)
コード例 #12
0
ファイル: query_handler.py プロジェクト: lcouzens/koku
    def get_rank_window_function(self, group_by_value):
        """Generate a limit ranking window function.

        NOTE: pops the last element off the caller's ``group_by_value`` list
        (the list is mutated in place).
        """
        rank_field = group_by_value.pop()
        tag_column = self._mapper.tag_column
        default_ordering = self._mapper.report_type_map.get("default_ordering")

        rank_orders = []
        if self.order_field == "delta" and "__" in self._delta:
            # Order by the ratio of the two delta components.
            delta_field_one, delta_field_two = self._delta.split("__")
            ratio = F(delta_field_one) / F(delta_field_two)
            rank_orders.append(getattr(ratio, self.order_direction)())
        elif self.parameters.get("order_by", default_ordering):
            rank_orders.append(
                getattr(F(self.order_field), self.order_direction)())

        if tag_column in rank_field:
            rank_orders.append(self.get_tag_order_by(rank_field))
        else:
            rank_orders.append(getattr(F(rank_field), self.order_direction)())

        # Rank rows within each date partition.
        return Window(
            expression=RowNumber(),
            partition_by=F("date"),
            order_by=rank_orders)
コード例 #13
0
 def test_max_per_year(self):
     """
     Find the maximum salary awarded in the same year as the
     employee was hired, regardless of the department.
     """
     queryset = Employee.objects.annotate(max_salary_year=Window(
         expression=Max('salary'),
         order_by=ExtractYear('hire_date').asc(),
         partition_by=ExtractYear('hire_date')
     )).order_by(ExtractYear('hire_date'), 'salary')
     expected = [
         ('Jones', 'Accounting', 45000, 2005, 100000),
         ('Johnson', 'Management', 80000, 2005, 100000),
         ('Miller', 'Management', 100000, 2005, 100000),
         ('Smith', 'Sales', 55000, 2007, 55000),
         ('Jenson', 'Accounting', 45000, 2008, 45000),
         ('Williams', 'Accounting', 37000, 2009, 53000),
         ('Smith', 'Marketing', 38000, 2009, 53000),
         ('Brown', 'Sales', 53000, 2009, 53000),
         ('Wilkinson', 'IT', 60000, 2011, 60000),
         ('Johnson', 'Marketing', 40000, 2012, 40000),
         ('Moore', 'IT', 34000, 2013, 50000),
         ('Adams', 'Accounting', 50000, 2013, 50000),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         lambda row: (row.name, row.department, row.salary,
                      row.hire_date.year, row.max_salary_year))
コード例 #14
0
 def test_percent_rank(self):
     """
     Calculate the percentage rank of the employees across the entire
     company based on salary and name (in case of ambiguity).
     """
     queryset = Employee.objects.annotate(percent_rank=Window(
         expression=PercentRank(),
         order_by=[F('salary').asc(), F('name').asc()],
     )).order_by('percent_rank')
     # Round to account for precision differences among databases.
     expected = [
         ('Moore', 'IT', 34000, 0.0),
         ('Williams', 'Accounting', 37000, 0.0909090909),
         ('Smith', 'Marketing', 38000, 0.1818181818),
         ('Johnson', 'Marketing', 40000, 0.2727272727),
         ('Jenson', 'Accounting', 45000, 0.3636363636),
         ('Jones', 'Accounting', 45000, 0.4545454545),
         ('Adams', 'Accounting', 50000, 0.5454545455),
         ('Brown', 'Sales', 53000, 0.6363636364),
         ('Smith', 'Sales', 55000, 0.7272727273),
         ('Wilkinson', 'IT', 60000, 0.8181818182),
         ('Johnson', 'Management', 80000, 0.9090909091),
         ('Miller', 'Management', 100000, 1.0),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         transform=lambda row: (row.name, row.department, row.salary,
                                round(row.percent_rank, 10)))
コード例 #15
0
 def test_rank(self):
     """
     Rank the employees based on the year they're were hired. Since there
     are multiple employees hired in different years, this will contain
     gaps.
     """
     queryset = Employee.objects.annotate(rank=Window(
         expression=Rank(),
         order_by=ExtractYear(F('hire_date')).asc(),
     ))
     expected = [
         ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
         ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
         ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
         ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 4),
         ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 5),
         ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 6),
         ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 6),
         ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 6),
         ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 9),
         ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 10),
         ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 11),
         ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 11),
     ]
     # No explicit queryset ordering, so compare unordered.
     self.assertQuerysetEqual(
         queryset, expected,
         lambda entry: (entry.name, entry.salary, entry.department,
                        entry.hire_date, entry.rank),
         ordered=False)
コード例 #16
0
 def test_ntile(self):
     """
     Compute the group for each of the employees across the entire company,
     based on how high the salary is for them. There are twelve employees
     so it divides evenly into four groups.
     """
     queryset = Employee.objects.annotate(ntile=Window(
         expression=Ntile(num_buckets=4),
         order_by=F('salary').desc(),
     )).order_by('ntile', '-salary', 'name')
     expected = [
         ('Miller', 'Management', 100000, 1),
         ('Johnson', 'Management', 80000, 1),
         ('Wilkinson', 'IT', 60000, 1),
         ('Smith', 'Sales', 55000, 2),
         ('Brown', 'Sales', 53000, 2),
         ('Adams', 'Accounting', 50000, 2),
         ('Jenson', 'Accounting', 45000, 3),
         ('Jones', 'Accounting', 45000, 3),
         ('Johnson', 'Marketing', 40000, 3),
         ('Smith', 'Marketing', 38000, 4),
         ('Williams', 'Accounting', 37000, 4),
         ('Moore', 'IT', 34000, 4),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         lambda employee: (employee.name, employee.department,
                           employee.salary, employee.ntile))
コード例 #17
0
 def test_cume_dist(self):
     """
     Compute the cumulative distribution for the employees based on the
     salary in increasing order. Equal to rank/total number of rows (12).
     """
     queryset = Employee.objects.annotate(cume_dist=Window(
         expression=CumeDist(),
         order_by=F('salary').asc(),
     )).order_by('salary', 'name')
     # Round result of cume_dist because Oracle uses greater precision.
     expected = [
         ('Moore', 'IT', 34000, 0.0833333333),
         ('Williams', 'Accounting', 37000, 0.1666666667),
         ('Smith', 'Marketing', 38000, 0.25),
         ('Johnson', 'Marketing', 40000, 0.3333333333),
         ('Jenson', 'Accounting', 45000, 0.5),
         ('Jones', 'Accounting', 45000, 0.5),
         ('Adams', 'Accounting', 50000, 0.5833333333),
         ('Brown', 'Sales', 53000, 0.6666666667),
         ('Smith', 'Sales', 55000, 0.75),
         ('Wilkinson', 'IT', 60000, 0.8333333333),
         ('Johnson', 'Management', 80000, 0.9166666667),
         ('Miller', 'Management', 100000, 1),
     ]
     self.assertQuerysetEqual(
         queryset, expected,
         lambda row: (row.name, row.department, row.salary,
                      round(row.cume_dist, 10)))
コード例 #18
0
def newest_books_by_author(request):
    """
    Render books annotated with the id of each author's most recent book.

    Emulates the query:
        select
            id,
            title,
            published,
            author_id,
            last_value(id) over (
                partition by author_id
                order by published
                range between unbounded preceding and unbounded following
            ) last
        from books_book
        order by author_id, published;

    NOTE(review): RowRange emits a ROWS frame rather than the RANGE frame
    shown above — with both ends unbounded the two frames cover the same
    rows, but confirm that was intended.
    """
    last_in_partition = Window(
        expression=LastValue('id'),
        partition_by=F('author_id'),
        order_by=F('published'),
        frame=RowRange(start=None, end=None),
    )
    books = Book.objects.annotate(last_id=last_in_partition)
    return render(request, 'books/newest.html', {'books': books})
コード例 #19
0
ファイル: artist.py プロジェクト: louiseGrandjonc/kyo_game
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        # number of single words in album
        context['single_word'] = (Word.objects.filter(
            artist_id=self.object.pk).values('album__name').annotate(
                total=Count('id')).order_by('-total')[0:10])

        # words that appear in most songs
        # we want the word and in how many song it appears
        """WITH distinct_song AS (SELECT DISTINCT value, song_id FROM kyo_word WHERE artist_id=6)
SELECT value, COUNT(*) FROM distinct_song GROUP BY value ORDER BY count(*) DESC LIMIT 20; """

        # Rank albums by popularity

        dense_rank_by_album = Window(expression=DenseRank(),
                                     order_by=F("popularity").desc())

        context['album_by_popularity'] = (Album.objects.filter(
            artist=self.object).annotate(
                ranking=dense_rank_by_album).order_by('ranking'))

        # Words with rank on their frequency
        dense_rank_by_album = Window(expression=DenseRank(),
                                     partition_by=F("album_id"),
                                     order_by=F("frequency").desc())

        context['words'] = (Word.objects.filter(artist_id=self.object).values(
            'value', 'album_id').annotate(
                frequency=Count('value'),
                ranking=dense_rank_by_album).order_by('ranking'))

        # top 10 words per album
        # Adding a where on ranking won't work, would need to put it in a subquery
        # So let's do it raw SQL

        query = """
        SELECT value, a.name as album_name, frequency, ranking FROM (

        SELECT value,
        album.name,
        count(*) as frequency,
        dense_rank() OVER (PARTITION BY album.id
        ORDER BY COUNT(*)
        DESC
        ) ranking
        FROM kyo_word INNER JOIN kyo_album album ON album.id = kyo_word.album_id WHERE kyo_word.artist_id = %s   AND value <> 'refrain' GROUP BY value, album.id ORDER BY album.id) a

        WHERE a.ranking < 9 AND a.frequency > 5;"""

        with connection.cursor() as cursor:
            cursor.execute(query, [self.object.pk])
            context['top_10_words'] = dictfetchall(cursor)

        # album with the next
        query = """
        SELECT album.id, album.year, album.popularity,
               next_album.id as next_album_pk,
               next_album.name as next_album_name,
               next_album.year as next_album_year
        FROM kyo_album album
        LEFT OUTER JOIN LATERAL (
          SELECT * FROM kyo_album next_album
          WHERE next_album.artist_id=album.artist_id
        AND next_album.year > album.year
          ORDER BY year ASC LIMIT 1) next_album on true
        WHERE album.artist_id=%s
        ORDER BY album.year;"""

        context['albums'] = Album.objects.raw(query, [self.object.pk])

        # top words total

        context['top_words'] = Word.objects.filter(
            artist_id=self.object.pk).values('value').annotate(
                total=Count('id')).order_by('-total')[:30]

        return context
コード例 #20
0
    def execute_query(self):  # noqa: C901
        """Execute query and return provided data.

        Returns:
            (Dict): Dictionary response of query params, data, and total

        """
        query_sum = self.initialize_totals()
        data = []

        with tenant_context(self.tenant):
            query = self.query_table.objects.filter(self.query_filter)
            query_data = query.annotate(**self.annotations)
            group_by_value = self._get_group_by()
            query_group_by = ["date"] + group_by_value
            query_order_by = ["-date"]
            query_order_by.extend([self.order])

            annotations = self._mapper.report_type_map.get("annotations")
            query_data = query_data.values(*query_group_by).annotate(**annotations)

            if self._limit:
                # Rank rows within each date partition so the list can be
                # limited to the top entries.
                rank_order = getattr(F(self.order_field), self.order_direction)()
                rank_by_total = Window(
                    expression=RowNumber(),
                    partition_by=F("date"),
                    order_by=rank_order)
                query_data = query_data.annotate(rank=rank_by_total)
                query_order_by.insert(1, "rank")
                query_data = self._ranked_list(query_data)

            if query.exists():
                aggregates = self._mapper.report_type_map.get("aggregates")
                metric_sum = query.aggregate(**aggregates)
                query_sum = {key: metric_sum.get(key) for key in aggregates}

            if self._delta:
                query_data = self.add_deltas(query_data, query_sum)

            is_csv_output = self.parameters.accept_type and "text/csv" in self.parameters.accept_type
            query_data = self.order_by(query_data, query_order_by)

            # Units come from fallbacks unless real data overrides them below.
            cost_units_value = self._mapper.report_type_map.get("cost_units_fallback", "USD")
            usage_units_value = self._mapper.report_type_map.get("usage_units_fallback")
            count_units_value = self._mapper.report_type_map.get("count_units_fallback")
            if query_data:
                cost_units_value = query_data[0].get("cost_units")
                if self._mapper.usage_units_key:
                    usage_units_value = query_data[0].get("usage_units")
                if self._mapper.report_type_map.get("annotations", {}).get("count_units"):
                    count_units_value = query_data[0].get("count_units")

            if is_csv_output:
                data = self._ranked_list(list(query_data)) if self._limit else list(query_data)
            else:
                groups = copy.deepcopy(query_group_by)
                groups.remove("date")
                data = self._apply_group_by(list(query_data), groups)
                data = self._transform_data(query_group_by, 0, data)

        init_order_keys = []
        query_sum["cost_units"] = cost_units_value
        if self._mapper.usage_units_key and usage_units_value:
            init_order_keys = ["usage_units"]
            query_sum["usage_units"] = usage_units_value
        if self._mapper.report_type_map.get("annotations", {}).get("count_units") and count_units_value:
            query_sum["count_units"] = count_units_value

        # Emit totals in a stable key order (units first, then annotations),
        # then merge in any remaining keys.
        key_order = list(init_order_keys + list(annotations.keys()))
        ordered_total = {
            total_key: query_sum[total_key]
            for total_key in key_order if total_key in query_sum
        }
        ordered_total.update(query_sum)
        self._pack_data_object(ordered_total, **self._mapper.PACK_DEFINITIONS)

        self.query_sum = ordered_total
        self.query_data = data
        return self._format_query_response()
コード例 #21
0
def daybarchart(request):
    """Render the accident dashboard: a folium map plus chart datasets.

    GET shows all accidents; POST filters them to the submitted
    [debut, fin] date range. Aggregates injuries (bless), deaths (dec)
    and accident counts by day, month, wilaya, road type, vehicle
    category, hour, driver age, cloud cover and cause, and computes the
    month-over-month evolution percentage for the template.
    """
    f = folium.Figure()
    m = folium.Map(location=[28.5, 2],
                   zoom_start=5,
                   tiles=tilesServer,
                   attr="openmaptiles-server")
    m.add_child(fullscreen)
    if request.method == 'POST':
        myfilter = intervalledate(request.POST, prefix='charts')
        debut = request.POST.get('debut')
        fin = request.POST.get('fin')
        data = Accident.objects.filter(date__range=[debut, fin])
    else:
        data = Accident.objects.all()
        myfilter = intervalledate(prefix='charts')

    # flat=True returns a QuerySet of single values instead of 1-tuples:
    # <QuerySet [1, 2]> instead of <QuerySet [(1,), (2,)]>
    latitude = list(data.values_list("latitude", flat=True))
    longitude = list(data.values_list("longitude", flat=True))
    # Overall totals: injured, deceased and accident count.
    bless = (data.values("accident").annotate(
        accidents=Sum('nbre_bless'))[0]['accidents'])
    dec = (data.values("accident").annotate(
        accidents=Sum('nbre_dec'))[0]['accidents'])
    acc = (data.values("accident").annotate(
        accidents=Count('accident'))[0]['accidents'])
    wdata = data.values("wilaya").annotate(
        accidents=Sum('accident'),
        dec_count=Sum('nbre_dec'),
        bless_count=Sum('nbre_bless')).order_by('wilaya')
    ddata = data.values('jour').annotate(
        dec_count=Sum('nbre_dec'),
        bless_count=Sum('nbre_bless'),
        accidents=Sum('accident')).order_by('-accidents')
    accident = data.values("mois").annotate(accidents=Sum('accident'),
                                            dec_count=Sum('nbre_dec'),
                                            bless_count=Sum('nbre_bless'))
    # Percentage change between the two most recent months; 0 when only
    # one month of data is present.
    if len(accident) > 1:
        evolution = round(
            ((list(accident.distinct())[-1]['accidents'] -
              list(accident.distinct())[-2]['accidents']) * 100 / acc), 2)
    else:
        evolution = 0

    mdata = (data.values('mois').annotate(dec_count=Sum('nbre_dec'),
                                          bless_count=Sum('nbre_bless')))
    routedata = data.values('type_route').annotate(
        route_count=Count('type_route')).order_by('-route_count')[:8]
    catdata = list(
        data.values('cat_veh').annotate(
            cat_count=Count('cat_veh')).order_by('-cat_count'))[:8]
    hdata = list(
        data.values('heure').annotate(
            accidents=Count('accident')).order_by('heure'))
    temperaturedata = data.values("age_chauff").annotate(
        accidents=Sum('accident')).order_by('age_chauff')
    precipitationdata = data.values("couverturenuage").annotate(
        accidents=Sum('accident')).order_by('couverturenuage')

    # Running accident count per month via a window function.
    cum_acc = data.values('mois').annotate(
        cum_acc=Window(Count('mois'), order_by=F('mois').asc())).distinct()

    # Top six accident causes.
    causes = list(
        data.values("cause_acc").annotate(
            cause=Count("cause_acc")).order_by('-cause'))
    causes = causes[:6]
    att = list(zip(latitude, longitude))
    MarkerCluster(att, options={
        'maxClusterRadius': 50
    }).add_to((folium.FeatureGroup(name='Regroupement').add_to(m)))
    HeatMap(att, radius=15, min_opacity=0.8).add_to(
        folium.FeatureGroup(name='HeatMap').add_to(m))
    colormap = branca.colormap.LinearColormap(
        colors=['blue', 'lime', 'yellow', 'red'], vmin=0, vmax=0.8)
    colormap.add_to(m)  # add color bar at the top of the map
    folium.map.LayerControl('topleft', collapsed=True).add_to(m)
    m.add_to(f)
    m = f._repr_html_()  # render the figure to embeddable HTML

    return render(
        request, 'home/myCharts.html', {
            'daydata': ddata,
            'monthdata': mdata,
            'my_map': m,
            'wilaya_data': wdata,
            'routedata': routedata,
            'catdata': catdata,
            'accidents': acc,
            "bless": bless,
            "dec": dec,
            "evolution": evolution,
            'causes': causes,
            'accident': accident,
            'cum_acc': cum_acc,
            'hourdata': hdata,
            'temperaturedata': temperaturedata,
            "precipitationdata": precipitationdata,
            'myfilter': myfilter
        })
コード例 #22
0
 def get_annotation(self, annotators, min_field, min_rounds):
     """Return a Rank() window annotation partitioned by institution.

     The ranking order is delegated to self._get_ordering(...).
     """
     ranking_order = self._get_ordering(annotators, min_field, min_rounds)
     return Window(expression=Rank(),
                   order_by=ranking_order,
                   partition_by=F('institution_id'))
コード例 #23
0
    def execute_query(self):
        """Execute query and return provided data.

        Builds the annotated, grouped queryset, applies optional ranked
        limiting and deltas, then shapes the rows either as flat CSV
        records or as the nested group-by structure for the API.

        Returns:
            (Dict): Dictionary response of query params, data, and total

        """
        data = []

        q_table = self._mapper.query_table
        with tenant_context(self.tenant):
            query = q_table.objects.filter(self.query_filter)
            query_data = query.annotate(**self.annotations)
            # Always group and order by date first; the requested order follows.
            query_group_by = ['date'] + self._get_group_by()
            query_order_by = ['-date', ]
            query_order_by.extend([self.order])

            annotations = self._mapper.report_type_map.get('annotations')
            query_data = query_data.values(*query_group_by).annotate(**annotations)
            query_sum = self._build_sum(query)

            if self._limit:
                # Number rows within each date partition so _ranked_list()
                # can keep only the top-N rows per date.
                rank_order = getattr(F(self.order_field), self.order_direction)()
                rank_by_total = Window(
                    expression=RowNumber(),
                    partition_by=F('date'),
                    order_by=rank_order
                )
                query_data = query_data.annotate(rank=rank_by_total)
                query_order_by.insert(1, 'rank')
                query_data = self._ranked_list(query_data)

            if self._delta:
                query_data = self.add_deltas(query_data, query_sum)

            is_csv_output = self.parameters.accept_type and 'text/csv' in self.parameters.accept_type

            query_data, query_group_by = self.strip_label_column_name(
                query_data,
                query_group_by
            )
            query_data = self.order_by(query_data, query_order_by)

            if is_csv_output:
                # CSV output stays flat; re-apply ranking when a limit is set.
                if self._limit:
                    data = self._ranked_list(list(query_data))
                else:
                    data = list(query_data)
            else:
                # Nest the flat rows into the group-by hierarchy ('date' is
                # handled separately by _transform_data).
                groups = copy.deepcopy(query_group_by)
                groups.remove('date')
                data = self._apply_group_by(list(query_data), groups)
                data = self._transform_data(query_group_by, 0, data)

        # Present 'units' first, then the annotation keys, in the totals dict;
        # update() re-adds any remaining keys without reordering existing ones.
        key_order = list(['units'] + list(annotations.keys()))
        ordered_total = {total_key: query_sum[total_key]
                         for total_key in key_order if total_key in query_sum}
        ordered_total.update(query_sum)

        self.query_sum = ordered_total
        self.query_data = data
        return self._format_query_response()
コード例 #24
0
    def _group_by_ranks(self, query, data):  # noqa: C901
        """Handle grouping data by filter limit.

        Ranks the query rows by the effective ordering (a delta field, the
        explicit order_by when an offset is present, or the default
        ordering), collects the distinct group values in rank order, and
        returns *data* trimmed/ordered via _ranked_list().

        :param query: the base queryset to rank (annotations applied here)
        :param data: already-fetched rows (list of dicts) to regroup
        """
        group_by_value = self._get_group_by()
        gb = group_by_value if group_by_value else ["date"]
        tag_column = self._mapper.tag_column
        rank_orders = []

        rank_annotations = {}
        if "delta" in self.order:
            if "__" in self._delta:
                # Compound delta ("a__b") ranks by the ratio a / b.
                a, b = self._delta.split("__")
                rank_annotations = {
                    a: self.report_annotations[a],
                    b: self.report_annotations[b]
                }
                rank_orders.append(
                    getattr(F(a) / F(b), self.order_direction)())
            else:
                rank_annotations = {
                    self._delta: self.report_annotations[self._delta]
                }
                rank_orders.append(
                    getattr(F(self._delta), self.order_direction)())
        elif self._limit and "offset" in self.parameters.get(
                "filter", {}) and self.parameters.get("order_by"):
            if self.report_annotations.get(self.order_field):
                rank_annotations = {
                    self.order_field:
                    self.report_annotations.get(self.order_field)
                }
            # AWS is special and account alias is a foreign key field so special_rank was annotated on the query
            if self.order_field == "account_alias":
                rank_orders.append(
                    getattr(F("special_rank"), self.order_direction)())
            else:
                rank_orders.append(
                    getattr(F(self.order_field), self.order_direction)())
        else:
            # Fall back to the default ordering mapping; the loop leaves the
            # last (expected: only) entry in order_field/order_direction.
            for key, val in self.default_ordering.items():
                order_field, order_direction = key, val
            rank_annotations = {
                order_field: self.report_annotations.get(order_field)
            }
            rank_orders.append(getattr(F(order_field), order_direction)())

        if tag_column in gb[0]:
            rank_orders.append(self.get_tag_order_by(gb[0]))

        # this is a sub-query, but not really.
        # in the future, this could be accomplished using CTEs.
        rank_by_total = Window(expression=Rank(), order_by=rank_orders)
        if rank_annotations:
            ranks = (query.annotate(**self.annotations).values(
                *group_by_value).annotate(**rank_annotations).annotate(
                    rank=rank_by_total))
        else:
            ranks = query.annotate(**self.annotations).values(
                *group_by_value).annotate(rank=rank_by_total)

        # Collect distinct group values in rank order (deduplicated), after
        # normalizing missing values.
        rankings = []
        for rank in ranks:
            rank_value = rank.get(group_by_value[0])
            rank_value = self.check_missing_rank_value(rank_value)
            if rank_value not in rankings:
                rankings.append(rank_value)

        for query_return in data:
            query_return = self._apply_group_null_label(query_return, gb)
        return self._ranked_list(data, rankings)
コード例 #25
0
 def test_invalid_order_by(self):
     """A plain string is rejected as the Window order_by argument."""
     expected = 'order_by must be either an Expression or a sequence of expressions'
     with self.assertRaisesMessage(ValueError, expected):
         Window(expression=Sum('power'), order_by='-horse')
コード例 #26
0
 def test_invalid_filter(self):
     """Referencing a window annotation in filter() raises NotSupportedError."""
     expected = 'Window is disallowed in the filter clause'
     annotated = Employee.objects.annotate(dense_rank=Window(expression=DenseRank()))
     with self.assertRaisesMessage(NotSupportedError, expected):
         annotated.filter(dense_rank__gte=1)
コード例 #27
0
 def test_empty_group_by_cols(self):
     """A bare Window contributes no GROUP BY columns and is not an aggregate."""
     win = Window(expression=Sum('pk'))
     self.assertEqual([], win.get_group_by_cols())
     self.assertFalse(win.contains_aggregate)
コード例 #28
0
ファイル: query_handler.py プロジェクト: taftsanders/koku
    def execute_individual_query(self, org_unit_applied=False):  # noqa: C901
        """Execute query and return provided data.

        :param org_unit_applied: when True, skip ranked limiting (an
            org-unit grouping was already applied upstream).

        Returns:
            (list, dict): the shaped query data and the ordered totals.

        """
        data = []

        with tenant_context(self.tenant):
            query_table = self.query_table
            LOG.debug(f"Using query table: {query_table}")
            tag_results = None
            query = query_table.objects.filter(self.query_filter)
            query_data = query.annotate(**self.annotations)
            # Always group and order by date first; the requested order follows.
            query_group_by = ["date"] + self._get_group_by()
            query_order_by = ["-date"]
            query_order_by.extend([self.order])

            annotations = copy.deepcopy(
                self._mapper.report_type_map.get("annotations", {}))
            if not self.parameters.parameters.get("compute_count"):
                # Query parameter indicates count should be removed from DB queries
                annotations.pop("count", None)
                annotations.pop("count_units", None)

            query_data = query_data.values(*query_group_by).annotate(
                **annotations)

            if "account" in query_group_by:
                # Prefer the human-readable alias; fall back to the account id.
                query_data = query_data.annotate(account_alias=Coalesce(
                    F(self._mapper.provider_map.get("alias")),
                    "usage_account_id"))

                if self.parameters.parameters.get("check_tags"):
                    tag_results = self._get_associated_tags(
                        query_table, self.query_filter)

            query_sum = self._build_sum(query, annotations)

            if self._limit and query_data and not org_unit_applied:
                # Number rows within each date partition so _ranked_list()
                # can keep only the top-N rows per date.
                rank_orders = []
                if self.order_field == "delta":
                    rank_orders.append(
                        getattr(F(self._delta), self.order_direction)())
                else:
                    rank_orders.append(
                        getattr(F(self.order_field), self.order_direction)())
                rank_by_total = Window(expression=RowNumber(),
                                       partition_by=F("date"),
                                       order_by=rank_orders)
                query_data = query_data.annotate(rank=rank_by_total)
                query_order_by.insert(1, "rank")
                query_data = self._ranked_list(query_data)

            if self._delta:
                query_data = self.add_deltas(query_data, query_sum)

            query_data = self.order_by(query_data, query_order_by)

            # Fetch the data (returning list(dict))
            query_results = list(query_data)

            # Resolve tag exists for unique account returned
            # if tag_results is not Falsey
            # Append the flag to the query result for the report
            if tag_results is not None:
                # Add the tag results to the report query result dicts
                for res in query_results:
                    res["tags_exist"] = tag_results.get(
                        res["account_alias"], False)

            if not self.is_csv_output:
                # Nest the flat rows into the group-by hierarchy ('date' is
                # handled separately by _transform_data).
                groups = copy.deepcopy(query_group_by)
                groups.remove("date")
                data = self._apply_group_by(query_results, groups)
                data = self._transform_data(query_group_by, 0, data)
            else:
                data = query_results

        # Present 'units' first, then the annotation keys, in the totals dict;
        # update() re-adds any remaining keys without reordering existing ones.
        key_order = list(["units"] + list(annotations.keys()))
        ordered_total = {
            total_key: query_sum[total_key]
            for total_key in key_order if total_key in query_sum
        }
        ordered_total.update(query_sum)

        query_sum = ordered_total
        query_data = data
        return query_data, query_sum
コード例 #29
0
 def test_invalid_source_expression(self):
     """Expressions incompatible with OVER clauses are rejected by Window."""
     expected = "Expression 'Upper' isn't compatible with OVER clauses."
     with self.assertRaisesMessage(ValueError, expected):
         Window(expression=Upper('name'))
コード例 #30
0
ファイル: tests.py プロジェクト: GravyHands/django
 def test_empty_group_by_cols(self):
     """A Window without partitioning adds no GROUP BY columns or aggregation."""
     expr = Window(expression=Sum('pk'))
     self.assertFalse(expr.contains_aggregate)
     self.assertEqual(expr.get_group_by_cols(), [])
コード例 #31
0
ファイル: neighbours.py プロジェクト: dreggs76/CoGs
def get_neighbour_pks(model, pk, filterset=None, ordering=None):
    '''
    Given a model and pk that identify an object (model instance) will, given an ordering
    (defaulting to the model's ordering) and optionally a filterset (from url_filter),
    return a tuple that contains the PKs of the prior and next neighbour in the list
    either of all objects by that ordering or the filtered list (if a filterset is provided)

    :returns: a 4 tuple containing (prior_pk, next_pk, row_number, list_length),
              all None on any failure

    :param model:        The model the object is an instance of
    :param pk:           The primary key of the model instance being considered
    :param filterset:    An optional filterset (see https://github.com/miki725/django-url-filter)
    :param ordering:     An optional ordering (otherwise default model ordering is used). See: https://docs.djangoproject.com/en/2.0/ref/models/options/#ordering
    '''
    # If a filterset is provided ensure it's of the same model as specified
    # (consistency). Return the documented 4-tuple shape so callers can
    # always unpack the result.
    if filterset and not filterset.Meta.model == model:
        return (None, ) * 4

    # Get the ordering list for the model (a list of fields).
    # See: https://docs.djangoproject.com/en/2.0/ref/models/options/#ordering
    if ordering is None:
        ordering = model._meta.ordering

    # Translate "-field" / "field" strings into ordered F-expressions.
    order_by = []
    for f in ordering:
        if f.startswith("-"):
            order_by.append(F(f[1:]).desc())
        else:
            order_by.append(F(f).asc())

    # A default order. We need an order or the window functions crash.
    if len(order_by) == 0:
        order_by = ['pk']

    # Define the window functions for each neighbour
    window_lag = Window(expression=Lag("pk"), order_by=order_by)
    window_lead = Window(expression=Lead("pk"), order_by=order_by)
    window_rownnum = Window(expression=RowNumber(), order_by=order_by)

    # Get a queryset annotated with neighbours. If annotated attrs clash with existing attrs an exception
    # will be raised: https://code.djangoproject.com/ticket/11256
    try:
        # Start with all objects
        qs = model.objects.all()

        # Now apply a filterset if we have one
        if not filterset is None:
            # We respect the filterset. BUT we need to wrap it inside a sub query, so that
            # we can apply a DISTINCT ON pk to avoid duplicate tuples that the window
            # functions can introduce when we are matching multiple remote objects.
            # Alas that's what they do. So we have to constrain it to one tuple per
            # PK.
            #
            # FIXME: Aaargh this won't work for injecting the current PK into the query!
            # My desire is to make sure that the query results include the provided pk.
            # Needs testing in both cases. I can't think of a way to do it alas. This is
            # frustrating me. Problem is across related object filters, or JOINS.
            qs = qs.filter(pk__in=Subquery(filterset.filter().distinct(
                'pk').order_by('pk').values('pk')))

        # Now order the objects properly
        qs = qs.order_by(*order_by)

        # Now annotate the queryset with the prior and next PKs
        qs = qs.annotate(neighbour_prior=window_lag,
                         neighbour_next=window_lead,
                         row_number=window_rownnum)
    except Exception:
        # Annotation name clash (or a bad ordering) — fail with the
        # documented 4-tuple shape.
        return (None, ) * 4

    # Finally we need some trickery alas to do a query on the queryset! We can't add this WHERE
    # as a filter because the LAG and LEAD Window functions fail then, they are empty because
    # there is no lagger or leader on the one line result! So we have to run that query on the
    # whole table, then extract from the result the one line we want! Wish I could find a way to
    # do this in the Django ORM not with a raw() call.

    # First we need the SQL from the existing query. Many on-line sources seem to recommend
    # str(qs.query) but this does not return reliable SQL! A bug in Django and much discussed:
    #    https://code.djangoproject.com/ticket/30132
    #    https://code.djangoproject.com/ticket/25705
    #    https://code.djangoproject.com/ticket/25092
    #    https://code.djangoproject.com/ticket/24991
    #    https://code.djangoproject.com/ticket/17741
    #
    # But this, it seems is the reliable method which involves dipping into Django's
    # innards a little (the SQL compiler)
    sql, params = qs.query.get_compiler(using=qs.db).as_sql()

    # Now we wrap the SQL, binding pk as a query parameter rather than
    # formatting it into the SQL string (avoids SQL injection).
    sql = "SELECT * FROM ({}) ao WHERE {} = %s".format(sql,
                                                       model._meta.pk.name)
    params = tuple(params) + (pk, )

    # And create a new QuerySet
    ao = model.objects.raw(sql, params)

    try:
        if ao:
            if len(ao) == 1:
                return (ao[0].neighbour_prior, ao[0].neighbour_next,
                        ao[0].row_number, qs.count())
            else:
                raise ValueError(
                    "Query error: object appears more than once in neighbour hunt."
                )
        else:
            return (None, ) * 4
    except Exception:
        return (None, ) * 4