Example #1
    def get_queryset(self):
        # last_response_count is the number of submissions for a single problem part, so the
        # summed counts must be divided by the number of distinct problem parts to get
        # submissions per problem rather than per problem *part*.
        aggregation_query = """
SELECT
    module_id,
    SUM(last_response_count)/COUNT(DISTINCT part_id) AS total_submissions,
    SUM(CASE WHEN correct=1 THEN last_response_count ELSE 0 END)/COUNT(DISTINCT part_id) AS correct_submissions,
    GROUP_CONCAT(DISTINCT part_id) AS part_ids,
    MAX(created) AS created
FROM answer_distribution
WHERE course_id = %s
GROUP BY module_id;
        """

        connection = connections[settings.ANALYTICS_DATABASE]
        with connection.cursor() as cursor:
            if connection.vendor == 'mysql':
                # The default value of group_concat_max_len, 1024, is too low for some course data. Increase this value
                # to its maximum possible value. For more information see
                # http://code.openark.org/blog/mysql/those-oversized-undersized-variables-defaults.
                cursor.execute(
                    "SET @@group_concat_max_len = @@max_allowed_packet;")

                cursor.execute("DESCRIBE answer_distribution;")
                column_names = [row[0] for row in cursor.fetchall()]
            # Alternate query for sqlite test database
            else:
                cursor.execute("PRAGMA table_info(answer_distribution)")
                column_names = [row[1] for row in cursor.fetchall()]

            if 'last_response_count' in column_names:
                cursor.execute(aggregation_query, [self.course_id])
            else:
                cursor.execute(
                    aggregation_query.replace('last_response_count', 'count'),
                    [self.course_id])

            rows = dictfetchall(cursor)

        for row in rows:
            # Convert the comma-separated list into an array of strings.
            row['part_ids'] = row['part_ids'].split(',')

            # Convert the aggregated decimal fields to integers
            row['total_submissions'] = int(row['total_submissions'])
            row['correct_submissions'] = int(row['correct_submissions'])

            # Rather than write custom SQL for the SQLite backend, simply parse the timestamp.
            created = row['created']
            if not isinstance(created, datetime.datetime):
                row['created'] = datetime.datetime.strptime(
                    created, '%Y-%m-%d %H:%M:%S')

        return rows
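
The snippet passes the cursor to a dictfetchall helper that is not defined here (Example #3 below uses it as well). It is presumably the recipe from the Django documentation that converts raw result tuples into dictionaries keyed by column name; a minimal sketch, assuming that behaviour (the project's own helper may differ), is:

def dictfetchall(cursor):
    # Return all rows from a cursor as a list of dicts keyed by column name.
    # This is the standard Django docs recipe, shown here only for context.
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]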
Example #3
    def get_queryset(self):
        sql = """
SELECT
    module_id,
    SUM(count) AS total_submissions,
    SUM(CASE WHEN correct=1 THEN count ELSE 0 END) AS correct_submissions,
    GROUP_CONCAT(DISTINCT part_id) AS part_ids,
    MAX(created) AS created
FROM answer_distribution
WHERE course_id = %s
GROUP BY module_id;
        """
        connection = connections[settings.ANALYTICS_DATABASE]
        with connection.cursor() as cursor:
            if connection.vendor == 'mysql':
                # The default value of group_concat_max_len, 1024, is too low for some course data. Increase this value
                # to its maximum possible value. For more information see
                # http://code.openark.org/blog/mysql/those-oversized-undersized-variables-defaults.
                cursor.execute(
                    "SET @@group_concat_max_len = @@max_allowed_packet;")

            cursor.execute(sql, [self.course_id])
            rows = dictfetchall(cursor)

        for row in rows:
            # Convert the comma-separated list into an array of strings.
            row['part_ids'] = row['part_ids'].split(',')

            # Convert the aggregated decimal fields to integers
            row['total_submissions'] = int(row['total_submissions'])
            row['correct_submissions'] = int(row['correct_submissions'])

            # Rather than write custom SQL for the SQLite backend, simply parse the timestamp.
            created = row['created']
            if not isinstance(created, datetime.datetime):
                row['created'] = datetime.datetime.strptime(
                    created, '%Y-%m-%d %H:%M:%S.%f')

        return rows
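
For reference, both get_queryset methods assume a Django view-like class that exposes self.course_id and has the imports and settings below available. The class name here is purely illustrative, not the actual class from the source project; settings.ANALYTICS_DATABASE is assumed to be the alias of a configured database containing the answer_distribution table.

import datetime

from django.conf import settings
from django.db import connections


class ProblemAnswerDistributionList(object):  # hypothetical name for illustration
    def __init__(self, course_id):
        self.course_id = course_id

    # get_queryset() from either example above would live here and return a
    # list of dicts with module_id, total_submissions, correct_submissions,
    # part_ids, and created keys, ready to be serialized by the view.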