Example #1
def get_rune_report(qs, total_log_count):
    if qs.count() == 0:
        return None

    # Substat distribution
    # Unable to use database aggregation on an ArrayField without ORM gymnastics, so post-process data in python
    all_substats = qs.annotate(
        flat_substats=Func(F('substats'), function='unnest')).values_list(
            'flat_substats', flat=True)
    substat_counts = Counter(all_substats)

    # Sell value ranges
    min_value, max_value = qs.aggregate(Min('value'), Max('value')).values()
    min_value = int(floor_to_nearest(min_value, 1000))
    max_value = int(ceil_to_nearest(max_value, 1000))

    return {
        'stars': {
            'type':
            'occurrences',
            'total':
            qs.count(),
            'data':
            transform_to_dict(
                list(
                    qs.values(grade=Concat(Cast(
                        'stars', CharField()), Value('⭐'))).annotate(
                            count=Count('pk')).order_by('-count'))),
        },
        'type': {
            'type':
            'occurrences',
            'total':
            qs.count(),
            'data':
            transform_to_dict(
                replace_value_with_choice(
                    list(
                        qs.values('type').annotate(
                            count=Count('pk')).order_by('-count')),
                    {'type': qs.model.TYPE_CHOICES})),
        },
        'quality': {
            'type':
            'occurrences',
            'total':
            qs.count(),
            'data':
            transform_to_dict(
                replace_value_with_choice(
                    list(
                        qs.values('quality').annotate(
                            count=Count('pk')).order_by('-count')),
                    {'quality': qs.model.QUALITY_CHOICES})),
        },
        'slot': {
            'type':
            'occurrences',
            'total':
            qs.count(),
            'data':
            transform_to_dict(
                list(
                    qs.values('slot').annotate(
                        count=Count('pk')).order_by('-count'))),
        },
        'main_stat': {
            'type':
            'occurrences',
            'total':
            qs.count(),
            'data':
            transform_to_dict(
                replace_value_with_choice(
                    list(
                        qs.values('main_stat').annotate(
                            count=Count('main_stat')).order_by('main_stat')),
                    {'main_stat': qs.model.STAT_CHOICES}))
        },
        'innate_stat': {
            'type':
            'occurrences',
            'total':
            qs.count(),
            'data':
            transform_to_dict(
                replace_value_with_choice(
                    list(
                        qs.values('innate_stat').annotate(
                            count=Count('pk')).order_by('innate_stat')),
                    {'innate_stat': qs.model.STAT_CHOICES}))
        },
        'substats': {
            'type':
            'occurrences',
            'total':
            len(all_substats),
            'data':
            transform_to_dict(
                replace_value_with_choice(
                    sorted([{
                        'substat': k,
                        'count': v
                    } for k, v in substat_counts.items()],
                           key=lambda count: count['substat']),
                    {'substat': qs.model.STAT_CHOICES}), )
        },
        'max_efficiency': {
            'type':
            'histogram',
            'width':
            5,
            'data':
            histogram(qs,
                      'max_efficiency',
                      range(0, 100, 5),
                      slice_on='quality'),
        },
        'value': {
            'type':
            'histogram',
            'width':
            500,
            'data':
            histogram(qs,
                      'value',
                      range(min_value, max_value, 500),
                      slice_on='quality')
        }
    }
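The floor_to_nearest, ceil_to_nearest and histogram helpers used above are defined elsewhere in that project and are not shown in this listing. A minimal, hypothetical sketch of the two rounding helpers (assumed behaviour, not the original source) could be:

import math

def floor_to_nearest(value, step):
    # round down to the nearest multiple of step, e.g. floor_to_nearest(1234, 1000) == 1000
    return math.floor(value / step) * step

def ceil_to_nearest(value, step):
    # round up to the nearest multiple of step, e.g. ceil_to_nearest(1234, 1000) == 2000
    return math.ceil(value / step) * step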
Example #2
 def get_mean_price(self):
     return self.itinerary_set.all() \
         .annotate(min_price=Min('pricingoption__price')) \
         .aggregate(Avg('min_price'))['min_price__avg']
Example #3
 def get_queryset(self, *args, **kwargs):
     qs = super(TriggerCRUDL.List, self).get_queryset(*args, **kwargs)
     qs = qs.filter(is_active=True, is_archived=False).annotate(earliest_group=Min('groups__name')).order_by('keyword', 'earliest_group')
     return qs
Example #4
    def test_populate_line_item_daily_summary_table(self, mock_vacuum):
        """Test that the line item daily summary table populates."""
        self.tearDown()
        self.reporting_period = self.creator.create_ocp_report_period(
            provider_id=self.ocp_provider_id, cluster_id=self.cluster_id)
        self.report = self.creator.create_ocp_report(self.reporting_period)
        report_table_name = OCP_REPORT_TABLE_MAP['report']
        summary_table_name = OCP_REPORT_TABLE_MAP['line_item_daily_summary']

        report_table = getattr(self.accessor.report_schema, report_table_name)
        summary_table = getattr(self.accessor.report_schema,
                                summary_table_name)

        for _ in range(25):
            self.creator.create_ocp_usage_line_item(self.reporting_period,
                                                    self.report)
        with schema_context(self.schema):
            report_entry = report_table.objects.all().aggregate(
                Min('interval_start'), Max('interval_start'))
            start_date = report_entry['interval_start__min']
            end_date = report_entry['interval_start__max']

            start_date = start_date.replace(hour=0,
                                            minute=0,
                                            second=0,
                                            microsecond=0)
            end_date = end_date.replace(hour=0,
                                        minute=0,
                                        second=0,
                                        microsecond=0)

            query = self.accessor._get_db_obj_query(summary_table_name)
            initial_count = query.count()

            self.accessor.populate_line_item_daily_table(
                start_date, end_date, self.cluster_id)
            self.accessor.populate_line_item_daily_summary_table(
                start_date, end_date, self.cluster_id)

            self.assertNotEqual(query.count(), initial_count)

            summary_entry = summary_table.objects.all().aggregate(
                Min('usage_start'), Max('usage_start'))
            result_start_date = summary_entry['usage_start__min']
            result_end_date = summary_entry['usage_start__max']

            self.assertEqual(result_start_date, start_date)
            self.assertEqual(result_end_date, end_date)

            entry = query.first()

        summary_columns = [
            'cluster_id',
            'namespace',
            'node',
            'node_capacity_cpu_core_hours',
            'node_capacity_cpu_cores',
            'node_capacity_memory_gigabyte_hours',
            'node_capacity_memory_gigabytes',
            'pod',
            'pod_labels',
            'pod_limit_cpu_core_hours',
            'pod_limit_memory_gigabyte_hours',
            'pod_request_cpu_core_hours',
            'pod_request_memory_gigabyte_hours',
            'pod_usage_cpu_core_hours',
            'pod_usage_memory_gigabyte_hours',
            'usage_end',
            'usage_start',
        ]

        for column in summary_columns:
            self.assertIsNotNone(getattr(entry, column))
Example #5
    def list(self, request):
        nodes = []
        # Loop through the last active nodes
        for last_active in LastActiveNodes.objects.iterator():
            # Get the current node
            node = Node.objects.filter(Q(id=last_active.node.id),
                                       ~Q(sensors=None)).get()

            # The last active date
            last_data_received_at = last_active.last_data_received_at

            # last_data_received_at
            stats = []
            moved_to = None
            # Get data stats from 5mins before last_data_received_at
            if last_data_received_at:
                last_5_mins = last_data_received_at - datetime.timedelta(
                    minutes=5)
                stats = (
                    SensorDataValue.objects.filter(
                        Q(sensordata__sensor__node=last_active.node.id),
                        Q(sensordata__location=last_active.location.id),
                        Q(sensordata__timestamp__gte=last_5_mins),
                        Q(sensordata__timestamp__lte=last_data_received_at),
                        # Ignore timestamp values
                        ~Q(value_type="timestamp"),
                        # Match only valid float text
                        Q(value__regex=r"^\-?\d+(\.?\d+)?$"),
                    ).order_by().values("value_type").annotate(
                        sensor_id=F("sensordata__sensor__id"),
                        start_datetime=Min("sensordata__timestamp"),
                        end_datetime=Max("sensordata__timestamp"),
                        average=Avg(Cast("value", FloatField())),
                        minimum=Min(Cast("value", FloatField())),
                        maximum=Max(Cast("value", FloatField())),
                    ))

            # If the last_active node location is not same as current node location
            # then the node has moved locations since it was last active
            if last_active.location.id != node.location.id:
                moved_to = {
                    "name": node.location.location,
                    "longitude": node.location.longitude,
                    "latitude": node.location.latitude,
                    "city": {
                        "name": node.location.city,
                        "slug": slugify(node.location.city),
                    },
                }

            nodes.append({
                "node_moved": moved_to is not None,
                "moved_to": moved_to,
                "node": {
                    "uid": last_active.node.uid,
                    "id": last_active.node.id
                },
                "location": {
                    "name": last_active.location.location,
                    "longitude": last_active.location.longitude,
                    "latitude": last_active.location.latitude,
                    "city": {
                        "name": last_active.location.city,
                        "slug": slugify(last_active.location.city),
                    },
                },
                "last_data_received_at": last_data_received_at,
                "stats": stats,
            })
        return Response(nodes)
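One caveat in the loop above: the .get() call raises Node.DoesNotExist when a last-active node has no sensors, which would abort the whole listing. A hedged drop-in guard (same filter, .first() instead of .get()) might look like:

            node = Node.objects.filter(Q(id=last_active.node.id),
                                       ~Q(sensors=None)).first()
            if node is None:
                # no sensor-bearing node found for this entry; skip it
                continue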
Example #6
def climb_query(request, climb_type='boulder'):
    '''
    Display a table where each row is the summary of a day of setting for
    specified gym.
    '''

    gym = get_user(request).current_gym

    try:
        climb_selection = request.session['select_climbs']
    except KeyError:
        climb_selection = []

    if request.method == "POST" and request.session.get('round_two'):
        climb_ids = list(request.POST.getlist('selection'))
        request.session['remove_climbs'] = climb_ids
        num_climbs = len(climb_ids)
        climbs = Climb.objects.filter(pk__in=climb_ids)
        areas = Climb.objects.values('area__location_name').filter(pk__in=climb_ids).annotate(count=Count('grade'))
        table = ClimbRemoveTable(climbs)

        return render(request, 'climbs/replace_climbs.html', {'table': table, 'areas': areas, 'gym':gym, 'climb_type':climb_type })

    elif request.method == "POST":
        request.session['round_two'] = True
        date_created = list(request.POST.getlist('selection'))
        climb_selection = date_created
        climbs = Climb.objects.filter(date_created__in=date_created, status__status='current')
        areas = Climb.objects.values('area__location_name').filter(date_created__in=date_created).annotate(count=Count('grade'))
        table = ClimbQueryTable(climbs)
        area = Area.objects.get(location_name='Alcove')

        return render(request, 'climbs/climbs_list.html', {'table': table, 'areas': areas, 'gym':gym, 'climb_type':climb_type })

    # Present summary of climbs by date
    query = Climb.objects.values('date_created', 'area__location_name')\
        .filter(area__gym__name=gym, status__status='current')\
        .order_by('date_created')\
        .annotate(count=Count('grade'), min_grade=Min('grade__pk'), max_grade=Max('grade__pk'))

    table_data = []
    for item in list(query):
        grade_pk = []

        for item1 in list(query):
            if item['date_created'] == item1['date_created'] and item['area__location_name'] not in item1['area__location_name']:
                item['area__location_name'] += ', '+ item1['area__location_name']
                pk = [item['min_grade'], item['max_grade'], item1['min_grade'], item1['max_grade']]
                item['count'] += item1['count']
                grade_pk += pk

        if grade_pk:
            item['min_grade'] = Grade.objects.get(pk=min(grade_pk)).grade
            item['max_grade'] = Grade.objects.get(pk=max(grade_pk)).grade

        else:
            item['min_grade'] = Grade.objects.get(pk=item['min_grade']).grade
            item['max_grade'] = Grade.objects.get(pk=item['max_grade']).grade

        validater = []
        for route_set in table_data:
            validater.append(route_set['date_created'])

        if item['date_created'] not in validater:
            table_data.append(item)

    table = ClimbQuery(table_data) #table_data
    RequestConfig(request).configure(table)

    return render(request, 'climbs/climbs_list.html',
        {'table': table, 'gym': gym})
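The nested loop above merges rows sharing a date_created in Python and issues one Grade lookup per row. A hedged alternative, assuming the project runs on PostgreSQL, is to group by date alone and aggregate the area names in the database with StringAgg:

from django.contrib.postgres.aggregates import StringAgg

summary = (Climb.objects
           .filter(area__gym__name=gym, status__status='current')
           .values('date_created')
           .order_by('date_created')
           .annotate(areas=StringAgg('area__location_name', ', ', distinct=True),
                     count=Count('grade'),
                     min_grade=Min('grade__pk'),
                     max_grade=Max('grade__pk')))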
Example #7
def min_anio_oferta():
    """Función que retorna el menor de los años de las ofertas guardadas
    """
    return Oferta.objects.all().aggregate(Min('anio')).get('anio__min')
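aggregate(Min('anio')) yields {'anio__min': None} when there are no Oferta rows, so callers may want a fallback; a hedged variant (the default value here is an assumption) could be:

def min_anio_oferta_or_default(default=2000):
    """Like min_anio_oferta(), but returns `default` when no Oferta rows exist."""
    minimo = Oferta.objects.aggregate(Min('anio')).get('anio__min')
    return minimo if minimo is not None else default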
Example #8
    def do_calc_limit_alerts(obj_man, is_host, time, mode='last before', device_ids=None):
        assert mode in ('last before', 'first after')

        group_by_fields = ['device_id', 'state', 'state_type']
        additional_fields = ['date', 'msg']
        if not is_host:
            group_by_fields.extend(['service_id', 'service_info'])

        # NOTE: code was written for 'last_before' mode and then generalised, hence some vars are called 'latest...'
        try:
            if mode == 'last before':
                latest_dev_independent_service_alert =\
                    obj_man.filter(date__lte=time, device_independent=True).latest('date')
            else:
                latest_dev_independent_service_alert =\
                    obj_man.filter(date__gte=time, device_independent=True).earliest('date')

            # can't use values() on single entry
            latest_dev_independent_service_alert = {key: getattr(latest_dev_independent_service_alert, key)
                                                    for key in (group_by_fields + additional_fields)}
        except obj_man.model.DoesNotExist:
            latest_dev_independent_service_alert = None

        # get last service alert of each service before the start time
        additional_device_filter = {}
        if device_ids is not None:
            additional_device_filter = {'device__in': device_ids}
        last_service_alert_cache = {}
        if mode == 'last before':
            queryset = obj_man.filter(date__lte=time, device_independent=False, **additional_device_filter)
        else:
            queryset = obj_man.filter(date__gte=time, device_independent=False, **additional_device_filter)

        queryset = queryset.values(*group_by_fields)

        # only fetch these values and annotate with the extreme date, so we get each field-tuple together with its extreme date

        if mode == 'last before':
            queryset = queryset.annotate(extreme_date=Max('date'))
        else:
            queryset = queryset.annotate(extreme_date=Min('date'))

        for entry in queryset:
            # prefer latest info if there is dev independent one

            if mode == 'last before':
                comp = lambda x, y: x > y
            else:
                comp = lambda x, y: x < y
            if latest_dev_independent_service_alert is not None and \
                    comp(latest_dev_independent_service_alert['date'], entry['extreme_date']):
                relevant_entry = latest_dev_independent_service_alert
            else:
                relevant_entry = entry

            if is_host:
                key = entry['device_id']
            else:
                key = entry['device_id'], entry['service_id'], entry['service_info']

            # the query above is not perfect, it should group only by device and service
            # this seems to be hard in django:
            # http://stackoverflow.com/questions/19923877/django-orm-get-latest-for-each-group
            # so we do the last grouping by this key here manually
            if key not in last_service_alert_cache or comp(entry['extreme_date'], last_service_alert_cache[key][1]):
                last_service_alert_cache[key] = relevant_entry, entry['extreme_date']

        # NOTE: apparently, in django, if you use group_by, you can only select the elements you group_by and
        #       the annotated elements, so we retrieve the extra parameters manually
        for k, v in last_service_alert_cache.items():
            if any(key not in v[0] for key in additional_fields):
                if is_host:
                    additional_fields_query = obj_man.filter(device_id=k, date=v[1])
                else:
                    additional_fields_query = obj_man.filter(device_id=k[0], service_id=k[1], service_info=k[2], date=v[1])

                if len(additional_fields_query) == 0:  # must be dev independent
                    additional_fields_query = obj_man.filter(device_independent=True, date=v[1])

                v[0].update(additional_fields_query.values(*additional_fields)[0])

        # drop extreme date
        return {k: v[0] for (k, v) in last_service_alert_cache.items()}
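The comments above note that "latest row per (device, service)" grouping is awkward in the ORM and link to a Stack Overflow discussion. On Django 1.11+ a hedged sketch of the correlated-subquery approach (reusing obj_man and time from this function, 'last before' service case only) would be:

from django.db.models import OuterRef, Subquery

newest = obj_man.filter(
    device_id=OuterRef('device_id'),
    service_id=OuterRef('service_id'),
    date__lte=time,
    device_independent=False,
).order_by('-date')

# keep only the row whose pk is the newest pk for its own (device, service) pair
latest_per_service = obj_man.filter(
    date__lte=time,
    device_independent=False,
    pk=Subquery(newest.values('pk')[:1]),
)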
Example #9
def user_course_activity_view(request, user_id, course_id):

    view_user, response = get_user(request, user_id)
    if response is not None:
        return response

    dashboard_accessed.send(sender=None, request=request, data=None)
    course = can_view_course(request, course_id)

    act_quizzes = Activity.objects.filter(section__course=course,
                                          type=Activity.QUIZ).order_by(
                                              'section__order', 'order')

    quizzes_attempted = 0
    quizzes_passed = 0
    course_pretest = None

    quizzes = []
    for aq in act_quizzes:
        try:
            quizobjs = Quiz.objects.filter(quizprops__value=aq.digest,
                                           quizprops__name="digest")
            if quizobjs.count() <= 0:
                continue
            else:
                quiz = quizobjs[0]
        except Quiz.DoesNotExist:
            quiz = None

        attempts = QuizAttempt.objects.filter(quiz=quiz, user=view_user)
        num_attempts = attempts.count()
        passed = False
        if num_attempts > 0:

            quiz_maxscore = float(attempts[0].maxscore)
            attempts_stats = attempts.aggregate(max=Max('score'),
                                                min=Min('score'),
                                                avg=Avg('score'))
            max_score = 100 * float(attempts_stats['max']) / quiz_maxscore
            min_score = 100 * float(attempts_stats['min']) / quiz_maxscore
            avg_score = 100 * float(attempts_stats['avg']) / quiz_maxscore
            first_date = attempts.aggregate(date=Min('attempt_date'))['date']
            recent_date = attempts.aggregate(date=Max('attempt_date'))['date']
            first_score = 100 * float(
                attempts.filter(
                    attempt_date=first_date)[0].score) / quiz_maxscore
            latest_score = 100 * float(
                attempts.filter(
                    attempt_date=recent_date)[0].score) / quiz_maxscore

            passed = max_score is not None and max_score > 75
            if aq.section.order == 0:
                course_pretest = first_score
            else:
                quizzes_attempted += 1
                quizzes_passed = (quizzes_passed +
                                  1) if passed else quizzes_passed

        else:
            max_score = None
            min_score = None
            avg_score = None
            first_score = None
            latest_score = None

        quiz = {
            'quiz': aq,
            'quiz_order': aq.order,
            'no_attempts': num_attempts,
            'max_score': max_score,
            'min_score': min_score,
            'first_score': first_score,
            'latest_score': latest_score,
            'avg_score': avg_score,
            'passed': passed
        }
        quizzes.append(quiz)

    activities_completed = course.get_activities_completed(course, view_user)
    activities_total = course.get_no_activities()
    activities_percent = (activities_completed * 100) / activities_total

    activity = []
    start_date = timezone.now() - datetime.timedelta(days=31)
    end_date = timezone.now()
    no_days = (end_date - start_date).days + 1

    trackers = Tracker.objects.filter(course=course,
                                      user=view_user,
                                      tracker_date__gte=start_date,
                                      tracker_date__lte=end_date) \
                                      .extra({'activity_date':"date(tracker_date)"}) \
                                      .values('activity_date') \
                                      .annotate(count=Count('id'))
    for i in range(0, no_days, +1):
        temp = start_date + datetime.timedelta(days=i)
        count = next(
            (dct['count']
             for dct in trackers if dct['activity_date'] == temp.date()), 0)
        activity.append([temp.strftime("%d %b %Y"), count])

    order_options = [
        'quiz_order', 'no_attempts', 'max_score', 'min_score', 'first_score',
        'latest_score', 'avg_score'
    ]
    default_order = 'quiz_order'
    ordering = request.GET.get('order_by', default_order)
    inverse_order = ordering.startswith('-')
    if inverse_order:
        ordering = ordering[1:]
    if ordering not in order_options:
        ordering = default_order
        inverse_order = False

    quizzes.sort(key=operator.itemgetter(ordering), reverse=inverse_order)

    return render_to_response(
        'oppia/profile/user-course-scorecard.html', {
            'view_user': view_user,
            'course': course,
            'quizzes': quizzes,
            'quizzes_passed': quizzes_passed,
            'quizzes_attempted': quizzes_attempted,
            'pretest_score': course_pretest,
            'activities_completed': activities_completed,
            'activities_total': activities_total,
            'activities_percent': activities_percent,
            'page_ordering': ('-' if inverse_order else '') + ordering,
            'activity_graph_data': activity
        },
        context_instance=RequestContext(request))
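The attempts block above issues three separate aggregate() queries (score stats, first date, recent date); a hedged consolidation into a single round trip could look like:

            stats = attempts.aggregate(max=Max('score'),
                                       min=Min('score'),
                                       avg=Avg('score'),
                                       first_date=Min('attempt_date'),
                                       recent_date=Max('attempt_date'))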
Example #10
def data_oper(req):

    # 1. Django's lazy evaluation
    obj_set = models.Book.objects.filter(id=2)  # returns a QuerySet; this line does not hit the database
    # 1.1: demonstrating Django's cache behaviour
    #     for obj in obj_set:
    #         print(obj.title)
    #     obj_set.update(title="yyy")  # without this line both for-loops read only from the cache; with it the database is hit as well as the cache
    #     for obj in obj_set:
    #         print(obj.title)
    # 1.2: use an iterator to avoid pulling every row into the cache
    #     if obj_set.iterator():
    #         pass
    # 1.3: slicing a QuerySet hits the database
    #     obj = obj_set[0]

    # ###################################################################################################################################

    # 2. Object-style lookups
    # 2.1. Forward lookup
    obj1 = models.Book.objects.filter(title="Go")[0]  # find the city of a given book's publisher
    print(obj1.publisher.city)
    ret1 = models.Book.objects.first()
    print(ret1.title)
    print(ret1.price)
    print(ret1.publisher)
    print(ret1.publisher.name)  # because of the one-to-many relation, ret1.publisher is a single object rather than a QuerySet
    # 2.2. Reverse lookup
    obj2 = models.Publish.objects.filter(name="人民出版社").first()  # fetch the publisher named "人民出版社"
    print(obj2.book_set.all().values("title").distinct())  # distinct() removes duplicate rows
    ret2 = models.Publish.objects.last()
    print(ret2.name)
    print(ret2.city)
    # how do we get the Book objects bound to it?
    print(ret2.book_set.all())  # ret2.book_set.all() is a QuerySet

    # ###################################################################################################################################

    # 3. Double-underscore (__) lookups on a single table
    models.Book.objects.filter(id__lt=10, id__gt=1)  # rows with id greater than 1 and less than 10
    models.Book.objects.filter(id__in=[11, 22, 33])  # rows whose id is 11, 22 or 33
    models.Book.objects.exclude(id__in=[11, 22, 33])  # not in
    models.Book.objects.filter(name__contains="ven")  # case-sensitive containment filter
    models.Book.objects.filter(name__icontains="ven")  # case-insensitive containment filter
    models.Book.objects.filter(id__range=[1, 2])  # range, i.e. BETWEEN ... AND ...
    models.Book.objects.filter(
        title__startswith="P")  # startswith, istartswith, endswith, iendswith

    # ###################################################################################################################################

    # 4. Double-underscore (__) lookups across related tables
    # 4.1. Forward lookup (as filter condition)
    ret3 = models.Book.objects.filter(title='Python').values('id')
    print(ret3)  # [{'id': 1}]
    # 4.1.1. Forward lookup, one-to-many
    ret4 = models.Book.objects.filter(title='Python').values('publisher__city')
    print(ret4)  # [{'publisher__city': '北京'}]
    # 4.1.2. Forward lookup, many-to-many
    ret5 = models.Book.objects.filter(title='Python').values('author__name')
    print(ret5)
    ret6 = models.Book.objects.filter(author__name="alex").values('title')
    print(ret6)
    # Note
    # in a forward lookup, the publisher in publisher__city and the author in author__name are the related fields declared on the Book model
    # one-to-many and many-to-many are used the same way here

    # 4.2. Reverse lookup (as filter condition)
    # 4.2.1. Reverse lookup, one-to-many:
    ret8 = models.Publisher.objects.filter(book__title='Python').values('name')
    print(ret8)  # [{'name': '人大出版社'}]  note: the book in book__title is the related table name seen from Publisher
    ret9 = models.Publisher.objects.filter(
        book__title='Python').values('book__authors')
    print(ret9)  # [{'book__authors': 1}, {'book__authors': 2}]
    # 4.2.2. Reverse lookup, many-to-many:
    ret10 = models.Author.objects.filter(book__title='Python').values('name')
    print(ret10)  # [{'name': 'alex'}, {'name': 'alvin'}]
    # Note
    # in a reverse lookup, the book in book__title is the (lowercased) model name Book
    # one-to-many and many-to-many are used the same way here

    # ###################################################################################################################################

    # 5. Aggregation and grouping
    # 5.1. Aggregation -- aggregate(*args, **kwargs): computes over the whole QuerySet and returns a dict of aggregate values; each argument to aggregate() names one value in that dict, i.e. the aggregate is generated over the entire queryset
    # average price of all books on sale.
    Book.objects.all().aggregate(Avg('price'))  # {'price__avg': 34.35}
    Book.objects.aggregate(
        average_price=Avg('price'))  # {'average_price': 34.35}
    # maximum and minimum price across all books:
    Book.objects.aggregate(
        Avg('price'), Max('price'), Min('price')
    )  # {'price__avg': 34.35, 'price__max': Decimal('81.20'), 'price__min': Decimal('12.99')}
    # total price of the books written by alex
    Book.objects.filter(author__name='alex').aggregate(Sum('price'))
    # 5.2. Grouping -- annotate(*args, **kwargs): computes over the set of objects related to each item in the result, yielding a total (or average, sum, ...) per item, i.e. an aggregate is generated for every row of the queryset
    # total price of the books per author; this involves grouping, and the group key is author__name
    Book.objects.values("author__name").annotate(Sum('price'))
    # cheapest book price per publisher
    Book.objects.values("publisher__name").annotate(Min('price'))

    return HttpResponse("前端打印ok")
Example #11
def store_air_quality_historic_data():
    """ Stores max min and avg values for yesterday
    """
    logger.info('BEGIN -- running task: store_air_quality_historic_data')
    yesterday = datetime.datetime.now() - datetime.timedelta(days=1)

    for station in AirQualityStation.objects.active():
        try:
            # @CHECKME maybe data should be filtered date > yesterday 00:06:00
            data = AirQualityData.objects.filter(
                station=station,
                datetime__year=yesterday.year,
                datetime__month=yesterday.month,
                datetime__day=yesterday.day,
            )

            # air quality index
            max_air_quality_index = data.aggregate(
                Max('air_quality_index_max'))
            min_air_quality_index = data.aggregate(
                Min('air_quality_index_min'))
            avg_air_quality_index = data.aggregate(Avg('air_quality_index'))
            # pm1
            max_pm1 = data.aggregate(Max('pm1_max'))
            min_pm1 = data.aggregate(Min('pm1_min'))
            avg_pm1 = data.aggregate(Avg('pm1'))
            # pm2.5
            max_pm25 = data.aggregate(Max('pm25_max'))
            min_pm25 = data.aggregate(Min('pm25_min'))
            avg_pm25 = data.aggregate(Avg('pm25'))
            # pm10
            max_pm10 = data.aggregate(Max('pm10_max'))
            min_pm10 = data.aggregate(Min('pm10_min'))
            avg_pm10 = data.aggregate(Avg('pm10'))

            history = HistoricAirQualityData(
                station=station,
                date=yesterday.date(),
                air_quality_index_max=max_air_quality_index[
                    'air_quality_index_max__max'],
                air_quality_index_min=min_air_quality_index[
                    'air_quality_index_min__min'],
                air_quality_index_mean=avg_air_quality_index[
                    'air_quality_index__avg'],
                pm1_max=max_pm1['pm1_max__max'],
                pm1_min=min_pm1['pm1_min__min'],
                pm1_mean=avg_pm1['pm1__avg'],
                pm25_max=max_pm25['pm25_max__max'],
                pm25_min=min_pm25['pm25_min__min'],
                pm25_mean=avg_pm25['pm25__avg'],
                pm10_max=max_pm10['pm10_max__max'],
                pm10_min=min_pm10['pm10_min__min'],
                pm10_mean=avg_pm10['pm10__avg'],
            )
            history.save()
            logger.info('station %s history save successful' % (station.name))
        except Exception as e:
            logger.warn('station %s history save failed: %s' %
                        (station.name, str(e)))  # noqa

    logger.info('END -- running task: store_air_quality_historic_data')
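The twelve aggregate() calls in the loop above each hit the database separately; a hedged consolidation into one query over the same per-station data queryset, producing the same result keys, would be:

            totals = data.aggregate(
                Max('air_quality_index_max'), Min('air_quality_index_min'), Avg('air_quality_index'),
                Max('pm1_max'), Min('pm1_min'), Avg('pm1'),
                Max('pm25_max'), Min('pm25_min'), Avg('pm25'),
                Max('pm10_max'), Min('pm10_min'), Avg('pm10'),
            )
            # e.g. totals['pm1_max__max'], totals['pm25_min__min'], totals['air_quality_index__avg']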
Example #12
    def get(self, request: Request, team: Team) -> Response:
        """
        Returns cumulative counts of unresolved groups per day within the stats period time range.
        Response:
        {
            <project_id>: {
                <isoformat_date>: {"unresolved": <unresolved_count>},
                ...
            }
            ...
        }
        """
        if not features.has("organizations:team-insights",
                            team.organization,
                            actor=request.user):
            return Response(
                {"detail": "You do not have the insights feature enabled"},
                status=400)

        # Team has no projects
        project_list = Project.objects.get_for_team_ids(team_ids=[team.id])
        if len(project_list) == 0:
            return Response({})

        start, end = get_date_range_from_params(request.GET)
        end = end.replace(hour=0, minute=0, second=0,
                          microsecond=0) + timedelta(days=1)
        start = start.replace(hour=0, minute=0, second=0,
                              microsecond=0) + timedelta(days=1)

        # First we get the count of unresolved groups from before we were writing `GroupHistory`
        # records. This will be reasonably accurate but not perfect.
        oldest_history_date = GroupHistory.objects.filter_to_team(
            team).aggregate(Min("date_added"), )["date_added__min"]

        project_unresolved = {
            r["project"]: r["unresolved"]
            for r in (Group.objects.filter_to_team(team).filter(
                first_seen__lt=oldest_history_date).values("project").annotate(
                    total=Count("id"),
                    resolved=Count(
                        "id", filter=Q(resolved_at__lt=oldest_history_date)),
                ).annotate(unresolved=ExpressionWrapper(
                    F("total") - F("resolved"), output_field=IntegerField())))
        }
        # TODO: Could get ignored counts from `GroupSnooze` too

        prev_status_sub_qs = Coalesce(
            Subquery(
                GroupHistory.objects.filter(
                    group_id=OuterRef("group_id"),
                    date_added__lt=OuterRef("date_added"),
                    status__in=OPEN_STATUSES + CLOSED_STATUSES,
                ).order_by("-id").values("status")[:1]),
            -1,
        )
        dedupe_status_filter = Q((~Q(prev_status__in=OPEN_STATUSES)
                                  & Q(status__in=OPEN_STATUSES))
                                 | (~Q(prev_status__in=CLOSED_STATUSES)
                                    & Q(status__in=CLOSED_STATUSES)))

        # Next, if there's data in the group history table before the stats period then grab that
        # and use it to help calculate the initial unresolved value
        if oldest_history_date < start:
            new_for_projects = (Group.objects.filter_to_team(team).filter(
                first_seen__gte=oldest_history_date,
                first_seen__lt=start,
            ).values("project").annotate(open=Count("id")))
            initial_project_history_counts = (
                GroupHistory.objects.filter_to_team(team).filter(
                    date_added__gte=oldest_history_date,
                    date_added__lt=start,
                ).annotate(prev_status=prev_status_sub_qs).filter(
                    dedupe_status_filter).values("project").annotate(
                        reopened=Count("id",
                                       filter=Q(status__in=OPEN_STATUSES)),
                        closed=Count("id",
                                     filter=Q(status__in=CLOSED_STATUSES)),
                    ))
            for row in new_for_projects:
                project_unresolved.setdefault(row["project"], 0)
                project_unresolved[row["project"]] += row["open"]
            for row in initial_project_history_counts:
                project_unresolved.setdefault(row["project"], 0)
                project_unresolved[
                    row["project"]] += row["reopened"] - row["closed"]

        # Just a failsafe to make sure we haven't gone below 0
        for project in list(project_unresolved.keys()):
            project_unresolved[project] = max(0, project_unresolved[project])

        # Now we grab the rest of the data bucketed by day
        new_issues = (Group.objects.filter_to_team(team).filter(
            first_seen__gte=start,
            first_seen__lt=end,
        ).annotate(bucket=TruncDay("first_seen")).order_by("bucket").values(
            "project", "bucket").annotate(open=Count("id")))

        bucketed_issues = (GroupHistory.objects.filter_to_team(team).filter(
            date_added__gte=start,
            date_added__lte=end,
        ).annotate(
            bucket=TruncDay("date_added"),
            prev_status=prev_status_sub_qs,
        ).filter(dedupe_status_filter).order_by("bucket").values(
            "project", "bucket").annotate(
                open=Count("id", filter=Q(status__in=OPEN_STATUSES)),
                closed=Count("id", filter=Q(status__in=CLOSED_STATUSES)),
            ))

        current_day, date_series_dict = start, {}
        while current_day < end:
            date_series_dict[current_day.isoformat()] = {
                "open": 0,
                "closed": 0
            }
            current_day += timedelta(days=1)

        agg_project_precounts = {
            project.id: copy.deepcopy(date_series_dict)
            for project in project_list
        }
        for r in chain(bucketed_issues, new_issues):
            bucket = agg_project_precounts[r["project"]][
                r["bucket"].isoformat()]
            bucket["open"] += r.get("open", 0)
            bucket["closed"] += r.get("closed", 0)

        agg_project_counts = {}

        for project, precounts in agg_project_precounts.items():
            open = project_unresolved.get(project, 0)
            sorted_bucket_keys = sorted(precounts.keys())
            project_counts = {}
            for bucket_key in sorted_bucket_keys:
                bucket = precounts[bucket_key]
                open = max(open + bucket["open"] - bucket["closed"], 0)
                project_counts[bucket_key] = {"unresolved": open}
            agg_project_counts[project] = project_counts

        return Response(agg_project_counts)
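The final loop above turns per-day open/closed bucket counts into a cumulative unresolved series clamped at zero; a toy illustration of that running balance:

unresolved = 3  # initial unresolved count for a project
for opened, closed in [(2, 1), (0, 4), (1, 0)]:
    unresolved = max(unresolved + opened - closed, 0)
    print(unresolved)  # prints 4, then 0 (clamped), then 1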
Example #13
def grade_summary_report(qs, grade_choices):
    report_data = []

    drops = get_drop_querysets(qs)
    total_log_count = qs.count()

    # List of all drops. Currently only care about monsters and items
    all_items = GameItem.objects.filter(pk__in=drops['items'].values_list(
        'item', flat=True)) if 'items' in drops else []
    all_monsters = Monster.objects.filter(pk__in=drops['monsters'].values_list(
        'monster', flat=True)) if 'monsters' in drops else []
    all_runes = drops['runes'] if 'runes' in drops else []

    for grade_id, grade_name in grade_choices:
        grade_qs = qs.filter(grade=grade_id)
        grade_report = {
            'grade': grade_name,
            'drops': [],
        }
        for item in all_items:
            result = drops['items'].filter(
                log__in=grade_qs,
                item=item,
            ).aggregate(
                count=Count('pk'),
                min=Min('quantity'),
                max=Max('quantity'),
                avg=Avg('quantity'),
                drop_chance=Cast(Count('pk'), FloatField()) / total_log_count *
                100,
                avg_per_run=Cast(Sum('quantity'), FloatField()) /
                total_log_count,
            )

            grade_report['drops'].append({
                'type': 'item',
                'name': item.name,
                'icon': item.icon,
                **result,
            })

        for monster in all_monsters:
            result = drops['monsters'].filter(
                log__in=grade_qs, monster=monster).aggregate(
                    count=Count('pk'),
                    drop_chance=Cast(Count('pk'), FloatField()) /
                    total_log_count * 100,
                    avg_per_run=Cast(Count('pk'), FloatField()) /
                    total_log_count,
                )

            grade_report['drops'].append({
                'type': 'monster',
                'name': monster.name,
                'icon': monster.image_filename,
                'stars': monster.base_stars,
                **result,
            })

        report_data.append(grade_report)

        for stars in (all_runes.values_list('stars', flat=True)
                      if all_runes else []):
            result = drops['runes'].filter(
                log__in=grade_qs,
                stars=stars,
            ).aggregate(
                count=Count('pk'),
                drop_chance=Cast(Count('pk'), FloatField()) / total_log_count *
                100,
                avg_per_run=Cast(Count('pk'), FloatField()) / total_log_count,
            )

            grade_report['drops'].append({
                'type': 'rune',
                'name': f'{stars}⭐ Rune',
                **result,
            })

    return report_data
Example #14
def get_report_summary(drops, total_log_count):
    summary = {
        'table': {},
        'chart': [],
    }

    # Chart data: list of {'drop': <string>, 'count': <int>}
    # Table data: dict (by drop type) of lists of items which drop, with stats. 'count' is only required stat.
    for drop_type, qs in drops.items():
        if drop_type == models.ItemDrop.RELATED_NAME:
            # Chart excludes currency
            chart_data = list(
                qs.exclude(item__category=GameItem.CATEGORY_CURRENCY, ).values(
                    name=F('item__name'), ).annotate(
                        count=Count('pk'), ).filter(
                            count__gt=0).order_by('-count'))

            table_data = list(
                qs.values(
                    name=F('item__name'),
                    icon=F('item__icon'),
                ).annotate(
                    count=Count('pk'),
                    min=Min('quantity'),
                    max=Max('quantity'),
                    avg=Avg('quantity'),
                    drop_chance=Cast(Count('pk'), FloatField()) /
                    total_log_count * 100,
                    avg_per_run=Cast(Sum('quantity'), FloatField()) /
                    total_log_count,
                ).filter(count__gt=0).order_by('item__category', '-count'))
        elif drop_type == models.MonsterDrop.RELATED_NAME:
            # Monster drops in chart are counted by stars
            chart_data = list(
                qs.values(name=Concat(Cast('grade', CharField()),
                                      Value('⭐ Monster'))).annotate(
                                          count=Count('pk')).filter(
                                              count__gt=0).order_by('-count'))

            table_data = replace_value_with_choice(
                list(
                    qs.values(
                        name=F('monster__name'),
                        slug=F('monster__bestiary_slug'),
                        icon=F('monster__image_filename'),
                        element=F('monster__element'),
                        can_awaken=F('monster__can_awaken'),
                        is_awakened=F('monster__is_awakened'),
                        stars=F('monster__base_stars'),
                    ).annotate(
                        count=Count('pk'),
                        drop_chance=Cast(Count('pk'), FloatField()) /
                        total_log_count * 100,
                    )), {'element': Monster.ELEMENT_CHOICES})
        else:
            # Chart data is name and count only
            item_name = ' '.join(
                [s.capitalize() for s in drop_type.split('_')]).rstrip('s')
            count = qs.aggregate(count=Count('pk'))['count']
            if count > 0:
                chart_data = [{
                    'name': item_name,
                    'count': count,
                }]
            else:
                chart_data = []

            # Table data based on item type
            if drop_type == models.MonsterPieceDrop.RELATED_NAME:
                table_data = replace_value_with_choice(
                    list(
                        qs.values(
                            name=F('monster__name'),
                            icon=F('monster__image_filename'),
                            element=F('monster__element'),
                            count=Count('pk'),
                            min=Min('quantity'),
                            max=Max('quantity'),
                            avg=Avg('quantity'),
                        )), {'element': Monster.ELEMENT_CHOICES})
            elif drop_type == models.RuneDrop.RELATED_NAME:
                table_data = {
                    'sets':
                    replace_value_with_choice(
                        list(
                            qs.values('type').annotate(
                                count=Count('pk')).order_by('type')),
                        {'type': Rune.TYPE_CHOICES}),
                    'slots':
                    list(
                        qs.values('slot').annotate(
                            count=Count('pk')).order_by('slot')),
                    'quality':
                    replace_value_with_choice(
                        list(
                            qs.values('quality').annotate(
                                count=Count('pk')).order_by('quality')),
                        {'quality': Rune.QUALITY_CHOICES}),
                }
            elif drop_type == models.RuneCraftDrop.RELATED_NAME:
                table_data = replace_value_with_choice(
                    list(
                        qs.values('type', 'rune', 'quality').annotate(
                            count=Count('pk'), ).order_by(
                                'type', 'rune', 'quality')), {
                                    'type': Rune.TYPE_CHOICES,
                                    'quality': Rune.QUALITY_CHOICES
                                })
            elif drop_type == models.DungeonSecretDungeonDrop.RELATED_NAME:
                table_data = replace_value_with_choice(
                    list(
                        qs.values(
                            name=F('level__dungeon__secretdungeon__monster__name'),
                            element=F('level__dungeon__secretdungeon__monster__element'),
                            icon=F('level__dungeon__secretdungeon__monster__image_filename'),
                        ).annotate(count=Count('pk'))),
                    {'element': Monster.ELEMENT_CHOICES})

            else:
                raise NotImplementedError(
                    f"No summary table generation for {drop_type}")

        summary['chart'] += chart_data

        if table_data:
            summary['table'][drop_type] = table_data

    return summary
Example #15
def check_charge_orders():
    """
    Interface name: check_charge_orders
    Usage requirement: between 00:00 and 03:00 every day, push all order data for orders started the previous day on the municipal e充网 (e-charging) platform
    1. Whether the order has already been reported
    2. Whether to use the start time or the end time (orders that cross midnight)
    """
    prev_date = datetime.now().date() - timedelta(days=1)
    print("current time:", prev_date)
    order_totals = Order.objects.filter(end_time__date=prev_date, start_charge_seq__isnull=False, status=2, charg_pile__is_subsidy=1)\
        .aggregate(
            start_time=Min("end_time"), end_time=Max("end_time"), order_count=Count("out_trade_no"),
            total_power=Sum(F("end_reading") - F("begin_reading")), total_money=Sum("consum_money")
        )
    print(order_totals)
    check_orders = {}
    check_order_seq = '{0}{1}{2}'.format(
        settings.OPERATORID,
        datetime.now().strftime('%Y%m%d%H%M%S'), random.randint(10000, 100000))
    check_orders["CheckOrderSeq"] = check_order_seq
    check_orders["StartTime"] = order_totals["start_time"].strftime(
        "%Y-%m-%d %H:%M:%S")
    check_orders["EndTime"] = order_totals["end_time"].strftime(
        "%Y-%m-%d %H:%M:%S")
    check_orders["OrderCount"] = order_totals["order_count"]
    check_orders["TotalOrderPower"] = float(order_totals["total_power"])
    check_orders["TotalOrderMoney"] = float(order_totals["total_money"])

    orders = Order.objects.filter(end_time__date=prev_date,
                                  start_charge_seq__isnull=False,
                                  status=2,
                                  charg_pile__is_subsidy=1)
    charge_orders = []
    for order in orders:
        d_order = {}
        d_order["StartChargeSeq"] = order.start_charge_seq
        d_order["TotalPower"] = float(order.get_total_reading())
        d_order["TotalMoney"] = float(order.consum_money)
        charge_orders.append(d_order)

    check_orders["ChargeOrders"] = charge_orders
    print(json.dumps(check_orders))

    echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)
    ret_data = echarge.check_charge_orders(**check_orders)
    if "Ret" in ret_data and ret_data["Ret"] == 0:
        # decrypt
        ret_crypt_data = ret_data["Data"]
        ret_decrypt_data = data_decode(ret_crypt_data)
        # get the code value
        dict_decrpt_data = json.loads(ret_decrypt_data)
        check_order = {
            "CheckOrderSeq":
            dict_decrpt_data["CheckOrderSeq"],
            "StartTime":
            datetime.strptime(dict_decrpt_data["StartTime"],
                              '%Y-%m-%d %H:%M:%S'),
            "EndTime":
            datetime.strptime(dict_decrpt_data["EndTime"],
                              '%Y-%m-%d %H:%M:%S'),
            "TotalDisputeOrder":
            dict_decrpt_data["TotalDisputeOrder"],
            "TotalDisputePower":
            dict_decrpt_data["TotalDisputePower"],
            "TotalDisputeMoney":
            dict_decrpt_data["TotalDisputeMoney"],
        }
        CheckChargeOrder.objects.create(**check_order)
        DisputeOrders = dict_decrpt_data["DisputeOrders"]
        for disOrder in DisputeOrders:
            DisputeOrder.objects.create(**disOrder)
    else:
        print(ret_data["Msg"])
Example #16
def getstats(request):
    """ get statistics for a date in terms of user action made, amount of resources, info about usage, ... """
    data = {}
    stats_uuid = request.GET.get('statsid', "")
    currdate = request.GET.get('date', '')
    if (not currdate):
        currdate = date.today()

    data['date'] = str(currdate)
    data['metashare_version'] = METASHARE_VERSION
    data['user'] = LRStats.objects.filter(
        lasttime__startswith=currdate).values('sessid').annotate(
            Count('sessid')).count()
    data['lrcount'] = resourceInfoType_model.objects.filter(
        storage_object__publication_status=PUBLISHED,
        storage_object__deleted=False).count()
    data['lrmastercount'] = StorageObject.objects.filter(
        copy_status=MASTER, publication_status=PUBLISHED,
        deleted=False).count()
    data['lrupdate'] = LRStats.objects.filter(lasttime__startswith=currdate,
                                              action=UPDATE_STAT).count()
    data['lrview'] = LRStats.objects.filter(lasttime__startswith=currdate,
                                            action=VIEW_STAT).count()
    data['lrdown'] = LRStats.objects.filter(lasttime__startswith=currdate,
                                            action=DOWNLOAD_STAT).count()
    data['queries'] = QueryStats.objects.filter(
        lasttime__startswith=currdate).count()
    extimes = QueryStats.objects.filter(
        lasttime__startswith=currdate).aggregate(Avg('exectime'),
                                                 Max('exectime'),
                                                 Min('exectime'))

    qltavg = 0
    if (extimes["exectime__avg"]):
        qltavg = QueryStats.objects.filter(
            lasttime__startswith=currdate,
            exectime__lt=int(extimes["exectime__avg"])).count()
    else:
        extimes["exectime__avg"] = 0

    data['qexec_time_avg'] = extimes["exectime__avg"]
    data['qlt_avg'] = qltavg

    # get usage statistics
    if (stats_uuid == str(uuid.uuid3(uuid.NAMESPACE_DNS, STORAGE_PATH))):
        _models = [
            x for x in dir(sys.modules['metashare.repository.models'])
            if x.endswith('_model')
        ]
        mod = import_module("metashare.repository.models")
        _fields = {}
        for _model in _models:
            # get info about fields
            dbfields = getattr(mod, _model).__schema_fields__
            for _not_used, _field, _required in dbfields:
                model_name = _model.replace("Type_model", "")
                if not _field.endswith(
                        '_set') and not model_name + " " + _field in _fields:
                    verbose_name = globals()[_model]._meta.get_field(
                        _field).verbose_name[:]
                    _fields[model_name + " " + _field] = [
                        model_name, _field, verbose_name, _required, 0, 0
                    ]

        usageset = UsageStats.objects.values('elname', 'elparent').annotate(Count('lrid', distinct=True), \
                                                                            Sum('count')).order_by('elparent',
                                                                                                   '-lrid__count',
                                                                                                   'elname')
        usagedata = {}
        if (len(usageset) > 0):
            for item in usageset:
                verbose = item["elname"]
                if item["elparent"] + " " + item["elname"] in _fields:
                    verbose = _fields[item["elparent"] + " " +
                                      item["elname"]][2]
                if item["elparent"] not in usagedata:
                    usagedata[item["elparent"]] = [{"field": item["elname"], "label": verbose, \
                                                    "counters": [int(item["lrid__count"]), int(item["count__sum"])]}]
                else:
                    usagedata[item["elparent"]].append({
                        "field":
                        item["elname"],
                        "label":
                        verbose,
                        "counters":
                        [int(item["lrid__count"]),
                         int(item["count__sum"])]
                    })
        data["usagestats"] = usagedata

        # export stats about published LRs only
        lrstats = {}
        resources = resourceInfoType_model.objects \
            .filter(storage_object__publication_status=PUBLISHED, storage_object__deleted=False)
        for resource in resources:
            lrstats[resource.storage_object.identifier] = getLRStats(
                resource.storage_object.identifier)
        data["lrstats"] = lrstats

    return HttpResponse("[" + json.dumps(data) + "]",
                        content_type="application/json")
Example #17
def get_suggestion(request, token):
    """ Get suggestions for a given token
    
    Args:
        token (string): Profile Token
    Received Data:
        JSON File: {auth, Array of clickable page items(text, href, page)}

    Returns:
        Render response with the ranked list of the received clickables index
    """
    if request.method == 'POST':
        # Loading Received information to a json format
        received_json_data = json.loads(request.body.decode('utf-8'))
        object_auth = received_json_data['profile']
        #  Getting profile information
        try:
            own_profile = Profile.objects.get(token=token)
            own_key = SecureAuth.objects.get(profile=own_profile).key
        except (Profile.DoesNotExist, SecureAuth.DoesNotExist):
            return HttpResponseNotFound('Invalid Profile')

        # Making sure the profile is activated and in order
        if own_profile and own_profile.activated and own_key == str(object_auth):
            try:
                # Remove all the pageobjects who do not have a relation with a profile.
                # PageObjectIntere
                # if Pa
                # Get last visit of user
                # Get last selected element of user location href href
                pageobjects = received_json_data['pageobjects']

                # Getting web page origin
                base_uri = pageobjects[0][0]
                handle_visit(own_profile, base_uri)
                handle_browsing_mistake(own_profile, base_uri)

                # Extracting the received pageobjects
                received_pageobjects_hrefs = [o[1] for o in pageobjects]
                received_pageobjects_text = [o[2] for o in pageobjects]
                received_pageobjects_selectors = [o[3] for o in pageobjects]
            # Throw an IndexError if there is a mismatch.
            except IndexError:
                context = {'base': "Current", 'recommendation': "None"}
                return render(request, 'suggestions.json', context)

            try:
                # Cross reference objects visible to the user and the stored objects
                page = Page.objects.get(href=base_uri)
                pageobjects = PageObject.objects.filter(page=page)
                match_pageobjects = set(pageobjects)
                matching_pageobjects_set = set(pageobjects.filter(text__in=received_pageobjects_text).values_list('id', flat=True))
                matching_pageobjects_set.update(pageobjects.filter(href__in=received_pageobjects_hrefs).values_list('id', flat=True))
                matching_pageobjects = pageobjects.filter(pk__in=matching_pageobjects_set)
                profiles_pageobjects = ProfilePageobject.objects.filter(pageobject__in=matching_pageobjects)
                profiles = Profile.objects.filter(id__in=profiles_pageobjects.values('profile').distinct())
                profiles_interests = ProfileInterest.objects.filter(profile__in=profiles)
                selectable_values = []
                pageobjectIndex_tokens = {}
                # Normalized profile pageobject selections among the profiles themselves.
                # Dictionaries for the profile page selections, profile ages, Standardized profile interests, standardized genders
                nm_pg_select = {}
                nm_pr_ages = {}
                std_pr_int = {}
                std_gender = {}
                # Find the oldest and youngest individuals
                lowest_age = profiles.aggregate(Min('age'))['age__min']
                highest_age = profiles.aggregate(Max('age'))['age__max']
                # List of all the interests
                interests = [i.name for i in Interest.objects.all().order_by('name')]
                # If there are no age records
                if highest_age is None or lowest_age is None:
                    context = {'base': base_uri, 'recommendation': "No known objects"}
                    return render(request, 'suggestions.json', context)
                # If all profiles share the same age, widen the range to avoid division by zero.
                if highest_age - lowest_age == 0:
                    lowest_age = lowest_age - 1
                # The different types of genders
                genders = ['Female', 'Male', 'Other', 'Irrelevant']

                # Standardising profile-based attributes (gender, interests) and normalising profile-based attributes (age, selections)
                for profile in profiles:
                    # Retrieving highest and lowest number of selections.
                    lowest_nb_selections = profiles_pageobjects.filter(profile=profile).aggregate(Min('selections'))['selections__min']
                    highest_nb_selections = profiles_pageobjects.filter(profile=profile).aggregate(Max('selections'))['selections__max']
                    if int(highest_nb_selections) - int(lowest_nb_selections) == 0:
                        highest_nb_selections = highest_nb_selections + 1

                    # Normalising selectable value per pageobject
                    for pr_po in profiles_pageobjects.filter(profile=profile):
                        nm_pg_select[str(pr_po.id)] = float(pr_po.selections-int(lowest_nb_selections))/float(
                                int(highest_nb_selections)-int(lowest_nb_selections))

                    nm_pr_ages[str(profile.id)] = float((profile.age-lowest_age)/(highest_age-lowest_age))
                    
                    # One-hot encode the profile's gender (a list of 0s with a single 1)
                    standardized_profile_gender = [0] * len(genders)
                    standardized_profile_gender[genders.index(profile.gender)] = 1

                    # Add standardised list of profile interests to the dictionary of standardised interests
                    std_pr_int[str(profile.id)] = get_formatted_user_or_pageobject_interests(profile)
                    # Add standardised list of profile gender to the dictionary of standardised gender 
                    std_gender[str(profile.id)] = [standardized_profile_gender]

                # Ordered list of pageobjects
                po_indexs = []
                # Dictionary of normalised Age per profile per page object
                po_norm_age = {}
                #  Dictionary of normalised Selections per profile per page objects
                po_norm_select = {}
                # For each page object, accumulate the interest and gender values of every profile that used it
                # (the matrices are normalised further below)
                po_std_norm_interests_matrix = []
                po_std_norm_gender_matrix = []
                for po in matching_pageobjects.order_by('href'):
                    # Adding page object to the list to allow indexing
                    po_indexs.append(po)
                    # Number of profiles who used the page object
                    po_l = int(profiles_pageobjects.filter(pageobject=po).distinct().count())
                    # Default values
                    pr_po_mn_select = 0
                    pr_po_mn_age = 0
                    pr_po_std_mn_interests = []
                    pr_po_std_mn_gender = []
                    # For each profile mapped to an object
                    for pr_po in profiles_pageobjects.filter(pageobject=po):
                        current_element_index = -1
                        if po.text in received_pageobjects_text:
                            current_element_index = received_pageobjects_text.index(po.text)
                        else:
                            current_element_index = received_pageobjects_hrefs.index(po.href)
                        pageobjectIndex_tokens.setdefault(current_element_index, []).append(pr_po.profile.token)
                        # Add the profile normalised selections and age of the object/Profile
                        pr_po_mn_select += float(nm_pg_select[str(pr_po.id)])
                        pr_po_mn_age += float(nm_pr_ages[str(pr_po.profile.id)])
                        # Add to the array interest/gender array or initialise it
                        if len(pr_po_std_mn_interests) == 0:
                            pr_po_std_mn_interests = std_pr_int[str(pr_po.profile.id)]
                        else:
                            pr_po_std_mn_interests = np.add(pr_po_std_mn_interests, std_pr_int[str(pr_po.profile.id)])
                        if len(pr_po_std_mn_gender) == 0:
                            pr_po_std_mn_gender = std_gender[str(pr_po.profile.id)]
                        else:
                            pr_po_std_mn_gender = np.add(pr_po_std_mn_gender, std_gender[str(pr_po.profile.id)])
                    # Append this page object's aggregated interest/gender rows (normalised later)
                    po_std_norm_interests_matrix.append(pr_po_std_mn_interests)
                    po_std_norm_gender_matrix.extend(pr_po_std_mn_gender)
                    # Normalise the selection and age prior
                    pr_po_mn_select /= po_l
                    po_norm_select[po] = [pr_po_mn_select]
                    pr_po_mn_age /= po_l
                    po_norm_age[po] = [pr_po_mn_age]

                complete_matrix = []
                # Convert to a numpy array and L1-normalise the interest sub-matrix so per-pageobject interests can be derived from it
                postdnormintmtx = np.array(po_std_norm_interests_matrix)
                postdnormintmtx = normalize(postdnormintmtx, axis=0, norm='l1')

                # Keep the stored pageobject interests up to date for the learning process;
                # pass the callable and its arguments separately so the update runs in the background thread.
                thr = threading.Thread(target=pageobject_interests_update,
                                       args=(interests, po_indexs, postdnormintmtx))
                thr.start()

                postdnormgendmtx = np.array(po_std_norm_gender_matrix)
                np.seterr(divide='ignore', invalid='ignore')

                # Create the complete matrix
                # Row structure: age, interests, gender, selections, recency of the page object
                for po in matching_pageobjects.order_by('href'):
                    complete_matrix.append(np.append(np.append(np.append(np.append(po_norm_age[po],
                        postdnormintmtx[po_indexs.index(po)]),
                    postdnormgendmtx[po_indexs.index(po)]),
                    po_norm_select[po]),
                    (1.0 - float(time.mktime(po.updated_at.timetuple())) / float(time.mktime(datetime.now().timetuple())))))
                
                # Build the requesting user's own feature row (could be simplified by reusing the matching row of the matrix)
                standardized_own_profile_gender = [0] * len(genders)
                standardized_own_profile_gender[genders.index(own_profile.gender)] = 1

                standardized_own_profile_interests = get_formatted_user_or_pageobject_interests(own_profile)
                own_profile_properties = np.append([float((own_profile.age - lowest_age) / (highest_age - lowest_age))],
                    np.append(standardized_own_profile_interests,
                    np.append(standardized_own_profile_gender,
                    np.append([1.0], [1.0]))))

                # Normalize columns
                complete_matrix = np.matrix(normalize(complete_matrix, axis=0, norm='l1'))

                # Compute the Euclidean distance between each complete-matrix row
                # and the requesting profile's feature row (KNN-style ranking).
                profile_po_distance = []
                for rows in range(complete_matrix.shape[0]):
                    current_row = 0
                    for columns in range(complete_matrix.shape[1]):
                        current_row += np.square(own_profile_properties[columns] - complete_matrix.item(rows, columns))
                    profile_po_distance.append((po_indexs[rows], np.sqrt(current_row)))

                # Map each candidate page object's distance to the index of the
                # originally received clickable it corresponds to
                itemIndex_distance = {}
                for item in profile_po_distance:
                    if item[0].text in received_pageobjects_text:
                        itemIndex_distance[received_pageobjects_text.index(item[0].text)] = item[1]
                    else:
                        itemIndex_distance[received_pageobjects_hrefs.index(item[0].href)] = item[1]
                error_flag = ""

                # ProfileBased and UU Demographic filtering
                User_Item_and_User_User_demographic = UI_UU_mixed_filtering(base_uri, token, pageobjectIndex_tokens, itemIndex_distance)
                # Sorted index list
                final_received_clickables_ranked_indexs = User_Item_and_User_User_demographic[0]
                # Error flag, if any, depending on whether neo4j is available
                error_flag = User_Item_and_User_User_demographic[1]
                # Ranks
                ranks = User_Item_and_User_User_demographic[2]
                # To be sent content with recommendation
                context = {'base':ranks, 'recommendation': final_received_clickables_ranked_indexs, 'error': error_flag}
            # Raised when no matching page is stored
            except Page.DoesNotExist:
                # To be sent content with warning
                context = {'base': base_uri, 'recommendation': "No known objects"}
            # Missing key in the received data
            except KeyError:
                # To be sent content with warning
                context = {'base': base_uri, 'recommendation': "Cross matching issue"}
            # Render the created context as a JSON response
            return render(request, 'suggestions.json', context)
        else:
            # If the user is not authenticated throw an error
            context = {'base': "Token Issue", 'recommendation': "Authentication Token or Key do not match"}
            return render(request, 'suggestions.json', context)
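
A minimal client-side sketch of the payload this view expects, assuming a hypothetical /suggestion/<token>/ route; the key names follow what the view reads ('profile' and 'pageobjects'), and all concrete values are illustrative:

import json
import requests  # assumption: the caller uses the requests library

# Each pageobject entry mirrors the indexing used in the view:
# [page URI, href, text, CSS selector]
payload = {
    "profile": "secure-auth-key",  # hypothetical SecureAuth key for the profile
    "pageobjects": [
        ["https://example.org/", "https://example.org/docs", "Docs", "#nav > a:nth-child(1)"],
        ["https://example.org/", "https://example.org/blog", "Blog", "#nav > a:nth-child(2)"],
    ],
}

resp = requests.post("https://example.org/suggestion/<profile-token>/",
                     data=json.dumps(payload),
                     headers={"Content-Type": "application/json"})
print(resp.status_code, resp.text)  # rendered suggestions.json with the ranked indices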
Beispiel #18
0
    def get_object_list(self, credit_set, category_list):
        """
            basically the tabular output of the category display

            returns a row for each filter with the average scores for each
            category in the creditset

            Ex:
                        ER          PAE         OP
            Canada      12.2        14.7        22.1
            US          13.2        17.4        21.2

            so the return value would be something like:

            [
                {
                    "title": "Canada",
                    "columns": [
                        {"avg": 12.2, "stddev": 2.2, "cat": ER},
                        {"avg": 14.7, "stddev": 2.1, "cat": PAE},
                        {"avg": 21.2, "stddev": 2.1, "cat": OP},
                    ]
                },
                ...
            ]
        """

        object_list = []
        ss_list = None

        for f, v in self.get_selected_filter_objects():

            # get a unique key for each row (includes the creditset version)
            cache_key = "%s-%s" % (self.convertCacheKey(
                f.get_active_title(v)), credit_set.version)
            # see if a cached version is available
            row = cache.get(cache_key)

            # if not, generate that row
            if not row:
                row = {}
                row['title'] = f.get_active_title(v)

                # all the submissionsets from that filter
                ss_list = f.get_results(v).exclude(rating__publish_score=False)
                row['total'] = ss_list.count()

                columns = []
                """
                    for each submission in the ss_list

                    run a query to get the average `score` for each category

                """
                for cat in category_list:
                    obj = {'cat': cat.title}

                    qs = SubcategorySubmission.objects.all()
                    qs = qs.filter(
                        category_submission__submissionset__in=ss_list)
                    qs = qs.filter(subcategory=cat)

                    result = qs.aggregate(Avg('percentage_score'),
                                          StdDev('points'), Min('points'),
                                          Max('points'))

                    if result['percentage_score__avg']:
                        obj['avg'] = result['percentage_score__avg'] * 100
                    else:
                        obj['avg'] = 0
                    obj['std'] = result['points__stddev'] or 0
                    obj['min'] = result['points__min'] or 0
                    obj['max'] = result['points__max'] or 0

                    columns.append(obj)

                row['columns'] = columns

                cache.set(cache_key, row, 60 * 60 * 6)  # cache for 6 hours

            object_list.insert(0, row)

        return object_list
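
get_object_list follows a cache-aside pattern: each row is looked up by a key derived from the filter title and creditset version, and only rebuilt (and re-cached) on a miss. A generic sketch of that pattern, with hypothetical key and builder names:

from django.core.cache import cache

def cached_row(cache_key, build_row, timeout=60 * 60 * 6):
    """Return the cached value for cache_key, rebuilding it with build_row() on a miss."""
    row = cache.get(cache_key)
    if row is None:
        row = build_row()
        cache.set(cache_key, row, timeout)
    return row

# Usage sketch: cached_row("canada-2.1", lambda: {"title": "Canada", "columns": []})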
Beispiel #19
0
def articleList(request):
    form = ArticleForm(request.POST)
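    # Note: despite its name, clearSearch is True when the client did NOT ask to clear the
    # search (the 'clearSearch' POST parameter is absent or the string "False"); newSearch is
    # True only when a new search was explicitly submitted.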
    clearSearch = request.POST.get('clearSearch', "False") == "False"
    newSearch = request.POST.get('newSearch', "False") == "True"

    if clearSearch and newSearch and form.is_valid():
        showAll = form.cleaned_data['showAll']
        feedname = form.cleaned_data['feedname']
        startDate = form.cleaned_data['startDate']
        endDate = form.cleaned_data['endDate']
        searchTerms = form.cleaned_data['searchTerms']
        categories = form.cleaned_data['category']
        page = 1

        request.session['article_hasSearch'] = True

    elif clearSearch and request.session.get('article_hasSearch', False):
        request.session['article_hasSearch'] = True

        showAll = request.session['article_showAll']
        feedname = request.session['article_feedname']
        startDate = request.session['article_startDate']
        endDate = request.session['article_endDate']
        searchTerms = request.session['article_searchTerms']
        categories = request.session['article_category']

        form = ArticleForm({
            'showAll': showAll,
            'feedname': feedname,
            'startDate': startDate,
            'endDate': endDate,
            'searchTerms': searchTerms,
            'categories': categories
        })

        try:
            page = int(request.POST.get('page', 'invalid'))
        except ValueError:
            page = request.session.get('article_page', 1)

    else:
        form = ArticleForm()

        showAll = False
        feedname = 'ALL'
        startDate = None
        endDate = None
        searchTerms = ''
        categories = []

        # 3/2/2012 John Nicholson
        # added so paging works even when there is no search
        try:
            page = int(request.POST.get('page', 'invalid'))
        except ValueError:
            page = request.session.get('article_page', 1)

        request.session['article_hasSearch'] = False

    request.session['article_showAll'] = showAll
    request.session['article_feedname'] = feedname
    request.session['article_startDate'] = startDate
    request.session['article_endDate'] = endDate
    request.session['article_searchTerms'] = searchTerms
    request.session['article_category'] = categories
    request.session['article_page'] = page

    article_list = Article.objects.all().distinct().order_by('-created')
    if not showAll:
        article_list = article_list.filter(relevant=True)
    if feedname != '' and feedname != 'ALL':
        article_list = article_list.filter(feedname=feedname)
    if startDate is not None:
        startDate = datetime.strptime("%s 00:00:00" % startDate,
                                      "%Y-%m-%d %H:%M:%S")
        article_list = article_list.filter(created__gte=startDate)
    if endDate is not None:
        endDate = datetime.strptime("%s 23:59:59" % endDate,
                                    "%Y-%m-%d %H:%M:%S")
        article_list = article_list.filter(created__lte=endDate)
    if searchTerms:
        article_list = article_list.filter(
            Q(title__icontains=searchTerms)
            | Q(bodytext__icontains=searchTerms))
    if categories:
        article_list = article_list.filter(categories__in=categories)

    paginator = Paginator(article_list, 20)  # Show 20 articles per page

    try:
        articles = paginator.page(page)
    except (EmptyPage, InvalidPage):
        articles = paginator.page(paginator.num_pages)

    dateRange = Article.objects.all().aggregate(minDate=Min('created'),
                                                maxDate=Max('created'))

    data = {
        'articles': articles,
        'form': form,
        'dateRange': dateRange,
    }
    return render_to_response('newsarticles/articleList.html',
                              data,
                              context_instance=RequestContext(request))
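
The searchTerms filter above ORs two case-insensitive lookups together with Q objects. The same idea in isolation, as a small sketch using the Article model from this example (the search term is illustrative):

from django.db.models import Q

def search_articles(term):
    # Match the term in either the title or the body text, case-insensitively
    return Article.objects.filter(Q(title__icontains=term) | Q(bodytext__icontains=term))

# Usage sketch: search_articles("election").order_by('-created')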
Beispiel #20
0
def get_last_push_and_clean_up():
    '''Find the starting point for the push changesource.

    This sets the changesource such that missed pushes since the last shut
    down get retriggered.
    Also, if the master didn't shut down cleanly, re-schedule the affected
    changes, and clean up the mbdb.
    '''
    from life.models import Changeset, Push
    from mbdb.models import Build, BuildRequest, Change
    from django.db.models import F, Max, Min, Q
    # Check for debris of a bad shut-down
    # Indications:
    # - Pending builds (build requests w/out builds, but with changes)
    # - Unfinished builds (no endtime)
    #
    # Find all revisions, find the latest push for each,
    # find the earliest of those pushes.
    revs = []
    pending_requests = (BuildRequest.objects.filter(
        builds__isnull=True, sourcestamp__changes__isnull=False))
    pending_query = Q(stamps__requests__in=pending_requests)
    unfinished_builds = (Build.objects.filter(endtime__isnull=True))
    unfinished_query = Q(stamps__builds__in=unfinished_builds)
    revs.extend(
        Change.objects.filter(pending_query | unfinished_query).filter(
            revision__isnull=False).values_list('revision',
                                                flat=True).distinct())
    if revs:
        # clean up
        # remove pending build requests
        pending_requests.delete()
        # set end time on builds to last step endtime or starttime
        # result of build and last step to EXCEPTION
        for build in unfinished_builds:
            (build.steps.filter(endtime__isnull=True).update(
                endtime=F('starttime'), result=EXCEPTION))
            build.endtime = max(
                list(build.steps.values_list('endtime', flat=True)) +
                [build.starttime])
            build.result = EXCEPTION
            build.save()
        # now that we cleaned up the debris, let's see where we want to start
        changesets = (Changeset.objects.filter(revision__in=revs).annotate(
            last_push=Max('pushes')))
        last_push = changesets.aggregate(Min('last_push'))['last_push__min']
        if last_push is not None:
            # let's redo starting from that push, so return that - 1
            log.msg("replaying revisions: %s, %d changesets, first push: %d" %
                    (", ".join(revs), changesets.count(), last_push))
            return last_push - 1

    # OK, so either there wasn't any debris, or there was no push on it
    # Find the last push with a run, in id ordering, not push_date ordering.
    for p in Push.objects.order_by('-pk').prefetch_related('changesets')[:100]:
        if p.tip.run_set.exists():
            log.msg("restarting after a push with run: %d" % p.id)
            return p.id

    # We don't have recent builds, just use the last push
    try:
        p = Push.objects.order_by('-pk')[0]
        log.msg("restarting after the last push: %d" % p.id)
        return p.id
    except IndexError:
        # new data
        log.msg("new data, starting poller with 0")
        return 0
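
The recovery step above annotates every affected changeset with the newest push that contains it (Max), then restarts from the oldest of those (Min). The same annotate-then-aggregate shape in isolation, reusing the models imported in the function (the revision list is illustrative):

from django.db.models import Max, Min
from life.models import Changeset

def earliest_latest_push(revisions):
    """Return the smallest 'latest push id' over the given changeset revisions, or None."""
    changesets = Changeset.objects.filter(revision__in=revisions).annotate(last_push=Max('pushes'))
    return changesets.aggregate(Min('last_push'))['last_push__min']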
Beispiel #21
0
 def qs_with_price(queryset: QuerySet, channel_slug: str) -> QuerySet:
     validate_channel_slug(channel_slug)
     return queryset.annotate(min_variants_price_amount=Min(
         "variants__channel_listings__price_amount",
         filter=Q(variants__channel_listings__channel__slug=channel_slug),
     ))
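
A usage sketch for the annotation above; the Product queryset and channel slug are illustrative:

# Order products cheapest-first by their per-channel minimum variant price
products = qs_with_price(Product.objects.all(), "default-channel")
products = products.order_by("min_variants_price_amount")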
Beispiel #22
0
 def qs_with_price(queryset: QuerySet) -> QuerySet:
     return queryset.annotate(
         min_variants_price_amount=Min("variants__price_amount"))
Beispiel #23
0
    def get_queryset(self):
        sensor_type = self.kwargs["sensor_type"]

        city_slugs = self.request.query_params.get("city", None)
        from_date = self.request.query_params.get("from", None)
        to_date = self.request.query_params.get("to", None)
        interval = self.request.query_params.get("interval", None)

        if to_date and not from_date:
            raise ValidationError(
                {"from": "Must be provide along with to query"})
        if from_date:
            validate_date(from_date,
                          {"from": "Must be a date in the format Y-m-d."})
        if to_date:
            validate_date(to_date,
                          {"to": "Must be a date in the format Y-m-d."})

        value_type_to_filter = self.request.query_params.get(
            "value_type", None)

        filter_value_types = value_types[sensor_type]
        if value_type_to_filter:
            filter_value_types = set(
                value_type_to_filter.upper().split(",")) & set(
                    [x.upper() for x in value_types[sensor_type]])

        if not from_date and not to_date:
            to_date = timezone.now().replace(minute=0, second=0, microsecond=0)
            from_date = to_date - datetime.timedelta(hours=24)
            interval = 'day' if not interval else interval
        elif not to_date:
            from_date = beginning_of_day(from_date)
            # Get data from from_date until the end of yesterday,
            # which is the beginning of today
            to_date = beginning_of_today()
        else:
            from_date = beginning_of_day(from_date)
            to_date = end_of_day(to_date)

        queryset = SensorDataStat.objects.filter(
            value_type__in=filter_value_types,
            timestamp__gte=from_date,
            timestamp__lte=to_date,
        )

        if interval == 'month':
            truncate = TruncMonth("timestamp")
        elif interval == 'day':
            truncate = TruncDay("timestamp")
        else:
            truncate = TruncHour("timestamp")

        if city_slugs:
            queryset = queryset.filter(city_slug__in=city_slugs.split(","))

        return (queryset.values("value_type", "city_slug").annotate(
            truncated_timestamp=truncate,
            start_datetime=Min("timestamp"),
            end_datetime=Max("timestamp"),
            calculated_average=ExpressionWrapper(
                Sum(F("average") * F("sample_size")) / Sum("sample_size"),
                output_field=FloatField(),
            ),
            calculated_minimum=Min("minimum"),
            calculated_maximum=Max("maximum"),
        ).values("value_type", "city_slug", "truncated_timestamp",
                 "start_datetime", "end_datetime", "calculated_average",
                 "calculated_minimum",
                 "calculated_maximum").order_by("city_slug",
                                                "-truncated_timestamp"))
Beispiel #24
0
 def index_range(self, zoom):
     """
     Compute the index range for this rasterlayer at a given zoom level.
     """
     return self.rastertile_set.filter(tilez=zoom).aggregate(
         Min('tilex'), Max('tilex'), Min('tiley'), Max('tiley'))
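
The aggregate above returns a dict keyed by Django's default aggregate names; for example, a call like index_range(12) might return something of the form (values illustrative):

# {'tilex__min': 2101, 'tilex__max': 2108, 'tiley__min': 1399, 'tiley__max': 1405}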
Beispiel #25
0
 def get_max_price(self):
     return self.itinerary_set.all().annotate(min_price=Min(
         'pricingoption__price')).order_by('-min_price').first()
Beispiel #26
0
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        tz = timezone.get_current_timezone()

        if 'latest' in self.request.GET:
            clear_cache(self.request.event)

        subevent = None
        if self.request.GET.get("subevent", "") != "" and self.request.event.has_subevents:
            i = self.request.GET.get("subevent", "")
            try:
                subevent = self.request.event.subevents.get(pk=i)
            except SubEvent.DoesNotExist:
                pass

        cache = self.request.event.cache
        ckey = str(subevent.pk) if subevent else 'all'

        p_date = OrderPayment.objects.filter(
            order=OuterRef('pk'),
            state__in=(OrderPayment.PAYMENT_STATE_CONFIRMED, OrderPayment.PAYMENT_STATE_REFUNDED),
            payment_date__isnull=False
        ).values('order').annotate(
            m=Max('payment_date')
        ).values(
            'm'
        ).order_by()
        op_date = OrderPayment.objects.filter(
            order=OuterRef('order'),
            state__in=(OrderPayment.PAYMENT_STATE_CONFIRMED, OrderPayment.PAYMENT_STATE_REFUNDED),
            payment_date__isnull=False
        ).values('order').annotate(
            m=Max('payment_date')
        ).values(
            'm'
        ).order_by()

        # Orders by day
        ctx['obd_data'] = cache.get('statistics_obd_data' + ckey)
        if not ctx['obd_data']:
            oqs = Order.objects.annotate(payment_date=Subquery(p_date, output_field=DateTimeField()))
            if subevent:
                oqs = oqs.filter(all_positions__subevent_id=subevent, all_positions__canceled=False).distinct()

            ordered_by_day = {}
            for o in oqs.filter(event=self.request.event).values('datetime'):
                day = o['datetime'].astimezone(tz).date()
                ordered_by_day[day] = ordered_by_day.get(day, 0) + 1
            paid_by_day = {}
            for o in oqs.filter(event=self.request.event, payment_date__isnull=False).values('payment_date'):
                day = o['payment_date'].astimezone(tz).date()
                paid_by_day[day] = paid_by_day.get(day, 0) + 1

            data = []
            for d in dateutil.rrule.rrule(
                    dateutil.rrule.DAILY,
                    dtstart=min(ordered_by_day.keys()) if ordered_by_day else datetime.date.today(),
                    until=max(
                        max(ordered_by_day.keys() if ordered_by_day else [datetime.date.today()]),
                        max(paid_by_day.keys() if paid_by_day else [datetime.date(1970, 1, 1)])
                    )):
                d = d.date()
                data.append({
                    'date': d.strftime('%Y-%m-%d'),
                    'ordered': ordered_by_day.get(d, 0),
                    'paid': paid_by_day.get(d, 0)
                })

            ctx['obd_data'] = json.dumps(data)
            cache.set('statistics_obd_data' + ckey, ctx['obd_data'])

        # Orders by product
        ctx['obp_data'] = cache.get('statistics_obp_data' + ckey)
        if not ctx['obp_data']:
            opqs = OrderPosition.objects
            if subevent:
                opqs = opqs.filter(subevent=subevent)
            num_ordered = {
                p['item']: p['cnt']
                for p in (opqs
                          .filter(order__event=self.request.event)
                          .values('item')
                          .annotate(cnt=Count('id')).order_by())
            }
            num_paid = {
                p['item']: p['cnt']
                for p in (opqs
                          .filter(order__event=self.request.event, order__status=Order.STATUS_PAID)
                          .values('item')
                          .annotate(cnt=Count('id')).order_by())
            }
            item_names = {
                i.id: str(i)
                for i in Item.objects.filter(event=self.request.event)
            }
            ctx['obp_data'] = json.dumps([
                {
                    'item': item_names[item],
                    'item_short': item_names[item] if len(item_names[item]) < 15 else (item_names[item][:15] + "…"),
                    'ordered': cnt,
                    'paid': num_paid.get(item, 0)
                } for item, cnt in num_ordered.items()
            ])
            cache.set('statistics_obp_data' + ckey, ctx['obp_data'])

        ctx['rev_data'] = cache.get('statistics_rev_data' + ckey)
        if not ctx['rev_data']:
            rev_by_day = {}
            if subevent:
                for o in OrderPosition.objects.annotate(
                        payment_date=Subquery(op_date, output_field=DateTimeField())
                ).filter(order__event=self.request.event,
                         subevent=subevent,
                         order__status=Order.STATUS_PAID,
                         payment_date__isnull=False).values('payment_date', 'price'):
                    day = o['payment_date'].astimezone(tz).date()
                    rev_by_day[day] = rev_by_day.get(day, 0) + o['price']
            else:
                for o in Order.objects.annotate(
                        payment_date=Subquery(p_date, output_field=DateTimeField())
                ).filter(event=self.request.event,
                         status=Order.STATUS_PAID,
                         payment_date__isnull=False).values('payment_date', 'total'):
                    day = o['payment_date'].astimezone(tz).date()
                    rev_by_day[day] = rev_by_day.get(day, 0) + o['total']

            data = []
            total = 0
            for d in dateutil.rrule.rrule(
                    dateutil.rrule.DAILY,
                    dtstart=min(rev_by_day.keys() if rev_by_day else [datetime.date.today()]),
                    until=max(rev_by_day.keys() if rev_by_day else [datetime.date.today()])):
                d = d.date()
                total += float(rev_by_day.get(d, 0))
                data.append({
                    'date': d.strftime('%Y-%m-%d'),
                    'revenue': round(total, 2),
                })
            ctx['rev_data'] = json.dumps(data)
            cache.set('statistics_rev_data' + ckey, ctx['rev_data'])

        ctx['has_orders'] = self.request.event.orders.exists()

        ctx['seats'] = {}

        if not self.request.event.has_subevents or (ckey != "all" and subevent):
            ev = subevent or self.request.event
            if ev.seating_plan_id is not None:
                seats_qs = ev.free_seats(sales_channel=None, include_blocked=True)
                ctx['seats']['blocked_seats'] = seats_qs.filter(blocked=True).count()
                ctx['seats']['free_seats'] = seats_qs.filter(blocked=False).count()
                ctx['seats']['purchased_seats'] = \
                    ev.seats.count() - ctx['seats']['blocked_seats'] - ctx['seats']['free_seats']

                seats_qs = seats_qs.values('product', 'blocked').annotate(count=Count('id'))\
                    .order_by('product__category__position', 'product__position', 'product', 'blocked')

                ctx['seats']['products'] = {}
                ctx['seats']['stats'] = {}
                item_cache = {i.pk: i for i in
                              self.request.event.items.annotate(has_variations=Count('variations')).filter(
                                  pk__in={p['product'] for p in seats_qs if p['product']}
                              )}
                item_cache[None] = None

                for item in seats_qs:
                    product = item_cache[item['product']]
                    if item_cache[item['product']] not in ctx['seats']['products']:
                        price = None
                        if product and product.has_variations:
                            price = product.variations.filter(
                                active=True
                            ).aggregate(Min('default_price'))['default_price__min']
                        if product and not price:
                            price = product.default_price
                        if not price:
                            price = Decimal('0.00')

                        ctx['seats']['products'][product] = {
                            'free': {
                                'seats': 0,
                                'potential': Decimal('0.00'),
                            },
                            'blocked': {
                                'seats': 0,
                                'potential': Decimal('0.00'),
                            },
                            'price': price,
                        }
                    data = ctx['seats']['products'][product]

                    if item['blocked']:
                        data['blocked']['seats'] = item['count']
                        data['blocked']['potential'] = item['count'] * data['price']
                    else:
                        data['free']['seats'] = item['count']
                        data['free']['potential'] = item['count'] * data['price']

        return ctx
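
The p_date and op_date querysets above use the Subquery/OuterRef pattern to attach the latest confirmed payment date to each order. A minimal standalone sketch of that pattern with the same models and field names as in this example:

from django.db.models import DateTimeField, Max, OuterRef, Subquery

latest_payment = OrderPayment.objects.filter(
    order=OuterRef('pk'),
    payment_date__isnull=False,
).values('order').annotate(m=Max('payment_date')).values('m').order_by()

orders_with_payment_date = Order.objects.annotate(
    payment_date=Subquery(latest_payment, output_field=DateTimeField())
)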
Beispiel #27
0
def results(request):
    plan = GroupSubject.objects.all().values(
        'group__name', 'exam_test').annotate(
            count_subject=Count('id'),
            max_ball=Case(When(exam_test="экзамен", then=5 * Count('id')),
                          When(exam_test='зачет', then=(1 * Count('id'))),
                          output_field=IntegerField())).order_by(
                              'group', 'exam_test')

    fact = ExamMark.objects.all().values(
        'student__number', 'student__name', 'student__surname',
        'student__patronymic', 'group_subject__exam_test',
        'student__group__name').annotate(count_marks=Count('id'),
                                         min_mark=Min('mark__value'),
                                         sum_mark=Sum('mark__value')).order_by(
                                             'student__group__name',
                                             'student__number',
                                             'group_subject__exam_test')

    for data in fact:
        data['student'] = data.get('student__surname') + ' ' + data.get(
            'student__name')[0] + '.' + data.get(
                'student__patronymic')[0] + '.'
        exam_test = data.get('group_subject__exam_test')
        min_mark = data.get('min_mark')
        data['total'] = 'да' if (
            (exam_test == 'экзамен' and min_mark > 2) or
            (exam_test == 'зачет' and min_mark > 0)) else 'нет'

    enumeration = enumerate(fact)
    for idx, data in enumeration:
        try:
            if fact[idx + 1]['student'] == data['student']:
                if (fact[idx + 1]['total'] == 'да' and data['total'] == 'да'):
                    max_ball_left = plan.filter(
                        group__name=data['student__group__name'],
                        exam_test=data['group_subject__exam_test']
                    )[0]['max_ball']
                    max_ball_right = plan.filter(
                        group__name=fact[idx + 1]['student__group__name'],
                        exam_test=fact[idx + 1]
                        ['group_subject__exam_test'])[0]['max_ball']
                    if (fact[idx + 1]['sum_mark'] +
                            data['sum_mark'] == max_ball_left +
                            max_ball_right):
                        fact[idx + 1]['grant'] = '200'
                        data['grant'] = '200'
                    elif (fact[idx + 1]['sum_mark'] +
                          data['sum_mark'] == max_ball_left + max_ball_right -
                          1):
                        fact[idx + 1]['grant'] = '150'
                        data['grant'] = '150'
                    elif (fact[idx + 1]['min_mark'] > 3):
                        fact[idx + 1]['grant'] = '100'
                        data['grant'] = '100'
                else:
                    fact[idx + 1]['grant'] = '0'
                    data['grant'] = '0'

                next(enumeration)
            elif ((data['min_mark'] > 3
                   and data['group_subject__exam_test'] == 'экзамен')
                  or (data['min_mark'] > 0
                      and data['group_subject__exam_test'] == 'зачет')):
                data['grant'] = '100'
            else:
                data['grant'] = '0'
        except IndexError:
            if ((data['min_mark'] > 3
                 and data['group_subject__exam_test'] == 'экзамен')
                    or (data['min_mark'] > 0
                        and data['group_subject__exam_test'] == 'зачет')):
                data['grant'] = '100'
            else:
                data['grant'] = '0'

    uniq_students = list({v['student__number']: v for v in fact}.values())

    return render(request, 'blog/results.html', {
        'plan': plan,
        'fact': fact,
        'uniq_students': uniq_students
    })
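
The loop over fact above reduces each student's marks to a pass flag: a row passes ('да') when its minimum mark is above 2 for exams or above 0 for pass/fail tests. The same rule as a small helper sketch (the Russian strings are the data values used in this example):

def passed(exam_test, min_mark):
    """Return 'да' if the minimum mark clears the threshold for this assessment type, else 'нет'."""
    if exam_test == 'экзамен':
        return 'да' if min_mark > 2 else 'нет'
    return 'да' if min_mark > 0 else 'нет'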
Beispiel #28
0
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ModelExam.settings')
from django import setup
setup()

from ModelApp.models import Students, Classes

student = Students.objects.get(pk=1)
for test_result in student.testresults_set.all():
    print(student.class_fk.name, student.name, test_result.test.name,
          test_result.score)

from django.db.models import Sum, Avg, Max, Min
# group by class name and test name
for class_summary in Classes.objects.values(
        'name', 'students__testresults__test__name').annotate(
            max_score=Max('students__testresults__score'),
            min_score=Min('students__testresults__score'),
            avg_score=Avg('students__testresults__score'),
            sum_score=Sum('students__testresults__score'),
        ):
    print(
        class_summary['name'],
        class_summary['students__testresults__test__name'],
        class_summary['max_score'],
        class_summary['min_score'],
        class_summary['avg_score'],
        class_summary['sum_score'],
    )
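
In the grouped query above, the fields listed in values() become the GROUP BY columns and each annotate() entry becomes an aggregate computed per group, so every resulting dict is one (class name, test name) row. A short sketch of the same query with an explicit ordering, using the imports already present in this example:

class_test_averages = (Classes.objects
                       .values('name', 'students__testresults__test__name')
                       .annotate(avg_score=Avg('students__testresults__score'))
                       .order_by('name'))
for row in class_test_averages:
    print(row['name'], row['students__testresults__test__name'], row['avg_score'])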
Beispiel #29
0
def fieldwork_average(request, slug):
    # Averages gathered in the field as a list(array).
    temperature = get_object_or_404(FieldData, slug=slug)
    qs = temperature
    qs.days = int(qs.days)
    # temperature
    ft = qs.core_temperature_readings
    array_ft = ft.split(',')
    ft_np = np.array(array_ft)
    numpy_ft = np.array(ft_np, int)
    qs.average_temperature = np.average(numpy_ft)

    if 70 <= qs.average_temperature and qs.days >= 7:
        qs.is_pile_ready = True
        print(qs.is_pile_ready)

    elif 131 <= qs.average_temperature <= 140 and qs.days >= 3:
        qs.must_turn_pile = True
        print(qs.average_temperature, qs.must_turn_pile)

    elif 141 <= qs.average_temperature <= 150 and qs.days >= 2:
        qs.must_turn_pile = True
        print(qs.average_temperature, qs.must_turn_pile)

    elif 151 <= qs.average_temperature <= 160 and qs.days >= 1:
        qs.must_turn_pile = True
        print(qs.average_temperature, qs.must_turn_pile)
        qs.must_turn_pile = True
        print(qs.average_temperature, qs.must_turn_pile)

    elif qs.average_temperature >= 161:
        qs.must_turn_pile = True
        print(qs.average_temperature, qs.must_turn_pile)

    # humidity
    fh = qs.core_humidity_readings
    array_fh = fh.split(',')
    fh_np = np.array(array_fh)
    numpy_fh = np.array(fh_np, int)
    qs.average_humidity = np.average(numpy_fh)
    if qs.average_humidity < 45:
        qs.must_water_now = True
        qs.save()
        print(qs.must_water_now)

    print(qs.average_temperature)
    print(qs.average_humidity)
    print(qs.is_pile_ready)
    print(qs.must_turn_pile)
    print(qs.days)
    bell_temp = FieldData.objects.aggregate(Max('average_temperature'),
                                            Min('average_temperature'))
    qs.bell_temperature = bell_temp
    print(qs.bell_temperature)
    bell_humid = FieldData.objects.aggregate(Max('average_humidity'),
                                             Min('average_humidity'))
    qs.bell_humid = bell_humid
    print(bell_humid)
    qs.save()
    context = {
        "title": qs.title,
        "instance": qs,
    }
    if qs.turned:
        qs.days = 0
        qs.turn_cycle += 1
        qs.save()
        print(qs.days)
        print(qs.turn_cycle)
    else:
        qs.days += 1
        qs.save()

    return render(request, "fieldwork/fieldwork_average_detail.html", context)
Beispiel #30
0
    def get(self, request):
        data = {'id': request.user.id}

        # requestingUser 	= User.objects.get(id=data['id'])
        requestingUser = User.objects.get(id=2)
        userProfile = Userprofile.objects.get(userID=requestingUser)
        companyUsers = Userprofile.objects.filter(
            companyID=userProfile.companyID).order_by('firstname')
        # realUsers 		= User.objects.filter(id__in=[userID['userID'] for userID in list(companyUsers.values('userID'))])
        realUsers = User.objects.filter(
            id__in=companyUsers.values_list('userID', flat=True))
        sessions = Session.objects.filter(
            user__in=realUsers).order_by('sessionNumber')

        sessionElapsedDaysAllList = []

        examScoreAvgList = []
        examAttemptsAvgList = []
        sessionElapsedDaysAvgList = []

        examScoreMaxList = []
        examAttemptsMaxList = []
        sessionElapsedDaysMaxList = []

        examScoreMinList = []
        examAttemptsMinList = []
        sessionElapsedDaysMinList = []

        startDates = []
        endDates = []

        # Things get a bit ugly here and should be improved: for each of the twelve sessions we
        # calculate the averages, maxima and minima over the whole set of company users, for an
        # admin screen that has not yet been released publicly.
        for x in range(1, 13):
            examScoreAvgList.append(
                Exam.objects.filter(id__in=sessions.filter(
                    sessionNumber=x).values_list(
                        'examID_id', flat=True)).aggregate(Avg('examScore')))
            examAttemptsAvgList.append(
                Exam.objects.filter(id__in=sessions.filter(
                    sessionNumber=x).values_list(
                        'examID_id', flat=True)).aggregate(Avg('attempts')))
            examScoreMaxList.append(
                Exam.objects.filter(id__in=sessions.filter(
                    sessionNumber=x).values_list(
                        'examID_id', flat=True)).aggregate(Max('examScore')))
            examAttemptsMaxList.append(
                Exam.objects.filter(id__in=sessions.filter(
                    sessionNumber=x).values_list(
                        'examID_id', flat=True)).aggregate(Max('attempts')))
            examScoreMinList.append(
                Exam.objects.filter(id__in=sessions.filter(
                    sessionNumber=x).values_list(
                        'examID_id', flat=True)).aggregate(Min('examScore')))
            examAttemptsMinList.append(
                Exam.objects.filter(id__in=sessions.filter(
                    sessionNumber=x).values_list(
                        'examID_id', flat=True)).aggregate(Min('attempts')))
            startDates.append(
                sessions.filter(sessionNumber=x).values_list('startDate',
                                                             flat=True))
            endDates.append(
                sessions.filter(sessionNumber=x).values_list('endDate',
                                                             flat=True))

        for x in range(0, 12):
            sessionElapsedDaysAllList.append([
                (endDates[x][y] -
                 startDates[x][y]).days if endDates[x][y] is not None else 0
                for y in range(0, len(startDates[0]))
            ])
            sessionElapsedDaysAvgList.append(mean(
                sessionElapsedDaysAllList[x]))
            sessionElapsedDaysMaxList.append(max(sessionElapsedDaysAllList[x]))
            sessionElapsedDaysMinList.append(min(sessionElapsedDaysAllList[x]))

        response = {
            'avgs': {
                'scores':
                [theList['examScore__avg'] for theList in examScoreAvgList],
                'attempts':
                [theList['attempts__avg'] for theList in examAttemptsAvgList],
                'days':
                sessionElapsedDaysAvgList
            },
            'maxes': {
                'scores':
                [theList['examScore__max'] for theList in examScoreMaxList],
                'attempts':
                [theList['attempts__max'] for theList in examAttemptsMaxList],
                'days':
                sessionElapsedDaysMaxList
            },
            'mins': {
                'scores':
                [theList['examScore__min'] for theList in examScoreMinList],
                'attempts':
                [theList['attempts__min'] for theList in examAttemptsMinList],
                'days':
                sessionElapsedDaysMinList
            }
        }
        return JsonResponse(response, safe=False)
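
The twelve-iteration loop above runs six aggregate queries per session number. The same averages, maxima and minima could plausibly be gathered in one grouped query by aggregating across the Session→Exam relation; a hedged sketch, assuming the Session model's foreign key to Exam is named examID, as the examID_id column above suggests:

from django.db.models import Avg, Max, Min

per_session_stats = (sessions
                     .values('sessionNumber')
                     .annotate(
                         score_avg=Avg('examID__examScore'),
                         score_max=Max('examID__examScore'),
                         score_min=Min('examID__examScore'),
                         attempts_avg=Avg('examID__attempts'),
                         attempts_max=Max('examID__attempts'),
                         attempts_min=Min('examID__attempts'),
                     )
                     .order_by('sessionNumber'))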