Example #1
    def handle(self, *args, **options):
        """
        达标妈妈活跃度, 一个达标过的妈妈与未达标妈妈的数据对比
        需求待分析
        """
        start_week = options.get('start_week')[0]

        earliest_finish_valuelist = MamaMissionRecord.objects.filter(
            mission__cat_type=MamaMission.CAT_SALE_MAMA,
            # status= MamaMissionRecord.FINISHED,
            finish_time__isnull=False,
            # mama_id=44
        ).values('mama_id').annotate(Min('finish_time')).values_list(
            'mama_id', 'finish_time__min')
        earliest_map = dict(earliest_finish_valuelist)

        week_list = self.get_start_weeklist(start_week)
        mama_data = {}
        base_qs = MamaMissionRecord.objects.filter(
            mission__cat_type=MamaMission.CAT_SALE_MAMA,
            # mama_id=44,
        )
        for year_week in week_list:
            week_start, week_end = week_range(
                datetime.datetime.strptime('%s-0' % year_week, '%Y-%W-%w'))
            base_week_qs = base_qs.filter(year_week=year_week,
                                          created__range=(week_start,
                                                          week_end),
                                          finish_value__gt=0)

            mama_week_data = base_week_qs.values_list('mama_id',
                                                      'target_value',
                                                      'finish_value',
                                                      'finish_time')
            for mama_id, target_value, finish_value, finish_time in mama_week_data:
                first_finish_time = earliest_map.get(mama_id) or finish_time
                if not first_finish_time:
                    continue
                # Record this week's numbers, creating the per-mama dict on
                # first sight; an existing (mama, week) entry is kept as-is.
                mama_data.setdefault(mama_id, {}).setdefault(
                    year_week, {
                        'mama_id': mama_id,
                        'target_value': target_value,
                        'finish_value': finish_value,
                        'first_finish_time': first_finish_time
                    })

        # week_len x week_len grid of buckets: rows index the week a mama
        # first met her target, columns the week being reported.
        week_len = len(week_list)
        week_dd = [[[] for _ in range(week_len)] for _ in range(week_len)]

        for mama_id, data_dict in mama_data.items():
            for inn_year_week, mm_data in data_dict.items():
                earliest_year_week = mm_data['first_finish_time'].strftime(
                    '%Y-%W')
                if earliest_year_week not in week_list:
                    # First finish predates the reporting window; skip it.
                    continue
                week_dd[week_list.index(earliest_year_week)][week_list.index(
                    inn_year_week)].append(mm_data)

        def _calc_value(group_data):
            num_a = len(group_data)
            num_b = len([
                d['finish_value'] for d in group_data
                if d['finish_value'] >= d['target_value']
            ])
            # Average completion ratio among those who missed their target.
            unfinish_ll = [(d['finish_value'] * 1.0) / d['target_value']
                           for d in group_data
                           if d['finish_value'] < d['target_value']]
            rate_a = sum(unfinish_ll) / len(unfinish_ll) if unfinish_ll else 0
            return '/'.join([str(num_a), str(num_b), '%.4f' % rate_a])

        print('\t'.join(['week'] + week_list))

        for index, ddx in enumerate(week_dd):
            ddx_ll = [week_list[index]] + [_calc_value(ddl) for ddl in ddx]
            print('\t'.join(ddx_ll))
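
The earliest-finish lookup at the top of this handler is the reusable piece: group with values(), annotate a Min(), and zip the pairs into a dict. A minimal standalone sketch (model and field names mirror the code above, so treat them as assumptions):

from django.db.models import Min

# One query: the earliest finish_time per mama_id, as (mama_id, min) pairs.
pairs = (MamaMissionRecord.objects
         .filter(finish_time__isnull=False)
         .values('mama_id')
         .annotate(Min('finish_time'))
         .values_list('mama_id', 'finish_time__min'))
earliest_map = dict(pairs)  # {mama_id: earliest finish_time}
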
Example #2
def index(request):
    pictures_json = {
        'data_count':
        Pictures.objects.count(),
        # aggregate computes a summary value over the whole queryset
        'total_like_count':
        Pictures.objects.aggregate(Sum('picture_like'))['picture_like__sum'],
        'avg_like_count':
        Pictures.objects.aggregate(Avg('picture_like'))['picture_like__avg'],
        'max_like_count':
        Pictures.objects.aggregate(Max('picture_like'))['picture_like__max'],
        # order_by sorts; a leading minus sign means descending
        'max_like_count_image_url':
        Pictures.objects.order_by('-picture_like')[0].picture_url,
        'max_like_count_image_date':
        Pictures.objects.order_by('-picture_like')[0].picture_date,
        'max_like_count_image_text':
        Pictures.objects.order_by('-picture_like')[0].picture_description,
        'min_like_count':
        Pictures.objects.aggregate(Min('picture_like'))['picture_like__min'],
        'total_download_count':
        Pictures.objects.aggregate(
            Sum('picture_download'))['picture_download__sum'],
        'avg_download_count':
        Pictures.objects.aggregate(
            Avg('picture_download'))['picture_download__avg'],
        'max_download_count':
        Pictures.objects.aggregate(
            Max('picture_download'))['picture_download__max'],
        'max_download_count_image_url':
        Pictures.objects.order_by('-picture_download')[0].picture_url,
        'max_download_count_image_date':
        Pictures.objects.order_by('-picture_download')[0].picture_date,
        'max_download_count_image_text':
        Pictures.objects.order_by('-picture_download')[0].picture_description,
        'min_download_count':
        Pictures.objects.aggregate(
            Min('picture_download'))['picture_download__min'],
        # fetch all picture rows
        'num_data':
        list(Pictures.objects.all().values('id', 'picture_date',
                                           'picture_like',
                                           'picture_download')),
    }

    # poem count per author
    author_poem_list = Poems.objects.values('poem_author').annotate(
        Count('poem_author')).all().order_by('-poem_author__count')

    # poem count per tune title (cipai)
    poem_title_list = Poems.objects.values('poem_title').annotate(
        Count('poem_title')).all().order_by('-poem_title__count')

    poems_json = {
        'data_count':
        Poems.objects.count(),
        'author_data_count':
        PoemsAuthors.objects.count(),
        'author_count':
        Poems.objects.values('poem_author').distinct().count(),
        'title_count':
        Poems.objects.values('poem_title').distinct().count(),
        'author_poem_data':
        list(author_poem_list),
        'author_avg_poem_count':
        Poems.objects.count() / PoemsAuthors.objects.count(),
        'poem_title_data':
        list(poem_title_list),
        'poem_avg_title_count':
        Poems.objects.count() /
        Poems.objects.values('poem_title').distinct().count(),
        # number of authors with only one poem
        'poem_count_1':
        author_poem_list.filter(poem_author__count=1).count(),
    }

    sentences_json = {
        'data_count':
        Sentences.objects.count(),
        # sentence counts by category
        'sentence_data':
        list(
            Sentences.objects.values(
                'sentence_category', 'sentence_category_description').annotate(
                    Count('sentence_category')).all().order_by(
                        '-sentence_category__count'))
    }

    # overview data
    overview_json = {
        'total_data_count':
        pictures_json['data_count'] + poems_json['data_count'] +
        poems_json['author_data_count'] + sentences_json['data_count'],
        'category_count':
        3,
        'database_table_count':
        4,
        'pictures_data_count':
        pictures_json['data_count'],
        'pictures_image_count':
        pictures_json['data_count'],
        'pictures_total_like_count':
        pictures_json['total_like_count'],
        'pictures_total_download_count':
        pictures_json['total_download_count'],
        'poems_data_count':
        poems_json['data_count'] + poems_json['author_data_count'],
        'poems_poem_count':
        poems_json['data_count'],
        'poems_author_count':
        poems_json['author_data_count'],
        'sentences_data_count':
        sentences_json['data_count'],
        'sentences_try_count':
        6400,
        'sentences_category_count':
        8,
    }

    # return JSON data (same pattern below)
    return JsonResponse({
        'code': '0',
        'message': 'ok',
        'overview': overview_json,
        'pictures': pictures_json,
        'poems': poems_json,
        'sentences': sentences_json,
    })
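
Each aggregate() call above runs its own query. Django also accepts several aggregates in a single call and returns one dict keyed '<field>__<function>'; a sketch against the same Pictures model:

from django.db.models import Avg, Max, Min, Sum

# One query instead of four separate ones.
like_stats = Pictures.objects.aggregate(
    Sum('picture_like'), Avg('picture_like'),
    Max('picture_like'), Min('picture_like'))
# {'picture_like__sum': ..., 'picture_like__avg': ...,
#  'picture_like__max': ..., 'picture_like__min': ...}
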
Example #3
    def test_populate_line_item_daily_summary_table(self):
        """Test that the daily summary table is populated."""
        summary_table_name = AZURE_REPORT_TABLE_MAP["line_item_daily_summary"]
        summary_table = getattr(self.accessor.report_schema,
                                summary_table_name)

        bills = self.accessor.get_cost_entry_bills_query_by_provider(
            self.azure_provider_uuid)
        with schema_context(self.schema):
            bill_ids = [str(bill.id) for bill in bills.all()]

        table_name = AZURE_REPORT_TABLE_MAP["line_item"]
        line_item_table = getattr(self.accessor.report_schema, table_name)
        tag_query = self.accessor._get_db_obj_query(table_name)
        possible_keys = []
        possible_values = []
        with schema_context(self.schema):
            for item in tag_query:
                possible_keys += list(item.tags.keys())
                possible_values += list(item.tags.values())

            li_entry = line_item_table.objects.all().aggregate(
                Min("usage_date"), Max("usage_date"))
            start_date = li_entry["usage_date__min"]
            end_date = li_entry["usage_date__max"]

        start_date = start_date.date() if isinstance(
            start_date, datetime.datetime) else start_date
        end_date = end_date.date() if isinstance(
            end_date, datetime.datetime) else end_date

        query = self.accessor._get_db_obj_query(summary_table_name)
        with schema_context(self.schema):
            query.delete()
            initial_count = query.count()

        self.accessor.populate_line_item_daily_summary_table(
            start_date, end_date, bill_ids)
        with schema_context(self.schema):
            self.assertNotEqual(query.count(), initial_count)

            summary_entry = summary_table.objects.all().aggregate(
                Min("usage_start"), Max("usage_start"))
            result_start_date = summary_entry["usage_start__min"]
            result_end_date = summary_entry["usage_start__max"]

            self.assertEqual(result_start_date, start_date)
            self.assertEqual(result_end_date, end_date)

            entry = query.order_by("-id")

            summary_columns = [
                "usage_start",
                "usage_quantity",
                "pretax_cost",
                "cost_entry_bill_id",
                "meter_id",
                "tags",
            ]

            for column in summary_columns:
                self.assertIsNotNone(getattr(entry.first(), column))

            found_keys = []
            found_values = []
            for item in query.all():
                found_keys += list(item.tags.keys())
                found_values += list(item.tags.values())

            self.assertEqual(set(possible_keys), set(found_keys))
            self.assertEqual(set(possible_values), set(found_values))
Example #4
    def test_aggregate_multi_join(self):
        vals = Store.objects.aggregate(Max("books__authors__age"))
        self.assertEqual(vals, {'books__authors__age__max': 57})

        vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
        self.assertEqual(vals, {'book__publisher__num_awards__min': 1})
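
The same multi-join lookups also work per row with annotate() rather than a single table-wide aggregate(). A sketch against the same models (the 'name' field is an assumption):

from django.db.models import Min

# Youngest author age across every book each store carries.
stores = Store.objects.annotate(
    youngest_author_age=Min('books__authors__age'))
for store in stores:
    print(store.name, store.youngest_author_age)
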
Example #5
    def get_queryset(self):
        sensor_type = self.kwargs["sensor_type"]

        city_slugs = self.request.query_params.get("city", None)
        from_date = self.request.query_params.get("from", None)
        to_date = self.request.query_params.get("to", None)
        interval = self.request.query_params.get("interval", None)

        if to_date and not from_date:
            raise ValidationError(
                {"from": "Must be provided along with the 'to' query parameter"})
        if from_date:
            validate_date(from_date, {"from": "Must be a date in the format Y-m-d."})
        if to_date:
            validate_date(to_date, {"to": "Must be a date in the format Y-m-d."})

        value_type_to_filter = self.request.query_params.get("value_type", None)

        filter_value_types = value_types[sensor_type]
        if value_type_to_filter:
            filter_value_types = set(value_type_to_filter.upper().split(",")) & set(
                [x.upper() for x in value_types[sensor_type]]
            )

        if not from_date and not to_date:
            to_date = timezone.now().replace(minute=0, second=0, microsecond=0)
            from_date = to_date - datetime.timedelta(hours=24)
            interval = "day" if not interval else interval
        elif not to_date:
            from_date = beginning_of_day(from_date)
            # Get data from_date until the end
            # of day yesterday which is the beginning of today
            to_date = beginning_of_today()
        else:
            from_date = beginning_of_day(from_date)
            to_date = end_of_day(to_date)

        queryset = SensorDataStat.objects.filter(
            value_type__in=filter_value_types,
            timestamp__gte=from_date,
            timestamp__lte=to_date,
        )

        if interval == "month":
            truncate = TruncMonth("timestamp")
        elif interval == "day":
            truncate = TruncDay("timestamp")
        else:
            truncate = TruncHour("timestamp")

        if city_slugs:
            queryset = queryset.filter(city_slug__in=city_slugs.split(","))

        return (
            queryset.values("value_type", "city_slug")
            .annotate(
                truncated_timestamp=truncate,
                start_datetime=Min("timestamp"),
                end_datetime=Max("timestamp"),
                calculated_average=ExpressionWrapper(
                    Sum(F("average") * F("sample_size")) / Sum("sample_size"),
                    output_field=FloatField(),
                ),
                calculated_minimum=Min("minimum"),
                calculated_maximum=Max("maximum"),
            )
            .values(
                "value_type",
                "city_slug",
                "truncated_timestamp",
                "start_datetime",
                "end_datetime",
                "calculated_average",
                "calculated_minimum",
                "calculated_maximum",
            )
            .order_by("city_slug", "-truncated_timestamp")
        )
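
The calculated_average annotation above is a weighted mean evaluated entirely in SQL. Distilled to just that piece (same SensorDataStat fields assumed):

from django.db.models import ExpressionWrapper, F, FloatField, Sum

# Weighted mean per city: sum(average * sample_size) / sum(sample_size).
weighted = SensorDataStat.objects.values('city_slug').annotate(
    weighted_avg=ExpressionWrapper(
        Sum(F('average') * F('sample_size')) / Sum('sample_size'),
        output_field=FloatField()))
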
Example #6
def get_seasons():
    # Min('year') is None when the table is empty; guard before arithmetic.
    start_year = MatchDate.objects.aggregate(Min('year'))['year__min']
    if start_year is None:
        return range(0)
    return range(date.today().year, start_year - 1, -1)
Example #7
    def get(self, request, product_id):
        product = Product.objects.get(id=product_id)
        detail_data_set = {
            'product_id':
            product.id,
            'name':
            product.name,
            'ticker':
            product.ticker,
            'series':
            product.category.name,
            'sub_category':
            product.category.sub_category.name,
            'main_category':
            product.category.sub_category.main_category.name,
            'description':
            product.description,
            'style':
            product.style,
            'colorway':
            product.colorway,
            'retail_price':
            '$' + str(int(product.retail_price)),
            'release_date':
            product.release_date.date(),
            'average_price':
            '$' +
            str(product.average_price) if product.average_price else '$0',
            'price_premium':
            str(product.price_premium / 10) +
            '%' if product.price_premium else '0%',
            'detail_images':
            Image.objects.filter(product=product_id, image_type=2)[0].url,
            'volatility':
            str(product.volatility * 100) +
            '%' if product.volatility else '0.0%'
        }
        #################################################################################
        #   BASIC PRODUCT INFORM Complete

        related_product_list = []
        all_product_count = Product.objects.count()
        related_range = list(
            range(product.id + 1, product.id +
                  16)) if all_product_count // 2 > product.id else list(
                      range(product.id - 1, product.id - 16, -1))
        related_product = Product.objects.filter(
            id__in=related_range).order_by('id')
        related_product_images = Image.objects.filter(
            product__in=related_range, image_type=1).order_by('product')
        for each_product in related_product:
            related_product_list.append({
                'product_id':
                each_product.id,
                'name':
                each_product.name,
                'thumbnail':
                related_product_images[each_product.id - (product_id + 1)].url,
                'average_price':
                '$' + str(int(each_product.average_price))
                if each_product.average_price else '$0'
            })
        detail_data_set['related_product_list'] = related_product_list
        ##################################################################################
        #   RELATED PRODUCT Complete

        product_size_info_list = []
        week_52_low, week_52_high = None, None
        product_size_list = [
            each.id for each in ProductSize.objects.filter(product=product_id)
        ]

        productsize_list = ProductSize.objects.filter(product=product_id)
        for productsize in productsize_list:
            target_orders = Order.objects.filter(
                ask__product_size_id=productsize.id).select_related(
                    'ask').order_by('date')
            # NB: order_by('date') is ascending, so index 0 is the oldest order.
            if len(target_orders) > 1:
                recent_price = target_orders[0].ask.price
                before_price = target_orders[1].ask.price
                recent_date = target_orders[0].date
            elif len(target_orders) == 1:
                recent_price = target_orders[0].ask.price
                before_price = 0
                recent_date = target_orders[0].date
            else:
                recent_price, before_price, recent_date = 0, 0, None

            difference = int(recent_price - before_price)
            percentage = 0 if difference == 0 else int(
                (difference / recent_price) * 100)

            lowAsk = Ask.objects.filter(
                product_size_id=productsize.id).aggregate(
                    Min('price'))['price__min']
            highBid = Bid.objects.filter(
                product_size_id=productsize.id).aggregate(
                    Max('price'))['price__max']

            # Track the 52-week extremes across all sizes.
            if highBid:
                if week_52_high is None or week_52_high < highBid:
                    week_52_high = int(highBid)
            if lowAsk:
                if week_52_low is None or week_52_low > lowAsk:
                    week_52_low = int(lowAsk)

            product_size_info_list.append({
                'size':
                productsize.size.name,
                'lowestAsk':
                '$' + str(int(lowAsk)) if lowAsk else '$0',
                'highestBid':
                '$' + str(int(highBid)) if highBid else '$0',
                'lastSale':
                '$' + str(int(recent_price)),
                'lastSize':
                None,
                'difference':
                '-$' + str(difference)[1:] if difference < 0 else '+$' +
                str(difference) if difference > 0 else str(difference),
                'percentage':
                str(percentage) + '%' if percentage < 0 else '+' +
                str(percentage) + '%' if percentage > 0 else str(percentage) +
                '%',
                'lastDate':
                recent_date
            })

        temp_lastDate = 0
        size_all = {
            'size': 'All',
            'lowestAsk': '$100000',
            'highestBid': '$0',
            'lastSale': 0,
            'lastSize': '',
            'difference': 0,
            'percentage': 0,
            'lastDate': None
        }

        for item in product_size_info_list:
            if item['highestBid'] is not None and int(
                    size_all['highestBid'][1:]) < int(item['highestBid'][1:]):
                size_all['highestBid'] = item['highestBid']
            if item['lowestAsk'] is not None and int(
                    size_all['lowestAsk'][1:]) > int(item['lowestAsk'][1:]):
                size_all['lowestAsk'] = item['lowestAsk']

            if item['lastDate'] is None:
                continue
            if temp_lastDate == 0:
                temp_lastDate = item['lastDate']
            if temp_lastDate <= item['lastDate']:
                temp_lastDate = item['lastDate']
                size_all['size'] = 'All'
                size_all['lastSale'] = item['lastSale']
                size_all['lastSize'] = item['size']
                size_all['difference'] = item['difference']
                size_all['percentage'] = item['percentage']
        product_size_info_list.insert(0, size_all)

        detail_data_set['52week_high'] = '$' + str(week_52_high)
        detail_data_set['52week_low'] = '$' + str(week_52_low)
        detail_data_set['product_size_info_list'] = product_size_info_list
        ####################################################################################
        #   SIZE RELATED INFORMATION Complete

        all_sale_list = []
        target_asks = Ask.objects.filter(
            product_size_id__in=product_size_list).prefetch_related(
                'order_set').order_by('order__date')
        for ask in target_asks:
            full_date = str(ask.order_set.get().date).split(' ')
            all_sale_list.append({
                'size':
                ProductSize.objects.get(id=ask.product_size_id).size.name,
                'sale_price':
                int(ask.price),
                'date':
                full_date[0],
                'time':
                full_date[1]
            })
        detail_data_set['all_sale_list'] = all_sale_list

        return JsonResponse({'message': detail_data_set}, status=200)
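
One caveat in the final loop above: ProductSize.objects.get(...) runs once per ask. A sketch of the same loop with the join pulled in up front, assuming Ask has a product_size foreign key (which the product_size_id column suggests):

target_asks = (Ask.objects
               .filter(product_size_id__in=product_size_list)
               .select_related('product_size__size')
               .prefetch_related('order_set')
               .order_by('order__date'))
for ask in target_asks:
    size_name = ask.product_size.size.name  # no extra query per row
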
Example #8
    def get(self, request, id, tipo_id, format=None):
        cursor = connections['vote_db'].cursor()

        comuna = Comuna.objects.get(pk=id)
        comunaSer = ComunaFullSerial(comuna, many=False).data

        #        region = Region.objects.get(pk=comuna.region.id)
        #        regionSer = RegionSerial(region, many=False).data
        #        comunaSer['region'] = regionSer

        delincuencia = Delincuencia.objects.filter(comuna_id=id)
        delincuenciaSer = DelincuenciaSerial(delincuencia, many=True).data
        comunaSer['delincuencia'] = delincuenciaSer

        educacion = Educacion.objects.filter(comuna_id=id)
        educacionSer = EducacionSerial(educacion, many=True).data
        comunaSer['educacion'] = educacionSer

        pobreza = Pobreza.objects.filter(comuna_id=id)
        pobrezaSer = PobrezaSerial(pobreza, many=True).data
        comunaSer['pobreza'] = pobrezaSer

        salud = Salud.objects.filter(comuna_id=id)
        saludSer = SaludSerial(salud, many=True).data
        comunaSer['salud'] = saludSer

        ambiente = Ambiente.objects.filter(comuna_id=id)
        ambienteSer = AmbienteSerial(ambiente, many=True).data
        comunaSer['ambiente'] = ambienteSer

        educacionLimit = Educacion.objects.filter(
            establecimiento_id=1).aggregate(Min('psu_promedio'),
                                            Max('psu_promedio'))
        educacionExt = Educacion.objects.filter(
            Q(psu_promedio=educacionLimit['psu_promedio__min'])
            | Q(psu_promedio=educacionLimit['psu_promedio__max'])).order_by(
                'psu_promedio')
        educacionExtSer = EducacionSerial(educacionExt, many=True).data

        pobrezaLimit = Pobreza.objects.aggregate(Min('poblacion_idx'),
                                                 Max('poblacion_idx'))
        pobrezaExt = Pobreza.objects.filter(
            Q(poblacion_idx=pobrezaLimit['poblacion_idx__min'])
            | Q(poblacion_idx=pobrezaLimit['poblacion_idx__max'])).order_by(
                'poblacion_idx')
        pobrezaExtSer = PobrezaSerial(pobrezaExt, many=True).data

        ambienteLimit = Ambiente.objects.aggregate(Min('metros_idx'),
                                                   Max('metros_idx'))
        ambienteExt = Ambiente.objects.filter(
            Q(metros_idx=ambienteLimit['metros_idx__min'])
            | Q(metros_idx=ambienteLimit['metros_idx__max'])).order_by(
                'metros_idx')
        ambienteExtSer = AmbienteSerial(ambienteExt, many=True).data

        comunaSer['extremos'] = {}
        comunaSer['extremos']['educacion'] = educacionExtSer
        comunaSer['extremos']['pobreza'] = pobrezaExtSer
        comunaSer['extremos']['ambiente'] = ambienteExtSer

        desempleo = Desempleo.objects.filter(region_id=comuna.region.id)
        desempleoSer = DesempleoSerial(desempleo, many=True).data
        comunaSer['desempleo'] = desempleoSer

        #        poblacion = Poblacion.objects.filter(comuna_id=id)
        #        poblacionSer = PoblacionSerial(poblacion, many=True).data
        #        comunaSer['poblacion'] = poblacionSer

        comunaSer['poblacion_adultos_cnt'] = Poblacion.objects.filter(
            anno=2016, comuna_id=comunaSer['id']).aggregate(
                total=Sum('padron_cnt'))['total']
        comunaSer['poblacion_adultos_idx'] = (
            comunaSer['poblacion_adultos_cnt'] *
            100.0) / Poblacion.objects.filter(anno=2016).aggregate(
                total=Sum('padron_cnt'))['total']

        representantes = Representante.objects.filter(comuna_id=id)
        representantesSer = RepresentanteSerial(representantes, many=True).data
        comunaSer['candidatos_alcalde'] = representantesSer

        votos_concejales = []
        cursor.execute(
            "SELECT pacto_id, p.nombre, SUM(votos_cnt) as votos_total FROM resultado r, pacto p WHERE p.id=r.pacto_id and r.comuna_id=%s and r.anno=2012 and r.eleccion_tipo_id=4 group by r.pacto_id ORDER BY votos_total DESC",
            [id])
        rows = cursor.fetchall()
        for row in rows:
            #            comuna = Comuna.objects.get(pk=row[0])
            #            comunaSer = ComunaSerial(comuna, many=False).data
            pacto = {"pacto_id": row[0], "nombre": row[1], "votos_cnt": row[2]}
            #            comunaSer['emitidos_cnt'] = row[1]
            #            comunaSer['poblacion_adultos_cnt'] = row[2]
            #            comunaSer['participacion'] = row[3]
            votos_concejales.append(pacto)
        comunaSer['votos_pacto'] = votos_concejales

        tipos = EleccionTipo.objects.filter(eleccion_grupo_id=tipo_id)

        for tipo in tipos:

            min_anno = Participacion.objects.filter(
                comuna_id=id, eleccion_tipo_id=tipo.id).aggregate(Min('anno'))
            print(min_anno)

            if min_anno['anno__min'] is None:
                continue

            elecciones = EleccionFecha.objects.filter(
                eleccion_tipo_id=tipo.id,
                anno__gte=min_anno['anno__min']).order_by('-anno')
            eleccionesSer = EleccionSerial(elecciones, many=True).data

            for eleccion in eleccionesSer:
                eleccion['poblacion_adultos_cnt'] = Poblacion.objects.filter(
                    anno=eleccion['anno'],
                    comuna_id=comunaSer['id']).aggregate(
                        total=Sum('padron_cnt'))['total']
                participacion = Participacion.objects.filter(
                    anno=eleccion['anno'],
                    eleccion_tipo_id=eleccion['eleccion_tipo']['id'],
                    vuelta=eleccion['vuelta'],
                    comuna_id=comunaSer['id']).values().annotate(
                        emitidos_cnt=Sum('emitidos_cnt'),
                        validos_cnt=Sum('validos_cnt'),
                        blancos_cnt=Sum('blancos_cnt'),
                        nulos_cnt=Sum('nulos_cnt')).order_by('-anno')

                if participacion.count() > 0:
                    eleccion['participacion'] = ParticipacionSerial(
                        participacion, many=True).data[0]

                candidatos = Resultado.objects.filter(
                    eleccion_tipo_id=eleccion['eleccion_tipo']['id'],
                    comuna_id=id,
                    anno=eleccion['anno']).order_by('-votos_cnt')
                eleccion['candidatos'] = ResultadoSerial(candidatos,
                                                         many=True).data

            comunaSer[tipo.nombre.lower()] = eleccionesSer
        return Response(comunaSer)
Example #9
    def handle(self, *args, **options):
        resultid = int(options['resultid'][0])
        email = str(options['email'][0])
        # response_data = {}
        createorupdateL1 = str(options['createorupdateL1'][0])
        pl1 = Processinglevels.objects.get(processinglevelid=2)
        pl0 = Processinglevels.objects.get(processinglevelid=1)
        valuesadded = 0
        tsresultTocopyBulk = []
        tsresultL1 = None
        newtsrv = []  # stays empty if neither branch below runs
        # print('starting L1 create or update')
        # print(createorupdateL1)
        # print('result')
        # print(resultid)
        # print('email')
        # print(email)
        if createorupdateL1 == "create":
            # print('create')
            resultTocopy = Results.objects.get(resultid=resultid)
            tsresultTocopy = Timeseriesresults.objects.get(resultid=resultid)
            resultTocopy.resultid = None
            resultTocopy.processing_level = pl1
            resultTocopy.save()
            tsrvToCopy = Timeseriesresultvalues.objects.filter(resultid=tsresultTocopy)
            tsresultTocopy.resultid = resultTocopy
            tsresultTocopy.save()
            tsresultL1 = tsresultTocopy.resultid
            # tsrvToCopy.update(resultid=tsresultTocopy)
            for tsrv in tsrvToCopy:
                tsrv.resultid = tsresultTocopy
                try:
                    tsrva = Timeseriesresultvalueannotations.objects.get(valueid=tsrv.valueid)
                    tsrv.valueid = None
                    tsrv.save()
                    tsrva.valueid = tsrv
                    # print(tsrv.valueid)
                    tsrva.save()
                except ObjectDoesNotExist:
                    tsrv.valueid = None
                    tsresultTocopyBulk.append(tsrv)
            newtsrv = Timeseriesresultvalues.objects.bulk_create(tsresultTocopyBulk)

        elif createorupdateL1 == "update":
            # print('update')
            tsresultL1 = Timeseriesresults.objects.get(resultid=resultid)
            resultL1 = Results.objects.get(resultid=resultid)
            # tsrvL1 = Timeseriesresultvalues.objects.filter(resultid=tsresultL1)
            tsrvAddToL1Bulk = []
            relatedL0result = Results.objects.filter(
                featureactionid=resultL1.featureactionid,
                variableid=resultL1.variableid,
                unitsid=resultL1.unitsid,
                processing_level=pl0)

            # newresult = relatedL0result.resultid
            relateL0tsresults = Timeseriesresults.objects.filter(resultid__in=relatedL0result)
            relateL0tsresult = None
            for L0result in relateL0tsresults:
                if (L0result.intendedtimespacing == tsresultL1.intendedtimespacing
                        and L0result.intendedtimespacingunitsid
                        == tsresultL1.intendedtimespacingunitsid):
                    relateL0tsresult = L0result
            tsrvL0 = Timeseriesresultvalues.objects.filter(resultid=relateL0tsresult)
            # print(relateL0tsresult)
            # maxtsrvL1=Timeseriesresultvalues.objects.filter(resultid=relateL1tsresult).annotate(
            #        Max('valuedatetime')). \
            #        order_by('-valuedatetime')
            # print(relateL1tsresult)
            # for r in maxtsrvL1:
            #     print(r)
            # print('L1 result')
            # print(tsresultL1)

            # Newest and oldest values per series: order by valuedatetime and
            # take the first row.
            maxtsrvL0 = Timeseriesresultvalues.objects.filter(
                resultid=relateL0tsresult).order_by(
                    '-valuedatetime')[0].valuedatetime
            maxtsrvL1 = Timeseriesresultvalues.objects.filter(
                resultid=tsresultL1).order_by(
                    '-valuedatetime')[0].valuedatetime
            mintsrvL0 = Timeseriesresultvalues.objects.filter(
                resultid=relateL0tsresult).order_by(
                    'valuedatetime')[0].valuedatetime
            mintsrvL1 = Timeseriesresultvalues.objects.filter(
                resultid=tsresultL1).order_by(
                    'valuedatetime')[0].valuedatetime
            # print('max L0')
            # print(maxtsrvL0)
            # print('max L1')
            # print(maxtsrvL1)
            if maxtsrvL1 < maxtsrvL0:
                tsrvAddToL1 = tsrvL0.filter(valuedatetime__gt=maxtsrvL1)
                for tsrv in tsrvAddToL1:
                    tsrv.resultid = tsresultL1
                    try:
                        tsrva = Timeseriesresultvalueannotations.objects.get(valueid=tsrv.valueid)
                        tsrv.valueid = None
                        tsrv.save()
                        tsrva.valueid = tsrv
                        # print(tsrv.valueid)
                        tsrva.save()
                    except ObjectDoesNotExist:
                        # print('doesnt exist')
                        tsrv.valueid = None
                        tsresultTocopyBulk.append(tsrv)
            if mintsrvL1 > mintsrvL0:
                tsrvAddToL1 = tsrvL0.filter(valuedatetime__lt=mintsrvL1)
                for tsrv in tsrvAddToL1:
                    # print(tsresultL1)
                    tsrv.resultid = tsresultL1
                    try:
                        tsrva = Timeseriesresultvalueannotations.objects.get(valueid=tsrv.valueid)
                        tsrv.valueid = None
                        tsrv.save()
                        tsrva.valueid = tsrv
                        # print(tsrv.valueid)
                        tsrva.save()
                    except ObjectDoesNotExist:
                        tsrv.valueid = None
                        tsresultTocopyBulk.append(tsrv)
            newtsrv = Timeseriesresultvalues.objects.bulk_create(tsresultTocopyBulk)
        valuesadded = len(newtsrv)
        # print('values added')
        # print(valuesadded)
        # for tsrv in newtsrv:
        #     print(tsrv.resultid.resultid)
        #     print(tsrv)
        emailtitle = "L1 complete"
        tolist = []
        emailtext = ' L1 result complete for new or updated result: ' + str(tsresultL1) + '\n' + ' values added to time series: ' + str(valuesadded)
        for admin in settings.ADMINS:
            tolist.append(admin['email'])
        tolist.append(email)
        # print(tolist)
        if len(email) > 0:
            email = EmailMessage(emailtitle, emailtext, settings.EMAIL_FROM_ADDRESS, tolist)
            # print('email')
            # print(emailtext)
            emailout = email.send()
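
A side note on the endpoint lookups above: annotating Max('valuedatetime') does not change which row order_by()[0] returns, and both endpoints of a series are usually fetched with one aggregate() call. A sketch against the same models:

from django.db.models import Max, Min

span = Timeseriesresultvalues.objects.filter(
    resultid=tsresultL1).aggregate(
        Min('valuedatetime'), Max('valuedatetime'))
mintsrvL1 = span['valuedatetime__min']  # None when the series is empty
maxtsrvL1 = span['valuedatetime__max']
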
Example #10
    def test_populate_line_item_daily_summary_table(self, mock_vacuum):
        """Test that the daily summary table is populated."""
        summary_table_name = AZURE_REPORT_TABLE_MAP['line_item_daily_summary']
        summary_table = getattr(self.accessor.report_schema,
                                summary_table_name)

        for _ in range(10):
            bill = self.creator.create_azure_cost_entry_bill(
                provider_id=self.azure_provider.id)
            product = self.creator.create_azure_cost_entry_product()
            meter = self.creator.create_azure_meter()
            service = self.creator.create_azure_service()
            self.creator.create_azure_cost_entry_line_item(
                bill, product, meter, service)

        bills = self.accessor.get_cost_entry_bills_query_by_provider(
            self.azure_provider.id)
        with schema_context(self.schema):
            bill_ids = [str(bill.id) for bill in bills.all()]

        table_name = AZURE_REPORT_TABLE_MAP['line_item']
        line_item_table = getattr(self.accessor.report_schema, table_name)
        tag_query = self.accessor._get_db_obj_query(table_name)
        possible_keys = []
        possible_values = []
        with schema_context(self.schema):
            for item in tag_query:
                possible_keys += list(item.tags.keys())
                possible_values += list(item.tags.values())

            li_entry = line_item_table.objects.all().aggregate(
                Min('usage_date_time'), Max('usage_date_time'))
            start_date = li_entry['usage_date_time__min']
            end_date = li_entry['usage_date_time__max']

        start_date = start_date.replace(hour=0,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

        query = self.accessor._get_db_obj_query(summary_table_name)
        with schema_context(self.schema):
            initial_count = query.count()

        self.accessor.populate_line_item_daily_summary_table(
            start_date, end_date, bill_ids)
        with schema_context(self.schema):
            self.assertNotEqual(query.count(), initial_count)

            summary_entry = summary_table.objects.all().aggregate(
                Min('usage_date_time'), Max('usage_date_time'))
            result_start_date = summary_entry['usage_date_time__min']
            result_end_date = summary_entry['usage_date_time__max']

            self.assertEqual(result_start_date, start_date)
            self.assertEqual(result_end_date, end_date)

            entry = query.order_by('-id')

            summary_columns = [
                'usage_date_time',
                'usage_quantity',
                'pretax_cost',
                'offer_id',
                'cost_entry_bill_id',
                'meter_id',
                'tags',
            ]

            for column in summary_columns:
                self.assertIsNotNone(getattr(entry.first(), column))

            found_keys = []
            found_values = []
            for item in query.all():
                found_keys += list(item.tags.keys())
                found_values += list(item.tags.values())

            self.assertEqual(set(possible_keys), set(found_keys))
            self.assertEqual(set(possible_values), set(found_values))
Example #11
    def post(self, request, format=None):
        serializer = ScoreSerializer(data=request.data,
                                     context={'request': request})
        if serializer.is_valid():
            s = serializer.save()
            datetime_current = datetime.today()
            year = datetime_current.year
            month = datetime_current.month
            num_days = calendar.monthrange(year, month)[1]

            datetime_start = datetime(year, month, 1, 0, 0)

            datetime_end = datetime(year, month, num_days, 23, 59)

            user_request = User.objects.get(pk=request.user.id)
            competitor_request = Competitor.objects.get(user=user_request)

            if s.score <= 25000:
                s.delete()
                return Response({
                    "message": "Score added",
                    "rank": 110
                },
                                status=status.HTTP_201_CREATED)

            #score_competitor = Score.objects.filter(competitor=competitor_request).filter(
            #    creationDate__range=(datetime_start, datetime_end)).order_by('score')[:1]

            # competitor_request = Competitor.objects.get(user=user_request)
            # scores = Score.objects.filter(creationDate__range=(datetime_start, datetime_end)).order_by('score')[:100]
            scores = Score.objects.filter(creationDate__range=(
                datetime_start, datetime_end)).values('competitor').annotate(
                    score=Min('score')).order_by('score')[:105]
            i = 1

            for score in scores:
                if s.score <= score['score']:
                    return Response(
                        {
                            "message": "Score added",
                            "rank": i,
                            "score": s.score,
                            "larger": score['score']
                        },
                        status=status.HTTP_201_CREATED)
                i += 1

            # The new score didn't beat any stored entry; its rank is i whether
            # or not that lands inside the top 100.
            return Response({
                "message": "Score added",
                "rank": i
            },
                            status=status.HTTP_201_CREATED)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
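
The ranking query is the core pattern here: group scores by competitor and keep each competitor's best (lowest) one. On its own, with the alias renamed to best because recent Django versions reject an annotation name that clashes with a model field:

from django.db.models import Min

best_per_competitor = (Score.objects
                       .filter(creationDate__range=(datetime_start, datetime_end))
                       .values('competitor')
                       .annotate(best=Min('score'))
                       .order_by('best'))
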
Example #12
def ReproduceSentenceEval(request, pk):
    if request.method != 'POST':
        raise Http404
    example_inst = get_object_or_404(Example, pk=pk)
    verb_pk = example_inst.verb.pk
    verb = example_inst.verb
    if request.method == 'POST':
        form = ReproduceSentenceForm(request.POST)
        if form.is_valid():
            # fuzzy string comparison to examine the accuracy of user input
            user_input = strip_punct_lower(form.cleaned_data['sentence_field'])
            answer = strip_punct_lower(example_inst.russian_text)
            score = fuzz.ratio(user_input, answer)
            request.session['current_score'].append(score)
            request.session['progress'] += round(1 / 12, 3) * 100
            if request.session['progress'] > 95:
                request.session['progress'] = 100
            """
            calculate the average score from the session list (this is what
            gets written to the PerformancePerExample model)
            """
            average_score = sum(request.session['current_score']) / len(
                request.session['current_score'])
            average_score = round(average_score, 2)
            average_score /= 100
            request.session.setdefault('quiz_summary', []).append(
                (example_inst.russian_text, example_inst.translation_text,
                 request.session['current_score'][0],
                 request.session['current_score'][1],
                 request.session['current_score'][2], score,
                 int(average_score * 100)))
            request.session.modified = True
            """
            test whether the user already has a PerformancePerExample for
            this verb
            """
            if not PerformancePerExample.objects.filter(
                    example=example_inst, user=request.user).exists():
                PerformanceObj = PerformancePerExample(
                    example=example_inst,
                    user=request.user,
                    easiness_factor=2.5,
                    last_interval=1,
                    date_last_studied=datetime.date.today(),
                    due_date=datetime.date.today())
            else:
                PerformanceObj = PerformancePerExample.objects.get(
                    example=example_inst, user=request.user)
            PerformanceObj.update_interval(average_score)

            # after update, determine which example is due next
            pk = Example.objects.annotate(
                due=Min('performanceperexample__due_date',
                        filter=Q(performanceperexample__user_id=request.user.pk
                                 ))).filter(verb=verb).order_by(
                                     F('due').asc(nulls_first=True))[0].pk
            """
            If the user has previously studied the next example, send them right
            into the quiz; otherwise, send them to the study page before the
            quiz
            """
            if PerformancePerExample.objects.filter(
                    pk=pk, user=request.user).exists():

                not_studied = 0
            else:
                not_studied = 1
            try:
                request.session['quiz_counter'] += 1
            except KeyError:
                request.session['quiz_counter'] = 1
            if request.session['quiz_counter'] >= 3:
                request.session['quiz_state'] += 1
            else:
                request.session['quiz_state'] = 0
            print(request.session['quiz_state'],
                  request.session['quiz_counter'])
            request.session.modified = True
            return render(request,
                          'ruskeyverbs/answer_evaluation.html',
                          context={
                              'pk':
                              pk,
                              'score':
                              score,
                              'user_input':
                              user_input,
                              'answer':
                              example_inst.russian_text.replace(
                                  stress_mark, ''),
                              'russian_text':
                              example_inst.russian_text,
                              'quiz_state':
                              4,
                              'verb_pk':
                              verb_pk,
                              'file':
                              example_inst.example_audio,
                              'not_studied':
                              not_studied
                          })
        else:
            messages.warning(
                request,
                "Something went wrong. You did not enter a valid answer.")
            return HttpResponseRedirect(reverse('arrange-words', args=[pk]))
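
The scheduling query buried in the middle is the most interesting line: Min(..., filter=Q(...)) (Django 2.0+) aggregates only the current user's rows, and nulls_first=True floats never-studied examples to the front. Isolated, with the same models assumed:

from django.db.models import F, Min, Q

next_example = (Example.objects
                .annotate(due=Min(
                    'performanceperexample__due_date',
                    filter=Q(performanceperexample__user_id=request.user.pk)))
                .filter(verb=verb)
                .order_by(F('due').asc(nulls_first=True))
                .first())
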
Example #13
    def min(self):
        q = self.userSitesDay.aggregate(Min('dailyTime'))
        return self.get_data(q, 'min')
Example #14
    def get(self, request, slug):
        try:
            price_range_min_filter = float(request.GET['min-value'])
        except (KeyError, ValueError):
            price_range_min_filter = 0
        try:
            price_range_max_filter = float(request.GET['max-value'])
        except (KeyError, ValueError):
            price_range_max_filter = 10 * 100
        user_slug = slug
        category_slug = slug.split('/')
        parent = None
        root = Category.objects.all()

        try:
            for slug in category_slug[:-1]:
                parent = root.get(parent=parent, slug=slug)
        except Category.DoesNotExist:
            return HttpResponse('404 - 1')

        try:
            category = get_object_or_404(Category,
                                         parent=parent,
                                         slug=category_slug[-1])

        except Http404:
            product = get_object_or_404(Product,
                                        slug=category_slug[-1],
                                        is_publish=True)

            if product.get_product_url() != user_slug:
                return HttpResponse('404 - 2')
            form = CartAddProductForm(request.POST or None,
                                      extra={
                                          'slug': category_slug[-1],
                                          'cart': Cart(request)
                                      })
            return render(request,
                          'shop/product-virtual.html',
                          context={
                              'product': product,
                              'form': form
                          })

        else:
            products_by_category = Product.objects.filter(
                category__in=category.get_descendants(include_self=True),
                is_publish=True,
                price__gte=price_range_min_filter,
                price__lte=price_range_max_filter)
            context = self.get_pagination(products_by_category)
            context['all_category'] = Category.objects.all()
            context['price_range'] = Product.objects.filter(
                category__in=category.get_descendants(include_self=True),
                is_publish=True).aggregate(Min('price'), Max('price'))
            context['price_range']['price__min'] = str(
                context['price_range']['price__min'])
            context['price_range']['price__max'] = str(
                context['price_range']['price__max'])
            context['obj_selected_category'] = category
            return render(request, 'shop/shop.html', context=context)
Example #15
    def handle(self, *args, **options):
        # Loop over classes
        class_slugs = list(ProteinFamily.objects.filter(parent__slug="000") \
                            .filter(slug__startswith="00").values_list("slug"))

        for slug in class_slugs:
            print("Processing class {}".format(slug[0]))

            # grab all PDB-codes for this class
            structure_ids = list(Structure.objects.filter(protein_conformation__protein__family__slug__startswith=slug[0]) \
                                .values_list("pdb_code__index"))
            structure_ids = [x[0] for x in structure_ids]

#            print("Identified the following PDBs")
#            print(structure_ids)

            # Get all PDB-codes for G-protein coupled structures in this class
            # extra: filter 6CMO (unfit reference, see hardcoded exceptions)
            active_ids = list(SignprotComplex.objects.filter(structure__pdb_code__index__in=structure_ids) \
                                .exclude(structure__pdb_code__index="6CMO") \
                                .values_list("structure__pdb_code__index"))
            active_ids = [x[0] for x in active_ids] # flatten
            #                print("The following PDBs are G-prot complex structures:")
            #                print(active_ids)

            # Grab most inactive PDB per ligandType -> 2x46 - 6x37 distance present and <13Å (all classes)
            inactive_ids = list(Distance.objects.filter(distance__lt=1300) \
                                .filter(gn1="2x46").filter(gn2="6x37") \
                                .filter(structure__pdb_code__index__in=structure_ids) \
                                .order_by("structure__protein_conformation__protein__family__parent__parent__name", "distance") \
                                .distinct("structure__protein_conformation__protein__family__parent__parent__name") \
                                .values_list("structure__pdb_code__index"))
            inactive_ids = [x[0] for x in inactive_ids]

            if len(structure_ids) > 0 and len(active_ids) > 0 and len(inactive_ids) > 0:

#                print("The following PDBs are inactive state structures:")
#                print(inactive_ids)

                # create distance matrix for given structures on lower half TM + G-Prot only residues
                dis = Distances()
                dis.lower_only = True # only lower half TM + G-prot only helices
                dis.load_pdbs(structure_ids)
                distance_matrix = dis.get_distance_matrix(True, False) # normalize, but don't use the cache
                distance_matrix = pd.DataFrame(distance_matrix, columns=dis.pdbs, index=dis.pdbs)

                # # Calculate score per pdbs directly based on distance matrix
                # scoring_results = {}
                # for pdb in dis.pdbs:
                #     print("Processing {}".format(pdb))
                #     min_active_distance = min(distance_matrix.loc[pdb, active_ids])
                #     min_inactive_distance = min(distance_matrix.loc[pdb, inactive_ids])
                #
                #     scoring_results[pdb] = min_active_distance-min_inactive_distance


                # hierarchical clustering -> create distance matrix from tree
                hclust = sch.linkage(ssd.squareform(distance_matrix), method='average')
                tree = sch.to_tree(hclust, False)
                tree_distance = getDistanceMatrix(tree, dis.pdbs)
                finalMap = {}
                for d in tree_distance:
                    finalMap.update(d)

                # Calculate score per pdbs
                scoring_results = {}
                for pdb in dis.pdbs:
#                    print("Processing {}".format(pdb))
                    min_active_distance = mean([ finalMap[pdb+"_"+x] for x in active_ids ])
                    min_inactive_distance = mean([ finalMap[pdb+"_"+x] for x in inactive_ids ])
                    scoring_results[pdb] = min_active_distance-min_inactive_distance

#                    print("{}|{}|{}|{}".format(pdb, scoring_results[pdb], min_active_distance, min_inactive_distance))

                # Hardcoded annotations
                hardcoded = {
                    "6CMO" : "active", # Complex with G prot - irregular conformation
                    "5ZKP" : "unknown" # Unknown state (auto-inhibited with H8?)
                }

                distances = list(Distance.objects.filter(gn1="2x46").filter(gn2="6x37") \
                                    .filter(structure__pdb_code__index__in=structure_ids) \
                                    .distinct("gns_pair", "structure") \
                                    .values_list("structure__pdb_code__index", "distance"))

                range_distance = Distance.objects.filter(gn1="2x46").filter(gn2="6x37") \
                                    .filter(structure__pdb_code__index__in=structure_ids) \
                                    .aggregate(Max('distance'), Min('distance'))

                min_open = range_distance['distance__min']
                max_open = range_distance['distance__max']

                # find smallest distance between any active structure and any inactive structure
                lowest_inactive_distance = min([ finalMap[y+"_"+x] for y in inactive_ids for x in active_ids ])
                for entry in distances:
                    # Percentage score
                    percentage = int(round((entry[1]-min_open)/(max_open-min_open)*100))
                    if percentage < 0:
                        percentage = 0
                    elif percentage > 100:
                        percentage = 100

                    # Classification
                    score = scoring_results[entry[0]]
                    structure_state = "inactive"
                    if entry[1] >= 13: # below this distance always inactive structure
                        if score < -0.95*lowest_inactive_distance:
                            structure_state = "active"
                        elif score < 0:
                            structure_state = "intermediate"

                    # UGLY: knowledge-based hardcoded corrections
                    if entry[0] in hardcoded:
                        structure_state = hardcoded[entry[0]]

                    # Store for structure
                    struct = Structure.objects.get(pdb_code__index=entry[0])
                    struct.state, created = ProteinState.objects.get_or_create(slug=structure_state, defaults={'name': structure_state.capitalize()})
                    struct.tm6_angle = percentage
                    struct.save()

                    #print("Class {}: structure {} to state {} and opening is {}%".format(slug, entry[0], structure_state, percentage))
            elif len(structure_ids) > 0:
                distances = list(Distance.objects.filter(gn1="2x46").filter(gn2="6x37") \
                                    .filter(structure__pdb_code__index__in=structure_ids) \
                                    .distinct("gns_pair", "structure") \
                                    .values_list("structure__pdb_code__index", "distance"))

                range_distance = Distance.objects.filter(gn1="2x46").filter(gn2="6x37") \
                                    .aggregate(Max('distance'), Min('distance'))

                min_open = range_distance['distance__min']
                max_open = range_distance['distance__max']
                for entry in distances:
                    # Percentage score
                    percentage = int(round((entry[1]-min_open)/(max_open-min_open)*100))
                    if percentage < 0:
                        percentage = 0
                    elif percentage > 100:
                        percentage = 100

                    # Store for structure
                    struct = Structure.objects.get(pdb_code__index=entry[0])
                    struct.tm6_angle = percentage

                    # Definitely an inactive-state structure when the distance is smaller than 13 Å
                    if entry[1] < 13:
                        struct.state, created = ProteinState.objects.get_or_create(slug="inactive", defaults={'name': "Inactive"})

                    # Save changes
                    struct.save()
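The percentage stored as tm6_angle above is a plain min-max normalisation clamped to [0, 100]. A minimal standalone sketch of that step (function name and sample values are illustrative, not part of the original):

def distance_to_percentage(distance, min_open, max_open):
    # Min-max normalise a 2x46-6x37 distance into a 0-100 opening score,
    # clamping values that fall outside the observed range.
    percentage = int(round((distance - min_open) / (max_open - min_open) * 100))
    return max(0, min(100, percentage))

# distance_to_percentage(13.0, 8.0, 18.0) == 50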
Beispiel #16
0
 def top(self):
     smallest_order = Folder.objects.aggregate(Min('order'))["order__min"]
     # Guard against an empty table, where the aggregate returns None
     if smallest_order is None:
         smallest_order = 0
     self.order = smallest_order - 1
     self.save()
Beispiel #17
0
def index(request, list_all=0, page=1):
    """index"""
    errors = ''
    success = True
    active_filters = []
    saved_filters = []
    data = Traffic.objects
    filters = WebSavedFilter.objects.all().filter(user=request.user)
    filter_form = FilterForm()
    if request.method == 'POST':
        filter_form = FilterForm(request.POST)
        if filter_form.is_valid():
            cleaned_data = filter_form.cleaned_data
            in_field = force_escape(cleaned_data['filtered_field'])
            in_value = force_escape(cleaned_data['filtered_value'])
            in_filtered_by = int(cleaned_data['filtered_by'])
            if not request.session.get('web_filter_by', False):
                request.session['web_filter_by'] = []
                request.session['web_filter_by'].append({
                    'field': in_field,
                    'filter': in_filtered_by,
                    'value': in_value
                })
            else:
                fitem = {
                    'field': in_field,
                    'filter': in_filtered_by,
                    'value': in_value
                }
                if not fitem in request.session['web_filter_by']:
                    request.session['web_filter_by'].append(fitem)
                    request.session.modified = True
                else:
                    success = False
                    errors = _("The requested filter is already being used")
            filter_list = request.session.get('web_filter_by')
            data = gen_dynamic_query(data, filter_list, active_filters, True)
        else:
            success = False
            error_list = list(filter_form.errors.values())[0]
            errors = error_list[0]
            if request.session.get('web_filter_by', False):
                filter_list = request.session.get('web_filter_by')
                data = gen_dynamic_query(data, filter_list, active_filters,
                                         True)
    else:
        if request.session.get('web_filter_by', False):
            filter_list = request.session.get('web_filter_by')
            data = gen_dynamic_query(data, filter_list, active_filters, True)
    data = data.aggregate(count=Count('date'),
                          newest=Max('date'),
                          oldest=Min('date'))
    if filters.count() > 0:
        if request.session.get('web_filter_by', False):
            filter_list = request.session.get('web_filter_by')
        else:
            filter_list = []
        for filt in filters:
            loaded = 0
            if filter_list:
                loaded = 0
                for fitem in filter_list:
                    if fitem['filter'] == filt.op_field and (
                            fitem['value'] == filt.value
                            and fitem['field'] == filt.field):
                        loaded = 1
                        break
            saved_filters.append({
                'filter_id': filt.id,
                'filter_name': force_escape(filt.name),
                'is_loaded': loaded
            })
    if request.is_ajax():
        if not data['newest'] is None and not data['oldest'] is None:
            data['newest'] = data['newest'].strftime("%a %d %b %Y @ %H:%M %p")
            data['oldest'] = data['oldest'].strftime("%a %d %b %Y @ %H:%M %p")
        else:
            data['newest'] = ''
            data['oldest'] = ''
        response = anyjson.dumps({
            'success': success,
            'data': data,
            'errors': errors,
            'active_filters': active_filters,
            'saved_filters': saved_filters
        })
        return HttpResponse(
            response, content_type='application/javascript; charset=utf-8')

    return render_to_response('web/reports/index.html', {
        'form': filter_form,
        'data': data,
        'errors': errors,
        'active_filters': active_filters,
        'saved_filters': saved_filters
    },
                              context_instance=RequestContext(request))
Beispiel #18
0
def WriteToExcel(weather_data, town=None):
    # xlsxwriter emits binary data, so the buffer must be BytesIO
    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet_s = workbook.add_worksheet("Summary")

    # excel styles
    title = workbook.add_format({
        'bold': True,
        'font_size': 14,
        'align': 'center',
        'valign': 'vcenter'
    })
    header = workbook.add_format({
        'bg_color': '#F7F7F7',
        'color': 'black',
        'align': 'center',
        'valign': 'top',
        'border': 1
    })
    cell = workbook.add_format({
        'align': 'left',
        'valign': 'top',
        'text_wrap': True,
        'border': 1
    })
    cell_center = workbook.add_format({
        'align': 'center',
        'valign': 'top',
        'border': 1
    })

    # write title
    if town:
        town_text = town.name
    else:
        town_text = ugettext("all recorded towns")
    title_text = "{0} {1}".format(ugettext("Weather History for"), town_text)
    # merge cells
    worksheet_s.merge_range('B2:I2', title_text, title)

    # write header
    worksheet_s.write(4, 0, ugettext("No"), header)
    worksheet_s.write(4, 1, ugettext("Town"), header)
    worksheet_s.write(4, 2, ugettext("Date"), header)
    worksheet_s.write(4, 3, ugettext("Description"), header)
    worksheet_s.write(4, 4, ugettext("Max T. (℃)"), header)
    worksheet_s.write(4, 5, ugettext("Min T. (℃)"), header)
    worksheet_s.write(4, 6, ugettext("Wind (km/h)"), header)
    worksheet_s.write(4, 7, ugettext("Precip. (mm)"), header)
    worksheet_s.write(4, 8, ugettext("Precip. (%)"), header)
    worksheet_s.write(4, 9, ugettext("Observations"), header)

    # column widths
    town_col_width = 10
    description_col_width = 10
    observations_col_width = 25

    # add data to the table
    for idx, data in enumerate(weather_data):
        row = 5 + idx
        worksheet_s.write_number(row, 0, idx + 1, cell_center)

        worksheet_s.write_string(row, 1, data.town.name, cell)
        if len(data.town.name) > town_col_width:
            town_col_width = len(data.town.name)

        worksheet_s.write(row, 2, data.date.strftime('%d/%m/%Y'), cell_center)
        worksheet_s.write_string(row, 3, data.description, cell)
        if len(data.description) > description_col_width:
            description_col_width = len(data.description)

        worksheet_s.write_number(row, 4, data.max_temperature, cell_center)
        worksheet_s.write_number(row, 5, data.min_temperature, cell_center)
        worksheet_s.write_number(row, 6, data.wind_speed, cell_center)
        worksheet_s.write_number(row, 7, data.precipitation, cell_center)
        worksheet_s.write_number(row, 8,
                                 data.precipitation_probability, cell_center)

        observations = data.observations.replace('\r', '')
        worksheet_s.write_string(row, 9, observations, cell)
        observations_rows = compute_rows(observations, observations_col_width)
        worksheet_s.set_row(row, 15 * observations_rows)

    # change column widths
    worksheet_s.set_column('B:B', town_col_width)  # Town column
    worksheet_s.set_column('C:C', 11)  # Date column
    worksheet_s.set_column('D:D', description_col_width)  # Description column
    worksheet_s.set_column('E:E', 10)  # Max Temp column
    worksheet_s.set_column('F:F', 10)  # Min Temp column
    worksheet_s.set_column('G:G', 10)  # Wind Speed column
    worksheet_s.set_column('H:H', 11)  # Precipitation column
    worksheet_s.set_column('I:I', 11)  # Precipitation % column
    worksheet_s.set_column('J:J', observations_col_width)  # Observations column

    row = row + 1
    # Adding some functions for the data
    max_temp_avg = Weather.objects.all().aggregate(Avg('max_temperature'))
    worksheet_s.write_formula(row, 4,
                              '=average({0}{1}:{0}{2})'.format('E', 6, row),
                              cell_center,
                              max_temp_avg['max_temperature__avg'])
    min_temp_avg = Weather.objects.all().aggregate(Avg('min_temperature'))
    worksheet_s.write_formula(row, 5,
                              '=average({0}{1}:{0}{2})'.format('F', 6, row),
                              cell_center,
                              min_temp_avg['min_temperature__avg'])
    wind_avg = Weather.objects.all().aggregate(Avg('wind_speed'))
    worksheet_s.write_formula(row, 6,
                              '=average({0}{1}:{0}{2})'.format('G', 6, row),
                              cell_center,
                              wind_avg['wind_speed__avg'])
    precip_sum = Weather.objects.all().aggregate(Sum('precipitation'))
    worksheet_s.write_formula(row, 7,
                              '=sum({0}{1}:{0}{2})'.format('H', 6, row),
                              cell_center,
                              precip_sum['precipitation__sum'])
    precip_prob_avg = Weather.objects.all() \
        .aggregate(Avg('precipitation_probability'))
    worksheet_s.write_formula(row, 8,
                              '=average({0}{1}:{0}{2})'.format('I', 6, row),
                              cell_center,
                              precip_prob_avg['precipitation_probability__avg'])

    # add more Sheets
    worksheet_c = workbook.add_worksheet("Charts")
    worksheet_d = workbook.add_worksheet("Chart Data")

    if town:
        towns = [town]
    else:
        towns = Town.objects.all()

    # Creating the Line Chart
    line_chart = workbook.add_chart({'type': 'line'})
    # adding dates for the values
    dates = Weather.objects.order_by('date').filter(
        town=Town.objects.first()).values_list('date', flat=True)
    str_dates = []
    for d in dates:
        str_dates.append(d.strftime('%d/%m/%Y'))
    worksheet_d.write_column('A1', str_dates)
    worksheet_d.set_column('A:A', 10)

    # add data for the line chart
    for idx, t in enumerate(towns):
        data = Weather.objects.order_by('date').filter(town=t)
        letter_max_t = chr(ord('B') + idx)
        letter_min_t = chr(ord('B') + idx + len(towns))
        worksheet_d.write_column(
            "{0}1".format(letter_max_t),
            data.values_list('max_temperature', flat=True))
        worksheet_d.write_column(
            "{0}1".format(letter_min_t),
            data.values_list('min_temperature', flat=True))

        # add data to line chart series
        line_chart.add_series({
            'categories': "='Chart Data'!$A$1:$A${0}".format(len(dates)),
            'values': "='Chart Data'!${0}${1}:${0}${2}"
            .format(letter_max_t, 1, len(data)),
            'marker': {'type': 'square'},
            'name': "{0} {1}".format(ugettext("Max T."), t.name)
        })
        line_chart.add_series({
            'categories': "='Chart Data'!$A$1:$A${0}".format(len(dates)),
            'values': "='Chart Data'!${0}${1}:${0}${2}"
            .format(letter_min_t, 1, len(data)),
            'marker': {'type': 'circle'},
            'name': "{0} {1}".format(ugettext("Min T."), t.name)
        })
    # adding other options
    line_chart.set_title({'name': ugettext("Maximum and Minimum Temperatures")})
    line_chart.set_x_axis({
        'text_axis': True,
        'date_axis': False
    })
    line_chart.set_y_axis({
        'num_format': '## ℃'
    })
    # Insert Chart to "Charts" Sheet
    worksheet_c.insert_chart('B2', line_chart, {'x_scale': 2, 'y_scale': 1})

    # Creating the column chart
    bar_chart = workbook.add_chart({'type': 'column'})

    # creating data for column chart
    cell_index = len(towns) * 2 + 2
    for idx, t in enumerate(towns):
        max_wind = Weather.objects.filter(town=t).aggregate(Max('wind_speed'))
        min_wind = Weather.objects.filter(town=t).aggregate(Min('wind_speed'))
        worksheet_d.write_string(idx, cell_index, t.name)
        worksheet_d.write_number(
            idx, cell_index + 1, max_wind['wind_speed__max'])
        worksheet_d.write_number(
            idx, cell_index + 2, min_wind['wind_speed__min'])

    # add series
    bar_chart.add_series({
        'name': 'Max Speed',
        'values': "='Chart Data'!${0}${1}:${0}${2}"
        .format(chr(ord('A') + cell_index + 1), 1, len(towns)),
        'categories': "='Chart Data'!${0}${1}:${0}${2}"
        .format(chr(ord('A') + cell_index), 1, len(towns)),
        'data_labels': {'value': True, 'num_format': '#0 "km/h"'}
    })
    bar_chart.add_series({
        'name': 'Min Speed',
        'values': "='Chart Data'!${0}${1}:${0}${2}"
        .format(chr(ord('A') + cell_index + 2), 1, len(towns)),
        'categories': "='Chart Data'!${0}${1}:${0}${2}"
        .format(chr(ord('A') + cell_index), 1, len(towns)),
        'data_labels': {'value': True, 'num_format': '#0 "km/h"'}
    })
    # adding other options
    bar_chart.set_title({'name': ugettext("Maximum and minimum wind speeds")})

    worksheet_c.insert_chart('B20', bar_chart, {'x_scale': 1, 'y_scale': 1})

    # Creating the pie chart
    pie_chart = workbook.add_chart({'type': 'pie'})

    # creating data for pie chart
    pie_values = []
    pie_values.append(Weather.objects.filter(max_temperature__gt=20).count())
    pie_values.append(Weather.objects.filter(max_temperature__lte=20,
                                             max_temperature__gte=10).count())
    pie_values.append(Weather.objects.filter(max_temperature__lt=10).count())
    pie_categories = ["T > 20", "10 <= T <= 20", "T < 10"]  # match the filters above

    # adding the data to "Chart Data" Sheet
    cell_index = cell_index + 4
    worksheet_d.write_column("{0}1".format(chr(ord('A') + cell_index)),
                             pie_values)
    worksheet_d.write_column("{0}1".format(chr(ord('A') + cell_index + 1)),
                             pie_categories)

    # adding the data to the chart
    pie_chart.add_series({
        'name': ugettext('Temperature statistics'),
        'values': "='Chart Data'!${0}${1}:${0}${2}"
        .format(chr(ord('A') + cell_index), 1, 3),
        'categories': "='Chart Data'!${0}${1}:${0}${2}"
        .format(chr(ord('A') + cell_index + 1), 1, 3),
        'data_labels': {'percentage': True}
    })

    # insert the chart on "Charts" Sheet
    worksheet_c.insert_chart('J20', pie_chart)

    # close workbook
    workbook.close()
    xlsx_data = output.getvalue()
    return xlsx_data
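A common way to serve the returned bytes from a view (the view name and filename here are illustrative, not part of the original):

from django.http import HttpResponse

def weather_excel_view(request):
    # WriteToExcel returns the raw xlsx bytes from the BytesIO buffer
    xlsx_data = WriteToExcel(Weather.objects.all())
    response = HttpResponse(
        xlsx_data,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=weather_report.xlsx'
    return response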
Beispiel #19
0
    def recalculate_concepts(self, concepts, lang=None):
        """
        Recalculated given concepts for given users

        Args:
            concepts (dict): maps user id (int) to the set of concepts to recalculate
            lang (Optional[str]): language used to get items in all concepts (cached).
                Defaults to None, in which case items are fetched only for the used concepts
        """
        if len(concepts) == 0:
            return

        if lang is None:
            items = Concept.objects.get_concept_item_mapping(
                concepts=Concept.objects.filter(
                    pk__in=set(flatten(concepts.values()))))
        else:
            items = Concept.objects.get_concept_item_mapping(lang=lang)

        environment = get_environment()
        mastery_threshold = get_mastery_trashold()
        for user, user_concepts in concepts.items():
            all_items = list(set(flatten([items[c] for c in user_concepts])))
            answer_counts = dict(
                list(
                    zip(
                        all_items,
                        environment.number_of_answers_more_items(
                            all_items, user))))
            correct_answer_counts = dict(
                list(
                    zip(
                        all_items,
                        environment.number_of_correct_answers_more_items(
                            all_items, user))))
            predictions = dict(
                list(
                    zip(
                        all_items,
                        get_predictive_model().predict_more_items(environment,
                                                                  user,
                                                                  all_items,
                                                                  time=None))))
            new_user_stats = []
            stats_to_delete_condition = Q()
            for concept in user_concepts:
                answer_aggregates = Answer.objects.filter(
                    user=user, item__in=items[concept]).aggregate(
                        time_spent=Sum("response_time"),
                        sessions=Count("session", True),
                        time_first=Min("time"),
                        time_last=Max("time"),
                    )
                stats = {
                    "answer_count":
                    sum(answer_counts[i] for i in items[concept]),
                    "correct_answer_count":
                    sum(correct_answer_counts[i] for i in items[concept]),
                    "item_count":
                    len(items[concept]),
                    "practiced_items_count":
                    sum([answer_counts[i] > 0 for i in items[concept]]),
                    "mastered_items_count":
                    sum([
                        predictions[i] >= mastery_threshold
                        for i in items[concept]
                    ]),
                    "prediction":
                    sum([predictions[i]
                         for i in items[concept]]) / len(items[concept]),
                    "time_spent":
                    answer_aggregates["time_spent"] / 1000,
                    "session_count":
                    answer_aggregates["sessions"],
                    "time_first":
                    answer_aggregates["time_first"].timestamp(),
                    "time_last":
                    answer_aggregates["time_last"].timestamp(),
                }
                stats_to_delete_condition |= Q(user=user, concept=concept)
                for stat_name, value in stats.items():
                    new_user_stats.append(
                        UserStat(user_id=user,
                                 concept_id=concept,
                                 stat=stat_name,
                                 value=value))
            self.filter(stats_to_delete_condition).delete()
            self.bulk_create(new_user_stats)
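Judging from the self.filter/self.bulk_create calls, this method lives on the UserStat manager; a hedged usage sketch (the ids are illustrative):

# Recalculate stats for two users; keys are user ids, values are sets
# of concept primary keys to refresh.
UserStat.objects.recalculate_concepts({
    1: {10, 11},
    2: {11},
}, lang='en')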
Beispiel #20
0
    def get_context_data(self, **kwargs):
        context = super(EventsView, self).get_context_data(**kwargs)

        # Get year range for datepicker.
        aggregates = Event.objects.aggregate(Min('start_time'),
                                             Max('start_time'))

        context['year_range_min'] = aggregates['start_time__min'].year
        context['year_range_max'] = aggregates['start_time__max'].year

        # Did the user set date boundaries?
        date_str = self.request.GET.get('form_datetime')
        day_grouper = lambda x: (x.start_time.year, x.start_time.month,
                                 x.start_time.day)
        context['select_date'] = ''

        # If yes, then filter for dates.
        if date_str:
            context['date'] = date_str
            date_time = parser.parse(date_str)

            select_events = Event.objects.filter(start_time__gt=date_time)\
                .filter(start_time__lt=(date_time + relativedelta(months=1)))\
                .order_by('start_time')

            org_select_events = []

            for event_date, events in itertools.groupby(select_events,
                                                        key=day_grouper):
                events = sorted(events, key=attrgetter('start_time'))
                org_select_events.append([date(*event_date), events])

            context['select_events'] = org_select_events
            context['select_date'] = date_time.strftime(
                "%B") + " " + date_time.strftime("%Y")

        # If no, then return upcoming events.
        else:
            # Upcoming events for the current month.
            upcoming_events = Event.objects.filter(start_time__gt=timezone.now())\
                .filter(start_time__lt=(timezone.now() + relativedelta(months=1)))\
                .order_by('start_time')

            if len(upcoming_events) < 3:
                # Too few events this month; extend the window to the next two months.
                upcoming_events = Event.objects.filter(start_time__gt=timezone.now())\
                    .filter(start_time__lt=(timezone.now() + relativedelta(months=2)))\
                    .order_by('start_time')

            org_upcoming_events = []

            for event_date, events in itertools.groupby(upcoming_events,
                                                        key=day_grouper):
                events = sorted(events, key=attrgetter('start_time'))
                org_upcoming_events.append([date(*event_date), events])

            context['upcoming_events'] = org_upcoming_events

        context['user_subscribed'] = False
        if self.request.user.is_authenticated():
            user = self.request.user
            context['user'] = user

            if settings.USING_NOTIFICATIONS:
                if (len(user.eventssubscriptions.all()) > 0):
                    context['user_subscribed'] = True

        return context
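The grouping above works only because itertools.groupby merges adjacent items, which is why the querysets are ordered by start_time first. A minimal sketch of the same pattern (sample data is illustrative):

import itertools
from datetime import datetime

events = [datetime(2020, 1, 1, 9), datetime(2020, 1, 2, 8), datetime(2020, 1, 1, 17)]
day_grouper = lambda x: (x.year, x.month, x.day)

# groupby only merges consecutive items, so sort by the key first
for day, items in itertools.groupby(sorted(events), key=day_grouper):
    print(day, len(list(items)))
# (2020, 1, 1) 2
# (2020, 1, 2) 1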
Beispiel #21
0
    def get(self, request):
        context = {
            "states": craw_covid()["county"],
        }
        context["states"].sort()
        print(context)

        data = None
        state = request.GET.get('state')
        print(state)

        if state and state != "All States":
            data = CountyCase.objects.filter(county__stateName=state)

            fromDate = request.GET.get('from')
            print(fromDate)
            if fromDate:
                data = data.filter(date__gte=fromDate)

            toDate = request.GET.get('to')
            print(toDate)
            if toDate:
                data = data.filter(date__lte=toDate)

            if data:
                data = data.order_by('county__stateName', '-date')

            mode = request.GET.get('mode')
            print(mode)
            if fromDate and mode == "Increment":
                print(
                    data.values('county__countyName', "cases",
                                "date").order_by('county__countyName'))
                data = data.values('county__countyName') \
                    .annotate(cases_from=Min('cases'),
                              cases_to=Max('cases'),
                              deaths_from=Min('deaths'),
                              deaths_to=Max('deaths'))
                print(data)
                data_formatted = [{
                    'county__countyName':
                    row['county__countyName'],
                    'cases':
                    row['cases_to'] - row['cases_from'],
                    'deaths':
                    row['deaths_to'] - row['deaths_from'],
                } for row in data]
                data = sorted(data_formatted, key=lambda row: -row['cases'])

            elif mode in ["SUM", "Increment"]:
                data = data.values('county__countyName') \
                    .annotate(cases=Max('cases'), deaths=Max('deaths')) \
                    .order_by('-cases')
            elif mode in ["plot", "plot top-5"]:

                data = data.values('county__countyName', 'cases', 'deaths', 'date')\
                    .order_by('county__countyName', 'date')
                print(data)
                traces = []

                current_county = None
                dates = []
                cases = []
                deaths = []
                for row in data:
                    if not current_county:
                        current_county = row["county__countyName"]
                    elif current_county != row["county__countyName"]:  #save
                        traces.append((go.Scatter(
                            x=dates,
                            y=cases,
                            name=current_county,
                        ), cases[-1]))

                        dates = []
                        cases = []
                        deaths = []
                        current_county = row["county__countyName"]
                    dates.append(row["date"])
                    cases.append(row["cases"])

                traces.append((go.Scatter(
                    x=dates,
                    y=cases,
                    name=current_county,
                ), cases[-1]))

                if mode == "plot top-5":
                    print(traces)
                    traces = sorted(traces, key=lambda trace: -trace[1])
                    print(traces)
                    traces = traces[:5]
                traces = [trace[0] for trace in traces]
                print(traces)

                context["plot"] = plot(go.Data(traces), output_type='div')
                data = None
            else:
                data = None

        context["data"] = data
        return render(request, 'finalProject507/data.html', context)
Beispiel #22
0
    def _execute_seen_stats_query(self,
                                  item_list,
                                  start=None,
                                  end=None,
                                  conditions=None,
                                  environment_ids=None):
        project_ids = list(set([item.project_id for item in item_list]))
        group_ids = [item.id for item in item_list]
        aggregations = [
            ["count()", "", "times_seen"],
            ["min", "timestamp", "first_seen"],
            ["max", "timestamp", "last_seen"],
            ["uniq", "tags[sentry:user]", "count"],
        ]
        filters = {"project_id": project_ids, "group_id": group_ids}
        if self.environment_ids:
            filters["environment"] = self.environment_ids
        result = snuba.aliased_query(
            dataset=snuba.Dataset.Events,
            start=start,
            end=end,
            groupby=["group_id"],
            conditions=conditions,
            filter_keys=filters,
            aggregations=aggregations,
            referrer=
            "serializers.GroupSerializerSnuba._execute_seen_stats_query",
        )
        seen_data = {
            issue["group_id"]: fix_tag_value_data(
                dict(
                    filter(lambda key: key[0] != "group_id",
                           six.iteritems(issue))))
            for issue in result["data"]
        }
        user_counts = {
            item_id: value["count"]
            for item_id, value in seen_data.items()
        }
        last_seen = {
            item_id: value["last_seen"]
            for item_id, value in seen_data.items()
        }
        if start or end or conditions:
            first_seen = {
                item_id: value["first_seen"]
                for item_id, value in seen_data.items()
            }
            times_seen = {
                item_id: value["times_seen"]
                for item_id, value in seen_data.items()
            }
        else:
            if environment_ids:
                first_seen = {
                    ge["group_id"]: ge["first_seen__min"]
                    for ge in GroupEnvironment.objects.filter(
                        group_id__in=[item.id for item in item_list],
                        environment_id__in=environment_ids,
                    ).values("group_id").annotate(Min("first_seen"))
                }
            else:
                first_seen = {item.id: item.first_seen for item in item_list}
            times_seen = {item.id: item.times_seen for item in item_list}

        attrs = {}
        for item in item_list:
            attrs[item] = {
                "times_seen": times_seen.get(item.id, 0),
                "first_seen": first_seen.get(item.id),
                "last_seen": last_seen.get(item.id),
                "user_count": user_counts.get(item.id, 0),
            }
        return attrs
Beispiel #23
0
    def list(self, request):
        nodes = []
        # Loop through the last active nodes
        for last_active in LastActiveNodes.objects.iterator():
            # Get the current node
            node = Node.objects.filter(
                Q(id=last_active.node.id), ~Q(sensors=None)
            ).get()

            # The last active date
            last_data_received_at = last_active.last_data_received_at

            # last_data_received_at
            stats = []
            moved_to = None
            # Get data stats from 5mins before last_data_received_at
            if last_data_received_at:
                last_5_mins = last_data_received_at - datetime.timedelta(minutes=5)
                stats = (
                    SensorDataValue.objects.filter(
                        Q(sensordata__sensor__node=last_active.node.id),
                        Q(sensordata__location=last_active.location.id),
                        Q(sensordata__timestamp__gte=last_5_mins),
                        Q(sensordata__timestamp__lte=last_data_received_at),
                        # Ignore timestamp values
                        ~Q(value_type="timestamp"),
                        # Match only valid float text
                        Q(value__regex=r"^\-?\d+(\.?\d+)?$"),
                    )
                    .order_by()
                    .values("value_type")
                    .annotate(
                        sensor_id=F("sensordata__sensor__id"),
                        start_datetime=Min("sensordata__timestamp"),
                        end_datetime=Max("sensordata__timestamp"),
                        average=Avg(Cast("value", FloatField())),
                        minimum=Min(Cast("value", FloatField())),
                        maximum=Max(Cast("value", FloatField())),
                    )
                )

            # If the last_active node location is not same as current node location
            # then the node has moved locations since it was last active
            if last_active.location.id != node.location.id:
                moved_to = {
                    "name": node.location.location,
                    "longitude": node.location.longitude,
                    "latitude": node.location.latitude,
                    "city": {
                        "name": node.location.city,
                        "slug": slugify(node.location.city),
                    },
                }

            nodes.append(
                {
                    "node_moved": moved_to is not None,
                    "moved_to": moved_to,
                    "node": {"uid": last_active.node.uid, "id": last_active.node.id, "owner": last_active.node.owner.id},
                    "location": {
                        "name": last_active.location.location,
                        "longitude": last_active.location.longitude,
                        "latitude": last_active.location.latitude,
                        "city": {
                            "name": last_active.location.city,
                            "slug": slugify(last_active.location.city),
                        },
                    },
                    "last_data_received_at": last_data_received_at,
                    "stats": stats,
                }
            )
        return Response(nodes)
Beispiel #24
0
def complete(request):
    earliest_db_str = DailyData.objects.all().aggregate(
        Min("date")).get('date__min').strftime('%d/%m/%Y')
    latest_db_str = DailyData.objects.all().aggregate(
        Max("date")).get('date__max').strftime('%d/%m/%Y')
    context = {'start_db': earliest_db_str, 'end_db': latest_db_str}

    form_regressors = ClimaticIndex.objects.all().values('id', 'name')
    context['form_regressors'] = form_regressors

    if request.method == "POST" and request.POST.get('startDate') is not None:

        start_date_str = request.POST.get('startDate')
        end_date_str = request.POST.get('endDate')
        start_date = datetime.strptime(start_date_str, "%d/%m/%Y").date()
        end_date = datetime.strptime(end_date_str, "%d/%m/%Y").date()

        # Creation of lists of vintages
        vintages = get_vintages_list(start_date, end_date)
        if vintages is None:
            # "Vintage data missing between <start> and <end>"
            context['err_msg'] = ("Dati sulle annate mancanti tra il " +
                                  start_date_str + " e il " + end_date_str)
            return render(request, "MontalcinoForecastsApp/complete.html",
                          context)

        # Data array creation
        regr_combo = get_regr_combo(request.POST)
        x, regr_list, failed_regr_list = get_data(start_date, end_date,
                                                  regr_combo)

        # Error if unable to get all data (all nan or all nan except one)
        if len(failed_regr_list) > 0:
            err_msg = "Dati mancanti o non sufficienti tra il " + start_date_str + " e il " + end_date_str + \
                                 " per poter calcolare i seguenti indici: "
            for regr in failed_regr_list:
                err_msg += regr + ", "
            context['err_msg'] = err_msg[:-2] + "."
            return render(request, "MontalcinoForecastsApp/complete.html",
                          context)

        # Error if no regressor was selected ("Nessun indice selezionato" = "No index selected")
        if len(regr_list) == 0:
            context['err_msg'] = "Nessun indice selezionato"
            return render(request, "MontalcinoForecastsApp/complete.html",
                          context)

        # Error if more regressors than data
        if len(vintages) <= len(regr_list):
            err_msg = "Pochi anni e troppi regressori, impossibile usare la regressione lineare."
            context['err_msg'] = err_msg
            return render(request, "MontalcinoForecastsApp/complete.html",
                          context)

        # Linear regression
        x2 = sm.add_constant(x, has_constant='add')
        model = sm.OLS(vintages, x2)
        results = model.fit()
        vin_pred = results.predict(x2)

        # Chart and cards data creation
        chart = create_quality_chart(vintages, vin_pred)
        icons = get_icons(regr_combo)
        table_data = list(
            zip(["Intercetta"] + regr_list, ["fas fa-chart-line"] + icons,
                results.params, results.pvalues))

        # Update context
        context['start_train_date'] = start_date_str
        context['end_train_date'] = end_date_str
        context['chart_out'] = chart.render()
        context['regr_count'] = len(regr_list)
        context['r2'] = results.rsquared
        context['adj_r2'] = results.rsquared_adj
        context['table_data'] = table_data
        context['anchor'] = "results-section"
        context['regr_combo'] = regr_combo

        return render(request, "MontalcinoForecastsApp/complete.html", context)

    else:
        return render(request, "MontalcinoForecastsApp/complete.html", context)
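Stripped of the Django plumbing, the modelling core of the view is a plain statsmodels OLS fit; a self-contained sketch with toy data (values are illustrative):

import numpy as np
import statsmodels.api as sm

x = np.array([[1.0], [2.0], [3.0], [4.0]])   # one regressor column
vintages = np.array([2.1, 3.9, 6.2, 7.8])    # target values

x2 = sm.add_constant(x, has_constant='add')  # prepend the intercept column
results = sm.OLS(vintages, x2).fit()
vin_pred = results.predict(x2)
# results.params, results.pvalues, results.rsquared feed the template context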
Beispiel #25
0
def getuseraction(request, userid):
    e = Action.objects.filter(userid=userid)
    # Per token: latest event time as "log", earliest as "out"
    z = e.values("token").annotate(log=Max("time"), out=Min("time"))
    return get_response(200, json.dumps(list(z), cls=DjangoJSONEncoder), {})
Beispiel #26
0
def measure(request):
    '''
    Gets data specific to a channel/platform/measure combination
    '''
    channel_name = request.GET.get('channel')
    platform_name = request.GET.get('platform')
    measure_name = request.GET.get('measure')
    interval = request.GET.get('interval')
    start = request.GET.get('start')
    relative = request.GET.get('relative')
    versions = request.GET.getlist('version')

    if not all([channel_name, platform_name, measure_name, interval]):
        return HttpResponseBadRequest("All of channel, platform, measure, interval required")
    if not all([val is None or val.isdigit() for val in (start, interval)]):
        return HttpResponseBadRequest(
            "Interval / start time must be specified in seconds (as an integer) %s" % interval)

    builds = Build.objects.filter(channel__name=channel_name,
                                  platform__name=platform_name)
    if versions:
        builds = builds.filter(version__in=versions)

    try:
        measure = Measure.objects.get(name=measure_name,
                                      platform__name=platform_name)
    except Measure.DoesNotExist:
        return HttpResponseNotFound("Measure not available")

    datums = Datum.objects.filter(build__in=builds, measure=measure)

    ret = {}

    if relative is None or (relative.isdigit() and not int(relative)):
        # default is to get latest data for all series
        datums = _filter_datums_to_time_interval(datums, start, interval)

        for (build_id, version, timestamp, value, usage_hours) in datums.values_list(
                'build__build_id', 'build__version',
                'timestamp', 'value', 'usage_hours').order_by('timestamp'):
            if not ret.get(build_id):
                ret[build_id] = {
                    'data': [],
                    'version': version
                }
            ret[build_id]['data'].append((timestamp, value, usage_hours))
    else:
        if not versions:
            # if the user does not specify a list of versions, generate our
            # own based on the latest version with data
            latest_build_id = datums.filter(
                timestamp__gt=(datetime.datetime.now() -
                               datetime.timedelta(days=1))
            ).aggregate(
                Max('build__build_id'))['build__build_id__max']
            if int(interval) == 0:
                # if interval is 0 for relative, just use the interval of the latest
                # released version
                timestamps_for_latest = datums.filter(
                    build__build_id=latest_build_id).aggregate(
                        Min('timestamp'), Max('timestamp'))
                interval = (timestamps_for_latest['timestamp__max'] -
                            timestamps_for_latest['timestamp__min']).total_seconds()
            # get data for current + up to three previous versions (handling each
            # build id for each version, if there are multiple)
            versions = _sorted_version_list(
                [str(d[0]) for d in datums.values_list('build__version').distinct()]
            )[:4]
        version_timestamps = {
            (d[0], d[1]): d[2] for d in datums.filter(
                build__version__in=versions).values_list(
                    'build__version', 'build__build_id').distinct().annotate(
                        Min('timestamp'))
        }

        # for each version/buildid combo, grab their data relative to the
        # latest version
        for (version_tuple, base_timestamp) in version_timestamps.items():
            (version, build_id) = version_tuple
            ret[build_id] = {
                'version': version,
                'data': []
            }
            if start:
                start_timestamp = base_timestamp + datetime.timedelta(seconds=int(start))
            else:
                start_timestamp = base_timestamp
            ret[build_id]['data'] = [
                [int((timestamp - base_timestamp).total_seconds()), value, usage_hours] for
                (timestamp, value, usage_hours) in datums.filter(
                    build__version=version,
                    build__build_id=build_id,
                    timestamp__range=(start_timestamp,
                                      start_timestamp + datetime.timedelta(seconds=int(interval)))
                ).order_by('timestamp').values_list('timestamp', 'value', 'usage_hours')]

    return JsonResponse(data={'measure_data': ret})
Beispiel #27
0
def search(request):
    template = 'recommendation.html'

    query = request.GET.get('q')
    tep = "%%%s%%" % query
    filter_title = Director.objects.raw(
        "SELECT m.title AS title, d.name AS name, a.name AS star \
        FROM pages_director AS d LEFT JOIN pages_movie AS m ON d.name = m.director_id \
        LEFT JOIN pages_actor AS a ON a.name = m.actor_id \
        WHERE m.title LIKE %s", [tep])

    filter_data = Director.objects.raw(
        "SELECT m.title AS title, m.rating AS rating, m.votes AS votes, m.metascore AS metascore, \
        m.gross_earning_in_mil AS gross, d.name AS name, d.award_win AS d_win, d.award_nom AS d_nom, \
        a.name AS star, a.award_win AS a_win, a.award_nom AS a_nom \
        FROM pages_director AS d LEFT JOIN pages_movie AS m ON d.name = m.director_id \
        LEFT JOIN pages_actor AS a ON a.name = m.actor_id \
        WHERE m.title LIKE %s", [tep])

    limit_tuple = filter_data[:1]
    for movie in limit_tuple:
        title = movie.title
        rating = movie.rating
        votes = movie.votes
        metascore = movie.metascore
        gross = movie.gross
        d_award = movie.d_win + movie.d_nom
        a_award = movie.a_win + movie.a_nom

    d_win_max = Director.objects.all().aggregate(r1=Max('award_win'))
    d_win_min = Director.objects.all().aggregate(r2=Min('award_win'))
    d_nom_max = Director.objects.all().aggregate(r3=Max('award_nom'))
    d_nom_min = Director.objects.all().aggregate(r4=Min('award_nom'))

    d_range = d_win_max.get('r1') + d_nom_max.get('r3') - d_win_min.get(
        'r2') - d_nom_min.get('r4')
    new_d_award = (d_award -
                   (d_win_min.get('r2') + d_nom_min.get('r4'))) / d_range * 100

    a_win_max = Actor.objects.all().aggregate(r1=Max('award_win'))
    a_win_min = Actor.objects.all().aggregate(r2=Min('award_win'))
    a_nom_max = Actor.objects.all().aggregate(r3=Max('award_nom'))
    a_nom_min = Actor.objects.all().aggregate(r4=Min('award_nom'))

    a_range = a_win_max.get('r1') + a_nom_max.get('r3') - a_win_min.get(
        'r2') - a_nom_min.get('r4')
    new_a_award = (a_award -
                   (a_win_min.get('r2') + a_nom_min.get('r4'))) / a_range * 100

    rating_max = Movie.objects.all().aggregate(rm1=Max('rating'))
    rating_min = Movie.objects.all().aggregate(rm2=Min('rating'))
    rating_range = rating_max.get('rm1') - rating_min.get('rm2')
    new_rating = (rating - rating_min.get('rm2')) / rating_range * 100

    votes_max = Movie.objects.all().aggregate(rm1=Max('votes'))
    votes_min = Movie.objects.all().aggregate(rm2=Min('votes'))
    votes_range = votes_max.get('rm1') - votes_min.get('rm2')
    new_votes = (votes - votes_min.get('rm2')) / votes_range * 100

    metascore_max = Movie.objects.all().aggregate(rm1=Max('metascore'))
    metascore_min = Movie.objects.all().aggregate(rm2=Min('metascore'))
    metascore_range = metascore_max.get('rm1') - metascore_min.get('rm2')
    new_metascore = (metascore -
                     metascore_min.get('rm2')) / metascore_range * 100

    gross_max = Movie.objects.all().aggregate(rm1=Max('gross_earning_in_mil'))
    gross_min = Movie.objects.all().aggregate(rm2=Min('gross_earning_in_mil'))
    gross_range = gross_max.get('rm1') - gross_min.get('rm2')
    new_gross = (gross - gross_min.get('rm2')) / gross_range * 100

    context = {
        'filter_title': filter_title,
        'limit_tuple': limit_tuple,
        'new_rating': new_rating,
        'new_votes': new_votes,
        'new_d_award': new_d_award,
        'new_a_award': new_a_award,
        'new_metascore': new_metascore,
        'new_gross': new_gross
    }
    return render(request, template, context)
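Each Max/Min pair above issues its own query; the same bounds can be fetched in one aggregate call per model, which would be the more idiomatic form of these lookups, e.g.:

from django.db.models import Max, Min

# One query instead of two (keys mirror the names used above)
rating_bounds = Movie.objects.aggregate(rm1=Max('rating'), rm2=Min('rating'))
rating_range = rating_bounds['rm1'] - rating_bounds['rm2']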
Beispiel #28
0
# How to do aggregation with filter in django:
# filter the queryset first, then aggregate over the filtered rows
# (some_server_id is an illustrative placeholder)
from django.db.models import Min
models.ServerName.objects.filter(server_id=some_server_id).aggregate(Min('time'))
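Since Django 2.0 an aggregate can also carry its own filter, which keeps the whole computation in one query; a hedged sketch reusing the names above (some_server_id is illustrative):

from django.db.models import Min, Q

models.ServerName.objects.aggregate(
    first_time=Min('time', filter=Q(server_id=some_server_id)))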
Beispiel #29
0
 def get_queryset(self, *args, **kwargs):
     qs = super(TriggerCRUDL.List, self).get_queryset(*args, **kwargs)
     qs = qs.filter(is_active=True, is_archived=False).annotate(
         earliest_group=Min('groups__name')).order_by(
             'keyword', 'earliest_group')
     return qs
Beispiel #30
0
def action_report(request, test_id, action_id):
    '''
    Generate HTML page with detail data about some action
    which were execute during the test
    '''

    action_aggregate_data = list(
        TestActionAggregateData.objects.annotate(
            test_name=F('test__display_name')).filter(
                action_id=action_id, test_id__lte=test_id).values(
                    'test_name', 'data').order_by('-test__start_time'))[:5]
    action_data = []
    for e in action_aggregate_data:
        data = e['data']
        mean = data['mean']
        min_value = data['min']
        max_value = data['max']
        q3 = data['75%']
        q2 = data['50%']
        std = data['std']
        if '25%' in data:
            q1 = data['25%']
        else:
            # 25th percentile missing: approximate q1 by mirroring q3 around the median
            q1 = q2 - (q3 - q2)
        IQR = q3 - q1
        LW = q1 - 1.5 * IQR
        if LW < 0:
            LW = 0.1
        UW = q3 + 1.5 * IQR
        test_name = e['test_name']
        action_data.append({
            "q1": q1,
            "q2": q2,
            "q3": q3,
            "IQR": IQR,
            "LW": LW,
            "UW": UW,
            "mean": mean,
            "min": min,
            "max": max,
            "std": std,
            "test_name": test_name
        })
    test_start_time = TestActionData.objects. \
        filter(test_id=test_id, data_resolution_id=1). \
        aggregate(min_timestamp=Min(
            RawSQL("((data->>%s)::timestamp)",
                  ('timestamp',))))['min_timestamp']
    test_errors = TestError.objects.annotate(text=F('error__text'),
                                             code=F('error__code')).filter(
                                                 test_id=test_id,
                                                 action_id=action_id).values(
                                                     'text', 'code', 'count')
    return render(
        request, 'action_report.html', {
            'test_id': test_id,
            'action': Action.objects.get(id=action_id),
            'action_data': action_data,
            'test_start_time': test_start_time,
            'test_errors': test_errors,
        })
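The quartile block in action_report computes standard Tukey box-plot statistics; the same arithmetic as a compact standalone function (name is illustrative):

def tukey_stats(q1, q2, q3):
    # Interquartile range plus lower/upper whiskers; the lower whisker is
    # clamped to 0.1 when negative, as the view does for response times.
    iqr = q3 - q1
    lw = q1 - 1.5 * iqr
    if lw < 0:
        lw = 0.1
    uw = q3 + 1.5 * iqr
    return iqr, lw, uw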