Example #1
def conditional_sum(value=1, **cond):
    """Wrapper to generate SUM on boolean/enum values"""
    return Sum(
        Case(When(then=value, **cond), default=0, output_field=IntegerField()))
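A quick usage sketch (the Order model and its boolean `paid` field are hypothetical): because the wrapper forwards arbitrary lookups into When(), any boolean or enum condition can be tallied in a single aggregate() call.

# Hypothetical model: Order with a boolean `paid` field.
stats = Order.objects.aggregate(
    n_paid=conditional_sum(paid=True),     # rows matching paid=True add 1
    n_unpaid=conditional_sum(paid=False),
)
# e.g. {'n_paid': 3, 'n_unpaid': 1}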
Example #2
    def get_many(self, ids):
        # Map each pk to its position in `ids` so the database returns
        # the rows in the caller-supplied order.
        id_order = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(ids)])
        return self.model.objects \
            .filter(pk__in=ids) \
            .order_by(id_order)
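A brief usage sketch, assuming `repo` is an instance of the class defining get_many above: the Case expression maps each pk to its position in `ids`, so order_by(id_order) returns the rows in exactly the order requested.

# Hypothetical call; assumes objects with pks 42, 7 and 19 exist.
items = repo.get_many([42, 7, 19])
assert [obj.pk for obj in items] == [42, 7, 19]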
Example #3
def channel_page(request, channel, allow_edit=False, staging=False):
    channel_serializer = ChannelSerializer(channel)
    channel_list = Channel.objects.select_related('main_tree').prefetch_related('editors').prefetch_related('viewers')\
                          .exclude(id=channel.pk).filter(Q(deleted=False) & (Q(editors=request.user) | Q(viewers=request.user)))\
                          .annotate(is_view_only=Case(When(editors=request.user, then=Value(0)), default=Value(1), output_field=IntegerField()))\
                          .distinct().values("id", "name", "is_view_only").order_by('name')

    fileformats = get_or_set_cached_constants(FileFormat, FileFormatSerializer)
    licenses = get_or_set_cached_constants(License, LicenseSerializer)
    formatpresets = get_or_set_cached_constants(FormatPreset,
                                                FormatPresetSerializer)
    contentkinds = get_or_set_cached_constants(ContentKind,
                                               ContentKindSerializer)
    languages = get_or_set_cached_constants(Language, LanguageSerializer)

    token = None
    primary_token = channel.secret_tokens.filter(is_primary=True).first()
    if primary_token:
        token = primary_token.token
        token = token[:5] + "-" + token[5:]

    json_renderer = JSONRenderer()
    return render(
        request, 'channel_edit.html', {
            "allow_edit": allow_edit,
            "staging": staging,
            "is_public": channel.public,
            "channel": json_renderer.render(channel_serializer.data),
            "channel_id": channel.pk,
            "channel_name": channel.name,
            "channel_list": channel_list,
            "fileformat_list": fileformats,
            "license_list": licenses,
            "fpreset_list": formatpresets,
            "ckinds_list": contentkinds,
            "langs_list": languages,
            "roles": json.dumps([r[0] for r in roles.choices]),
            "mastery": json.dumps([
                m[0] for m in exercises.MASTERY_MODELS
                if m[0] != exercises.SKILL_CHECK
            ]),
            "current_user": json_renderer.render(
                CurrentUserSerializer(request.user).data),
            "preferences": json.dumps(channel.content_defaults),
            "messages": get_messages(),
            "primary_token": token or channel.pk,
            "title": settings.DEFAULT_TITLE,
        })
Example #4
        def get_data(email_or_fax):
            """Helper function to get email or fax data"""
            if email_or_fax == 'email':
                address_model = EmailAddress
                confirm_rel = 'to_emails'
                error_fields = [
                    'email',
                    'datetime',
                    'recipient',
                    'code',
                    'error',
                    'event',
                    'reason',
                ]
            elif email_or_fax == 'fax':
                address_model = PhoneNumber
                confirm_rel = 'faxes'
                error_fields = [
                    'fax',
                    'datetime',
                    'recipient',
                    'error_type',
                    'error_code',
                    'error_id',
                ]
            else:
                # Guard against typos: the names above would otherwise be unbound
                raise ValueError("email_or_fax must be 'email' or 'fax'")

            open_requests = (
                self.agency.foiarequest_set.get_open().order_by(
                    '%s__status' % email_or_fax, email_or_fax
                ).exclude(**{
                    email_or_fax: None
                }).select_related(
                    'agency__jurisdiction',
                    'composer',
                    'email',
                    'fax',
                    'portal',
                ).annotate(
                    latest_response=ExtractDay(
                        Cast(
                            Now() - Max(
                                Case(
                                    When(
                                        communications__response=True,
                                        then='communications__datetime'
                                    )
                                )
                            ),
                            models.DurationField(),
                        )
                    )
                ).only(
                    'pk',
                    'status',
                    'title',
                    'slug',
                    'agency__jurisdiction__slug',
                    'agency__jurisdiction__id',
                    'composer__datetime_submitted',
                    'date_estimate',
                    'portal__name',
                    'email__email',
                    'email__name',
                    'email__status',
                    'fax__number',
                    'fax__status',
                )
            )
            grouped_requests = [
                (k, list(v)) for k, v in
                groupby(open_requests, lambda f: getattr(f, email_or_fax))
            ]
            # run separate queries for the per-address/per-fax-number stats;
            # doing the annotations separately limits joins, for performance
            addresses = (
                address_model.objects.annotate(
                    error_count=Count('errors', distinct=True),
                    last_error=Max('errors__datetime'),
                )
            )
            addresses = addresses.in_bulk(g[0].pk for g in grouped_requests)
            addresses_confirm = address_model.objects.annotate(
                last_confirm=Max('%s__confirmed_datetime' % confirm_rel),
            )
            addresses_confirm = addresses_confirm.in_bulk(
                g[0].pk for g in grouped_requests
            )
            if email_or_fax == 'email':
                addresses_open = address_model.objects.annotate(
                    last_open=Max('opens__datetime'),
                )
                addresses_open = addresses_open.in_bulk(
                    g[0].pk for g in grouped_requests
                )

            review_data = []
            for addr, foias in grouped_requests:
                # fetch the address with the annotated stats
                addr = addresses[addr.pk]
                review_data.append({
                    'address': addr,
                    'error': addr.status == 'error',
                    'errors': addr.errors.select_related(
                        '%s__communication__foia__agency__jurisdiction' %
                        email_or_fax
                    ).order_by('-datetime').only(
                        *error_fields + [
                            '%s__communication__foia__agency__jurisdiction__slug'
                            % email_or_fax,
                            '%s__communication__foia__slug' % email_or_fax,
                            '%s__communication__foia__title' % email_or_fax,
                        ]
                    )[:5],
                    'foias': foias,
                    'unacknowledged': any(f.status == 'ack' for f in foias),
                    'total_errors': addr.error_count,
                    'last_error': addr.last_error,
                    'last_confirm': addresses_confirm[addr.pk].last_confirm,
                    'last_open': (addresses_open[addr.pk].last_open
                                  if email_or_fax == 'email' else None),
                    'checkbox_name': u'foias-%d-%s-%d' % (self.pk, email_or_fax,
                                                          addr.pk),
                    'email_or_fax': email_or_fax,
                })
            return review_data
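The latest_response annotation is the dense part of this example; a minimal sketch of the same idea in isolation (FOIARequest and its communications relation come from the example above, the annotation name is illustrative): take the newest response timestamp, subtract it from now, cast the difference to a duration, and extract whole days.

from django.db.models import Case, DurationField, Max, When
from django.db.models.functions import Cast, ExtractDay, Now

qs = FOIARequest.objects.annotate(
    days_since_response=ExtractDay(
        Cast(
            # Max() over a conditional expression: only communications
            # marked as responses contribute a timestamp
            Now() - Max(Case(When(communications__response=True,
                                  then='communications__datetime'))),
            DurationField(),
        )
    )
)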
Example #5
    def handle(self, *args, **options):
        #         test_products = Product.objects.filter(Q(category__title='Ноутбуки') | Q(category__title='Планшеты'))
        #
        #         print(len(test_products))
        #         print(test_products)
        #
        #         db_profile_by_type('learn db', '', connection.queries)

        ACTION_1 = 1
        ACTION_2 = 2
        ACTION_EXPIRED = 3

        action_1__time_delta = timedelta(hours=12)
        action_2__time_delta = timedelta(days=1)

        action_1__discount = 0.3
        action_2__discount = 0.15
        action_expired__discount = 0.05

        action_1__condition = Q(order__updated__lte=F('order__created') +
                                action_1__time_delta)

        action_2__condition = Q(
            order__updated__gt=F('order__created') + action_1__time_delta) & Q(
                order__updated__lte=F('order__created') + action_2__time_delta)

        action_expired__condition = Q(order__updated__gt=F('order__created') +
                                      action_2__time_delta)

        action_1__order = When(action_1__condition, then=ACTION_1)
        action_2__order = When(action_2__condition, then=ACTION_2)
        action_expired__order = When(action_expired__condition,
                                     then=ACTION_EXPIRED)

        action_1__price = When(action_1__condition,
                               then=F('product__cost') * F('quantity') *
                               action_1__discount)

        action_2__price = When(action_2__condition,
                               then=F('product__cost') * F('quantity') *
                               action_2__discount)

        action_expired__price = When(action_expired__condition,
                                     then=F('product__cost') * F('quantity') *
                                     action_expired__discount)

        test_orders = OrderItem.objects.annotate(action_order=Case(
            action_1__order,
            action_2__order,
            action_expired__order,
            output_field=IntegerField(),
        )).annotate(total_price=Case(
            action_1__price,
            action_2__price,
            action_expired__price,
            output_field=DecimalField(),
        )).order_by('action_order', 'total_price').select_related()

        for orderitem in test_orders:
            print(f'{orderitem.action_order:2}: order #{orderitem.pk:3}: '
                  f'{orderitem.product.title:15}: discount '
                  f'{abs(orderitem.total_price):6.2f} rub. | '
                  f'{orderitem.order.updated - orderitem.order.created}')
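One simplification worth noting: Case evaluates its When clauses in order and stops at the first match, so the lower bound inside action_2__condition is redundant as long as action_1__order stays listed first. A sketch of the slimmer equivalent:

# Rows updated within 12 hours already matched ACTION_1 above,
# so only the upper bound is needed here:
action_2__condition = Q(
    order__updated__lte=F('order__created') + action_2__time_delta)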
Example #6
def fix_suggestion(request, suggestion_id):
    cluster_colors = {
        'unprocessed': 'text-info',
        'accepted': 'text-success',
        'rejected': 'text-danger'
    }

    # Retrieve the Suggestion object if it exists, else raise a 404 error
    suggestion = get_object_or_404(
        Suggestion.objects.select_related(
            'work', 'user', 'work__category').annotate(
                count_agrees=Count(Case(When(evidence__agrees=True, then=1))),
                count_disagrees=Count(
                    Case(When(evidence__agrees=False, then=1)))),
        id=suggestion_id)

    # Retrieve the Evidence object if it exists
    evidence = None
    if request.user.is_authenticated:
        try:
            evidence = Evidence.objects.get(user=request.user,
                                            suggestion=suggestion)
        except ObjectDoesNotExist:
            evidence = None

    # Retrieve related clusters
    clusters = WorkCluster.objects.select_related(
        'resulting_work', 'resulting_work__category').prefetch_related(
            'works', 'works__category',
            'checker').filter(origin=suggestion_id).all()
    colors = [cluster_colors[cluster.status] for cluster in clusters]

    # Get the previous suggestion, i.e. more recent and with the same checked status
    previous_suggestions_ids = Suggestion.objects.filter(
        date__gt=suggestion.date,
        is_checked=suggestion.is_checked).order_by('date').values_list(
            'id', flat=True)

    # If there is no more recent suggestion and this one was checked, pick from unchecked suggestions
    if not previous_suggestions_ids and suggestion.is_checked:
        previous_suggestions_ids = Suggestion.objects.filter(
            is_checked=False).order_by('date').values_list('id', flat=True)

    # Get the next suggestion, i.e. less recent and with the same checked status
    next_suggestions_ids = Suggestion.objects.filter(
        date__lt=suggestion.date,
        is_checked=suggestion.is_checked).order_by('-date').values_list(
            'id', flat=True)

    # If there is no less recent suggestion and this one wasn't checked, pick from checked suggestions
    if not next_suggestions_ids and not suggestion.is_checked:
        next_suggestions_ids = Suggestion.objects.filter(
            is_checked=True).order_by('-date').values_list('id', flat=True)

    context = {
        'suggestion': suggestion,
        'clusters': zip(clusters, colors) if clusters and colors else None,
        'evidence': evidence,
        'can_auto_fix': suggestion.can_auto_fix and request.user.is_staff,
        'can_close': request.user.is_staff,
        'can_reopen': request.user.is_staff,
        'next_id': next_suggestions_ids[0] if next_suggestions_ids else None,
        'previous_id': previous_suggestions_ids[0] if previous_suggestions_ids else None,
    }

    return render(request, 'fix/fix_suggestion.html', context)
Example #7
def get_severity_count(id, table):
    # The three branches differ only in how findings are filtered;
    # the per-severity aggregation itself is identical.
    if table == "test":
        filters = {"test": id}
    elif table == "engagement":
        filters = {
            "test__engagement": id,
            "active": True,
            "verified": True,
            "duplicate": False,
        }
    elif table == "product":
        filters = {"test__engagement__product": id}
    else:
        raise ValueError("table must be 'test', 'engagement' or 'product'")

    counts = Finding.objects.filter(**filters). \
        prefetch_related('test__engagement__product').aggregate(
        total=Sum(
            Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                      then=Value(1)),
                 output_field=IntegerField())),
        critical=Sum(
            Case(When(severity='Critical', then=Value(1)),
                 output_field=IntegerField())),
        high=Sum(
            Case(When(severity='High', then=Value(1)),
                 output_field=IntegerField())),
        medium=Sum(
            Case(When(severity='Medium', then=Value(1)),
                 output_field=IntegerField())),
        low=Sum(
            Case(When(severity='Low', then=Value(1)),
                 output_field=IntegerField())),
        info=Sum(
            Case(When(severity='Info', then=Value(1)),
                 output_field=IntegerField())),
    )

    # Sum() yields None when nothing matches, so default each count to zero
    critical = counts["critical"] or 0
    high = counts["high"] or 0
    medium = counts["medium"] or 0
    low = counts["low"] or 0
    info = counts["info"] or 0
    total = critical + high + medium + low + info

    display_counts = []
    if critical:
        display_counts.append("Critical: " + str(critical))
    if high:
        display_counts.append("High: " + str(high))
    if medium:
        display_counts.append("Medium: " + str(medium))
    if low:
        display_counts.append("Low: " + str(low))
    if info:
        display_counts.append("Info: " + str(info))

    if total > 0:
        if table == "test":
            display_counts.append("Total: " + str(total) + " Findings")
        elif table == "engagement":
            display_counts.append("Total: " + str(total) +
                                  " Active, Verified Findings")
        elif table == "product":
            display_counts.append("Total: " + str(total) + " Active Findings")

    return ", ".join(display_counts)
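On Django 2.0 and later, the same per-severity tallies can be written with the `filter` argument to aggregates instead of Sum(Case(When(...))). A sketch against the same Finding model, mirroring the "test" branch; since Count returns 0 rather than None, the zero-defaulting step above also becomes unnecessary:

from django.db.models import Count, Q

counts = Finding.objects.filter(test=id).aggregate(
    total=Count('id', filter=Q(
        severity__in=('Critical', 'High', 'Medium', 'Low'))),
    critical=Count('id', filter=Q(severity='Critical')),
    high=Count('id', filter=Q(severity='High')),
    medium=Count('id', filter=Q(severity='Medium')),
    low=Count('id', filter=Q(severity='Low')),
    info=Count('id', filter=Q(severity='Info')),
)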
Example #8
def metrics(request, mtype):
    template = 'dojo/metrics.html'
    page_name = 'Product Type Metrics'
    show_pt_filter = True

    ra_count_subquery = (
        'SELECT COUNT(*) FROM dojo_risk_acceptance INNER JOIN '
        'dojo_risk_acceptance_accepted_findings ON '
        '( dojo_risk_acceptance.id = dojo_risk_acceptance_accepted_findings.risk_acceptance_id ) '
        'WHERE dojo_risk_acceptance_accepted_findings.finding_id = dojo_finding.id')

    findings = Finding.objects.filter(
        verified=True,
        severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')
    ).prefetch_related(
        'test__engagement__product', 'test__engagement__product__prod_type',
        'test__engagement__risk_acceptance', 'risk_acceptance_set', 'reporter'
    ).extra(select={'ra_count': ra_count_subquery})
    # Same queryset restricted to active findings; querysets are lazy,
    # so chaining avoids repeating the whole definition.
    active_findings = findings.filter(active=True)

    if mtype != 'All':
        pt = Product_Type.objects.filter(id=mtype)
        request.GET._mutable = True
        request.GET.appendlist('test__engagement__product__prod_type', mtype)
        request.GET._mutable = False
        mtype = pt[0].name
        show_pt_filter = False
        page_name = '%s Metrics' % mtype
        prod_type = pt
    elif 'test__engagement__product__prod_type' in request.GET:
        prod_type = Product_Type.objects.filter(id__in=request.GET.getlist(
            'test__engagement__product__prod_type', []))
    else:
        prod_type = Product_Type.objects.all()
    findings = MetricsFindingFilter(request.GET, queryset=findings)
    active_findings = MetricsFindingFilter(request.GET,
                                           queryset=active_findings)

    findings.qs  # this is needed to load details from filter since it is lazy
    active_findings.qs  # this is needed to load details from filter since it is lazy

    start_date = findings.filters['date'].start_date
    start_date = datetime(start_date.year,
                          start_date.month,
                          start_date.day,
                          tzinfo=timezone.get_current_timezone())
    end_date = findings.filters['date'].end_date
    end_date = datetime(end_date.year,
                        end_date.month,
                        end_date.day,
                        tzinfo=timezone.get_current_timezone())

    # Restrict to the selected product types, if any
    pt_filter = {}
    if len(prod_type) > 0:
        pt_filter['test__engagement__product__prod_type__in'] = prod_type

    findings_closed = Finding.objects.filter(
        mitigated__range=[start_date, end_date],
        **pt_filter).prefetch_related('test__engagement__product')
    # capture the accepted findings in period
    accepted_findings = Finding.objects.filter(
        risk_acceptance__created__range=[start_date, end_date],
        **pt_filter).prefetch_related('test__engagement__product')
    accepted_findings_counts = accepted_findings.aggregate(
        total=Sum(
            Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                      then=Value(1)),
                 output_field=IntegerField())),
        critical=Sum(
            Case(When(severity='Critical', then=Value(1)),
                 output_field=IntegerField())),
        high=Sum(
            Case(When(severity='High', then=Value(1)),
                 output_field=IntegerField())),
        medium=Sum(
            Case(When(severity='Medium', then=Value(1)),
                 output_field=IntegerField())),
        low=Sum(
            Case(When(severity='Low', then=Value(1)),
                 output_field=IntegerField())),
        info=Sum(
            Case(When(severity='Info', then=Value(1)),
                 output_field=IntegerField())),
    )

    r = relativedelta(end_date, start_date)
    months_between = (r.years * 12) + r.months
    # include current month
    months_between += 1

    weeks_between = int(
        ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7)))
    if weeks_between <= 0:
        weeks_between += 2

    monthly_counts = get_period_counts(active_findings.qs,
                                       findings.qs,
                                       findings_closed,
                                       accepted_findings,
                                       months_between,
                                       start_date,
                                       relative_delta='months')
    weekly_counts = get_period_counts(active_findings.qs,
                                      findings.qs,
                                      findings_closed,
                                      accepted_findings,
                                      weeks_between,
                                      start_date,
                                      relative_delta='weeks')

    top_ten = Product.objects.filter(
        engagement__test__finding__verified=True,
        engagement__test__finding__false_p=False,
        engagement__test__finding__duplicate=False,
        engagement__test__finding__out_of_scope=False,
        engagement__test__finding__mitigated__isnull=True,
        engagement__test__finding__severity__in=('Critical', 'High', 'Medium',
                                                 'Low'),
        prod_type__in=prod_type).annotate(
            critical=Sum(
                Case(When(engagement__test__finding__severity='Critical',
                          then=Value(1)),
                     output_field=IntegerField())),
            high=Sum(
                Case(When(engagement__test__finding__severity='High',
                          then=Value(1)),
                     output_field=IntegerField())),
            medium=Sum(
                Case(When(engagement__test__finding__severity='Medium',
                          then=Value(1)),
                     output_field=IntegerField())),
            low=Sum(
                Case(When(engagement__test__finding__severity='Low',
                          then=Value(1)),
                     output_field=IntegerField())),
            total=Sum(
                Case(When(engagement__test__finding__severity__in=('Critical',
                                                                   'High',
                                                                   'Medium',
                                                                   'Low'),
                          then=Value(1)),
                     output_field=IntegerField()))).order_by(
                         '-critical', '-high', '-medium', '-low')[:10]

    age_detail = [0, 0, 0, 0]

    in_period_counts = {
        "Critical": 0,
        "High": 0,
        "Medium": 0,
        "Low": 0,
        "Info": 0,
        "Total": 0
    }
    in_period_details = {}

    closed_in_period_counts = {
        "Critical": 0,
        "High": 0,
        "Medium": 0,
        "Low": 0,
        "Info": 0,
        "Total": 0
    }
    closed_in_period_details = {}

    accepted_in_period_details = {}

    for finding in findings.qs:
        if 0 <= finding.age <= 30:
            age_detail[0] += 1
        elif 30 < finding.age <= 60:
            age_detail[1] += 1
        elif 60 < finding.age <= 90:
            age_detail[2] += 1
        elif finding.age > 90:
            age_detail[3] += 1

        in_period_counts[finding.severity] += 1
        in_period_counts['Total'] += 1

        if finding.test.engagement.product.name not in in_period_details:
            in_period_details[finding.test.engagement.product.name] = {
                'path': reverse('view_product_findings',
                                args=(finding.test.engagement.product.id, )),
                'Critical': 0,
                'High': 0,
                'Medium': 0,
                'Low': 0,
                'Info': 0,
                'Total': 0,
            }
        in_period_details[finding.test.engagement.product.name][
            finding.severity] += 1
        in_period_details[finding.test.engagement.product.name]['Total'] += 1

    for finding in accepted_findings:
        if finding.test.engagement.product.name not in accepted_in_period_details:
            accepted_in_period_details[
                finding.test.engagement.product.name] = {
                    'path': reverse('accepted_findings') +
                    '?test__engagement__product=' +
                    str(finding.test.engagement.product.id),
                    'Critical': 0,
                    'High': 0,
                    'Medium': 0,
                    'Low': 0,
                    'Info': 0,
                    'Total': 0,
                }
        accepted_in_period_details[finding.test.engagement.product.name][
            finding.severity] += 1
        accepted_in_period_details[
            finding.test.engagement.product.name]['Total'] += 1

    for f in findings_closed:
        closed_in_period_counts[f.severity] += 1
        closed_in_period_counts['Total'] += 1

        if f.test.engagement.product.name not in closed_in_period_details:
            closed_in_period_details[f.test.engagement.product.name] = {
                'path': reverse('closed_findings') +
                '?test__engagement__product=' +
                str(f.test.engagement.product.id),
                'Critical': 0,
                'High': 0,
                'Medium': 0,
                'Low': 0,
                'Info': 0,
                'Total': 0,
            }
        closed_in_period_details[f.test.engagement.product.name][
            f.severity] += 1
        closed_in_period_details[f.test.engagement.product.name]['Total'] += 1

    punchcard = list()
    ticks = list()
    highest_count = 0

    if request.GET.get('view') == 'dashboard':
        punchcard, ticks, highest_count = get_punchcard_data(
            findings.qs, weeks_between, start_date)
        page_name = (get_system_setting('team_name')) + " Metrics"
        template = 'dojo/dashboard-metrics.html'

    add_breadcrumb(title=page_name,
                   top_level=not len(request.GET),
                   request=request)

    return render(
        request, template, {
            'name': page_name,
            'start_date': start_date,
            'end_date': end_date,
            'findings': findings,
            'opened_per_month': monthly_counts['opened_per_period'],
            'active_per_month': monthly_counts['active_per_period'],
            'opened_per_week': weekly_counts['opened_per_period'],
            'accepted_per_month': monthly_counts['accepted_per_period'],
            'accepted_per_week': weekly_counts['accepted_per_period'],
            'top_ten_products': top_ten,
            'age_detail': age_detail,
            'in_period_counts': in_period_counts,
            'in_period_details': in_period_details,
            'accepted_in_period_counts': accepted_findings_counts,
            'accepted_in_period_details': accepted_in_period_details,
            'closed_in_period_counts': closed_in_period_counts,
            'closed_in_period_details': closed_in_period_details,
            'punchcard': punchcard,
            'ticks': ticks,
            'highest_count': highest_count,
            'show_pt_filter': show_pt_filter,
        })
Example #9
    def get_all_tx_hashes(self,
                          safe_address: str,
                          executed: bool = False,
                          queued: bool = True,
                          trusted: bool = True) -> QuerySet:
        """
        Build a queryset with hashes for every tx for a Safe for pagination filtering. In the case of
        Multisig Transactions, as some of them are not mined, we use the SafeTxHash
        Criteria for building this list:
          - Return only multisig txs with `nonce < current Safe Nonce`
          - The endpoint should only show incoming transactions that have been mined
          - The transactions should be sorted by execution date. If an outgoing transaction doesn't have an execution
          date the execution date of the transaction with the same nonce that has been executed should be taken.
          - Incoming and outgoing transfers or Eth/tokens must be under a multisig/module tx if triggered by one.
          Otherwise they should have their own entry in the list using a EthereumTx
        :param safe_address:
        :param executed: By default `False`, all transactions are returned. With `True`, just txs executed are returned.
        :param queued: By default `True`, all transactions are returned. With `False`, just txs with
        `nonce < current Safe Nonce` are returned.
        :param trusted: By default `True`, just txs that are trusted are returned (with at least one confirmation,
        sent by a delegate or indexed). With `False` all txs are returned
        :return: List with tx hashes sorted by date (newest first)
        """

        # If tx is not mined, get the execution date of a tx mined with the same nonce
        case = Case(When(ethereum_tx__block=None,
                         then=MultisigTransaction.objects.filter(
                             safe=OuterRef('safe'),
                             nonce=OuterRef('nonce')).exclude(
                                 ethereum_tx__block=None).values(
                                     'ethereum_tx__block__timestamp')),
                    default=F('ethereum_tx__block__timestamp'))
        multisig_safe_tx_ids = MultisigTransaction.objects.filter(
            safe=safe_address).annotate(
                execution_date=case,
                block=F('ethereum_tx__block_id'),
                safe_nonce=F('nonce'),
            ).values(
                'safe_tx_hash', 'execution_date', 'created', 'block',
                'safe_nonce'
            )  # Tricky, we will merge SafeTx hashes with EthereumTx hashes
        # Block is needed to get stable ordering

        if not queued:  # Filter out txs with nonce >= Safe nonce
            last_nonce_query = MultisigTransaction.objects.filter(
                safe=safe_address).exclude(
                    ethereum_tx=None).order_by('-nonce').values('nonce')
            multisig_safe_tx_ids = multisig_safe_tx_ids.filter(
                nonce__lte=Subquery(last_nonce_query[:1]))

        if trusted:  # Just show trusted transactions
            multisig_safe_tx_ids = multisig_safe_tx_ids.filter(trusted=True)

        if executed:
            multisig_safe_tx_ids = multisig_safe_tx_ids.exclude(
                ethereum_tx__block=None)

        # Get module txs
        module_tx_ids = ModuleTransaction.objects.filter(
            safe=safe_address).annotate(
                execution_date=F('internal_tx__ethereum_tx__block__timestamp'),
                block=F('internal_tx__ethereum_tx__block_id'),
                safe_nonce=Value(0, output_field=Uint256Field()),
            ).distinct().values('internal_tx__ethereum_tx_id',
                                'execution_date', 'created', 'block',
                                'safe_nonce')

        multisig_hashes = MultisigTransaction.objects.filter(
            safe=safe_address).exclude(
                ethereum_tx=None).values('ethereum_tx_id')
        module_hashes = ModuleTransaction.objects.filter(
            safe=safe_address).values('internal_tx__ethereum_tx_id')
        multisig_and_module_hashes = multisig_hashes.union(module_hashes)

        # Get incoming/outgoing tokens not included on Multisig or Module txs.
        # Outgoing tokens can be triggered by another user after the Safe calls `approve`,
        # which is why they will not always appear as a MultisigTransaction
        event_tx_ids = EthereumEvent.objects.erc20_and_721_events().filter(
            Q(arguments__to=safe_address)
            | Q(arguments__from=safe_address)).exclude(
                ethereum_tx__in=multisig_and_module_hashes).annotate(
                    execution_date=F('ethereum_tx__block__timestamp'),
                    created=F('ethereum_tx__block__timestamp'),
                    block=F('ethereum_tx__block_id'),
                    safe_nonce=Value(0, output_field=Uint256Field()),
                ).distinct().values('ethereum_tx_id', 'execution_date',
                                    'created', 'block', 'safe_nonce')

        # Get incoming txs not included on Multisig or Module txs
        internal_tx_ids = InternalTx.objects.filter(
            call_type=EthereumTxCallType.CALL.value,
            value__gt=0,
            to=safe_address,
        ).exclude(ethereum_tx__in=multisig_and_module_hashes).annotate(
            execution_date=F('ethereum_tx__block__timestamp'),
            created=F('ethereum_tx__block__timestamp'),
            block=F('ethereum_tx__block_id'),
            safe_nonce=Value(0, output_field=Uint256Field()),
        ).distinct().values('ethereum_tx_id', 'execution_date', 'created',
                            'block', 'safe_nonce')

        # Tricky, we merge SafeTx hashes with EthereumTx hashes
        queryset = multisig_safe_tx_ids.distinct().union(event_tx_ids).union(
            internal_tx_ids).union(module_tx_ids).order_by(
                '-execution_date', '-safe_nonce', 'block', '-created')
        # Order by block because `block_number < NULL`, so mined txs take preference,
        # and by `created` to keep the ordering stable for unexecuted transactions,
        # since they share the same `execution_date` as the mined tx
        return queryset
Example #10
def testing_annotations():
    cond1 = Conducente(nome="Luke Skywalker", nick="Luke", attivo=True)
    cond1.save()
    cond2 = Conducente(nome="Anakin Skywalker", nick=None, attivo=True)
    cond2.save()

    lrif = Luogo(nome='Home')
    lrif.save()

    Viaggio(annullato=True,
            data=date_enforce(datetime.date(2016, 1, 11)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=1000,
            conducente=cond1, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 1, 11)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=10,
            conducente=cond1, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 1, 12)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=10,
            conducente=cond1, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 1, 12)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=30,
            conducente=cond2, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 2, 12)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=10,
            conducente=cond1, conducente_confermato=True,
            ).save()

    # Testing the query
    qs = Viaggio.objects.filter(annullato=False,
                                data__gte=date_enforce(datetime.date(2016, 1, 11)),
                                data__lt=date_enforce(datetime.date(2016, 2, 14)))
    qs = (qs
          .order_by()
          .annotate(year=Extract('data', lookup_name='year'),
                    month=Extract('data', lookup_name='month'))
          .order_by('conducente')
          .values('year', 'month')
          .annotate(tot=Sum('prezzo'),
                    commissione=Sum(
                        Case(When(tipo_commissione='F', then=F('commissione')),
                             When(tipo_commissione='P',
                                  then=F('commissione') * F('prezzo') / Value(100))),
                        output_field=DecimalField(max_digits=9,
                                                  decimal_places=2,
                                                  default=0),
                    ),
                    conducente__nome=F('conducente__nome'))
          .order_by('conducente__nome'))
    # print(qs.query)
    qs = list(qs)
    pprint(qs)
    assert len(qs) == 3, "We should have 3 rows: Luke with 2 months, Anakin with 1"

    luke_runs = list(filter(lambda x: x['conducente__nome'] == 'Luke Skywalker', qs))
    assert len(luke_runs) == 2
    assert luke_runs[0]['month'] == 1 and luke_runs[0]['tot'] == 20
    assert luke_runs[1]['month'] == 2 and luke_runs[1]['tot'] == 10

    anakin_runs = list(filter(lambda x: x['conducente__nome'] == 'Anakin Skywalker', qs))
    assert len(anakin_runs) == 1
    assert anakin_runs[0]['month'] == 1 and anakin_runs[0]['tot'] == 30
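The year/month pair could also be grouped with TruncMonth (available since Django 1.10), which keeps the grouping key as a single date. A sketch of the equivalent query head, leaving the commission Case unchanged:

from django.db.models.functions import TruncMonth

qs = (Viaggio.objects
      .filter(annullato=False)
      .annotate(month=TruncMonth('data'))   # first day of each month
      .values('month', 'conducente__nome')
      .annotate(tot=Sum('prezzo'))
      .order_by('conducente__nome', 'month'))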
Example #11
def product_type_counts(request):
    form = ProductTypeCountsForm()
    opened_in_period_list = []
    oip = None
    cip = None
    aip = None
    all_current_in_pt = None
    top_ten = None
    pt = None
    today = timezone.now()
    first_of_month = today.replace(day=1,
                                   hour=0,
                                   minute=0,
                                   second=0,
                                   microsecond=0)
    mid_month = first_of_month.replace(day=15,
                                       hour=23,
                                       minute=59,
                                       second=59,
                                       microsecond=999999)
    end_of_month = mid_month.replace(day=monthrange(today.year,
                                                    today.month)[1],
                                     hour=23,
                                     minute=59,
                                     second=59,
                                     microsecond=999999)
    start_date = first_of_month
    end_date = end_of_month

    if request.method == 'GET' and 'month' in request.GET and 'year' in request.GET and 'product_type' in request.GET:
        form = ProductTypeCountsForm(request.GET)
        if form.is_valid():
            pt = form.cleaned_data['product_type']
            month = int(form.cleaned_data['month'])
            year = int(form.cleaned_data['year'])
            first_of_month = first_of_month.replace(month=month, year=year)

            month_requested = datetime(year, month, 1)

            end_of_month = month_requested.replace(
                day=monthrange(month_requested.year,
                               month_requested.month)[1],
                hour=23,
                minute=59,
                second=59,
                microsecond=999999)
            start_date = first_of_month
            start_date = datetime(start_date.year,
                                  start_date.month,
                                  start_date.day,
                                  tzinfo=timezone.get_current_timezone())
            end_date = end_of_month
            end_date = datetime(end_date.year,
                                end_date.month,
                                end_date.day,
                                tzinfo=timezone.get_current_timezone())

            oip = opened_in_period(start_date, end_date, pt)

            # trending data - 12 months
            for x in range(12, 0, -1):
                opened_in_period_list.append(
                    opened_in_period(start_date + relativedelta(months=-x),
                                     end_of_month + relativedelta(months=-x),
                                     pt))

            opened_in_period_list.append(oip)

            closed_in_period = Finding.objects.filter(
                mitigated__range=[start_date, end_date],
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).values('numerical_severity').annotate(
                                  Count('numerical_severity')).order_by(
                                      'numerical_severity')

            total_closed_in_period = Finding.objects.filter(
                mitigated__range=[start_date, end_date],
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).aggregate(total=Sum(
                                  Case(When(severity__in=('Critical', 'High',
                                                          'Medium', 'Low'),
                                            then=Value(1)),
                                       output_field=IntegerField())))['total']

            overall_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).values('numerical_severity').annotate(
                                  Count('numerical_severity')).order_by(
                                      'numerical_severity')

            total_overall_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).aggregate(total=Sum(
                                  Case(When(severity__in=('Critical', 'High',
                                                          'Medium', 'Low'),
                                            then=Value(1)),
                                       output_field=IntegerField())))['total']

            all_current_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).prefetch_related(
                                  'test__engagement__product',
                                  'test__engagement__product__prod_type',
                                  'test__engagement__risk_acceptance',
                                  'reporter').order_by('numerical_severity')

            top_ten = Product.objects.filter(
                engagement__test__finding__date__lte=end_date,
                engagement__test__finding__verified=True,
                engagement__test__finding__false_p=False,
                engagement__test__finding__duplicate=False,
                engagement__test__finding__out_of_scope=False,
                engagement__test__finding__mitigated__isnull=True,
                engagement__test__finding__severity__in=('Critical', 'High',
                                                         'Medium', 'Low'),
                prod_type=pt).annotate(
                    critical=Sum(
                        Case(When(
                            engagement__test__finding__severity='Critical',
                            then=Value(1)),
                             output_field=IntegerField())),
                    high=Sum(
                        Case(When(engagement__test__finding__severity='High',
                                  then=Value(1)),
                             output_field=IntegerField())),
                    medium=Sum(
                        Case(When(engagement__test__finding__severity='Medium',
                                  then=Value(1)),
                             output_field=IntegerField())),
                    low=Sum(
                        Case(When(engagement__test__finding__severity='Low',
                                  then=Value(1)),
                             output_field=IntegerField())),
                    total=Sum(
                        Case(When(engagement__test__finding__severity__in=(
                            'Critical', 'High', 'Medium', 'Low'),
                                  then=Value(1)),
                             output_field=IntegerField()))).order_by(
                                 '-critical', '-high', '-medium', '-low')[:10]

            cip = {
                'S0': 0,
                'S1': 0,
                'S2': 0,
                'S3': 0,
                'Total': total_closed_in_period
            }

            aip = {
                'S0': 0,
                'S1': 0,
                'S2': 0,
                'S3': 0,
                'Total': total_overall_in_pt
            }

            for o in closed_in_period:
                cip[o['numerical_severity']] = o['numerical_severity__count']

            for o in overall_in_pt:
                aip[o['numerical_severity']] = o['numerical_severity__count']
        else:
            messages.add_message(
                request,
                messages.ERROR,
                "Please choose month and year and the Product Type.",
                extra_tags='alert-danger')

    add_breadcrumb(title="Bi-Weekly Metrics", top_level=True, request=request)

    return render(
        request, 'dojo/pt_counts.html', {
            'form': form,
            'start_date': start_date,
            'end_date': end_date,
            'opened_in_period': oip,
            'trending_opened': opened_in_period_list,
            'closed_in_period': cip,
            'overall_in_pt': aip,
            'all_current_in_pt': all_current_in_pt,
            'top_ten': top_ten,
            'pt': pt
        })
Example #12

    def get_queryset(self):
        return super().get_queryset().annotate(has_price=Case(
            When(price=0, then=0), default=1, output_field=IntegerField()))
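A hypothetical consumer of this annotation: since has_price is 0 or 1, it can drive ordering so that zero-priced rows sink to the bottom of a listing.

# `products` stands for any queryset produced by the manager above.
products = products.order_by('-has_price', 'price')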
Example #13
def num_answered(examlog):
    # Count() skips NULLs, so the Case must leave non-complete rows as NULL;
    # a `default=0` here would make every row count.
    return (examlog.attemptlogs.values_list(
        "item",
        "content_id").order_by("completion_timestamp").distinct().aggregate(
            complete__sum=Count(Case(When(complete=True, then=1)))).get(
                "complete__sum"))
Example #14

    def download(self, award_type, agency="all", generate_since=None):
        """ Create a delta file based on award_type and agency code (or all agencies) """
        logger.info(
            "Starting generation. {}, Agency: {}".format(award_type, agency if agency == "all" else agency["name"])
        )
        award_map = AWARD_MAPPINGS[award_type]

        # Create Source and update fields to include correction_delete_ind
        source = DownloadSource(
            "transaction",
            award_map["letter_name"].lower(),
            "transactions",
            "all" if agency == "all" else agency["toptier_agency_id"],
        )
        source.query_paths.update({"correction_delete_ind": award_map["correction_delete_ind"]})
        if award_type == "Contracts":
            # Add the agency_id column to the mappings
            source.query_paths.update({"agency_id": "transaction__contract_data__agency_id"})
            source.query_paths.move_to_end("agency_id", last=False)
        source.query_paths.move_to_end("correction_delete_ind", last=False)
        source.human_names = list(source.query_paths.keys())

        # Apply filters to the queryset
        filters, agency_code = self.parse_filters(award_map["award_types"], agency)
        source.queryset = VALUE_MAPPINGS["transactions"]["filter_function"](filters)

        if award_type == "Contracts":
            source.queryset = source.queryset.annotate(
                correction_delete_ind=Case(
                    When(transaction__contract_data__created_at__lt=generate_since, then=Value("C")),
                    default=Value(""),
                    output_field=CharField(),
                )
            )
        else:
            indicator_field = F("transaction__assistance_data__correction_delete_indicatr")
            source.queryset = source.queryset.annotate(
                correction_delete_ind=Case(
                    When(transaction__assistance_data__updated_at__gt=generate_since, then=indicator_field),
                    When(transaction__transactiondelta__isnull=False, then=Value("C")),
                    default=indicator_field,
                    output_field=CharField(),
                )
            )

        transaction_delta_queryset = source.queryset

        _filter = {"transaction__{}__{}__gte".format(award_map["model"], award_map["date_filter"]): generate_since}
        if self.debugging_end_date:
            _filter[
                "transaction__{}__{}__lt".format(award_map["model"], award_map["date_filter"])
            ] = self.debugging_end_date

        source.queryset = source.queryset.filter(**_filter)

        # UNION the normal results to the transaction_delta results.
        source.queryset = source.queryset.union(
            transaction_delta_queryset.filter(transaction__transactiondelta__isnull=False)
        )

        # Generate file
        file_path = self.create_local_file(award_type, source, agency_code, generate_since)
        if file_path is None:
            logger.info("No new, modified, or deleted data; discarding file")
        elif not settings.IS_LOCAL:
            # Upload file to S3 and delete local version
            logger.info("Uploading file to S3 bucket and deleting local copy")
            multipart_upload(
                settings.MONTHLY_DOWNLOAD_S3_BUCKET_NAME,
                settings.USASPENDING_AWS_REGION,
                file_path,
                os.path.basename(file_path),
            )
            os.remove(file_path)

        logger.info(
            "Finished generation. {}, Agency: {}".format(award_type, agency if agency == "all" else agency["name"])
        )
Example #15
    def _get_queryset(self, cl, form_data):
        cqs = Checkin.objects.filter(
            position_id=OuterRef('pk'),
            list_id=cl.pk).order_by().values('position_id').annotate(
                m=Max('datetime')).values('m')

        cqsin = cqs.filter(type=Checkin.TYPE_ENTRY)
        cqsout = cqs.filter(type=Checkin.TYPE_EXIT)

        qs = OrderPosition.objects.filter(order__event=self.event).annotate(
            last_checked_in=Subquery(cqsin),
            last_checked_out=Subquery(cqsout),
            auto_checked_in=Exists(
                Checkin.objects.filter(
                    position_id=OuterRef('pk'),
                    list_id=cl.pk,
                    auto_checked_in=True))).prefetch_related(
                        'answers', 'answers__question', 'addon_to__answers',
                        'addon_to__answers__question').select_related(
                            'order', 'item', 'variation', 'addon_to',
                            'order__invoice_address', 'voucher', 'seat')

        if not cl.all_products:
            qs = qs.filter(
                item__in=cl.limit_products.values_list('id', flat=True))

        if cl.subevent:
            qs = qs.filter(subevent=cl.subevent)

        if form_data.get('date_from'):
            dt = make_aware(
                datetime.combine(
                    dateutil.parser.parse(form_data['date_from']).date(),
                    time(hour=0, minute=0, second=0)), self.event.timezone)
            qs = qs.filter(subevent__date_from__gte=dt)

        if form_data.get('date_to'):
            dt = make_aware(
                datetime.combine(
                    dateutil.parser.parse(form_data['date_to']).date() +
                    timedelta(days=1), time(hour=0, minute=0, second=0)),
                self.event.timezone)
            qs = qs.filter(subevent__date_from__lt=dt)

        o = ()
        if self.event.has_subevents and not cl.subevent:
            o = ('subevent__date_from', 'subevent__name')

        sort = form_data.get('sort') or 'name'
        if sort == 'name':
            qs = qs.order_by(
                *o,
                Coalesce(
                    NullIf('attendee_name_cached', Value('')),
                    NullIf('addon_to__attendee_name_cached', Value('')),
                    NullIf('order__invoice_address__name_cached', Value('')),
                    'order__code'))
        elif sort == 'code':
            qs = qs.order_by(*o, 'order__code')
        elif sort.startswith('name:'):
            part = sort[5:]
            qs = qs.annotate(resolved_name=Case(
                When(attendee_name_cached__ne='', then='attendee_name_parts'),
                When(addon_to__attendee_name_cached__isnull=False,
                     addon_to__attendee_name_cached__ne='',
                     then='addon_to__attendee_name_parts'),
                default='order__invoice_address__name_parts',
            )).annotate(resolved_name_part=JSONExtract(
                'resolved_name', part)).order_by(*o, 'resolved_name_part')

        if form_data.get('attention_only'):
            qs = qs.filter(
                Q(item__checkin_attention=True)
                | Q(order__checkin_attention=True))

        if not cl.include_pending:
            qs = qs.filter(order__status=Order.STATUS_PAID)
        else:
            qs = qs.filter(order__status__in=(Order.STATUS_PAID,
                                              Order.STATUS_PENDING))

        return qs
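The heart of this queryset is the correlated-subquery pattern: group the check-ins by position inside the subquery, take the latest timestamp, and attach it to each outer row. A stripped-down sketch under the same assumptions (the Checkin and OrderPosition models from the example above):

from django.db.models import Exists, Max, OuterRef, Subquery

# latest entry check-in per position, correlated on the outer row's pk
last_entry = (
    Checkin.objects.filter(position_id=OuterRef('pk'), type=Checkin.TYPE_ENTRY)
    .order_by()                      # clear default ordering before grouping
    .values('position_id')           # group by position
    .annotate(m=Max('datetime'))
    .values('m')                     # the subquery must return a single column
)
positions = OrderPosition.objects.annotate(
    last_checked_in=Subquery(last_entry),
    has_checkin=Exists(Checkin.objects.filter(position_id=OuterRef('pk'))),
)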
Ejemplo n.º 16
0
def analisis(request):
    data = {}
    check = Personal.objects.get(usuario_personal=request.user)

    respuesta = Choice.objects.filter(usuario_choice__company=check.company)

    print(respuesta)
    if Choice.objects.filter(
            usuario_choice__usuario_personal=request.user).exists():

        respuesta2 = Question.objects.all().values('type').annotate(
            total=Count("choice", filter=Q(choice__company=check.company)),
            total_true=Sum(
                Case(When(choice__company=check.company,
                          choice__answer=True,
                          then=1.0),
                     default=0,
                     output_field=FloatField())),
            total_false=Sum(
                Case(When(choice__company=check.company,
                          choice__answer=False,
                          then=1.0),
                     default=0,
                     output_field=FloatField())))

        csv = open("Reporte.csv", "w")
        csv.write(
            "Dominio; Total verdaderas; Total Falsas; Porcentaje Cumplimiento; Porcentaje no cumplimiento; Recomendaciones\n"
        )
        for x in respuesta2:
            print(x)
            lista = Analisis()
            lista.type = x['type']
            csv.write(str(x['type']) + ";")

            lista.total_t = x['total_true']
            csv.write(str(x['total_true']) + ";")

            lista.total_f = x['total_false']
            csv.write(str(x['total_false']) + ";")
            # guard against division by zero when a type has no choices
            total = x['total']
            lista.percentaje_t = int(x['total_true'] / total * 100) if total else 0
            csv.write(str(lista.percentaje_t) + ";")

            lista.percentaje_f = int(x['total_false'] / total * 100) if total else 0
            csv.write(str(lista.percentaje_f) + ";")
            lista.save()
            # single if/elif chain: exactly one recommendation (or a blank
            # cell) is written per row
            if 0 <= lista.percentaje_t < 50 and lista.type == "D1":
                csv.write(
                    "Se recomienda la realización de una política de seguridad informática, que debe contener los conceptos de seguridad de la información, una estructura para establecer los objetivos y las formas de control y el compromiso de la dirección con políticas.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D2":
                csv.write(
                    "Se recomienda establecer una estructura para implementar la seguridad de la información en una empresa y de esta manera gestionarla de manera adecuada. Para ello, las actividades de seguridad de la información deben ser coordinadas por representantes de la organización que deben tener responsabilidad bien definidas y proteger las informaciones de carácter confidencial.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D3":
                csv.write(
                    "Se recomienda identificar y clasificar los activos, de modo que un inventario pueda ser estructurado y posteriormente mantenido. Además, deben seguir reglas documentadas, que definen que tipo de uso se permite con dichos activos.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D4":
                csv.write(
                    "Se recomienda la revisión de seguridad de acuerdo con políticas y procedimiento establecidos por la organización o por otra parte que los empleados de la organización reciban entrenamiento adecuado de seguridad correspondiente a sus funciones.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D5":
                csv.write(
                    "Se recomienda que los equipos y instalaciones de procesamiento de información critica o sensible deben mantenerse en áreas seguras, con niveles y controles de acceso apropiados, incluyendo protección contra amenazas físicas y ambientales.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D6":
                csv.write(
                    "Se recomienda que los procedimientos y responsabilidades por la gestión y operación de todos los recursos de procesamiento de la información estén definidos. Esto incluye la gestión de servicio tercerizados, la planificación de recurso del sistema para minimizar el riesgo de fallas, la creación de procedimientos para la generación de copias de seguridad y su recuperación.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D7":
                csv.write(
                    "Se recomienda que los recursos de procesamiento de la información y los procesos de negocios deben ser controlado con base en los requisitos de negocio y en la seguridad de la información. Debe garantizarse el acceso de cada usuario autorizado y prevenido el acceso no autorizados a los sistema de información de manera que evite daños a documentos y recursos de procesamiento de la información que estén fuera de alcance de cualquiera.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D8":
                csv.write(
                    "Se recomienda que los requisitos de seguridad de los sistema de información debe ser identificados y acordados antes de su desarrollo y/o de su implementación, para que así puedan ser protegidos para el mantenimiento de su confidencialidad, autenticidad o integridad por medio criptográficos.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D9":
                csv.write(
                    "Se recomienda que los procedimientos formales de registro y escalonamiento deben ser establecidos y los empleados, proveedores y terceros deber ser conscientes de los procedimientos para notificar los eventos de seguridad de la información para asegurar que se comuniquen lo más rápido posible y corregidos en tiempo hábil.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D10":
                csv.write(
                    "Se recomienda que los planes de continuidad del negocio deben ser desarrollados e implementados, con el fin de impedir la interrupción de las actividades del negocio y asegurar que las operaciones esenciales sean rápidamente recuperadas.\n"
                )
            elif 0 <= lista.percentaje_t < 50 and lista.type == "D11":
                csv.write(
                    "Se recomienda realizar una revisión para evitar la violación de cualquier ley criminal o civil, garantizando estatutos, regulación u obligaciones contractuales y de cualesquiera requisitos de seguridad de la información. En caso necesario, la empresa puede contratar una consultoría especializada, para que verifique su cumplimiento y adherencia a los requisitos legales y reglamentarios.\n"
                )
            else:
                csv.write("\n")

        csv.close()

        # slice the base queryset so the second page starts where the first ends
        object_list = Analisis.objects.all().order_by('-id')
        data['object_list'] = object_list[:11]
        data['object_list_2'] = object_list[11:22]
    return render(request, "analisis.html", data)
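The conditional sums above can be written more tersely on Django 2.0+, where aggregates accept a filter argument; a sketch of the equivalent annotation against the same models, with a division-by-zero guard for the percentages:

from django.db.models import Count, Q

rows = Question.objects.values('type').annotate(
    total=Count('choice', filter=Q(choice__company=check.company)),
    total_true=Count('choice', filter=Q(choice__company=check.company,
                                        choice__answer=True)),
)
for row in rows:
    pct_true = row['total_true'] / row['total'] * 100 if row['total'] else 0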
Ejemplo n.º 17
0
    def handle(self, *args, **options):
        self.discord_token = getattr(settings, 'DISCORD_BOT_TOKEN')

        discord_guilds = self.get_discord_resource('/users/@me/guilds')
        bdo_guild_mapping = {
            g.discord_id: g
            for g in Guild.objects.all() if g.discord_id
        }
        bdo_guild_role_mapping = {
            str(r.id): r
            for r in GuildRole.objects.all()
        }

        for guild in discord_guilds:
            if guild['id'] not in bdo_guild_mapping:
                continue

            # Only sync guilds in the mapping
            discord_roles = self.get_discord_resource('/guilds/{0}'.format(
                guild['id']))['roles']
            # Sort discord roles by hierarchical position
            discord_roles = sorted(discord_roles, key=lambda r: r['position'])
            # Sort BDO roles by id, yielding (guild_role_id, discord role name) tuples
            mapped_roles = bdo_guild_mapping[guild['id']].discord_roles
            mapped_bdo_roles = [
                (guild_role_id, mapped_roles[guild_role_id])
                for guild_role_id in sorted(bdo_guild_role_mapping.keys())
                if guild_role_id in mapped_roles
            ]

            # Convert the stored Discord role name to an id.
            # Take the first role matched. Role names can be duplicated but
            # this will take the highest ranking role.
            sync_roles = OrderedDict()

            for guild_role_id, discord_role_name in mapped_bdo_roles:
                for discord_role in discord_roles:
                    if discord_role['name'].lower() == discord_role_name.lower(
                    ):
                        sync_roles[discord_role['id']] = guild_role_id
                        break

            discord_members = self.get_discord_resource(
                '/guilds/{0}/members'.format(guild['id']),
                params={'limit': 1000})

            cached_members = {}
            members_by_roles = defaultdict(list)

            for discord_member in discord_members:
                if not set(discord_member['roles']) & set(sync_roles.keys()):
                    continue

                for discord_role_id, guild_role_id in sync_roles.items():
                    if discord_role_id in discord_member['roles']:
                        cached_members[discord_member['user']
                                       ['id']] = guild_role_id
                        members_by_roles[guild_role_id].append(
                            discord_member['user']['id'])
                        break

            bdo_guild = bdo_guild_mapping[guild['id']]
            bdo_guild.discord_members = cached_members

            # Update logo URLs
            bdo_guild.logo_url = "https://cdn.discordapp.com/icons/{guild_id}/{icon}.png".format(
                guild_id=guild['id'], icon=guild['icon'])

            bdo_guild.save()

            # Prune old members
            existing_users = Profile.objects.filter(
                discord_id__in=cached_members.keys())
            prune_members = (
                GuildMember.objects.filter(guild=bdo_guild).exclude(
                    role_id=1)  # Exclude GMs
                .exclude(user__in=existing_users))
            pending_war = bdo_guild.pending_war()

            if pending_war is not None:
                # Delete war attendance
                deleted_member_ids = prune_members.values_list('user_id',
                                                               flat=True)
                (WarAttendance.objects.filter(
                    war=pending_war,
                    user_profile_id__in=deleted_member_ids).delete())

            deleted = prune_members.delete()

            # Update existing member's roles
            outdated_members_q = (
                Q(user__discord_id__in=discord_ids) & ~Q(role_id=role_id)
                for role_id, discord_ids in members_by_roles.items())

            updated = 0  # stays 0 when there are no existing users to update
            if existing_users.exists():
                members_qs = existing_users.filter(id=OuterRef('user_id'))
                updated = (
                    GuildMember.objects.filter(guild=bdo_guild).filter(
                        reduce(lambda a, b: a | b,
                               outdated_members_q)).exclude(
                                   role_id=1)  # Exclude GMs
                    .annotate(discord_id=Subquery(
                        members_qs.values('discord_id')[:1])).
                    update(role=Case(*[
                        When(discord_id__in=discord_ids,
                             then=bdo_guild_role_mapping[role_id].id)
                        for role_id, discord_ids in members_by_roles.items()
                    ])))

            # Add new members
            new_member_profiles = existing_users.exclude(
                membership__guild=bdo_guild)
            new_members = [
                GuildMember(guild=bdo_guild,
                            user=profile,
                            role=bdo_guild_role_mapping[cached_members[
                                profile.discord_id]])
                for profile in new_member_profiles
            ]

            new_stats = [
                AggregatedGuildMemberWarStats(guild=bdo_guild,
                                              user_profile=profile)
                for profile in new_member_profiles.exclude(
                    aggregatedmemberstats__guild_id=bdo_guild.id)
            ]
            GuildMember.objects.bulk_create(new_members)
            AggregatedGuildMemberWarStats.objects.bulk_create(new_stats)

            logger.info(
                "Synchronized Guild `{0}`, found {1} discord members, removed {2} members, "
                "updated {3} members, added {4} members, added {5} aggregated member stats"
                .format(bdo_guild, len(cached_members), deleted[0], updated,
                        len(new_members), len(new_stats)))
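The role update above performs the whole re-assignment in a single UPDATE by building one When per role. In isolation, and assuming discord_id were a real column on GuildMember rather than a Subquery annotation, the pattern would look like this:

from django.db.models import Case, F, When

# members_by_roles maps guild_role_id -> [discord_id, ...]
GuildMember.objects.filter(guild=bdo_guild).update(
    role_id=Case(
        *[When(discord_id__in=ids, then=role_id)
          for role_id, ids in members_by_roles.items()],
        default=F('role_id'),  # unmatched rows keep their current role
    )
)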
Ejemplo n.º 18
0
def section_demand_change(section_id, updated_at):
    """
    This function should be called when a section's demand changes (i.e. the number of
    active registrations changes, or the section's status is updated). It updates the
    `PcaDemandDistributionEstimate` model and `current_demand_distribution_estimate`
    cache to reflect the demand change.

    :param: section_id: the id of the section involved in the demand change
    :param: updated_at: the datetime at which the demand change occurred
    """
    section = Section.objects.get(id=section_id)
    semester = section.semester
    if semester != get_current_semester():
        return

    with transaction.atomic():
        create_new_distribution_estimate = False
        sentinel = object()
        current_demand_distribution_estimate = cache.get(
            "current_demand_distribution_estimate", sentinel)
        if (current_demand_distribution_estimate is sentinel
                or current_demand_distribution_estimate.semester != semester):
            create_new_distribution_estimate = True

        sections_qs = (Section.objects.filter(
            extra_metrics_section_filters,
            course__semester=semester).select_for_update().annotate(
                raw_demand=Case(
                    When(
                        Q(capacity__gt=0),
                        then=(Cast(
                            "registration_volume",
                            models.FloatField(),
                        ) / Cast("capacity", models.FloatField())),
                    ),
                    default=None,
                    output_field=models.FloatField(),
                ), ).order_by("raw_demand"))

        try:
            lowest_demand_section = sections_qs[:1].get()
            # QuerySets do not support negative indexing, so reverse the
            # ordering to fetch the highest-demand section
            highest_demand_section = sections_qs.reverse()[:1].get()
        except Section.DoesNotExist:
            return  # Don't add a PcaDemandDistributionEstimate -- there are no valid sections yet

        if (create_new_distribution_estimate
                or highest_demand_section.raw_demand >
                current_demand_distribution_estimate.highest_raw_demand
                or lowest_demand_section.raw_demand <
                current_demand_distribution_estimate.lowest_raw_demand):
            closed_sections_demand_values = np.asarray(
                sections_qs.filter(status="C").values_list("raw_demand",
                                                           flat=True))
            # "The term 'closed sections positive raw demand values' is
            # sometimes abbreviated as 'csprdv'
            csrdv_frac_zero, fit_shape, fit_loc, fit_scale = (None, None, None,
                                                              None)
            if len(closed_sections_demand_values) > 0:
                closed_sections_positive_demand_values = closed_sections_demand_values[
                    np.where(closed_sections_demand_values > 0)]
                csrdv_frac_zero = 1 - len(
                    closed_sections_positive_demand_values) / len(
                        closed_sections_demand_values)
                if len(closed_sections_positive_demand_values) > 0:
                    fit_shape, fit_loc, fit_scale = stats.lognorm.fit(
                        closed_sections_positive_demand_values)
            new_demand_distribution_estimate = PcaDemandDistributionEstimate(
                semester=semester,
                highest_demand_section=highest_demand_section,
                highest_demand_section_volume=highest_demand_section.
                registration_volume,
                lowest_demand_section=lowest_demand_section,
                lowest_demand_section_volume=lowest_demand_section.
                registration_volume,
                csrdv_frac_zero=csrdv_frac_zero,
                csprdv_lognorm_param_shape=fit_shape,
                csprdv_lognorm_param_loc=fit_loc,
                csprdv_lognorm_param_scale=fit_scale,
            )
            add_drop_period = get_or_create_add_drop_period(semester)
            new_demand_distribution_estimate.save(
                add_drop_period=add_drop_period)
            new_demand_distribution_estimate.created_at = updated_at
            new_demand_distribution_estimate.save(
                add_drop_period=add_drop_period)
            cache.set(
                "current_demand_distribution_estimate",
                new_demand_distribution_estimate,
                timeout=(add_drop_period.estimated_end -
                         add_drop_period.estimated_start).total_seconds() //
                ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES,
            )  # set timeout to roughly follow ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES
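The ratio annotation is worth isolating: the Case yields NULL when capacity is 0, so the SQL never divides by zero, and the min/max rows are fetched by reversing the ordering (negative slicing is not supported on querysets). A reduced sketch against the same Section model:

from django.db import models
from django.db.models import Case, Q, When
from django.db.models.functions import Cast

sections = Section.objects.annotate(
    raw_demand=Case(
        # NULL when capacity is 0, avoiding a division-by-zero in SQL
        When(Q(capacity__gt=0),
             then=Cast('registration_volume', models.FloatField())
             / Cast('capacity', models.FloatField())),
        default=None,
        output_field=models.FloatField(),
    )
).order_by('raw_demand')
lowest = sections.first()
highest = sections.reverse().first()  # QuerySets reject sections[-1:]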
Ejemplo n.º 19
0
 def annotate_activity(queryset):
     return queryset.annotate(is_active_today=Case(
         When(Q(last_activity__gte=time_threshold(hours=24)), then=True),
         default=False,
         output_field=BooleanField(),
     ))
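Because the flag is annotated with a BooleanField output, it can be filtered on directly; a usage sketch (User here is an assumed model with a last_activity field):

# users with activity in the last 24 hours
recent = annotate_activity(User.objects.all()).filter(is_active_today=True)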
Ejemplo n.º 20
0
def getClassTypeMonthlyData(year=None, series=None, typeLimit=None):
    '''
    To break out by class type and month simultaneously, get data for each
    series and aggregate by class type.
    '''

    # If no year specified, report current year to date.
    if not year:
        year = timezone.now().year

    role_list = DanceRole.objects.distinct()

    # Report data on all students registered unless otherwise specified
    if series not in ['registrations','studenthours'] and series not in [x.pluralName for x in role_list]:
        series = 'registrations'

    when_all = {
        'eventregistration__dropIn': False,
        'eventregistration__cancelled': False,
    }

    annotations = {'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=FloatField()))}

    for this_role in role_list:
        annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(eventregistration__role=this_role)),then=1),output_field=FloatField()))

    series_counts = Series.objects.filter(year=year).annotate(**annotations).annotate(studenthours=F('duration') * F('registrations')).select_related('classDescription__danceTypeLevel__danceType','classDescription__danceTypeLevel')

    # If no limit specified on number of types, then do not aggregate dance types.
    # Otherwise, report the typeLimit most common types individually, and report all
    # others as other.  This gets tuples of names and counts
    dance_type_counts = list(Counter(
        [x.classDescription.danceTypeLevel for x in series_counts]).items())
    dance_type_counts.sort(key=lambda k: k[1], reverse=True)

    if typeLimit:
        dance_types = [x[0] for x in dance_type_counts[:typeLimit]]
    else:
        dance_types = [x[0] for x in dance_type_counts]

    results = []

    # Month by month, calculate the result data
    for month in range(1,13):
        this_month_result = {
            'month': month,
            'month_name': month_name[month],
        }
        for dance_type in dance_types:
            this_month_result[dance_type.__str__()] = \
                series_counts.filter(classDescription__danceTypeLevel=dance_type,month=month).aggregate(Sum(series))['%s__sum' % series]

        if typeLimit:
            this_month_result['Other'] = \
                series_counts.filter(month=month).exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(Sum(series))['%s__sum' % series]

        results.append(this_month_result)

    # Now get totals
    totals_result = {
        'month': 'Totals',
        'month_name': 'totals',
    }

    for dance_type in dance_types:
        totals_result[dance_type.__str__()] = \
            series_counts.filter(classDescription__danceTypeLevel=dance_type).aggregate(Sum(series))['%s__sum' % series]

    if typeLimit:
        totals_result['Other'] = \
            series_counts.exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(Sum(series))['%s__sum' % series]

    results.append(totals_result)

    return results
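Since series is a string chosen at runtime, the aggregate result is read back through the '<name>__sum' key that aggregate() generates. A one-line illustration of that convention:

from django.db.models import Sum

# Sum('registrations') is returned under the key 'registrations__sum'
total = series_counts.aggregate(Sum('registrations'))['registrations__sum']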
Ejemplo n.º 21
0
class DjangoChoices(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_numeric_class_values(self):
        self.assertEqual(NumericTestClass.Item_0, 0)
        self.assertEqual(NumericTestClass.Item_1, 1)
        self.assertEqual(NumericTestClass.Item_2, 2)
        self.assertEqual(NumericTestClass.Item_3, 3)

    def test_class_labels(self):
        self.assertEqual(StringTestClass.labels.empty, "")
        self.assertEqual(NumericTestClass.labels.Item_1, "Item 1")
        self.assertEqual(NumericTestClass.labels.Item_2, "Item 2")
        self.assertEqual(NumericTestClass.labels.Item_3, "Item 3")

    def test_class_labels_inherited(self):
        self.assertEqual(SubClass2.labels.Item_2, "Item 2")
        self.assertEqual(SubClass2.labels.Item_6, "Item 6")

    def test_class_values(self):
        self.assertEqual(SubClass1.values[SubClass1.Item_1], "Item 1")
        self.assertEqual(SubClass1.values[SubClass1.Item_4], "Item 4")
        self.assertEqual(SubClass1.values[SubClass1.Item_5], "Item 5")

    def test_class_values_order(self):
        self.assertEqual(list(StringTestClass.values), ["", "O", "T", "H"])

    def test_numeric_class_order(self):
        choices = NumericTestClass.choices
        self.assertEqual(choices[0][0], 0)
        self.assertEqual(choices[1][0], 1)
        self.assertEqual(choices[2][0], 2)
        self.assertEqual(choices[3][0], 3)

    def test_string_class_values(self):
        self.assertEqual(StringTestClass.One, "O")
        self.assertEqual(StringTestClass.Two, "T")
        self.assertEqual(StringTestClass.Three, "H")

    def test_string_class_order(self):
        choices = StringTestClass.choices
        self.assertEqual(choices[0][0], "")
        self.assertEqual(choices[1][0], "O")
        self.assertEqual(choices[2][0], "T")
        self.assertEqual(choices[3][0], "H")

    def test_sub_class_level_1_choices(self):
        choices = SubClass1.choices
        self.assertEqual(choices[0][0], 0)
        self.assertEqual(choices[4][0], 4)
        self.assertEqual(choices[5][0], 5)

    def test_sub_class_level_1_values(self):
        self.assertEqual(SubClass1.Item_1, 1)
        self.assertEqual(SubClass1.Item_4, 4)
        self.assertEqual(SubClass1.Item_5, 5)

    def test_sub_class_level_2_choices(self):
        choices = SubClass2.choices
        self.assertEqual(choices[0][0], 0)
        self.assertEqual(choices[4][0], 4)
        self.assertEqual(choices[6][0], 6)
        self.assertEqual(choices[7][0], 7)

    def test_sub_class_level_2_values(self):
        self.assertEqual(SubClass2.Item_1, 1)
        self.assertEqual(SubClass2.Item_5, 5)
        self.assertEqual(SubClass2.Item_6, 6)
        self.assertEqual(SubClass2.Item_7, 7)

    def test_sub_class_name(self):
        self.assertEqual(NumericTestClass.__name__, "NumericTestClass")
        self.assertEqual(SubClass2.__name__, "SubClass2")

    def test_numeric_class_validator(self):
        from django.core.exceptions import ValidationError

        self.assertEqual(None, NumericTestClass.validator(1))
        self.assertEqual(None, NumericTestClass.validator(2))
        self.assertEqual(None, NumericTestClass.validator(3))

        self.assertRaises(ValidationError, NumericTestClass.validator, 4)
        self.assertRaises(ValidationError, NumericTestClass.validator, 5)
        self.assertRaises(ValidationError, NumericTestClass.validator, 6)
        self.assertRaises(ValidationError, NumericTestClass.validator, 7)

    def test_validation_error_message(self):
        from django.core.exceptions import ValidationError

        message = ("Select a valid choice. 4 is not "
                   "one of the available choices.")

        self.assertRaisesRegex(ValidationError, message,
                               NumericTestClass.validator, 4)

    def test_subclass1_validator(self):
        from django.core.exceptions import ValidationError

        self.assertEqual(None, SubClass1.validator(1))
        self.assertEqual(None, SubClass1.validator(2))
        self.assertEqual(None, SubClass1.validator(3))
        self.assertEqual(None, SubClass1.validator(4))
        self.assertEqual(None, SubClass1.validator(5))

        self.assertRaises(ValidationError, SubClass1.validator, 6)
        self.assertRaises(ValidationError, SubClass1.validator, 7)

    def test_subclass_2_validator(self):
        from django.core.exceptions import ValidationError

        self.assertEqual(None, SubClass2.validator(1))
        self.assertEqual(None, SubClass2.validator(2))
        self.assertEqual(None, SubClass2.validator(3))
        self.assertEqual(None, SubClass2.validator(4))
        self.assertEqual(None, SubClass2.validator(5))
        self.assertEqual(None, SubClass2.validator(6))
        self.assertEqual(None, SubClass2.validator(7))

        self.assertRaises(ValidationError, SubClass2.validator, 8)

    def test_empty_value_class(self):
        choices = EmptyValueClass.choices
        self.assertEqual(choices[0][0], "Option1")
        self.assertEqual(choices[1][0], "Option2")
        self.assertEqual(choices[2][0], "Option3")

    def test_null_boolean_value_class(self):
        choices = NullBooleanValueClass.choices
        self.assertEqual(choices[0][0], None)
        self.assertEqual(choices[1][0], True)
        self.assertEqual(choices[2][0], False)
        self.assertEqual(choices[0][1], "Pending")
        self.assertEqual(choices[1][1], "Successful")
        self.assertEqual(choices[2][1], "Failed")

    def test_deconstructible_validator(self):
        deconstructed = NumericTestClass.validator.deconstruct()
        self.assertEqual(deconstructed, ('djchoices.choices.ChoicesValidator',
                                         (NumericTestClass.values, ), {}))

    def test_attribute_from_value(self):
        attributes = NumericTestClass.attributes
        self.assertEqual(attributes[0], 'Item_0')
        self.assertEqual(attributes[1], 'Item_1')
        self.assertEqual(attributes[2], 'Item_2')
        self.assertEqual(attributes[3], 'Item_3')

    def test_attribute_from_value_duplicates(self):
        with self.assertRaises(ValueError):
            DuplicateValuesClass.attributes

    def test_choice_item_order(self):
        choices = OrderedChoices.choices
        self.assertEqual(choices[0][0], 'b')
        self.assertEqual(choices[1][0], 'a')

    def test_get_choices(self):
        choices_class = NullBooleanValueClass

        self.assertEqual("Pending", choices_class.get_choice(None).label)
        self.assertEqual("Successful", choices_class.get_choice(True).label)
        self.assertEqual("Failed", choices_class.get_choice(False).label)

    def test_get_extra_attributes(self):
        choices_class = ExtraAttributeChoices

        self.assertEqual(
            "Option1 help text",
            choices_class.get_choice(choices_class.Option1).help_text)

        self.assertEqual(
            "Option2 help text",
            choices_class.get_choice(choices_class.Option2).help_text)

        self.assertEqual(
            "RegexValidator",
            choices_class.get_choice(
                choices_class.Option2).validator_class_name)

    def test_get_extra_attributes_unknown_attribute_throws_error(self):
        choices_class = ExtraAttributeChoices

        with self.assertRaises(AttributeError):
            choices_class.get_choice(choices_class.Option1).unknown_attribute

    def test_repr(self):
        choices_class = ExtraAttributeChoices
        repr_string = repr(choices_class.get_choice(choices_class.Option2))

        self.assertIn("<ChoiceItem value=1 label=None order=22", repr_string)
        self.assertIn("validator_class_name='RegexValidator'", repr_string)
        self.assertIn("help_text='Option2 help text'", repr_string)

    def test_iteration(self):
        """
        If this test fails it will raise:
        `TypeError: 'DjangoChoicesMeta' object is not iterable`
        """
        for _ in StringTestClass:
            pass

    def test_choices_len(self):
        self.assertEqual(len(StringTestClass), 4)

    def test_order_annotation(self):
        case = OrderedChoices.get_order_expression('dummy')

        expected = Case(When(dummy='b', then=Value(0)),
                        When(dummy='a', then=Value(1)),
                        output_field=IntegerField())

        self.assertEqual(repr(case), repr(expected))
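The expression under test is intended to be used as an annotation so rows sort in declared choice order rather than by raw value; a hypothetical usage sketch (MyModel is an assumption):

# sort rows by the position of their 'dummy' value in OrderedChoices
qs = MyModel.objects.annotate(
    _choice_order=OrderedChoices.get_order_expression('dummy')
).order_by('_choice_order')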
Ejemplo n.º 22
0
def getAveragesByClassType(startDate=None,endDate=None):

    # If a date filter was passed in GET, then apply it
    when_all = {
        'classdescription__series__eventregistration__cancelled': False,
        'classdescription__series__eventregistration__dropIn': False
    }

    timeFilters = {}
    classFilters = {}
    roleFilters = Q()

    if startDate:
        timeFilters['classdescription__series__startTime__gte'] = startDate
        classFilters['startTime__gte'] = startDate
        roleFilters = roleFilters & (Q(eventrole__event__startTime__gte=startDate) | Q(eventregistration__event__startTime__gte=startDate))
    if endDate:
        timeFilters['classdescription__series__startTime__lte'] = endDate
        classFilters['startTime__lte'] = endDate
        roleFilters = roleFilters & (Q(eventrole__event__startTime__lte=endDate) | Q(eventregistration__event__startTime__lte=endDate))

    when_all.update(timeFilters)

    role_list = DanceRole.objects.filter(roleFilters).distinct()

    annotations = {'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField()))}
    values_list = ['name', 'danceType__name','registrations']

    for this_role in role_list:
        annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(classdescription__series__eventregistration__role=this_role)),then=1),output_field=IntegerField()))
        values_list.append(this_role.pluralName)

    registration_counts = list(DanceTypeLevel.objects.annotate(**annotations).values_list(*values_list))
    class_counter = Counter([(x.classDescription.danceTypeLevel.name, x.classDescription.danceTypeLevel.danceType.name) for x in Series.objects.filter(**classFilters).distinct()])

    results = {}
    for list_item in registration_counts:
        type_name = ' '.join((str(list_item[0]),str(list_item[1])))

        results[type_name] = {
            'registrations': list_item[2],
        }
        m = 3
        for this_role in role_list:
            results[type_name]['total' + this_role.pluralName] = list_item[m]
            m += 1

    for k,count in class_counter.items():
        type_name = ' '.join((str(k[0]),str(k[1])))
        results[type_name].update({
            'series': count
        })
    for k,v in results.items():
        if results[k].get('series'):
            results[k].update({
                'avgRegistrations': (results[k]['registrations'] or 0) / float(results[k]['series']),
            })
            for this_role in role_list:
                results[k]['avg' + this_role.pluralName] = (results[k]['total' + this_role.pluralName] or 0) / float(results[k]['series'])

    return results
Ejemplo n.º 23
0
 def test_case_aggregate(self):
     agg = Sum(
         Case(When(friends__age=40, then=F('friends__age'))),
         filter=Q(friends__name__startswith='test'),
     )
     self.assertEqual(Author.objects.aggregate(age=agg)['age'], 80)
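The test combines two mechanisms: the Case picks which value feeds the Sum, while filter restricts which rows are aggregated at all. Written without the filter argument (pre-Django-2.0 style), the same aggregate folds both conditions into the When:

from django.db.models import Case, F, Q, Sum, When

agg = Sum(Case(
    When(Q(friends__age=40) & Q(friends__name__startswith='test'),
         then=F('friends__age')),
    default=0,
))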
Ejemplo n.º 24
0
def getClassCountHistogramData(cohortStart=None,cohortEnd=None):

    # Note: Bins are inclusive, and 99999 means 'or more'.  That should last us awhile.
    bins = [
        (1,1),
        (2,2),
        (3,3),
        (4,4),
        (5,5),
        (6,6),
        (7,7),
        (8,8),
        (9,9),
        (10,15),
        (16,20),
        (21,99999)]

    when_all = {
        'eventregistration__dropIn': False,
        'eventregistration__cancelled':False,
    }

    cohortFilters = {}
    roleFilters = {}

    if cohortStart:
        cohortFilters['eventregistration__event__startTime__min__gte'] = cohortStart
        roleFilters['eventregistration__event__startTime__gte'] = cohortStart

    if cohortEnd:
        cohortFilters['eventregistration__event__startTime__min__lte'] = cohortEnd
        roleFilters['eventregistration__event__startTime__lte'] = cohortEnd

    role_list = DanceRole.objects.filter(**roleFilters).distinct()

    annotations = {
        'eventregistration__event__startTime__min': Min('eventregistration__event__startTime'),
        'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),
    }
    for this_role in role_list:
        annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(eventregistration__role=this_role)),then=1),output_field=IntegerField()))

    customers = Customer.objects.annotate(**annotations).filter(**cohortFilters).distinct()

    totalCustomers = customers.filter(registrations__gt=0).count()
    totalClasses = [x.registrations for x in customers if x.registrations]
    totalClasses.sort()

    totalsByRole = {}

    for this_role in role_list:
        totalsByRole[this_role.pluralName] = {
            'customers': customers.filter(**{this_role.pluralName + '__gt': 0}).count(),
            'classes': [getattr(x,this_role.pluralName,None) for x in customers if getattr(x,this_role.pluralName,None)],
        }
        totalsByRole[this_role.pluralName]['classes'].sort()

    results = {}
    lastAll = 0
    lastByRole = {this_role.pluralName:0 for this_role in role_list}
    iByRole = {}

    for this_bin in bins:
        range_max = this_bin[1]

        if this_bin[0] == this_bin[1]:
            this_label = '%s' % this_bin[0]
        elif this_bin[1] == 99999:
            this_label = '%s or more' % this_bin[0]
        else:
            this_label = '%s-%s' % this_bin

        i_all = bisect(totalClasses,range_max,lastAll)
        iByRole = {
            this_role.pluralName:bisect(totalsByRole[this_role.pluralName]['classes'],range_max,lastByRole[this_role.pluralName])
            for this_role in role_list
        }

        # Note: These are not translated because the chart Javascript looks for these keys
        results.update({
            this_label:
            {
                '# Students': (i_all - lastAll),
                '% Students': 100 * (i_all - lastAll) / float(totalCustomers),
                'bin': this_bin,
            },
        })
        for this_role in role_list:
            results[this_label].update({
                '# ' + this_role.pluralName: (iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]),
                '% ' + this_role.pluralName: 100 * (
                    iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]
                ) /
                float(totalsByRole[this_role.pluralName]['customers']),
            })

        lastAll = i_all
        lastByRole = {this_role.pluralName:iByRole[this_role.pluralName] for this_role in role_list}

    return results
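The binning relies on bisect over a sorted list, with each call starting where the previous bin ended (the lo argument), so the whole histogram is built in one pass. A self-contained miniature of that loop:

from bisect import bisect

counts = sorted([1, 2, 2, 5, 12, 30])  # classes taken per student
bins = [(1, 1), (2, 4), (5, 9), (10, 99999)]
last = 0
for lo, hi in bins:
    i = bisect(counts, hi, last)   # first index past this bin's upper bound
    print('%s-%s: %d students' % (lo, hi, i - last))
    last = i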
Ejemplo n.º 25
0
    def test_ok(self):
        user1 = User.objects.create(username='******',
                                    first_name='Foo',
                                    last_name='Bar')
        user2 = User.objects.create(username='******',
                                    first_name='1',
                                    last_name='2')
        user3 = User.objects.create(username='******')

        book1 = Book.objects.create(name='test1',
                                    price=25,
                                    author='author 1',
                                    discount=3)
        book2 = Book.objects.create(name='test2',
                                    price=55,
                                    author='author 2',
                                    owner=user1)

        UserBookRelation.objects.create(user=user1,
                                        book=book1,
                                        like=True,
                                        rate=5)
        UserBookRelation.objects.create(user=user2,
                                        book=book1,
                                        like=True,
                                        rate=5)
        UserBookRelation.objects.create(user=user3,
                                        book=book1,
                                        like=True,
                                        rate=4)

        UserBookRelation.objects.create(user=user1,
                                        book=book2,
                                        like=True,
                                        rate=4)
        user_book2 = UserBookRelation.objects.create(user=user2,
                                                     book=book2,
                                                     like=True)
        user_book2.rate = 3
        user_book2.save()
        UserBookRelation.objects.create(user=user3, book=book2, like=False)

        books = Book.objects.all().annotate(
            annotated_likes=Count(
                Case(When(userbookrelation__like=True, then=1))),
            # rating=Avg('userbookrelation__rate'),
            discount_price=F('price') - F('discount')).order_by('id')
        data = BooksSerializer(books, many=True).data
        expected_data = [{
            'id':
            book1.id,
            'name':
            'test1',
            'price':
            '25.00',
            'author':
            'author 1',
            'annotated_likes':
            3,
            'rating':
            '4.67',
            'discount':
            '3.00',
            'discount_price':
            22.0,
            'owner_name':
            '',
            'readers': [{
                'first_name': 'Foo',
                'last_name': 'Bar'
            }, {
                'first_name': '1',
                'last_name': '2'
            }, {
                'first_name': '',
                'last_name': ''
            }]
        }, {
            'id':
            book2.id,
            'name':
            'test2',
            'price':
            '55.00',
            'author':
            'author 2',
            'annotated_likes':
            2,
            'rating':
            '3.50',
            'discount':
            '0.00',
            'discount_price':
            55.0,
            'owner_name':
            user1.__str__(),
            'readers': [{
                'first_name': 'Foo',
                'last_name': 'Bar'
            }, {
                'first_name': '1',
                'last_name': '2'
            }, {
                'first_name': '',
                'last_name': ''
            }]
        }]
        self.assertEqual(expected_data, data)
Ejemplo n.º 26
0
def getMonthlyPerformance():
    '''
    This function does the work of compiling monthly performance data
    that can either be rendered as CSV or as JSON
    '''
    when_all = {
        'eventregistration__dropIn': False,
        'eventregistration__cancelled': False,
    }

    # Get objects at the Series level so that we can calculate StudentHours
    series_counts = list(Series.objects.annotate(
        eventregistrations=Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),)
        .values('year','month','eventregistrations','duration'))

    for series in series_counts:
        series['studenthours'] = (series.get('eventregistrations') or 0) * (series.get('duration') or 0)

    all_years = set([x['year'] for x in series_counts])

    dataseries_list = ['EventRegistrations', 'Registrations','Hours','StudentHours','AvgStudents']

    yearTotals = {}

    # Initialize dictionaries
    for dataseries in dataseries_list:
        yearTotals[dataseries] = {'MonthlyAverage': {}}
        for year in all_years:
            yearTotals[dataseries][year] = {}

    # Fill in by year and month for a cleaner looping process
    for year in all_years:

        # Monthly Totals
        for month in range(1,13):
            # Total EventRegistrations per month is retrieved by the query above.
            yearTotals['EventRegistrations'][year][month] = sum([x['eventregistrations'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])

            # Total Registrations per month and hours per month require a separate query for each month
            yearTotals['Registrations'][year][month] = len(Registration.objects.filter(eventregistration__dropIn=False, eventregistration__cancelled=False,eventregistration__event__year=year,eventregistration__event__month=month).distinct())
            yearTotals['Hours'][year][month] = sum([x['duration'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
            yearTotals['StudentHours'][year][month] = sum([x['studenthours'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])

            if yearTotals['Hours'][year][month] > 0:
                yearTotals['AvgStudents'][year][month] = yearTotals['StudentHours'][year][month] / float(yearTotals['Hours'][year][month])
            else:
                yearTotals['AvgStudents'][year][month] = 0

        # Annual Totals
        for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
            yearTotals[sub_series][year]['Total'] = sum([x for x in yearTotals[sub_series][year].values()])

        # Annual (Monthly) Averages
        month_count = len([x for k,x in yearTotals['Hours'][year].items() if k in range(1,13) and x > 0])
        if month_count > 0:
            for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
                yearTotals[sub_series][year]['Average'] = yearTotals[sub_series][year]['Total'] / float(month_count)
            yearTotals['AvgStudents'][year]['Average'] = yearTotals['StudentHours'][year]['Total'] / float(yearTotals['Hours'][year]['Total'])

    # Monthly Averages
    for month in range(1,13):
        yearly_hours_data = [x[month] for k,x in yearTotals['Hours'].items() if k in all_years and x[month] > 0]
        yearly_studenthours_data = [x[month] for k,x in yearTotals['StudentHours'].items() if k in all_years and x[month] > 0]
        yearly_eventregistrations_data = [x[month] for k,x in yearTotals['EventRegistrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
        yearly_registrations_data = [x[month] for k,x in yearTotals['Registrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]

        year_count = len(yearly_hours_data)

        if year_count > 0:
            yearTotals['EventRegistrations']['MonthlyAverage'][month] = sum([x for x in yearly_eventregistrations_data]) / year_count
            yearTotals['Registrations']['MonthlyAverage'][month] = sum([x for x in yearly_registrations_data]) / year_count
            yearTotals['Hours']['MonthlyAverage'][month] = sum([x for x in yearly_hours_data]) / year_count
            yearTotals['StudentHours']['MonthlyAverage'][month] = sum([x for x in yearly_studenthours_data]) / year_count
            yearTotals['AvgStudents']['MonthlyAverage'][month] = yearTotals['StudentHours']['MonthlyAverage'][month] / float(yearTotals['Hours']['MonthlyAverage'][month])

    return yearTotals
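One design note: the Registrations total issues a separate query for each (year, month) pair, i.e. 12 queries per year. A grouped alternative that fetches all months in one query might look like this (field names taken from the filter above):

from django.db.models import Count

monthly_registrations = (
    Registration.objects
    .filter(eventregistration__dropIn=False,
            eventregistration__cancelled=False)
    .values('eventregistration__event__year',
            'eventregistration__event__month')
    .annotate(total=Count('id', distinct=True))
)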
Ejemplo n.º 27
0
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        if not (self.object.ended or self.can_edit):
            raise Http404()

        queryset = Submission.objects.filter(contest_object=self.object)

        ac_count = Count(
            Case(When(result='AC', then=Value(1)),
                 output_field=IntegerField()))
        ac_rate = CombinedExpression(ac_count / Count('problem'),
                                     '*',
                                     Value(100.0),
                                     output_field=FloatField())

        status_count_queryset = list(
            queryset.values('problem__code', 'result').annotate(
                count=Count('result')).values_list('problem__code', 'result',
                                                   'count'), )
        labels, codes = [], []
        contest_problems = self.object.contest_problems.order_by(
            'order').values_list('problem__name', 'problem__code')
        if contest_problems:
            labels, codes = zip(*contest_problems)
        num_problems = len(labels)
        status_counts = [[] for i in range(num_problems)]
        for problem_code, result, count in status_count_queryset:
            if problem_code in codes:
                status_counts[codes.index(problem_code)].append(
                    (result, count))

        result_data = defaultdict(partial(list, [0] * num_problems))
        for i in range(num_problems):
            for category in _get_result_data(defaultdict(
                    int, status_counts[i]))['categories']:
                result_data[category['code']][i] = category['count']

        stats = {
            'problem_status_count': {
                'labels':
                labels,
                'datasets': [{
                    'label':
                    name,
                    'backgroundColor':
                    settings.DMOJ_STATS_SUBMISSION_RESULT_COLORS[name],
                    'data':
                    data,
                } for name, data in result_data.items()],
            },
            'problem_ac_rate':
            get_bar_chart(
                queryset.values(
                    'contest__problem__order',
                    'problem__name').annotate(ac_rate=ac_rate).order_by(
                        'contest__problem__order').values_list(
                            'problem__name', 'ac_rate'), ),
            'language_count':
            get_pie_chart(
                queryset.values('language__name').annotate(
                    count=Count('language__name')).filter(
                        count__gt=0).order_by('-count').values_list(
                            'language__name', 'count'), ),
            'language_ac_rate':
            get_bar_chart(
                queryset.values('language__name').annotate(
                    ac_rate=ac_rate).filter(ac_rate__gt=0).values_list(
                        'language__name', 'ac_rate'), ),
        }

        context['stats'] = mark_safe(json.dumps(stats))

        return context
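CombinedExpression is internal API; the public way to build the same percentage is ExpressionWrapper around ordinary expression arithmetic. A sketch of an equivalent ac_rate:

from django.db.models import (Case, Count, ExpressionWrapper, FloatField,
                              IntegerField, Value, When)

ac_count = Count(Case(When(result='AC', then=Value(1)),
                      output_field=IntegerField()))
ac_rate = ExpressionWrapper(ac_count * 100.0 / Count('problem'),
                            output_field=FloatField())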
Ejemplo n.º 28
0
highlight_colors = []


def _highlight_colors():
    for color in chart_colors:
        r, g, b = color >> 16, (color >> 8) & 0xFF, color & 0xFF
        highlight_colors.append('#%02X%02X%02X' % (min(int(
            r * 1.2), 255), min(int(g * 1.2), 255), min(int(b * 1.2), 255)))


_highlight_colors()
del _highlight_colors

chart_colors = list(map('#%06X'.__mod__, chart_colors))  # materialize: zipped on every request
ac_count = Count(
    Case(When(submission__result='AC', then=Value(1)),
         output_field=IntegerField()))


def repeat_chain(iterable):
    return chain.from_iterable(repeat(iterable))


def language_data(
    request,
    language_count=Language.objects.annotate(count=Count('submission'))):
    languages = language_count.filter(count__gte=1000).values(
        'key', 'name', 'short_name', 'count').order_by('-count')
    data = []
    for language, color, highlight in zip(languages, chart_colors,
                                          highlight_colors):
        data.append({
Ejemplo n.º 29
0
 def get_context_data(self, **kwargs):
     context = super().get_context_data(**kwargs)
     context['items'] = EditorItem.objects.filter(published=True).order_by('-published_date')[:3]
     context['projects'] = Project.objects.filter(public_view=True).annotate(
         num_items=Sum(Case(When(items__published=True, then=1),
                            default=0, output_field=IntegerField()))
     ).order_by('-num_items').exclude(num_items=0)[:3]
     return context
Ejemplo n.º 30
0
def get_progress_and_last_active(target_nodes, **kwargs):
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(
        get_members_or_user(kwargs['collection_kind'],
                            kwargs['collection_id']))

    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(
        include_self=True).order_by().values_list("content_id", flat=True)
    # get all summary logs for the given users that correspond to the
    # content nodes and their descendant content nodes
    progress_query = ContentSummaryLog.objects\
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time
    if kwargs.get('last_active_time'):
        progress_query = progress_query.filter(
            end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #   'content_id': <content_id>,
    #   'kind': <kind>,
    #   'total_progress': <sum of all progress for this content>,
    #   'log_count_total': <number of summary logs for this content>,
    #   'log_count_complete': <number of complete summary logs for this content>,
    #   'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(
            Case(When(progress=1, then=1),
                 default=0,
                 output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(
                include_self=False).order_by().values_list(
                    'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes\
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # max() raises on an empty iterable, so guard with try/except
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(
                    last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop(
                    'last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress':
                    progress['total_progress'],
                    'log_count_total':
                    progress['log_count_total'],
                    'log_count_complete':
                    progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress':
                    0.0,
                    'log_count_total':
                    0,
                    'log_count_complete':
                    0,
                }]
    return output_progress_dict, output_last_active_dict
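The reduce above depends on a sum_progress_dicts helper that is not shown; given how it is called (accumulator first, then a progress dict), a compatible definition would be:

def sum_progress_dicts(total, progress):
    # accumulate the 'total_progress' of each matching summary dict
    return total + progress.get('total_progress', 0.0)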