Example #1
def reorder(_default=AFTER, _reverse=False, **kwargs):
    """
    Return a database expression that can be used in an order_by() so that
    the queryset will be sorted according to the order of values given.
    """
    if len(kwargs) != 1:
        raise TypeError("reorder() takes one non-optional keyword argument")
    fieldname, new_order = kwargs.popitem()

    if _default is BEFORE:
        _default = -1
    elif _default is AFTER:
        _default = len(new_order)

    whens = [When(**{fieldname: v, 'then': i}) for i, v in enumerate(new_order)]

    casewhen = Case(*whens, default=Value(_default), output_field=IntegerField())

    if _reverse:
        return casewhen.desc()
    else:
        return casewhen.asc()
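
A quick usage sketch (the `Task` model and `status` values are hypothetical; `BEFORE`/`AFTER` are assumed to be sentinel constants defined alongside `reorder()`):

# Sort tasks so 'in_progress' rows come first, then 'todo', then 'done';
# anything else falls back to _default (AFTER puts it last).
Task.objects.order_by(reorder(status=['in_progress', 'todo', 'done']))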
Example #2
def getAveragesByClassType(startDate=None, endDate=None):

    # Base registration filters (non-cancelled, non-drop-in); date filters are applied below if given
    when_all = {
        'classdescription__series__eventregistration__cancelled': False,
        'classdescription__series__eventregistration__dropIn': False
    }

    timeFilters = {}
    classFilters = {}
    roleFilters = Q()

    if startDate:
        timeFilters['classdescription__series__startTime__gte'] = startDate
        classFilters['startTime__gte'] = startDate
        roleFilters = roleFilters & (Q(eventrole__event__startTime__gte=startDate) | Q(eventregistration__event__startTime__gte=startDate))
    if endDate:
        timeFilters['classdescription__series__startTime__lte'] = endDate
        classFilters['startTime__lte'] = endDate
        roleFilters = roleFilters & (Q(eventrole__event__startTime__lte=endDate) | Q(eventregistration__event__startTime__lte=endDate))

    when_all.update(timeFilters)

    role_list = DanceRole.objects.filter(roleFilters).distinct()

    annotations = {
        'registrations': Sum(Case(When(Q(**when_all), then=1), output_field=IntegerField())),
    }
    values_list = ['name', 'danceType__name', 'registrations']

    for this_role in role_list:
        annotations[this_role.pluralName] = Sum(Case(
            When(Q(Q(**when_all) & Q(classdescription__series__eventregistration__role=this_role)), then=1),
            output_field=IntegerField()))
        values_list.append(this_role.pluralName)

    registration_counts = list(
        DanceTypeLevel.objects.annotate(**annotations).values_list(*values_list))
    class_counter = Counter([
        (x.classDescription.danceTypeLevel.name, x.classDescription.danceTypeLevel.danceType.name)
        for x in Series.objects.filter(**classFilters).distinct()
    ])

    results = {}
    for list_item in registration_counts:
        type_name = ' '.join((str(list_item[0]), str(list_item[1])))

        results[type_name] = {
            'registrations': list_item[2],
        }
        m = 3
        for this_role in role_list:
            results[type_name]['total' + this_role.pluralName] = list_item[m]
            m += 1

    for k, count in class_counter.items():
        type_name = ' '.join((str(k[0]), str(k[1])))
        results[type_name].update({
            'series': count
        })
    for k, v in results.items():
        if results[k].get('series'):
            results[k].update({
                'avgRegistrations': (results[k]['registrations'] or 0) / float(results[k]['series']),
            })
            for this_role in role_list:
                results[k]['avg' + this_role.pluralName] = (results[k]['total' + this_role.pluralName] or 0) / float(results[k]['series'])

    return results
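
The `Sum(Case(When(...), then=1))` pattern above is the pre-Django-2.0 idiom for a conditional count; on Django 2.0+ a filtered aggregate can express the same idea more directly. A rough sketch using the same lookups (join-multiplicity caveats aside):

annotations = {
    'registrations': Count(
        'classdescription__series__eventregistration',
        filter=Q(**when_all),
    ),
}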
Example #3
def section_demand_change(section_id, updated_at):
    """
    This function should be called when a section's demand changes (i.e. the number of
    active registrations changes, or the section's status is updated). It updates the
    `PcaDemandDistributionEstimate` model and `current_demand_distribution_estimate`
    cache to reflect the demand change.

    :param: section_id: the id of the section involved in the demand change
    :param: updated_at: the datetime at which the demand change occurred
    """
    section = Section.objects.get(id=section_id)
    semester = section.semester
    if semester != get_current_semester():
        return

    with transaction.atomic():
        create_new_distribution_estimate = False
        sentinel = object()
        current_demand_distribution_estimate = cache.get(
            "current_demand_distribution_estimate", sentinel)
        if (current_demand_distribution_estimate == sentinel
                or current_demand_distribution_estimate.semester != semester):
            create_new_distribution_estimate = True

        sections_qs = (Section.objects.filter(
            extra_metrics_section_filters,
            course__semester=semester).select_for_update().annotate(
                raw_demand=Case(
                    When(
                        Q(capacity__gt=0),
                        then=(Cast(
                            "registration_volume",
                            models.FloatField(),
                        ) / Cast("capacity", models.FloatField())),
                    ),
                    default=None,
                    output_field=models.FloatField(),
                ), ).order_by("raw_demand"))

        try:
            lowest_demand_section = sections_qs[:1].get()
            # QuerySets don't support negative slicing, so reverse the ordering to get the max
            highest_demand_section = sections_qs.reverse()[:1].get()
        except Section.DoesNotExist:
            return  # Don't add a PcaDemandDistributionEstimate -- there are no valid sections yet

        if (create_new_distribution_estimate
                or highest_demand_section.raw_demand >
                current_demand_distribution_estimate.highest_raw_demand
                or lowest_demand_section.raw_demand <
                current_demand_distribution_estimate.lowest_raw_demand):
            closed_sections_demand_values = np.asarray(
                sections_qs.filter(status="C").values_list("raw_demand",
                                                           flat=True))
            # "The term 'closed sections positive raw demand values' is
            # sometimes abbreviated as 'csprdv'
            csrdv_frac_zero, fit_shape, fit_loc, fit_scale = (None, None, None,
                                                              None)
            if len(closed_sections_demand_values) > 0:
                closed_sections_positive_demand_values = closed_sections_demand_values[
                    np.where(closed_sections_demand_values > 0)]
                csrdv_frac_zero = 1 - len(
                    closed_sections_positive_demand_values) / len(
                        closed_sections_demand_values)
                if len(closed_sections_positive_demand_values) > 0:
                    fit_shape, fit_loc, fit_scale = stats.lognorm.fit(
                        closed_sections_positive_demand_values)
            new_demand_distribution_estimate = PcaDemandDistributionEstimate(
                semester=semester,
                highest_demand_section=highest_demand_section,
                highest_demand_section_volume=highest_demand_section.registration_volume,
                lowest_demand_section=lowest_demand_section,
                lowest_demand_section_volume=lowest_demand_section.registration_volume,
                csrdv_frac_zero=csrdv_frac_zero,
                csprdv_lognorm_param_shape=fit_shape,
                csprdv_lognorm_param_loc=fit_loc,
                csprdv_lognorm_param_scale=fit_scale,
            )
            add_drop_period = get_or_create_add_drop_period(semester)
            new_demand_distribution_estimate.save(
                add_drop_period=add_drop_period)
            new_demand_distribution_estimate.created_at = updated_at
            new_demand_distribution_estimate.save(
                add_drop_period=add_drop_period)
            cache.set(
                "current_demand_distribution_estimate",
                new_demand_distribution_estimate,
                timeout=(add_drop_period.estimated_end -
                         add_drop_period.estimated_start).total_seconds() //
                ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES,
            )  # set timeout to roughly follow ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES
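
As a side note, `first()`/`last()` are a slightly simpler way to grab the extremes of an ordered queryset; they return `None` instead of raising `DoesNotExist`, so the empty-queryset check changes shape. A sketch under the same ordering as above:

lowest_demand_section = sections_qs.first()   # smallest raw_demand, or None
highest_demand_section = sections_qs.last()   # largest raw_demand, or None
if lowest_demand_section is None:
    return  # no valid sections yet, so don't record an estimate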
Example #4
    def get_all_tx_hashes(self,
                          safe_address: str,
                          executed: bool = False,
                          queued: bool = True,
                          trusted: bool = True) -> QuerySet:
        """
        Build a queryset with hashes for every tx for a Safe, for pagination filtering. In the case of
        Multisig Transactions, as some of them are not mined, we use the SafeTxHash.
        Criteria for building this list:
          - Return only multisig txs with `nonce < current Safe Nonce`
          - The endpoint should only show incoming transactions that have been mined
          - The transactions should be sorted by execution date. If an outgoing transaction doesn't have an
          execution date, the execution date of the transaction with the same nonce that has been executed
          should be taken.
          - Incoming and outgoing transfers of Eth/tokens must be under a multisig/module tx if triggered by one.
          Otherwise they should have their own entry in the list using an EthereumTx
        :param safe_address:
        :param executed: By default `False`: all transactions are returned. With `True`, only executed txs
        are returned.
        :param queued: By default `True`: all transactions are returned. With `False`, only txs with
        `nonce < current Safe Nonce` are returned.
        :param trusted: By default `True`: only trusted txs are returned (txs with at least one confirmation,
        sent by a delegate, or indexed). With `False`, all txs are returned.
        :return: List with tx hashes sorted by date (newest first)
        """

        # If tx is not mined, get the execution date of a tx mined with the same nonce
        case = Case(When(ethereum_tx__block=None,
                         then=MultisigTransaction.objects.filter(
                             safe=OuterRef('safe'),
                             nonce=OuterRef('nonce')).exclude(
                                 ethereum_tx__block=None).values(
                                     'ethereum_tx__block__timestamp')),
                    default=F('ethereum_tx__block__timestamp'))
        multisig_safe_tx_ids = MultisigTransaction.objects.filter(
            safe=safe_address).annotate(
                execution_date=case,
                block=F('ethereum_tx__block_id'),
                safe_nonce=F('nonce'),
            ).values(
                'safe_tx_hash', 'execution_date', 'created', 'block',
                'safe_nonce'
            )  # Tricky, we will merge SafeTx hashes with EthereumTx hashes
        # Block is needed to get stable ordering

        if not queued:  # Filter out txs with nonce >= Safe nonce
            last_nonce_query = MultisigTransaction.objects.filter(
                safe=safe_address).exclude(
                    ethereum_tx=None).order_by('-nonce').values('nonce')
            multisig_safe_tx_ids = multisig_safe_tx_ids.filter(
                nonce__lte=Subquery(last_nonce_query[:1]))

        if trusted:  # Just show trusted transactions
            multisig_safe_tx_ids = multisig_safe_tx_ids.filter(trusted=True)

        if executed:
            multisig_safe_tx_ids = multisig_safe_tx_ids.exclude(
                ethereum_tx__block=None)

        # Get module txs
        module_tx_ids = ModuleTransaction.objects.filter(
            safe=safe_address).annotate(
                execution_date=F('internal_tx__ethereum_tx__block__timestamp'),
                block=F('internal_tx__ethereum_tx__block_id'),
                safe_nonce=Value(0, output_field=Uint256Field()),
            ).distinct().values('internal_tx__ethereum_tx_id',
                                'execution_date', 'created', 'block',
                                'safe_nonce')

        multisig_hashes = MultisigTransaction.objects.filter(
            safe=safe_address).exclude(
                ethereum_tx=None).values('ethereum_tx_id')
        module_hashes = ModuleTransaction.objects.filter(
            safe=safe_address).values('internal_tx__ethereum_tx_id')
        multisig_and_module_hashes = multisig_hashes.union(module_hashes)

        # Get incoming/outgoing tokens not included on Multisig or Module txs.
        # Outgoing tokens can be triggered by another user after the Safe calls `approve`, that's why it will not
        # always appear as a MultisigTransaction
        event_tx_ids = EthereumEvent.objects.erc20_and_721_events().filter(
            Q(arguments__to=safe_address)
            | Q(arguments__from=safe_address)).exclude(
                ethereum_tx__in=multisig_and_module_hashes).annotate(
                    execution_date=F('ethereum_tx__block__timestamp'),
                    created=F('ethereum_tx__block__timestamp'),
                    block=F('ethereum_tx__block_id'),
                    safe_nonce=Value(0, output_field=Uint256Field()),
                ).distinct().values('ethereum_tx_id', 'execution_date',
                                    'created', 'block', 'safe_nonce')

        # Get incoming txs not included on Multisig or Module txs
        internal_tx_ids = InternalTx.objects.filter(
            call_type=EthereumTxCallType.CALL.value,
            value__gt=0,
            to=safe_address,
        ).exclude(ethereum_tx__in=multisig_and_module_hashes).annotate(
            execution_date=F('ethereum_tx__block__timestamp'),
            created=F('ethereum_tx__block__timestamp'),
            block=F('ethereum_tx__block_id'),
            safe_nonce=Value(0, output_field=Uint256Field()),
        ).distinct().values('ethereum_tx_id', 'execution_date', 'created',
                            'block', 'safe_nonce')

        # Tricky, we merge SafeTx hashes with EthereumTx hashes
        queryset = multisig_safe_tx_ids.distinct().union(event_tx_ids).union(
            internal_tx_ids).union(module_tx_ids).order_by(
                '-execution_date', '-safe_nonce', 'block', '-created')
        # Order by block because `block_number < NULL`, so txs mined will have preference,
        # and `created` to get always the same ordering with not executed transactions, as they will share
        # the same `execution_date` that the mined tx
        return queryset
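
Passing a queryset directly as the `then=` value relies on Django resolving it as a subquery; an explicit `Subquery` is equivalent and arguably clearer. A sketch with the same models assumed (a `[:1]` slice is added here to guarantee a single row):

from django.db.models import Case, F, OuterRef, Subquery, When

execution_date = Case(
    When(ethereum_tx__block=None,
         # Borrow the block timestamp of a mined tx with the same nonce
         then=Subquery(MultisigTransaction.objects.filter(
             safe=OuterRef('safe'),
             nonce=OuterRef('nonce')).exclude(
                 ethereum_tx__block=None).values(
                     'ethereum_tx__block__timestamp')[:1])),
    default=F('ethereum_tx__block__timestamp'))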
Example #5
def product_type_counts(request):
    form = ProductTypeCountsForm()
    opened_in_period_list = []
    oip = None
    cip = None
    aip = None
    all_current_in_pt = None
    top_ten = None
    pt = None
    today = timezone.now()
    first_of_month = today.replace(day=1,
                                   hour=0,
                                   minute=0,
                                   second=0,
                                   microsecond=0)
    mid_month = first_of_month.replace(day=15,
                                       hour=23,
                                       minute=59,
                                       second=59,
                                       microsecond=999999)
    end_of_month = mid_month.replace(day=monthrange(today.year,
                                                    today.month)[1],
                                     hour=23,
                                     minute=59,
                                     second=59,
                                     microsecond=999999)
    start_date = first_of_month
    end_date = end_of_month

    if request.method == 'GET' and 'month' in request.GET and 'year' in request.GET and 'product_type' in request.GET:
        form = ProductTypeCountsForm(request.GET)
        if form.is_valid():
            pt = form.cleaned_data['product_type']
            month = int(form.cleaned_data['month'])
            year = int(form.cleaned_data['year'])
            first_of_month = first_of_month.replace(month=month, year=year)

            month_requested = datetime(year, month, 1)

            end_of_month = month_requested.replace(day=monthrange(
                month_requested.year, month_requested.month)[1],
                                                   hour=23,
                                                   minute=59,
                                                   second=59,
                                                   microsecond=999999)
            start_date = first_of_month
            start_date = datetime(start_date.year,
                                  start_date.month,
                                  start_date.day,
                                  tzinfo=timezone.get_current_timezone())
            end_date = end_of_month
            end_date = datetime(end_date.year,
                                end_date.month,
                                end_date.day,
                                tzinfo=timezone.get_current_timezone())

            oip = opened_in_period(start_date, end_date, pt)

            # trending data - 12 months
            for x in range(12, 0, -1):
                opened_in_period_list.append(
                    opened_in_period(start_date + relativedelta(months=-x),
                                     end_of_month + relativedelta(months=-x),
                                     pt))

            opened_in_period_list.append(oip)

            closed_in_period = Finding.objects.filter(
                mitigated__range=[start_date, end_date],
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).values('numerical_severity').annotate(
                                  Count('numerical_severity')).order_by(
                                      'numerical_severity')

            total_closed_in_period = Finding.objects.filter(
                mitigated__range=[start_date, end_date],
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).aggregate(total=Sum(
                                  Case(When(severity__in=('Critical', 'High',
                                                          'Medium', 'Low'),
                                            then=Value(1)),
                                       output_field=IntegerField())))['total']

            overall_in_pt = Finding.objects.filter(
                date__lt=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).values('numerical_severity').annotate(
                                  Count('numerical_severity')).order_by(
                                      'numerical_severity')

            total_overall_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).aggregate(total=Sum(
                                  Case(When(severity__in=('Critical', 'High',
                                                          'Medium', 'Low'),
                                            then=Value(1)),
                                       output_field=IntegerField())))['total']

            all_current_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium',
                              'Low')).prefetch_related(
                                  'test__engagement__product',
                                  'test__engagement__product__prod_type',
                                  'test__engagement__risk_acceptance',
                                  'reporter').order_by('numerical_severity')

            top_ten = Product.objects.filter(
                engagement__test__finding__date__lte=end_date,
                engagement__test__finding__verified=True,
                engagement__test__finding__false_p=False,
                engagement__test__finding__duplicate=False,
                engagement__test__finding__out_of_scope=False,
                engagement__test__finding__mitigated__isnull=True,
                engagement__test__finding__severity__in=('Critical', 'High',
                                                         'Medium', 'Low'),
                prod_type=pt).annotate(
                    critical=Sum(
                        Case(When(
                            engagement__test__finding__severity='Critical',
                            then=Value(1)),
                             output_field=IntegerField())),
                    high=Sum(
                        Case(When(engagement__test__finding__severity='High',
                                  then=Value(1)),
                             output_field=IntegerField())),
                    medium=Sum(
                        Case(When(engagement__test__finding__severity='Medium',
                                  then=Value(1)),
                             output_field=IntegerField())),
                    low=Sum(
                        Case(When(engagement__test__finding__severity='Low',
                                  then=Value(1)),
                             output_field=IntegerField())),
                    total=Sum(
                        Case(When(engagement__test__finding__severity__in=(
                            'Critical', 'High', 'Medium', 'Low'),
                                  then=Value(1)),
                             output_field=IntegerField()))).order_by(
                                 '-critical', '-high', '-medium', '-low')[:10]

            cip = {
                'S0': 0,
                'S1': 0,
                'S2': 0,
                'S3': 0,
                'Total': total_closed_in_period
            }

            aip = {
                'S0': 0,
                'S1': 0,
                'S2': 0,
                'S3': 0,
                'Total': total_overall_in_pt
            }

            for o in closed_in_period:
                cip[o['numerical_severity']] = o['numerical_severity__count']

            for o in overall_in_pt:
                aip[o['numerical_severity']] = o['numerical_severity__count']
        else:
            messages.add_message(
                request,
                messages.ERROR,
                "Please choose month and year and the Product Type.",
                extra_tags='alert-danger')

    add_breadcrumb(title="Bi-Weekly Metrics", top_level=True, request=request)

    return render(
        request, 'dojo/pt_counts.html', {
            'form': form,
            'start_date': start_date,
            'end_date': end_date,
            'opened_in_period': oip,
            'trending_opened': opened_in_period_list,
            'closed_in_period': cip,
            'overall_in_pt': aip,
            'all_current_in_pt': all_current_in_pt,
            'top_ten': top_ten,
            'pt': pt
        })
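
Incidentally, in `total_closed_in_period` and `total_overall_in_pt` above, the `Case(When(severity__in=...))` can only ever match because the queryset is already filtered to those same severities, so (apart from yielding 0 instead of None on an empty queryset) a plain count is equivalent. A sketch for the closed-in-period total:

total_closed_in_period = Finding.objects.filter(
    mitigated__range=[start_date, end_date],
    test__engagement__product__prod_type=pt,
    severity__in=('Critical', 'High', 'Medium', 'Low')).count()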
Example #6
 def get_queryset(self):
     return super().get_queryset().annotate(has_price=Case(When(price=0, then=0), default=1, output_field=IntegerField()))
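
A typical use of such an annotation (the `Product` manager name is an assumption) is to push zero-priced rows to the end while otherwise sorting by price:

# Rows with a real price first (has_price=1 sorts before 0), cheapest first within each group
Product.objects.order_by('-has_price', 'price')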
Example #7
    def download(self, award_type, agency="all", generate_since=None):
        """ Create a delta file based on award_type and agency_code (or all agencies) """
        logger.info(
            "Starting generation. {}, Agency: {}".format(award_type, agency if agency == "all" else agency["name"])
        )
        award_map = AWARD_MAPPINGS[award_type]

        # Create Source and update fields to include correction_delete_ind
        source = DownloadSource(
            "transaction",
            award_map["letter_name"].lower(),
            "transactions",
            "all" if agency == "all" else agency["toptier_agency_id"],
        )
        source.query_paths.update({"correction_delete_ind": award_map["correction_delete_ind"]})
        if award_type == "Contracts":
            # Add the agency_id column to the mappings
            source.query_paths.update({"agency_id": "transaction__contract_data__agency_id"})
            source.query_paths.move_to_end("agency_id", last=False)
        source.query_paths.move_to_end("correction_delete_ind", last=False)
        source.human_names = list(source.query_paths.keys())

        # Apply filters to the queryset
        filters, agency_code = self.parse_filters(award_map["award_types"], agency)
        source.queryset = VALUE_MAPPINGS["transactions"]["filter_function"](filters)

        if award_type == "Contracts":
            source.queryset = source.queryset.annotate(
                correction_delete_ind=Case(
                    When(transaction__contract_data__created_at__lt=generate_since, then=Value("C")),
                    default=Value(""),
                    output_field=CharField(),
                )
            )
        else:
            indicator_field = F("transaction__assistance_data__correction_delete_indicatr")
            source.queryset = source.queryset.annotate(
                correction_delete_ind=Case(
                    When(transaction__assistance_data__updated_at__gt=generate_since, then=indicator_field),
                    When(transaction__transactiondelta__isnull=False, then=Value("C")),
                    default=indicator_field,
                    output_field=CharField(),
                )
            )

        transaction_delta_queryset = source.queryset

        _filter = {"transaction__{}__{}__gte".format(award_map["model"], award_map["date_filter"]): generate_since}
        if self.debugging_end_date:
            _filter[
                "transaction__{}__{}__lt".format(award_map["model"], award_map["date_filter"])
            ] = self.debugging_end_date

        source.queryset = source.queryset.filter(**_filter)

        # UNION the normal results to the transaction_delta results.
        source.queryset = source.queryset.union(
            transaction_delta_queryset.filter(transaction__transactiondelta__isnull=False)
        )

        # Generate file
        file_path = self.create_local_file(award_type, source, agency_code, generate_since)
        if file_path is None:
            logger.info("No new, modified, or deleted data; discarding file")
        elif not settings.IS_LOCAL:
            # Upload file to S3 and delete local version
            logger.info("Uploading file to S3 bucket and deleting local copy")
            multipart_upload(
                settings.MONTHLY_DOWNLOAD_S3_BUCKET_NAME,
                settings.USASPENDING_AWS_REGION,
                file_path,
                os.path.basename(file_path),
            )
            os.remove(file_path)

        logger.info(
            "Finished generation. {}, Agency: {}".format(award_type, agency if agency == "all" else agency["name"])
        )
Example #8
def get_progress_and_last_active(target_nodes, **kwargs):
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(
        get_members_or_user(kwargs['collection_kind'],
                            kwargs['collection_id']))

    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(
        include_self=True).order_by().values_list("content_id", flat=True)
    # Get all summary logs for the queried users that correspond to the content nodes and descendant content nodes
    # Filter by users and the content ids
    progress_query = ContentSummaryLog.objects\
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time
    if kwargs.get('last_active_time'):
        progress_query = progress_query.filter(
            end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #   'content_id': <content_id>,
    #   'kind': <kind>,
    #   'total_progress': <sum of all progress for this content>,
    #   'log_count_total': <number of summary logs for this content>,
    #   'log_count_complete': <number of complete summary logs for this content>,
    #   'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(
            Case(When(progress=1, then=1),
                 default=0,
                 output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(
                include_self=False).order_by().values_list(
                    'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes\
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # Max does not handle empty iterables, so try this
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(
                    last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop(
                    'last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress':
                    progress['total_progress'],
                    'log_count_total':
                    progress['log_count_total'],
                    'log_count_complete':
                    progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress':
                    0.0,
                    'log_count_total':
                    0,
                    'log_count_complete':
                    0,
                }]
    return output_progress_dict, output_last_active_dict
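
`sum_progress_dicts` is referenced above but not shown in this excerpt; a plausible reducer, consistent with how it is called here (a running float total plus a progress dict), might look like the following sketch, though the real helper may differ:

def sum_progress_dicts(total_progress, progress_dict):
    # Accumulate this node's total_progress onto the running total
    return total_progress + progress_dict.get('total_progress', 0.0)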
Example #9
def features_of_dialect(request, dialect_id_string, section=None):
    '''The grammar features of a chosen dialect, in tree format '''
    dialect_ids = [int(x) for x in dialect_id_string.split(',')]

    is_bulk_edit = request.GET.get('edit') or False
    base_dialect_id = int(request.GET.get('base_on', 0))
    if is_bulk_edit and base_dialect_id:
        dialect_ids.append(base_dialect_id)

    preserved = Case(
        *[When(pk=pk, then=pos) for pos, pk in enumerate(dialect_ids)])
    dialects = Dialect.objects.filter(id__in=dialect_ids).order_by(preserved)
    chosen_root = get_section_root(section)

    # annotated lists are an efficient way of getting a big chunk of a treebeard tree
    # see: https://django-treebeard.readthedocs.io/en/latest/api.html#treebeard.models.Node.get_annotated_list
    max_depth = None
    feature_list = Feature.get_annotated_list(parent=chosen_root,
                                              max_depth=max_depth)

    # process bulk save if that's what's happening
    # todo - separate this out into a different function, with own url and pass feature id list
    #        through form so it doesn't rely on the above code
    bulk_text = request.POST.get('bulk_edit', None)
    entry_regex = r'^\s*(?P<entry>.+?)\s*(?P<frequency>\b[MPmp]\b){0,1}\s*(?P<comment>\".+\")?\s*$'
    if bulk_text is not None:
        row_texts = bulk_text.split('\n')
        for row_text, feature in zip_longest(
                row_texts, feature_list[1:], fillvalue=''
        ):  # skip first feature as it matches the top-level group
            dfes = []
            for entry_text in row_text.split('~'):
                matches = re.match(entry_regex,
                                   entry_text.strip('\r').strip(' '))
                if not matches:
                    continue
                matches_dict = matches.groupdict()
                if len(matches_dict['entry'].strip(' ')) < 1:
                    continue
                matches_dict['frequency'] = matches_dict['frequency'].upper(
                ) if matches_dict['frequency'] else 'P'
                matches_dict['comment'] = matches_dict['comment'].strip(
                    '"') if matches_dict['comment'] else None
                dfes.append(DialectFeatureEntry(**matches_dict))

            # load the existing DialectFeature or prepare a new one
            df = DialectFeature.objects.filter(dialect=dialects[0].id,
                                               feature=feature[0].id).first()
            if not df:
                df = DialectFeature(dialect_id=dialects[0].id,
                                    feature_id=feature[0].id)
                df.save()

            df.entries.all().delete()  # clear any existing entries
            df.entries.set(dfes, bulk=False)

            if len(dfes) < 1:  # delete the DialectFeature if no entries were submitted
                df.delete()

        return HttpResponseRedirect(
            reverse('dialects:dialect-grammar-section',
                    args=(dialects[0].id, section)))

    # ( feature, stuff, [{df: _, entries: [], examples: []}, {df: _, entries: [], examples: []}, ...] )

    base_path = chosen_root.path if chosen_root else ''
    preserved = Case(
        *[When(dialect_id=pk, then=pos) for pos, pk in enumerate(dialect_ids)])
    dialectfeatures = DialectFeature.objects.filter(dialect__in=dialect_ids) \
                                            .filter(feature__path__startswith=base_path) \
                                            .values('id', 'dialect_id', 'feature_id', 'feature__path', 'feature__fullheading',
                                                    'is_absent', 'introduction', 'comment', 'category') \
                                            .order_by('feature__path', preserved)

    entries = DialectFeatureEntry.objects.filter(feature__dialect__in=dialect_ids) \
                                           .filter(feature__feature__path__startswith=base_path) \
                                           .values('feature__id', 'id', 'entry', 'frequency', 'comment') \
                                           .order_by('feature__feature__path', '-frequency')

    entries_dict = {}
    for x in entries:
        entries_dict.setdefault(x['feature__id'], []).append(x)

    examples_dict = {}
    if is_bulk_edit:
        entries = entries.filter(feature__feature__depth=max_depth)
    elif len(dialects) == 1:
        examples = DialectFeatureExample.objects.filter(feature__dialect__in=dialect_ids) \
                                                .filter(feature__feature__path__startswith=base_path) \
                                                .values('feature__id', 'id', 'example') \
                                                .order_by('feature__feature__path')
        for x in examples:
            examples_dict.setdefault(x['feature__id'], []).append(x)

    num_features = 0
    dialectfeatures_dict = {}
    for x in dialectfeatures:
        num_features += 1
        x.update({
            'entries': entries_dict.get(x['id'], []),
            'examples': examples_dict.get(x['id'], []),
        })
        dialectfeatures_dict.setdefault(x['feature_id'], []).append(x)

    for i, feature in enumerate(feature_list):
        feature_list[i] = (feature[0], feature[1],
                           dialectfeatures_dict.get(feature[0].id, []))


    context = {
        'dialect_ids':  dialect_ids,
        'dialects':     dialects,
        'section':      chosen_root,
        'breadcrumb_bits': make_breadcrumb_bits(chosen_root),
        'feature_list': feature_list,
        'num_features': num_features,
        'all_dialects': Dialect.objects.all() \
                                       .filter(features__feature__path__startswith=base_path) \
                                       .exclude(id__in=dialect_ids) \
                                       .annotate(feature_count=Count('features')) \
                                       .values_list('id', 'name', 'feature_count'),
        'bulk_edit':    is_bulk_edit,
    }

    if context['bulk_edit']:

        def encode_entry(entry):
            encoded_entry = entry['entry']
            if entry['frequency'] not in ('P', None):
                encoded_entry += ' {}'.format(entry['frequency'])
            if entry['comment']:
                encoded_entry += ' "{}"'.format(entry['comment'].replace(
                    '\r', '').replace('\n', '; '))
            return encoded_entry

        raw_rows = []
        for feature, info, dialectfeatures in feature_list[1:]:
            if not dialectfeatures:
                raw_rows.append('')
                continue

            dialect_idx = -1 if base_dialect_id else 0
            entries = dialectfeatures[dialect_idx].get('entries', [])
            raw_rows.append(' ~ '.join([encode_entry(x) for x in entries]))

        # join using html newline entity to prevent textarea ignoring first newline char
        # see: https://stackoverflow.com/a/49604548
        raw_text = '&#13;'.join(raw_rows)
        context.update({
            'raw_text': raw_text,
        })
    if chosen_root:
        context['total_features'] = DialectFeature.objects.filter(
            dialect__in=dialect_ids).count()

    return render(request, 'grammar/feature_list.html', context)
Example #10
def statistics():
    """Gather statistics about users and their inboxes"""
    try:
        last_stat = models.Statistic.objects.latest("date")
    except models.Statistic.DoesNotExist:
        last_stat = None

    # the keys of these dictionaries have awful names for historical reasons
    # don't change them unless you want to do a data migration
    one_day_ago = timezone.now() - timedelta(days=1)
    user_aggregate = {
        "count": Count("id", distinct=True),
        "new": Coalesce(
            Count(
                Case(When(date_joined__gte=one_day_ago, then=F("id"))),
                distinct=True,
            ), 0),
        "oldest_user_joined": Min("date_joined"),
        "with_inboxes": Coalesce(
            Count(
                Case(When(inbox__isnull=False, then=F("id"))),
                distinct=True,
            ), 0),
    }

    inbox_aggregate = {
        "inbox_count__avg": Coalesce(Avg("inbox_count"), 0),
        "inbox_count__max": Coalesce(Max("inbox_count"), 0),
        "inbox_count__min": Coalesce(Min("inbox_count"), 0),
        "inbox_count__stddev": Coalesce(StdDev("inbox_count"), 0),
        "inbox_count__sum": Coalesce(Sum("inbox_count"), 0),
    }

    email_aggregate = {
        "email_count__avg": Coalesce(Avg("email_count"), 0),
        "email_count__max": Coalesce(Max("email_count"), 0),
        "email_count__min": Coalesce(Min("email_count"), 0),
        "email_count__stddev": Coalesce(StdDev("email_count"), 0),
        "email_count__sum": Coalesce(Sum("email_count"), 0),
    }

    # collect user and inbox stats
    users = get_user_model().objects.aggregate(**user_aggregate)
    inboxes = get_user_model().objects.annotate(
        inbox_count=Count("inbox__id")).aggregate(**inbox_aggregate)

    domain_count = models.Domain.objects.available(None).count()
    inboxes_possible = len(settings.INBOX_CHOICES)**settings.INBOX_LENGTH

    inboxes["total_possible"] = inboxes_possible * domain_count

    # collect email state
    inbox_qs = models.Inbox.objects.exclude(deleted=True).annotate(
        email_count=Count("email__id"))
    emails = inbox_qs.aggregate(**email_aggregate)

    inboxes["with_emails"] = inbox_qs.exclude(email_count=0).count()
    inboxes["disowned"] = models.Inbox.objects.filter(
        user__isnull=True).count()
    emails["emails_read"] = models.Email.objects.filter(read=True).count()

    if last_stat:
        email_diff = (emails["email_count__sum"]
                      or 0) - (last_stat.emails["email_count__sum"] or 0)
        emails["running_total"] = last_stat.emails["running_total"] + max(
            email_diff, 0)
    else:
        emails["running_total"] = emails["email_count__sum"] or 0

    stat = models.Statistic(
        users=users,
        emails=emails,
        inboxes=inboxes,
    )

    stat.save()

    log.info("Saved statistics (%s)", stat.date)
Example #11
def generate_monthly_system_stats(
        location=SHEET_FILES_FOLDER + 'GetIN Traceability Form ' +
        str(timezone.now().strftime("%m-%d-%Y, %H:%M")) + '.xls',
        end_date=None):
    """
    :param location: name of the excel file containing the output
    :param end_date: date up to which the metrics will be generated
    Creates user performance excel sheet
    Sheet Fields: NAME OF HEALTHWORKER, PHONE NO., ROLE, DISTRICT, MONTH, YEAR, NO. OF GIRLS MAPPED, ATTENDED, MISSED, EXPECTED, FOLLOW UPS
    """
    book = WorkbookCreation()
    sheet1 = book.add_sheet('Sheet1')

    data = [["NAME OF HEALTHWORKER"], ["PHONE NO."], ["ROLE"], ["DISTRICT"],
            ["MONTH"], ["YEAR"], ["NO. OF GIRLS MAPPED"], ["ATTENDED"],
            ["MISSED"], ["EXPECTED"], ["FOLLOW UPS"]]

    for c_index, columns in enumerate(data):
        for r_index, row_item in enumerate(columns):
            sheet1.write(r_index, c_index, row_item)

    book.save(location)
    book.save(TemporaryFile())

    convert_xls_to_xlsx(location)

    for district in District.objects.all():
        created_at = timezone.datetime(2019, 10, 1).replace(tzinfo=pytz.utc)
        if not end_date:
            end_date = timezone.now()

        while created_at < end_date.replace(tzinfo=pytz.utc):
            for user in User.objects.filter(district=district):
                if user.role in [
                        USER_TYPE_DHO, USER_TYPE_AMBULANCE, USER_TYPE_DEVELOPER
                ]:
                    continue
                girls = Girl.objects.filter(
                    Q(created_at__gte=created_at)
                    & Q(created_at__lte=add_months(created_at, 1).replace(
                        tzinfo=pytz.utc)) & Q(user=user)).count()
                wb = load_workbook(location.replace('xls', 'xlsx'))
                sheet = wb['Sheet1']

                appointment_data = {"attended": 0, "expected": 0, "missed": 0}

                if user.role == USER_TYPE_MIDWIFE:
                    appointment_data = Appointment.objects.filter(
                        Q(user=user) & Q(created_at__gte=created_at)
                        & Q(created_at__lte=add_months(created_at, 1).replace(
                            tzinfo=pytz.utc))).aggregate(
                                attended=Count(
                                    Case(
                                        When(Q(status=ATTENDED), then=1),
                                        output_field=IntegerField(),
                                    )),
                                expected=Count(
                                    Case(
                                        When(Q(status=EXPECTED), then=1),
                                        output_field=IntegerField(),
                                    )),
                                missed=Count(
                                    Case(
                                        When(Q(status=MISSED), then=1),
                                        output_field=IntegerField(),
                                    )))

                followups = FollowUp.objects.filter(
                    Q(user=user) & Q(created_at__gte=created_at)
                    & Q(created_at__lte=add_months(created_at, 1).replace(
                        tzinfo=pytz.utc))).count()

                sheet.append([
                    user.first_name + " " + user.last_name, user.phone,
                    user.role, district.name,
                    created_at.strftime("%B"),
                    created_at.strftime("%Y"), girls,
                    appointment_data['attended'], appointment_data['expected'],
                    appointment_data['missed'], followups
                ])
                wb.save(location.replace('xls', 'xlsx'))
            created_at = add_months(created_at, 1).replace(tzinfo=pytz.utc)
Example #12
    def get(self, request):
        form_data = dict(request.GET)

        age_groups = form_data.get("age-group[]")
        blood_groups = form_data.get("age-group[]")
        genotypes = form_data.get("genotype[]")
        histories = form_data.get("medical-history[]")
        weight_statuses = form_data.get("weight-status[]")

        age_filter = Q(age_group__in=age_groups) if age_groups else Q()
        blood_group_filter = Q(
            blood_group__in=blood_groups) if blood_groups else Q()
        genotype_filter = Q(genotype__in=genotypes) if genotypes else Q()
        history_filter = reduce(
            operator.or_, (Q(histories__diagnosis__icontains=history)
                           | Q(histories__description__icontains=history)
                           for history in histories)) if histories else Q()
        weight_status_filter = Q(
            weight_status__in=weight_statuses) if weight_statuses else Q()

        current = date.today()

        min_date1 = date(current.year - 4, current.month, current.day)
        max_date1 = date(current.year, current.month, current.day)

        min_date2 = date(current.year - 12, current.month, current.day)
        max_date2 = date(current.year - 4, current.month, current.day)

        min_date3 = date(current.year - 19, current.month, current.day)
        max_date3 = date(current.year - 13, current.month, current.day)

        min_date4 = date(current.year - 35, current.month, current.day)
        max_date4 = date(current.year - 20, current.month, current.day)

        min_date5 = date(current.year - 50, current.month, current.day)
        max_date5 = date(current.year - 36, current.month, current.day)

        min_date6 = date(current.year - 70, current.month, current.day)
        max_date6 = date(current.year - 51, current.month, current.day)

        age_group_annotate = Case(When(dob__gte=min_date1,
                                       dob__lte=max_date1,
                                       then=Value("babies")),
                                  When(dob__gte=min_date2,
                                       dob__lte=max_date2,
                                       then=Value("childre")),
                                  When(dob__gte=min_date3,
                                       dob__lte=max_date3,
                                       then=Value("teenagers")),
                                  When(dob__gte=min_date4,
                                       dob__lte=max_date4,
                                       then=Value("youths")),
                                  When(dob__gte=min_date5,
                                       dob__lte=max_date5,
                                       then=Value("adults")),
                                  When(dob__gte=min_date6,
                                       dob__lte=max_date6,
                                       then=Value("elders")),
                                  default=Value("old"),
                                  output_field=CharField())

        weight_status_annotate = Case(
            When(bmi__lt=18.5, bmi__gte=0, then=Value("under-weight")),
            When(bmi__lt=25, bmi__gte=18.5, then=Value("normal-weight")),
            When(bmi__lt=30, bmi__gte=25, then=Value("overweight")),
            When(bmi__lt=35, bmi__gte=30, then=Value("obesity-class-1")),
            When(bmi__lt=40, bmi__gte=35, then=Value("obesity-class-2")),
            When(bmi__gte=40, then=Value("extreme-obesity-class-3")),
            default=None,
            output_field=CharField())

        bmi = F('weight') / (F("height") * F("height"))

        patients = Patient.objects.annotate(
            age_group=age_group_annotate,
            bmi=bmi).annotate(weight_status=weight_status_annotate)

        patients = patients.filter(age_filter | blood_group_filter
                                   | genotype_filter | history_filter
                                   | weight_status_filter).distinct()

        serializer = PatientSerializer(patients, many=True)
        return Response({"patients": serializer.data})
Example #13
def get_products_data(
    queryset: "QuerySet",
    export_fields: Set[str],
    attribute_ids: Optional[List[int]],
    warehouse_ids: Optional[List[int]],
    channel_ids: Optional[List[int]],
) -> List[Dict[str, Union[str, bool]]]:
    """Create data list of products and their variants with fields values.

    It returns a list with product and variant data which can be used as input to a
    csv writer, and a list of attribute and warehouse headers.
    """

    products_with_variants_data = []

    product_fields = set(
        ProductExportFields.HEADERS_TO_FIELDS_MAPPING["fields"].values()
    )
    product_export_fields = export_fields & product_fields
    product_export_fields.add("variants__id")

    products_data = (
        queryset.annotate(
            product_weight=Case(
                When(weight__isnull=False, then=Concat("weight", V(" g"))),
                default=V(""),
                output_field=CharField(),
            ),
            variant_weight=Case(
                When(
                    variants__weight__isnull=False,
                    then=Concat("variants__weight", V(" g")),
                ),
                default=V(""),
                output_field=CharField(),
            ),
        )
        .order_by("pk", "variants__pk")
        .values(*product_export_fields)
        .distinct("pk", "variants__pk")
    )

    products_relations_data = get_products_relations_data(
        queryset, export_fields, attribute_ids, channel_ids
    )

    variants_relations_data = get_variants_relations_data(
        queryset, export_fields, attribute_ids, warehouse_ids, channel_ids
    )

    for product_data in products_data:
        pk = product_data["id"]
        variant_pk = product_data.pop("variants__id")

        product_relations_data: Dict[str, str] = products_relations_data.get(pk, {})
        variant_relations_data: Dict[str, str] = variants_relations_data.get(
            variant_pk, {}
        )

        data = {**product_data, **product_relations_data, **variant_relations_data}

        products_with_variants_data.append(data)

    return products_with_variants_data
Example #14
 def with_likes(self):
     return self.annotate(likes__count=Sum(
         Case(When(bloglikes__flag='like', then=1),
              default=0,
              output_field=IntegerField())))
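
Assuming this lives on a custom QuerySet exposed through the default manager (a hypothetical `Blog.objects`), the most-liked posts can then be fetched by ordering on the annotation:

Blog.objects.with_likes().order_by('-likes__count')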
Example #15
    def get(self, request, organization):
        """
        List saved queries for organization
        """
        if not self.has_feature(organization, request):
            return self.respond(status=404)

        queryset = (DiscoverSavedQuery.objects.filter(
            organization=organization).select_related(
                "created_by").prefetch_related("projects").extra(
                    select={"lower_name": "lower(name)"}))
        query = request.query_params.get("query")
        if query:
            tokens = tokenize_query(query)
            for key, value in tokens.items():
                if key == "name" or key == "query":
                    value = " ".join(value)
                    queryset = queryset.filter(name__icontains=value)
                elif key == "version":
                    value = " ".join(value)
                    queryset = queryset.filter(version=value)
                else:
                    queryset = queryset.none()

        sort_by = request.query_params.get("sortBy")
        if sort_by in ("name", "-name"):
            order_by = [
                "-lower_name" if sort_by.startswith("-") else "lower_name",
                "-date_created",
            ]
        elif sort_by in ("dateCreated", "-dateCreated"):
            order_by = "-date_created" if sort_by.startswith(
                "-") else "date_created"
        elif sort_by in ("dateUpdated", "-dateUpdated"):
            order_by = "-date_updated" if sort_by.startswith(
                "-") else "date_updated"
        elif sort_by == "myqueries":
            order_by = [
                Case(When(created_by_id=request.user.id, then=-1),
                     default="created_by_id"),
                "-date_created",
            ]
        else:
            order_by = "lower_name"
        if not isinstance(order_by, list):
            order_by = [order_by]
        queryset = queryset.order_by(*order_by)

        # Old discover expects all queries and uses this parameter.
        if request.query_params.get("all") == "1":
            saved_queries = list(queryset.all())
            return Response(serialize(saved_queries), status=200)

        def data_fn(offset, limit):
            return list(queryset[offset:offset + limit])

        return self.paginate(
            request=request,
            paginator=GenericOffsetPaginator(data_fn=data_fn),
            on_results=lambda x: serialize(x, request.user),
            default_per_page=25,
        )
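The lower_name .extra() select above exists only to provide case-insensitive name ordering; a hedged alternative is the Lower() database function, which avoids raw SQL (this is not the endpoint's actual code):

from django.db.models.functions import Lower

# Hypothetical rewrite of the ordering-related part of the queryset above.
queryset = DiscoverSavedQuery.objects.filter(
    organization=organization).annotate(lower_name=Lower("name"))
order_by = ["-lower_name" if sort_by.startswith("-") else "lower_name",
            "-date_created"]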
Exemple #16
0
    def filter(self, qs, value):
        # initialize the Elasticsearch connection
        client = Elasticsearch([settings.ELASTICSEARCH_HOST])
        value = value.lower()

        # build the search query
        search_query = {
            "bool": {
                "must_not": [  # исключает из выдачи is_published=False
                    {
                        "term": {
                            "is_published": False
                        }
                    }
                ],
                "should": [
                    {
                        "simple_query_string": {  # ищем что-то разумное
                            "fields": ["fullname", "category_name"],
                            "quote_field_suffix": ".exact",
                            "query": value
                        }
                    },
                    {
                        # partial match on transliterated strings (eng -> rus)
                        # constant_score disables boosting by term frequency
                        "constant_score": {
                            "filter": {
                                "match": {
                                    "fullname_translit": {
                                        "query": value,
                                        "fuzziness": 1,
                                        "operator": "and",
                                    }
                                }
                            }
                        }
                    },
                ]
            }
        }

        # build the search request
        s = Search(using=client, index='offer') \
            .query(search_query)\
            .sort("_score", "-views")\
            .extra(size=self.max_result, from_=0)

        self.hits_list = []
        items = s.execute()
        if items:
            for item in items:
                self.hits_list.append(item.meta.id)
            # needed so the Postgres selection keeps the order returned by Elasticsearch
            self.hits_order = Case(*[
                When(pk=pk, then=pos) for pos, pk in enumerate(self.hits_list)
            ])
            qs = qs.filter(id__in=self.hits_list).order_by(self.hits_order)
        else:
            qs = qs.none()

        # TODO: old implementation. Maybe keep it as a fallback?
        # else:
        #     qs = qs.annotate(full_name=Concat(
        #         'product__article', Value(' '),
        #         'product__title', Value(' '),
        #         'product__search_title'))
        #     bits = value.split(' ')
        #     if len(bits) == 1 and bits[0].isdecimal():
        #         full_name_clauses = Q(full_name__icontains=bits[0])
        #     else:
        #         full_name_clauses = reduce(
        #             operator.and_,
        #             [Q(full_name__iregex=r'(^|\s)%s' % escape(v))
        #              for v in bits])
        #
        #     unpublished = Category.objects.get_queryset_descendants(
        #         Category.objects.filter(is_published=False),
        #         include_self=True)
        #
        #     qs = (qs.filter(full_name_clauses)
        #             .exclude(product__category__in=unpublished))
        #
        #     if self.uniq_category:
        #         products = (qs.order_by('product__category__title')
        #                       .distinct('product__category__title'))
        #         qs = (qs.filter(id__in=products)
        #                 .order_by('-product__category__views'))

        return qs
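The Case(*[When(pk=..., then=pos) ...]) expression above is what keeps the Postgres rows in the order Elasticsearch returned them. A minimal standalone sketch of the same pattern (the function name is an assumption):

from django.db.models import Case, When

def order_like(qs, id_list):
    # Map each pk to its position in id_list and sort by that expression.
    preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(id_list)])
    return qs.filter(pk__in=id_list).order_by(preserved)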
Exemple #17
0
    def sort_by_attribute(self,
                          attribute_pk: Union[int, str],
                          ascending: bool = True):
        """Sort a query set by the values of the given product attribute.

        :param attribute_pk: The database ID (must be a number) of the attribute
                             to sort by.
        :param ascending: The sorting direction.
        """
        qs: models.QuerySet = self

        # Retrieve all the products' attribute data IDs (assignments) and
        # product types that have the given attribute associated to them
        associated_values = tuple(
            AttributeProduct.objects.filter(
                attribute_id=attribute_pk).values_list("pk",
                                                       "product_type_id"))

        if not associated_values:
            if not ascending:
                return qs.reverse()
            return qs

        attribute_associations, product_types_associated_to_attribute = zip(
            *associated_values)

        qs = qs.annotate(
            # Relation used to retrieve the attribute data (singular) of each product.
            # Refer to `AttributeProduct`.
            filtered_attribute=FilteredRelation(
                relation_name="attributes",
                condition=Q(
                    attributes__assignment_id__in=attribute_associations),
            ),
            # Implicit `GROUP BY` required for the `StringAgg` aggregation
            grouped_ids=Count("id"),
            # String aggregation of the attribute's values to efficiently sort them
            concatenated_values=Case(
                # If the product has no association data but has the given attribute
                # associated to its product type, then consider the concatenated values
                # as empty (non-null).
                When(
                    Q(product_type_id__in=product_types_associated_to_attribute
                      )
                    & Q(filtered_attribute=None),
                    then=models.Value(""),
                ),
                default=StringAgg(
                    F("filtered_attribute__values__name"),
                    delimiter=",",
                    ordering=([
                        f"filtered_attribute__values__{field_name}"
                        for field_name in AttributeValue._meta.ordering or []
                    ]),
                ),
                output_field=models.CharField(),
            ),
            concatenated_values_order=Case(
                # Make the products having no such attribute be last in the sorting
                When(concatenated_values=None, then=2),
                # Put the products having an empty attribute value at the bottom of
                # the other products.
                When(concatenated_values="", then=1),
                # Put the products having an attribute value always at the top.
                default=0,
                output_field=models.IntegerField(),
            ),
        )

        # Sort by concatenated_values_order first, then sort each group of
        # products (0, 1, 2, ...) by its attribute values, and finally by name
        # when products share the same values (or have no values at all).
        qs = qs.order_by("concatenated_values_order", "concatenated_values",
                         "name")

        # Descending sorting
        if not ascending:
            return qs.reverse()
        return qs
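The two Case annotations split products into three buckets (has values, empty value, attribute missing) so the final order_by can sort within each bucket. A hypothetical call site (Product and the attribute id are illustrative):

products = Product.objects.all().sort_by_attribute(attribute_pk=42,
                                                   ascending=False)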
Exemple #18
0
def conditional_sum(value=1, **cond):
    """Wrapper to generate SUM on boolean/enum values"""
    return Sum(
        Case(When(then=value, **cond), default=0, output_field=IntegerField()))
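An example aggregation built with conditional_sum(); the model and lookup names are hypothetical:

stats = Task.objects.aggregate(
    instructors=conditional_sum(role__name='instructor'),
    helpers=conditional_sum(role__name='helper'),
)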
Exemple #19
0
    def get_all_activity_over_time(self, start, end):
        events_qs = Event.objects.filter(start__gte=start, start__lte=end)
        swc_tag = Tag.objects.get(name='SWC')
        dc_tag = Tag.objects.get(name='DC')
        wise_tag = Tag.objects.get(name='WiSE')
        TTT_tag = Tag.objects.get(name='TTT')
        self_organized_host = Organization.objects.get(domain='self-organized')

        # count workshops: SWC, DC, total (SWC and/or DC), self-organized,
        # WiSE, TTT
        swc_workshops = events_qs.filter(tags=swc_tag)
        dc_workshops = events_qs.filter(tags=dc_tag)
        swc_dc_workshops = events_qs.filter(tags__in=[swc_tag, dc_tag]).count()
        wise_workshops = events_qs.filter(tags=wise_tag).count()
        ttt_workshops = events_qs.filter(tags=TTT_tag).count()
        self_organized_workshops = events_qs \
            .filter(administrator=self_organized_host).count()

        # total and unique instructors for both SWC and DC workshops
        swc_total_instr = Person.objects \
            .filter(task__event__in=swc_workshops,
                    task__role__name='instructor')
        swc_unique_instr = swc_total_instr.distinct().count()
        swc_total_instr = swc_total_instr.count()
        dc_total_instr = Person.objects \
            .filter(task__event__in=dc_workshops,
                    task__role__name='instructor')
        dc_unique_instr = dc_total_instr.distinct().count()
        dc_total_instr = dc_total_instr.count()

        # total learners for both SWC and DC workshops
        swc_total_learners = swc_workshops.aggregate(count=Sum('attendance'))
        swc_total_learners = swc_total_learners['count']
        dc_total_learners = dc_workshops.aggregate(count=Sum('attendance'))
        dc_total_learners = dc_total_learners['count']

        # Workshops missing any of this data.
        missing_attendance = events_qs.filter(attendance=None) \
                                      .values_list('slug', flat=True)
        missing_instructors = events_qs.annotate(instructors=Sum(
            Case(When(task__role__name='instructor', then=Value(1)),
                 default=Value(0),
                 output_field=IntegerField()))).filter(
                     instructors=0).values_list('slug', flat=True)

        return {
            'start': start,
            'end': end,
            'workshops': {
                'SWC': swc_workshops.count(),
                'DC': dc_workshops.count(),
                # This dictionary is traversed in a template where we cannot
                # write "{{ data.workshops.SWC,DC }}", because commas are
                # disallowed in templates. Therefore, we include
                # swc_dc_workshops twice, under two different keys:
                # - 'SWC,DC' - for backward compatibility,
                # - 'SWC_or_DC' - so that you can access it in a template.
                'SWC,DC': swc_dc_workshops,
                'SWC_or_DC': swc_dc_workshops,
                'WiSE': wise_workshops,
                'TTT': ttt_workshops,
                # We include self_organized_workshops twice, under two
                # different keys, for the same reason as swc_dc_workshops.
                'self-organized': self_organized_workshops,
                'self_organized': self_organized_workshops,
            },
            'instructors': {
                'SWC': {
                    'total': swc_total_instr,
                    'unique': swc_unique_instr,
                },
                'DC': {
                    'total': dc_total_instr,
                    'unique': dc_unique_instr,
                },
            },
            'learners': {
                'SWC': swc_total_learners,
                'DC': dc_total_learners,
            },
            'missing': {
                'attendance': missing_attendance,
                'instructors': missing_instructors,
            }
        }
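A hedged example of calling the method above (dates are illustrative; `report` stands in for an instance of whatever class defines it):

import datetime

data = report.get_all_activity_over_time(start=datetime.date(2020, 1, 1),
                                          end=datetime.date(2020, 12, 31))
swc_workshop_count = data['workshops']['SWC']
missing_instructors = list(data['missing']['instructors'])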
Exemple #20
0
def num_answered(examlog):
    return (examlog.attemptlogs.values_list(
        "item",
        "content_id").order_by("completion_timestamp").distinct().aggregate(
            complete__sum=Sum(Case(When(complete=True, then=1),
                                   default=0))).get("complete__sum"))
Exemple #21
0
    def for_table(self, user):
        activities = self.model.activities.rel.model
        comments = activities.comments.filter(submission=OuterRef('id')).visible_to(user)
        roles_for_review = self.model.assigned.field.model.objects.with_roles().filter(
            submission=OuterRef('id'), reviewer=user)

        review_model = self.model.reviews.field.model
        reviews = review_model.objects.filter(submission=OuterRef('id'))
        opinions = review_model.opinions.field.model.objects.filter(review__submission=OuterRef('id'))
        reviewers = self.model.assigned.field.model.objects.filter(submission=OuterRef('id'))

        return self.with_latest_update().annotate(
            comment_count=Coalesce(
                Subquery(
                    comments.values('submission').order_by().annotate(count=Count('pk')).values('count'),
                    output_field=IntegerField(),
                ),
                0,
            ),
            opinion_disagree=Subquery(
                opinions.filter(opinion=DISAGREE).values(
                    'review__submission'
                ).annotate(count=Count('*')).values('count')[:1],
                output_field=IntegerField(),
            ),
            review_staff_count=Subquery(
                reviewers.staff().values('submission').annotate(count=Count('pk')).values('count'),
                output_field=IntegerField(),
            ),
            review_count=Subquery(
                reviewers.values('submission').annotate(count=Count('pk')).values('count'),
                output_field=IntegerField(),
            ),
            review_submitted_count=Subquery(
                reviewers.reviewed().values('submission').annotate(
                    count=Count('pk', distinct=True)
                ).values('count'),
                output_field=IntegerField(),
            ),
            review_recommendation=Case(
                When(opinion_disagree__gt=0, then=MAYBE),
                default=Subquery(
                    reviews.submitted().values('submission').annotate(
                        calc_recommendation=Sum('recommendation') / Count('recommendation'),
                    ).values('calc_recommendation'),
                    output_field=IntegerField(),
                )
            ),
            role_icon=Subquery(roles_for_review[:1].values('role__icon')),
        ).prefetch_related(
            Prefetch(
                'assigned',
                queryset=AssignedReviewers.objects.reviewed().review_order().select_related(
                    'reviewer',
                ).prefetch_related(
                    Prefetch('review__opinions', queryset=ReviewOpinion.objects.select_related('author')),
                ),
                to_attr='has_reviewed'
            ),
            Prefetch(
                'assigned',
                queryset=AssignedReviewers.objects.not_reviewed().staff(),
                to_attr='hasnt_reviewed'
            )
        ).select_related(
            'page',
            'round',
            'lead',
            'user',
            'screening_status',
            'previous__page',
            'previous__round',
            'previous__lead',
        )
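A hypothetical call site; the concrete submission model name is an assumption, since for_table() is defined on its manager/queryset above:

submissions = Submission.objects.for_table(user=request.user)
rows = submissions.values('id', 'comment_count', 'review_count',
                          'review_recommendation')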
Exemple #22
0
def testing_annotations():
    cond1 = Conducente(nome="Luke Skywalker", nick="Luke", attivo=True)
    cond1.save()
    cond2 = Conducente(nome="Anakin Skywalker", nick=None, attivo=True)
    cond2.save()

    lrif = Luogo(nome='Home')
    lrif.save()

    Viaggio(annullato=True,
            data=date_enforce(datetime.date(2016, 1, 11)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=1000,
            conducente=cond1, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 1, 11)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=10,
            conducente=cond1, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 1, 12)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=10,
            conducente=cond1, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 1, 12)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=30,
            conducente=cond2, conducente_confermato=True,
            ).save()

    Viaggio(data=date_enforce(datetime.date(2016, 2, 12)),
            da=lrif, a=lrif, luogoDiRiferimento=lrif,
            prezzo=10,
            conducente=cond1, conducente_confermato=True,
            ).save()

    # Testing the query
    qs = Viaggio.objects.filter(annullato=False,
                                data__gte=date_enforce(datetime.date(2016, 1, 11)),
                                data__lt=date_enforce(datetime.date(2016, 2, 14)))
    qs = (qs
          .order_by()
          .annotate(year=Extract('data', lookup_name='year'),
                    month=Extract('data', lookup_name='month'))
          .order_by('conducente')
          .values('year', 'month')
          .annotate(tot=Sum('prezzo'),
                    commissione=Sum(
                        Case(When(tipo_commissione='F', then=F('commissione')),
                             When(tipo_commissione='P',
                                  then=F('commissione') * F('prezzo') / Value(100)),
                             ),
                        output_field=DecimalField(max_digits=9, decimal_places=2, default=0),
                    ),
                    conducente__nome=F('conducente__nome')
                    ).order_by('conducente__nome')
          )
    # print(qs.query)
    qs = list(qs)
    pprint(list(qs))
    assert len(qs) == 3, "We should have 3 rows: Luke with 2 months, Ana with 1"

    luke_runs = list(filter(lambda x: x['conducente__nome'] == 'Luke Skywalker', qs))
    assert len(luke_runs) == 2
    assert luke_runs[0]['month'] == 1 and luke_runs[0]['tot'] == 20
    assert luke_runs[1]['month'] == 2 and luke_runs[1]['tot'] == 10

    anakin_runs = list(filter(lambda x: x['conducente__nome'] == 'Anakin Skywalker', qs))
    assert len(anakin_runs) == 1
    assert anakin_runs[0]['month'] == 1 and anakin_runs[0]['tot'] == 30
Exemple #23
0
 def with_entry_count(self):
     return self.annotate(entries_count=Count(
         Case(When(entries__is_published=True, then=1))))
Exemple #24
0
def metrics(request, mtype):
    template = 'dojo/metrics.html'
    page_name = 'Product Type Metrics'
    show_pt_filter = True

    findings = Finding.objects.filter(
        verified=True,
        severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')
    ).prefetch_related(
        'test__engagement__product', 'test__engagement__product__prod_type',
        'test__engagement__risk_acceptance', 'risk_acceptance_set', 'reporter'
    ).extra(select={
        'ra_count':
        'SELECT COUNT(*) FROM dojo_risk_acceptance INNER JOIN '
        'dojo_risk_acceptance_accepted_findings ON '
        '( dojo_risk_acceptance.id = dojo_risk_acceptance_accepted_findings.risk_acceptance_id ) '
        'WHERE dojo_risk_acceptance_accepted_findings.finding_id = dojo_finding.id',
    }, )
    active_findings = Finding.objects.filter(
        verified=True,
        active=True,
        severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')
    ).prefetch_related(
        'test__engagement__product', 'test__engagement__product__prod_type',
        'test__engagement__risk_acceptance', 'risk_acceptance_set', 'reporter'
    ).extra(select={
        'ra_count':
        'SELECT COUNT(*) FROM dojo_risk_acceptance INNER JOIN '
        'dojo_risk_acceptance_accepted_findings ON '
        '( dojo_risk_acceptance.id = dojo_risk_acceptance_accepted_findings.risk_acceptance_id ) '
        'WHERE dojo_risk_acceptance_accepted_findings.finding_id = dojo_finding.id',
    }, )

    if mtype != 'All':
        pt = Product_Type.objects.filter(id=mtype)
        request.GET._mutable = True
        request.GET.appendlist('test__engagement__product__prod_type', mtype)
        request.GET._mutable = False
        mtype = pt[0].name
        show_pt_filter = False
        page_name = '%s Metrics' % mtype
        prod_type = pt
    elif 'test__engagement__product__prod_type' in request.GET:
        prod_type = Product_Type.objects.filter(id__in=request.GET.getlist(
            'test__engagement__product__prod_type', []))
    else:
        prod_type = Product_Type.objects.all()
    findings = MetricsFindingFilter(request.GET, queryset=findings)
    active_findings = MetricsFindingFilter(request.GET,
                                           queryset=active_findings)

    findings.qs  # this is needed to load details from filter since it is lazy
    active_findings.qs  # this is needed to load details from filter since it is lazy

    start_date = findings.filters['date'].start_date
    start_date = datetime(start_date.year,
                          start_date.month,
                          start_date.day,
                          tzinfo=timezone.get_current_timezone())
    end_date = findings.filters['date'].end_date
    end_date = datetime(end_date.year,
                        end_date.month,
                        end_date.day,
                        tzinfo=timezone.get_current_timezone())

    if len(prod_type) > 0:
        findings_closed = Finding.objects.filter(
            mitigated__range=[start_date, end_date],
            test__engagement__product__prod_type__in=prod_type
        ).prefetch_related('test__engagement__product')
        # capture the accepted findings in period
        accepted_findings = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date],
                                                   test__engagement__product__prod_type__in=prod_type). \
            prefetch_related('test__engagement__product')
        accepted_findings_counts = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date],
                                                          test__engagement__product__prod_type__in=prod_type). \
            prefetch_related('test__engagement__product').aggregate(
            total=Sum(
                Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                          then=Value(1)),
                     output_field=IntegerField())),
            critical=Sum(
                Case(When(severity='Critical',
                          then=Value(1)),
                     output_field=IntegerField())),
            high=Sum(
                Case(When(severity='High',
                          then=Value(1)),
                     output_field=IntegerField())),
            medium=Sum(
                Case(When(severity='Medium',
                          then=Value(1)),
                     output_field=IntegerField())),
            low=Sum(
                Case(When(severity='Low',
                          then=Value(1)),
                     output_field=IntegerField())),
            info=Sum(
                Case(When(severity='Info',
                          then=Value(1)),
                     output_field=IntegerField())),
        )
    else:
        findings_closed = Finding.objects.filter(
            mitigated__range=[start_date, end_date]).prefetch_related(
                'test__engagement__product')
        accepted_findings = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date]). \
            prefetch_related('test__engagement__product')
        accepted_findings_counts = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date]). \
            prefetch_related('test__engagement__product').aggregate(
            total=Sum(
                Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                          then=Value(1)),
                     output_field=IntegerField())),
            critical=Sum(
                Case(When(severity='Critical',
                          then=Value(1)),
                     output_field=IntegerField())),
            high=Sum(
                Case(When(severity='High',
                          then=Value(1)),
                     output_field=IntegerField())),
            medium=Sum(
                Case(When(severity='Medium',
                          then=Value(1)),
                     output_field=IntegerField())),
            low=Sum(
                Case(When(severity='Low',
                          then=Value(1)),
                     output_field=IntegerField())),
            info=Sum(
                Case(When(severity='Info',
                          then=Value(1)),
                     output_field=IntegerField())),
        )

    r = relativedelta(end_date, start_date)
    months_between = (r.years * 12) + r.months
    # include current month
    months_between += 1

    weeks_between = int(
        ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7)))
    if weeks_between <= 0:
        weeks_between += 2

    monthly_counts = get_period_counts(active_findings.qs,
                                       findings.qs,
                                       findings_closed,
                                       accepted_findings,
                                       months_between,
                                       start_date,
                                       relative_delta='months')
    weekly_counts = get_period_counts(active_findings.qs,
                                      findings.qs,
                                      findings_closed,
                                      accepted_findings,
                                      weeks_between,
                                      start_date,
                                      relative_delta='weeks')

    top_ten = Product.objects.filter(
        engagement__test__finding__verified=True,
        engagement__test__finding__false_p=False,
        engagement__test__finding__duplicate=False,
        engagement__test__finding__out_of_scope=False,
        engagement__test__finding__mitigated__isnull=True,
        engagement__test__finding__severity__in=('Critical', 'High', 'Medium',
                                                 'Low'),
        prod_type__in=prod_type).annotate(
            critical=Sum(
                Case(When(engagement__test__finding__severity='Critical',
                          then=Value(1)),
                     output_field=IntegerField())),
            high=Sum(
                Case(When(engagement__test__finding__severity='High',
                          then=Value(1)),
                     output_field=IntegerField())),
            medium=Sum(
                Case(When(engagement__test__finding__severity='Medium',
                          then=Value(1)),
                     output_field=IntegerField())),
            low=Sum(
                Case(When(engagement__test__finding__severity='Low',
                          then=Value(1)),
                     output_field=IntegerField())),
            total=Sum(
                Case(When(engagement__test__finding__severity__in=('Critical',
                                                                   'High',
                                                                   'Medium',
                                                                   'Low'),
                          then=Value(1)),
                     output_field=IntegerField()))).order_by(
                         '-critical', '-high', '-medium', '-low')[:10]

    age_detail = [0, 0, 0, 0]

    in_period_counts = {
        "Critical": 0,
        "High": 0,
        "Medium": 0,
        "Low": 0,
        "Info": 0,
        "Total": 0
    }
    in_period_details = {}

    closed_in_period_counts = {
        "Critical": 0,
        "High": 0,
        "Medium": 0,
        "Low": 0,
        "Info": 0,
        "Total": 0
    }
    closed_in_period_details = {}

    accepted_in_period_details = {}

    for finding in findings.qs:
        if 0 <= finding.age <= 30:
            age_detail[0] += 1
        elif 30 < finding.age <= 60:
            age_detail[1] += 1
        elif 60 < finding.age <= 90:
            age_detail[2] += 1
        elif finding.age > 90:
            age_detail[3] += 1

        in_period_counts[finding.severity] += 1
        in_period_counts['Total'] += 1

        if finding.test.engagement.product.name not in in_period_details:
            in_period_details[finding.test.engagement.product.name] = {
                'path':
                reverse('view_product_findings',
                        args=(finding.test.engagement.product.id, )),
                'Critical':
                0,
                'High':
                0,
                'Medium':
                0,
                'Low':
                0,
                'Info':
                0,
                'Total':
                0
            }
        in_period_details[finding.test.engagement.product.name][
            finding.severity] += 1
        in_period_details[finding.test.engagement.product.name]['Total'] += 1

    for finding in accepted_findings:
        if finding.test.engagement.product.name not in accepted_in_period_details:
            accepted_in_period_details[
                finding.test.engagement.product.name] = {
                    'path':
                    reverse('accepted_findings') +
                    '?test__engagement__product=' +
                    str(finding.test.engagement.product.id),
                    'Critical':
                    0,
                    'High':
                    0,
                    'Medium':
                    0,
                    'Low':
                    0,
                    'Info':
                    0,
                    'Total':
                    0
                }
        accepted_in_period_details[finding.test.engagement.product.name][
            finding.severity] += 1
        accepted_in_period_details[
            finding.test.engagement.product.name]['Total'] += 1

    for f in findings_closed:
        closed_in_period_counts[f.severity] += 1
        closed_in_period_counts['Total'] += 1

        if f.test.engagement.product.name not in closed_in_period_details:
            closed_in_period_details[f.test.engagement.product.name] = {
                'path':
                reverse('closed_findings') + '?test__engagement__product=' +
                str(f.test.engagement.product.id),
                'Critical':
                0,
                'High':
                0,
                'Medium':
                0,
                'Low':
                0,
                'Info':
                0,
                'Total':
                0
            }
        closed_in_period_details[f.test.engagement.product.name][
            f.severity] += 1
        closed_in_period_details[f.test.engagement.product.name]['Total'] += 1

    punchcard = list()
    ticks = list()
    highest_count = 0

    if 'view' in request.GET and 'dashboard' == request.GET['view']:
        punchcard, ticks, highest_count = get_punchcard_data(
            findings.qs, weeks_between, start_date)
        page_name = (get_system_setting('team_name')) + " Metrics"
        template = 'dojo/dashboard-metrics.html'

    add_breadcrumb(title=page_name,
                   top_level=not len(request.GET),
                   request=request)

    return render(
        request, template, {
            'name': page_name,
            'start_date': start_date,
            'end_date': end_date,
            'findings': findings,
            'opened_per_month': monthly_counts['opened_per_period'],
            'active_per_month': monthly_counts['active_per_period'],
            'opened_per_week': weekly_counts['opened_per_period'],
            'accepted_per_month': monthly_counts['accepted_per_period'],
            'accepted_per_week': weekly_counts['accepted_per_period'],
            'top_ten_products': top_ten,
            'age_detail': age_detail,
            'in_period_counts': in_period_counts,
            'in_period_details': in_period_details,
            'accepted_in_period_counts': accepted_findings_counts,
            'accepted_in_period_details': accepted_in_period_details,
            'closed_in_period_counts': closed_in_period_counts,
            'closed_in_period_details': closed_in_period_details,
            'punchcard': punchcard,
            'ticks': ticks,
            'highest_count': highest_count,
            'show_pt_filter': show_pt_filter,
        })
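The view above repeats the same severity Sum/Case buckets for several querysets; a hedged helper that builds them once (the helper name is an assumption, not DefectDojo code):

def severity_annotations():
    def count_for(*severities):
        return Sum(Case(When(severity__in=severities, then=Value(1)),
                        output_field=IntegerField()))
    return {
        'total': count_for('Critical', 'High', 'Medium', 'Low'),
        'critical': count_for('Critical'),
        'high': count_for('High'),
        'medium': count_for('Medium'),
        'low': count_for('Low'),
        'info': count_for('Info'),
    }

# Hypothetical usage: accepted_findings_counts = qs.aggregate(**severity_annotations())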
Exemple #25
0
 def with_last_modified(self):
     return self.annotate(modified=Max(
         Case(When(entries__is_published=True, then='entries__created')))
                          ).exclude(modified=None).order_by('-modified')
Exemple #26
0
def analisis(request):
    data = {}
    check = Personal.objects.get(usuario_personal=request.user)

    respuesta = Choice.objects.filter(usuario_choice__company=check.company)

    print(respuesta)
    if Choice.objects.filter(
            usuario_choice__usuario_personal=request.user).exists():

        respuesta2 = Question.objects.all().values('type').annotate(
            total=Count("choice", filter=Q(choice__company=check.company)),
            total_true=Sum(
                Case(When(choice__company=check.company,
                          choice__answer=True,
                          then=1.0),
                     default=0,
                     output_field=FloatField())),
            total_false=Sum(
                Case(When(choice__company=check.company,
                          choice__answer=False,
                          then=1.0),
                     default=0,
                     output_field=FloatField())))

        csv = open("Reporte.csv", "w")
        csv.write(
            "Dominio; Total verdaderas; Total Falsas; Porcentaje Cumplimiento; Porcentaje no cumplimiento; Recomendaciones\n"
        )
        for x in respuesta2:
            print(x)
            lista = Analisis()
            lista.type = x['type']
            csv.write(str(x['type']) + ";")

            lista.total_t = x['total_true']
            csv.write(str(x['total_true']) + ";")

            lista.total_f = x['total_false']
            csv.write(str(x['total_false']) + ";")
            lista.percentaje_t = (x['total_true']) / x['total'] * 100
            lista.percentaje_t = int(lista.percentaje_t)
            csv.write(str(int(lista.percentaje_t)) + ";")

            lista.percentaje_f = (x['total_false']) / x['total'] * 100
            lista.percentaje_f = int(lista.percentaje_f)
            csv.write(str(int(lista.percentaje_f)) + ";")
            lista.save()
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D1":
                csv.write(
                    "Se recomienda la realización de una política de seguridad informática, que debe contener los conceptos de seguridad de la información, una estructura para establecer los objetivos y las formas de control y el compromiso de la dirección con políticas.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D2":
                csv.write(
                    "Se recomienda establecer una estructura para implementar la seguridad de la información en una empresa y de esta manera gestionarla de manera adecuada. Para ello, las actividades de seguridad de la información deben ser coordinadas por representantes de la organización que deben tener responsabilidad bien definidas y proteger las informaciones de carácter confidencial.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D3":
                csv.write(
                    "Se recomienda identificar y clasificar los activos, de modo que un inventario pueda ser estructurado y posteriormente mantenido. Además, deben seguir reglas documentadas, que definen que tipo de uso se permite con dichos activos.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D4":
                csv.write(
                    "Se recomienda la revisión de seguridad de acuerdo con políticas y procedimiento establecidos por la organización o por otra parte que los empleados de la organización reciban entrenamiento adecuado de seguridad correspondiente a sus funciones.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D5":
                csv.write(
                    "Se recomienda que los equipos y instalaciones de procesamiento de información critica o sensible deben mantenerse en áreas seguras, con niveles y controles de acceso apropiados, incluyendo protección contra amenazas físicas y ambientales.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D6":
                csv.write(
                    "Se recomienda que los procedimientos y responsabilidades por la gestión y operación de todos los recursos de procesamiento de la información estén definidos. Esto incluye la gestión de servicio tercerizados, la planificación de recurso del sistema para minimizar el riesgo de fallas, la creación de procedimientos para la generación de copias de seguridad y su recuperación.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D7":
                csv.write(
                    "Se recomienda que los recursos de procesamiento de la información y los procesos de negocios deben ser controlado con base en los requisitos de negocio y en la seguridad de la información. Debe garantizarse el acceso de cada usuario autorizado y prevenido el acceso no autorizados a los sistema de información de manera que evite daños a documentos y recursos de procesamiento de la información que estén fuera de alcance de cualquiera.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D8":
                csv.write(
                    "Se recomienda que los requisitos de seguridad de los sistema de información debe ser identificados y acordados antes de su desarrollo y/o de su implementación, para que así puedan ser protegidos para el mantenimiento de su confidencialidad, autenticidad o integridad por medio criptográficos.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D9":
                csv.write(
                    "Se recomienda que los procedimientos formales de registro y escalonamiento deben ser establecidos y los empleados, proveedores y terceros deber ser conscientes de los procedimientos para notificar los eventos de seguridad de la información para asegurar que se comuniquen lo más rápido posible y corregidos en tiempo hábil.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D10":
                csv.write(
                    "Se recomienda que los planes de continuidad del negocio deben ser desarrollados e implementados, con el fin de impedir la interrupción de las actividades del negocio y asegurar que las operaciones esenciales sean rápidamente recuperadas.\n"
                )
            if lista.percentaje_t < 50 and lista.percentaje_t >= 0 and lista.type == "D11":
                csv.write(
                    "Se recomienda realizar una revisión para evitar la violación de cualquier ley criminal o civil, garantizando estatutos, regulación u obligaciones contractuales y de cualesquiera requisitos de seguridad de la información. En caso necesario, la empresa puede contratar una consultoría especializada, para que verifique su cumplimiento y adherencia a los requisitos legales y reglamentarios.\n"
                )
            else:
                csv.write("\n")

        csv.close()

        data['object_list'] = Analisis.objects.all().order_by('-id')
        data['object_list'] = data['object_list'][0:11]
        data['object_list_2'] = data['object_list'][12:23]
    return render(request, "analisis.html", data)
Exemple #27
0
    def get_queryset(self):
        param = self.request.query_params
        request_id = param.get('request_id', None)
        # request_by = param.get('request_by', None)
        request_status = param.get('request_status', None)
        request_type = param.get('request_type', None)
        query_surname = param.get('surname', None)
        query_given_name = param.get('given_name', None)
        section = param.get('section', None)
        only_todo = param.get('only_todo', None)
        account_type = param.get('account_type', None)
        uam_id = param.get('uam_id', None)
        qs = super().get_queryset()

        if uam_id:
            qs = qs.filter(related_user__uam_id=uam_id)
        if request_type:
            if (request_type == 'CreateAccount'):
                qs = qs.instance_of(CreateAccountRequest)
            elif (request_type == 'UpdateAccount'):
                qs = qs.instance_of(UpdateAccountRequest)
            elif (request_type == 'DeleteAccount'):
                qs = qs.instance_of(DeleteAccountRequest)
            elif (request_type == 'ResetPassword'):
                qs = qs.instance_of(ResetPasswordRequest)
            elif (request_type == 'DisableAccount'):
                qs = qs.instance_of(DisableAccountRequest)
            elif (request_type == 'ReenableAccount'):
                qs = qs.instance_of(EnableAccountRequest)
        if only_todo:
            qs = qs.filter(self._get_todo_criteria())
        if request_id:
            qs = qs.filter(request_id__contains=request_id)
        if request_status:
            qs = qs.filter(status=request_status)
        if query_surname:
            name_list = query_surname.split()
            for name in name_list:
                qs = qs.filter(query_surname__contains=name)
        if query_given_name:
            name_list = query_given_name.split()
            for name in name_list:
                qs = qs.filter(query_given_name__contains=name)
        if section:
            qs = qs.filter(section=section)
        if account_type:
            qs = qs.annotate(account_type_out=Case(
                # When(instance_of=CreateAccountRequest, then=F('accountrequest__account_type')),
                # When(instance_of=UpdateAccountRequest, then=F('accountrequest__account_type')),
                When(instance_of=AccountRequest,
                     then=F('accountrequest__account_type')),
                default=F('related_user__account_type'),
                output_field=models.IntegerField()))
            # qs = qs.filter(related_user__account_type=account_type)
            qs = qs.filter(account_type_out=account_type)
        qs = qs.values(
            'id',
            'request_id',
            'query_surname',
            'query_given_name',
            'section__id',
            'section__code',
            'query_post_title',
            'submission_date',
            'query_request_type_desc',
            'query_status_desc',
            'creation_date',
            'last_modification_date',
        )
        return qs
Exemple #28
0
def getClassTypeMonthlyData(year=None, series=None, typeLimit=None):
    '''
    To break out by class type and month simultaneously, get data for each
    series and aggregate by class type.
    '''

    # If no year specified, report current year to date.
    if not year:
        year = timezone.now().year

    role_list = DanceRole.objects.distinct()

    # Report data on all students registered unless otherwise specified
    if series not in ['registrations','studenthours'] and series not in [x.pluralName for x in role_list]:
        series = 'registrations'

    when_all = {
        'eventregistration__dropIn': False,
        'eventregistration__cancelled': False,
    }

    annotations = {'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=FloatField()))}

    for this_role in role_list:
        annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(eventregistration__role=this_role)),then=1),output_field=FloatField()))

    series_counts = Series.objects.filter(year=year).annotate(**annotations).annotate(studenthours=F('duration') * F('registrations')).select_related('classDescription__danceTypeLevel__danceType','classDescription__danceTypeLevel')

    # If no limit is specified on the number of types, do not aggregate dance types.
    # Otherwise, report the typeLimit most common types individually and group all
    # others under 'Other'.  This gets tuples of names and counts.
    dance_type_counts = [(dance_type,count) for dance_type,count in Counter([x.classDescription.danceTypeLevel for x in series_counts]).items()]
    dance_type_counts.sort(key=lambda k: k[1],reverse=True)

    if typeLimit:
        dance_types = [x[0] for x in dance_type_counts[:typeLimit]]
    else:
        dance_types = [x[0] for x in dance_type_counts]

    results = []

    # Month by month, calculate the result data
    for month in range(1,13):
        this_month_result = {
            'month': month,
            'month_name': month_name[month],
        }
        for dance_type in dance_types:
            this_month_result[dance_type.__str__()] = \
                series_counts.filter(classDescription__danceTypeLevel=dance_type,month=month).aggregate(Sum(series))['%s__sum' % series]

        if typeLimit:
            this_month_result['Other'] = \
                series_counts.filter(month=month).exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(Sum(series))['%s__sum' % series]

        results.append(this_month_result)

    # Now get totals
    totals_result = {
        'month': 'Totals',
        'month_name': 'totals',
    }

    for dance_type in dance_types:
        totals_result[dance_type.__str__()] = \
            series_counts.filter(classDescription__danceTypeLevel=dance_type).aggregate(Sum(series))['%s__sum' % series]

    if typeLimit:
        totals_result['Other'] = \
            series_counts.exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(Sum(series))['%s__sum' % series]

    results.append(totals_result)

    return results
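A hypothetical call (arguments illustrative):

monthly = getClassTypeMonthlyData(year=2019, series='registrations', typeLimit=5)
january = monthly[0]   # {'month': 1, 'month_name': 'January', ...}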
Exemple #29
0
            'creator',
            'creatoremail',
            'creatorwebsite',
            'custom_country_filter',
            'key_code',
        ],
        'startrun':
        prize_run_fields,
        'endrun':
        prize_run_fields,
        'category': ['name'],
    },
}

EVENT_DONATION_AGGREGATE_FILTER = Case(
    When(EventAggregateFilter, then=F('donation__amount')),
    output_field=DecimalField(decimal_places=2),
)

annotations = {
    'event': {
        'amount': Coalesce(Sum(EVENT_DONATION_AGGREGATE_FILTER), 0),
        'count': Count(EVENT_DONATION_AGGREGATE_FILTER),
        'max': Coalesce(Max(EVENT_DONATION_AGGREGATE_FILTER), 0),
        'avg': Coalesce(Avg(EVENT_DONATION_AGGREGATE_FILTER), 0),
    },
    'prize': {
        'numwinners':
        Count(
            Case(When(PrizeWinnersFilter, then=1),
                 output_field=IntegerField())),
    },
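A hedged sketch of how conditional aggregates like EVENT_DONATION_AGGREGATE_FILTER are typically applied (the queryset is an assumption):

events = Event.objects.annotate(
    amount=Coalesce(Sum(EVENT_DONATION_AGGREGATE_FILTER), 0),
    count=Count(EVENT_DONATION_AGGREGATE_FILTER),
)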
Exemple #30
0
def getClassCountHistogramData(cohortStart=None,cohortEnd=None):

    # Note: Bins are inclusive, and 99999 means 'or more'.  That should last us awhile.
    bins = [
        (1,1),
        (2,2),
        (3,3),
        (4,4),
        (5,5),
        (6,6),
        (7,7),
        (8,8),
        (9,9),
        (10,15),
        (16,20),
        (21,99999)]

    when_all = {
        'eventregistration__dropIn': False,
        'eventregistration__cancelled':False,
    }

    cohortFilters = {}
    roleFilters = {}

    if cohortStart:
        cohortFilters['eventregistration__event__startTime__min__gte'] = cohortStart
        roleFilters['eventregistration__event__startTime__gte'] = cohortStart

    if cohortEnd:
        cohortFilters['eventregistration__event__startTime__min__lte'] = cohortEnd
        roleFilters['eventregistration__event__startTime__lte'] = cohortEnd

    role_list = DanceRole.objects.filter(**roleFilters).distinct()

    annotations = {
        'eventregistration__event__startTime__min': Min('eventregistration__event__startTime'),
        'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),
    }
    for this_role in role_list:
        annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(eventregistration__role=this_role)),then=1),output_field=IntegerField()))

    customers = Customer.objects.annotate(**annotations).filter(**cohortFilters).distinct()

    totalCustomers = customers.filter(registrations__gt=0).count()
    totalClasses = [x.registrations for x in customers if x.registrations]
    totalClasses.sort()

    totalsByRole = {}

    for this_role in role_list:
        totalsByRole[this_role.pluralName] = {
            'customers': customers.filter(**{this_role.pluralName + '__gt': 0}).count(),
            'classes': [getattr(x,this_role.pluralName,None) for x in customers if getattr(x,this_role.pluralName,None)],
        }
        totalsByRole[this_role.pluralName]['classes'].sort()

    results = {}
    lastAll = 0
    lastByRole = {this_role.pluralName:0 for this_role in role_list}
    iByRole = {}

    for this_bin in bins:
        range_max = this_bin[1]

        if this_bin[0] == this_bin[1]:
            this_label = '%s' % this_bin[0]
        elif this_bin[1] == 99999:
            this_label = '%s or more' % this_bin[0]
        else:
            this_label = '%s-%s' % this_bin

        i_all = bisect(totalClasses,range_max,lastAll)
        iByRole = {
            this_role.pluralName:bisect(totalsByRole[this_role.pluralName]['classes'],range_max,lastByRole[this_role.pluralName])
            for this_role in role_list
        }

        # Note: These are not translated because the chart Javascript looks for these keys
        results.update({
            this_label:
            {
                '# Students': (i_all - lastAll),
                '% Students': 100 * (i_all - lastAll) / float(totalCustomers),
                'bin': this_bin,
            },
        })
        for this_role in role_list:
            results[this_label].update({
                '# ' + this_role.pluralName: (iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]),
                '% ' + this_role.pluralName: 100 * (
                    iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]
                ) /
                float(totalsByRole[this_role.pluralName]['customers']),
            })

        lastAll = i_all
        lastByRole = {this_role.pluralName:iByRole[this_role.pluralName] for this_role in role_list}

    return results
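A hypothetical call over a one-year cohort (dates illustrative):

from datetime import datetime

histogram = getClassCountHistogramData(cohortStart=datetime(2019, 1, 1),
                                       cohortEnd=datetime(2019, 12, 31))
one_class_only = histogram['1']['# Students']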
Exemple #31
0
    def get(self, request, organization):
        """
        Retrieve an Organization's Dashboards
        `````````````````````````````````````

        Retrieve a list of dashboards that are associated with the given organization.
        If on the first page, this endpoint will also include any pre-built dashboards
        that haven't been replaced or removed.

        :pparam string organization_slug: the slug of the organization the
                                          dashboards belong to.
        :qparam string query: the title of the dashboard being searched for.
        :auth: required
        """
        if not features.has("organizations:dashboards-basic",
                            organization,
                            actor=request.user):
            return Response(status=404)

        dashboards = Dashboard.objects.filter(
            organization_id=organization.id).select_related("created_by")
        query = request.GET.get("query")
        if query:
            dashboards = dashboards.filter(title__icontains=query)
        prebuilt = Dashboard.get_prebuilt_list(organization, query)

        sort_by = request.query_params.get("sort")
        if sort_by and sort_by.startswith("-"):
            sort_by, desc = sort_by[1:], True
        else:
            desc = False

        if sort_by == "title":
            order_by = [
                "-title" if desc else "title",
                "-date_added",
            ]

        elif sort_by == "dateCreated":
            order_by = "-date_added" if desc else "date_added"

        elif sort_by == "mostPopular":
            order_by = [
                "visits" if desc else "-visits",
                "-date_added",
            ]

        elif sort_by == "recentlyViewed":
            order_by = "last_visited" if desc else "-last_visited"

        elif sort_by == "mydashboards":
            order_by = [
                Case(When(created_by_id=request.user.id, then=-1),
                     default="created_by_id"),
                "-date_added",
            ]

        else:
            order_by = "title"

        if not isinstance(order_by, list):
            order_by = [order_by]

        dashboards = dashboards.order_by(*order_by)

        list_serializer = DashboardListSerializer()

        def handle_results(results):
            serialized = []
            dashboards = []
            for item in results:
                if isinstance(item, dict):
                    cloned = item.copy()
                    widgets = cloned.pop("widgets", [])
                    cloned["widgetDisplay"] = [
                        w["displayType"] for w in widgets
                    ]
                    serialized.append(cloned)
                else:
                    dashboards.append(item)

            serialized.extend(
                serialize(dashboards, request.user,
                          serializer=list_serializer))
            return serialized

        return self.paginate(
            request=request,
            sources=[prebuilt, dashboards],
            paginator_cls=ChainPaginator,
            on_results=handle_results,
        )
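The "mydashboards" branch uses the same Case ordering trick as the saved-query endpoint in Exemple #15; a hedged helper capturing the shared pattern (the function name is an assumption):

from django.db.models import Case, When

def mine_first(user_id, recency_field="-date_added"):
    """Order expressions that pin rows created by `user_id` to the top."""
    return [
        Case(When(created_by_id=user_id, then=-1), default="created_by_id"),
        recency_field,
    ]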