Пример #1
0
    def get_projects_follow_list(self, exclude_private_projects=False):
        """Return followed, non-draft projects split into three querysets.

        Returns a tuple (active, future, past): active projects ordered by
        end date, future and past projects ordered by start date.
        """
        projects = Project.objects.filter(
            follow__creator=self,
            follow__enabled=True,
            is_draft=False,
        )
        if exclude_private_projects:
            projects = projects.exclude(models.Q(access=Access.PRIVATE))

        now = timezone.now()
        # A project's time span is derived from the phases of its modules.
        start = models.Min('module__phase__start_date')
        end = models.Max('module__phase__end_date')

        active = projects \
            .annotate(project_start=start, project_end=end) \
            .filter(project_start__lte=now, project_end__gt=now) \
            .order_by('project_end')

        # Projects without any phase (project_start is NULL) count as future.
        future = projects \
            .annotate(project_start=start) \
            .filter(models.Q(project_start__gt=now)
                    | models.Q(project_start=None)) \
            .order_by('project_start')

        past = projects \
            .annotate(project_start=start, project_end=end) \
            .filter(project_end__lt=now) \
            .order_by('project_start')

        return active, future, past
Пример #2
0
    def get_queue_status(self):
        """Return per-job-type queue statistics.

        :returns: A list of each job type with calculated statistics.
        :rtype: list[:class:`queue.models.QueueStatus`]
        """
        job_type_keys = ['job_type__%s' % f for f in JobType.BASE_FIELDS]

        # One row per distinct job type, with aggregate statistics attached.
        rows = Queue.objects.values(*job_type_keys).annotate(
            count=models.Count('job_type'),
            longest_queued=models.Min('queued'),
            highest_priority=models.Min('priority'),
        ).order_by('job_type__is_paused', 'highest_priority', 'longest_queued')

        # Re-hydrate each row into a JobType model plus its statistics.
        results = []
        for row in rows:
            job_type = JobType(**{
                f: row['job_type__%s' % f] for f in JobType.BASE_FIELDS
            })
            results.append(QueueStatus(job_type, row['count'],
                                       row['longest_queued'],
                                       row['highest_priority']))
        return results
Пример #3
0
 def order_by_position(self):
     """Order by job title sort order, then by earliest start date."""
     # Annotating avoids duplicate rows caused by users holding several
     # positions; the two minima may come from different positions.
     annotated = self.annotate(
         min_title=models.Min('user__positions__title__sort_order'),
         min_start=models.Min('user__positions__start_date'))
     return annotated.order_by('min_title', 'min_start', 'user__last_name')
Пример #4
0
 def join(self):
     """Aggregate mean/min/max of `value` plus min/max of `timestamp`."""
     stats = (
         models.Avg('value'),
         models.Min('value'),
         models.Max('value'),
         models.Max('timestamp'),
         models.Min('timestamp'),
     )
     return self.aggregate(*stats)
Пример #5
0
 def order_by_position(self):
     """Order by job title sort order, then by earliest start date."""
     # Annotating avoids duplicate rows from users with multiple positions;
     # the minimum title and start date may come from different positions.
     qs = self.annotate(min_title=models.Min("positions__title__sort_order"),
                        min_start=models.Min("positions__start_date"))
     return qs.order_by("min_title", "min_start", "last_name")
Пример #6
0
 def max_bounds(cls):
     """Return ((min_left, min_bottom), (max_right, max_top)) over all rows.

     The result is cached for 15 minutes, keyed on the concrete class and
     the current map-update cache key.
     """
     cache_key = 'mapdata:max_bounds:%s:%s' % (cls.__name__, MapUpdate.current_cache_key())
     cached = cache.get(cache_key, None)
     if cached is not None:
         return cached
     agg = cls.objects.all().aggregate(models.Min('left'), models.Min('bottom'),
                                       models.Max('right'), models.Max('top'))
     bounds = ((float(agg['left__min']), float(agg['bottom__min'])),
               (float(agg['right__max']), float(agg['top__max'])))
     cache.set(cache_key, bounds, 900)
     return bounds
def migrate_to_hourly_trade_aggregations(apps, *_, **__):
    """Collapse raw Trade rows into HourlyTradeAggregate rows.

    Walks backwards hour by hour from the newest trade to the oldest; for
    every (buy_currency, sell_currency) pair it stores low/high/average
    ratios and the trade count, then deletes the raw trades it aggregated.
    """
    Trade = apps.get_model('trades', 'Trade')
    HourlyTradeAggregate = apps.get_model('trades', 'HourlyTradeAggregate')

    # NOTE(review): first()/last() rely on the Trade model's default
    # ordering being by `created` -- confirm in the model's Meta.
    earliest_trade = Trade.objects.first()
    if not earliest_trade:
        return

    earliest_trade_date = earliest_trade.created
    running_date = Trade.objects.last().created
    while running_date >= earliest_trade_date:
        # Get all trades for this hour.  The window is closed at the top
        # (lte) and open at the bottom (gt) so the very newest trade --
        # whose `created` equals the initial running_date -- is included;
        # the previous lt/gte window could never match it and could also
        # miss the earliest trade on the final iteration.
        trades_for_this_hour_queryset = Trade.objects.filter(
            created__lte=running_date,
            created__gt=(running_date - timedelta(hours=1)),
        )
        # aggregate trades by currency combinations
        if trades_for_this_hour_queryset.exists():
            for buy_currency_id in CURRENCIES.keys():
                for sell_currency_id in CURRENCIES.keys():
                    # filter trades by buy and sell, then compute the
                    # aggregate statistics for this currency pair
                    hourly_aggregates = trades_for_this_hour_queryset.filter(
                        buy_currency_id=buy_currency_id,
                        sell_currency_id=sell_currency_id,
                    ).aggregate(
                        hour_created=models.Min('created'),
                        low=models.Min('trade_ratio'),
                        high=models.Max('trade_ratio'),
                        average=models.Avg('trade_ratio'),
                        total=models.Count('id'),
                    )

                    # create aggregate data object; skip pairs with no trades
                    if hourly_aggregates.get('total'):
                        new_hourly_object = HourlyTradeAggregate.objects.create(
                            buy_currency_id=buy_currency_id,
                            sell_currency_id=sell_currency_id,
                            low_ratio=hourly_aggregates.get('low'),
                            average_ratio=hourly_aggregates.get('average'),
                            high_ratio=hourly_aggregates.get('high'),
                            number_of_trades=hourly_aggregates.get('total'),
                        )
                        # reassign created date after create() -- presumably
                        # `created` is auto-set on insert; TODO confirm
                        new_hourly_object.created = hourly_aggregates.get(
                            'hour_created')
                        new_hourly_object.save()

            # delete all trades in this hour's window
            trades_for_this_hour_queryset.delete()
        # decrement date by one hour
        running_date -= timedelta(hours=1)
Пример #8
0
def students(request):
    """Render active students ordered by earliest group opening time."""
    semester = Semester.objects.get_next()
    # Conditional aggregates restricted to the upcoming semester.
    min_t0 = models.Min('t0times__time',
                        filter=models.Q(t0times__semester=semester))
    min_opening = models.Min(
        'groupopeningtimes__time',
        filter=models.Q(groupopeningtimes__group__course__semester=semester))
    students = (Student.get_active_students()
                .select_related('user')
                .annotate(min_t0=min_t0)
                .annotate(min_opening_time=min_opening)
                .order_by('min_opening_time'))
    return render(request, 'statistics/students_list.html', {
        'students': students,
    })
Пример #9
0
    def get_next(kind, sentiment=None):
        """Return the least-used phrase text of the given kind.

        When ``sentiment`` is truthy, restrict to phrases whose sentiment
        lies within one point of it (inclusive).  The chosen phrase's
        usage counter is incremented and saved; hyphens in its text are
        replaced by spaces.

        Raises IndexError when no phrase matches (original behaviour).
        """
        qs = Phrase.objects.filter(kind=kind)
        if sentiment:
            # Inclusive window of one sentiment point either side.
            qs = qs.filter(sentiment__gte=sentiment - 1,
                           sentiment__lte=sentiment + 1)
        # The original also called annotate(models.Min('used')) but never
        # read the result; ordering on the plain `used` field is all the
        # query needs, so the dead annotation is removed.
        phrase = qs.order_by('used')[0]

        phrase.used += 1
        phrase.save()
        return phrase.text.replace('-', ' ')
Пример #10
0
 def get_earliest_created_at(cls):
     """Return the earliest ``created_at`` across all rows, or None."""
     agg = cls.objects.aggregate(
         earliest_created_at=models.Min('created_at'))
     return agg['earliest_created_at']
Пример #11
0
def obtain_c_toptype_list():
    """Build per-top-type price summaries for consumer product types.

    Returns (type_list, modified_time): one dict per active consumer
    ProductTopType with its unit and min/max subtype prices, plus the most
    recent modification time across all in-use subtypes.
    """
    type_list = []
    qs = RecycleBin.objects.all()
    top_types = ProductTopType.objects.filter(
        operator=top_type_choice.CONSUMER, in_use=True)
    for c_type in top_types:
        try:
            unit = c_type.c_subtype.filter(in_use=True).first().unit
        except AttributeError:
            # No in-use subtype: .first() returned None.
            unit = None
        # Both price bounds use the same filtered queryset.
        priced = qs.filter(product_subtype__p_type__toptype_c=c_type,
                           product_subtype__p_type__in_use=True)
        type_list.append({
            "type_id": c_type.id,
            "c_type": c_type.t_top_name,
            "unit": unit,
            "min_price": priced.aggregate(models.Min(
                "product_subtype__price"))["product_subtype__price__min"],
            "max_price": priced.aggregate(models.Max(
                "product_subtype__price"))["product_subtype__price__max"],
        })
    modified_time = qs.filter(product_subtype__p_type__in_use=True).aggregate(
        models.Max("product_subtype__modified_time")
    )["product_subtype__modified_time__max"]

    return type_list, modified_time
Пример #12
0
    def __get_raw_group_privilege(self, this_user):
        """
        Return the group-based privilege of a specific user over this resource

        :param this_user: the user upon which to report
        :return: integer privilege 1-4 (PrivilegeCodes)

        This does not account for resource flags.
        """
        if __debug__:  # during testing only, check argument types and preconditions
            assert isinstance(this_user, User)

        if not this_user.is_active:
            raise PermissionDenied("Grantee user is not active")

        # Group privileges must be aggregated
        # NOTE(review): Min assumes a lower integer means a stronger grant,
        # consistent with the 1-4 PrivilegeCodes above -- confirm.
        group_priv = GroupResourcePrivilege.objects\
            .filter(resource=self.resource,
                    group__gaccess__active=True,
                    group__g2ugp__user=this_user)\
            .aggregate(models.Min('privilege'))

        # Min over an empty queryset yields None: the user shares no active
        # group with this resource, so report no privilege at all.
        response2 = group_priv['privilege__min']
        if response2 is None:
            response2 = PC.NONE
        return response2
Пример #13
0
    def get_earliest_index(self):
        """Return the smallest ``index`` among this object's entries.

        Returns 0 when there are no entries.  The original returned the
        raw aggregate immediately, leaving its own None-guard unreachable
        dead code; this restores the evident intent of defaulting to 0.
        """
        index = self.entries.all().aggregate(models.Min('index'))['index__min']

        if index is None:
            index = 0

        return index
Пример #14
0
 def order_by_event(self):
     """Order by the earliest published event associated with each profile."""
     # The Case/When yields a start time only for published events; other
     # events contribute NULL, which Min ignores.
     earliest = models.Min(models.Case(
         models.When(user__event__status=CONTENT_STATUS_PUBLISHED,
                     then='user__event__start_time')))
     return self.annotate(earliest_event=earliest).order_by('earliest_event')
Пример #15
0
    def get_start_finish_dates(self):
        """Return (start, finish) covering all associated SemesterDateRange's.

        Robust to missing ranges: any absent bound falls back to the
        four-month window implied by the term, i.e. Jan - Apr, May - Aug,
        or Sep - Dec.
        """
        bounds = self.semesterdaterange_set.aggregate(
            min=models.Min("start"), max=models.Max("finish"))
        start, finish = bounds["min"], bounds["max"]

        term_int = int(self.term[0])
        if start is None:
            # First day of the term's four-month window.
            start = datetime.date(self.year, 4 * (term_int - 1) + 1, 1)
        if finish is None:
            # Last day of the window: first day of the next window minus one,
            # rolling over into the next year when needed.
            month = 4 * term_int + 1
            year = self.year
            if month > 12:
                month -= 12
                year += 1
            finish = datetime.date(year, month, 1) - datetime.timedelta(days=1)

        return start, finish
Пример #16
0
    def get_value(self, obj):
        """Return [min_value, max_value] for the filter's integral choices.

        When the serializer context carries ``to_currency`` and the filter
        name mentions 'Price', the bounds are computed from per-room prices
        converted into that currency; otherwise they are plain database
        Min/Max aggregates of ``selected_number``.
        """
        to_currency = self.context.get('to_currency', None)
        try:
            # NOTE(review): assumes .name.data is a dict-like of translated
            # name strings when present -- confirm against the filter model.
            search_term = obj.integralfilter.name.data.values()
        except AttributeError:
            # Untranslated name: fall back to its plain string form.
            search_term = str(obj.integralfilter.name)

        if to_currency and ('Price' in search_term):
            original_prices = obj.integralfilter.integral_choices.values_list(
                'room_characteristics__price_currency__code',
                'selected_number')
            converted_prices = []
            for from_currency, original_price in original_prices:
                if not from_currency:
                    # Missing currency code: treat the stored price as USD.
                    from_currency = 'USD'
                converted_prices.append(
                    int(
                        convert_money(Money(original_price, from_currency),
                                      to_currency).amount))
            self.min_value = min(converted_prices)
            self.max_value = max(converted_prices)

        else:
            aggregate_min_max = obj.integralfilter.integral_choices.aggregate(
                django_models.Max('selected_number'),
                django_models.Min('selected_number'))
            self.min_value = aggregate_min_max['selected_number__min']
            self.max_value = aggregate_min_max['selected_number__max']

        return [self.min_value, self.max_value]
Пример #17
0
def get_annotations(
    aggregation: str,
    aggregation_field: str = None,
    percentile: str = None,
    queryset: models.QuerySet = None,
    additional_filter: str = None,
) -> dict:
    """Build the annotation dict for the requested aggregation type.

    Returns a mapping of annotation name to Django aggregate expression,
    always including a "value" key.  Raises ValidationError (code 422)
    when a required parameter is missing or the aggregation is unknown.
    """
    if aggregation == Aggregation.COUNT:
        return {"value": models.Count('id')}

    if aggregation == Aggregation.PERCENT:
        # Percent = rows matching additional_filter / all rows.
        if not additional_filter:
            raise ValidationError({"error": "'additionalFilter' is required for 'aggregation=percent'"}, code=422)

        complex_filter = ComplexFilter(model=queryset.model)
        additional_query, _ = complex_filter.generate_from_string(additional_filter)
        if not additional_query:
            raise ValidationError({"error": "Additional filter cannot be empty"}, code=422)

        return {
            "numerator": CountIf(additional_query),
            "denominator": models.Count("id"),
            # Multiplying by 1.0 forces floating-point division in SQL.
            "value": models.ExpressionWrapper(
                models.F("numerator") * 1.0 / models.F("denominator"),
                output_field=models.FloatField())
        }

    # Every remaining aggregation operates on a concrete field.
    if not aggregation_field:
        raise ValidationError({"error": f"'aggregationField' is required for 'aggregation={aggregation}'"}, code=422)

    if aggregation == Aggregation.DISTINCT:
        return {"value": models.Count(aggregation_field, distinct=True)}

    if aggregation == Aggregation.SUM:
        return {"value": models.Sum(aggregation_field)}

    if aggregation == Aggregation.AVERAGE:
        return {"value": models.Avg(aggregation_field)}

    if aggregation == Aggregation.MIN:
        return {"value": models.Min(aggregation_field)}

    if aggregation == Aggregation.MAX:
        return {"value": models.Max(aggregation_field)}

    if aggregation == Aggregation.PERCENTILE:
        if not percentile:
            raise ValidationError({"error": "'percentile' is required for 'aggregation=percentile'"}, code=422)

        # Walk the relation path ("a__b__c") to reach the terminal field.
        model: models.Model = queryset.model
        field = None
        for field_name in aggregation_field.split("__"):
            field = getattr(field, field_name) if field else model._meta.get_field(field_name)

        # Non-float columns need an explicit float output for Percentile.
        if field.get_internal_type() != "FloatField":
            return {"value": Percentile(aggregation_field, percentile,
                                        output_field=models.FloatField())}
        return {"value": Percentile(aggregation_field, percentile)}

    raise ValidationError({"error": "Unknown value for param 'aggregation'"}, code=422)
Пример #18
0
    def winning_times(cls, qs=None):
        """Map each date to its fastest (minimum) time in seconds."""
        if qs is None:
            # Default to all recorded times, excluding zero/unset entries.
            qs = cls.all_times().filter(seconds__gt=0)
        pairs = qs.values_list('date').annotate(
            winning_time=models.Min('seconds'))

        return dict(pairs)
def get_aggregates(values, stride_field, value_field):
    """Return min/max of the stride and value fields plus a row count.

    Tries a database-side aggregate first; when ``values`` is a plain
    sequence of dicts (AttributeError) or the fields are not real columns
    (FieldError), falls back to computing the statistics in Python.
    """
    try:
        return values.aggregate(min_stride=models.Min(stride_field), max_stride=models.Max(stride_field),
                                min_value=models.Min(value_field), max_value=models.Max(value_field),
                                count=models.Count('*'))
    except (AttributeError, FieldError):
        values = list(values)
        if not values:
            return {'count': 0}
        # NOTE(review): this branch assumes `values` is already sorted by
        # stride_field (first/last give the extremes) -- confirm at call sites.
        return {
            'min_stride': values[0][stride_field],
            'max_stride': values[-1][stride_field],
            'min_value': min(values, key=lambda value: value[value_field])[value_field],
            'max_value': max(values, key=lambda value: value[value_field])[value_field],
            'count': len(values),
        }
Пример #20
0
 def get_price_range(self):
     """Return the item's price range within its category.

     :return: dict with keys 'low' (Decimal), 'hight' (Decimal) and
         'field_type' (the price FieldType), or None when the category
         has no 'price' field type.
     """
     try:
         price_field_type = FieldType.objects.get(name='price',
                                                  category=self.category)
         # Cast the raw characteristic value to a decimal so that Min/Max
         # compare numerically rather than lexically.
         as_decimal = ExpressionWrapper(F('value'),
                                        output_field=models.DecimalField())
         res = Characteristic.objects \
             .filter(field_type=price_field_type,
                     modification__item=self) \
             .annotate(price_decimal=as_decimal) \
             .aggregate(low=models.Min('price_decimal'),
                        hight=models.Max('price_decimal'))
         res['field_type'] = price_field_type
         return res
     except models.ObjectDoesNotExist:
         return None
Пример #21
0
def _stats_for(query_set):
    """Return (stats_dict, query_set) with score statistics and word counts."""
    # Each entry issues its own query; 'words' groups rows by word with a
    # per-word id count (the trailing order_by() clears default ordering).
    stats = {
        'count': query_set.count(),
        'average': query_set.aggregate(models.Avg('score')),
        'minimum': query_set.aggregate(models.Min('score')),
        'maximum': query_set.aggregate(models.Max('score')),
        'words': query_set.values('word').annotate(
            models.Count("id")).order_by(),
    }
    return stats, query_set
Пример #22
0
def get_products(request):
    """Return a paginated product list, each with its ten latest items."""
    page = request.GET.get('page', 1)
    queryset = (Item.objects
                .values('product__id', 'product__name')
                .annotate(min_price=models.Min('price'))
                .order_by('min_price'))

    if 'name' in request.GET:
        queryset = queryset.filter(
            product__name__icontains=request.GET['name'].strip())

    paginator = Paginator(queryset, per_page=request.GET.get('per-page', 10))
    products = paginator.page(page)

    # Attach the ten most recent purchase rows to every product dict.
    item_fields = (
        'product__shop__name',
        'product__shop__inn',
        'price',
        'sum',
        'quantity',
        'check_model__date',
    )
    for product in products:
        product['items'] = (Item.objects
                            .filter(product__id=product['product__id'])
                            .values(*item_fields)
                            .order_by('-check_model__date')[:10])

    return {
        'products': products[:],
        'num_pages': paginator.num_pages,
        'count': paginator.count,
        'page': page,
    }
Пример #23
0
 def json_sessions(self):
     """Serialize experiment sessions, newest first by earliest ESD date."""
     sessions = self.ES.all().annotate(
         first_date=models.Min("ESD__date")).order_by('-first_date')
     return {
         "experiment_sessions": [es.json_min() for es in sessions],
     }
Пример #24
0
    def get_projects_list(self, user):
        """Return viewable projects split into (active, future, past).

        Active projects are ordered by end date; future and past projects
        by start date.
        """
        projects = query.filter_viewable(self.projects, user)
        now = timezone.now()

        # A project's time span is derived from non-draft modules only.
        project_start = models.Min('module__phase__start_date',
                                   filter=models.Q(module__is_draft=False))
        project_end = models.Max('module__phase__end_date',
                                 filter=models.Q(module__is_draft=False))

        active = projects \
            .annotate(project_start=project_start, project_end=project_end) \
            .filter(project_start__lte=now, project_end__gt=now) \
            .order_by('project_end')

        # Projects with no start date yet (NULL) count as future.
        future = projects \
            .annotate(project_start=project_start) \
            .filter(models.Q(project_start__gt=now)
                    | models.Q(project_start=None)) \
            .order_by('project_start')

        past = projects \
            .annotate(project_start=project_start, project_end=project_end) \
            .filter(project_end__lt=now) \
            .order_by('project_start')

        return active, future, past
Пример #25
0
 def start(self):
     """Return the earliest start date among related TaskDays.

     :return: earliest date (None when there are no TaskDays)
     """
     # Access the single aggregate key directly instead of unpacking the
     # dict's values into a list.
     return self.taskday_set.aggregate(models.Min('date'))['date__min']
Пример #26
0
def loop():
    """
    Loop forever looking for tasks and processing them in order.

    Installs SIGTERM/SIGINT handlers that let the current task finish
    before the loop exits.
    """
    global _exitLoop
    _exitLoop = False

    def sigtermHandler(signum, frame):
        # Request a graceful stop; checked at the top of each iteration.
        global _exitLoop
        logging.info('Received signal, ' +
                     'will end processing after current task completes')
        _exitLoop = True

    signal.signal(signal.SIGTERM, sigtermHandler)
    signal.signal(signal.SIGINT, sigtermHandler)

    while True:
        try:
            if _exitLoop:
                logging.info('Task processing loop exiting')
                return
            if not Task.objects.count():
                # No work queued; poll again shortly.
                time.sleep(3)
                continue
            # Process tasks in id order (oldest first).
            minId = Task.objects.aggregate(models.Min('id'))['id__min']
            t = Task.objects.get(id=minId)
            # NOTE(review): the task row is deleted BEFORE processing, so a
            # crash inside process() loses the task (at-most-once
            # semantics) -- confirm this is intentional.
            t.delete()
            process(t.taskType, t.modelId)
        except Exception:
            # Recover from an unexpected exception thrown by the task
            # processor.  Was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt; narrowed to Exception.
            logging.error(traceback.format_exc())
Пример #27
0
 def past_and_running_modules(self):
     """Return past and running modules ordered by start date."""
     # A module counts as started once its earliest phase has begun.
     earliest_phase_start = models.Min('phase__start_date')
     return (self.filter(is_draft=False)
                 .annotate(module_start=earliest_phase_start)
                 .filter(module_start__lte=timezone.now())
                 .order_by('module_start'))
Пример #28
0
 def clean(self):
     """Reject this bid if its value undercuts the current lowest bid."""
     min_value = Bid.objects.all().aggregate(
         models.Min('value'))['value__min']
     # No existing bids (None) means any value is acceptable.
     if min_value is not None and self.value < min_value:
         raise ValidationError(
             f"Não é possível criar um lance menor que o menor lance de R${min_value:.2f}"
         )
Пример #29
0
def elasticsearch_janitor(self,
                          es_url=None,
                          es_index=None,
                          dry=False,
                          to_daemon=False):
    """Reconcile Elasticsearch with Postgres.

    Looks for discrepancies between postgres and elastic search numbers
    and re-indexes the time periods that differ in count.
    """
    logger.debug('Starting Elasticsearch JanitorTask')

    # The scan range runs from the oldest date_created in the database up
    # to the current time.
    min_date = AbstractCreativeWork.objects.all().aggregate(
        models.Min('date_created'))['date_created__min']
    if not min_date:
        logger.warning('No CreativeWorks are present in Postgres. Exiting')
        return

    min_date = pendulum.instance(min_date)
    max_date = pendulum.utcnow()

    pseudo_bisection.apply(
        (es_url, es_index, min_date, max_date),
        {'dry': dry, 'to_daemon': to_daemon},
        throw=True)
Пример #30
0
    def min_priority(self) -> None:
        """Assign this task a priority one below the current minimum, then save.

        With an empty Task table the aggregate is None and the original
        crashed with ``TypeError: None - 1``; fall back to priority 0 in
        that case.
        """
        cur_min_priority = Task.objects.all().aggregate(
            models.Min('priority'))['priority__min']

        # Empty table: no existing priority to undercut.
        self.priority = 0 if cur_min_priority is None else cur_min_priority - 1
        self.save()