def search(cls, query):
    query = SearchQuery(query, config="russian")
    return (
        SearchIndex.objects
        .annotate(rank=SearchRank(F("index"), query))
        .filter(index=query, rank__gte=0.1)
    )
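# Hedged usage sketch (not from the source): assumes search() is exposed as a
# classmethod on SearchIndex and that "index" is a SearchVectorField kept in
# sync with the indexed documents.
def top_search_hits(term, limit=10):
    # Highest-ranked matches first; `rank` comes from the annotate() above.
    return SearchIndex.search(term).order_by("-rank")[:limit]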
def resolve_filters(self, info, route, sizes, colors, effects, query):
    colors_list = list(filter(None, map(str.strip, colors.split(','))))
    effects_list = list(filter(None, map(str.strip, effects.split(','))))
    effect_glow_in_the_dark = 'Светится в темноте' in effects_list
    effect_glow_in_the_uv = 'Светится в ультрафиолете' in effects_list
    sizes_list = list(filter(None, map(str.strip, sizes.split(','))))
    s_list = {
        'size_ns': 'Без размера',
        'size_xs': 'XS',
        'size_s': 'S',
        'size_m': 'M',
        'size_l': 'L',
        'size_xl': 'XL',
        'size_2xl': 'XXL',
        'size_3xl': 'XXXL',
        'size_4xl': 'XXXXL',
    }
    s_list_rev = dict(zip(s_list.values(), s_list.keys()))
    s = [s_list_rev[key] for key in sizes_list]

    sizes_filter = Q()
    for size in s:
        sizes_filter |= Q(**{size + '__gt': 0})

    colors_filter = Q()
    if colors_list:
        colors_filter &= Q(colors__name__in=colors_list)

    effects_filter = Q()
    if effect_glow_in_the_dark:
        effects_filter &= Q(glow_in_the_dark=True)
    if effect_glow_in_the_uv:
        effects_filter &= Q(glow_in_the_uv=True)

    query_filter = (
        Q(translations__description__icontains=query)
        | Q(model__icontains=query)
        | Q(sku__icontains=query)
    )

    qs = Product.objects.filter(enable=True, total_count__gt=0)
    qs = qs.filter(categories__path__icontains=route)
    qs = qs.filter(query_filter)
    # qs = qs.distinct()

    sizes_qs = qs.filter(colors_filter).filter(effects_filter)
    # NB: 'lable' is kept as spelled because the client API expects that key.
    sizes_qs = [{
        'lable': value,
        'count': sizes_qs.values(key).filter(Q(**{key + '__gt': 0})).count(),
        'value': key in s,
    } for key, value in s_list.items()]

    color_qs = (
        qs.annotate(lable=F('colors__name')).values('lable')
        .exclude(lable__isnull=True)
        .annotate(count=Count('lable', filter=effects_filter & sizes_filter))
        .annotate(value=Value(False, output_field=BooleanField()))
        .order_by('lable')
    )

    products_dark_qs = (
        qs.values('glow_in_the_dark')
        .filter(glow_in_the_dark=True)
        .annotate(count=Count('glow_in_the_dark', filter=colors_filter & sizes_filter))
        .annotate(lable=Value('Светится в темноте', output_field=CharField()))
        .annotate(value=Value(effect_glow_in_the_dark, output_field=BooleanField()))
        .values('lable', 'count', 'value')
    )

    products_uv_qs = (
        qs.values('glow_in_the_uv')
        .filter(glow_in_the_uv=True)
        .annotate(count=Count('glow_in_the_uv', filter=colors_filter & sizes_filter))
        .annotate(lable=Value('Светится в ультрафиолете', output_field=CharField()))
        .annotate(value=Value(effect_glow_in_the_uv, output_field=BooleanField()))
        .values('lable', 'count', 'value')
    )

    colors = list(color_qs)
    for color in colors:
        if color['lable'] in colors_list:
            color['value'] = True

    return [
        FiltersType(title='Размер', name='size', items=sizes_qs),
        FiltersType(title='Цвет', name='color', items=colors),
        FiltersType(title='Спецэффекты', name='effects',
                    items=list(products_dark_qs) + list(products_uv_qs)),
    ]
def sort_by_attribute(self, attribute_pk: Union[int, str], descending: bool = False):
    """Sort a queryset by the values of the given product attribute.

    :param attribute_pk: The database ID (must be numeric) of the attribute to sort by.
    :param descending: The sorting direction.
    """
    qs: models.QuerySet = self
    # If the passed attribute ID is not numeric, skip the sorting but still
    # annotate null placeholders so the ordering fields exist.
    if not (isinstance(attribute_pk, int) or attribute_pk.isnumeric()):
        return qs.annotate(
            concatenated_values_order=Value(None, output_field=models.IntegerField()),
            concatenated_values=Value(None, output_field=models.CharField()),
        )

    # Retrieve all the products' attribute data IDs (assignments) and
    # product types that have the given attribute associated to them.
    associated_values = tuple(
        AttributeProduct.objects
        .filter(attribute_id=attribute_pk)
        .values_list("pk", "product_type_id")
    )

    if not associated_values:
        qs = qs.annotate(
            concatenated_values_order=Value(None, output_field=models.IntegerField()),
            concatenated_values=Value(None, output_field=models.CharField()),
        )
    else:
        attribute_associations, product_types_associated_to_attribute = zip(*associated_values)
        qs = qs.annotate(
            # Join used to retrieve the attribute data (singular) of each product.
            # Refer to `AttributeProduct`.
            filtered_attribute=FilteredRelation(
                relation_name="attributes",
                condition=Q(attributes__assignment_id__in=attribute_associations),
            ),
            # Implicit `GROUP BY` required for the `StringAgg` aggregation.
            grouped_ids=Count("id"),
            # String aggregation of the attribute's values to efficiently sort them.
            concatenated_values=Case(
                # If the product has no association data but has the given
                # attribute associated to its product type, then consider the
                # concatenated values as empty (non-null).
                When(
                    Q(product_type_id__in=product_types_associated_to_attribute)
                    & Q(filtered_attribute=None),
                    then=models.Value(""),
                ),
                default=StringAgg(
                    F("filtered_attribute__values__name"),
                    delimiter=",",
                    ordering=[
                        f"filtered_attribute__values__{field_name}"
                        for field_name in AttributeValue._meta.ordering or []
                    ],
                ),
                output_field=models.CharField(),
            ),
            concatenated_values_order=Case(
                # Make the products having no such attribute be last in the sorting.
                When(concatenated_values=None, then=2),
                # Put the products having an empty attribute value below
                # the other products.
                When(concatenated_values="", then=1),
                # Products that do have an attribute value always come first.
                default=0,
                output_field=models.IntegerField(),
            ),
        )

    # Sort by concatenated_values_order, then sort each group of products
    # (0, 1, 2, ...) by its attribute values, and finally by name so products
    # with identical or missing values still have a stable order.
    ordering = "-" if descending else ""
    return qs.order_by(
        f"{ordering}concatenated_values_order",
        f"{ordering}concatenated_values",
        f"{ordering}name",
    )
def metrics(self, metric, start_time=None, end_time=None, resolution='day', apply_offset=True):
    if resolution not in ['month', 'week', 'day', 'hour', 'bin', 'auto']:
        raise ValueError('unsupported time resolution {}'.format(resolution))
    if metric not in self.TIMELINE_METRICS.keys():
        raise ValueError('unsupported metric {}'.format(metric))

    if resolution == 'auto':
        if start_time is None or end_time is None:
            mm = self.bins.aggregate(min=Min('sample_time'), max=Max('sample_time'))
            min_sample_time, max_sample_time = mm['min'], mm['max']
        if start_time is None:
            start_time = min_sample_time
        else:
            start_time = pd.to_datetime(start_time, utc=True)
        if end_time is None:
            end_time = max_sample_time
        else:
            end_time = pd.to_datetime(end_time, utc=True)
        time_range = end_time - start_time
        if time_range < pd.Timedelta('7d'):
            resolution = 'bin'
        elif time_range < pd.Timedelta('60d'):
            resolution = 'hour'
        elif time_range < pd.Timedelta('3y'):
            resolution = 'day'
        else:
            resolution = 'week'

    if apply_offset:
        # Shift each bucket's timestamp to its midpoint.
        if resolution == 'bin':
            offset = pd.Timedelta('0s')
        elif resolution == 'hour':
            offset = pd.Timedelta('30m')
        elif resolution == 'day':
            offset = pd.Timedelta('12h')
        elif resolution == 'week':
            offset = pd.Timedelta('3.5d')
        else:
            # 'month' has no fixed length; the original left this case
            # unhandled (a NameError below), so fall back to no offset.
            offset = pd.Timedelta('0s')

    qs = self.time_range(start_time, end_time)
    aggregate_fn = Avg
    if resolution == 'bin':
        result = (
            qs.annotate(dt=F('sample_time'), metric=F(metric))
            .values('dt', 'metric')
            .order_by('dt')
        )
    else:
        result = (
            qs.annotate(dt=Trunc('sample_time', resolution))
            .values('dt')
            .annotate(metric=aggregate_fn(metric))
            .order_by('dt')
        )
    if apply_offset:
        for record in result:
            record['dt'] += offset
    return result, resolution
def total_queryset(self):
    filters = [
        self.is_in_provided_def_codes,
        self.is_non_zero_total_spending,
        self.all_closed_defc_submissions,
        Q(treasury_account__isnull=False),
        Q(treasury_account__federal_account__isnull=False),
    ]
    annotations = {
        "fa_code": F("treasury_account__federal_account__federal_account_code"),
        "description": F("treasury_account__account_title"),
        "code": F("treasury_account__tas_rendering_label"),
        "id": F("treasury_account__treasury_account_identifier"),
        "award_count": Value(None, output_field=IntegerField()),
        "fa_description": F("treasury_account__federal_account__account_title"),
        "fa_id": F("treasury_account__federal_account_id"),
        "obligation": Coalesce(
            Sum(
                Case(
                    When(
                        self.final_period_submission_query_filters,
                        then=F("obligations_incurred_by_program_object_class_cpe")
                        + F("deobligations_recoveries_refund_pri_program_object_class_cpe"),
                    ),
                    default=Value(0),
                )
            ),
            0,
        ),
        "outlay": Coalesce(
            Sum(
                Case(
                    When(
                        self.final_period_submission_query_filters,
                        then=F("gross_outlay_amount_by_program_object_class_cpe")
                        + F("ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe")
                        + F("ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe"),
                    ),
                    default=Value(0),
                )
            ),
            0,
        ),
        "total_budgetary_resources": Coalesce(
            Subquery(
                latest_gtas_of_each_year_queryset()
                .filter(
                    disaster_emergency_fund_code__in=self.def_codes,
                    treasury_account_identifier=OuterRef("treasury_account"),
                )
                .annotate(
                    amount=Func("total_budgetary_resources_cpe", function="Sum"),
                    unobligated_balance=Func(
                        "budget_authority_unobligated_balance_brought_forward_cpe", function="Sum"
                    ),
                    deobligation=Func(
                        "deobligations_or_recoveries_or_refunds_from_prior_year_cpe", function="Sum"
                    ),
                    prior_year=Func("prior_year_paid_obligation_recoveries", function="Sum"),
                )
                .annotate(
                    total_budget_authority=F("amount")
                    - F("unobligated_balance")
                    - F("deobligation")
                    - F("prior_year")
                )
                .values("total_budget_authority"),
                output_field=DecimalField(),
            ),
            0,
        ),
    }
    # Assuming it is more performant to fetch all rows once rather than
    # run a count query and fetch only a page's worth of results.
    return (
        FinancialAccountsByProgramActivityObjectClass.objects.filter(*filters)
        .values(
            "treasury_account__federal_account__id",
            "treasury_account__federal_account__federal_account_code",
            "treasury_account__federal_account__account_title",
        )
        .annotate(**annotations)
        .values(*annotations.keys())
    )
class CheckinListPositionViewSet(viewsets.ReadOnlyModelViewSet):
    serializer_class = OrderPositionSerializer
    queryset = OrderPosition.objects.none()
    filter_backends = (DjangoFilterBackend, RichOrderingFilter)
    ordering = ('attendee_name_cached', 'positionid')
    ordering_fields = (
        'order__code', 'order__datetime', 'positionid', 'attendee_name',
        'last_checked_in', 'order__email',
    )
    ordering_custom = {
        'attendee_name': {
            '_order': F('display_name').asc(nulls_first=True),
            'display_name': Coalesce('attendee_name_cached', 'addon_to__attendee_name_cached'),
        },
        '-attendee_name': {
            '_order': F('display_name').desc(nulls_last=True),
            'display_name': Coalesce('attendee_name_cached', 'addon_to__attendee_name_cached'),
        },
        'last_checked_in': {
            '_order': FixedOrderBy(F('last_checked_in'), nulls_first=True),
        },
        '-last_checked_in': {
            '_order': FixedOrderBy(F('last_checked_in'), nulls_last=True, descending=True),
        },
    }
    filterset_class = CheckinOrderPositionFilter
    permission = 'can_view_orders'
    write_permission = 'can_change_orders'

    @cached_property
    def checkinlist(self):
        try:
            return get_object_or_404(CheckinList, event=self.request.event, pk=self.kwargs.get("list"))
        except ValueError:
            raise Http404()

    def get_queryset(self):
        cqs = Checkin.objects.filter(
            position_id=OuterRef('pk'),
            list_id=self.checkinlist.pk
        ).order_by().values('position_id').annotate(
            m=Max('datetime')
        ).values('m')

        qs = OrderPosition.objects.filter(
            order__event=self.request.event,
            order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING]
            if self.checkinlist.include_pending else [Order.STATUS_PAID],
            subevent=self.checkinlist.subevent
        ).annotate(
            last_checked_in=Subquery(cqs)
        ).prefetch_related(
            Prefetch(
                lookup='checkins',
                queryset=Checkin.objects.filter(list_id=self.checkinlist.pk)
            )
        ).select_related('item', 'variation', 'order', 'addon_to')

        if not self.checkinlist.all_products:
            qs = qs.filter(item__in=self.checkinlist.limit_products.values_list('id', flat=True))

        return qs

    @detail_route(methods=['POST'])
    def redeem(self, *args, **kwargs):
        force = bool(self.request.data.get('force', False))
        ignore_unpaid = bool(self.request.data.get('ignore_unpaid', False))
        nonce = self.request.data.get('nonce')
        op = self.get_object()

        if 'datetime' in self.request.data:
            dt = DateTimeField().to_internal_value(self.request.data.get('datetime'))
        else:
            dt = now()

        given_answers = {}
        if 'answers' in self.request.data:
            aws = self.request.data.get('answers')
            for q in op.item.questions.filter(ask_during_checkin=True):
                if str(q.pk) in aws:
                    try:
                        given_answers[q] = q.clean_answer(aws[str(q.pk)])
                    except ValidationError:
                        pass

        try:
            perform_checkin(
                op=op,
                clist=self.checkinlist,
                given_answers=given_answers,
                force=force,
                ignore_unpaid=ignore_unpaid,
                nonce=nonce,
                datetime=dt,
                questions_supported=self.request.data.get('questions_supported', True),
                user=self.request.user,
                auth=self.request.auth,
            )
        except RequiredQuestionsError as e:
            return Response({
                'status': 'incomplete',
                'questions': [QuestionSerializer(q).data for q in e.questions]
            }, status=400)
        except CheckInError as e:
            return Response({
                'status': 'error',
                'reason': e.code
            }, status=400)
        else:
            return Response({
                'status': 'ok',
            }, status=201)
def prepare_invoice_data(data, invoice):
    status_labels = {
        '007': 'Paid', '600': 'Returned', '000': 'Voided',
        '419': 'Unpaid', '202': 'Overdue', '212': 'Partly Paid',
    }
    for dat in data:
        stamp = dat['timestamp']
        dat['date'] = str(stamp.date())
        dat['time'] = str(stamp.time()).split('.')[0]
        dat['stati'] = status_labels.get(dat['status'], 'Unknown')

        if dat.get('customer_id'):
            try:
                dat['customer'] = Customer.objects.get(pk=dat['customer_id']).name
            except ObjectDoesNotExist:
                dat['customer'] = ''
        else:
            dat['customer'] = ''

        if dat.get('supplier_id'):
            try:
                dat['supplier'] = Supplier.objects.get(pk=dat['supplier_id']).name
            except ObjectDoesNotExist:
                dat['supplier'] = ''
        else:
            dat['supplier'] = ''

        if dat['employee_id']:
            try:
                dat['employee'] = User.objects.get(pk=dat['employee_id']).username
            except ObjectDoesNotExist:
                dat['employee'] = ''
        else:
            dat['employee'] = ''

        # Guard against an unknown invoice type leaving `amount` unbound
        # (the original would raise NameError at the final assignment).
        amount = {'amount': None}
        if invoice == 'SalesInvoice':
            amount = CreditSale.objects.filter(invoice=dat['id']).aggregate(
                amount=Sum(F('sp') * F('quantity'), output_field=FloatField()))
            r_attr = SalesInvoice.objects.get(pk=dat['id'])
            if hasattr(r_attr, 'salesreturninvoice'):
                try:
                    r_num = SalesReturnInvoice.objects.get(salesinvoice=dat['id'])
                    dat['number'] += " / " + r_num.number
                except ObjectDoesNotExist:
                    pass
        elif invoice == 'SalesReturnInvoice':
            amount = CreditSalesReturn.objects.filter(invoice=dat['id']).aggregate(
                amount=Sum(F('sp') * F('quantity'), output_field=FloatField()))
            try:
                r_num = SalesReturnInvoice.objects.get(salesinvoice=dat['salesinvoice_id'])
                dat['number'] += " / " + r_num.salesinvoice.number
            except ObjectDoesNotExist:
                pass
        elif invoice == 'PurchaseInvoice':
            amount = CreditPurchase.objects.filter(invoice=dat['id']).aggregate(
                amount=Sum(F('sp') * F('quantity'), output_field=FloatField()))
            r_attr = PurchaseInvoice.objects.get(pk=dat['id'])
            if hasattr(r_attr, 'purchasereturninvoice'):
                try:
                    r_num = PurchaseReturnInvoice.objects.get(purchasesinvoice=dat['id'])
                    dat['number'] += " / " + r_num.number
                except ObjectDoesNotExist:
                    pass
        elif invoice == 'PurchaseReturnInvoice':
            amount = CreditPurchaseReturn.objects.filter(invoice=dat['id']).aggregate(
                amount=Sum(F('sp') * F('quantity'), output_field=FloatField()))
            try:
                r_num = PurchaseReturnInvoice.objects.get(purchasesinvoice=dat['purchasesinvoice_id'])
                dat['number'] += " / " + r_num.purchasesinvoice.number
            except ObjectDoesNotExist:
                pass
        dat['amount'] = amount['amount']
def matches(request):
    query = None
    results = []
    if request.method == "GET":
        query = Qualities.objects.all()
        # Each field is compared against itself with icontains, so any row
        # with non-null fields matches; only the exclude() narrows this.
        results = Qualities.objects.filter(
            Q(Age_group__icontains=F('Age_group')) |
            Q(Region__icontains=F('Region')) |
            Q(Gender__icontains=F('Gender')) |
            Q(Purpose__icontains=F('Purpose')) |
            Q(one__icontains=F('one')) |
            Q(two__icontains=F('two')) |
            Q(three__icontains=F('three')) |
            Q(four__icontains=F('four')) |
            Q(five__icontains=F('five')) |
            Q(six__icontains=F('six')) |
            Q(seven__icontains=F('seven')) |
            Q(eight__icontains=F('eight')) |
            Q(nine__icontains=F('nine')) |
            Q(ten__icontains=F('ten')) |
            Q(eleven__icontains=F('eleven')) |
            Q(twelve__icontains=F('twelve'))
        ).exclude(user=request.user)
    context = {'query': query, 'results': results}
    return render(request, 'matches.html', context)
def paly(request):
    errors = request.GET.get('error')
    error = errors if errors else False

    video_id = request.GET.get('video_id')
    key = request.GET.get('key')

    if request.method == 'GET':
        author_key = False
        info = Video.objects.get(id=video_id)
        doctor = Doctor.objects.get(id=info.doctor)
        try:
            patient_key = request.session['patient']
        except KeyError:
            patient_key = False
        try:
            doctor_key = request.session['doctor']
        except KeyError:
            doctor_key = False

        if patient_key:
            patient = patient_key['id']
            video = video_id
            recode = Recode.objects.filter(video=video).order_by(F('id').desc())
            if len(recode) == 0:
                re = Recode(patient=patient, video=video)
                re.save()
            else:
                change_time = Recode.objects.get(video=video)
                from datetime import datetime
                dt = datetime.now()
                current_time = dt.strftime('%Y-%m-%d %H:%M:%S')
                change_time.time = current_time
                change_time.save()

        if key == 'person' and doctor_key:
            author_key = True

        commet_info = Commet.objects.filter(video=video_id).order_by(F('id').desc())
        for com in commet_info:
            com.patient = Patient.objects.get(id=com.patient).name

        limit = 9
        all_commmet = len(commet_info)
        page_key = all_commmet > 9
        p = Paginator(commet_info, limit)
        page = request.GET.get("page", 1)
        loaded = p.page(page)
        info_video = Video.objects.all().order_by(F('id').desc())  # limited selection
        return render(request, 'paly.html', {
            "video_info": info,
            'doctor': doctor,
            'patient_key': patient_key,
            "doctor_key": doctor_key,
            "author_key": author_key,
            "error": error,
            'key': key,
            "info_video": info_video,
            'all_commmet': all_commmet,
            'video_id': video_id,
            "commet_info": loaded,
            "page_key": page_key,
        })

    if request.method == 'POST':
        try:
            patient_key = request.session['patient']
            patient = Patient.objects.get(id=patient_key['id']).id
            video = Video.objects.get(id=video_id).id
            info = request.POST.get('info')
            comemt = Commet(info=info, video=video, patient=patient)
            comemt.save()
            # "评论成功" = "comment posted successfully"
            return HttpResponseRedirect(
                "/paly_video/?video_id={0}&key={1}&error=评论成功".format(video_id, key))
        except Exception as e:
            # "不能评论个人发布的视频" = "cannot comment on a personally published video"
            return HttpResponseRedirect(
                "/paly_video/?video_id={0}&key={1}&error=不能评论个人发布的视频".format(video_id, key))
def calculate_amount(self):
    expression = ExpressionWrapper(
        F('quantity') * F('unit_price'),
        output_field=DecimalField(),
    )
    return self.items.aggregate(amount=Sum(expression))['amount']
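# Minimal companion sketch, assuming calculate_amount() lives on an order-like
# model whose `items` rows carry `quantity` and `unit_price` (an assumption;
# the source only shows the method body). aggregate() yields None for an
# empty relation, so coalesce it to zero for callers that expect a number.
from decimal import Decimal

def calculate_amount_or_zero(self):
    return self.calculate_amount() or Decimal("0")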
def pending_line_items(self):
    """Return a queryset of pending line items for this order.

    Any line item where 'received' < 'quantity' will be returned.
    """
    return self.lines.filter(quantity__gt=F('received'))
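# Sketch of a natural companion check built on pending_line_items(); the
# `lines` relation is assumed to be the same one used above.
def is_fully_received(self):
    # Complete once no line still has received < quantity.
    return not self.pending_line_items().exists()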
def __init__(self, provider, report_type):
    """Constructor."""

    # The cost expressions below repeat throughout the mapping, so build
    # them with small helpers; each call returns a fresh expression.
    def zero():
        return Value(0, output_field=DecimalField())

    def markup_sum():
        return Sum(Coalesce(F("markup_cost"), zero()))

    def cost_total_sum():
        return Sum(
            Coalesce(F("pretax_cost"), zero())
            + Coalesce(F("markup_cost"), zero())
        )

    self._mapping = [{
        "provider": Provider.PROVIDER_AZURE,
        "alias": "subscription_guid",  # FIXME: probably wrong
        "annotations": {},
        "end_date": "costentrybill__billing_period_end",
        "filters": {
            "subscription_guid": [{
                "field": "subscription_guid",
                "operation": "icontains",
                "composition_key": "account_filter",
            }],
            "service_name": {"field": "service_name", "operation": "icontains"},
            "resource_location": {"field": "resource_location", "operation": "icontains"},
            "instance_type": {"field": "instance_type", "operation": "icontains"},
        },
        "group_by_options": [
            "service_name", "subscription_guid", "resource_location", "instance_type",
        ],
        "tag_column": "tags",
        "report_type": {
            "costs": {
                "aggregates": {
                    "infra_total": cost_total_sum(),
                    "infra_raw": Sum("pretax_cost"),
                    "infra_usage": Sum(zero()),
                    "infra_markup": markup_sum(),
                    "sup_raw": Sum(zero()),
                    "sup_usage": Sum(zero()),
                    "sup_markup": Sum(zero()),
                    "sup_total": Sum(zero()),
                    "cost_total": cost_total_sum(),
                    "cost_raw": Sum("pretax_cost"),
                    "cost_usage": Sum(zero()),
                    "cost_markup": markup_sum(),
                },
                "aggregate_key": "pretax_cost",
                "annotations": {
                    "infra_total": cost_total_sum(),
                    "infra_raw": Sum("pretax_cost"),
                    "infra_usage": zero(),
                    "infra_markup": markup_sum(),
                    "sup_raw": zero(),
                    "sup_usage": zero(),
                    "sup_markup": zero(),
                    "sup_total": zero(),
                    "cost_total": cost_total_sum(),
                    "cost_raw": Sum("pretax_cost"),
                    "cost_usage": zero(),
                    "cost_markup": markup_sum(),
                    "cost_units": Coalesce(Max("currency"), Value("USD")),
                },
                "delta_key": {"cost_total": cost_total_sum()},
                "filter": [{}],
                "cost_units_key": "currency",
                "cost_units_fallback": "USD",
                "sum_columns": ["cost_total", "sup_total", "infra_total"],
                "default_ordering": {"cost_total": "desc"},
            },
            "instance_type": {
                "aggregates": {
                    "infra_total": cost_total_sum(),
                    "infra_raw": Sum("pretax_cost"),
                    "infra_usage": Sum(zero()),
                    "infra_markup": markup_sum(),
                    "sup_raw": Sum(zero()),
                    "sup_usage": Sum(zero()),
                    "sup_markup": Sum(zero()),
                    "sup_total": Sum(zero()),
                    "cost_total": cost_total_sum(),
                    "cost_raw": Sum("pretax_cost"),
                    "cost_usage": Sum(zero()),
                    "cost_markup": markup_sum(),
                    "count": Sum(zero()),
                    "usage": Sum("usage_quantity"),
                },
                "aggregate_key": "usage_quantity",
                "annotations": {
                    "infra_total": cost_total_sum(),
                    "infra_raw": Sum("pretax_cost"),
                    "infra_usage": zero(),
                    "infra_markup": markup_sum(),
                    "sup_raw": zero(),
                    "sup_usage": zero(),
                    "sup_markup": zero(),
                    "sup_total": zero(),
                    "cost_total": cost_total_sum(),
                    "cost_raw": Sum("pretax_cost"),
                    "cost_usage": zero(),
                    "cost_markup": markup_sum(),
                    "cost_units": Coalesce(Max("currency"), Value("USD")),
                    "count": Max("instance_count"),
                    "count_units": Value("instance_types", output_field=CharField()),
                    "usage": Sum("usage_quantity"),
                    # FIXME: Waiting on MSFT for usage_units default.
                    "usage_units": Coalesce(Max("unit_of_measure"), Value("Instance Type Placeholder")),
                },
                "delta_key": {"usage": Sum("usage_quantity")},
                "filter": [{
                    "field": "instance_type",
                    "operation": "isnull",
                    "parameter": False,
                }],
                "group_by": ["instance_type"],
                "cost_units_key": "currency",
                "cost_units_fallback": "USD",
                "usage_units_key": "unit_of_measure",
                "usage_units_fallback": "Instance Type Placeholder",  # FIXME: Waiting on MSFT
                "count_units_fallback": "instances",
                "sum_columns": ["usage", "cost_total", "sup_total", "infra_total", "count"],
                "default_ordering": {"usage": "desc"},
            },
            "storage": {
                "aggregates": {
                    "infra_total": cost_total_sum(),
                    "infra_raw": Sum("pretax_cost"),
                    "infra_usage": Sum(zero()),
                    "infra_markup": markup_sum(),
                    "sup_raw": Sum(zero()),
                    "sup_usage": Sum(zero()),
                    "sup_markup": Sum(zero()),
                    "sup_total": Sum(zero()),
                    "cost_total": cost_total_sum(),
                    "cost_raw": Sum("pretax_cost"),
                    "cost_usage": Sum(zero()),
                    "cost_markup": markup_sum(),
                    "usage": Sum("usage_quantity"),
                },
                "aggregate_key": "usage_quantity",
                "annotations": {
                    "infra_total": cost_total_sum(),
                    "infra_raw": Sum("pretax_cost"),
                    "infra_usage": zero(),
                    "infra_markup": markup_sum(),
                    "sup_raw": zero(),
                    "sup_usage": zero(),
                    "sup_markup": zero(),
                    "sup_total": zero(),
                    "cost_total": cost_total_sum(),
                    "cost_raw": Sum("pretax_cost"),
                    "cost_usage": zero(),
                    "cost_markup": markup_sum(),
                    "cost_units": Coalesce(Max("currency"), Value("USD")),
                    "usage": Sum("usage_quantity"),
                    "usage_units": Coalesce(Max("unit_of_measure"), Value("Storage Type Placeholder")),
                },
                "delta_key": {"usage": Sum("usage_quantity")},
                "filter": [{
                    "field": "service_name",
                    "operation": "icontains",
                    "parameter": "Storage",
                }],
                "cost_units_key": "currency",
                "cost_units_fallback": "USD",
                "usage_units_key": "unit_of_measure",
                "usage_units_fallback": "Storage Type Placeholder",  # FIXME
                "sum_columns": ["usage", "cost_total", "sup_total", "infra_total"],
                "default_ordering": {"usage": "desc"},
            },
            "tags": {"default_ordering": {"cost_total": "desc"}},
        },
        "start_date": "costentrybill__billing_period_start",
        "tables": {"query": AzureCostEntryLineItemDailySummary},
    }]

    self.views = {
        "costs": {
            "default": AzureCostSummary,
            "subscription_guid": AzureCostSummaryByAccount,
            "resource_location": AzureCostSummaryByLocation,
            "service_name": AzureCostSummaryByService,
        },
        "instance_type": {
            "default": AzureComputeSummary,
            "instance_type": AzureComputeSummary,
        },
        "storage": {"default": AzureStorageSummary},
        "database": {
            "default": AzureDatabaseSummary,
            "service_name": AzureDatabaseSummary,
        },
        "network": {
            "default": AzureNetworkSummary,
            "service_name": AzureNetworkSummary,
        },
    }
    super().__init__(provider, report_type)
def generar_resultados(self, fecha_inicial, fecha_final, con_mo_saldo_inicial, proyecto):
    context = {}
    mano_obra = HoraHojaTrabajo.objects.values('literal').annotate(
        horas_trabajadas=ExpressionWrapper(
            Coalesce(Sum('cantidad_minutos') / 60, 0),
            output_field=DecimalField(max_digits=2)),
        costo_total=ExpressionWrapper(
            Sum(
                Coalesce(F('cantidad_minutos') / 60, 0)
                * (F('hoja__tasa__costo') / F('hoja__tasa__nro_horas_mes_trabajadas'))
            ),
            output_field=DecimalField(max_digits=4)),
    ).filter(
        verificado=True,
        literal_id=OuterRef('id'),
    )

    materiales = ItemsLiteralDetalle.objects.values('literal').annotate(
        costo_total=Coalesce(Sum('costo_total'), 0)
    ).filter(
        literal_id=OuterRef('id')
    )

    if fecha_inicial and fecha_final:
        materiales = materiales.filter(
            lapso__lte=fecha_final,
            lapso__gte=fecha_inicial
        )
        mano_obra = mano_obra.filter(
            hoja__fecha__lte=fecha_final,
            hoja__fecha__gte=fecha_inicial
        )

    qsLiterales = Literal.objects
    if proyecto:
        qsLiterales = qsLiterales.filter(proyecto=proyecto)

    qsLiterales = qsLiterales.annotate(
        costo_mano_obra_iniciales=ExpressionWrapper(
            Coalesce(Sum('mis_horas_trabajadas_iniciales__valor'), 0),
            output_field=DecimalField(max_digits=4)
        ),
        cantidad_mano_obra_iniciales=ExpressionWrapper(
            Coalesce(Sum('mis_horas_trabajadas_iniciales__cantidad_minutos'), 0) / 60,
            output_field=DecimalField(max_digits=4)
        ),
        cantidad_horas_trabajadas=ExpressionWrapper(
            Subquery(mano_obra.values('horas_trabajadas')),
            output_field=DecimalField(max_digits=4)
        ),
        costo_mano_obra=ExpressionWrapper(
            Subquery(mano_obra.values('costo_total')),
            output_field=DecimalField(max_digits=4)
        ),
        costo_mis_materiales=ExpressionWrapper(
            Coalesce(Subquery(materiales.values('costo_total')), 0),
            output_field=DecimalField(max_digits=4)
        )
    ).distinct()

    total_costo_mo = 0
    total_costo_mo_ini = 0
    total_horas_mo_ini = 0
    total_horas_mo = 0
    for literal in qsLiterales:
        if literal.cantidad_horas_trabajadas:
            total_horas_mo += literal.cantidad_horas_trabajadas
        if literal.cantidad_mano_obra_iniciales and con_mo_saldo_inicial:
            total_horas_mo_ini += literal.cantidad_mano_obra_iniciales
        if literal.costo_mano_obra:
            total_costo_mo += literal.costo_mano_obra
        if literal.costo_mano_obra_iniciales and con_mo_saldo_inicial:
            total_costo_mo_ini += literal.costo_mano_obra_iniciales

    total_costo_materiales = qsLiterales.aggregate(
        Sum('costo_mis_materiales'))['costo_mis_materiales__sum']

    context['tipo_consulta'] = 'Todo'
    if fecha_inicial and fecha_final:
        context['tipo_consulta'] = 'por lapso'
        context['fecha_inicial'] = fecha_inicial
        context['fecha_final'] = fecha_final
    context['literales'] = qsLiterales
    context['proyecto'] = proyecto
    context['con_mo_saldo_inicial'] = con_mo_saldo_inicial
    context['total_costo_mo'] = total_costo_mo
    context['total_costo_mo_ini'] = total_costo_mo_ini
    context['total_costo_materiales'] = total_costo_materiales
    context['total_costo'] = total_costo_mo + total_costo_mo_ini + total_costo_materiales
    context['total_horas_mo'] = total_horas_mo
    context['total_horas_mo_ini'] = total_horas_mo_ini
    return context
def with_duration(self):
    """Annotate the queryset with the duration of completed jobs."""
    return self.annotate(duration=F("completed_at") - F("started_at"))
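# Hedged example of consuming the annotation: assumes completed_at/started_at
# are DateTimeFields, so `duration` resolves to a DurationField and Avg()
# returns a timedelta (or None on an empty queryset).
from django.db.models import Avg

def average_duration(self):
    return self.with_duration().aggregate(avg=Avg("duration"))["avg"]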
            'creator',
            'creatoremail',
            'creatorwebsite',
            'custom_country_filter',
            'key_code',
        ],
        'startrun': prize_run_fields,
        'endrun': prize_run_fields,
        'category': ['name'],
    },
}

EVENT_DONATION_AGGREGATE_FILTER = Case(
    When(EventAggregateFilter, then=F('donation__amount')),
    output_field=DecimalField(decimal_places=2),
)

annotations = {
    'event': {
        'amount': Coalesce(Sum(EVENT_DONATION_AGGREGATE_FILTER), 0),
        'count': Count(EVENT_DONATION_AGGREGATE_FILTER),
        'max': Coalesce(Max(EVENT_DONATION_AGGREGATE_FILTER), 0),
        'avg': Coalesce(Avg(EVENT_DONATION_AGGREGATE_FILTER), 0),
    },
    'prize': {
        'numwinners': Count(
            Case(When(PrizeWinnersFilter, then=1), output_field=IntegerField())),
def calculate_paths(self, filter: Filter, team: Team):
    date_query = request_to_date_query(
        {"date_from": filter._date_from, "date_to": filter._date_to},
        exact=False,
    )
    resp = []
    event, path_type, event_filter, start_comparator = self._determine_path_type(
        filter.path_type if filter else None)

    sessions = (
        Event.objects.add_person_id(team.pk)
        .filter(team=team, **(event_filter), **date_query)
        .filter(
            ~Q(event__in=["$autocapture", "$pageview", "$identify", "$pageleave", "$screen"])
            if event is None else Q())
        .filter(
            filter.properties_to_Q(team_id=team.pk)
            if filter and filter.properties else Q())
        .annotate(previous_timestamp=Window(
            expression=Lag("timestamp", default=None),
            partition_by=F("person_id"),
            order_by=F("timestamp").asc(),
        ))
    )

    sessions_sql, sessions_sql_params = sessions.query.sql_with_params()

    if event == "$autocapture":
        sessions_sql = self._add_elements(query_string=sessions_sql)

    events_notated = """
        SELECT *,
               CASE WHEN EXTRACT('EPOCH' FROM (timestamp - previous_timestamp)) >= (60 * 30)
                         OR previous_timestamp IS NULL
                    THEN 1 ELSE 0 END AS new_session
        FROM ({}) AS inner_sessions
    """.format(sessions_sql)

    sessionified = """
        SELECT events_notated.*,
               SUM(new_session) OVER (ORDER BY person_id, timestamp) AS session
        FROM ({}) as events_notated
    """.format(events_notated)

    if filter and filter.start_point:
        sessionified = self._apply_start_point(
            start_comparator=start_comparator,
            query_string=sessionified,
            start_point=filter.start_point,
        )

    final = """
        SELECT {} as path_type,
               id,
               sessionified.session,
               ROW_NUMBER() OVER (PARTITION BY person_id, session ORDER BY timestamp) AS event_number
        FROM ({}) as sessionified
    """.format(path_type, sessionified)

    counts = """
        SELECT event_number || '_' || path_type as target_event,
               id as target_id,
               LAG(event_number || '_' || path_type, 1) OVER (PARTITION BY session) AS source_event,
               LAG(id, 1) OVER (PARTITION BY session) AS source_id
        FROM ({}) as final
        WHERE event_number <= 4
    """.format(final)

    query = """
        SELECT source_event, target_event, MAX(target_id), MAX(source_id), count(*)
        FROM ({}) as counts
        WHERE source_event is not null AND target_event is not null
        GROUP BY source_event, target_event
        ORDER BY count DESC
        LIMIT 20
    """.format(counts)

    cursor = connection.cursor()
    cursor.execute(query, sessions_sql_params)
    rows = cursor.fetchall()

    for row in rows:
        resp.append({
            "source": row[0],
            "target": row[1],
            "target_id": row[2],
            "source_id": row[3],
            "value": row[4],
        })

    resp = sorted(resp, key=lambda x: x["value"], reverse=True)
    return resp
def post(self, request, *args, **kwargs):
    # TODO add query to database
    # Prepared for ajax pagination; remember that we need to return number of rows = length.
    # start - selected page on the UI (if first page selected then start = 0)
    # sortColumn - name of the sorting column
    # sortColumnDir - asc or desc
    selected_month = int(self.request.POST.get('selectedMonth'))
    selected_year = int(self.request.POST.get('selectedYear'))
    selected_date = date(selected_year, selected_month, 1)
    selected_location = self.request.POST.get('selectedLocation')
    beneficiary_type = self.request.POST.get('selectedBeneficiaryType')
    draw = self.request.POST.get('draw', 0)
    length = self.request.POST.get('length', 0)
    start = self.request.POST.get('start', 0)
    sortColumn = self.request.POST.get('sortColumn', 0)
    sortColumnDir = self.request.POST.get('sortColumnDir', 0)
    data = []
    if beneficiary_type == 'child':
        data = [
            dict(id=1, name='test 1', age='27', gender='M', lastImmunizationType=1, lastImmunizationDate='2018-03-03'),
            dict(id=2, name='test 2', age='12', gender='M', lastImmunizationType=1, lastImmunizationDate='2018-03-03'),
            dict(id=3, name='test 3', age='3', gender='M', lastImmunizationType=1, lastImmunizationDate='2018-03-03'),
            dict(id=4, name='test 4', age='5', gender='M', lastImmunizationType=1, lastImmunizationDate='2018-03-03'),
            dict(id=5, name='test 5', age='16', gender='M', lastImmunizationType=1, lastImmunizationDate='2018-03-03'),
            dict(id=6, name='test 6', age='19', gender='M', lastImmunizationType=1, lastImmunizationDate='2018-03-03'),
        ]
    elif beneficiary_type == 'eligible_couple':
        data = (
            Woman.objects.annotate(
                age=ExtractYear(Func(F('dob'), function='age')),
            ).filter(
                # should filter for location
                domain=request.domain,
                age__range=(19, 49),
                marital_status='married',
            ).exclude(migration_status='yes').extra(
                select={
                    'currentFamilyPlanningMethod': 0,
                    'adoptionDateOfFamilyPlaning': '2018-03-01',
                    'id': 'person_case_id',
                },
                where=["NOT daterange(%s, %s) && any(pregnant_ranges)"],
                params=[selected_date, selected_date + relativedelta(months=1)]
            ).values(
                'id', 'name', 'age',
                'currentFamilyPlanningMethod', 'adoptionDateOfFamilyPlaning'
            )
        )[:10]
        data = list(data)
    elif beneficiary_type == 'pregnant_women':
        data = [
            dict(id=1, name='test 1', age='22', pregMonth='2018-03-03', highRiskPregnancy=1, noOfAncCheckUps=9),
            dict(id=2, name='test 2', age='32', pregMonth='2018-03-03', highRiskPregnancy=0, noOfAncCheckUps=9),
            dict(id=3, name='test 3', age='17', pregMonth='2018-03-03', highRiskPregnancy=1, noOfAncCheckUps=9),
            dict(id=4, name='test 4', age='56', pregMonth='2018-03-03', highRiskPregnancy=1, noOfAncCheckUps=9),
            dict(id=5, name='test 5', age='48', pregMonth='2018-03-03', highRiskPregnancy=0, noOfAncCheckUps=9),
            dict(id=6, name='test 6', age='19', pregMonth='2018-03-03', highRiskPregnancy=1, noOfAncCheckUps=9),
        ]
    return JsonResponse(data={
        'rows': data,
        'draw': draw,
        'recordsTotal': len(data),
        'recordsFiltered': len(data),
    })
def funit_dscd_er(self, format, param):
    report_data = UnitSancDesg.objects.values(
        'u_id', 'd5', 'u_name', 'd_gdesig', 'd_id', 'd_rank', 'd_discp',
        'd_name', 'd_grade', 'd_gcode', 'tot', 'san', 'req', 'comment',
        'd_cadre'
    ).annotate(
        total_count=ExpressionWrapper(
            F('san') + F('tot') + F('req'),
            output_field=models.IntegerField())
    ).filter(
        u_id=param['u_id'], total_count__gt=0
    ).order_by('d5', 'd_gcode')

    rpt = Report(report_data)

    pos_x_start = 36
    pos_y_start = 24
    col_1_pos = (pos_x_start, pos_y_start)
    col_2_pos = (col_1_pos[0] + 200, pos_y_start)
    col_3_pos = (col_2_pos[0] + 100, pos_y_start)
    col_4_pos = (col_3_pos[0] + 50, pos_y_start)
    col_5_pos = (col_4_pos[0] + 50, pos_y_start)
    col_6_pos = (col_5_pos[0] + 50, pos_y_start)
    col_7_pos = (col_6_pos[0] + 50, pos_y_start)

    rpt.pageheader = Band([
        Element((36, 0), ("Times-Bold", 20),
                getvalue=lambda x: x["u_name"].upper(),
                format=lambda x: "Unit Summary : %s" % x),
        Element(col_1_pos, ("Helvetica-Bold", 12), text="Designation"),
        Element(col_2_pos, ("Helvetica-Bold", 12), text="Grade", align="center"),
        Element(col_3_pos, ("Helvetica-Bold", 12), text="Cadre", align="center"),
        Element(col_4_pos, ("Helvetica-Bold", 12), text="Ext", align="center", width=50),
        Element(col_5_pos, ("Helvetica-Bold", 12), text="Req", align="center", width=50),
        Element(col_6_pos, ("Helvetica-Bold", 12), text="San", align="center", width=50),
        # Element(col_7_pos, ("Helvetica-Bold", 12), text="San", align="center", width=50),
        Rule((36, 56), 7.5 * 72, thickness=2),
    ])

    rpt.groupheaders = [
        Band(
            [
                # Rule((36, 20), 7.5 * 72),
                Element(
                    (36, 4), ("Helvetica-Bold", 10),
                    getvalue=lambda x: x["d_gdesig"].upper(),
                    # format=lambda x: "Group Designation : %s" % x
                ),
            ],
            getvalue=lambda x: x["d_gdesig"].upper()),
    ]

    comment_band = Band([
        Element(
            (col_1_pos[0], 0), ("Helvetica", 10),
            key="comment", align="left",
        )
    ])

    rpt.detailband = Band([
        Element(
            (col_1_pos[0], 0), ("Helvetica", 10),
            getvalue=lambda x: x["d_name"].upper(),
        ),
        Element((col_2_pos[0], 0), ("Helvetica", 10), key="d_grade", align="left"),
        Element((col_3_pos[0], 0), ("Helvetica", 10), key="d_cadre", align="right"),
        Element((col_4_pos[0], 0), ("Helvetica", 10), getvalue=lambda x: x["tot"] or 0, align="right"),
        Element((col_5_pos[0], 0), ("Helvetica", 10), getvalue=lambda x: x["req"] or 0, align="right"),
        Element((col_6_pos[0], 0), ("Helvetica", 10), getvalue=lambda x: x["san"] or 0, align="right"),
    ])  # , additionalbands=[comment_band]

    rpt.groupfooters = [
        Band(
            [
                Rule((col_4_pos[0] - 24, 4), 30),
                Rule((col_5_pos[0] - 24, 4), 30),
                Rule((col_6_pos[0] - 24, 4), 30),
                Element((col_1_pos[0], 4), ("Helvetica-Bold", 10),
                        getvalue=lambda x: x["d_gdesig"].upper(),
                        format=lambda x: "Subtotal"
                        # format=lambda x: "Subtotal for %s" % x
                        ),
                SumElement((col_4_pos[0], 4), ("Helvetica-Bold", 10), key="tot", align="right"),
                SumElement((col_5_pos[0], 4), ("Helvetica-Bold", 10), key="req", align="right"),
                SumElement((col_6_pos[0], 4), ("Helvetica-Bold", 10), key="san", align="right"),
                Element((36, 16), ("Helvetica-Bold", 10), text=""),
            ],
            getvalue=lambda x: x["d_gdesig"].upper(),
            newpageafter=0),
    ]

    rpt.reportfooter = Band([
        Rule((col_4_pos[0] - 24, 4), 30),
        Rule((col_5_pos[0] - 24, 4), 30),
        Rule((col_6_pos[0] - 24, 4), 30),
        Element((240, 4), ("Helvetica-Bold", 10), text="Unit Total"),
        SumElement((col_4_pos[0], 4), ("Helvetica-Bold", 10), key="tot", align="right"),
        SumElement((col_5_pos[0], 4), ("Helvetica-Bold", 10), key="req", align="right"),
        SumElement((col_6_pos[0], 4), ("Helvetica-Bold", 10), key="san", align="right"),
        Element((36, 16), ("Helvetica-Bold", 10), text=""),
    ])

    rpt.pagefooter = Band([
        Element((400, 16), ("Times-Bold", 10),
                text="Industrial Engineering Dept, WCL HQ", align="right"),
        Element((36, 16), ("Helvetica-Bold", 10),
                sysvar="pagenumber", format=lambda x: "Page %d" % x),
    ])

    file_loc = "./report_folder/" + param['u_id'] + ".pdf"
    canvas = Canvas(file_loc, (210 * mm, 297 * mm))
    rpt.generate(canvas)
    print("report generated at %s" % file_loc)
    canvas.save()
    return file_loc
def active(self, date):
    return self.filter(
        Q(usage_limit__isnull=True) | Q(used__lt=F('usage_limit')),
        Q(end_date__isnull=True) | Q(end_date__gte=date),
        start_date__lte=date,
    )
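# Usage sketch: a thin helper for "redeemable right now". The model that
# defines active() is not shown in the source, so the manager is passed in
# rather than naming a placeholder model.
from django.utils import timezone

def active_today(manager):
    return manager.active(date=timezone.now().date())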
def expand_child(model: Type[Model], child: Tuple[str, Any]) -> Lookup:
    arg, value = child
    if not isinstance(value, Value):
        value = Value(value)

    parts = arg.split(LOOKUP_SEP)
    opts = model._meta  # type: Options
    inner_opts = opts
    field = None  # type: Field
    pos = 0

    # we need to work out the full field path, which we will put in an F()
    for pos, part in enumerate(parts):
        if part == 'pk':
            part = inner_opts.pk.name
        try:
            field = inner_opts.get_field(part)
        except FieldDoesNotExist:
            break
        else:
            if field.is_relation:
                # follow the relation to the target model's options (the
                # original used field.model, which points back at the model
                # defining the field rather than the related model)
                inner_opts = field.related_model._meta
    else:
        # we never broke out, which means everything resolved correctly.
        # bump pos by one so we get no remainder
        pos += 1

    if field is None:
        raise Exception("Field not found: {}".format(parts))

    field_path = LOOKUP_SEP.join(parts[:pos])
    expression = F(field_path)
    # we set lookup_expression to field as that's what we're gathering from.
    # It will be updated in parallel with `expression` later on
    lookup_expression = field
    # we need to wrap the F() so we can specify the output field.
    # It's kind of bastardised..
    expression = ExpressionWrapper(expression, output_field=field)

    remainder = parts[pos:]
    if not remainder:
        remainder = ['exact']

    # we don't try to access the last entry, as that is probably a lookup
    for part in remainder[:-1]:
        transformer = lookup_expression.get_transform(part)
        if not transformer:
            raise Exception("Invalid transform: {}".format(part))
        lookup_expression = expression = transformer(expression)

    lookup_name = remainder[-1]
    lookup_class = lookup_expression.get_lookup(lookup_name)
    if not lookup_class:
        transformer = lookup_expression.get_transform(lookup_name)
        if not transformer:
            raise Exception(
                "invalid transform or field access: {}".format(lookup_name))
        lookup_expression = expression = transformer(expression)
        lookup_name = 'exact'
        lookup_class = lookup_expression.get_lookup(lookup_name)

    # we'd rather use isnull instead of Eq(None)
    if value.value is None and lookup_name in ('exact', 'iexact'):
        return lookup_expression.get_lookup('isnull')(expression, True)

    return lookup_class(expression, value)
def fill_entity_fks(apps, schema_editor):
    get_model = apps.get_model
    for model_name in ('Action', 'Alert', 'Memo', 'ToDo', 'UserMessage'):
        get_model('assistants', model_name).objects.update(entity_id=F('old_entity_id'))
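# Wiring sketch: this helper is written for a data migration, so it would
# typically be attached via RunPython (the surrounding Migration class and
# its dependencies are assumed, not shown in the source):
# operations = [
#     migrations.RunPython(fill_entity_fks, reverse_code=migrations.RunPython.noop),
# ]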
def with_free_places(self):
    qs = super().get_queryset()
    return qs.annotate(
        free_places=F('total_places') - Sum(
            'volunteers__participant_count', output_field=IntegerField()))
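# Follow-up sketch: Sum() over an empty reverse relation yields NULL, so
# free_places is None for events with no volunteer rows. A Coalesce-guarded
# variant (assuming the same relation names as above) sidesteps that:
def with_free_places_safe(self):
    qs = super().get_queryset()
    return qs.annotate(
        free_places=F('total_places') - Coalesce(
            Sum('volunteers__participant_count'), 0,
            output_field=IntegerField()))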
def poll_post_vote(sender, poll, user, **kwargs):
    TopicPollChoice.objects.filter(poll=poll, votes__user=user).update(
        vote_count=F('vote_count') + 1)
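# The F() increment above is applied atomically in SQL, so concurrent votes
# don't lose updates. A matching decrement for vote withdrawal might look
# like this (sketch; the signal wiring is assumed, not shown in the source):
def poll_post_unvote(sender, poll, user, **kwargs):
    TopicPollChoice.objects.filter(poll=poll, votes__user=user).update(
        vote_count=F('vote_count') - 1)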
def get(self, request, course_key):
    """
    Returns a gradebook entry/entries (i.e. both course and subsection-level
    grade data) for all users enrolled in a course, or a single user enrolled
    in a course if a `username` parameter is provided.

    Args:
        request: A Django request object.
        course_key: The edx course opaque key of a course object.
    """
    course = get_course_by_id(course_key, depth=None)

    # We fetch the entire course structure up-front, and use this when iterating
    # over users to determine their subsection grades. We purposely avoid fetching
    # the user-specific course structure for each user, because that is very expensive.
    course_data = CourseData(user=None, course=course)
    graded_subsections = list(
        grades_context.graded_subsections_for_course(course_data.collected_structure))

    if request.GET.get('username'):
        with self._get_user_or_raise(request, course_key) as grade_user:
            course_grade = CourseGradeFactory().read(grade_user, course)
        entry = self._gradebook_entry(grade_user, course, graded_subsections, course_grade)
        serializer = StudentGradebookEntrySerializer(entry)
        return Response(serializer.data)
    else:
        q_objects = []
        annotations = {}
        if request.GET.get('user_contains'):
            search_term = request.GET.get('user_contains')
            q_objects.append(
                Q(user__username__icontains=search_term) |
                Q(programcourseenrollment__program_enrollment__external_user_key__icontains=search_term) |
                Q(user__email__icontains=search_term)
            )
        if request.GET.get('username_contains'):
            q_objects.append(Q(user__username__icontains=request.GET.get('username_contains')))
        if request.GET.get('cohort_id'):
            cohort = cohorts.get_cohort_by_id(course_key, request.GET.get('cohort_id'))
            if cohort:
                q_objects.append(Q(user__in=cohort.users.all()))
            else:
                q_objects.append(Q(user__in=[]))
        if request.GET.get('enrollment_mode'):
            q_objects.append(Q(mode=request.GET.get('enrollment_mode')))
        if request.GET.get('assignment') and (
                request.GET.get('assignment_grade_max') or request.GET.get('assignment_grade_min')):
            subqueryset = PersistentSubsectionGrade.objects.annotate(
                effective_grade_percentage=Case(
                    When(override__isnull=False,
                         then=(
                             F('override__earned_all_override') /
                             F('override__possible_all_override')
                         ) * 100),
                    default=(F('earned_all') / F('possible_all')) * 100
                )
            )
            grade_conditions = {
                'effective_grade_percentage__range': (
                    request.GET.get('assignment_grade_min', 0),
                    request.GET.get('assignment_grade_max', 100)
                )
            }
            annotations['selected_assignment_grade_in_range'] = Exists(
                subqueryset.filter(
                    course_id=OuterRef('course'),
                    user_id=OuterRef('user'),
                    usage_key=UsageKey.from_string(request.GET.get('assignment')),
                    **grade_conditions
                )
            )
            q_objects.append(Q(selected_assignment_grade_in_range=True))

        if request.GET.get('course_grade_min') or request.GET.get('course_grade_max'):
            grade_conditions = {}
            q_object = Q()
            if request.GET.get('course_grade_min'):
                course_grade_min = float(request.GET.get('course_grade_min')) / 100
                grade_conditions['percent_grade__gte'] = course_grade_min

                if course_grade_min == 0:
                    subquery_grade_absent = ~Exists(
                        PersistentCourseGrade.objects.filter(
                            course_id=OuterRef('course'),
                            user_id=OuterRef('user_id'),
                        )
                    )
                    annotations['course_grade_absent'] = subquery_grade_absent
                    q_object |= Q(course_grade_absent=True)

            if request.GET.get('course_grade_max'):
                course_grade_max = float(request.GET.get('course_grade_max')) / 100
                grade_conditions['percent_grade__lte'] = course_grade_max

            subquery_grade_in_range = Exists(
                PersistentCourseGrade.objects.filter(
                    course_id=OuterRef('course'),
                    user_id=OuterRef('user_id'),
                    **grade_conditions
                )
            )
            annotations['course_grade_in_range'] = subquery_grade_in_range
            q_object |= Q(course_grade_in_range=True)

            q_objects.append(q_object)

        entries = []
        related_models = ['user']
        users = self._paginate_users(course_key, q_objects, related_models, annotations=annotations)
        users_counts = self._get_users_counts(course_key, q_objects, annotations=annotations)

        with bulk_gradebook_view_context(course_key, users):
            for user, course_grade, exc in CourseGradeFactory().iter(
                users, course_key=course_key, collected_block_structure=course_data.collected_structure
            ):
                if not exc:
                    entry = self._gradebook_entry(user, course, graded_subsections, course_grade)
                    entries.append(entry)

        serializer = StudentGradebookEntrySerializer(entries, many=True)
        return self.get_paginated_response(serializer.data, **users_counts)
class TestModelAdmin(ModelAdmin):
    ordering = (F('nonexistent'),)
def set_group_data(self, group_type):
    m = group_type.objects.filter(megasession=self)
    # We fetch and assign sender decisions to group objects here.
    subquery_head = group_type.objects.filter(
        id=OuterRef('id')
    ).annotate(sender_city=F('sender__city'), receiver_city=F('receiver__city'))

    sender_decision = Subquery(
        subquery_head.annotate(
            sender_decision=Sum(
                'sender__owner__trust_player__decisions__answer',
                filter=(
                    Q(sender__owner__trust_player__decisions__decision_type='sender_decision')
                    & Q(sender__owner__trust_player__decisions__city=F('receiver_city'))
                ))
        ).values('sender_decision')[:1]
    )
    receiver_decision = Subquery(
        subquery_head.annotate(
            receiver_decision=Sum(
                'receiver__owner__trust_player__decisions__answer',
                filter=(
                    Q(receiver__owner__trust_player__decisions__decision_type='return_decision')
                    & Q(receiver__owner__trust_player__decisions__city=F('sender__city'))
                ))
        ).values('receiver_decision')[:1]
    )
    sender_belief_re_receiver = Subquery(
        subquery_head.annotate(
            sender_belief=Sum(
                'sender__owner__trust_player__decisions__answer',
                filter=(
                    Q(sender__owner__trust_player__decisions__decision_type='sender_belief')
                    & Q(sender__owner__trust_player__decisions__city=F('receiver__city'))
                ))
        ).values('sender_belief')[:1]
    )
    receiver_belief_re_sender = Subquery(
        subquery_head.annotate(
            receiver_belief=Sum(
                'receiver__owner__trust_player__decisions__answer',
                filter=(
                    Q(receiver__owner__trust_player__decisions__decision_type='receiver_belief')
                    & Q(receiver__owner__trust_player__decisions__city=F('sender__city'))
                ))
        ).values('receiver_belief')[:1]
    )

    m.update(
        sender_decision_re_receiver=sender_decision,
        receiver_decision_re_sender=receiver_decision,
        sender_belief_re_receiver=sender_belief_re_receiver,
        receiver_belief_re_sender=receiver_belief_re_sender,
    )

    m = group_type.objects.filter(megasession=self)
    receiver_correct_guess = Case(
        When(sender_decision_re_receiver=receiver_belief_re_sender, then=Value(True)),
        default=Value(False),
        output_field=BooleanField(),
    )
    m.update(
        sender_belief_diff=Abs(F('receiver_decision_re_sender') - F('sender_belief_re_receiver')),
    )

    m = group_type.objects.filter(megasession=self)
    m.update(
        has_sender_sent=Case(
            When(~Q(sender_decision_re_receiver=0), then=Value(True)),
            default=Value(False),
            output_field=BooleanField()),
        receiver_correct_guess=receiver_correct_guess,
        sender_guess_payoff=Case(
            When(sender_belief_diff=0, then=Value(20)),
            When(sender_belief_diff=3, then=Value(10)),
            default=Value(0),
            output_field=IntegerField()
        )
    )
def resolve_products(self, info, page, page_size, route, sizes, colors,
                     effects, tags, hit, new, query, order):
    route_filter = Q(categories__path__icontains=route)

    sizes_list = list(filter(None, map(str.strip, sizes.split(','))))
    all_sizes_list = {
        'size_ns': 'Без размера',
        'size_xs': 'XS',
        'size_s': 'S',
        'size_m': 'M',
        'size_l': 'L',
        'size_xl': 'XL',
        'size_2xl': 'XXL',
        'size_3xl': 'XXXL',
        'size_4xl': 'XXXXL',
    }
    all_sizes_list_rev = dict(zip(all_sizes_list.values(), all_sizes_list.keys()))
    avaliable_sizes = [all_sizes_list_rev[key] for key in sizes_list]
    sizes_filter = Q()
    for size in avaliable_sizes:
        sizes_filter |= Q(**{size + '__gt': 0})

    color_list = list(filter(None, map(str.strip, colors.split(','))))
    color_filter = Q(colors__name__in=color_list) if color_list else Q()

    effects_list = list(filter(None, map(str.strip, effects.split(','))))
    effect_glow_in_the_dark = 'Светится в темноте' in effects_list
    effect_glow_in_the_uv = 'Светится в ультрафиолете' in effects_list
    effects_filter = Q()
    if effect_glow_in_the_dark:
        effects_filter &= Q(glow_in_the_dark=True)
    if effect_glow_in_the_uv:
        effects_filter &= Q(glow_in_the_uv=True)

    tag_list = list(filter(None, map(str.strip, tags.split(','))))
    tag_filter = Q(tags__name__in=tag_list) if tag_list else Q()

    hit_filter = Q(hit=True) if hit else Q()
    # The original checked `hit` here as well, which looks like a
    # copy-paste slip; filter on the `new` flag instead.
    new_filter = Q(new=True) if new else Q()

    query_filter = (
        Q(translations__description__icontains=query)
        | Q(model__icontains=query)
        | Q(sku__icontains=query)
    )

    pqs = Product.objects.filter(enable=True, total_count__gt=0)
    pqs = pqs.filter(route_filter)
    pqs = pqs.filter(query_filter)
    pqs = pqs.filter(hit_filter)
    pqs = pqs.filter(new_filter)

    qs = pqs.filter(color_filter)
    qs = qs.filter(effects_filter)
    qs = qs.filter(sizes_filter)
    qs = qs.filter(tag_filter)
    qs = qs.distinct()

    # NB: 'lable' is kept as spelled because the client API expects that key.
    sizes_qs = [
        {
            'lable': value,
            'count': pqs.filter(effects_filter & color_filter).values(key).filter(
                Q(**{key + '__gt': 0})).count(),
            # 'value': key in avaliable_sizes
        }
        for key, value in all_sizes_list.items()
    ]

    color_qs = (
        pqs.annotate(lable=F('colors__name')).values('lable')
        .exclude(lable__isnull=True)
        .annotate(count=Count('lable', filter=effects_filter & sizes_filter))
        .order_by('lable')
        # .annotate(value=Value(False, output_field=BooleanField()))
    )

    products_dark_qs = (
        pqs.values('glow_in_the_dark')
        .filter(glow_in_the_dark=True)
        .annotate(count=Count('glow_in_the_dark', filter=color_filter & sizes_filter))
        .annotate(lable=Value('Светится в темноте', output_field=CharField()))
        .values('lable', 'count')
        # .annotate(value=Value(effect_glow_in_the_dark, output_field=BooleanField()))
        # .values('lable', 'count', 'value')
    )

    products_uv_qs = (
        pqs.values('glow_in_the_uv')
        .filter(glow_in_the_uv=True)
        .annotate(count=Count('glow_in_the_uv', filter=color_filter & sizes_filter))
        .annotate(lable=Value('Светится в ультрафиолете', output_field=CharField()))
        .values('lable', 'count')
        # .annotate(value=Value(effect_glow_in_the_uv, output_field=BooleanField()))
        # .values('lable', 'count', 'value')
    )

    colors = list(color_qs)
    # for color in colors:
    #     if color['lable'] in color_list:
    #         color['value'] = True

    filters = [
        FiltersType(title='Размер', name='size', items=sizes_qs),
        FiltersType(title='Цвет', name='color', items=colors),
        FiltersType(title='Спецэффекты', name='effects',
                    items=list(products_dark_qs) + list(products_uv_qs)),
    ]

    if order == OrderEnum.Random.value:
        qs = qs.order_by('?')
    if order == OrderEnum.OrderInc.value:
        qs = qs.order_by('my_order')
    if order == OrderEnum.OrderDec.value:
        qs = qs.order_by('-my_order')
    if order == OrderEnum.PriceInc.value:
        qs = qs.order_by('price_ret', 'my_order')
    if order == OrderEnum.PriceDec.value:
        qs = qs.order_by('-price_ret', 'my_order')
    if order == OrderEnum.SaleInc.value:
        qs = qs.order_by('sale', 'my_order')
    if order == OrderEnum.SaleDec.value:
        qs = qs.order_by('-sale', 'my_order')
    # if order == OrderEnum.HitInc.value:
    #     qs = qs.order_by('hit', 'my_order')
    # if order == OrderEnum.HitDec.value:
    #     qs = qs.order_by('-hit', 'my_order')

    return get_paginator(qs, page, page_size, ProductCountableType, filters=filters)
def get_dataset_search_results(self,
                               clean=True,
                               exclude=None,
                               tagged_with=None,
                               library=None,
                               sample=None,
                               dataset_type=None,
                               storages=None,
                               flowcell_id_and_lane=None,
                               sequencing_center=None,
                               sequencing_instrument=None,
                               sequencing_library_id=None,
                               library_type=None,
                               index_format=None,
                               min_num_read_groups=None,
                               is_production=None,
                               aligner=None,
                               read_group_match=None,
                               analysis_version=None,
                               from_last_updated_date=None,
                               to_last_updated_date=None):
    """
    Perform the filter search with the given fields.

    The "clean" flag indicates whether the cleaned form data should be
    used. This method is called both from the cleaning method, where the
    data is not yet guaranteed to be clean, and from outside it, where
    the data can be trusted to be clean.

    :param clean: flag indicating whether the data has been cleaned yet
    :param tagged_with: tag strings separated by commas
    :param library: library id, e.g. A90652A
    :param sample: sample id, e.g. SA928
    :return: list of matching SequenceDataset ids
    """
    if clean:
        tagged_with = self.cleaned_data['tagged_with']
        exclude = self.cleaned_data['exclude']
        library = self.cleaned_data['library']
        sample = self.cleaned_data['sample']
        dataset_type = self.cleaned_data['dataset_type']
        storages = self.cleaned_data['storages']
        flowcell_id_and_lane = self.cleaned_data['flowcell_id_and_lane']
        sequencing_center = self.cleaned_data['sequencing_center']
        sequencing_instrument = self.cleaned_data['sequencing_instrument']
        sequencing_library_id = self.cleaned_data['sequencing_library_id']
        library_type = self.cleaned_data['library_type']
        index_format = self.cleaned_data['index_format']
        min_num_read_groups = self.cleaned_data['min_num_read_groups']
        aligner = self.cleaned_data['aligner']
        read_group_match = self.cleaned_data['read_group_match']
        analysis_version = self.cleaned_data['analysis_version']
        from_last_updated_date = self.cleaned_data['from_last_updated_date']
        to_last_updated_date = self.cleaned_data['to_last_updated_date']

    results = tantalus.models.SequenceDataset.objects.all()

    # TODO: add prefetch related
    if tagged_with:
        tags_list = [tag.strip() for tag in tagged_with.split(",")]
        exclude_list = [tag.strip() for tag in exclude.split(",")]
        for tag in tags_list:
            results = results.filter(tags__name=tag).exclude(
                tags__name__in=exclude_list)
    if sample:
        results = results.filter(sample__sample_id__in=sample.split())
    if dataset_type:
        results = results.filter(dataset_type__in=dataset_type)
    if storages:
        results = results.filter(
            file_resources__fileinstance__storage_id__in=storages)
    if library:
        results = results.filter(library__library_id__in=library.split())
    if sequencing_center:
        results = results.filter(
            sequence_lanes__sequencing_centre=sequencing_center)
    if sequencing_instrument:
        results = results.filter(
            sequence_lanes__sequencing_instrument=sequencing_instrument)
    if sequencing_library_id:
        results = results.filter(
            library__library_id__in=sequencing_library_id.split())
    if library_type:
        results = results.filter(library__library_type=library_type)
    if index_format:
        results = results.filter(library__index_format=index_format)
    if min_num_read_groups is not None:
        # Use a distinct alias for the lane count rather than shadowing
        # the relation lookup path with the annotation name.
        results = results.annotate(
            num_lanes=Count('sequence_lanes__lane_number')).filter(
                num_lanes__gte=min_num_read_groups)
    if flowcell_id_and_lane:
        query = Q()
        for flowcell_lane in flowcell_id_and_lane.split():
            if "_" in flowcell_lane:
                # Flowcell ID and lane number are assumed to be separated
                # by an underscore.
                flowcell, lane_number = flowcell_lane.split("_", 1)
                q = Q(sequence_lanes__flowcell_id=flowcell,
                      sequence_lanes__lane_number=lane_number)
            else:
                q = Q(sequence_lanes__flowcell_id=flowcell_lane)
            query = query | q
        results = results.filter(query)
    if is_production:
        results = results.filter(is_production=is_production)
    if aligner:
        results = results.filter(aligner__name__icontains=aligner)
    if read_group_match:
        # Keep only datasets whose distinct read groups cover every
        # sequencing lane of the library.
        results = results.annotate(
            num_read_group=Count('sequence_lanes', distinct=True),
            total_num_read_group=Count(
                'library__sequencinglane', distinct=True)).filter(
                    num_read_group=F('total_num_read_group'))
    if analysis_version:
        results = results.filter(
            analysis__version__icontains=analysis_version)
    if from_last_updated_date:
        results = results.filter(last_updated__gte=from_last_updated_date)
    if to_last_updated_date:
        results = results.filter(last_updated__lte=to_last_updated_date)

    results = results.distinct()
    return list(results.values_list('id', flat=True))
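The read_group_match branch compares two aggregates on the same queryset: a dataset is kept only when its distinct lane count equals the distinct lane count of its library. The same pattern in isolation, as a minimal sketch (model and relation names are taken from the code above, not verified against the real schema):

from django.db.models import Count, F

# Keep datasets whose read groups cover every lane of their library.
complete = (
    SequenceDataset.objects
    .annotate(num_read_group=Count('sequence_lanes', distinct=True))
    .annotate(total_num_read_group=Count('library__sequencinglane',
                                         distinct=True))
    .filter(num_read_group=F('total_num_read_group'))
)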
def listing(request, title):
    err = 0
    bids_amount = 0
    bidding = 0
    lst_bid = 0
    lst = 0
    bid = 0
    comments = 0
    listed_by = 0
    lst_author = Listing.objects.get(title=title).author
    if Listing.objects.all():
        lst_bid = Listing.objects.filter(title=title).values_list(
            'ebid', flat=True)[0]
        lst = Listing.objects.get(title=title)
    if Bidding.objects.all():
        bids_amount = Bidding.objects.filter(t__title=title).values_list(
            "counting", flat=True)[0]
        bidding = Bidding.objects.filter(t__title=title)
        bid = bidding.values_list('bid', flat=True)[0]
    if Comments.objects.all():
        comments = Comments.objects.filter(t__title=title)
    if Cart.objects.filter(listing2_id=lst.id):
        listed_by = Cart.objects.get(listing2__title=title, author=lst_author)

    form = CreateBid(request.POST)
    form2 = CreateComment(request.POST)

    if request.method == "POST" and 'placing' in request.POST:
        # A new bid must beat both the listing's current price and the
        # latest recorded bid.
        new_bid = int(request.POST.get('bid'))
        if (form.is_valid()
                and new_bid > Listing.objects.get(title=title).ebid
                and new_bid > signal_product_manage_latest_version_id(
                    Bidding, Bidding.objects.get(t__title=title)).bid):
            Listing.objects.filter(title=title).update(
                ebid=form.cleaned_data['bid'])
            # F('counting') + 1 increments the bid counter in the database.
            bidding.update(bid=form.cleaned_data['bid'],
                           counting=F('counting') + 1)
            return HttpResponseRedirect(reverse('index'))
        else:
            err = "Bid is not valid"
    if request.method == "POST" and 'commenting' in request.POST:
        if form2.is_valid():
            Comments.objects.create(user=request.user, t=lst,
                                    comment=form2.cleaned_data['comment'])
            return HttpResponseRedirect(reverse('listing', args=[title]))
    if request.method == "POST" and 'removing' in request.POST:
        tof = Cart.objects.get(listing2__title=title,
                               author=request.user.id).author
        wl_lst = Cart.objects.get(listing2__title=title,
                                  author=tof.id).listing2
        wl_lst.inlist = False
        wl_lst.save()
        return HttpResponseRedirect(reverse('watchlist'))
    if request.method == "POST" and 'winning' in request.POST:
        pas_lst = Listing.objects.get(title=title)
        PassiveListing.objects.create(
            title=pas_lst.title,
            description=pas_lst.description,
            image=pas_lst.image,
            ebid=Bidding.objects.get(t__title=title).bid)
        Listing.objects.filter(title=title).delete()
        return HttpResponseRedirect(reverse('passive_listing', args=[title]))
    if request.GET.get('wl') and request.user.is_authenticated:
        tof = Cart.objects.get(listing2__title=title,
                               author=request.user.id).author
        wl_lst = Cart.objects.get(listing2__title=title,
                                  author=tof.id).listing2
        wl_lst.inlist = True
        wl_lst.save()
    return render(request, "auctions/listing.html", {
        "lst": lst,
        "form": CreateBid(),
        "form2": CreateComment(),
        "err": err,
        "bids_amount": bids_amount,
        "bidding": bidding,
        "lst_bid": lst_bid,
        "bid": bid,
        "comments": comments,
        "lst_by": listed_by,
    })
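The bid counter in the view above is updated with counting=F('counting') + 1, which compiles to SET counting = counting + 1, so the arithmetic runs inside the database in a single UPDATE and two concurrent bids cannot overwrite each other's increment. A minimal sketch of the same pattern, reusing the Bidding model from the view:

from django.db.models import F

# The increment happens in SQL, not in Python, so no stale value is read
# into the process and written back.
Bidding.objects.filter(t__title=title).update(counting=F('counting') + 1)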
def get_transform_queryset(self):
    """
    Facility takes all of the raw data up to the num_registrations field.
    After that, the aggregated fields come from the facility's most recent
    registration (the field is called sub_date in the processed model and
    ReceiptDate in the raw model).

    Due to the nature of the foreign key relationships, the same
    calculations done on Registration are repeated here on Facility,
    computing a divider from the count of rows on the accident table.
    Again, the problem fields on this model are the aggregated chemical
    quantity fields (anything with the _tot suffix).
    """
    qs = raw_models.tblFacility.objects.filter(
        # Join only the most recent registration for each facility.
        tbls1facilities__FacilityID=Subquery(
            raw_models.tblS1Facilities.objects.filter(
                EPAFacilityID=OuterRef('EPAFacilityID'),
            ).values('FacilityID').annotate(
                max_sub_date=Max('FacilityID')
            ).values('max_sub_date').order_by('-max_sub_date')[:1])
    ).select_related(
        'FacilityCountyFIPS',
    ).annotate(
        id=F('EPAFacilityID'),
        facility_name=F('FacilityName'),
        rmp_id=F('FacilityID'),
        street_1=F('FacilityStr1'),
        street_2=F('FacilityStr2'),
        city=F('FacilityCity'),
        state=F('FacilityState'),
        zip_code=F('FacilityZipCode'),
        zip_ext=F('Facility4DigitZipExt'),
        county_fips=F('FacilityCountyFIPS'),
        county_name=F('FacilityCountyFIPS__County_Name'),
        num_registrations=F('CountOfFacilityID'),
        latitude=F('tbls1facilities__FacilityLatDecDegs'),
        longitude=F('tbls1facilities__FacilityLongDecDegs'),
        sub_type=F('tbls1facilities__SubmissionType'),
        sub_date=F('tbls1facilities__ReceiptDate'),
        execsum_rmp_id=F('FacilityID'),
        exec_sub_type=F('tbls1facilities__SubmissionType'),
        exec_sub_date=F('tbls1facilities__ReceiptDate'),
        deregistration_date=F('tbls1facilities__DeRegistrationDate'),
        dereg_effect_date=F('tbls1facilities__DeRegistrationEffectiveDate'),
        parent=F('tbls1facilities__ParentCompanyName'),
        parent_2=F('tbls1facilities__Company2Name'),
        operator_name=F('tbls1facilities__OperatorName'),
        operator_city=F('tbls1facilities__OperatorCity'),
        operator_state=F('tbls1facilities__OperatorStateFIPS'),
        operator_zip=F('tbls1facilities__OperatorZipCode'),
        province=F('tbls1facilities__ForeignStateProv'),
        county=F('tbls1facilities__FacilityCountyFIPS'),
        country=F('tbls1facilities__ForeignCountry'),
        sub_reason=F('tbls1facilities__RMPSubmissionReasonCode'),
        dereg_reason=F('tbls1facilities__DeregistrationReasonCode'),
        dereg_other=F('tbls1facilities__DeregistrationReasonOtherText'),
        registered=Case(
            When(dereg_reason__gt=0, then=0),
            default=Value(1),
            output_field=CopyFromBooleanField(),
        ),
        num_fte=F('tbls1facilities__FTE'),
        all_naics=F(
            'tbls1facilities__tbls1processes__tbls1process_naics__NAICSCode'),
        num_accident_records=Count(
            'tbls1facilities__tbls6accidenthistory'),
        num_accident_actual=Count(
            'tbls1facilities__tbls6accidenthistory', distinct=True),
        # Joining the accident-chemical table duplicates each accident row,
        # so the divider records how many times each accident is repeated.
        num_accident_divider=Case(
            When(num_accident_actual=0, then=1),
            default=F('num_accident_records') / F('num_accident_actual'),
        ),
        acc_flam_tot=Round(
            Sum(
                Case(
                    When(
                        tbls1facilities__tbls6accidenthistory__tbls6accidentchemicals__ChemicalID__ChemType='F',
                        then=F('tbls1facilities__tbls6accidenthistory__tbls6accidentchemicals__QuantityReleased'),
                    ),
                    default=Value(0),
                    output_field=CopyFromIntegerField(),
                )) / F('num_accident_divider'),
        ),
        acc_toxic_tot=Round(
            Sum(
                Case(
                    When(
                        tbls1facilities__tbls6accidenthistory__tbls6accidentchemicals__ChemicalID__ChemType='T',
                        then=F('tbls1facilities__tbls6accidenthistory__tbls6accidentchemicals__QuantityReleased'),
                    ),
                    default=Value(0),
                    output_field=CopyFromIntegerField(),
                )) / F('num_accident_divider'),
        ),
        acc_quantity_tot=F('acc_flam_tot') + F('acc_toxic_tot'),
        num_deaths=Coalesce(
            Round(
                Sum(
                    F('tbls1facilities__tbls6accidenthistory__DeathsWorkers')
                    + F('tbls1facilities__tbls6accidenthistory__DeathsPublicResponders')
                    + F('tbls1facilities__tbls6accidenthistory__DeathsPublic'),
                    default=Value(0),
                    output_field=CopyFromIntegerField(),
                ) / F('num_accident_divider')),
            Value(0),
        ),
        num_injuries=Coalesce(
            Round(
                Sum(
                    F('tbls1facilities__tbls6accidenthistory__InjuriesPublic')
                    + F('tbls1facilities__tbls6accidenthistory__InjuriesWorkers')
                    + F('tbls1facilities__tbls6accidenthistory__InjuriesPublicResponders'),
                    default=Value(0),
                    output_field=CopyFromIntegerField(),
                ) / F('num_accident_divider')),
            Value(0),
        ),
        num_evacuated=Coalesce(
            Round(
                Sum(
                    F('tbls1facilities__tbls6accidenthistory__Evacuated'),
                    output_field=CopyFromIntegerField(),
                ) / F('num_accident_divider')),
            Value(0),
        ),
        property_damage=Coalesce(
            Round(
                Sum(
                    F('tbls1facilities__tbls6accidenthistory__OnsitePropertyDamage')
                    + F('tbls1facilities__tbls6accidenthistory__OffsitePropertyDamage'),
                    output_field=CopyFromIntegerField(),
                ) / F('num_accident_divider')),
            Value(0),
        ),
    )
    return qs
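The num_accident_divider trick described in the docstring is easiest to check with concrete numbers. Suppose a hypothetical facility has two accidents, each joined to three accident-chemical rows; every accident row then appears three times in the join, so a plain Sum is three times too large:

# Toy numbers only; all values are hypothetical.
num_accident_records = 6    # Count over the joined rows
num_accident_actual = 2     # Count with distinct=True
divider = num_accident_records // num_accident_actual   # 3

deaths_per_accident = [1, 4]                             # true per-accident values
raw_sum = sum(d * divider for d in deaths_per_accident)  # 15, inflated by the join
assert raw_sum // divider == sum(deaths_per_accident)    # 5, the corrected total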