def get(self, request, *args):
    """Return a JSON array of distinct full names of site-visit owners and
    of validated, non-GBIF collection-record collectors, sorted A-Z.

    Entries equal to a single space (blank first AND last name joined by
    the Concat separator) are skipped from the front of the sorted list.
    """
    owners = (
        SiteVisit.objects.all()
        .exclude(owner__isnull=True)
        .annotate(
            full_name=Concat(
                'owner__first_name', V(' '), 'owner__last_name',
                output_field=CharField()))
        .distinct('full_name')
        .order_by('full_name')
        .values_list('full_name', flat=True)
    )
    collectors = (
        BiologicalCollectionRecord.objects.filter(validated=True)
        .exclude(collector__exact='')
        .annotate(
            full_name=Concat(
                'collector_user__first_name', V(' '),
                'collector_user__last_name',
                output_field=CharField()))
        .exclude(source_collection='gbif')
        .distinct('full_name')
        .order_by('full_name')
        .values_list('full_name', flat=True)
    )
    # Merge both name sources, de-duplicate and sort.
    all_users = sorted(set(list(owners) + list(collectors)))
    # Skip leading ' ' entries (users with empty first and last names).
    # BUG FIX: bound the index so a list consisting only of ' ' entries
    # cannot raise IndexError (the original walked past the end).
    user_index = 0
    while user_index < len(all_users) and all_users[user_index] == ' ':
        user_index += 1
    return HttpResponse(json.dumps(all_users[user_index:]),
                        content_type='application/json')
def get_queryset(self):
    """Build the DocumentMovement queryset for this view.

    Annotates camelCase aliases consumed by the serializer, hides
    soft-deleted rows, and optionally filters by the ``documentId`` /
    ``process`` query params; ``process == "last"`` limits the result to
    the most recently created movement.
    """
    queryset = (
        DocumentMovement.objects.annotate(
            documentId=F("document_id"),
            outputName=F("output__name"),
            outputId=F("output_id"),
            stepId=F("step_id"),
            # Full committee name: first + middle + last, space-separated.
            committeeName=Concat(
                F("committee__firstname"), V(" "),
                F("committee__middlename"), V(" "),
                F("committee__lastname")
            ),
            statusName=F("status__name"),
        )
        .exclude(isDeleted=True)  # soft-deleted rows are hidden
        .order_by("-id")
    )
    documentId = self.request.query_params.get("documentId", None)
    process = self.request.query_params.get("process", None)
    if documentId is not None:
        if process is not None:
            if process == "last":
                # Only the newest movement for this document.
                queryset = queryset.filter(
                    document_id=documentId).order_by("-dateCreated")[:1]
        else:
            # No process param: all movements for this document.
            # NOTE(review): a process value other than "last" leaves the
            # queryset unfiltered by document — confirm that is intended.
            queryset = queryset.filter(document_id=documentId)
    return queryset.prefetch_related(Prefetch("output", Output.objects.all()))
def execute(self) -> QuerySet:
    """Return the top-level comments of the target object, annotated with
    voter-id arrays, like/dislike counts, child-comment counts and the
    current user's own vote ('like' / 'dislike' / None)."""
    # ArrayAgg needs an explicit Cast to pin the exact element type;
    # otherwise a later __contains check against the aggregated array
    # raises an error.
    users_ids_who_likes_the_comment_arr = ArrayAgg(
        Cast('votes__user_id', IntegerField()),
        filter=Q(votes__vote=Vote.LIKE),
        distinct=True
    )
    users_ids_who_dislikes_the_comment_arr = ArrayAgg(
        Cast('votes__user_id', IntegerField()),
        filter=Q(votes__vote=Vote.DISLIKE),
        distinct=True
    )
    # Label the row according to which voter array holds the current user.
    current_user_vote_to_the_comment = Case(
        When(liked_arr__contains=[self.user_id], then=V('like')),
        When(disliked_arr__contains=[self.user_id], then=V('dislike')),
        output_field=CharField(),
        default=None
    )
    return (
        Comment.objects
        .filter(object_id=self.instance_id,
                content_type__app_label=self.app,
                content_type__model=self.model,
                parent__isnull=True)  # top-level comments only
        .annotate(child_count=Count('children'),
                  liked_arr=users_ids_who_likes_the_comment_arr,
                  disliked_arr=users_ids_who_dislikes_the_comment_arr,
                  # Coalesce: a NULL array (no votes) counts as 0.
                  likes=Coalesce(ArrayLength('liked_arr'), V(0)),
                  dislikes=Coalesce(ArrayLength('disliked_arr'), V(0)))
        # user_vote references liked_arr/disliked_arr, so it must live in
        # a second annotate() call.
        .annotate(user_vote=current_user_vote_to_the_comment)
    )
def get(self, request, *args, **kwargs):
    """Render a purchase order as a downloadable PDF.

    Falls back to a redirect to the dashboard when no context data is
    found for the given ``object_id``.
    """
    purchase_order_id = kwargs.get('object_id')
    response = redirect(to='dashboard:dashboard')
    context = self.get_data(request, purchase_order_id=purchase_order_id)
    if context:
        # Build "<society>_<vendor>-<order_date>.pdf" inside the database.
        Purchase_order_file_name = purchase.PurchaseOrder.objects.filter(
            pk=purchase_order_id).annotate(Purchase_order_file_name=Concat(
                F('society__name'), V('_'), F('vendor__name'), V('-'),
                F('order_date'), V('.pdf'), output_field=CharField()),
        ).values_list('Purchase_order_file_name', flat=True).first()
        # NOTE(review): debug print — consider replacing with logging.
        print("file_name", Purchase_order_file_name)
        response = PDFTemplateResponse(
            request=request,
            template=self.template_name,
            filename=Purchase_order_file_name,
            context=context,
            show_content_in_browser=False,  # force download, not inline view
            cmd_options={
                'margin-top': 50,
            },
        )
        # NOTE(review): debug print — consider removing.
        print("response", response)
    return response
def lead_user_list(request):
    """Search active users by username / first name / last name / email.

    Every whitespace-separated term of the submitted ``search`` string
    must appear (case-insensitively) somewhere in the concatenated user
    fields.

    :param request: POST request carrying SearchForm data
    :return: HttpResponse with at most 25 users serialized as JSON, or
        HttpResponseBadRequest with the form errors
    """
    search_form = SearchForm(request.POST)
    if not search_form.is_valid():
        return HttpResponseBadRequest(search_form.errors)

    # Annotate one searchable string so each term can match across any of
    # the name/email fields.
    user_results = User.objects.annotate(
        search_string=Concat(
            'username', V(' '),
            'first_name', V(' '),
            'last_name', V(' '),
            'email',
            output_field=CharField(),
        )
    ).filter(is_active=True)

    for split_row in search_form.cleaned_data['search'].split(' '):
        # BUG FIX: QuerySet.filter() returns a NEW queryset; the original
        # discarded it, so the search terms never narrowed the results.
        user_results = user_results.filter(search_string__icontains=split_row)

    # Return the json data
    return HttpResponse(serializers.serialize('json', user_results[:25]),
                        content_type='application/json')
def get(self, request, *args, **kwargs):
    """Render a complaint as a downloadable PDF.

    Falls back to a redirect to the dashboard when no context data is
    found for the given ``object_id``.
    """
    complaint_id = kwargs.get('object_id')
    response = redirect(to='dashboard:dashboard')
    context = self.get_data(request, complaint_id=complaint_id)
    if context:
        # Build "<number>_<name>-<create_date>.pdf" inside the database.
        Complaint_file_name = Complaint.objects.filter(
            pk=complaint_id).annotate(Complaint_file_name=Concat(
                F('number'), V('_'), F('name'), V('-'), F('create_date'),
                V('.pdf'), output_field=CharField()),
        ).values_list('Complaint_file_name', flat=True).first()
        # NOTE(review): debug print — consider replacing with logging.
        print("file_name", Complaint_file_name)
        response = PDFTemplateResponse(
            request=request,
            template=self.template_name,
            filename=Complaint_file_name,
            context=context,
            show_content_in_browser=False,  # force download, not inline view
            cmd_options={
                'margin-top': 50,
            },
        )
        # NOTE(review): debug print — consider removing.
        print("response", response)
    return response
def export_prestamos_csv(request):
    """Export active loans (estado=1) as a 'cartera.csv' attachment.

    Columns: id, capital lent, loan date, rate, client, capital payments,
    discounts and outstanding capital balance.
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="cartera.csv"'
    writer = csv.writer(response)
    writer.writerow([
        'id', 'Capital prestado', 'Fecha prestamo', 'Porcentaje', 'Cliente',
        'Abonos a capital', 'Descuentos', 'Saldo a capital'
    ])
    # Per-loan discount total via a correlated subquery, so the abono join
    # cannot multiply the discount rows.
    descuentos_prestam = Descuento.objects.filter(
        prestamo=OuterRef('pk')).values('prestamo').annotate(
            descuento_prestamo=Coalesce(Sum('valor_descuento'), V(0))).values(
                'descuento_prestamo')
    prestamos = Prestamo.objects.values(
        'id', 'capital_prestado', 'fecha_prestamo',
        'porcentaje_aplicado__porcentaje').annotate(
            cliente=Concat('cliente__nombres', V(' '), 'cliente__apellidos'),
            abonos_capital=Coalesce(Sum('abono__valor_abono_capital'), V(0)),
            abonos_interes=Coalesce(Sum('abono__valor_abono_interes'), V(0)),
            # BUG FIX: Coalesce the subquery — loans with no discount rows
            # previously yielded NULL, which propagated into saldo_capital
            # and saldo_interes (matches get_initial_queryset elsewhere).
            descuentos=Coalesce(
                Subquery(descuentos_prestam.values('descuento_prestamo')),
                V(0)),
            saldo_capital=ExpressionWrapper(
                F('capital_prestado') - F('abonos_capital') -
                F('descuentos'),
                output_field=FloatField()),
            saldo_interes=F('saldo_capital') *
            F('porcentaje_aplicado__porcentaje') /
            100.0).filter(estado=1).order_by('fecha_prestamo').values_list(
                'id', 'capital_prestado', 'fecha_prestamo',
                'porcentaje_aplicado__porcentaje', 'cliente',
                'abonos_capital', 'descuentos', 'saldo_capital')
    for prestamo in prestamos:
        writer.writerow(prestamo)
    return response
def get_initial_queryset(self):
    """Datatable queryset: active loans (estado=1) with client name,
    capital/interest payments, discounts and outstanding balances.

    Discounts are summed in a correlated subquery rather than a joined
    Sum() so they are not multiplied by the abono join.
    """
    # Per-loan discount total, correlated on the outer Prestamo pk.
    descuentos_prestam = Descuento.objects.filter(
        prestamo=OuterRef('pk')).values('prestamo').annotate(
            descuento_prestamo=Coalesce(Sum('valor_descuento'), V(
                0))).values('descuento_prestamo')
    return Prestamo.objects.values(
        'id', 'capital_prestado', 'fecha_prestamo',
        'porcentaje_aplicado__porcentaje').annotate(
            cliente=Concat('cliente__nombres', V(' '), 'cliente__apellidos'),
            abonos_capital=Coalesce(Sum('abono__valor_abono_capital'), V(0)),
            abonos_interes=Coalesce(Sum('abono__valor_abono_interes'), V(0)),
            # Coalesce so loans with no discounts yield 0, not NULL.
            descuentos=Coalesce(
                Subquery(descuentos_prestam.values('descuento_prestamo')),
                0),
            saldo_capital=ExpressionWrapper(
                F('capital_prestado') - F('abonos_capital') -
                F('descuentos'),
                output_field=FloatField()),
            saldo_interes=F('saldo_capital') *
            F('porcentaje_aplicado__porcentaje') /
            100.0).filter(estado=1).order_by('fecha_prestamo')
def test_decrypt(self, db):
    """Round-trip check: a value encrypted with pgp_sym_encrypt must
    decrypt back to the original plaintext via an ORM annotation."""
    ByteArrayModel.objects.create()
    encrypted = funcs.PgpSymEncrypt(V('hello'), V('secret'))
    ByteArrayModel.objects.update(content=encrypted)
    decrypt_expr = funcs.PgpSymDecrypt('content', V('secret'),
                                       output_field=TextField())
    row = ByteArrayModel.objects.annotate(decrypted=decrypt_expr).get()
    assert row.decrypted == "hello"
def get_data(self, request, daybook_id=None, company_id=None):
    """Fetch a single DayBook row (scoped to the company) and flatten it
    into a plain dict for rendering.

    Returns an empty dict when no matching record exists.
    """
    data = {}
    record = dayBook.DayBook.objects.filter(pk=daybook_id, company_id=company_id).annotate(
        daybook_number=F('number'),
        daybook_customer_type=F('customer_type'),
        # Coalesce to "-" so missing names render as a dash.
        dayBook_name=Coalesce('name', V("-")),
        daybook_customer_name=Coalesce('customer_name__name', V("-")),
        daybook_employee_name=Coalesce('employee_name__name', V("-")),
        daybook_vendor_name=Coalesce('vendor_name__name', V("-")),
        daybook_description=F('description'),
        daybook_status=F('status'),
        daybook_credit_amount=F('credit_amount'),
        daybook_debit_amount=F('debit_amount'),
        # Date formatted as DD/MM/YYYY in SQL via TO_CHAR.
        daybook_date=ExpressionWrapper(Func(F('date'), V("DD/MM/YYYY"),
                                            function='TO_CHAR'),
                                       output_field=CharField()),
    )
    for each in record:
        data.update({
            'pk': each.pk,
            'daybook_number': each.daybook_number,
            'daybook_customer_type': each.daybook_customer_type,
            'daybook_name': each.dayBook_name,
            'daybook_customer_name': each.daybook_customer_name,
            'daybook_employee_name': each.daybook_employee_name,
            'daybook_vendor_name': each.daybook_vendor_name,
            'daybook_description': each.daybook_description,
            'daybook_status': each.daybook_status,
            'daybook_credit_amount': each.daybook_credit_amount,
            'daybook_debit_amount': each.daybook_debit_amount,
            'daybook_date': each.daybook_date,
        })
    # NOTE(review): debug print — consider removing or using logging.
    print(data)
    return data
def get_attributes_headers(export_info: Dict[str, list]) -> List[str]:
    """Get headers for exported attributes.

    Headers are build from slug and contains information if it's a product
    or variant attribute. Respectively for product:
    "slug-value (product attribute)" and for variant:
    "slug-value (variant attribute)".
    """
    attribute_ids = export_info.get("attributes")
    if not attribute_ids:
        return []

    attributes = Attribute.objects.filter(pk__in=attribute_ids).order_by("slug")

    product_headers = (
        attributes
        .filter(product_types__isnull=False)
        .annotate(header=Concat("slug", V(" (product attribute)")))
        .values_list("header", flat=True)
    )
    variant_headers = (
        attributes
        .filter(product_variant_types__isnull=False)
        .annotate(header=Concat("slug", V(" (variant attribute)")))
        .values_list("header", flat=True)
    )
    # Product attribute headers first, then variant attribute headers.
    return [*product_headers, *variant_headers]
def get_products_data(
    queryset: "QuerySet",
    export_fields: Set[str],
    attribute_ids: Optional[List[int]],
    warehouse_ids: Optional[List[int]],
) -> List[Dict[str, Union[str, bool]]]:
    """Create data list of products and their variants with fields values.

    It return list with product and variant data which can be used as import
    to csv writer and list of attribute and warehouse headers.
    """
    products_with_variants_data = []
    product_fields = set(
        ProductExportFields.HEADERS_TO_FIELDS_MAPPING["fields"].values())
    product_export_fields = export_fields & product_fields
    # variants__id is always fetched: it is the key used below to join
    # variant relation data back onto each flat row.
    product_export_fields.add("variants__id")
    products_data = (queryset.annotate(
        # Render weights as "<value> g" strings; empty string when unset.
        product_weight=Case(
            When(weight__isnull=False, then=Concat("weight", V(" g"))),
            default=V(""),
            output_field=CharField(),
        ),
        variant_weight=Case(
            When(
                variants__weight__isnull=False,
                then=Concat("variants__weight", V(" g")),
            ),
            default=V(""),
            output_field=CharField(),
        ),
    ).order_by("pk", "variants__pk").values(*product_export_fields).distinct(
        "pk", "variants__pk"))
    products_relations_data = get_products_relations_data(
        queryset, export_fields, attribute_ids)
    variants_relations_data = get_variants_relations_data(
        queryset, export_fields, attribute_ids, warehouse_ids)
    # Merge per-product and per-variant relation data into each flat row.
    for product_data in products_data:
        pk = product_data["id"]
        variant_pk = product_data.pop("variants__id")
        product_relations_data: Dict[str, str] = products_relations_data.get(
            pk, {})
        variant_relations_data: Dict[str, str] = variants_relations_data.get(
            variant_pk, {})
        data = {
            **product_data,
            **product_relations_data,
            **variant_relations_data
        }
        products_with_variants_data.append(data)
    return products_with_variants_data
def test_concat_coalesce_idempotent(self): pair = ConcatPair(V('a'), V('b')) # Check nodes counts self.assertEqual(len(list(pair.flatten())), 3) self.assertEqual(len(list(pair.coalesce().flatten())), 7) # + 2 Coalesce + 2 Value() self.assertEqual(len(list(pair.flatten())), 3)
def get_progress(self, target_node):
    """Compute progress stats for ``target_node``.

    TOPIC nodes: a list of per-content-kind dicts with summed progress and
    descendant node counts (kinds with no logs are appended with 0
    progress).  Leaf nodes: a single-element list of aggregate totals.
    """
    kwargs = self.context['view'].kwargs
    if target_node.kind == content_kinds.TOPIC:
        kind_counts = target_node.get_descendant_kind_counts()
        # filter logs by each kind under target node, and sum progress over logs
        progress = ContentSummaryLog.objects \
            .filter_by_topic(target_node) \
            .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \
            .values('kind') \
            .annotate(total_progress=Sum('progress'))
        # add kind counts under this node to progress dict
        # (mutations survive because the queryset's result cache is reused
        # by list(progress) below)
        for kind in progress:
            kind['node_count'] = kind_counts[kind['kind']]
            del kind_counts[kind['kind']]
        # evaluate queryset so we can add data for kinds that do not have logs
        progress = list(progress)
        for key in kind_counts:
            progress.append({'kind': key, 'node_count': kind_counts[key], 'total_progress': 0})
        return progress
    else:
        # filter logs by a specific leaf node and compute stats over queryset
        leaf_node_stats = ContentSummaryLog.objects \
            .filter(content_id=target_node.content_id) \
            .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \
            .aggregate(total_progress=Coalesce(Sum('progress'), V(0)),
                       log_count_total=Coalesce(Count('pk'), V(0)),
                       log_count_complete=Coalesce(Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())), V(0)))
        return [leaf_node_stats]  # return as array for consistency in api
def get_queryset(self):
    """Annotate every row with ``solved_count`` and ``current_cost``.

    With dynamic scoring the cost decays quadratically with the number of
    distinct solvers and is clamped below by ``min_cost``; otherwise the
    static ``cost`` field is used unchanged.
    """
    qs = super(CTRCurrentCostManager, self).get_queryset()
    if self.dynamic:
        qs = qs.annotate(
            solved_count=Coalesce(
                Count(
                    'solved_by',
                    distinct=True,
                ),
                V(0),
            ),
            # Quadratic decay from max_cost toward min_cost as
            # solved_count grows; Greatest() clamps at min_cost.
            current_cost=Greatest(
                Ceil((F('min_cost') - F('max_cost')) /
                     (F('decay_value') * F('decay_value')) *
                     (F('solved_count') * F('solved_count')) +
                     F('max_cost')),
                F('min_cost'),
            ),
        )
    else:
        qs = qs.annotate(
            solved_count=Coalesce(
                Count(
                    'solved_by',
                    distinct=True,
                ),
                V(0),
            ),
            current_cost=F('cost'),  # static scoring: fixed cost
        )
    return qs
def get_queryset(self):
    """Attachments annotated with their process-requirement name and the
    full committee name.

    Filterable via query params: ``processRequirementId`` together with
    ``documentId``, or ``processRequirementAttachmentId`` alone.
    """
    queryset = ProcessRequirementAttachment.objects.annotate(
        processRequirementName=F('processRequirement__name'),
        # Full committee name: first + middle + last, space-separated.
        committeeName=Concat(F('committee__firstname'), V(' '),
                             F('committee__middlename'), V(' '),
                             F('committee__lastname')),
    ).order_by('id')
    processRequirementId = self.request.query_params.get(
        'processRequirementId', None)
    if processRequirementId is not None:
        documentId = self.request.query_params.get('documentId', None)
        if documentId is not None:
            queryset = queryset.filter(
                processRequirement__id=processRequirementId,
                document__id=documentId)
        # NOTE(review): processRequirementId without documentId applies no
        # filter at all — confirm that is intended.
    else:
        processRequirementAttachmentId = self.request.query_params.get(
            'processRequirementAttachmentId', None)
        if processRequirementAttachmentId is not None:
            queryset = queryset.filter(id=processRequirementAttachmentId)
    return queryset
def test_encrypt(self, db):
    """A value encrypted through an ORM update with pgp_sym_encrypt must
    decrypt back to the plaintext with the same key."""
    ByteArrayModel.objects.create()
    ByteArrayModel.objects.update(
        content=funcs.PgpSymEncrypt(V('hello'), V('secret')))
    # Decrypt at the SQL level to verify what was actually stored.
    data = utils.decrypt_column_values(ByteArrayModel, 'content', 'secret')
    assert data == ["hello"]
def get_owners(search, limit, offset):
    """Return owners filtered by GET parameters 'q'. With limit and offset

    Matches the search term against first name, last name and both
    orderings of the concatenated full name.

    Args:
        search (str): search term
        limit (int): maximum number of rows to return
        offset (int): number of rows to skip

    Returns:
        JsonResponse: list of {id, first_name, last_name} dicts
    """
    qs = (
        User.objects.annotate(full_name=Concat("first_name", V(" "), "last_name"))
        .annotate(full_name2=Concat("last_name", V(" "), "first_name"))
        .filter(
            Q(first_name__icontains=search)
            | Q(last_name__icontains=search)
            | Q(full_name__icontains=search)
            | Q(full_name2__icontains=search)
        )
        .values(
            "id",
            "first_name",
            "last_name",
        )
    )
    # PERF FIX: slice the queryset (SQL LIMIT/OFFSET) instead of
    # materializing every matching row into a list and slicing in Python.
    users = list(qs[offset:offset + limit])
    return JsonResponse(users, safe=False)
def post(self, request):
    """Search module mappings and render the results page.

    The free-text 'general' query is matched against the concatenation of
    nus_code / pu_code / pu_title; results can be narrowed by the selected
    pu_name values and nus_code prefixes.
    """
    template = loader.get_template(self.template_name)
    query_general = request.POST.get('general', '')
    pu_name = request.POST.getlist('pu_name')
    pu_prefix = request.POST.getlist('pu_prefix')
    # Require at least a 2-char query or an explicit facet selection;
    # otherwise render the empty page.
    if len(query_general) < 2 and pu_name == [] and pu_prefix == []:
        return HttpResponse(template.render({}, request), \
            content_type='text/html')
    mappings = Mapping.objects.annotate(
        combined=Concat('nus_code', V(' '), 'pu_code', V(' '), 'pu_title',
                        output_field=CharField()))
    # Query is upper-cased before matching — presumably the codes are
    # stored upper-case; TODO confirm.
    mappings = mappings.filter(combined__icontains=query_general.upper())
    if pu_name != []:
        mappings = mappings.filter(pu_name__in=pu_name)
    if pu_prefix != []:
        # OR together one startswith test per selected prefix.
        q_objects = Q()
        for pref in pu_prefix:
            q_objects |= Q(nus_code__startswith=pref)
        mappings = mappings.filter(q_objects)
    mappings = mappings.order_by('pu_name', 'nus_code')
    context = {'mappings': mappings}
    rendered_template = template.render(context, request)
    return HttpResponse(rendered_template, content_type='text/html')
def colrasio_dataset(request):
    """Return a Highcharts pie-chart config comparing 'Collect' against
    'Uncollect' totals over unclosed books.

    Non-staff users only see books they own or that belong to a fellow
    profile member.
    """
    buku_objs = PembukuanTransaksi.unclosed_book.all()
    if not request.user.is_staff:
        buku_objs = buku_objs.filter(
            Q(user=request.user) |
            Q(user__profile__profile_member=request.user.profile))
    # Coalesce keeps empty aggregates at 0. NOTE(review): status_type 1
    # appears to mean "collected" (debit) and 9 "sold" (kredit) based on
    # the chart labels — confirm against the model definition.
    collection = buku_objs.aggregate(
        v_collect=Coalesce(Sum('debit', filter=Q(status_type=1)), V(0)),
        v_sold=Coalesce(Sum('kredit', filter=Q(status_type=9)), V(0)),
    )
    chart = {
        'chart': {
            'type': 'pie'
        },
        'title': {
            'text': 'Collection Rasio'
        },
        'series': [{
            'name': 'admin',
            'data': [{
                'name': 'Collect',
                'y': collection.get('v_collect', 0)
            }, {
                'name': 'Uncollect',
                'y': collection.get('v_sold') - collection.get('v_collect')
            }]
        }]
    }
    return JsonResponse(chart)
def expired(self):
    """Annotate each token with a boolean ``expired`` flag: True when the
    token was created before the refresh-expiration cutoff."""
    cutoff = timezone.now() - jwt_settings.JWT_REFRESH_EXPIRATION_DELTA
    expired_flag = Case(
        When(created__lt=cutoff, then=V(True)),
        default=V(False),
        output_field=models.BooleanField(),
    )
    return self.annotate(expired=expired_flag)
def get_queryset(self):
    """Extend the base queryset with the combined committee name and
    flattened position/office name aliases."""
    full_name = Concat(F('firstname'), V(' '),
                       F('middlename'), V(' '),
                       F('lastname'))
    base = super().get_queryset()
    return base.annotate(
        committeeName=full_name,
        positionName=F('position__name'),
        officeName=F('position__office__name'),
    )
def export_excel(request):
    """Export Infogigi rows of the requested 'gigigubun' sub-division as
    an .xls attachment with one sheet named after the division."""
    gigigubun = request.GET.get('gigigubun')
    response = HttpResponse(content_type='application/ms-excel')
    # NOTE(review): gigigubun is raw user input placed in the filename and
    # sheet name unsanitized — confirm it cannot contain header-breaking
    # or invalid-sheet-name characters.
    response[
        'Content-Disposition'] = 'attachment; filename=' + gigigubun + '.xls'
    wb = xlwt.Workbook(encoding='utf-8')
    ws = wb.add_sheet(gigigubun)
    row_num = 0
    # Bold style for the header row only.
    font_style = xlwt.XFStyle()
    font_style.font.bold = True
    columns = [
        'productgubun', 'buy_date', 'people', 'place', 'maker', 'model',
        'ip', 'bigo'
    ]
    for col_num in range(len(columns)):
        ws.write(row_num, col_num, columns[col_num], font_style)
    # Plain (non-bold) style for the data rows.
    font_style = xlwt.XFStyle()
    rows = Infogigi.objects.filter(
        productgubun__sub_division=gigigubun).values_list(
            'productgubun', 'buy_date', 'people__name',
            # Location rendered as "[building]room" in SQL.
            Concat(V('['), 'place__building', V(']'), 'place__room'),
            'maker', 'model', 'ip', 'bigo')
    for row in rows:
        row_num += 1
        for col_num in range(len(row)):
            ws.write(row_num, col_num, str(row[col_num]), font_style)
    wb.save(response)
    return response
def _get_absence_queryset(start, end, *fields): """Формирует queryset к Absence. Args: start - начальная дата end - конечная дата *fields - поля запроса Returns: queryset """ # Выбираем записи частично попадающие в интервал queryset = Absence.objects.filter( end__gte=start, start__lte=end).annotate( employee=Concat( F('employment__employee__last_name'), V(' '), F('employment__employee__first_name'), V(' '), F('employment__employee__middle_name'), ), number=F('employment__number'), department=F('employment__staffing__department__name'), position=F('employment__staffing__position__name'), staff_units=F('employment__count'), absence_hours=F('hours'), ).order_by('employee').values(*fields).distinct() return queryset
def _get_assignment_queryset(start, end, *fields): """Формирует queryset к Assignment. Args: start - начальная дата end - конечная дата *fields - поля запроса Returns: queryset """ # Сумма часов по проекту для должности (employment) project_hours = Subquery( ProjectAssignment.objects.filter( project=OuterRef('projectassignments__project'), assignment__employment=OuterRef('employment'), assignment__start__gte=start, assignment__end__lte=end, ).values('project').order_by('project').annotate( sum=Coalesce(Sum('hours'), 0)).values('sum'), output_field=PositiveSmallIntegerField(), ) # Общая сумма часов для должности (employment) employment_hours = Subquery( ProjectAssignment.objects.filter( assignment__employment=OuterRef('employment'), assignment__start__gte=start, assignment__end__lte=end, ).values('assignment__employment').order_by( 'assignment__employment').annotate( sum=Coalesce(Sum('hours'), 0)).values('sum'), output_field=PositiveSmallIntegerField(), ) # Выбираем записи полностью попадающие в интервал queryset = Assignment.objects.filter( projects__isnull=False, start__gte=start, end__lte=end, ).annotate( employee=Concat( F('employment__employee__last_name'), V(' '), F('employment__employee__first_name'), V(' '), F('employment__employee__middle_name'), ), number=F('employment__number'), department=F('employment__staffing__department__name'), position=F('employment__staffing__position__name'), staff_units=F('employment__count'), employment_hours=Coalesce(employment_hours, 0), project=F('projectassignments__project__name'), project_hours=Coalesce(project_hours, 0), ).order_by('employee').values(*fields).distinct() return queryset
def lista_autores_server_json(request):
    """Server-side DataTables endpoint for the author list.

    Supports per-column search ('busqueda_individual' JSON), a global
    search across name/surname/email, ordering and paging; responds with
    the standard DataTables JSON envelope.
    """
    json_autores = []
    draw = request.POST['draw']
    start = int(request.POST['start'])
    length = int(request.POST['length'])
    order_by = request.POST.get('orden_columna')
    tipo_orden = request.POST.get('tipo_orden')
    global_search = request.POST['search[value]']
    busqueda_individual = request.POST.get('busqueda_individual', "{}")
    busqueda_individual = json.loads(busqueda_individual)
    kwargs_autores = {}
    kwargs_annotate = {}
    if busqueda_individual:
        for columna_dic in busqueda_individual:
            columna_name = str(columna_dic.get("columna"))
            valor_busqueda = columna_dic.get("valor_busqueda")
            # The virtual 'nombre_completo' column searches the
            # concatenated nombre + apellidos annotation.
            if columna_name in ["nombre_completo"]:
                kwargs_annotate["full_name"] = Concat('nombre', V(' '),
                                                      'apellidos')
                kwargs_autores["full_name__icontains"] = valor_busqueda
            else:
                kwargs_autores["{0}__icontains".format(
                    columna_name)] = valor_busqueda
    autores = Autor.objects.annotate(**kwargs_annotate).filter(
        **kwargs_autores)
    if global_search:
        autores = autores.annotate(
            full_name=Concat('nombre', V(' '), 'apellidos')).filter(
                Q(nombre__icontains=global_search) |
                Q(apellidos__icontains=global_search) |
                Q(email__icontains=global_search) |
                Q(full_name__icontains=global_search))
    if order_by:
        if order_by == "nombre_completo":
            # The virtual full-name column sorts by first name.
            order_by = "nombre"
        # NOTE(review): assumes tipo_orden is '' or '-'; a missing value
        # (None) would raise TypeError here — confirm the client always
        # sends it.
        autores = autores.order_by(tipo_orden + order_by)
    total_count = autores.count()
    filtered_count = total_count
    try:
        # Page the results; negative slicing raises AssertionError in
        # Django querysets and is deliberately ignored.
        autores = autores[start:start + length]
    except AssertionError:
        pass
    for autor in autores:
        json_autores.append({
            "nombre_completo": autor.nombre + " " + autor.apellidos,
            "nombre": autor.nombre,
            "apellidos": autor.apellidos,
            "email": autor.email,
        })
    json_data = {
        "draw": draw,
        "recordsTotal": total_count,
        "recordsFiltered": filtered_count,
        "data": json_autores,
    }
    json_data = json.dumps(json_data)
    return HttpResponse(json_data, content_type='application/json')
def get_data(self, request, billOfSupply_id=None, company_id=None):
    """Fetch one BillOfSupply (scoped to the company) together with its
    lines and flatten everything into a dict for rendering.

    Per-line discount/GST amounts are recomputed in Python and rounded to
    two decimals.  Returns an empty dict when no record matches.
    """
    data = {}
    # NOTE(review): debug prints — consider removing or using logging.
    print(billOfSupply_id)
    print(company_id)
    billOfSupply_lines = bill_of_supply.BillOfSupplyLines.objects.annotate(product_name=F('product__name'))
    print(billOfSupply_lines)
    record = bill_of_supply.BillOfSupply.objects.filter(pk=billOfSupply_id,
                                                        company_id=company_id).annotate(
        # Dates formatted as DD/MM/YYYY in SQL via TO_CHAR.
        billOfSupply_issue_date=ExpressionWrapper(
            Func(F('issue_date'), V("DD/MM/YYYY"), function='TO_CHAR'),
            output_field=CharField()),
        billOfSupply_due_date=ExpressionWrapper(
            Func(F('due_date'), V("DD/MM/YYYY"), function='TO_CHAR'),
            output_field=CharField()),
        billOfSupply_client=F('client__name'),
    ).prefetch_related(Prefetch('billofsupplylines_set',
                                queryset=billOfSupply_lines,
                                to_attr='billOfSupply_lines'))
    for each in record:
        data.update({
            'billOfSupply_issue_date': each.billOfSupply_issue_date,
            'billOfSupply_due_date': each.billOfSupply_due_date,
            'billOfSupply_client': each.billOfSupply_client,
            'number': each.number,
            'ship_to': each.ship_to,
            'place_of_supply': each.place_of_supply,
            'payment_terms': each.payment_terms,
            'clean_amount': each.clean_amount,
            'grand_total': each.grand_total,
            'rounded_off_value': each.rounded_off_value,
            'grand_total_without_round': each.grand_total_without_round,
            'centralGst': each.centralGst,
            'stateGst': each.stateGst,
            'internationalGst': each.internationalGst,
            'gst': each.gst,
            'tax_amount': each.tax_amount,
            'discount_amount': each.discount_amount,
            'pk': each.pk,
            'billOfSupply_lines': [
                {'product_name': line.product_name,
                 'uom': line.uom,
                 'quantity': line.quantity,
                 'unit_price': line.unit_price,
                 # Absolute discount amount for this line.
                 'line_discount': round(
                     ((line.discount / 100) * line.unit_price *
                      line.quantity), 2),
                 'total_without_gst': round(
                     (line.quantity * line.unit_price -
                      (line.discount / 100) * line.unit_price *
                      line.quantity), 2),
                 'total_with_gst': round(
                     (line.quantity * line.unit_price + line.quantity *
                      (line.unit_price * (int(line.tax)) / 100) -
                      (line.discount / 100) * line.unit_price *
                      line.quantity), 2),
                 # Central and state GST are each half of the line tax.
                 'line_centralGst': round(
                     ((line.quantity *
                       (line.unit_price * (int(line.tax)) / 100)) / 2), 2),
                 'line_stateGst': round(
                     ((line.quantity *
                       (line.unit_price * (int(line.tax)) / 100)) / 2), 2),
                 'line_internationalGst': round(
                     (line.quantity *
                      (line.unit_price * (int(line.tax)) / 100)), 2),
                 } for line in each.billOfSupply_lines]
        })
    return data
def month_severity_count(self, user, kwargs=None, exclude_kwargs=None):
    """Count open vulnerabilities by (month, severity).

    Each record is labelled with the first day of its creation month as
    '01-<month>-<year>', then ('month', severity) pairs ordered by
    creation date are handed to ``process_list``.

    Args:
        user: user whose open-vulnerability query is built
        kwargs: optional filter kwargs for the query (default: none)
        exclude_kwargs: optional exclusion kwargs (default: none)
    """
    # IDIOM FIX: mutable default arguments ({}) are shared between calls;
    # use None sentinels instead. Behavior is unchanged for all callers.
    kwargs = {} if kwargs is None else kwargs
    exclude_kwargs = {} if exclude_kwargs is None else exclude_kwargs
    raw_vuls = self.get_open_vul_query(
        user, kwargs, exclude_kwargs).order_by('created_on')
    months = raw_vuls.annotate(
        month=Concat(V('01'), V('-'), ExtractMonth('created_on'), V('-'),
                     ExtractYear('created_on')),
        severity=F('severity')) \
        .values_list('month', 'severity')
    return self.process_list(months)
def dashboard(request):
    """Render the delivery dashboard: a 30-day amount line chart plus
    count and amount summary figures."""
    # Step 1: Create a DataPool with the data we want to retrieve.
    deliveries = \
        DataPool(
            series=[{
                'options': {
                    'source': Delivery.objects.filter(
                        added_at__gte=timezone.now() - timedelta(days=30))},
                'terms': ['amount']
            }])
    # Step 2: Create the Chart object
    cht = Chart(
        datasource=deliveries,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'amount': ['amount']
            }
        }],
        chart_options={
            'title': {
                'text': 'Amount (Last 30 Days)'
            },
            'xAxis': {
                'title': {
                    'text': 'Amount'
                }
            }
        })
    all_deliveries = Delivery.objects.all().count()
    todays_deliveries = Delivery.objects.filter(
        added_at__date=date.today()).count()
    last_seven_days = Delivery.objects.filter(
        added_at__gte=timezone.now() - timedelta(days=7)).count(
    )  # I replaced 'datetime' with 'timezone' to avoid a runtime error
    last_30_days = Delivery.objects.filter(added_at__gte=timezone.now() -
                                           timedelta(days=30)).count()
    # Coalesce keeps empty aggregates at 0 instead of None.
    total_amount = Delivery.objects.aggregate(
        sum=Coalesce(Sum('amount'), V(0))).get('sum')
    total_amount_last_7_days = Delivery.objects.filter(
        added_at__gte=timezone.now() - timedelta(days=7)).aggregate(
            sum=Coalesce(Sum('amount'), V(0))).get('sum')
    template = 'jskdelivery/dashboard.html'
    context = {
        'cht_list': cht,
        'all_deliveries': all_deliveries,
        'todays_deliveries': todays_deliveries,
        'last_seven_days': last_seven_days,
        'last_30_days': last_30_days,
        'total_amount': total_amount,
        'total_amount_last_7_days': total_amount_last_7_days,
    }
    return render(request, template, context)
def test_function_as_filter(self):
    """Database functions can appear on the right-hand side of filter()
    and exclude(): Upper(V('smithj')) must match the stored alias
    'SMITHJ'."""
    Author.objects.create(name='John Smith', alias='SMITHJ')
    Author.objects.create(name='Rhonda')
    self.assertQuerysetEqual(
        Author.objects.filter(alias=Upper(V('smithj'))), ['John Smith'],
        lambda x: x.name)
    self.assertQuerysetEqual(
        Author.objects.exclude(alias=Upper(V('smithj'))), ['Rhonda'],
        lambda x: x.name)