def queryset_iterator(queryset: QuerySet, chunksize: int = 5000) -> Iterator[Any]:
    """Yield the rows of ``queryset`` in id-ordered chunks of ``chunksize``.

    Avoids loading the entire result set at once: after each chunk, the
    next slice is fetched by filtering on ids greater than the last one seen.
    """
    remaining = queryset.order_by('id')
    while remaining.exists():
        last_id = None
        for row in remaining[:chunksize]:
            last_id = row.id
            yield row
        # exists() was True, so the loop ran at least once and last_id is set.
        remaining = remaining.filter(id__gt=last_id)
def dashboard(request):
    """Render the dashboard with the user's five most recently modified
    documents and the recent change notifications for those documents.
    """
    documents = (request.user.document_owner.get_queryset()
                 | request.user.document_set.get_queryset())
    documents = documents.distinct().order_by('-last_modified')[:5]
    # The previous code combined *sliced* querysets with ``|``, which Django
    # rejects ("Cannot combine queries once a slice has been taken").  Collect
    # the ids of up to 5 notifications per document, then query them in one go.
    notification_ids = set()
    for document in documents:
        notification_ids.update(
            document.changenotification_set.values_list('id', flat=True)[:5])
    notifications = models.ChangeNotification.objects.filter(
        id__in=notification_ids)
    return render(request, 'dashboard/dashboard.html', {
        'documents': documents,
        'notifications': notifications.order_by('-modify_time').all()
    })
def add_reference_to_field_on_related_model(cls, qs: QuerySet, **kwargs):
    """Annotate ``qs`` with fields pulled from its single related model.

    Each ``key=value`` pair in ``kwargs`` becomes an annotation named ``key``
    referencing ``<related_model_name>__<value>``.  Annotations are only added
    when every row in ``qs`` shares the same ``related_model_name``.
    """
    cls.assert_is_proxy(qs)
    result = qs.all()
    distinct_names = qs.order_by('related_model_name').distinct('related_model_name')
    if distinct_names.count() == 1:
        related = qs.first().related_model_name
        annotations = {
            key: F('{}__{}'.format(related, value))
            for key, value in kwargs.items()
        }
        result = result.annotate(**annotations)
    return result
def compute_activity(self, user_activity_objects: QuerySet) -> None:
    """Print a rough per-client activity report for the past week.

    Counts how many users from the given queryset were seen with each client
    recently.  Because client activity is registered inconsistently, this is
    only an approximation of client popularity — it says nothing about the
    relative volume of requests from each client.
    """
    threshold = timezone_now() - datetime.timedelta(days=7)
    client_counts = (user_activity_objects
                     .filter(last_visit__gt=threshold)
                     .values("client__name")
                     .annotate(count=Count('client__name')))

    total = 0
    counts = []
    for row in client_counts:
        total += row["count"]
        counts.append((row["count"], row["client__name"]))

    # Ascending by count, so the most-used clients print last.
    counts.sort()
    for count, client in counts:
        print("%25s %15d" % (client, count))
    print("Total:", total)
def unpublish_page(modeladmin: admin.ModelAdmin, request: HttpRequest, queryset: QuerySet):
    """Admin action: mark every page in ``queryset`` as unpublished."""
    pages_updated = queryset.update(published=False)
    if pages_updated == 1:
        message = '1 page was'
    else:
        # Fixed grammar: "pages were", not "page were".
        message = '{:d} pages were'.format(pages_updated)
    # The action sets published=False, so report "unpublished"
    # (the old message wrongly said "published").
    modeladmin.message_user(
        request, '{:s} successfully marked as unpublished.'.format(message))
def aggregate_by_interval(
    events: QuerySet, team_id: int, entity: Entity, filter: Filter, breakdown: Optional[str] = None,
) -> Tuple[Dict[str, Any], QuerySet]:
    """Aggregate event counts per time interval (day by default), optionally
    broken down by cohort, person property, or event property.

    Returns (date-filled aggregates dict, the filtered events queryset).
    """
    interval = filter.interval if filter.interval else "day"
    interval_annotation = get_interval_annotation(interval)
    filtered_events = events.filter(
        filter_events(team_id, filter, entity, interval_annotation=interval_annotation[interval])
    )
    # ``values`` collects the GROUP BY columns; the interval bucket is always one.
    values = [interval]
    if breakdown:
        if filter.breakdown_type == "cohort":
            # One annotation per cohort; each joins the GROUP BY.
            cohort_annotations = add_cohort_annotations(
                team_id, filter.breakdown if filter.breakdown and isinstance(filter.breakdown, list) else []
            )
            values.extend(cohort_annotations.keys())
            filtered_events = filtered_events.annotate(**cohort_annotations)
            breakdown = "cohorts"
        elif filter.breakdown_type == "person":
            # Person-property annotations are added but not grouped on here;
            # the original ``breakdown`` name is what joins the GROUP BY.
            person_annotations = add_person_properties_annotations(
                team_id, filter.breakdown if filter.breakdown and isinstance(filter.breakdown, str) else ""
            )
            filtered_events = filtered_events.annotate(**person_annotations)
            values.append(breakdown)
        else:
            values.append(breakdown)
    # Trailing order_by() clears any default ordering so it doesn't pollute the GROUP BY.
    aggregates = filtered_events.annotate(**interval_annotation).values(*values).annotate(count=Count(1)).order_by()
    if breakdown:
        aggregates = aggregates.order_by("-count")
    aggregates = process_math(aggregates, entity)
    # Fill in buckets with zero events across the requested date range.
    dates_filled = group_events_to_date(
        date_from=filter.date_from,
        date_to=filter.date_to,
        aggregates=aggregates,
        interval=interval,
        breakdown=breakdown,
    )
    return dates_filled, filtered_events
class SecurityQuestionForm(forms.ModelForm):
    """Form that lets a user pick a security question and provide an answer.

    The question's display text is translated to the given language in
    ``__init__`` when a translation exists.
    """

    # Placeholder queryset; __init__ swaps in the real ``questions`` queryset.
    question = forms.ModelChoiceField(
        queryset=QuerySet(),
        label=constants.SECURITY_QUESTIONS_QUESTION_LABEL,
        empty_label=constants.SECURITY_QUESTIONS_EMPTY_LABEL,
        help_text=constants.SECURITY_QUESTIONS_QUESTION_HELP_TEXT,
    )
    answer = forms.CharField(
        widget=forms.Textarea,
        label=constants.SECURITY_QUESTIONS_ANSWER_LABEL,
        help_text=constants.SECURITY_QUESTIONS_ANSWER_HELP_TEXT,
        error_messages=constants.SECURITY_QUESTIONS_ANSWER_VALIDATION_ERRORS)

    class Meta:
        model = models.UserSecurityQuestion
        fields = ["question", "answer"]

    def __init__(self, questions, language, *args, **kwargs):
        """Populate the question choices, translating each choice's display
        text to ``language`` when a translation exists.

        :param questions: queryset of available security questions
        :param language: language code used to look up translated question text
        """
        super(SecurityQuestionForm, self).__init__(*args, **kwargs)
        self.fields["question"].queryset = questions

        # Always clear out answer fields.
        self.initial["answer"] = ""

        # Choice tuple can't be directly updated. Update only the widget choice
        # text, value is used for validation and saving.
        updated_choices = []
        for choice in self.fields["question"].widget.choices:
            # Non-int first elements (e.g. the empty label) are kept unchanged.
            if isinstance(choice[0], int):
                text = questions.get(
                    id=choice[0]).questionlanguagetext_set.filter(
                        language_code=language).first()

                # If there is no language specific text available, default to
                # the original.
                choice = (choice[0], text.question_text if text else choice[1])
            updated_choices.append(tuple(choice))

        # Replace choices with new set.
        self.fields["question"].widget.choices = updated_choices
class PaginationMixIn(BasicAttrsMixIn):
    """Mixin that paginates ``model``'s queryset and returns a page as JSON."""

    model = None
    qs = QuerySet()
    pg = []

    def dispatch(self, request, *args, **kwargs):
        """Validate the request, build the paginated queryset and respond."""
        validated = self.validate_data()
        if validated:
            return validated
        self.parse_basics()
        self.qs = self.model.objects.all()
        self.prepare_qs()
        self.pg = Paginator(self.qs, self.per_page)
        return self.prepare_response()

    def validate_data(self):
        """Hook: return a response to short-circuit dispatch, or None to proceed."""
        return

    def prepare_qs(self):
        """Hook for subclasses to refine ``self.qs``."""
        pass

    def to_dict(self, obj):
        """Serialize a single object for the JSON payload."""
        return obj.to_dict()

    def prepare_data(self):
        """Serialize every object on the requested page."""
        return [self.to_dict(item) for item in self.apply_page()]

    def apply_page(self):
        """Return the object list for the requested page number."""
        return self.pg.page(self.page).object_list

    def prepare_response(self):
        """Wrap the serialized page in the standard JSON envelope."""
        return JsonResponse({
            'ok': True,
            'data': self.prepare_data()
        }, safe=False)
def generate_excel(students: QuerySet, milspecialties: QuerySet) -> Path:
    """Generate an Excel file with one worksheet per military specialty.

    Each worksheet lists the students belonging to that specialty.

    Returns:
        A path to the generated Excel file.
    """
    path = Path(f"/tmp/{uuid.uuid4()}.xlsx")
    workbook = xlsxwriter.Workbook(path)

    # Reusable cell formats.
    formats = {
        "header": workbook.add_format({"bold": True, "align": "center"}),
        "center": workbook.add_format({"align": "center"}),
        "date": workbook.add_format({
            "num_format": "dd.mm.yyyy",
            "align": "center",
        }),
        "grade": workbook.add_format({
            "align": "center",
            "num_format": "#,##0.00",
        }),
    }

    for milspecialty in milspecialties:
        worksheet = workbook.add_worksheet(milspecialty.code)
        _fill_header(worksheet=worksheet, cell_format=formats["header"])
        specialty_students = students.filter(milspecialty=milspecialty)
        # start=1 leaves row 0 for the header.
        for row_index, student in enumerate(specialty_students, start=1):
            cells = _make_student_row(
                student=student,
                align_center=formats["center"],
                russian_date=formats["date"],
                mean_grade=formats["grade"],
            )
            for col_index, (data, cell_format) in enumerate(cells):
                worksheet.write(row_index, col_index, data, cell_format)

    workbook.close()
    return path
def get_buttons(
        subscriber: Subscriber = None,
        prayer_times: QuerySet = None,
        prayer_at_user_pk: int = None) -> List[List[Tuple[str, str]]]:
    """Return inline-keyboard buttons reflecting prayer read statuses.

    :param subscriber: subscriber to generate prayers for (used when no pk is given)
    :param prayer_times: prayer times used to generate new prayer records
    :param prayer_at_user_pk: pk of an existing PrayerAtUser to rebuild the
        day's buttons from
    """
    # TODO: if a user received 2 prayer times in different cities on the same
    # day, this will probably fail.
    text_for_read_prayer = "set_prayer_status_to_unread({})"
    text_for_unread_prayer = "set_prayer_status_to_read({})"
    if prayer_at_user_pk:
        # Fetch the record once instead of issuing two identical queries.
        prayer_at_user = PrayerAtUser.objects.get(pk=prayer_at_user_pk)
        day = prayer_at_user.prayer.day
        subscriber = prayer_at_user.subscriber
        prayers = PrayerAtUser.objects.filter(
            subscriber=subscriber, prayer__day=day).order_by("pk")
    else:
        prayers = generate_prayer_at_user(
            subscriber.tg_chat_id, prayer_times.order_by("pk"))
    buttons = []
    for prayer in prayers:
        # A read prayer offers "mark unread" and vice versa.
        if prayer.is_read:
            handle_text = text_for_read_prayer.format(prayer.pk)
        else:
            handle_text = text_for_unread_prayer.format(prayer.pk)
        buttons.append((get_emoji_for_button(prayer), handle_text))
    return [buttons]
def _accept_risks(accepted_risks: List[AcceptedRisk], base_findings: QuerySet, owner: User):
    """Create a Risk_Acceptance for each accepted risk that matches findings by CVE.

    Matching findings are attached to the acceptance and flagged as
    risk-accepted.  Returns the list of created acceptances.
    """
    created = []
    for risk in accepted_risks:
        findings = base_findings.filter(cve=risk.cve)
        if not findings.exists():
            continue
        # TODO we could use risk.cve to name the risk_acceptance, but would need to check for existing risk_acceptances in that case
        # so for now we add some timestamp based suffix
        name = risk.cve + ' via api at ' + timezone.now().strftime('%b %d, %Y, %H:%M:%S')
        acceptance = Risk_Acceptance.objects.create(
            owner=owner,
            name=name[:100],
            decision=Risk_Acceptance.TREATMENT_ACCEPT,
            decision_details=risk.justification,
            accepted_by=risk.accepted_by[:200])
        acceptance.accepted_findings.set(findings)
        findings.update(risk_accepted=True)
        acceptance.save()
        created.append(acceptance)
    return created
def area_tree_helper(filtered_area: List[TreeItem], records: QuerySet,
                     areas: Optional[List[TreeItem]] = None):
    """Recursively yield areas for iteration in a view.

    Non-root recursion levels are bracketed by 'in'/'out' markers; the
    top-level call ends with a single trailing 'out'.  Leaf areas get their
    ``occupants`` attached from ``records``.
    """
    if areas is None:
        # Top-level call: start from the root areas.
        areas = [item for item in filtered_area if item.is_root]
    else:
        yield 'in'
    for area in areas:
        yield area
        children = [child for child in area.children if child in filtered_area]
        if children:
            area.leaf = False
            yield from area_tree_helper(filtered_area, records, children)
        else:
            area.occupants = records.filter(area__id=area.id)
            area.leaf = True
    yield 'out'
def extract_view_queryset(query_set: QuerySet, viewed_fields: Union[str, Iterable] = None,
                          select_all_field: bool = True, **kwargs) -> QuerySet:
    """Restrict ``query_set`` to only the specified fields.

    :param query_set: QuerySet for data extraction
    :param viewed_fields: fields to keep, given as a list or a
        comma-separated string
    :param select_all_field: when True, keep all fields (no restriction)
    :rtype: QuerySet that contains only the specified fields
    """
    # Fixed the typo'd catch-all name (**kwagrs -> **kwargs); callers pass
    # keywords by name, so this is backward-compatible.
    if not select_all_field and viewed_fields is not None and \
            viewed_fields and query_set and (isinstance(query_set, QuerySet) or
                                             hasattr(query_set, 'values')):
        if isinstance(viewed_fields, str):
            viewed_fields = viewed_fields.split(',')
        # Resolve each entry to its underlying field name via the container helper.
        view_field_desc = [
            get_from_container(field_name, [('field_name', None)], True)[0]
            for field_name in viewed_fields
        ]
        query_set = query_set.values(*view_field_desc)
    return query_set
def filter_by_keyword(queryset: QuerySet, keyword: Optional[str]) -> QuerySet:
    """Filter rows whose (space-insensitive) title, code, department name or
    professor name matches ``keyword``.  No-op for None/blank keywords."""
    if keyword is None:
        return queryset
    stripped = keyword.strip()
    if not stripped:
        return queryset
    compact = stripped.replace(' ', '')
    # Compare space-stripped titles against the space-stripped keyword so
    # spacing differences don't prevent a match.
    annotated = queryset.annotate(
        title_space_removed=Replace('title', Value(' '), Value('')),
        title_en_space_removed=Replace('title_en', Value(' '), Value('')),
    )
    match = (
        Q(title_space_removed__icontains=compact)
        | Q(title_en_space_removed__icontains=compact)
        | Q(old_code__istartswith=stripped)
        | Q(department__name__iexact=stripped)
        | Q(department__name_en__iexact=stripped)
        | Q(professors__professor_name__icontains=stripped)
        | Q(professors__professor_name_en__icontains=stripped)
    )
    return annotated.filter(match)
def extract_recap_documents(
    docs: QuerySet,
    skip_ocr: bool = False,
    order_by: Optional[str] = None,
    queue: Optional[str] = None,
) -> None:
    """Loop over RECAPDocuments and extract their contents. Use OCR if
    requested.

    :param docs: A queryset containing the RECAPDocuments to be processed.
    :param skip_ocr: When True, only process items whose OCR status is still
        unknown; when False, only process those flagged as needing OCR.
    :param order_by: Optional processing-order optimization: 'small-first'
        or 'big-first' (by page count).
    :param queue: The celery queue to send the content to.
    """
    docs = docs.exclude(filepath_local="")
    if skip_ocr:
        # Focus on the items that we don't know if they need OCR.
        docs = docs.filter(ocr_status=None)
    else:
        # We're doing OCR. Only work with those items that require it.
        docs = docs.filter(ocr_status=RECAPDocument.OCR_NEEDED)

    # (None compares unequal to both strings, so no outer None check is needed.)
    if order_by == "small-first":
        docs = docs.order_by("page_count")
    elif order_by == "big-first":
        docs = docs.order_by("-page_count")

    count = docs.count()
    throttle = CeleryThrottle(queue_name=queue)
    for i, pk in enumerate(docs.values_list("pk", flat=True)):
        throttle.maybe_wait()
        extract_recap_pdf.apply_async((pk, skip_ocr), priority=5, queue=queue)
        if i % 1000 == 0:
            msg = f"Sent {i + 1}/{count} tasks to celery so far."
            logger.info(msg)
            sys.stdout.write(f"\r{msg}")
            sys.stdout.flush()
def get_filter_obj(request, model_class):
    """Filter ``model_class`` rows by the request's GET parameters.

    Reserved pagination/sorting keys (_p, _s, _o, _f) and empty values are
    skipped.  Returns the filtered queryset (empty on FieldError) together
    with the dict of filters that were applied.
    """
    reserved = ("_p", "_s", "_o", "_f")
    filter_dict = {}
    condition = Q()
    condition.connector = "AND"
    model_objs = QuerySet()
    try:
        for key, value in request.GET.items():
            if key in reserved or not value:
                continue
            filter_dict[key] = value
            condition.children.append((key, value))
        model_objs = model_class.objects.filter(condition)
    except FieldError:
        # An unknown field name leaves model_objs as the empty default.
        pass
    return model_objs, filter_dict
def filter_in_string(queryset: QuerySet, field_name: str, values: list):
    """
    Returns an icontains lookup OR-ed over the given values for a CharField.

    Parameters
    ----------
    queryset : :class:`~django.db.models.QuerySet`
        The filtered queryset
    field_name : str
        The name of the field the queryset is being filtered by
    values : list
        The values to filter by
    """
    if not values:
        return queryset
    # Build one icontains Q per value and OR them into a single condition.
    icontains = f"{field_name}__icontains"
    condition = reduce(operator.or_, [Q(**{icontains: value}) for value in values])
    return queryset.filter(condition).all()
def __init__(self, blast_type: str, name: str, sequence: str, gene_codes: QuerySet) -> None:
    """
    :param blast_type: new
    :param name: name given to query sequence
    :param sequence: sequence to blast given by user
    :param gene_codes: queryset of Genes to blast against
    """
    super(BLASTNew, self).__init__(blast_type, name, sequence, gene_codes)
    self.name = name
    self.sequence = sequence
    log.debug("Will do blastnew of %s", str(gene_codes))
    if gene_codes:
        self.genes = gene_codes.order_by("gene_code").values_list("gene_code", flat=True)
    else:
        self.genes = []
    # Both paths share the same basename; ``path`` is the glob for the
    # generated blast index files.
    db_basename = '_'.join(self.genes) + '_seqs.fas'
    self.path = os.path.join(self.cwd, 'db', db_basename + '.n*')
    self.db = os.path.join(self.cwd, 'db', db_basename)
class ArticlePushApi(BasicListViewSet):
    """Batch article push endpoint (pushes article URLs to the Baidu SEO API)."""
    queryset = QuerySet()
    serializer_class = DoNothingSerializer
    permission_name = permissions.PER_SYSTEM_SEO

    def post(self, request, *args, **kwargs):
        """Batch-push resource data.

        Args:
            request(Request): http request
            *args(list): positional arguments
            **kwargs(dict): keyword arguments

        Returns:
            response(Response): response data
        """
        # The default must be a string: the value is split on newlines below,
        # and the previous ``list()`` default raised AttributeError when the
        # "data" key was missing.
        data = request.data.get("data", "")
        code, res = baidu_api_put(params.SeoSite, params.SeoSiteToken,
                                  data.split('\n'))
        return self.set_response(HTTP_SUCCESS, res, status=HTTP_201_CREATED)
def _sort(self, query_set: QuerySet, sorting_option: SortingOption) -> QuerySet:
    """Apply the requested sorting option to ``query_set``."""
    key = sorting_option["keyToSort"]
    if key == "countries":
        query_set = self._sort_by_default_condition(
            query_set,
            self._get_ngo_address_condition(sorting_option, "country__name"))
    elif key == "cities":
        query_set = self._sort_by_default_condition(
            query_set,
            self._get_ngo_address_condition(sorting_option, "city"))
    elif key == "trustworthiness":
        query_set = self._sort_by_tw_value(sorting_option, query_set)
    elif key == 'reviewNumber':
        query_set = self._sort_by_ngo_number_of_reviews(sorting_option, query_set)
    else:
        # "name" and any unrecognised key use the default condition.
        query_set = self._sort_by_default_condition(query_set, sorting_option)
    # always sort if trustworthiness filter is applied
    if (self._filter_config.trustworthiness_lower_bound is not None
            and key != "trustworthiness"):
        query_set = query_set.order_by('-tw_score__total_tw_score', 'name')
    return query_set
class AuthPublicKeyView(BasicInfoViewSet):
    """RSA public-key endpoint: hands out a fresh public key and keeps the
    matching private key in the session."""
    queryset = QuerySet()
    serializer_class = DoNothingSerializer
    authentication_enable = False

    def get(self, request, *args, **kwargs):
        """Generate an RSA key pair and return the public half.

        Args:
            request(Request): http request
            *args(list): positional arguments
            **kwargs(dict): keyword arguments

        Returns:
            response(Response): response data
        """
        public_key, private_key = encryption.rsa_generate()
        # The private key never leaves the server; it is stored in the
        # session for later use.
        request.session[params.RSA_SESSION_PRIVATE_KEY] = private_key
        return Response(data={'public_key': public_key},
                        content_type=params.JSON_CONTENT_TYPE)
def filter_users(request, users_obj: QuerySet) -> QuerySet:
    """Narrow ``users_obj`` by the ``customer``, ``employee`` and ``student``
    GET parameters (values are Norwegian: Ingen/Satt, Ja/Nei/Ukjent)."""
    customer = request.GET.get('customer')
    if customer == 'Ingen':
        users_obj = users_obj.filter(customer=None)
    elif customer == 'Satt':
        users_obj = users_obj.exclude(customer=None)
    elif customer is not None:
        # Any other non-empty value is treated as a customer name.
        users_obj = users_obj.filter(customer__name=customer)

    employee = request.GET.get('employee')
    if employee == 'Ja':
        users_obj = users_obj.exclude(employee=None)
    elif employee == 'Nei':
        # No employee record, but an AD employeeID exists.
        users_obj = (users_obj.filter(employee=None)
                     .exclude(ad_object__employeeID=None))
    elif employee == 'Ukjent':
        # Neither an employee record nor an AD employeeID.
        users_obj = users_obj.filter(employee=None, ad_object__employeeID=None)

    student = request.GET.get('student')
    if student == 'Ja':
        users_obj = users_obj.exclude(student=None)
    elif student == 'Nei':
        users_obj = users_obj.filter(student=None)
    return users_obj
def connection_from_queryset_slice(
    qs: QuerySet,
    args: ConnectionArguments = None,
    connection_type: Any = Connection,
    edge_type: Any = Edge,
    pageinfo_type: Any = PageInfo,
) -> Connection:
    """Create a connection object from a QuerySet."""
    args = args or {}
    before = args.get("before")
    after = args.get("after")
    first = args.get("first")
    last = args.get("last")
    _validate_connection_args(args)

    requested_count = first or last
    # Fetch one extra row so the paginator can tell whether more pages exist.
    end_margin = requested_count + 1 if requested_count else None

    # At most one of after/before is used as the cursor here — presumably
    # _validate_connection_args rejects conflicting combinations (confirm).
    cursor = after or before
    try:
        cursor = from_global_cursor(cursor) if cursor else None
    except ValueError:
        raise GraphQLError("Received cursor is invalid.")

    sort_by = args.get("sort_by", {})
    sorting_fields = _get_sorting_fields(sort_by, qs)
    sorting_direction = _get_sorting_direction(sort_by, last)
    # The cursor encodes one value per sorting field; a length mismatch means
    # it was produced under a different ordering.
    if cursor and len(cursor) != len(sorting_fields):
        raise GraphQLError("Received cursor is invalid.")
    filter_kwargs = (_prepare_filter(cursor, sorting_fields, sorting_direction)
                     if cursor else Q())
    qs = qs.filter(filter_kwargs)
    qs = qs[:end_margin]
    edges, page_info = _get_edges_for_connection(edge_type, qs, args, sorting_fields)

    return connection_type(
        edges=edges,
        page_info=pageinfo_type(**page_info),
    )
def _apply_basic_filters(sections: QuerySet, course: CourseFilter):
    """Applies basic filters (honors, remote, async) from a CourseFilter to a
    section QuerySet.

    Raises NoSchedulesError when any filter pass leaves no sections.
    """

    def raise_if_empty(qs):
        # Shared guard, previously triplicated inline: every filter pass
        # must leave at least one section.
        if not qs:
            raise NoSchedulesError(_BASIC_FILTERS_TOO_RESTRICTIVE.format(
                subject=course.subject, course_num=course.course_num
            ))

    # Handle honors filter
    if course.honors is not BasicFilter.NO_PREFERENCE:
        if course.honors is BasicFilter.EXCLUDE:
            sections = sections.filter(honors=False)
        elif course.honors is BasicFilter.ONLY:
            sections = sections.filter(honors=True)
        raise_if_empty(sections)
    # Handle remote filter
    if course.remote is not BasicFilter.NO_PREFERENCE:
        if course.remote is BasicFilter.EXCLUDE:
            # F2F with remote option should be included regardless of web attribute,
            # but F2F with remote option has web=True
            sections = (sections.filter(remote=False)
                        | sections.filter(instructional_method=Section.F2F_REMOTE_OPTION))
        elif course.remote is BasicFilter.ONLY:
            sections = sections.filter(remote=True)
        raise_if_empty(sections)
    # Handle async filter
    if course.asynchronous is not BasicFilter.NO_PREFERENCE:
        if course.asynchronous is BasicFilter.EXCLUDE:
            sections = sections.filter(asynchronous=False)
        elif course.asynchronous is BasicFilter.ONLY:
            sections = sections.filter(asynchronous=True)
        raise_if_empty(sections)
    return sections
def test_validate_service_schedule(service_schedules: QuerySet):
    """Should validate a service schedule."""
    schedule = service_schedules.filter(service__is_base_schedule=True).first()
    service = schedule.service

    # End before start: the interval must be rejected.
    schedule.start_time = time(4)
    schedule.end_time = time(3)
    with pytest.raises(ValidationError) as error:
        validate_service_schedule(schedule)
    assert "interval is incorrect" in str(error)

    # Interval colliding with an existing schedule must be rejected.
    schedule.start_time = time(3)
    schedule.end_time = time(23)
    with pytest.raises(ValidationError) as error:
        validate_service_schedule(schedule)
    assert "not overlap" in str(error)

    # Missing start time must be rejected.
    schedule.start_time = None
    schedule.end_time = time(4)
    with pytest.raises(ValidationError) as error:
        validate_service_schedule(schedule)
    assert "is not set" in str(error)

    # Missing owner (service) must be rejected.
    schedule.start_time = time(3)
    schedule.end_time = time(4)
    schedule.service = None
    with pytest.raises(ValidationError) as error:
        validate_service_schedule(schedule)
    assert "owner is empty" in str(error)

    # A base-schedule service must not accept its own schedules.
    schedule.service = service
    with pytest.raises(ValidationError) as error:
        validate_service_schedule(schedule)
    assert "Not allowed to create schedules" in str(error)

    # After clearing the base-schedule flag, validation passes.
    schedule.service.is_base_schedule = False
    schedule.service.save()
    validate_service_schedule(schedule)
def filter_array(queryset: QuerySet, field_name: str, value: list):
    """
    Returns an exact lookup for a PostgreSQL ArrayField_.

    .. _ArrayField: https://docs.djangoproject.com/en/2.2/ref/contrib/postgres/fields/#arrayfield

    Parameters
    ----------
    queryset : :class:`~django.db.models.QuerySet`
        The filtered queryset.
    field_name : str
        The name of the field the queryset is being filtered by.
    value : list
        The values to filter by.
    """
    if not value:
        return queryset
    # Matching both containment and length yields exact (order-insensitive) matches.
    lookups = {
        f"{field_name}__contains": value,
        f"{field_name}__len": len(value),
    }
    return queryset.filter(**lookups).all()
def sanitize_queryset(self, queryset: models.QuerySet) -> models.QuerySet:
    """Validate that ``queryset`` is unordered, unsliced, and keyed by an
    allowed (integer-like) primary key, then return it ordered by pk.

    Raises ValueError on any violation.
    """
    name = self.__class__.__name__
    if queryset.ordered:
        raise ValueError(
            "You can't use %s on a QuerySet with an ordering." % name)
    if queryset.query.low_mark or queryset.query.high_mark:
        raise ValueError("You can't use %s on a sliced QuerySet." % name)

    pk = queryset.model._meta.pk
    allowed = isinstance(pk, self.ALLOWED_PK_FIELD_CLASSES)
    if not allowed and isinstance(pk, models.ForeignKey):
        # A FK pk is fine when the field it points at is itself allowed.
        allowed = isinstance(
            pk.foreign_related_fields[0], self.ALLOWED_PK_FIELD_CLASSES)
    if not allowed:
        # If your custom field class should be allowed, just add it to
        # ALLOWED_PK_FIELD_CLASSES
        raise ValueError(
            "You can't use %s on a model with a non-integer primary key." % name)
    return queryset.order_by("pk")
def sort_queryset(queryset: QuerySet, sort_by: SortInputObjectType, sort_enum: graphene.Enum) -> QuerySet:
    """Sort queryset according to given parameters.

    Keyword Arguments:
        queryset - queryset to be filtered
        sort_by - dictionary with sorting field and direction
    """
    if sort_by is None or not sort_by.field:
        return queryset
    # The enum may define a custom sorter (sort_by_<field>); fall back to a
    # plain order_by otherwise.
    custom_sort_by = getattr(sort_enum, f"sort_by_{sort_by.field}", None)
    if custom_sort_by is not None:
        return custom_sort_by(queryset, sort_by)
    return queryset.order_by(f"{sort_by.direction}{sort_by.field}")
def filter(self, user, queryset: QuerySet, perm: int) -> QuerySet:
    """Filter a QuerySet of CremeEntities by the credentials related to this role.
    Beware, the model class must be a child class of CremeEntity, but cannot be
    CremeEntity itself.

    @param user: A <django.contrib.auth.get_user_model()> instance (eg: CremeUser) ;
           should be related to the UserRole instance.
    @param queryset: A Queryset on a child class of CremeEntity.
    @param perm: A value in (EntityCredentials.VIEW, EntityCredentials.CHANGE etc...).
    @return: A new (filtered) queryset on the same model.
    """
    model = queryset.model
    assert issubclass(model, CremeEntity)
    assert model is not CremeEntity

    if not self.is_app_allowed_or_administrable(model._meta.app_label):
        # Whole app forbidden for this role: nothing is visible.
        return queryset.none()
    return SetCredentials.filter(self._get_setcredentials(), user, queryset, perm)
def ANNOTATE_BREW_NUMBER(qs: QuerySet, annotate_name: str = 'brew_number') -> QuerySet:
    """
    Annotates QuerySet `qs` with name `annotate_name` (defaults to "brew_number")

    :param qs: BaseBatch QuerySet to annotate
    :type qs: QuerySet
    :param annotate_name: Name to provide for the annotation. Defaults to `brew_number`
    :type annotate_name: str
    :return: Annotated QuerySet
    :rtype: QuerySet
    """
    # (Removed a large block of deprecated commented-out code that was kept
    # as a bare string literal.)
    # Builds e.g. 210004 from date year 2021 and batch 4: the two-digit year
    # concatenated with the zero-padded (width 4) batch number, cast to a
    # 6-digit decimal.  `annotate_name` lets callers choose the field name.
    annotate_dict = {
        annotate_name: Cast(
            Concat(
                Right(Cast(ExtractYear('date'),
                           output_field=CharField(max_length=4)), 2),
                LPad(Cast('batch', output_field=CharField(max_length=4)),
                     4, Value('0'), output_field=CharField(max_length=4)),
                output_field=DecimalField(max_digits=6, decimal_places=0)),
            output_field=DecimalField(max_digits=6, decimal_places=0))
    }
    return qs.annotate(**annotate_dict)
def _check_auction_post_request_cash_requirements(auction: Auction, user_query: QuerySet,
                                                  bid_value: int) -> Tuple:
    """
    Check whether the cash requirements for placing a bid are fulfilled:

    - the user must have enough cash for the bid
    - the bid must be greater than the last bid value and the starting price

    :param auction: aimed auction
    :param user_query: query of users with special uuid
    :param bid_value: value of new bid (try)
    :return: response status and information about occurred error
    """
    user = user_query.first()

    # Not enough cash in the wallet.
    if user.cash < bid_value:
        return ({
            "summary": "User has not enough cash for this bet.",
            "userCash": user.cash,
            "bidValue": bid_value,
            "errorMessage": "User has less cash than minimal bid value.",
        }, 400)

    # Bid must beat both the last bid (when present) and the starting price.
    last_bid = auction.last_bid_value
    bid_too_low = ((last_bid is not None and bid_value <= last_bid)
                   or bid_value <= auction.starting_price)
    if bid_too_low:
        return ({
            "summary": "Wrong bid value.",
            "auctionStartingPrice": auction.starting_price,
            "auctionLastBidValue": auction.last_bid_value,
            "bidValue": bid_value,
            "errorMessage": "Bid must be greater than starting price and last bid value.",
        }, 400)

    return {}, 0
def get_provider_payment_info(
    list_of_settlement_payments: List[ForcedPaymentEvent],
    list_of_transactions: List[BatchTransferEvent],
    settlement_payment_claims: QuerySet,
    subtask_results_accepted_list: List[SubtaskResultsAccepted],
) -> Tuple[int, int]:
    """Compute (amount_paid, amount_pending) for a provider.

    amount_paid sums batch transfers, already-satisfied settlement claims and
    forced settlement payments; amount_pending is the accepted subtasks'
    total price minus amount_paid.
    """
    assert isinstance(settlement_payment_claims, QuerySet)
    assert isinstance(list_of_settlement_payments, list)
    assert isinstance(list_of_transactions, list)
    assert isinstance(subtask_results_accepted_list, list)

    # Coalesce turns the "no rows" NULL into 0.
    satisfied_claims_sum = settlement_payment_claims.aggregate(
        sum_of_already_satisfied_claims=Coalesce(Sum('amount'), 0)
    )['sum_of_already_satisfied_claims']

    amount_paid = (
        sum_payments(list_of_transactions)
        + satisfied_claims_sum
        + sum_payments(list_of_settlement_payments)
    )
    amount_pending = sum_subtask_price(subtask_results_accepted_list) - amount_paid
    return (amount_paid, amount_pending)
def test_closed_periods_restriction_apply(
        professional_closed_periods: QuerySet):
    """Should apply the closed periods restriction."""
    professional = professional_closed_periods.first().professional
    restriction = ClosedPeriodsRestriction()
    # Midnight today, the base of the 20-day request window.
    start = arrow.utcnow().replace(
        hour=0,
        minute=0,
        second=0,
        microsecond=0,
    )
    request = Request()
    request.start_datetime = start
    request.end_datetime = start.shift(days=20)

    # slot1 (day 0-1) survives the restriction; slot2 (days 6-7) is
    # expected to be filtered out (see the final assertion).
    slot1 = AvailabilitySlot()
    slot1.professional = professional
    slot1.start_datetime = start.datetime
    slot1.end_datetime = start.shift(days=1).datetime

    slot2 = AvailabilitySlot()
    slot2.professional = professional
    slot2.start_datetime = start.shift(days=6).datetime
    slot2.end_datetime = start.shift(days=7).datetime

    # Applying without a request must fail.
    with pytest.raises(AvailabilityValueError) as error:
        restriction.apply()
    assert "request is not set." in str(error)

    # Applying without slots must fail.
    restriction.set_request(request)
    with pytest.raises(AvailabilityValueError) as error:
        restriction.apply()
    assert "slots are not set." in str(error)

    restriction.set_slots([slot1, slot2])
    result = restriction.apply()
    assert result == [slot1]
def _filter_request(self, request: request.Request, queryset: QuerySet, team: Team) -> QuerySet:
    """Apply the supported person-list GET filters (id, uuid, search, cohort,
    properties, category) to ``queryset`` and prefetch distinct ids."""
    if request.GET.get("id"):
        ids = request.GET["id"].split(",")
        queryset = queryset.filter(id__in=ids)
    if request.GET.get("uuid"):
        uuids = request.GET["uuid"].split(",")
        queryset = queryset.filter(uuid__in=uuids)
    if request.GET.get("search"):
        parts = request.GET["search"].split(" ")
        contains = []
        for part in parts:
            if ":" in part:
                # Colon-containing token: match persons having the property
                # named by the text after the colon.
                queryset = queryset.filter(
                    properties__has_key=part.split(":")[1])
            else:
                contains.append(part)
        # The remaining words are matched against either the serialized
        # properties or the person's distinct ids.
        queryset = queryset.filter(
            Q(properties__icontains=" ".join(contains))
            | Q(persondistinctid__distinct_id__icontains=" ".join(
                contains))).distinct("id")
    if request.GET.get("cohort"):
        queryset = queryset.filter(cohort__id=request.GET["cohort"])
    if request.GET.get("properties"):
        queryset = queryset.filter(
            Filter(data={
                "properties": json.loads(request.GET["properties"])
            }).properties_to_Q(team_id=team.pk))

    # "category" selects identified vs anonymous persons by choosing either
    # filter() or exclude() over the same is_identified=True condition.
    queryset_category_pass = None
    category = request.query_params.get("category")
    if category == "identified":
        queryset_category_pass = queryset.filter
    elif category == "anonymous":
        queryset_category_pass = queryset.exclude
    if queryset_category_pass is not None:
        queryset = queryset_category_pass(is_identified=True)

    queryset = queryset.prefetch_related(
        Prefetch("persondistinctid_set", to_attr="distinct_ids_cache"))
    return queryset
def _filter_request(self, request: request.Request, queryset: QuerySet) -> QuerySet:
    """Apply the supported insight-list GET filters (saved, user, favorited,
    date_from/date_to, insight, search) to ``queryset``."""
    for key in request.GET.dict():
        if key == "saved":
            if str_to_bool(request.GET["saved"]):
                # "Saved" includes anything pinned to a dashboard.
                queryset = queryset.filter(Q(saved=True) | Q(dashboard__isnull=False))
            else:
                queryset = queryset.filter(Q(saved=False))
        elif key == "user":
            queryset = queryset.filter(created_by=request.user)
        elif key == "favorited":
            queryset = queryset.filter(Q(favorited=True))
        elif key == "date_from":
            queryset = queryset.filter(
                last_modified_at__gt=relative_date_parse(request.GET["date_from"]))
        elif key == "date_to":
            queryset = queryset.filter(
                last_modified_at__lt=relative_date_parse(request.GET["date_to"]))
        elif key == INSIGHT:
            queryset = queryset.filter(filters__insight=request.GET[INSIGHT])
        elif key == "search":
            term = request.GET["search"]
            queryset = queryset.filter(
                Q(name__icontains=term) | Q(derived_name__icontains=term))
    return queryset
def stickiness(
    filtered_events: QuerySet, entity: Entity, filter: Filter, team_id: int
) -> Dict[str, Any]:
    """Compute stickiness: for each possible number of distinct active days in
    the date range, how many persons were active on that many days."""
    if not filter.date_to or not filter.date_from:
        raise ValueError("_stickiness needs date_to and date_from set")
    # NOTE(review): +2 looks like inclusive-span + 1 (labels below run
    # range(1, range_days)) — confirm the intended off-by.
    range_days = (filter.date_to - filter.date_from).days + 2

    # Per-person count of distinct active days, capped at the range length.
    events = (
        filtered_events.filter(filter_events(team_id, filter, entity))
        .values("person_id")
        .annotate(day_count=Count(functions.TruncDay("timestamp"), distinct=True))
        .filter(day_count__lte=range_days)
    )

    # Wrap the ORM-generated SQL in a manual GROUP BY over day_count so we
    # can count persons per day_count in a single query.
    events_sql, events_sql_params = events.query.sql_with_params()
    aggregated_query = "select count(v.person_id), v.day_count from ({}) as v group by v.day_count".format(
        events_sql
    )

    aggregated_counts = execute_custom_sql(aggregated_query, events_sql_params)

    # Map day_count -> number of persons.
    response: Dict[int, int] = {}
    for result in aggregated_counts:
        response[result[1]] = result[0]

    labels = []
    data = []
    for day in range(1, range_days):
        label = "{} day{}".format(day, "s" if day > 1 else "")
        labels.append(label)
        # Missing buckets count as zero.
        data.append(response[day] if day in response else 0)

    return {
        "labels": labels,
        "days": [day for day in range(1, range_days)],
        "data": data,
        "count": sum(data),
    }
def get_max(qs: QuerySet) -> int:
    """Return the largest ``number`` value in ``qs`` (None for an empty queryset)."""
    aggregated = qs.aggregate(models.Max('number'))
    return aggregated['number__max']
def filter_queryset(self, qs: QuerySet) -> QuerySet:
    """Keep rows whose min_years_experience lies within
    [self.min_years_experience, self.max_years_experience] and whose
    education_level matches exactly."""
    experience_bounds = {
        'min_years_experience__gte': self.min_years_experience,
        'min_years_experience__lte': self.max_years_experience,
    }
    return qs.filter(education_level=self.education_level, **experience_bounds)
def filter_queryset(self, qs: QuerySet) -> QuerySet:
    """Keep rows with at least the required experience and an education level
    from the set of valid levels."""
    valid_levels = self.get_valid_education_levels()
    return qs.filter(min_years_experience__gte=self.min_years_experience,
                     education_level__in=valid_levels)
def gather_hot_conversations(user_profile: UserProfile, stream_messages: QuerySet) -> List[Dict[str, Any]]:
    # Gather stream conversations of 2 types:
    # 1. long conversations
    # 2. conversations where many different people participated
    #
    # Returns a list of dictionaries containing the templating
    # information for each hot conversation.
    conversation_length = defaultdict(int)  # type: Dict[Tuple[int, Text], int]
    conversation_diversity = defaultdict(set)  # type: Dict[Tuple[int, Text], Set[Text]]
    for user_message in stream_messages:
        if not user_message.message.sent_by_human():
            # Don't include automated messages in the count.
            continue

        # A conversation is identified by (stream recipient type_id, subject).
        key = (user_message.message.recipient.type_id,
               user_message.message.subject)
        conversation_diversity[key].add(
            user_message.message.sender.full_name)
        conversation_length[key] += 1

    # Rank by number of distinct participants...
    diversity_list = list(conversation_diversity.items())
    diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)

    # ... and separately by message count.
    length_list = list(conversation_length.items())
    length_list.sort(key=lambda entry: entry[1], reverse=True)

    # Get up to the 4 best conversations from the diversity list
    # and length list, filtering out overlapping conversations.
    hot_conversations = [elt[0] for elt in diversity_list[:2]]
    for candidate, _ in length_list:
        if candidate not in hot_conversations:
            hot_conversations.append(candidate)
        if len(hot_conversations) >= 4:
            break

    # There was so much overlap between the diversity and length lists that we
    # still have < 4 conversations. Try to use remaining diversity items to pad
    # out the hot conversations.
    num_convos = len(hot_conversations)
    if num_convos < 4:
        hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]])

    hot_conversation_render_payloads = []
    for h in hot_conversations:
        stream_id, subject = h
        users = list(conversation_diversity[h])
        count = conversation_length[h]

        # We'll display up to 2 messages from the conversation.
        first_few_messages = [user_message.message for user_message in stream_messages.filter(
            message__recipient__type_id=stream_id,
            message__subject=subject)[:2]]

        teaser_data = {"participants": users,
                       "count": count - len(first_few_messages),
                       "first_few_messages": build_message_list(
                           user_profile, first_few_messages)}

        hot_conversation_render_payloads.append(teaser_data)
    return hot_conversation_render_payloads
def assert_is_proxy(qs: QuerySet):
    """Is it proxy for only one related model?"""
    distinct_count = (qs.order_by('related_model_name')
                        .distinct('related_model_name')
                        .count())
    assert distinct_count <= 1, \
        'You should split your model pages by proxy, before register it.'