def order_articles(self, articles: QuerySet):
    """Order articles by number of comments (descending).

    For large querysets (> 30 articles) the set is first restricted to the
    20-30 most recently edited articles, which are then ordered by comment
    count. Returns a list of Article instances.

    BUGFIX: the original code indexed the recent-articles queryset with
    ``[random.randint(20, 30)]`` (selecting a single article instead of
    slicing) and discarded the result of the follow-up ``annotate`` call.
    """
    count = articles.count()
    if count == 0:
        return []
    if count == 1:
        return list(articles)
    if count <= 30:
        return list(
            articles.annotate(num_comments=Count('comment'))
                    .order_by('-num_comments'))
    # Restrict to the N most recently edited articles; a sliced queryset
    # cannot be re-ordered, so materialize the pks and re-query.
    n = random.randint(20, 30)
    recent_pks = list(
        articles.order_by('-last_edit').values_list('pk', flat=True)[:n])
    return list(
        articles.filter(pk__in=recent_pks)
                .annotate(num_comments=Count('comment'))
                .order_by('-num_comments'))
def get_ordering_value(self, qs: QuerySet, value: Any) -> Tuple[QuerySet, OrderingFieldType]:
    """Annotate ``qs`` with the answer value of the question identified by
    ``value`` and return the queryset plus an ordering expression for it.

    First, join the requested answer, then annotate the QS accordingly.
    Last, return a field corresponding to the value.

    Raises:
        exceptions.ValidationError: if the question's type has no usable
            answer field for ordering.
    """
    # BUGFIX: this lookup had been commented out while ``question`` is
    # still used below, causing a NameError at runtime.
    question = Question.objects.get(pk=value)
    QUESTION_TYPE_TO_FIELD = {
        Question.TYPE_INTEGER: "value",
        Question.TYPE_FLOAT: "value",
        Question.TYPE_DATE: "date",
        Question.TYPE_CHOICE: "value",
        Question.TYPE_TEXTAREA: "value",
        Question.TYPE_TEXT: "value",
        Question.TYPE_FILE: "file",
        Question.TYPE_DYNAMIC_CHOICE: "value",
    }
    try:
        value_field = QUESTION_TYPE_TO_FIELD[question.type]
    except KeyError:  # pragma: no cover
        raise exceptions.ValidationError(
            f"Question '{question.slug}' has unsupported type {question.type} for ordering"
        )
    # Subquery pulling the matching answer's value for each outer document.
    answers_subquery = Subquery(
        Answer.objects.filter(
            question=question,
            document=OuterRef(f"{self._document_locator_prefix}pk"),
        ).values(value_field))
    ann_name = f"order_{value}"
    qs = qs.annotate(**{ann_name: answers_subquery})
    # TODO: respect document_via
    return qs, F(ann_name)
def add_column_to_queryset(column: str, queryset: QuerySet) -> Tuple[QuerySet, str]:
    """
    Return a queryset that contains the data for the passed SQLite column name.

    Non-material columns (those that resolve to an ORM expression) are added
    as annotations; the returned name is the one callers should select. When
    the model already has an attribute with the column's name, the annotation
    is suffixed with ``_out`` to avoid a clash.
    """
    expr = column_to_expr(column, queryset.model)
    if not isinstance(expr, Expression):
        # Material field: nothing to annotate, the column exists as-is.
        return queryset, column
    out_name = f"{column}_out" if hasattr(queryset.model, column) else column
    annotated = queryset.annotate(**{out_name: expr})
    return annotated, out_name
def tag_cloud_context(tags: QuerySet) -> Dict[Any, Any]:
    """Map each tag to a font size proportional to its post count.

    Sizes are linearly interpolated between ``min_size`` and ``max_size``
    across the observed post-count range; a flat distribution gets the
    midpoint size.

    BUGFIX: the interpolation previously computed ``min_size + slope * count``
    instead of ``min_size + slope * (count - min_count)``, so the least-used
    tag did not map to ``min_size`` nor the most-used one to ``max_size``.
    """
    if not tags.exists():
        return {}
    min_size = 9
    max_size = 20
    # Materialize once: the original indexed the queryset three times,
    # issuing extra COUNT/LIMIT queries.
    tags = list(tags.annotate(Count('posts')).order_by('posts__count'))
    min_count = tags[0].posts__count
    max_count = tags[-1].posts__count

    def font_size(count):
        if max_count == min_count:
            return (max_size + min_size) / 2
        slope = (max_size - min_size) / (max_count - min_count)
        # Interpolate relative to min_count so the endpoints map exactly
        # to min_size and max_size.
        return min_size + slope * (count - min_count)

    return {tag: font_size(tag.posts__count) for tag in tags}
def apply(cls, query: str, qs: QuerySet) -> QuerySet:
    """Filter ``qs`` by a weighted full-text search over ``cls.vector``.

    Each whitespace-separated word of ``query`` is OR-combined into one
    SearchQuery; the class's ``vector`` dict (field -> weight) defines the
    SearchVector. Results matching the query are ranked and ordered by
    descending rank. An empty ``query`` returns ``qs`` untouched.

    Raises:
        NotImplementedError: if the subclass did not define ``vector``.
    """
    if not query:
        return qs

    # OR every query word into a single search query.
    search_query = SearchQuery('')
    for word in query.split():
        search_query = search_query | SearchQuery(word)

    if cls.vector is None:
        raise NotImplementedError(
            'Attribute "vector" of type dict must be implemented.')

    # Combine the weighted per-field vectors into one.
    vector = None
    for field, weight in cls.vector.items():
        term = SearchVector(field, weight=weight)
        vector = term if not vector else vector + term

    if vector:
        annotated = qs.annotate(
            search=vector, rank=SearchRank(vector, search_query))
        qs = annotated.filter(search=search_query).order_by('-rank')
    return qs
def at_fps(qs: QuerySet, n: int = 1) -> QuerySet:
    """Subsample frames to roughly ``n`` frames per second.

    Keeps frames whose number is a multiple of ``video.fps / n``, computed
    in the database. Assumes fps cast to integer — TODO confirm fractional
    fps videos are acceptable to truncate.
    """
    fps = Cast('video__fps', models.IntegerField())
    step = fps / n
    return qs.annotate(_tmp=F('number') % step).filter(_tmp=0)
def qs_to_result(result: QuerySet,
                 stride: int = 1,
                 group: bool = False,
                 shuffle: bool = False,
                 deterministic_order: bool = False,
                 custom_order_by_id: List[int] = None,
                 frame_major: bool = True,
                 show_count: bool = False,
                 limit: int = 100,
                 color: str = "red") -> Dict:
    """Materialize a queryset into the viewer's result dict.

    Dispatches on ``result.model``: Frame subclasses, the per-frame
    annotation classes (Face/FaceGender/FaceIdentity/Object/Pose/
    FaceLandmarks), Track subclasses and Video subclasses are each
    flattened into a list of {video, min_frame, ...} elements, then
    wrapped into 'flat' or (when ``group`` is set) per-video 'contiguous'
    groups.

    Args:
        result: queryset over one of the supported model classes.
        stride: keep every stride-th row (by slicing for frames/annotations,
            by ``id % stride`` for frame-major annotation queries).
        group: group output elements per video instead of one-per-element.
        shuffle: randomize DB order ('?') before materializing.
        deterministic_order: order by video/frame when not shuffling.
        custom_order_by_id: explicit id ordering overriding the above.
        frame_major: for annotation classes, emit one element per frame
            holding all of that frame's objects, instead of one element
            per annotation instance.
        show_count: include the total (pre-limit) row count in the output.
        limit: maximum number of elements materialized.
        color: display color attached to grouped segments.

    Returns:
        {'result': groups, 'count': count, 'type': model class name}
    """
    count = result.count() if show_count else 0
    if shuffle:
        result = result.order_by('?')
    materialized_result = []
    cls = result.model
    bases = cls.__bases__
    if bases[0] is base_models.Frame:
        # --- Frame subclasses: one element per frame, no objects. ---
        if custom_order_by_id is not None:
            # NOTE(review): .index() inside the key makes this O(n*m);
            # presumably custom_order_by_id stays small.
            result = sorted(result, key=lambda x: custom_order_by_id.index(x.id))
        elif not shuffle and deterministic_order:
            result = result.order_by('video', 'number')
        for frame in result[:limit * stride:stride]:
            materialized_result.append({
                'video': frame.video.id,
                'min_frame': frame.number,
                'objects': []
            })
    elif (cls is Face or cls is FaceGender or cls is FaceIdentity
          or cls is Object or cls is Pose or cls is FaceLandmarks):
        # --- Per-frame annotation classes. ---
        # frame_path is the ORM path from the annotation row to its frame;
        # gender/identity/landmarks hang off a Face, the rest off a Frame.
        if cls is FaceGender or cls is FaceIdentity or cls is FaceLandmarks:
            frame_path = 'face__frame'
            if cls is FaceGender:
                result = result.select_related('face', 'gender')
            elif cls is FaceIdentity:
                result = result.select_related('face', 'identity')
            else:
                result = result.select_related('face')
        else:
            frame_path = 'frame'
            result = result.select_related(frame_path)
        if not shuffle and deterministic_order:
            result = result.order_by(frame_path + '__video',
                                     frame_path + '__number')
        # Serializer for one annotation instance. The branch condition above
        # guarantees exactly one of these matches, so fn is always bound.
        if cls is Face:
            fn = bbox_to_dict
        elif cls is Object:
            fn = object_to_dict
        elif cls is FaceGender:
            fn = gender_to_dict
        elif cls is FaceIdentity:
            fn = identity_to_dict
        elif cls is Pose:
            fn = pose_to_dict
        elif cls is FaceLandmarks:
            fn = face_landmarks_to_dict
        if frame_major:
            # One output element per frame, carrying every matching
            # annotation on that frame.
            frame_ids = set()

            def get_all_results():
                # Closure over frame_ids/frame_path/result: fetch every
                # annotation on the selected frames, keyed by frame id.
                all_results = collect(
                    list(result.filter(**{frame_path + '__in': list(frame_ids)})),
                    lambda t: access(t, frame_path + '__id'))
                return all_results

            if custom_order_by_id is None:
                # Stride by id (id % stride == 0), collect up to `limit`
                # distinct (video, frame number, frame id) triples.
                frames = set()
                for inst in list(
                        result.values(
                            frame_path + '__video', frame_path + '__number',
                            frame_path + '__id').annotate(
                                m=F('id') % stride).filter(m=0)[:limit]):
                    frames.add((inst[frame_path + '__video'],
                                inst[frame_path + '__number'],
                                inst[frame_path + '__id']))
                    frame_ids.add(inst[frame_path + '__id'])
                all_results = get_all_results()
                frames = list(frames)
                frames.sort(key=itemgetter(0, 1))
            else:
                # Order frames by the earliest position (in
                # custom_order_by_id) of any annotation they contain.
                frames = {}
                id_to_position = defaultdict(lambda: float('inf'))
                for i, id_ in enumerate(custom_order_by_id):
                    id_to_position[id_] = i
                for inst in list(
                        result.values(
                            'id', frame_path + '__video',
                            frame_path + '__number',
                            frame_path + '__id').annotate(
                                m=F('id') % stride).filter(m=0)):
                    frame_key = (inst[frame_path + '__video'],
                                 inst[frame_path + '__number'],
                                 inst[frame_path + '__id'])
                    frames[frame_key] = min(
                        id_to_position[inst['id']],
                        frames[frame_key] if frame_key in frames else float('inf'))
                    frame_ids.add(inst[frame_path + '__id'])
                all_results = get_all_results()
                frames = sorted([x for x in frames.items()], key=lambda x: x[1])
                frames = [x[0] for x in frames[:limit]]
            for (video, frame_num, frame_id) in frames:
                materialized_result.append({
                    'video': video,
                    'min_frame': frame_num,
                    'objects': [fn(inst) for inst in all_results[frame_id]]
                })
        else:
            # Instance-major: one output element per annotation instance.
            # NOTE(review): uses inst.frame directly, so this path assumes
            # frame_path == 'frame' (i.e. not Face-derived classes) —
            # confirm against callers.
            for inst in result[:limit * stride:stride]:
                r = {
                    'video': inst.frame.video.id,
                    'min_frame': inst.frame.number,
                    'objects': [fn(inst)]
                }
                materialized_result.append(r)
    elif bases[0] is base_models.Track:
        # --- Track subclasses: one element per non-empty track. ---
        if not shuffle and deterministic_order:
            result = result.order_by('video', 'min_frame')
        # NOTE(review): `result` (the queryset) is shadowed by the dict
        # below; harmless only because the queryset isn't used afterwards.
        for t in result.annotate(
                duration=Track.duration_expr()).filter(duration__gt=0)[:limit]:
            result = {
                'video': t.video.id,
                'track': t.id,
                'min_frame': t.min_frame,
                'max_frame': t.max_frame,
            }
            materialized_result.append(result)
        if custom_order_by_id is not None:
            materialized_result.sort(
                key=lambda x: custom_order_by_id.index(x['track']))
        else:
            materialized_result.sort(key=itemgetter('video', 'min_frame'))
    elif bases[0] is base_models.Video:
        # --- Video subclasses: whole-video segments. ---
        if custom_order_by_id is not None:
            raise NotImplementedError()
        if not shuffle and deterministic_order:
            result = result.order_by('id')
        for v in result[:limit]:
            materialized_result.append({
                'video': v.id,
                'min_frame': 0,
                'max_frame': v.num_frames})
    else:
        raise Exception("Unsupported class")
    ty_name = cls.__name__
    if group:
        # Group elements per video; each video becomes one 'contiguous'
        # group whose segments are sorted by starting frame.
        by_video = collect(materialized_result, itemgetter('video'))
        videos = collect(Video.objects.filter(id__in=by_video.keys()).all(),
                         attrgetter('id'))
        groups = [{
            'type': 'contiguous',
            'label': video,
            'num_frames': videos[video][0].num_frames,
            'elements': [{
                'video': video,
                'segments': sorted(by_video[video], key=itemgetter('min_frame')),
                'color': color
            }]
        } for video in sorted(by_video.keys())]
    else:
        groups = [{'type': 'flat', 'label': '', 'elements': [r]}
                  for r in materialized_result]
    return {'result': groups, 'count': count, 'type': ty_name}
class QueryBuilder:
    """
    Name : QueryBuilder
    Input : Model and request
    Desc : Generic: takes a model and applies filter, group and aggregation,
           returning the row data when the build method is called.
    """

    def __init__(self, model, request):
        """Initialize default values from the request's GET parameters."""
        # NOTE: when present these GET values are strings; the list/dict
        # fallbacks are only "empty" placeholders tested by the _get_*
        # helpers below.
        self.agg = request.GET.get('agg', [])
        self.group = request.GET.get('group', [])
        self.where = request.GET.get('where', {})
        self.sort = request.GET.get('sort', [])
        self.model = model
        self.qs = QuerySet(model)
        # Leading literal on an agg column selects the aggregate function.
        self.agg_literal_map = {'+': Sum, '-': Avg, '*': Count}
        self.agg_map = {}

    def _get_agg_col_mapping(self, column):
        """
        Input : column
        Output: If the first char is one of the literals below, that
        aggregation is applied; otherwise Sum is the default:
            + -> Sum
            - -> Avg
            * -> Count
        e.g.: -col1 -> Avg(col1)
        """
        _aggregation = Sum
        _column = column
        if column[0] in self.agg_literal_map:
            _aggregation = self.agg_literal_map[column[0]]
            _column = column[1:]
        self.agg_map[_column] = _aggregation(_column)

    def _get_agg(self):
        """
        Splits the agg param from the request and invokes
        _get_agg_col_mapping to construct the aggregation map.
        """
        # BUGFIX: the previous `is not None` guard crashed on the list
        # default ([] has no .split); truthiness covers both '' and [].
        if self.agg:
            for column in self.agg.split(","):
                self._get_agg_col_mapping(column)
        return self.agg_map

    def _get_group(self):
        """Splits the group param, otherwise returns the empty list."""
        if len(self.group):
            return self.group.split(",")
        return EMPTY_LIST

    def _get_where(self):
        """
        Converts the JSON-type string where clause to a dict. Customisation
        done only for date_to and date_from, which are mapped to the
        out-of-the-box date__gte and date__lte fields.
        """
        if self.where:
            json_where_clause = json.loads(self.where)
            if 'date_from' in json_where_clause:
                json_where_clause["date__gte"] = json_where_clause.pop("date_from")
            if 'date_to' in json_where_clause:
                json_where_clause["date__lte"] = json_where_clause.pop("date_to")
            return json_where_clause
        return EMPTY_DICT

    def _get_order_by(self):
        """If the sort param is present then splits, otherwise returns the
        empty list."""
        if self.sort:
            return self.sort.split(",")
        return EMPTY_LIST

    def build(self):
        """Applies filter, group, sort and agg to the queryset and returns
        the resulting rows as a list."""
        is_group_by = False
        # Calculated field: cost per install.
        cpi = ExpressionWrapper(F('spend') / F('installs'),
                                output_field=FloatField())
        self.qs = self.qs.annotate(cpi=cpi)
        # Aggregations apply only when both group and agg were requested.
        if len(self._get_group()) > 0 and len(self._get_agg()) > 0:
            is_group_by = True
            self.qs = self.qs.values(*self._get_group()) \
                .annotate(**self._get_agg())
        # Where clause.
        if len(self._get_where()) > 0:
            self.qs = self.qs.filter(**self._get_where())
        # Sorting.
        if len(self._get_order_by()) > 0:
            self.qs = self.qs.order_by(*self._get_order_by())
        # Return the list of rows; grouped querysets are already dicts,
        # otherwise materialize via .values().
        if is_group_by:
            return list(self.qs)
        self.qs = self.qs.values()
        return list(self.qs)
def cached_col(self):
    """Return the resolved ORM annotation expression for this column.

    Builds a throwaway queryset over ``self.model``, annotates it with
    ``self.function`` under ``self.attname``, and extracts the compiled
    annotation from the query. NOTE(review): assigning ``_fields`` pokes
    a private QuerySet attribute — presumably to mimic a ``.values()``
    state; confirm against the Django version in use.
    """
    throwaway = QuerySet(model=self.model)
    throwaway._fields = {'id'}
    annotated = throwaway.annotate(**{self.attname: self.function})
    annotated = annotated.values_list(self.attname)
    return annotated.query.annotations[self.attname]