def prepare_filter_data(qs: models.QuerySet) -> Dict:
    """Prepares the data for the course filter based on a given queryset."""
    all_effects = Effects.objects.all().values_list('id', 'group_name', named=True)
    all_tags = Tag.objects.all().values_list('id', 'full_name', named=True)
    all_owners = qs.values_list(
        'owner', 'owner__user__first_name', 'owner__user__last_name', named=True).distinct()
    all_types = qs.values_list('course_type', 'course_type__name', named=True).distinct()
    return {
        'allEffects': {e.id: e.group_name for e in all_effects},
        'allTags': {t.id: t.full_name for t in all_tags},
        'allOwners': {
            o.owner: [o.owner__user__first_name, o.owner__user__last_name] for o in all_owners
        },
        'allTypes': {c.course_type: c.course_type__name for c in all_types},
    }
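A minimal usage sketch (the Course model and the view below are assumptions, not part of the snippet above): the returned dict is plain, JSON-serializable data, so it can go straight into a template context or an API response.

from django.shortcuts import render

# Hypothetical view; Course is an assumed model exposing the owner and course_type relations used above.
def course_filter_view(request):
    qs = Course.objects.filter(visible=True)
    return render(request, 'courses/filter.html', {'filterData': prepare_filter_data(qs)})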
Example #2
    def get_papers_no_query(self, papers: QuerySet):
        """
        If the user did not provide a query, we either show the newest papers or show the papers
        with the best-matching category first. Papers with the same score are sorted by recency,
        so we either give every paper a score of 1 or use its category membership score.
        """

        category_ids = self.form['categories']

        filtered_dois = list(papers.values_list('doi', flat=True))

        score_table = dict()

        for doi in filtered_dois:
            score_table[doi] = 1

        if category_ids and len(category_ids) == 1:
            try:
                category = Category.objects.get(pk=category_ids[0])

                memberships = CategoryMembership.objects.filter(paper__in=papers, category=category). \
                    annotate(doi=F('paper__doi'))

                for membership in memberships:
                    score_table[membership.doi] = membership.score

            except Category.DoesNotExist:
                raise Exception("Provided unknown category")
            except CategoryMembership.DoesNotExist:
                raise Exception(
                    "Filtering yielded incorrect papers for category")

        return score_table
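A sketch of how the returned score table might be consumed (the `search` instance and the `published_at` field are assumptions): papers are ranked by score first and recency second, matching the docstring.

# Hypothetical consumer; `search` is an instance of the class the method above belongs to,
# and `published_at` is an assumed date field on the paper model.
score_table = search.get_papers_no_query(papers)
ranked = sorted(
    papers,
    key=lambda paper: (score_table.get(paper.doi, 0), paper.published_at),
    reverse=True,  # highest score first, newest first on ties
)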
Example #3
    def apply(self, queryset: QuerySet) -> Optional[HttpResponse]:
        values = queryset.values_list(*self.fields)

        response = HttpResponse(content_type="text/csv")
        writer = csv.writer(response)
        writer.writerows(values)
        return response
Example #4
    def get_budget_stats(qs: QuerySet) -> list:
        if len(qs) == 0:
            return []

        budgets = Budget.objects.filter(
            id__in=set(qs.values_list("budget", flat=True)))
        start_date = qs.last().date
        end_date = qs.first().date
        date_range = (start_date, end_date)

        stats = []
        for budget in budgets:
            budget_stats = {
                "id": budget.id,
                "name": budget.name,
                "initial_balance": budget.balance(Q(date__lt=start_date)),
                "final_balance": budget.balance(Q(date__lte=end_date)),
                "income": Transaction.objects.filter(
                    budget=budget, date__range=date_range,
                    income=True).aggregate(total=Sum("amount"))["total"] or 0,
                "outcome": Transaction.objects.filter(
                    budget=budget, date__range=date_range,
                    income=False).aggregate(total=Sum("amount"))["total"] or 0,
            }
            budget_stats["difference"] = (budget_stats["income"] +
                                          budget_stats["outcome"])
            stats.append(budget_stats)

        return stats
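A hypothetical call site (the Transaction filter is an assumption). Note the helper reads the range end from qs.first() and the start from qs.last(), so it expects a queryset ordered newest-first.

# Assumed usage: order newest-first so first()/last() give the end/start of the date range.
transactions = Transaction.objects.filter(date__year=2021).order_by("-date")
for row in get_budget_stats(transactions):
    print(row["name"], row["income"], row["outcome"], row["difference"])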
Example #5
def apply_goods_rules_for_good(good, flagging_rules: QuerySet = None):
    # If flagging rules are specified then those are the only ones we apply, else get all active rules
    flagging_rules = get_active_flagging_rules_for_level(
        FlagLevels.GOOD) if not flagging_rules else flagging_rules

    # collect the good's control list entry ratings; the flagging rules are matched against these below
    ratings = list(good.control_list_entries.values_list("rating", flat=True))
    group_ratings = []
    for rating in ratings:
        group_ratings.extend(get_clc_parent_nodes(rating))

    flagging_rules = flagging_rules.filter(
        Q(matching_values__overlap=ratings)
        | Q(matching_groups__overlap=group_ratings)).exclude(
            excluded_values__overlap=(ratings + group_ratings))

    if isinstance(good, Good) and good.status != GoodStatus.VERIFIED:
        flagging_rules = flagging_rules.exclude(
            is_for_verified_goods_only=True)

    flags = flagging_rules.values_list("flag_id", flat=True)

    if flags:
        good.flags.add(*flags)
Example #6
def get_flat_values_list(qs: QuerySet, field: str) -> List[Any]:
    """获取 QuerySet 中某个字段值的列表

    example:
    >>> from myuser.models import UserProfile
    >>> emails = get_flat_values_list(UserProfile.objects.all(), 'email')
    """
    return list(qs.values_list(field, flat=True))
Example #7
 def _reqs_helper(reqs: QuerySet) -> DefaultDict[str, set]:
     res: DefaultDict[str, set] = defaultdict(set)
     misc_reqs = reqs.values_list("title", "completed")
     for m_id, c_id in misc_reqs:
         if not c_id:
             # touch the key so entries with no completions still appear in the result
             res[m_id]
         else:
             res[m_id].add(c_id)
     return res
Example #8
def query_lookup(query: models.QuerySet,
                 key: str,
                 value: str,
                 default: Optional[str] = None) -> Callable:
    """
    Convert a queryset into a lookup function with a default value.
    """
    di = dict(query.values_list(key, value))
    return lambda x: di.get(x, default)
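A hypothetical usage (the Country model is an assumption): build an id-to-name lookup once and reuse it without issuing further queries.

# Assumed model; maps each country's pk to its name, with 'unknown' as the fallback.
country_name = query_lookup(Country.objects.all(), 'id', 'name', default='unknown')
label = country_name(42)  # the name of country 42, or 'unknown' if it does not exist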
Example #9
    def export_as_xslx(modeladmin: ModelAdmin, request: HttpRequest, queryset: QuerySet) -> HttpResponse:

        # get fields to export
        opts = modeladmin.model._meta
        if not fields:
            field_names = [field.name for field in opts.fields]
        else:
            field_names = fields

        # Create a response header
        response = HttpResponse(
            content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename={}.xlsx'.format(
            str(opts).replace('.', '_'))

        # Create a new workbook
        wb = openpyxl.Workbook()
        ws = wb.active
        ws.title = str(opts).replace('.', '_')

        # Write the header (if desired)
        if header:
            def makeHeaderCell(field):
                c = cell.Cell(ws, value=field)
                c.font = styles.Font(bold=True)
                return c
            ws.append([makeHeaderCell(field) for field in field_names])

        # Write each of the rows
        for row in queryset.values_list(*field_names):
            def makeCell(prop):
                try:
                    return to_excel(prop)
                except Exception:
                    # fall back to the string representation for values openpyxl can't convert
                    return str(prop)
            ws.append([makeCell(c) for c in row])

        # adjust column widths
        # adapted from https://stackoverflow.com/a/39530676
        for col in ws.columns:
            max_length = 0
            column = col[0].column  # Get the column name
            for c in col:
                try:
                    if len(str(c.value)) > max_length:
                        max_length = len(str(c.value))
                except Exception:
                    pass
            adjusted_width = (max_length + 2) * 1.2
            ws.column_dimensions[get_column_letter(column)].width = adjusted_width

        # and export
        wb.save(response)
        return response
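The function above closes over `fields` and `header`, which are not defined locally, so it is presumably the inner function of an admin-action factory. A hedged sketch of what that enclosing function might look like (all names here are assumptions):

# Hypothetical enclosing factory; the original outer function is not shown in the snippet above.
def make_export_as_xslx_action(description="Export as XLSX", fields=None, header=True):
    def export_as_xslx(modeladmin, request, queryset):
        ...  # body as above, closing over `fields` and `header`
    export_as_xslx.short_description = description
    return export_as_xslx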
Example #10
def qs_to_df(
    *,
    qs: QuerySet,
    columns: Union[List[str], dict],
    use_iterator=True,
) -> pd.DataFrame:
    """A wrapper to get a pandas DataFrame from a queryset.
    If a dict is passed as columns parameter then keys of that dict represent
    the fields to query and the values of the dict specify the names to rename
    the queried fields to. If a value in dict is None then the corresponding
    column will not be renamed.
    ["*"] can be passed as columns to query all columns of the model
    Note that it's possible to use standard Django syntax to access nested
    relations ex: ['user__address__street', 'user__status__is_paid'], etc.
    """
    if qs is None:
        raise ValueError("None object was passed instead of a queryset.")

    dict_passed = isinstance(columns, dict)

    if dict_passed:
        fields = columns.keys()
    else:
        fields = columns
        if fields == ["*"]:
            fields = [f.name for f in qs.model._meta.fields]

    if use_iterator:
        data = qs.values_list(*fields).iterator()
    else:
        data = list(qs.values_list(*fields))

    df = pd.DataFrame(data, columns=fields)

    if dict_passed:
        return df.rename(columns={
            k: v for k, v in columns.items() if v
        })
    return df
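A hypothetical call (the Order model and field names are assumptions) showing the dict form of columns, which both selects and renames fields:

# Assumed model and fields; user__email is renamed to "email", id and total keep their names.
df = qs_to_df(
    qs=Order.objects.filter(status="paid"),
    columns={"id": None, "user__email": "email", "total": None},
)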
Example #11
def update_documents_status_impl(sender, signal, documents: QuerySet,
                                 new_status_id: int, changed_by_user: User):
    from apps.rawdb.repository.raw_db_repository import RawDbRepository
    from apps.rawdb.tasks import plan_reindex_tasks_in_chunks
    repo = RawDbRepository()
    doc_ids = set(documents.values_list('pk', flat=True))
    repo.update_documents_status(doc_ids, new_status_id)
    plan_reindex_tasks_in_chunks(
        doc_ids,
        changed_by_user.pk,
        cache_system_fields=[DocumentSystemField.status.value],
        cache_generic_fields=False,
        cache_user_fields=False)
Example #12
def flush_search_index(snapshots: QuerySet):
    if not indexing_enabled() or not snapshots:
        return
    backend = import_backend()
    snapshot_ids = (str(pk) for pk in snapshots.values_list('pk', flat=True))
    try:
        backend.flush(snapshot_ids)
    except Exception as err:
        stderr()
        stderr(
            f'[X] The search backend threw an exception={err}:',
            color='red',
        )
Example #13
def plot_measurement_by_month(queryset: QuerySet) -> Figure:
    # Calculate measurement counts by month.
    values = list(queryset.values_list(*QUERYSET_FIELDS))
    df = parse_dataframe(values)
    counts = calculate_counts(df)
    source = create_source(counts)
    # Create bokeh figure.
    x_range = FactorRange(*source.data["x_range"], group_padding=0)
    p = figure(x_range=x_range, **FIGURE_KWARGS)
    add_cumulative_axis(p, source)
    plot_bar_stack(p, source, df)
    plot_cumulative_line(p, source)
    customize_figure(p)
    return p
Example #14
    def order_cols(self, f_event_id: int, orders: QuerySet = None):
        self._cols, cols = [], []
        if not orders:
            orders = Order.objects.filter(fulfillment_event_id=f_event_id)
        fields = orders.values_list(*settings.INPUT_SHEET.get(
            'ORDER_MODEL_CUSTOMER_DETAILS_HEADER_FIELDS', []))

        for cust_order_fields, obj in zip(fields, orders):
            customer_order_product_counts = tuple([
                zero_product_count(obj.product_count(p_id))
                for p_id in self._product_ids
            ])
            cols.append(cust_order_fields + customer_order_product_counts)

        self._cols = cols
        return self._cols
Example #15
    def identifiers_dataframe(self, qs: QuerySet) -> pd.DataFrame:
        """
        Returns identifiers references for an assessment from external databases or tools.

        Args:
            qs (QuerySet): A queryset

        Returns:
            pd.DataFrame: A pandas dataframe
        """
        qs = qs.prefetch_related("identifiers")

        captured = {None, constants.HERO, constants.PUBMED}
        diff = set(
            qs.values_list("identifiers__database",
                           flat=True).distinct()) - captured
        if diff:
            logging.warning(
                f"Missing some identifier IDs from id export: {diff}")

        data = defaultdict(dict)

        # capture HERO ids
        heros = qs.filter(identifiers__database=constants.HERO).values_list(
            "id", "identifiers__unique_id")
        for hawc_id, hero_id in heros:
            data[hawc_id]["hero_id"] = int(hero_id)

        # capture PUBMED ids
        pubmeds = qs.filter(
            identifiers__database=constants.PUBMED).values_list(
                "id", "identifiers__unique_id")
        for hawc_id, pubmed_id in pubmeds:
            data[hawc_id]["pubmed_id"] = int(pubmed_id)

        # create a dataframe
        df = (pd.DataFrame.from_dict(data,
                                     orient="index").reset_index().rename(
                                         columns={"index": "reference_id"}))

        # set missing columns
        for col in ["hero_id", "pubmed_id"]:
            if col not in df.columns:
                df[col] = None

        return df
Example #16
def filter_list(field: str, model: Type[Model] = None,
                queryset: QuerySet = None, unique=False):
    """
    Generate a list with field values to be used with filter lists
    :param field: Field name
    :param model: Model object
    :param queryset: Queryset object
    :param unique: Return only unique records
    :return: list with values
    """
    if model:
        queryset = model.objects.all()
    if unique:
        queryset = queryset.order_by(field).distinct()

    values = queryset.values_list(field, flat=True)
    return list(values)
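Two hypothetical calls (the Book model is an assumption): one starting from a model class, one from an already-filtered queryset.

# Assumed model; unique publisher names across all books.
publishers = filter_list('publisher__name', model=Book, unique=True)
# The same field, restricted to a pre-filtered queryset.
recent_publishers = filter_list('publisher__name',
                                queryset=Book.objects.filter(year__gte=2020))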
Example #17
def extract_recap_documents(
    docs: QuerySet,
    skip_ocr: bool = False,
    order_by: Optional[str] = None,
    queue: Optional[str] = None,
) -> None:
    """Loop over RECAPDocuments and extract their contents. Use OCR if requested.

    :param docs: A queryset containing the RECAPDocuments to be processed.
    :type docs: Django Queryset
    :param skip_ocr: Whether OCR should be completed (False) or whether items
    should simply be updated to have status OCR_NEEDED.
    :type skip_ocr: Bool
    :param order_by: An optimization parameter. You may opt to order the
    processing by 'small-first' or 'big-first'.
    :type order_by: str
    :param queue: The celery queue to send the content to.
    :type queue: str
    """
    docs = docs.exclude(filepath_local="")
    if skip_ocr:
        # Focus on the items that we don't know if they need OCR.
        docs = docs.filter(ocr_status=None)
    else:
        # We're doing OCR. Only work with those items that require it.
        docs = docs.filter(ocr_status=RECAPDocument.OCR_NEEDED)

    if order_by is not None:
        if order_by == "small-first":
            docs = docs.order_by("page_count")
        elif order_by == "big-first":
            docs = docs.order_by("-page_count")

    count = docs.count()
    throttle = CeleryThrottle(queue_name=queue)
    for i, pk in enumerate(docs.values_list("pk", flat=True)):
        throttle.maybe_wait()
        extract_recap_pdf.apply_async((pk, skip_ocr), priority=5, queue=queue)
        if i % 1000 == 0:
            msg = f"Sent {i + 1}/{count} tasks to celery so far."
            logger.info(msg)
            sys.stdout.write(f"\r{msg}")
            sys.stdout.flush()
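A hedged example invocation (the filter and the queue name are assumptions): process the smallest documents first on a dedicated celery queue.

# Hypothetical call site; 'pdf_ocr' is an assumed queue name.
docs = RECAPDocument.objects.filter(is_available=True)
extract_recap_documents(docs, skip_ocr=False, order_by="small-first", queue="pdf_ocr")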
Example #18
def plot_bokeh_date_of_birth(queryset: QuerySet) -> Optional[Figure]:
    values = queryset.values_list(*FIELDS)
    df = pd.DataFrame(values, columns=FIELDS)
    df["date_of_birth"] = df["date_of_birth"].astype("datetime64")
    df["sex"] = df["sex"].replace(SEX_VALUES)
    counts = df.groupby([df["date_of_birth"].dt.year, "sex"]).count()
    counts.columns = ["count"]
    years = counts.index.levels[0].astype(int).to_numpy()
    try:
        years = range(years.min(), years.max() + 1)
    except ValueError:
        # an empty queryset leaves no years to plot; min()/max() raise ValueError
        return
    sexes = list(df["sex"].unique())
    index = pd.MultiIndex.from_product([years, sexes])
    counts = counts.reindex(index, fill_value=0)
    counts.index.names = ["year", "sex"]
    data = counts.unstack().droplevel(0, axis=1)
    source = ColumnDataSource(data=data)
    p = figure(
        title="Date of Birth Distribution by Sex",
        x_axis_label="Year",
        y_axis_label="Count",
        plot_height=250,
        plot_width=700,
        toolbar_location="above",
        tooltips=TOOLTIPS,
    )
    color = [SEX_COLORS.get(sex) for sex in sexes]
    p.vbar_stack(
        stackers=sexes,
        x="year",
        width=0.9,
        color=color,
        legend_label=sexes,
        source=source,
    )
    p.legend.location = "top_left"
    p.y_range.start = 0
    p.x_range.range_padding = 0.1
    p.xaxis.major_label_orientation = 1
    p.xgrid.grid_line_color = None
    return p
Example #19
def update_returning_pk(qs: QuerySet, updates: dict) -> Set[Any]:
    """
    Updates QuerySet items returning primary key values.
    This method does not depend on the database engine, though it may apply engine-specific optimizations.
    :param qs: QuerySet to update
    :param updates: Update items as passed to QuerySet.update(**updates) method
    :return: A set of primary keys
    """
    qs._for_write = True
    if django_pg_returning_available(qs.db) and hasattr(
            qs, 'update_returning'):
        pk_name = qs.model._meta.pk.name
        qs = qs.only(pk_name).update_returning(**updates)
        pks = set(qs.values_list(pk_name, flat=True))
    else:
        with transaction.atomic(using=qs.db):
            pks = set(qs.select_for_update().values_list('pk', flat=True))
            QuerySet.update(qs, **updates)

    return pks
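A hypothetical usage (the Task model and its fields are assumptions): mark stale rows as failed and get back exactly which primary keys were touched.

from datetime import timedelta
from django.utils import timezone

# Assumed model and fields; returns the set of primary keys that were actually updated.
cutoff = timezone.now() - timedelta(minutes=30)
stale = Task.objects.filter(status='running', heartbeat__lt=cutoff)
failed_pks = update_returning_pk(stale, {'status': 'failed'})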
Example #20
def values_list_ids(queryset: QuerySet) -> QuerySet:
    return queryset.values_list('id', flat=True)
Example #21
    def query(
        self,
        projects: Sequence[Project],
        retention_window_start: Optional[datetime],
        group_queryset: QuerySet,
        environments: Optional[Sequence[Environment]],
        sort_by: str,
        limit: int,
        cursor: Cursor,
        count_hits: bool,
        paginator_options: Optional[Mapping[str, Any]],
        search_filters: Optional[Sequence[SearchFilter]],
        date_from: Optional[datetime],
        date_to: Optional[datetime],
        max_hits: Optional[int] = None,
    ) -> CursorResult:

        now = timezone.now()
        end = None
        end_params = [
            _f for _f in
            [date_to, get_search_filter(search_filters, "date", "<")] if _f
        ]
        if end_params:
            end = min(end_params)

        if not end:
            end = now + ALLOWED_FUTURE_DELTA

            metrics.incr("snuba.search.postgres_only")

            # This search is for some time window that ends with "now",
            # so if the requested sort is `date` (`last_seen`) and there
            # are no other Snuba-based search predicates, we can simply
            # return the results from Postgres.
            if (cursor is None and sort_by == "date" and
                    # This handles tags and date parameters for search filters.
                    not [
                        sf for sf in search_filters if sf.key.name not in
                        self.postgres_only_fields.union(["date"])
                    ]):
                group_queryset = group_queryset.order_by("-last_seen")
                paginator = DateTimePaginator(group_queryset, "-last_seen",
                                              **paginator_options)
                # When it's a simple Django-only search, we count_hits like normal
                return paginator.get_result(limit,
                                            cursor,
                                            count_hits=count_hits,
                                            max_hits=max_hits)

        # TODO: Presumably we only want to search back to the project's max
        # retention date, which may be closer than 90 days in the past, but
        # apparently `retention_window_start` can be None(?), so we need a
        # fallback.
        retention_date = max(
            _f for _f in [retention_window_start, now - timedelta(days=90)]
            if _f)
        start_params = [
            date_from, retention_date,
            get_search_filter(search_filters, "date", ">")
        ]
        start = max(_f for _f in start_params if _f)
        end = max([retention_date, end])

        if start == retention_date and end == retention_date:
            # Both `start` and `end` must have been trimmed to `retention_date`,
            # so this entire search was against a time range that is outside of
            # retention. We'll return empty results to maintain backwards compatibility
            # with Django search (for now).
            return self.empty_result

        if start >= end:
            # TODO: This maintains backwards compatibility with Django search, but
            # in the future we should find a way to notify the user that their search
            # is invalid.
            return self.empty_result

        # Here we check if all the django filters reduce the set of groups down
        # to something that we can send down to Snuba in a `group_id IN (...)`
        # clause.
        max_candidates = options.get("snuba.search.max-pre-snuba-candidates")

        with sentry_sdk.start_span(op="snuba_group_query") as span:
            group_ids = list(
                group_queryset.values_list("id",
                                           flat=True)[:max_candidates + 1])
            span.set_data("Max Candidates", max_candidates)
            span.set_data("Result Size", len(group_ids))
        metrics.timing("snuba.search.num_candidates", len(group_ids))

        too_many_candidates = False
        if not group_ids:
            # no matches could possibly be found from this point on
            metrics.incr("snuba.search.no_candidates", skip_internal=False)
            return self.empty_result
        elif len(group_ids) > max_candidates:
            # If the pre-filter query didn't include anything to significantly
            # filter down the number of results (from 'first_release', 'status',
            # 'bookmarked_by', 'assigned_to', 'unassigned', or 'subscribed_by')
            # then it might have surpassed the `max_candidates`. In this case,
            # we *don't* want to pass candidates down to Snuba, and instead we
            # want Snuba to do all the filtering/sorting it can and *then* apply
            # this queryset to the results from Snuba, which we call
            # post-filtering.
            metrics.incr("snuba.search.too_many_candidates",
                         skip_internal=False)
            too_many_candidates = True
            group_ids = []

        sort_field = self.sort_strategies[sort_by]
        chunk_growth = options.get("snuba.search.chunk-growth-rate")
        max_chunk_size = options.get("snuba.search.max-chunk-size")
        chunk_limit = limit
        offset = 0
        num_chunks = 0
        hits = self.calculate_hits(
            group_ids,
            too_many_candidates,
            sort_field,
            projects,
            retention_window_start,
            group_queryset,
            environments,
            sort_by,
            limit,
            cursor,
            count_hits,
            paginator_options,
            search_filters,
            start,
            end,
        )
        if count_hits and hits == 0:
            return self.empty_result

        paginator_results = self.empty_result
        result_groups = []
        result_group_ids = set()

        max_time = options.get("snuba.search.max-total-chunk-time-seconds")
        time_start = time.time()
        more_results = False

        # Do smaller searches in chunks until we have enough results
        # to answer the query (or hit the end of possible results). We do
        # this because a common case for search is to return 100 groups
        # sorted by `last_seen`, and we want to avoid returning all of
        # a project's groups and then post-sorting them all in Postgres
        # when typically the first N results will do.
        while (time.time() - time_start) < max_time:
            num_chunks += 1

            # grow the chunk size on each iteration to account for huge projects
            # and weird queries, up to a max size
            chunk_limit = min(int(chunk_limit * chunk_growth), max_chunk_size)
            # but if we have group_ids always query for at least that many items
            chunk_limit = max(chunk_limit, len(group_ids))

            # {group_id: group_score, ...}
            snuba_groups, total = self.snuba_search(
                start=start,
                end=end,
                project_ids=[p.id for p in projects],
                environment_ids=environments
                and [environment.id for environment in environments],
                organization_id=projects[0].organization_id,
                sort_field=sort_field,
                cursor=cursor,
                group_ids=group_ids,
                limit=chunk_limit,
                offset=offset,
                search_filters=search_filters,
            )
            metrics.timing("snuba.search.num_snuba_results", len(snuba_groups))
            count = len(snuba_groups)
            more_results = count >= limit and (offset + limit) < total
            offset += len(snuba_groups)

            if not snuba_groups:
                break

            if group_ids:
                # pre-filtered candidates were passed down to Snuba, so we're
                # finished with filtering and these are the only results. Note
                # that because we set the chunk size to at least the size of
                # the group_ids, we know we got all of them (ie there are
                # no more chunks after the first)
                result_groups = snuba_groups
                if count_hits and hits is None:
                    hits = len(snuba_groups)
            else:
                # pre-filtered candidates were *not* passed down to Snuba,
                # so we need to do post-filtering to verify Sentry DB predicates
                filtered_group_ids = group_queryset.filter(
                    id__in=[gid
                            for gid, _ in snuba_groups]).values_list("id",
                                                                     flat=True)

                group_to_score = dict(snuba_groups)
                for group_id in filtered_group_ids:
                    if group_id in result_group_ids:
                        # because we're doing multiple Snuba queries, which
                        # happen outside of a transaction, there is a small possibility
                        # of groups moving around in the sort scoring underneath us,
                        # so we at least want to protect against duplicates
                        continue

                    group_score = group_to_score[group_id]
                    result_group_ids.add(group_id)
                    result_groups.append((group_id, group_score))

            # break the query loop for one of three reasons:
            # * we started with Postgres candidates and so only do one Snuba query max
            # * the paginator is returning enough results to satisfy the query (>= the limit)
            # * there are no more groups in Snuba to post-filter
            # TODO do we actually have to rebuild this SequencePaginator every time
            # or can we just make it after we've broken out of the loop?
            paginator_results = SequencePaginator(
                [(score, id) for (id, score) in result_groups],
                reverse=True,
                **paginator_options).get_result(limit,
                                                cursor,
                                                known_hits=hits,
                                                max_hits=max_hits)

            if group_ids or len(
                    paginator_results.results) >= limit or not more_results:
                break

        # HACK: We're using the SequencePaginator to mask the complexities of going
        # back and forth between two databases. This causes a problem with pagination
        # because we're 'lying' to the SequencePaginator (it thinks it has the entire
        # result set in memory when it does not). For this reason we need to make some
        # best guesses as to whether the `prev` and `next` cursors have more results.

        if len(paginator_results.results) == limit and more_results:
            # Because we are going back and forth between DBs there is a small
            # chance that we will hand the SequencePaginator exactly `limit`
            # items. In this case the paginator will assume there are no more
            # results, so we need to override the `next` cursor's results.
            paginator_results.next.has_results = True

        if cursor is not None and (not cursor.is_prev
                                   or len(paginator_results.results) > 0):
            # If the user passed a cursor, and it isn't already a 0 result `is_prev`
            # cursor, then it's worth allowing them to go back a page to check for
            # more results.
            paginator_results.prev.has_results = True

        metrics.timing("snuba.search.num_chunks", num_chunks)

        groups = Group.objects.in_bulk(paginator_results.results)
        paginator_results.results = [
            groups[k] for k in paginator_results.results if k in groups
        ]

        return paginator_results
Example #22
def distinct_qs_value_list(qs: QuerySet, field_name: str) -> Set:
    return set(qs.values_list(field_name, flat=True))
Example #23
def get_coords(runs: QuerySet) -> Dict[str, List[int]]:
    run_ids = list(runs.values_list("id", flat=True))
    return {"Run ID": run_ids}
Example #24
def subquery_pk(qs: QuerySet, using: str = 'default') -> Iterable[Any]:
    from django.db import connections
    # MySQL cannot reference the table being modified inside a subquery of the same
    # statement, so the primary keys are materialized into a list up front.
    if connections[using].vendor == 'mysql':
        return list(qs.values_list('pk', flat=True).iterator())
    return qs.values('pk')
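A hypothetical call site (the Article model and fields are assumptions): pk__in accepts either the subquery or the materialized list, so the same delete works on every backend.

from datetime import timedelta
from django.utils import timezone

# Assumed model and fields.
cutoff = timezone.now() - timedelta(days=30)
old_drafts = Article.objects.filter(status='draft', updated__lt=cutoff)
Article.objects.filter(pk__in=subquery_pk(old_drafts)).delete()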
Example #25
 def get_for_objects(cls, queryset: QuerySet) -> QuerySet:
     """ return the failing objects for all objects in a queryset """
     ct = ContentType.objects.get_for_model(queryset.model)
     return FailingObject.objects.filter(content_type=ct,
                                         object_pk__in=queryset.values_list(
                                             "pk", flat=True))
Example #26
    def _filter_by_permission(
        self,
        user: Optional[User],
        groups: models.QuerySet,
        permission: Permission,
        public: bool = True,
        with_superuser: bool = True,
    ) -> models.QuerySet:
        """Filter queryset by permissions.

        This is a generic method that is called in public methods.

        :attr user: the user which permissions should be considered.

        :attr groups: the groups which permissions should be considered.

        :attr permission: the lowest permission the entity must have.

        :attr public: when True, also consider public permissions.

        :attr with_superuser: when False, treat the superuser as a regular user.
        """

        # Skip filtering for superuser when with_superuser is set.
        if user is not None and user.is_superuser and with_superuser:
            return self

        # Handle special case of Storage and Relation.
        filters_prefix = ""
        if self.model._meta.label == "flow.Storage":
            filters_prefix = "data__"

        filters = dict()
        if user:
            filters["user"] = models.Q(
                **{
                    f"{filters_prefix}permission_group__permissions__user": user,
                    f"{filters_prefix}permission_group__permissions__value__gte": permission,
                }
            )

        if public:
            filters["public"] = models.Q(
                **{
                    f"{filters_prefix}permission_group__permissions__user": get_anonymous_user(),
                    f"{filters_prefix}permission_group__permissions__value__gte": permission,
                }
            )
        if groups:
            filters["groups"] = models.Q(
                **{
                    f"{filters_prefix}permission_group__permissions__group__in": groups.values_list(
                        "pk", flat=True
                    ),
                    f"{filters_prefix}permission_group__permissions__value__gte": permission,
                }
            )

        # The list here is needed, otherwise more joins are performed on the query
        # below. Some Django queries (for example ExpressionLateralJoin) do
        # not like that and will fail without evaluating the ids query first.
        ids = list(
            self.filter(
                reduce(lambda filters, filter: filters | filter, filters.values())
            )
            .distinct()
            .values_list("id", flat=True)
        )
        return self.filter(id__in=ids)
Example #27
 def filter_queryset(self, queryset: QuerySet):
     queryset = super().filter_queryset(queryset)
     return Praise.TYPE_MODEL[self.type].objects.filter(
         id=queryset.values_list('resource_id', flat=True)).all()
Example #28
 def to_representation(self, instance: QuerySet):
     return instance.values_list('user__username', 'points')