コード例 #1
0
        def message_filter(user, time_range, lowest, highest, body, order):
            """Filter and order Message objects.

            Filters by author, publication date window, body substring and
            rating range, then applies one of several named orderings.

            NOTE(review): assumes ``body`` is a non-None string and
            ``time_range`` is a datetime lower bound — confirm with callers.
            """
            now = datetime.datetime.now()
            qs = Message.objects.all()

            # Default to a rating window wide enough to match everything.
            lowest = -100 if lowest is None else lowest
            highest = 1000 if highest is None else highest

            if user is not None:
                qs = qs.filter(author=user)

            qs = qs.filter(date_of_pub__range=(time_range, now))
            qs = qs.filter(body__icontains=body)

            # An inverted window (lowest >= highest) disables rating filtering.
            if lowest < highest:
                qs = qs.filter(rating__range=(lowest, highest))

            orderings = {
                'Message': Length('body').asc(),
                'Author': Lower('author').asc(),
                'Topic': '-topic',
                'Rating': '-rating',
                'Date of pub': '-date_of_pub',
            }
            if order in orderings:
                qs = qs.order_by(orderings[order])
            return qs
コード例 #2
0
ファイル: tests.py プロジェクト: zulip/truncated-django
    def test_length(self):
        """Length() annotation returns per-row character counts, yields None
        for NULL columns, and can be compared against another Length()."""
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(
            name_length=Length('name'),
            alias_length=Length('alias'))

        # 'John Smith'/'smithj' -> (10, 6); 'Rhonda' has no alias -> None.
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                (10, 6),
                (6, None),
            ],
            lambda a: (a.name_length, a.alias_length)
        )

        # Length() is usable on the right-hand side of a lookup too.
        self.assertEqual(authors.filter(alias_length__lte=Length('name')).count(), 1)
コード例 #3
0
    def get_auth(cls, url: str, **kwargs) -> Union[ClientAuth, None]:
        """Return a ClientAuth for the stored credentials matching *url*.

        Candidates sharing the URL's scheme+domain are tried longest
        ``api_root`` first (most specific wins); returns ``None`` when no
        candidate's ``api_root`` is a prefix of *url*.
        """
        parts = urlsplit(url)
        scheme_and_domain = urlunsplit((parts.scheme, parts.netloc, "", "", ""))

        candidates = (
            cls.objects.filter(api_root__startswith=scheme_and_domain)
            .annotate(api_root_length=Length("api_root"))
            .order_by("-api_root_length")
        )

        # First (longest-rooted) candidate whose api_root prefixes the URL.
        credentials = next(
            (c for c in candidates.iterator() if url.startswith(c.api_root)),
            None,
        )
        if credentials is None:
            return None

        return ClientAuth(
            client_id=credentials.client_id,
            secret=credentials.secret,
            user_id=credentials.user_id,
            user_representation=credentials.user_representation,
            **kwargs,
        )
コード例 #4
0
ファイル: views.py プロジェクト: Isaacli0520/msnmatch
def get_roll_result(request):
    """Run a weighted lottery over users, weighted by qualifying reviews.

    GET params:
        date: ISO date (YYYY-MM-DD); only reviews after it count.
        tot_winners: number of winners to draw.

    Each qualifying review (text longer than 15 chars, posted after
    *date*) gives its author one ticket.  Draws retry (up to MAX_TRY)
    until exactly ``tot_winners`` distinct users win.
    """
    date = request.GET.get("date")
    tot_winners = int(request.GET.get("tot_winners"))
    dt = datetime.datetime.strptime(date, '%Y-%m-%d')
    users = []
    for user in User.objects.all():
        user_review_num = user.courseuser_set.annotate(length=Length("text")).filter(length__gt=15, date__gt=dt).count()
        if user_review_num > 0 and user.username != "admin":
            users.append({
                "pk": user.pk,
                "username": user.username,
                "name": user.first_name + " " + user.last_name,
                "reviews": user_review_num,
            })
    random.shuffle(users)
    total_num = sum(u["reviews"] for u in users)
    # Guard: with no eligible users (or no tickets) the original crashed on
    # users[0] / randint(0, -1) below -- return an empty winner list instead.
    if not users or total_num <= 0 or tot_winners <= 0:
        return _success_response({"users": []})
    num_try, MAX_TRY = 0, 1000
    result = []
    while len(result) != tot_winners and num_try < MAX_TRY:
        num_try += 1
        result = []
        # Sorted tickets let us sweep users and tickets in one pass.
        lottery = sorted(random.randint(0, total_num - 1) for _ in range(tot_winners))
        pointer = 0
        left, right = 0, users[0]["reviews"] - 1
        for i, user in enumerate(users):
            # Each user "owns" the ticket numbers in [left, right].
            if lottery[pointer] >= left and lottery[pointer] <= right:
                result.append(user)
                pointer += 1
            if i == len(users) - 1 or pointer >= tot_winners:
                break
            left = right + 1
            right = right + users[i + 1]["reviews"]
    return _success_response({
        "users": result
    })
コード例 #5
0
    def show_characters_counts(cls):
        """
        Retrieve translatable fields across all translatable models, log the
        total character count per field, and estimate the translation cost.
        """
        translatable_models = cls.get_translatable_models()
        logger.info(f'Languages: {AVAILABLE_LANGUAGES}')
        logger.info(f'Default language: {mt_settings.DEFAULT_LANGUAGE}')
        logger.info(f'Number of models: {len(translatable_models)}')

        total_count = 0
        for model in translatable_models:
            logger.info(
                f'Processing for Model: {model._meta.verbose_name.title()}')

            translatable_fields = cls.get_translatable_fields(model)
            if not translatable_fields:
                continue

            # Restrict to rows that actually have translatable content.
            qs = model.objects.filter(cls._get_filter(translatable_fields))
            logger.info(f'\tFields: {translatable_fields}')
            logger.info('\tTotal characters:')

            for field in translatable_fields:
                # Sum of character lengths for this field; Sum() of an
                # empty queryset is None, hence the `or 0`.
                count = qs.annotate(text_length=Length(field))\
                    .aggregate(total_text_length=Sum('text_length'))['total_text_length'] or 0
                total_count += count
                logger.info(f'\t\t {field} - {count}')
        logger.info(f'Total Count: {total_count}')
        # Cost: non-default languages x characters x 0.000015 -- presumably
        # a per-character API rate; confirm against the provider's pricing.
        logger.info(
            f'Estimated Cost: {(len(AVAILABLE_LANGUAGES) -1) * total_count * 0.000015}'
        )
コード例 #6
0
    def load_ml_data():
        """
        Load category/page-text rows from the database and return a pandas
        data-frame with columns ``category_id`` and ``page_text``.

        Categories 10419 and 10416 (parked websites) are excluded, texts
        are taken longest-first and capped at 3000 per category (except
        category 10010), and categories with 150 or fewer texts are
        skipped entirely.
        :return: pd.DataFrame
        """
        model_text = []
        category_ids = Category.objects. \
            values_list('id'). \
            exclude(id__in=[10419, 10416])
        for category in category_ids.iterator():
            category_id = category[0]
            print(category_id)
            db_texts = CategoryWebsiteText.objects. \
                filter(category_id=category_id). \
                values_list('category_id', 'page_text'). \
                order_by(Length('page_text').desc())
            if category_id != 10010:
                db_texts = db_texts[:3000]

            # Materialize once: the original iterated the queryset twice
            # (once to count, once to collect), doubling the DB round-trips.
            rows = list(db_texts.iterator())
            if len(rows) > 150:
                model_text.extend(rows)
                print(category_id)
                print(len(rows))

        df = pd.DataFrame(model_text, columns=['category_id', 'page_text'])

        return df
コード例 #7
0
def embed_sample(request, cancer):
    """Render a paginated, filterable sample table for a cancer type.

    GET filters: tissue, gender, tumor, age_from/age_to (years, converted
    into days_to_birth bounds with a half-year margin), click_sort, page,
    datas_per_page.
    """
    criterias = {}
    if cancer != 'ALLL':
        criterias['cancer_type'] = cancer
    if 'tissue' in request.GET and request.GET['tissue'] != '':
        criterias['tumor_tissue_site'] = request.GET['tissue']
    if 'gender' in request.GET and request.GET['gender'] != '':
        criterias['is_male'] = request.GET['gender']
    if 'tumor' in request.GET and request.GET['tumor'] != '':
        criterias['is_tumor'] = request.GET['tumor']
    if 'age_from' in request.GET and request.GET['age_from'] != '':
        # days_to_birth is negative; the +/-180 days add a half-year margin.
        criterias['days_to_birth__lte'] = int(
            request.GET['age_from']) * -365 + 180
    if 'age_to' in request.GET and request.GET['age_to'] != '':
        criterias['days_to_birth__gte'] = int(
            request.GET['age_to']) * -365 - 180

    # bc_len > 13: presumably keeps only full sample barcodes (not
    # patient-level ones) -- confirm against the barcode scheme.
    samples = Sample.objects.filter(**criterias).annotate(
        bc_len=Length('sample_barcode')).filter(bc_len__gt=13)
    if 'click_sort' in request.GET:
        samples = samples.order_by(request.GET['click_sort'])
    # Robustness: non-numeric page / datas_per_page previously raised
    # ValueError (HTTP 500); fall back to sane defaults instead.
    if request.method == 'GET' and 'page' in request.GET:
        try:
            page = int(request.GET["page"])
        except ValueError:
            page = 1
    else:
        page = 1
    if 'datas_per_page' in request.GET:
        try:
            perpage = int(request.GET['datas_per_page'])
        except ValueError:
            perpage = 10
    else:
        perpage = 10
    paginator = Paginator(samples, perpage)
    try:
        samples_paged = paginator.page(page)
    except EmptyPage:
        samples_paged = paginator.page(paginator.num_pages)
    # Echo active filters back as a query string and in human-readable form.
    search_rec = []
    cri_human = {}
    for k in request.GET:
        if request.GET[k] != '' and k != 'page':
            search_rec.append('%s=%s' % (k, request.GET[k]))
            if k == 'gender':
                if request.GET[k] == '0':
                    cri_human[k] = 'male'
                else:
                    cri_human[k] = 'female'
            elif k == 'age_from':
                cri_human['older than'] = request.GET[k]
            elif k == 'age_to':
                cri_human['younger than'] = request.GET[k]
            else:
                cri_human[k] = request.GET[k]
    searched = '&'.join(search_rec)
    rd = (randint(0, 1000), randint(2000, 4000))
    return render(
        request, "embedtable_samples.html", {
            'samples': samples_paged,
            'cancer': cancer,
            'search_record': searched,
            'cri': cri_human,
            'rd': rd
        })
コード例 #8
0
ファイル: querysets.py プロジェクト: LoneStar-Swish/nautobot
    def ip_family(self, family):
        """Filter to addresses whose host length matches the given IP family.

        Raises ValueError when *family* is not a key of ``ip_family_map``.
        """
        if family not in self.ip_family_map:
            raise ValueError("invalid IP family {}".format(family))
        byte_len = self.ip_family_map[family]
        return self.annotate(address_len=Length(F("host"))).filter(address_len=byte_len)
コード例 #9
0
ファイル: i18n.py プロジェクト: siturra/kpi-old
    def get_sitewide_message(slug="welcome_message", lang=None):
        """
        Returns a sitewide message based on its slug and the specified language.
        If the language is not specified, it will use the current language.
        If there are no results found, it falls back on the global version.
        If it doesn't exist at all, it returns None.
        :param slug: str
        :param lang: str|None
        :return: MarkupField|None
        """
        # Fall back to the active language when none is given.
        language = lang if lang else get_language()

        # Candidate slugs are "<slug>_<locale>" and plain "<slug>"; sorting
        # by slug length descending guarantees the localized row wins.
        localized = "{}_{}".format(slug, language)
        match = (
            SitewideMessage.objects
            .filter(Q(slug=localized) | Q(slug="{}".format(slug)))
            .order_by(Length("slug").desc())
            .first()
        )

        return match.body if match is not None else None
コード例 #10
0
def list_notes(req, mtype="all", page=1):
    """Render substantial notes (comments over 400 chars), newest first.

    :param mtype: media-type filter; "all" disables the oeuvre filter.
    :param page: 1-based page number; out-of-range pages clamp to the last.
    """
    # The original duplicated the annotate/filter chain in both branches;
    # build the base queryset once and apply the mtype filter conditionally.
    notes_full = Commentaire.objects.all()
    if mtype != "all":
        notes_full = notes_full.filter(oeuvre__info__mtype=mtype)
    notes_full = notes_full.annotate(content_len=Length('content')) \
                           .filter(content_len__gt=400) \
                           .order_by('-date')
    paginator = Paginator(notes_full, 20)
    try:
        notes = paginator.page(page)
    except EmptyPage:
        notes = paginator.page(paginator.num_pages)
    context = {'notes': notes, 'mtype': mtype}
    return render(req, 'critique/notes.html', context)
コード例 #11
0
def _prepare_forum_post_contribution_index(posts):
    """Compute the contribution index for a user's forum posts.

    Only posts of at least 150 characters with at least 3 likes count;
    among those, posts with an above-average like count are fed to
    _astrobin_index.  Returns 0 when no post qualifies.
    """
    min_post_length = 150
    min_likes = 3

    long_posts = posts \
        .annotate(length=Length('body')) \
        .filter(length__gte=min_post_length)

    # Fetch each post's like count exactly once; the original issued the
    # same COUNT query up to three times per post.
    like_counts = [
        ToggleProperty.objects.toggleproperties_for_object('like', post).count()
        for post in long_posts
    ]
    liked_counts = [likes for likes in like_counts if likes >= min_likes]

    if not liked_counts:
        return 0

    average = sum(liked_counts) / float(len(liked_counts))
    normalized = [likes for likes in liked_counts if likes >= average]

    if not normalized:
        return 0

    return _astrobin_index(normalized)
コード例 #12
0
 def annotate_parent(self):
     """Return a TreeQuerySet annotated with ``_parent_pk`` for each node.

     Supports django-mptt and all three treebeard node types; raises
     UnknownTreeImplementation for anything else.
     """
     if self.treetype == MPTT:
         # MPTT stores an explicit FK to the parent; just follow it.
         parent_field = self.qs.model._mptt_meta.parent_attr
         return TreeQuerySet(
             self.qs.annotate(_parent_pk=F(parent_field + '__pk')))
     elif self.treetype == TREEBEARD:
         if issubclass(self.qs.model, NS_Node):
             # Nested sets: ancestors are rows (same tree) whose
             # (lft, rgt) interval encloses ours; reverse()[:1]
             # presumably picks the closest (deepest) ancestor via the
             # model's default ordering -- confirm.
             sub = self.qs.model.objects.filter(
                 tree_id=OuterRef('tree_id'),
                 lft__lt=OuterRef('lft'),
                 rgt__gt=OuterRef('rgt')).reverse()[:1]
             qs = self.qs.annotate(_parent_pk=Subquery(sub.values('pk')))
             return TreeQuerySet(qs)
         elif issubclass(self.qs.model, MP_Node):
             # Materialized path: the parent's path is this node's path
             # with the last ``steplen`` characters stripped.
             sub = self.qs.model.objects.filter(path=OuterRef('parentpath'))
             expr = Substr('path',
                           1,
                           Length('path') - self.qs.model.steplen,
                           output_field=CharField())
             qs = self.qs.annotate(parentpath=expr).annotate(
                 _parent_pk=Subquery(sub.values('pk')))
             return TreeQuerySet(qs)
         elif issubclass(self.qs.model, AL_Node):
             # Adjacency list: explicit parent FK, like MPTT.
             return TreeQuerySet(
                 self.qs.annotate(_parent_pk=F('parent__pk')))
     raise UnknownTreeImplementation('dont know how to annotate _parent_pk')
コード例 #13
0
ファイル: bulktofiles.py プロジェクト: vain01/relate
def convert_flow_page_visits(stdout, stderr):
    """Convert stored FlowPageVisit answers to files, 200 per transaction.

    Selects visits whose answer embeds base64 data, or code-question
    answers of at least 128 characters, and runs convert_flow_page_visit
    on each.  Progress messages go to *stdout*, converter diagnostics to
    *stderr*.
    """
    fpv_pk_qset = (
        FlowPageVisit.objects.annotate(answer_len=Length("answer")).filter(
            Q(answer__contains="base64_data")
            | (
                # code questions with long answer_data
                Q(answer__contains="answer")
                & Q(answer_len__gte=128))).values("pk"))

    fpv_pk_qset_iterator = iter(fpv_pk_qset)

    done = False  # renamed from `quit`, which shadowed the builtin
    total_count = 0
    while not done:
        # One transaction per batch of 200 keeps transactions short.
        with transaction.atomic():
            for _ in range(200):
                try:
                    fpv_pk = next(fpv_pk_qset_iterator)
                except StopIteration:
                    done = True
                    break
                # select_related avoids per-visit queries in the converter.
                fpv = (FlowPageVisit.objects.select_related(
                    "flow_session", "flow_session__course",
                    "flow_session__participation",
                    "flow_session__participation__user",
                    "page_data").get(pk=fpv_pk["pk"]))
                if convert_flow_page_visit(stderr, fpv):
                    total_count += 1

        stdout.write("converted %d page visits..." % total_count)

    stdout.write("done with visits!")
コード例 #14
0
ファイル: bulktofiles.py プロジェクト: vain01/relate
def convert_bulk_feedback(stdout, stderr):
    """Migrate large FlowPageBulkFeedback rows to files, 200 per transaction.

    Selects rows at least 256 characters long that have not already been
    converted (no BULK_FEEDBACK_FILENAME_KEY marker) and re-saves them via
    update_bulk_feedback.  Progress messages go to *stdout*.
    """
    from course.models import BULK_FEEDBACK_FILENAME_KEY, update_bulk_feedback
    fbf_pk_qset = (FlowPageBulkFeedback.objects.annotate(
        bf_len=Length("bulk_feedback")).filter(
            ~Q(bulk_feedback__contains=BULK_FEEDBACK_FILENAME_KEY)
            & Q(bf_len__gte=256)).values("pk"))

    fbf_pk_qset_iterator = iter(fbf_pk_qset)

    done = False  # renamed from `quit`, which shadowed the builtin
    total_count = 0
    while not done:
        # One transaction per batch of 200 keeps transactions short.
        with transaction.atomic():
            for _ in range(200):
                try:
                    fbf_pk = next(fbf_pk_qset_iterator)
                except StopIteration:
                    done = True
                    break
                # select_related avoids per-row queries in the updater.
                fbf = (FlowPageBulkFeedback.objects.select_related(
                    "page_data", "page_data__flow_session",
                    "page_data__flow_session__participation",
                    "page_data__flow_session__participation__user").get(
                        pk=fbf_pk["pk"]))

                update_bulk_feedback(fbf.page_data, fbf.grade,
                                     fbf.bulk_feedback)
                total_count += 1

        stdout.write("converted %d items of bulk feedback..." % total_count)

    stdout.write("done with bulk feedback!")
コード例 #15
0
ファイル: user.py プロジェクト: ShyScott/Air
 def filter_search(self, queryset, field_name, value):
     """Search users by username or student id, closest matches first.

     A field that matched is ranked by the length of that field with the
     search text removed (fewer leftover characters = closer match); a
     field that did not match gets a huge sentinel length so the other
     field's rank wins via Least().

     NOTE(review): ``icontains`` is case-insensitive but Replace() is
     case-sensitive, so a differently-cased match removes nothing and
     ranks by full field length -- confirm this is intended.
     """
     max_length = 999999
     return queryset.filter(
         Q(username__icontains=value)
         | Q(student_profile__student_id__icontains=value)).annotate(
             username_length=Case(
                 When(username__icontains=value,
                      then=Length(Replace('username', Value(value)))),
                 default=Value(max_length),
             ), ).annotate(student_id_length=Case(
                 When(student_profile__student_id__icontains=value,
                      then=Length(
                          Replace('student_profile__student_id',
                                  Value(value)))),
                 default=Value(max_length),
             ), ).order_by(Least('username_length', 'student_id_length'))
コード例 #16
0
ファイル: views.py プロジェクト: UdayVarkhedkar/osf.io
    def annotate_queryset_with_download_count(self, queryset):
        """
        Annotates queryset with download count of first osfstorage file

        NOTE: This is a brittle way to do this.  PageCounter _ids are of the form
        <file_action>:<node__id>:<file__id>:<sometimes version>.
        - Assumes the "download" file action is the only action with that many letters
        - Assumes node and file guids are a consistent length
        - ENG-122 would get rid of this string matching behavior
        """
        # Substr offsets assume "download:<5-char node id>:<file id ...>":
        # chars 10-14 are the node guid, chars 16+ the file id.  The
        # _id_length > 39 exclusion presumably drops versioned counters
        # (ids with a ":<version>" suffix) -- confirm (see NOTE above).
        pages = PageCounter.objects.annotate(
            node_id=Substr('_id', 10, 5),
            file_id=Substr('_id', 16),
            _id_length=Length('_id'),
        ).filter(
            _id__icontains='download',
            node_id=OuterRef('guids___id'),
            file_id=OuterRef('file_id'),
        ).exclude(_id_length__gt=39)

        # Earliest-created osfstorage file attached to the outer node.
        file_subqs = OsfStorageFile.objects.filter(
            target_content_type_id=ContentType.objects.get_for_model(
                AbstractNode),
            target_object_id=OuterRef('pk'),
        ).order_by('created')

        # Coalesce defaults download_count to 0 when no counter row matches.
        queryset = queryset.annotate(file_id=Subquery(
            file_subqs.values('_id')[:1]), ).annotate(download_count=Coalesce(
                Subquery(pages.values('total')[:1]), Value(0)), )
        return queryset
コード例 #17
0
def _get_matching_genes(user, query):
    """Returns genes that match the given query string, and that the user can view.

    NOTE(review): ``user`` is never referenced in the body -- the
    permission filtering the summary promises appears unimplemented;
    confirm with callers before relying on it.

    Args:
       user: Django user
       query: String typed into the awesomebar
    Returns:
       Sorted list of matches where each match is a dictionary of strings
    """
    matches = GencodeGene.objects.filter(
        Q(gene_id__icontains=query) | Q(gene_name__icontains=query)).order_by(
            Length('gene_name').asc())

    result = []
    for gene in matches[:MAX_RESULTS_PER_CATEGORY]:
        # Show whichever field matched as the title, the other as detail.
        matched_on_id = query.lower() in gene.gene_id.lower()
        title = gene.gene_id if matched_on_id else gene.gene_name
        description = gene.gene_name if matched_on_id else gene.gene_id
        result.append({
            'title': title,
            'description': '(' + description + ')' if description else '',
            'href': '/gene/' + gene.gene_id,
        })

    return result
コード例 #18
0
ファイル: api.py プロジェクト: kiki0805/Make-Story-Together
 def clear_empty_content(self, request, pk=None):
     """Delete this story's zero-length plots, then any chapters left
     with no plots at all."""
     story = self.get_object()
     empty_plots = Plot.objects.filter(chapter__story=story).annotate(
         empty_content=Length('content')).filter(empty_content=0)
     empty_plots.delete()
     orphan_chapters = Chapter.objects.filter(story=story).annotate(
         plot_count=Count('plots')).filter(plot_count=0)
     orphan_chapters.delete()
     return Response(status=status.HTTP_200_OK)
コード例 #19
0
def list_verification(request):
    """List a year's expenses whose verification matches 'E', ordered by
    verification length then value, paginated 25 per page."""
    # Evaluate "now" once: the original called datetime.now() twice, which
    # could disagree around New Year's midnight and desynchronize the year
    # dropdown from the default year.
    current_year = datetime.now().year
    years = range(2017, current_year + 1)
    year = request.GET.get('year')

    year = year if year is not None and year != '' else current_year

    # Shortest verification strings first, ties broken lexically.
    # (Redundant trailing .all() dropped.)
    verification_list = Expense.objects \
        .filter(expense_date__year=year, verification__regex=r'E') \
        .order_by(Length('verification').asc(), 'verification')

    paginator = Paginator(verification_list, 25)
    page = request.GET.get('page')

    try:
        verifications = paginator.page(page)
    except PageNotAnInteger:
        verifications = paginator.page(1)
    except EmptyPage:
        verifications = paginator.page(paginator.num_pages)

    return render(request, 'admin/list-verification.html', {
        'expenses': verifications,
        'years': years,
        'year': year,
    })
コード例 #20
0
ファイル: views.py プロジェクト: zaeph/ledger-web
def apply_rules(ledger_data, user):
    """Apply the user's replacement rules, most specific first.

    Rules are ordered by descending payee/note length so the most specific
    one is tried first; returns True as soon as one rule applies.
    """
    ordering = [Length(field).desc() for field in ('payee', 'note')]
    rules = Rule.objects.filter(user=user).order_by(*ordering)
    # any() short-circuits exactly like the original early return.
    return any(apply_rule(ledger_data, rule) for rule in rules)
コード例 #21
0
ファイル: naics.py プロジェクト: umeshh/usaspending-api
    def _business_logic(self, request_data: dict) -> dict:
        """Build the NAICS tree response for an optional code/description filter.

        Returns the default view when neither filter is given.  A
        description filter short-circuits into _filter_search (any code
        filter is then ignored -- preserved from the original behavior).
        Codes shorter than 6 digits are treated as non-leaf nodes and get
        a real descendant count plus their children.
        """
        naics_filter = {}
        code = request_data.get("code")
        description = request_data.get("filter")

        if not code and not description:
            return self._default_view()
        if code:
            # Direct assignment instead of dict.update({...}); also reuse
            # the already-fetched `code` rather than re-reading request_data.
            naics_filter["code"] = code
        if description:
            naics_filter["description__icontains"] = description
            return self._filter_search(naics_filter)

        naics = NAICS.objects.filter(**naics_filter)
        results = []
        for naic in naics:
            result = OrderedDict()
            result["naics"] = naic.code
            result["naics_description"] = naic.description
            if len(naic.code) < 6:
                # Non-leaf code: count its 6-digit descendants.
                result["count"] = (
                    NAICS.objects.annotate(text_len=Length("code"))
                    .filter(code__startswith=naic.code, text_len=6)
                    .count()
                )
                result["children"] = self._fetch_children(naic.code)
            else:
                result["count"] = DEFAULT_CHILDREN
            results.append(result)

        response_content = OrderedDict({"results": results})
        return response_content
コード例 #22
0
def get_titles(request):
    """Autocomplete endpoint: up to 50 movie titles containing the typed
    text, shortest titles first.

    Responses: 405 non-POST, 400 missing field, 204 empty input /
    no results / repeated query, 200 JSON list otherwise.
    """
    # Guard clauses replace the original's deep nesting.
    if request.method != 'POST':
        return HttpResponse(status=405)
    try:
        to_query = request.POST["currently_typed"]
    except KeyError:
        return HttpResponse(status=400)
    if len(to_query) == 0:
        return HttpResponse(status=204)
    # First query, or the typed text changed since the last request.
    if "previous_query" not in request.session \
            or request.session['previous_query'] != to_query:
        content = Titles.objects.filter(
            movie_title__icontains=to_query).values(
                'movie_title', "movie_id",
                "year").order_by(Length('movie_title').asc())[:50]
        if not content:  # idiomatic emptiness check (was len(...) == 0)
            return HttpResponse(status=204)
        response = JsonResponse(list(content), safe=False)
        request.session['previous_query'] = to_query
        return response
    return HttpResponse(status=204)
コード例 #23
0
ファイル: tests.py プロジェクト: zzjeric/django
 def test_functions(self):
     """repr() of each database function shows its resolved argument
     expressions (strings become F() references, ints become Value())."""
     self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
     self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
     self.assertEqual(repr(Length('a')), "Length(F(a))")
     self.assertEqual(repr(Lower('a')), "Lower(F(a))")
     self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
     self.assertEqual(repr(Upper('a')), "Upper(F(a))")
コード例 #24
0
ファイル: tagging.py プロジェクト: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Parse one tagging row: record the box number, the box location
        and any individual treatment for this row's animal and date."""
        super().row_parser(row)
        row_datetime = utils.get_row_date(row)
        row_date = row_datetime.date()
        # Box number, when the cell is present and not NaN.
        if utils.nan_to_none(row.get(self.box_key)):
            self.row_entered += utils.enter_indvd(self.anix_indv.pk,
                                                  self.cleaned_data, row_date,
                                                  row[self.box_key],
                                                  self.box_anidc_id.pk, None)
        # Box location, when present.
        if utils.nan_to_none(row.get(self.location_key)):
            self.row_entered += utils.enter_indvd(self.anix_indv.pk,
                                                  self.cleaned_data, row_date,
                                                  row[self.location_key],
                                                  self.boxl_anidc_id.pk, None)

        # A treatment is recorded only when both code and amount exist.
        if utils.nan_to_none(row.get(self.indt_key)) and utils.nan_to_none(
                row.get(self.indt_amt_key)):
            # .get() asserts exactly one treatment code matches the cell.
            indvtc_id = models.IndTreatCode.objects.filter(
                name__icontains=row[self.indt_key]).get()
            # Shortest name containing "gram" -- presumably picks the bare
            # "gram" unit over e.g. "milligram"; confirm against unit data.
            unit_id = models.UnitCode.objects.filter(
                name__icontains="gram").order_by(Length('name').asc()).first()
            self.row_entered += utils.enter_indvt(self.anix_indv.pk,
                                                  self.cleaned_data,
                                                  row_datetime,
                                                  row[self.indt_amt_key],
                                                  indvtc_id.pk,
                                                  unit_id=unit_id)
コード例 #25
0
def _prepare_comment_contribution_index(comments):
    """Compute the contribution index for a user's comments.

    Only non-deleted comments of at least 150 characters with at least 3
    likes count; among those, comments with an above-average like count
    are fed to _astrobin_index.  Returns 0 when none qualify.
    """
    min_comment_length = 150
    min_likes = 3

    long_comments = comments \
        .annotate(length=Length('text')) \
        .filter(deleted=False, length__gte=min_comment_length)

    # Evaluate len(x.likes) once per comment; the original recomputed it
    # up to three times per comment.
    like_counts = [
        n for n in (len(x.likes) for x in long_comments) if n >= min_likes
    ]

    if not like_counts:
        return 0

    average = sum(like_counts) / float(len(like_counts))
    normalized = [n for n in like_counts if n >= average]

    if not normalized:
        return 0

    return _astrobin_index(normalized)
コード例 #26
0
ファイル: views.py プロジェクト: tim-schilling/redditstats
    def get_context_data(self, **kwargs):
        """Add the page's top short comments to the template context:
        comments shorter than ``length_limitation``, highest score first,
        capped at ``page_size``."""
        comments = self.comments().annotate(length=Length('text'), ).filter(
            length__lt=self.length_limitation, ).order_by(
                '-score')[:self.page_size]

        return super(TopShortComments,
                     self).get_context_data(comments=comments, **kwargs)
コード例 #27
0
ファイル: tests.py プロジェクト: DYL521/django_source
    def test_nested_function_ordering(self):
        """Length() accepts a nested expression (Coalesce) and supports
        both the default ascending ordering and .desc()."""
        Author.objects.create(name='John Smith')
        Author.objects.create(name='Rhonda Simpson', alias='ronny')

        # Orders by len(alias or name): 'ronny' (5) < 'John Smith' (10).
        authors = Author.objects.order_by(Length(Coalesce('alias', 'name')))
        self.assertQuerysetEqual(authors, [
            'Rhonda Simpson',
            'John Smith',
        ], lambda a: a.name)

        authors = Author.objects.order_by(
            Length(Coalesce('alias', 'name')).desc())
        self.assertQuerysetEqual(authors, [
            'John Smith',
            'Rhonda Simpson',
        ], lambda a: a.name)
コード例 #28
0
 def prefetch_labels(self):
     """Prefetch per-label string/word/character stats into the store.

     Labels present in the project but absent from the unit stats are
     stored with explicit zeros so lookups never miss.
     """
     remaining_labels = set(
         self._object.component.project.label_set.values_list("name",
                                                              flat=True))
     # Source translations carry labels directly; others go via source_unit.
     field = ("labels__name" if self._object.is_source
              else "source_unit__labels__name")
     per_label = self._object.unit_set.values(field).annotate(
         strings=Count("pk"),
         words=Sum("num_words"),
         chars=Sum(Length("source")))
     for row in per_label:
         name = row[field]
         # Filtering here is way more effective than in SQL
         if name is None:
             continue
         key = "label:{}".format(name)
         self.store(key, row["strings"])
         self.store(key + "_words", row["words"])
         self.store(key + "_chars", row["chars"])
         remaining_labels.discard(name)
     # Any label never seen in the stats gets zeroed entries.
     for name in remaining_labels:
         key = "label:{}".format(name)
         self.store(key, 0)
         self.store(key + "_words", 0)
         self.store(key + "_chars", 0)
コード例 #29
0
ファイル: views.py プロジェクト: gghotted/search-in-video
def search_words(request):
    """Return up to 10 distinct word texts (shortest first) from the
    requesting user's videos that contain ``find_text``."""
    needle = request.GET.get('find_text', '')
    matches = Word.objects.filter(video__user=request.user,
                                  text__icontains=needle)
    ordered = matches.values_list('text', flat=True).order_by(Length('text')).distinct()
    return JsonResponse({'words_list': list(ordered)[:10]})
コード例 #30
0
ファイル: item_dump.py プロジェクト: kcsry/kirppu
def item_dump(output, event, as_text):
    """Stream an event's items to *output* as aligned text or CSV.

    Generator protocol: yields once after the header row and once per
    item row so the caller can flush/clear the underlying buffer between
    writes (see the StringIO note below).
    """
    items = Item.objects.filter(vendor__event=event)

    if as_text:
        # Columns whose spec is a plain field name (str) get their width
        # from the longest stored value; computed columns default to 1.
        straight_column_names = [c[1] for c in COLUMNS if isinstance(c[1], str)]
        column_name_lengths = {c + "_length": Length(Cast(c, output_field=TextField())) for c in straight_column_names}
        max_column_name_lengths = {"max_" + c: Max(c + "_length") for c in straight_column_names}
        max_lengths = items.annotate(**column_name_lengths).aggregate(**max_column_name_lengths)

        # `or 0` turns a NULL max (no rows / all-NULL column) into width 0.
        column_widths = [
            (max_lengths["max_" + c[1]] if isinstance(c[1], str) else 1) or 0
            for c in COLUMNS
        ]
        writer = TextWriter(output, column_widths)
        writer.write_staggered(str(c[0]) for c in COLUMNS)
    else:
        writer = csv.writer(output)
        writer.writerow(str(c[0]) for c in COLUMNS)

    # Used here and later for buffer streaming and clearing in case of StringIO.
    yield

    for item in items.order_by("vendor__id", "name"):
        writer.writerow(_process_column(item, c, as_text) for c in COLUMNS)
        yield