Example #1
 def __init__(self, expression, distinct=False, **extra):
     if expression == "*":
         expression = Value(expression)
         expression._output_field = IntegerField()
     super(Count, self).__init__(
         expression, distinct="DISTINCT " if distinct else "", output_field=IntegerField(), **extra
     )
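A minimal usage sketch, assuming a hypothetical Book model, showing how this Count aggregate is typically invoked; the '*' argument is the case handled by the Value(expression) branch above:

 from django.db import models
 from django.db.models import Count

 class Book(models.Model):
     # Hypothetical model, for illustration only.
     author = models.CharField(max_length=100)

 # COUNT(*): '*' is wrapped in Value('*') with an IntegerField output.
 Book.objects.aggregate(total=Count('*'))
 # COUNT(DISTINCT "author"): distinct=True becomes the 'DISTINCT ' SQL prefix.
 Book.objects.aggregate(distinct_authors=Count('author', distinct=True))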
Example #2
 def process_rhs(self, compiler, connection):
     value = self.rhs
     if self.bilateral_transforms:
         if self.rhs_is_direct_value():
             # Do not call get_db_prep_lookup here as the value will be
             # transformed before being used for lookup
             value = Value(value, output_field=self.lhs.output_field)
         value = self.apply_bilateral_transforms(value)
         value = value.resolve_expression(compiler.query)
     if hasattr(value, 'as_sql'):
         return compiler.compile(value)
     else:
         return self.get_db_prep_lookup(value, connection)
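For context, bilateral transforms are why a direct value is wrapped in Value(...) above: a transform registered with bilateral = True is applied to both sides of the comparison, so the rhs must be an expression too. A minimal registration sketch, modeled on the custom-lookup example in the Django documentation:

 from django.db.models import CharField
 from django.db.models.functions import Upper

 class UpperCase(Upper):
     # Applied to lhs and rhs alike, so name__upper='doe' compiles to
     # UPPER("name") = UPPER('doe').
     lookup_name = 'upper'
     bilateral = True

 CharField.register_lookup(UpperCase)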
Example #3
 def batch_process_rhs(self, compiler, connection, rhs=None):
     if rhs is None:
         rhs = self.rhs
     if self.bilateral_transforms:
         sqls, sqls_params = [], []
         for p in rhs:
             value = Value(p, output_field=self.lhs.output_field)
             value = self.apply_bilateral_transforms(value)
             value = value.resolve_expression(compiler.query)
             sql, sql_params = compiler.compile(value)
             sqls.append(sql)
             sqls_params.extend(sql_params)
     else:
         _, params = self.get_db_prep_lookup(rhs, connection)
         sqls, sqls_params = ['%s'] * len(params), params
     return sqls, sqls_params
Example #4
 def process_rhs(self, compiler, connection):
     value = self.rhs
     if self.bilateral_transforms:
         if self.rhs_is_direct_value():
             # Do not call get_db_prep_lookup here as the value will be
             # transformed before being used for lookup
             value = Value(value, output_field=self.lhs.output_field)
         value = self.apply_bilateral_transforms(value)
         value = value.resolve_expression(compiler.query)
     # Due to historical reasons there are a couple of different
     # ways to produce sql here. get_compiler is likely a Query
     # instance and as_sql just something with as_sql. Finally, the value
     # can of course be just a plain Python value.
     if hasattr(value, 'get_compiler'):
         value = value.get_compiler(connection=connection)
     if hasattr(value, 'as_sql'):
         sql, params = compiler.compile(value)
         return '(' + sql + ')', params
     else:
         return self.get_db_prep_lookup(value, connection)
Example #5
 def as_sqlite(self, compiler, connection):
     if len(self.source_expressions) < 4:
         # Always provide the z parameter for ST_Translate
         self.source_expressions.append(Value(0))
     return super().as_sqlite(compiler, connection)
Example #6
File: purge.py  Project: dmacmillan/Kive
    def purge(self,
              start,
              stop,
              dataset_aging,
              log_aging,
              sandbox_aging,
              batch_size):
        logger.debug('Starting purge.')
        container_total = self.set_file_sizes(Container,
                                              'file',
                                              'file_size',
                                              'created')
        sandbox_total = self.set_file_sizes(ContainerRun,
                                            'sandbox_path',
                                            'sandbox_size',
                                            'end_time')
        log_total = self.set_file_sizes(ContainerLog,
                                        'long_text',
                                        'log_size',
                                        'run__end_time')
        dataset_total = self.set_file_sizes(Dataset,
                                            'dataset_file',
                                            'dataset_size',
                                            'date_created')

        total_storage = remaining_storage = (
                container_total + sandbox_total + log_total + dataset_total)
        if total_storage <= start:
            storage_text = self.summarize_storage(container_total,
                                                  dataset_total,
                                                  sandbox_total,
                                                  log_total)
            logger.debug(u"No purge needed for %s: %s.",
                         filesizeformat(total_storage),
                         storage_text)
            return

        sandbox_ages = ContainerRun.find_unneeded().annotate(
            entry_type=Value('r', models.CharField()),
            age=sandbox_aging * (Now() - F('end_time'))).values_list(
            'entry_type',
            'id',
            'age').order_by()

        log_ages = ContainerLog.find_unneeded().annotate(
            entry_type=Value('l', models.CharField()),
            age=log_aging * (Now() - F('run__end_time'))).values_list(
            'entry_type',
            'id',
            'age').order_by()

        dataset_ages = Dataset.find_unneeded().annotate(
            entry_type=Value('d', models.CharField()),
            age=dataset_aging * (Now() - F('date_created'))).values_list(
            'entry_type',
            'id',
            'age').order_by()

        purge_counts = Counter()
        max_purge_dates = {}
        min_purge_dates = {}
        purge_entries = sandbox_ages.union(log_ages,
                                           dataset_ages,
                                           all=True).order_by('-age')
        while remaining_storage > stop:
            entry_count = 0
            for entry_type, entry_id, age in purge_entries[:batch_size]:
                entry_count += 1
                if entry_type == 'r':
                    run = ContainerRun.objects.get(id=entry_id)
                    entry_size = run.sandbox_size
                    entry_date = run.end_time
                    logger.debug("Purged container run %d containing %s.",
                                 run.pk,
                                 filesizeformat(entry_size))
                    try:
                        run.delete_sandbox()
                    except OSError:
                        logger.error(u"Failed to purge container run %d at %r.",
                                     run.id,
                                     run.sandbox_path,
                                     exc_info=True)
                        run.sandbox_path = ''
                    run.save()
                elif entry_type == 'l':
                    log = ContainerLog.objects.get(id=entry_id)
                    entry_size = log.log_size
                    entry_date = log.run.end_time
                    logger.debug("Purged container log %d containing %s.",
                                 log.id,
                                 filesizeformat(entry_size))
                    log.long_text.delete()
                else:
                    assert entry_type == 'd'
                    dataset = Dataset.objects.get(id=entry_id)
                    entry_size = dataset.dataset_size
                    dataset_total -= dataset.dataset_size
                    entry_date = dataset.date_created
                    logger.debug("Purged dataset %d containing %s.",
                                 dataset.pk,
                                 filesizeformat(entry_size))
                    dataset.dataset_file.delete()
                purge_counts[entry_type] += 1
                purge_counts[entry_type + ' bytes'] += entry_size
                # PyCharm false positives...
                # noinspection PyUnresolvedReferences
                min_purge_dates[entry_type] = min(entry_date,
                                                  min_purge_dates.get(entry_type, entry_date))
                # noinspection PyUnresolvedReferences
                max_purge_dates[entry_type] = max(entry_date,
                                                  max_purge_dates.get(entry_type, entry_date))
                remaining_storage -= entry_size
                if remaining_storage <= stop:
                    break
            if entry_count == 0:
                break
        for entry_type, entry_name in (('r', 'container run'),
                                       ('l', 'container log'),
                                       ('d', 'dataset')):
            purged_count = purge_counts[entry_type]
            if not purged_count:
                continue
            min_purge_date = min_purge_dates[entry_type]
            max_purge_date = max_purge_dates[entry_type]
            collective = entry_name + pluralize(purged_count)
            bytes_removed = purge_counts[entry_type + ' bytes']
            start_text = naturaltime(min_purge_date)
            end_text = naturaltime(max_purge_date)
            date_range = (start_text
                          if start_text == end_text
                          else start_text + ' to ' + end_text)
            logger.info("Purged %d %s containing %s from %s.",
                        purged_count,
                        collective,
                        filesizeformat(bytes_removed),
                        date_range)
        if remaining_storage > stop:
            storage_text = self.summarize_storage(container_total,
                                                  dataset_total)
            logger.error('Cannot reduce storage to %s: %s.',
                         filesizeformat(stop),
                         storage_text)
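The ordering of the purge above hinges on the aging annotation: Value() tags each row with a type marker, and the age is a weighted duration, so container runs, logs and datasets can be interleaved in a single union. A sketch of that annotation in isolation, with a hypothetical weight and the plain manager standing in for find_unneeded():

 from django.db import models
 from django.db.models import F, Value
 from django.db.models.functions import Now

 SANDBOX_AGING = 2.0  # hypothetical weight

 ContainerRun.objects.annotate(
     entry_type=Value('r', models.CharField()),
     age=SANDBOX_AGING * (Now() - F('end_time')),
 ).values_list('entry_type', 'id', 'age').order_by()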
Example #7
 def get_substr(self):
     return Substr(self.source_expressions[0],
                   self.source_expressions[1] * Value(-1))
Example #8
 def __init__(self, expression, text, replacement=Value(''), **extra):
     super().__init__(expression, text, replacement, **extra)
Example #9
 def __init__(self, expression, length, fill_text=Value(' '), **extra):
     if not hasattr(
             length,
             'resolve_expression') and length is not None and length < 0:
         raise ValueError("'length' must be greater or equal to 0.")
     super().__init__(expression, length, fill_text, **extra)
Example #10
 def get_substr(self):
     return Substr(self.source_expressions[0], Value(1),
                   self.source_expressions[1])
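The four snippets above appear to be pieces of Django's Right, Replace, LPad and Left text functions, each defaulting an argument with Value(). A usage sketch against a hypothetical Author model:

 from django.db.models import Value
 from django.db.models.functions import Left, LPad, Replace, Right

 Author.objects.annotate(
     initial=Left('name', 1),                # Substr(name, Value(1), 1)
     last_three=Right('name', 3),            # Substr(name, 3 * Value(-1))
     padded=LPad('name', 10, Value('.')),    # fill_text defaults to Value(' ')
     no_spaces=Replace('name', Value(' ')),  # replacement defaults to Value('')
 )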
Example #11
 def test_exact_with_expression(self):
     self.assertSequenceEqual(
         NullableIntegerArrayModel.objects.filter(field__exact=[Value(1)]),
         self.objs[:1],
     )
Example #12
 def build_coach_condition(self, role_visibility):
     return [BooleanComparison(role_visibility, '=', Value(roles.COACH))]
Example #13
from copy import copy
Example #14
 def as_mysql(self, compiler, connection, **extra_context):
     clone = self.copy()
     # If no precision is provided, set it to the maximum.
     if len(clone.source_expressions) < 2:
         clone.source_expressions.append(Value(100))
     return clone.as_sql(compiler, connection, **extra_context)
Example #15
 def build_kind_condition(self, kind_id, value, comparison='='):
     return [BooleanComparison(kind_id, comparison, Value(value))]
Example #16
 def __init__(self, expression, **extra):
     super().__init__(Value(expression), **extra)
Example #17
class AgencyViewSet(viewsets.ModelViewSet):
    """API views for Agency"""

    # pylint: disable=too-many-public-methods
    queryset = (Agency.objects.order_by("id").select_related(
        "jurisdiction", "parent", "appeal_agency").prefetch_related(
            Prefetch(
                "emails",
                queryset=EmailAddress.objects.filter(
                    status="good",
                    agencyemail__request_type="primary",
                    agencyemail__email_type="to",
                ),
                to_attr="primary_emails",
            ),
            Prefetch(
                "phones",
                queryset=PhoneNumber.objects.filter(
                    type="fax",
                    status="good",
                    agencyphone__request_type="primary"),
                to_attr="primary_faxes",
            ),
            Prefetch(
                "addresses",
                queryset=Address.objects.filter(
                    agencyaddress__request_type="primary"),
                to_attr="primary_addresses",
            ),
            "types",
        ).annotate(
            average_response_time_=Coalesce(
                ExtractDay(
                    Avg(
                        F("foiarequest__datetime_done") -
                        F("foiarequest__composer__datetime_submitted"))),
                Value(0),
            ),
            fee_rate_=Coalesce(
                100 * CountWhen(foiarequest__price__gt=0,
                                output_field=FloatField()) /
                NullIf(
                    Count("foiarequest"), Value(0), output_field=FloatField()),
                Value(0),
            ),
            success_rate_=Coalesce(
                100 * CountWhen(
                    foiarequest__status__in=["done", "partial"],
                    output_field=FloatField(),
                ) / NullIf(
                    Count("foiarequest"), Value(0), output_field=FloatField()),
                Value(0),
            ),
            number_requests=Count("foiarequest"),
            number_requests_completed=CountWhen(foiarequest__status="done"),
            number_requests_rejected=CountWhen(foiarequest__status="rejected"),
            number_requests_no_docs=CountWhen(foiarequest__status="no_docs"),
            number_requests_ack=CountWhen(foiarequest__status="ack"),
            number_requests_resp=CountWhen(foiarequest__status="processed"),
            number_requests_fix=CountWhen(foiarequest__status="fix"),
            number_requests_appeal=CountWhen(foiarequest__status="appealing"),
            number_requests_pay=CountWhen(foiarequest__status="payment"),
            number_requests_partial=CountWhen(foiarequest__status="partial"),
            number_requests_lawsuit=CountWhen(foiarequest__status="lawsuit"),
            number_requests_withdrawn=CountWhen(
                foiarequest__status="abandoned"),
        ))
    serializer_class = AgencySerializer
    # don't allow ordering by computed fields
    ordering_fields = [
        f for f in AgencySerializer.Meta.fields
        if f not in ("absolute_url", "average_response_time", "fee_rate",
                     "success_rate") and not f.startswith(("has_", "number_"))
    ]

    def get_queryset(self):
        """Filter out non-approved agencies for non-staff"""
        if self.request.user.is_staff:
            return self.queryset
        else:
            return self.queryset.filter(status="approved")

    class Filter(django_filters.FilterSet):
        """API Filter for Agencies"""

        jurisdiction = django_filters.NumberFilter(
            field_name="jurisdiction__id")
        types = django_filters.CharFilter(field_name="types__name",
                                          lookup_expr="iexact")

        class Meta:
            model = Agency
            fields = ("name", "status", "jurisdiction", "types",
                      "requires_proxy")

    filterset_class = Filter
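The fee_rate_ and success_rate_ annotations above share a guard against division by zero: NullIf turns a zero denominator into NULL, and Coalesce(..., Value(0)) maps the resulting NULL back to 0. A sketch of the same idea, using the stock Count(filter=...) in place of the project's CountWhen helper:

 from django.db.models import Count, FloatField, Q, Value
 from django.db.models.functions import Coalesce, NullIf

 Agency.objects.annotate(
     success_rate_=Coalesce(
         100.0 * Count('foiarequest',
                       filter=Q(foiarequest__status__in=['done', 'partial'])) /
         NullIf(Count('foiarequest'), Value(0), output_field=FloatField()),
         Value(0),
         output_field=FloatField(),
     ),
 )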
Example #18
 def test_contains_including_expression(self):
     self.assertSequenceEqual(
         NullableIntegerArrayModel.objects.filter(
             field__contains=[2, Value(6) / Value(2)], ),
         self.objs[2:3],
     )
Example #19
 def do_filter_ranking(self, engine_slug, queryset, search_text):
     """Ranks the given queryset according to the relevance of the given search text."""
     return queryset.annotate(
         watson_rank=Value(1.0, output_field=FloatField()))
Example #20
 def as_sqlite(self, compiler, connection, **extra_context):
     clone = self.copy()
     if len(self.source_expressions) < 4:
         # Always provide the z parameter for ST_Translate
         clone.source_expressions.append(Value(0))
     return super(Translate, clone).as_sqlite(compiler, connection, **extra_context)
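A hypothetical GeoDjango usage of the Translate function shown above; on SQLite the override appends Value(0) as the z offset, since SpatiaLite's ST_Translate expects all three offsets, and copying the expression first keeps that extra argument from leaking into the shared source_expressions:

 from django.contrib.gis.db.models.functions import Translate

 # Hypothetical model with a 'point' geometry field: shift every point by (1, 2).
 City.objects.annotate(moved=Translate('point', 1.0, 2.0))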
Example #21
def phase01a(request, previewMode=False):
    # assignmentId for the front-end submit JavaScript
    assignmentId = request.GET.get('assignmentId')
    # Need to check
    if request.method == 'POST':
        postList = pushPostList(request)

        # Get the Q and Ans for the current question; there should be at least one Q&A for the whole set
        questions = request.POST.getlist('data_q[]')
        answers = request.POST.getlist('data_a[]')

        # print("I got questions: ", questions)
        # print("I got answers: ", answers)
        # retrieve the json data for updating skip count for the previous questions
        validation_list = request.POST.getlist('data[]')

        correct_qs = []
        for q in questions:
            text = q.replace(' ', '+')
            url = f'https://api.textgears.com/check.php?text={text}&key=SFCKdx4GHmSC1j6H'
            response = requests.get(url)
            wordsC = response.json()
            # print(wordsC)
            for err in wordsC['errors']:
                bad = err['bad']
                good = err['better']
                if good:
                    q = q.replace(bad, good[0])
            correct_qs.append(q)

        # Query list for the old data in the table
        old_Qs = list(
            Question.objects.filter(isFinal=True).values_list('text', 'id'))
        # print("old questions", old_Qs)

        questions = Question.objects.bulk_create([
            Question(text=que,
                     isFinal=False,
                     imageID=list(ImageModel.objects.filter(id__in=postList)),
                     hit_id=assignmentId) for que in correct_qs
        ])
        new_Qs = [
            (que.text, que.id) for que in questions
        ]  #list(map(attrgetter('text', 'id'), questions)) # don't know which is better speedwise
        # print("new question", new_Qs)

        # Call the NLP function and get the results back; they indicate whether each new question gets merged or kept.
        # The backend calls NLP and returns a boolean and a string telling whether a new entry will be created.
        # exist_q should tell which question each new one got merged into.
        acceptedList, id_merge, id_move = send__receive_data(new_Qs, old_Qs)
        id_merge = {int(k): v for k, v in id_merge.items()}
        id_move = {int(k): v for k, v in id_move.items()}
        # print("acceptedList is: ", acceptedList)
        #print("id_merge is: ", id_merge)
        # print("id_move is: ", id_move)

        Question.objects.filter(id__in=acceptedList).update(isFinal=True)
        #Question.objects.filter(id__in=[que.id for que in questions if que.id not in id_merge]).update(isFinal=True)

        # Store id_merge under mergeParent in the database
        id_merge_sql = Case(
            *[When(id=new, then=Value(old)) for new, old in id_merge.items()])
        Question.objects.filter(id__in=id_merge).update(
            mergeParent=id_merge_sql)

        answers = Answer.objects.bulk_create([
            Answer(question_id=id_merge.get(que.id, que.id),
                   text=ans,
                   hit_id=assignmentId,
                   imgset=-1) for que, ans in zip(questions, answers)
        ])

        with transaction.atomic():
            id_move_sql = Case(*[
                When(question_id=bad, then=Value(good))
                for bad, good in id_move.items()
            ])
            Answer.objects.filter(question_id__in=id_move).update(
                question_id=id_move_sql)
            id_move_sql = Case(*[
                When(id=bad, then=Value(good))
                for bad, good in id_move.items()
            ])
            Question.objects.filter(id__in=id_move).update(
                isFinal=False, mergeParent=id_move_sql)
            Question.objects.filter(id__in=id_move.values()).update(
                isFinal=True)

        return HttpResponse(status=201)

    # Get rounds played in total and by the current player
    rounds, roundsnum = popGetList(
        ImageModel.objects.filter(img__startswith=KEYRING).values_list(
            'id', flat=True))

    if len(rounds.post) >= ImageModel.objects.filter(
            img__startswith=KEYRING).count():
        # push all to waiting page
        return over(request, 'phase01a')

    # Images that will be sent to the front-end, expiring in 300 seconds (temporary);
    # sending 4 images at a time
    data = [i.img.url for i in ImageModel.objects.filter(id__in=roundsnum)]
    data.extend([None] * (3 - len(data)))
    # print("I got: ",     serving_img_url)
    # All previous question pairs that will be sent to the front-end

    # Get all the instructions
    instructions = Phase01_instruction.get_queryset(Phase01_instruction) or [
        'none'
    ]

    # Get text instructions
    text_inst = TextInstruction.objects.get(phase='01a')

    # Get all of the questions
    previous_questions = list(
        Question.objects.filter(isFinal=True).values_list('text', flat=True))

    return render(
        request, 'phase01a.html', {
            'url': data,
            'imgnum': roundsnum,
            'questions': previous_questions,
            'assignmentId': assignmentId,
            'previewMode': previewMode,
            'instructions': instructions,
            'text_inst': text_inst,
            'NUMROUNDS': NUMROUNDS[phase01a.__name__],
            'object': OBJECT_NAME_PLURAL
        })
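The mergeParent update in the view above packs a whole id-to-id mapping into a single UPDATE by combining Case/When with Value. The same pattern in isolation, with a hypothetical mapping:

 from django.db.models import Case, Value, When

 id_merge = {12: 3, 15: 7}  # hypothetical: new question id -> surviving question id

 # One UPDATE ... SET mergeParent = CASE WHEN id=12 THEN 3 WHEN id=15 THEN 7 END
 Question.objects.filter(id__in=id_merge).update(
     mergeParent=Case(*[When(id=new, then=Value(old))
                        for new, old in id_merge.items()]),
 )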
Example #22
    def get_context_data(self, **kwargs):
        context = super(CommentedDetailView, self).get_context_data(**kwargs)
        queryset = Comment.objects.filter(page=self.get_comment_page())
        context['has_comments'] = queryset.exists()
        queryset = queryset.select_related('author__user').defer('author__about').annotate(revisions=Count('versions'))

        if self.request.user.is_authenticated:
            queryset = queryset.annotate(vote_score=Coalesce(RawSQLColumn(CommentVote, 'score'), Value(0)))
            profile = self.request.user.profile
            unique_together_left_join(queryset, CommentVote, 'comment', 'voter', profile.id)
            context['is_new_user'] = (not self.request.user.is_staff and
                                      not profile.submission_set.filter(points=F('problem__points')).exists())
        context['comment_list'] = queryset

        return context
Example #23
 def get_annotation(self, cte):
     return Coalesce(cte.col.assessment_count,
                     Value(0),
                     output_field=IntegerField())