Ejemplo n.º 1
0
    def get_queryset(self):
        """Recommend recipes that use the fridge's soon-to-expire products.

        A recipe qualifies when it contains at least ~70% of the ingredient
        categories expiring within the next 3 days.  The result then honours
        the usual name/ingredients/tags/difficulty/meal query-string filters
        and the requested ordering.
        """
        f_id = self.kwargs['fridge_id']
        fridge_ingredients = Product.objects.filter(fridge_id=f_id)

        # Categories of products expiring in less than 3 days.
        expiring_ingredients = []
        for ingredient in fridge_ingredients:
            time_left = ingredient.expiration_date.date() - datetime.date.today()
            if time_left.days < 3:
                expiring_ingredients.append(ingredient.category)
        if not expiring_ingredients:
            return Recipe.objects.none()

        # PERF: the threshold is loop-invariant; the original recomputed it
        # for every recipe.
        required_ingredients = round(0.7 * len(expiring_ingredients))
        recipes = Recipe.objects.all()
        recommendations = set()
        for recipe in recipes:
            # recipe.ingredients rows look like (category, ...) tuples.
            recipe_ingredients = [row[0] for row in recipe.ingredients]
            present_ingredients = sum(
                1 for ingredient in expiring_ingredients
                if ingredient in recipe_ingredients)
            if present_ingredients >= required_ingredients:
                recommendations.add(recipe.id)

        queryset = recipes.filter(id__in=recommendations)

        # Optional query-string filters.
        recipe_name = self.request.query_params.get('name', None)
        ingredients = self.request.query_params.get('ingredients', None)
        tags = self.request.query_params.get('tags', None)
        difficulty = self.request.query_params.get('difficulty', None)
        meal = self.request.query_params.get('meal', None)
        order = self.request.query_params.get('order', None)
        # Ordering keys: popularity / name / rating / ratings count / prep time,
        # each ascending or descending.
        order_dict = {'pp': '-popularity', 'na': 'recipe_name', 'nd': '-recipe_name', 'ra': 'rating', 'rd': '-rating',
                      'pa': 'ratings_num', 'pd': '-ratings_num', 'ta': 'prep_time', 'td': '-prep_time'}
        # popularity = ratings_num * rating^2 + (comments - ratings_num),
        # all computed in SQL.
        queryset = queryset.annotate(
            ratings_num=Count('ratings'),
            rating=Coalesce(Avg('ratings__rating'), 0),
            popularity=Cast(Count('ratings'), FloatField()) * Cast(Coalesce(Avg('ratings__rating'), 1) ** 2,
                                                                FloatField()) + (
                               Cast(Count('comments'), FloatField()) - Cast('ratings_num', FloatField()))
        )
        if recipe_name is not None:
            queryset = queryset.filter(recipe_name__icontains=recipe_name)
        if ingredients is not None:
            queryset = queryset.filter(ingredients__contains=ingredients)
        if tags is not None:
            queryset = queryset.filter(tags__contains=tags)
        if difficulty is not None:
            queryset = queryset.filter(difficulty__contains=difficulty)
        if meal is not None:
            queryset = queryset.filter(meal__contains=meal)
        # Unknown or missing order keys fall back to popularity.
        return queryset.order_by(order_dict.get(order, order_dict['pp']))
Ejemplo n.º 2
0
def leaderboard(request, challenge_phase_split_id):
    """Returns leaderboard for a corresponding Challenge Phase Split"""

    # check if the challenge exists or not
    try:
        challenge_phase_split = ChallengePhaseSplit.objects.get(
            pk=challenge_phase_split_id)
    except ChallengePhaseSplit.DoesNotExist:
        response_data = {'error': 'Challenge Phase Split does not exist'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Get the leaderboard associated with the Challenge Phase Split
    leaderboard = challenge_phase_split.leaderboard

    # Get the default order by key to rank the entries on the leaderboard.
    # BUG FIX: the original used a bare ``except:``, which also swallows
    # SystemExit/KeyboardInterrupt; only a missing key (or a non-dict
    # schema) should be handled here.
    try:
        default_order_by = leaderboard.schema['default_order_by']
    except (KeyError, TypeError):
        response_data = {
            'error':
            'Sorry, Default filtering key not found in leaderboard schema!'
        }
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Exclude the submissions done by members of the host team
    # while populating leaderboard
    challenge_obj = challenge_phase_split.challenge_phase.challenge
    challenge_hosts_emails = challenge_obj.creator.get_all_challenge_host_email(
    )
    leaderboard_data = LeaderboardData.objects.exclude(
        submission__created_by__email__in=challenge_hosts_emails)

    # Get all the successful submissions related to the challenge phase split
    leaderboard_data = leaderboard_data.filter(
        challenge_phase_split=challenge_phase_split,
        submission__is_public=True,
        submission__is_flagged=False,
        submission__status=Submission.FINISHED).order_by('created_at')
    # Pull the ranking metric out of the JSON ``result`` column in SQL.
    leaderboard_data = leaderboard_data.annotate(
        filtering_score=RawSQL('result->>%s', (default_order_by, ),
                               output_field=FloatField())).values(
                                   'id',
                                   'submission__participant_team__team_name',
                                   'challenge_phase_split', 'result',
                                   'filtering_score', 'leaderboard__schema',
                                   'submission__submitted_at')

    # Rank by the default metric, best first.
    sorted_leaderboard_data = sorted(leaderboard_data,
                                     key=lambda k: float(k['filtering_score']),
                                     reverse=True)

    # Keep only each team's best entry (the first one seen in sorted order).
    distinct_sorted_leaderboard_data = []
    team_list = []

    for data in sorted_leaderboard_data:
        if data['submission__participant_team__team_name'] in team_list:
            continue
        else:
            distinct_sorted_leaderboard_data.append(data)
            team_list.append(data['submission__participant_team__team_name'])

    # Project each result dict onto the schema's declared label order.
    leaderboard_labels = challenge_phase_split.leaderboard.schema['labels']
    for item in distinct_sorted_leaderboard_data:
        item['result'] = [
            item['result'][index] for index in leaderboard_labels
        ]

    paginator, result_page = paginated_queryset(
        distinct_sorted_leaderboard_data,
        request,
        pagination_class=StandardResultSetPagination())

    challenge_host_user = is_user_a_host_of_challenge(request.user,
                                                      challenge_obj.pk)

    # Show the Private leaderboard only if the user is a challenge host
    if challenge_host_user:
        response_data = result_page
        return paginator.get_paginated_response(response_data)

    # Check if challenge phase leaderboard is public for participant user or not
    elif challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC:
        response_data = {'error': 'Sorry, the leaderboard is not public!'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    else:
        response_data = result_page
        return paginator.get_paginated_response(response_data)
Ejemplo n.º 3
0
 def get(self, request):
     """Return the total inventory cost: sum of price * amount, 0 if empty."""
     aggregated = models.ResourceModel.objects.all().aggregate(
         total=Sum(F('price') * F('amount'), output_field=FloatField()))
     total_cost = aggregated['total'] or 0
     return Response(data={'total_cost': total_cost},
                     status=status.HTTP_200_OK)
Ejemplo n.º 4
0
    def total_referencia_por_mes(self, mes):
        """Sum of ``monto_pagar`` over reports whose cut-off month is *mes*.

        Returns ``None`` when no rows match (Django ``Sum`` semantics).
        """
        agregado = self.filter(reporte__corte_mes=mes).aggregate(
            total=Sum(F("monto_pagar"), output_field=FloatField()))
        return agregado["total"]
Ejemplo n.º 5
0
 def deuda_total(self):
     """Total outstanding debt: sum of ``deuda_pagar``; ``None`` if empty."""
     agregado = self.all().aggregate(
         total=Sum(F("deuda_pagar"), output_field=FloatField()))
     return agregado["total"]
Ejemplo n.º 6
0
def report(request):
    """Render the expense report page.

    Builds, entirely in SQL, expense totals over trailing 1/3/6/12-month
    windows: per category (``sums``), overall (``totals``), and for
    uncategorised expenses (``not_categorized``).  Only the first charge
    row of each expense (charge_number=1) is counted, multiplied by
    total_charges to recover the full amount.
    """
    # Per-category sums; pk__gt=0 skips the sentinel "uncategorised" row.
    sums = Category.objects.filter(pk__gt=0).annotate(
        lastMonth=Sum(
            Case(When(names__expense__date__gte=_top_of_months_ago(1),
                      names__expense__charge_number=1,
                      then=F('names__expense__charge') *
                      F('names__expense__total_charges')),
                 output_field=FloatField())),
        lastQuarter=Sum(
            Case(When(names__expense__date__gte=_top_of_months_ago(3),
                      names__expense__charge_number=1,
                      then=F('names__expense__charge') *
                      F('names__expense__total_charges')),
                 output_field=FloatField())),
        lastHalf=Sum(
            Case(When(names__expense__date__gte=_top_of_months_ago(6),
                      names__expense__charge_number=1,
                      then=F('names__expense__charge') *
                      F('names__expense__total_charges')),
                 output_field=FloatField())),
        lastYear=Sum(
            Case(When(names__expense__date__gte=_top_of_months_ago(12),
                      names__expense__charge_number=1,
                      then=F('names__expense__charge') *
                      F('names__expense__total_charges')),
                 output_field=FloatField())))

    # Grand totals over the same four windows.
    totals = Expense.objects.all().aggregate(
        lastMonth=Sum(
            Case(When(date__gte=_top_of_months_ago(1),
                      charge_number=1,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
        lastQuarter=Sum(
            Case(When(date__gte=_top_of_months_ago(3),
                      charge_number=1,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
        lastHalf=Sum(
            Case(When(date__gte=_top_of_months_ago(6),
                      charge_number=1,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
        lastYear=Sum(
            Case(When(date__gte=_top_of_months_ago(12),
                      charge_number=1,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())))

    # Uncategorised = total minus the sum of categorised expenses.
    # NOTE(review): mixes plain Python numbers (totals[...]) into ORM
    # expressions; Django coerces them via Value, but if any window had no
    # expenses at all, totals[...] is None and this would fail — confirm
    # that case cannot occur in practice.
    not_categorized = Expense.objects.filter(name__cat__pk=0).aggregate(
        lastMonth=totals['lastMonth'] - Sum(
            Case(When(date__gte=_top_of_months_ago(1),
                      charge_number=1,
                      name__cat__isnull=False,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
        lastQuarter=totals['lastQuarter'] - Sum(
            Case(When(date__gte=_top_of_months_ago(3),
                      charge_number=1,
                      name__cat__isnull=False,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
        lastHalf=totals['lastHalf'] - Sum(
            Case(When(date__gte=_top_of_months_ago(6),
                      charge_number=1,
                      name__cat__isnull=False,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
        lastYear=totals['lastYear'] - Sum(
            Case(When(date__gte=_top_of_months_ago(12),
                      charge_number=1,
                      name__cat__isnull=False,
                      then=F('charge') * F('total_charges')),
                 output_field=FloatField())),
    )

    return render(
        request, 'report/report.html', {
            'sums': sums,
            'not_categorized': not_categorized,
            'totals': totals,
            'income': settings.MONTHLY_INCOME,
        })
Ejemplo n.º 7
0
class GeometryDistance(GeoFunc):
    """Distance between two geometries via the SQL ``<->`` operator.

    Rendered as ``expr1 <-> expr2`` — no function name, the operator joins
    the two arguments (presumably PostGIS's index-assisted KNN distance
    operator; confirm against the backend in use).
    """
    output_field = FloatField()
    arity = 2
    function = ''  # no SQL function; arg_joiner supplies the operator
    arg_joiner = ' <-> '
    geom_param_pos = (0, 1)
Ejemplo n.º 8
0
class Median(Aggregate):
    """Median aggregate using an ordered-set aggregate function.

    Emits ``PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY expr)``, i.e. the
    continuous 50th percentile of the expression.
    """
    function = 'PERCENTILE_CONT'
    name = 'median'
    output_field = FloatField()
    template = '%(function)s(0.5) WITHIN GROUP (ORDER BY %(expressions)s)'
Ejemplo n.º 9
0
    def get_context_data(self, **kwargs):
        """Build contest statistics for the template context.

        Adds ``context['stats']`` — a JSON blob with per-problem result
        counts, per-problem AC rates, language usage, and per-language AC
        rates.  Raises Http404 unless the contest has ended or the
        requesting user may edit it.
        """
        context = super().get_context_data(**kwargs)

        if not (self.object.ended
                or self.object.is_editable_by(self.request.user)):
            raise Http404()

        queryset = Submission.objects.filter(contest_object=self.object)

        # ac_rate = 100 * (#AC submissions / #submissions), computed in SQL.
        ac_count = Count(
            Case(When(result='AC', then=Value(1)),
                 output_field=IntegerField()))
        ac_rate = CombinedExpression(ac_count / Count('problem'),
                                     '*',
                                     Value(100.0),
                                     output_field=FloatField())

        # (problem_code, result, count) triples for every distinct
        # problem/result pair in this contest.
        status_count_queryset = list(
            queryset.values('problem__code', 'result').annotate(
                count=Count('result')).values_list('problem__code', 'result',
                                                   'count'), )
        labels, codes = [], []
        contest_problems = self.object.contest_problems.order_by(
            'order').values_list('problem__name', 'problem__code')
        if contest_problems:
            labels, codes = zip(*contest_problems)
        num_problems = len(labels)
        # status_counts[i]: (result, count) pairs for the i-th problem.
        status_counts = [[] for i in range(num_problems)]
        for problem_code, result, count in status_count_queryset:
            if problem_code in codes:
                status_counts[codes.index(problem_code)].append(
                    (result, count))

        # result_data[category_code][i]: submission count of that result
        # category for the i-th problem (0 when absent).
        result_data = defaultdict(partial(list, [0] * num_problems))
        for i in range(num_problems):
            for category in _get_result_data(defaultdict(
                    int, status_counts[i]))['categories']:
                result_data[category['code']][i] = category['count']

        stats = {
            'problem_status_count': {
                'labels':
                labels,
                'datasets': [{
                    'label':
                    name,
                    'backgroundColor':
                    settings.DMOJ_STATS_SUBMISSION_RESULT_COLORS[name],
                    'data':
                    data,
                } for name, data in result_data.items()],
            },
            'problem_ac_rate':
            get_bar_chart(
                queryset.values(
                    'contest__problem__order',
                    'problem__name').annotate(ac_rate=ac_rate).order_by(
                        'contest__problem__order').values_list(
                            'problem__name', 'ac_rate'), ),
            'language_count':
            get_pie_chart(
                queryset.values('language__name').annotate(
                    count=Count('language__name')).filter(
                        count__gt=0).order_by('-count').values_list(
                            'language__name', 'count'), ),
            'language_ac_rate':
            get_bar_chart(
                queryset.values('language__name').annotate(
                    ac_rate=ac_rate).filter(ac_rate__gt=0).values_list(
                        'language__name', 'ac_rate'), ),
        }

        # Serialized once here; the template embeds it as a JS object.
        context['stats'] = mark_safe(json.dumps(stats))

        return context
Ejemplo n.º 10
0
class Attempt(Model):
    """A competitor's run through a question set within a competition."""

    def __str__(self):
        return u"({}) {}, {} - {}: {} ({} - {})".format(
            self.id, self.competitor, self.competition.slug,
            self.questionset.name, self.access_code, self.start, self.finish)

    access_code = CodeField()
    competitionquestionset = ForeignKey('CompetitionQuestionSet',
                                        on_delete=CASCADE)
    # Nullable: attempts may exist without an identified competitor.
    competitor = ForeignKey('Competitor',
                            null=True,
                            blank=True,
                            on_delete=CASCADE)
    # Set when the attempt is invalidated; None means the attempt is valid.
    invalidated_by = ForeignKey('AttemptInvalidation',
                                null=True,
                                blank=True,
                                on_delete=CASCADE)
    confirmed_by = ManyToManyField('Profile',
                                   through='AttemptConfirmation',
                                   blank=True)
    # Seed driving the deterministic question shuffling for this attempt.
    random_seed = IntegerField()
    start = DateTimeField(auto_now_add=True)
    finish = DateTimeField(null=True, blank=True)
    score = FloatField(null=True, blank=True)
    objects = AttemptManager()

    @property
    def competition(self):
        return self.competitionquestionset.competition

    @property
    def questionset(self):
        return self.competitionquestionset.questionset

    @property
    def valid(self):
        """True while no AttemptInvalidation points at this attempt."""
        return self.invalidated_by is None

    def reverse_question_mapping(self):
        return self.questionset.reverse_question_mapping(self.random_seed)

    def reverse_question_id(self, randomized_question_id):
        return self.reverse_question_mapping()[randomized_question_id]

    def question_mapping(self):
        return self.questionset.question_mapping(self.random_seed)

    def grade_answers(self,
                      grader_runtime_manager=None,
                      update_graded=False,
                      regrade=False):
        """Run the graders over this attempt's graded answers.

        With ``update_graded`` the set of GradedAnswer rows is refreshed
        first; with ``regrade`` every answer is re-scored (and, when
        combined with ``update_graded``, scoring is left to the refresh
        and this method returns early).  Otherwise only answers that have
        no score yet are graded.
        """
        grader_runtime_manager = graders.init_runtimes(grader_runtime_manager)
        if update_graded:
            self.update_graded_answers(
                grader_runtime_manager=grader_runtime_manager, regrade=regrade)
            if regrade:
                return
        graded_answers = self.gradedanswer_set.all()
        if not regrade:
            graded_answers = graded_answers.filter(score=None)
        # BUG FIX: QuerySets are immutable — the original discarded the
        # select_related() return value, so related rows were fetched one
        # query at a time inside the loop.  Keep the returned queryset.
        graded_answers = graded_answers.select_related('question',
                                                       'answer__value')
        for g_a in graded_answers:
            q = g_a.question
            grader = grader_runtime_manager.get_grader(
                q.verification_function, q.verification_function_type)
            g_a.score = grader(g_a.answer.value, self.random_seed, q)
            g_a.save()

    def update_graded_answers(self,
                              check_timestamp=False,
                              grader_runtime_manager=None,
                              regrade=False):
        """Rebuild GradedAnswer rows from the newest answer per question.

        Answers are scanned newest-first (highest id); the first answer
        seen for each randomized question wins.  With ``check_timestamp``
        only answers submitted before ``self.finish`` count; with
        ``regrade`` all existing GradedAnswer rows are dropped first.
        """
        answered_questions = set()
        graded_list = []
        if regrade:
            GradedAnswer.objects.filter(attempt=self).delete()
        answers = self.answer_set.order_by("-id")
        if check_timestamp:
            answers = answers.filter(timestamp__lte=self.finish)
        n_questions = self.questionset.questions.count()
        n_found = 0
        for a in answers.all():
            if a.randomized_question_id not in answered_questions:
                g_a = _create_graded(a, regrade, grader_runtime_manager)
                answered_questions.add(a.randomized_question_id)
                if g_a:
                    graded_list.append(g_a)
                n_found += 1
                # Every question has a latest answer now; stop scanning.
                if n_found >= n_questions:
                    break
        GradedAnswer.objects.bulk_create(graded_list)

    def latest_answers(self):
        """The graded (i.e. latest-per-question) answers of this attempt."""
        return self.gradedanswer_set.all()

    def graded_answers_by_question_id(self):
        """Map each question id (in question-set order) to its graded
        answer, or None for unanswered questions."""
        answered_questions = OrderedDict()
        graded_answers_dict = dict(
            (i.question_id, i) for i in self.gradedanswer_set.all())
        for q_id in self.questionset.ordered_question_ids():
            answered_questions[q_id] = graded_answers_dict.get(q_id, None)
        return answered_questions

    def latest_answers_sum(self):
        """Sum of all non-None scores, as a float."""
        return float(
            sum(a.score for a in self.gradedanswer_set.all()
                if a.score is not None))
Ejemplo n.º 11
0
def _test_action_aggregate(test_id):
    """Aggregate per-action JSON metrics for one test.

    Extracts count/errors/weight from the JSON ``data`` column and returns
    a dict with ``count_sum``, ``errors_sum`` and ``overall_avg`` (the
    weighted average response time: sum(weight) / sum(count)).
    """
    return TestActionAggregateData.objects.filter(test_id=test_id). \
        annotate(errors=RawSQL("((data->>%s)::numeric)", ('errors',))). \
        annotate(count=RawSQL("((data->>%s)::numeric)", ('count',))). \
        annotate(weight=RawSQL("((data->>%s)::numeric)", ('weight',))). \
        aggregate(count_sum=Sum(F('count'), output_field=FloatField()),
                  errors_sum=Sum(F('errors'), output_field=FloatField()),
                  overall_avg=Sum(F('weight')) / Sum(F('count')))


def dashboard_compare_tests_list(tests_list):
    '''Return comparison data for dashboard'''
    tests = []
    for t in tests_list:
        test_id = t['id']
        project = Project.objects.get(id=t['project_id'])

        # Previous test of the same project; falls back to the test itself
        # when it is the only one (the comparison is then a no-op).
        project_tests = Test.objects.filter(
            project=project, id__lte=test_id).order_by('-start_time')
        if project_tests.count() > 1:
            prev_test_id = project_tests[1].id
        else:
            prev_test_id = test_id

        # DRY: the identical aggregate chain was duplicated for the current
        # and previous test; factored into _test_action_aggregate().
        test_data = _test_action_aggregate(test_id)
        prev_test_data = _test_action_aggregate(prev_test_id)

        # Sums are None when the test has no rows; treat as 0% errors.
        try:
            errors_percentage = test_data['errors_sum'] * 100 / test_data[
                'count_sum']
        except (TypeError, ZeroDivisionError) as e:
            logger.error(e)
            errors_percentage = 0
        success_requests = 100 - errors_percentage
        # TODO: improve this part
        if success_requests >= 98:
            result = 'success'
        elif success_requests >= 95:
            result = 'warning'
        else:
            result = 'danger'
        tests.append({
            'project_name': t['project__project_name'],
            'display_name': t['display_name'],
            'parameters': t['parameters'],
            'start_time': t['start_time'],
            'success_requests': success_requests,
            'test_avg_response_times': test_data['overall_avg'],
            'prev_test_avg_response_times': prev_test_data['overall_avg'],
            'result': result,
            'prefix': project.confluence_page
        })
    return tests
Ejemplo n.º 12
0
class GradedAnswer(Model):
    """The graded (latest-per-question) answer of an attempt.

    Rows are created/refreshed by ``Attempt.update_graded_answers`` and
    scored by ``Attempt.grade_answers``.
    """
    attempt = ForeignKey('Attempt', on_delete=CASCADE)
    question = ForeignKey('Question', on_delete=CASCADE)
    answer = ForeignKey('Answer', on_delete=CASCADE)
    # None until the answer has been graded.
    score = FloatField(null=True)
Ejemplo n.º 13
0
class Question(Model):
    """A task/question with its resources, grading function and score bounds."""

    def __str__(self):
        return self.title

    country = CharField(max_length=5)
    slug = SlugField()
    identifier = CharField(max_length=64, unique=True)
    title = TextField()
    tags = TaggableManager(blank=True)
    version = CharField(max_length=255, default='0')
    verification_function_type = IntegerField(choices=GRADER_FUNCTION_TYPES,
                                              default=0)
    verification_function = TextField(default="", blank=True)
    license = TextField(default="Creative commons CC-By")
    language = CharField(max_length=7, choices=settings.LANGUAGES)
    authors = TextField(default="Various")
    # Score bounds used by the graders.
    min_score = FloatField(default=-1)
    none_score = FloatField(default=0)
    max_score = FloatField(default=1)

    def index(self):
        """Return the task's index resource (index.html/htm), or None."""
        for u in ['index.html', 'index.htm']:
            try:
                return self.resource_set.get(relative_url=u)
            except Exception:
                # TODO: handle exception
                pass
        return None

    def solution(self):
        """Return the solution resource (solution.html), or None."""
        for u in ['solution.html']:
            try:
                return self.resource_set.get(relative_url=u)
            except Exception:
                # TODO: handle exception
                pass
        return None

    def index_str(self, embed_resources=True):
        """Return the raw bytes of the index resource.

        NOTE(review): ``embed_resources`` is currently unused; kept for
        interface compatibility.
        """
        return self.index().as_bytes()

    def manifest(self, safe=True):
        """Return a manifest dict describing this question.

        With ``safe=False`` the solution resources and accepted answers are
        included as well.
        """
        manifest = dict()
        manifest['id'] = self.identifier
        manifest['language'] = self.language
        manifest['country'] = self.country
        manifest['title'] = self.title
        manifest['version'] = self.version
        manifest['authors'] = self.authors
        manifest['license'] = self.license
        manifest['task'] = []
        manifest['solution'] = []
        manifest['task_modules'] = []
        manifest['solution_modules'] = []
        manifest['grader_modules'] = []
        for i in self.resource_set.filter(part_of_solution=False):
            manifest['task'].append({
                "type": i.resource_type,
                'url': i.relative_url
            })
        if not safe:
            for i in self.resource_set.filter(part_of_solution=True):
                manifest['solution'].append({
                    "type": i.resource_type,
                    'url': i.relative_url
                })
            # NOTE(review): assumes ``accepted_answers`` is a comma-separated
            # string attribute defined elsewhere — confirm.
            manifest['acceptedAnswers'] = [
                int(i) for i in self.accepted_answers.split(',')
            ]
        return manifest

    @classmethod
    def from_zip(cls,
                 f,
                 identifier='-1',
                 language=None,
                 regenerate_modules=True,
                 regenerate_manifest=True,
                 remove_correct_answer_class=True):
        """Create a Question from a zip archive (mirrors ``from_dir``).

        BUG FIX: the original referenced undefined ``args``/``kwargs`` and
        called a name-mangled ``__question_from_dirlike``, so it raised
        NameError on every call.  The named parameters are now forwarded to
        ``_question_from_dirlike`` and the archive is closed even when
        loading fails.
        """
        z = zipfile.ZipFile(f)
        try:
            return _question_from_dirlike(
                cls,
                identifier=identifier,
                language=language,
                regenerate_modules=regenerate_modules,
                regenerate_manifest=regenerate_manifest,
                remove_correct_answer_class=remove_correct_answer_class,
                my_open=z.open,
                # Zip entries always use '/' separators.
                my_path=lambda *x: '/'.join(x),
                # Zip members need no explicit close.
                my_close=lambda x: None)
        finally:
            z.close()

    @classmethod
    def from_dir(cls, dirname, *args, **kwargs):
        """Create a Question from a directory on disk."""
        kwargs['my_open'] = open
        kwargs['my_path'] = lambda *x: os.path.join(dirname, *x)
        kwargs['my_close'] = lambda x: x.close()
        return _question_from_dirlike(cls, *args, **kwargs)
Ejemplo n.º 14
0
class ExchangeRate(Model):
    """Exchange rate of a currency on a given date."""
    currency = CharField(max_length=10)
    rate = FloatField()
    date = DateField()
Ejemplo n.º 15
0
    def ventas_by_guide(self, guide):
        """Return the sales rows of a guide together with their grand total.

        Non-annulled rows of the given guide with at least one return or
        payment are grouped per guide detail; ``subtotal`` is
        (count - returned) * unit price.  Returns ``(consulta, total)``
        where ``total`` is the sum of subtotals (0 when there are none).
        """
        consulta = self.filter(
            anulate=False,
            detail_asignation__asignation__detail_guide__anulate=False,
            detail_asignation__asignation__detail_guide__guide=guide,
        ).filter(
            Q(count_return__gt=0) | Q(count_payment__gt=0)
        ).values(
            'detail_asignation__asignation__detail_guide',
            'detail_asignation__asignation__detail_guide__guide__date_emission',
            'detail_asignation__asignation__detail_guide__precio_unitario',
            'detail_asignation__asignation__detail_guide__count'
        ).annotate(
            guia=Upper('detail_asignation__asignation__detail_guide__guide__number'),
            facture=Upper('detail_asignation__asignation__detail_guide__guide__facture'),
            proveedor=Upper('detail_asignation__asignation__detail_guide__guide__provider__name'),
            departamento=Upper('detail_asignation__asignation__detail_guide__guide__departamento__name'),
            devuelto=Sum('count_return'),
            magazine=Upper('detail_asignation__asignation__detail_guide__magazine_day__magazine__name'),
            dia=Upper('detail_asignation__asignation__detail_guide__magazine_day__day'),
            subtotal=Sum(
                (F('detail_asignation__asignation__detail_guide__count')*F('detail_asignation__asignation__detail_guide__precio_unitario'))-(F('count_return')*F('detail_asignation__asignation__detail_guide__precio_unitario')),output_field=FloatField()
            ),
        ).order_by('facture')

        monto_calculado = consulta.aggregate(
            monto=Sum('subtotal')
        )

        # Idiom fix: compare against None with identity, not equality.
        # Sum() yields None for an empty queryset.
        if monto_calculado['monto'] is not None:
            total = monto_calculado['monto']
        else:
            total = 0

        return consulta, total
Ejemplo n.º 16
0
 def calcular_total(self):
     """Recompute this order's total and persist it on the Venda row."""
     # Sum of quantity * unit price, each line reduced by its own discount.
     agg = self.itemdopedido_set.all().aggregate(
         tot_ped=Sum(
             (F('quantidade') * F('produto__preco')) * (1 - F('desconto')),
             output_field=FloatField()))
     # Empty order aggregates to None -> treat as 0, then subtract the
     # order-level taxes and discount.
     total = float(agg['tot_ped'] or 0)
     total -= float(self.impostos)
     total -= float(self.desconto)
     self.valor = total
     Venda.objects.filter(id=self.id).update(valor=total)
Ejemplo n.º 17
0
    def seguimiento_agentes(self):
        """Return per-agent follow-up totals for unpaid guides.

        Groups non-annulled rows of unpaid guides by vendor code and sums
        (assigned - returned - paid) * unit price per vendor.
        """
        # BUG FIX: the original used Python 2 ``print`` statements, which
        # are SyntaxErrors under Python 3; converted to print() calls
        # (debug output preserved).
        date = datetime.now().date()
        print('**************+')
        print(date)

        consulta = self.filter(
            anulate=False,
            detail_asignation__asignation__detail_guide__anulate=False,
            detail_asignation__asignation__detail_guide__guide__pagado=False,
        ).filter(
            Q(count_return__gt=0) | Q(count_payment__gt=0)
        ).values(
            'detail_asignation__vendor__cod'
        ).annotate(
            name=Upper('detail_asignation__vendor__name'),
            total=Sum(
                ((F('detail_asignation__count')-F('count_return')-F('count_payment'))*F('detail_asignation__asignation__detail_guide__precio_unitario')),output_field=FloatField()
            ),
        ).order_by('detail_asignation__vendor__cod')

        return consulta
Ejemplo n.º 18
0
 def total(self):
     """Order total: sum of price * quantity over all order lines.

     Returns 0.0 for an empty order.  BUG FIX: the original fell back to
     ``FloatField(0)`` — a model-field *instance*, not a number — which
     breaks any arithmetic or serialization done by callers.
     """
     return self.orderline_set.aggregate(
         total=Sum(F("price") * F("quantity"),
                   output_field=FloatField()))["total"] or 0.0
Ejemplo n.º 19
0
class Azimuth(GeoFunc):
    """Azimuth (angle) between two geometry arguments, as a float."""
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)
Ejemplo n.º 20
0
def ajax_productivity_per_task(request):
    """Return daily productivity data for a task as JSON.

    Response shape: {"days": [...], "production": [...], "goal_evolution": [...]}.

    Required GET parameter:
      task_id - the task's ID.

    Optional GET parameters:
      start_date - YYYY-MM-DD start of the window (default: 30 days ago).
      end_date   - YYYY-MM-DD end of the window (default: today).
      offset     - time-zone offset in +/-HH:MM form (default +00:00).

    Malformed parameters raise SuspiciousOperation (HTTP 400).
    """
    # Permission checks: requester must be a valid actor allowed to view
    # the task, from the same company.
    actor = get_actor_or_403(request)

    if "task_id" not in request.GET:
        raise SuspiciousOperation

    task_id = request.GET["task_id"]
    task = get_object_or_404(Task, pk=task_id)
    actor = check_task(request, task, for_view=True)
    same_company_or_403(actor, task.actor_id)

    # Get and parse the dates and the offset.
    start_date = request.GET.get("start_date", str(date.today() - timedelta(days=30)))
    end_date = request.GET.get("end_date", str(date.today()))
    # BUG FIX: raw strings — ``\d`` in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, an error in future Python).
    date_regex = re.compile(r"^\d{4}-\d{2}-\d{2}$")

    if date_regex.match(start_date) is None or date_regex.match(end_date) is None:
        raise SuspiciousOperation("Start/end date are not valid")

    offset = request.GET.get("offset", "+00:00")
    offset_regex = re.compile(r"^(\+|-)\d{2}:\d{2}$")

    if offset_regex.match(offset) is None:
        raise SuspiciousOperation("Time offset is not valid")

    # Append the time-of-day and offset so the strings parse with %z.
    start_date += " 00:00" + offset
    end_date += " 00:00" + offset

    dates = []
    str_dates = []

    # Slices drop the ':' inside the offset so strptime's %z accepts it.
    d1 = datetime.strptime(start_date[0:19] + start_date[20:22], '%Y-%m-%d %H:%M%z')
    d2 = datetime.strptime(end_date[0:19] + end_date[20:22], '%Y-%m-%d %H:%M%z')
    delta = d2 - d1  # timedelta

    for i in range(delta.days + 1):
        str_dates.append((d1 + timedelta(days=i)).date().strftime("%Y-%m-%d"))
        dates.append(d1 + timedelta(days=i))

    data = {"days": str_dates, "production": [], "goal_evolution": []}
    # For each date, aggregate that day's time logs and compute productivity
    # (produced units per hour).
    for log_date in dates:
        log = TimeLog.objects.filter(task_id=task_id, workDate__year=log_date.year, workDate__month=log_date.month,
                                     workDate__day=log_date.day).aggregate(
            total_duration=Sum(F("duration") / 60.0, output_field=FloatField()),
            total_produced_units=Sum(F("produced_units"), output_field=FloatField()))
        if log is None:
            # NOTE(review): aggregate() always returns a dict, so this
            # branch is defensive only; kept for safety.
            total_productivity = 0
            total_duration = 0
        else:
            total_produced_units = log["total_produced_units"]
            total_duration = log["total_duration"]
            if total_duration == 0 or total_duration is None:
                total_productivity = 0
            else:
                # If not produced but spent time, 0 productivity.
                if total_produced_units is None:
                    total_productivity = 0
                else:
                    total_productivity = total_produced_units / total_duration

        # Find the registered production-goal evolution closest to the date.
        expected_productivity = GoalEvolution.objects.filter(task_id_id=task_id,
                                                             registryDate__gte=log_date).first()

        # If no goal is found, or the date is after the last task update,
        # fall back to the task's current production goal.
        if total_duration == 0 or total_duration is None:
            expected_productivity = 0
        else:
            if expected_productivity is None or task.registryDate <= log_date:
                expected_productivity = task.production_goal
            else:
                expected_productivity = expected_productivity.production_goal

        data["production"].append(default_round(total_productivity))
        data["goal_evolution"].append(default_round(expected_productivity))

    return JsonResponse(data)
Ejemplo n.º 21
0
class LineLocatePoint(GeoFunc):
    """Geo database function wrapper (presumably ``ST_LineLocatePoint``
    — mirrors Django GIS's function of the same name; confirm backend).

    Takes two geometry arguments and yields a float result.
    """

    # The function returns a fraction/float, not a geometry.
    output_field = FloatField()
    # Exactly two arguments are accepted.
    arity = 2
    # Both argument positions hold geometries needing geo-param handling.
    geom_param_pos = (0, 1)
Ejemplo n.º 22
0
def ajax_profit_per_date(request, task_id):
    """Return daily and accumulated profit figures for a task as JSON.

    URL: task/ajax_profit_per_date/<task_id>

    Required parameter:
        task_id -- ID of the task.

    Optional GET parameters:
        start_date -- YYYY-MM-DD start of the measured period
                      (default: 30 days before today).
        end_date   -- YYYY-MM-DD end of the measured period (default: today).
        offset     -- time-zone offset in +/-HH:MM format (default: +00:00).

    Raises SuspiciousOperation (HTTP 400) when a parameter is malformed.

    Response shape:
        {"dates": [...], "expenses": [...], "income": [...],
         "acumExpenses": [...], "acumIncome": [...]}
    """
    task = get_object_or_404(Task, pk=task_id)
    actor = check_task(request, task, for_view=True)
    same_company_or_403(actor, task.actor_id)

    # Get and validate the dates.  Raw strings: "\d" inside a plain literal
    # is an invalid escape sequence and warns on modern Python.
    start_date = request.GET.get("start_date", str(date.today() - timedelta(days=30)))
    end_date = request.GET.get("end_date", str(date.today()))
    date_regex = re.compile(r"^\d{4}-\d{2}-\d{2}$")

    if date_regex.match(start_date) is None or date_regex.match(end_date) is None:
        raise SuspiciousOperation("Start/end date are not valid")

    offset = request.GET.get("offset", "+00:00")
    offset_regex = re.compile(r"^(\+|-)\d{2}:\d{2}$")

    if offset_regex.match(offset) is None:
        raise SuspiciousOperation("Time offset is not valid")

    # Append the time offset so the parsed datetimes are timezone-aware.
    start_date += " 00:00" + offset
    end_date += " 00:00" + offset

    # Build every date between start and end (inclusive).
    dates = []
    str_dates = []

    # Drop the ':' from the offset ("+HH:MM" -> "+HHMM") so strptime's %z
    # accepts it on every supported Python version.
    d1 = datetime.strptime(start_date[0:19] + start_date[20:22], '%Y-%m-%d %H:%M%z')
    d2 = datetime.strptime(end_date[0:19] + end_date[20:22], '%Y-%m-%d %H:%M%z')
    delta = d2 - d1  # timedelta

    for i in range(delta.days + 1):
        day = d1 + timedelta(days=i)
        str_dates.append(day.date().strftime("%Y-%m-%d"))
        dates.append(day)

    data = {'dates': str_dates, 'expenses': [], 'income': [], 'acumExpenses': [], 'acumIncome': []}

    # Profit: for each date, aggregate that day's logs and accumulate.
    for index, log_date in enumerate(dates):
        logs = TimeLog.objects.filter(task_id__id=task_id,
                                      workDate__year=log_date.year, workDate__month=log_date.month,
                                      workDate__day=log_date.day).distinct()
        # Expenses: hours worked (duration is stored in minutes) * hourly price.
        expenses = logs.aggregate(
            total_expenses=Sum(F("duration") / 60.0 * F("employee_id__price_per_hour"),
                               output_field=FloatField()))["total_expenses"]
        expenses = expenses if expenses is not None else 0
        # Income: produced units * the task's price per unit.
        income = logs.aggregate(total_income=Sum(F("task_id__price_per_unit") * F("produced_units"))
                                )["total_income"]
        income = income if income is not None else 0

        data['expenses'].append(default_round(expenses))
        data['income'].append(default_round(income))
        # Accumulate on top of the previous (already rounded) value so the
        # emitted series matches the historical behaviour exactly.
        if index == 0:
            data['acumExpenses'].append(default_round(expenses))
            data['acumIncome'].append(default_round(income))
        else:
            data['acumExpenses'].append(default_round(data['acumExpenses'][index - 1] + expenses))
            data['acumIncome'].append(default_round(data['acumIncome'][index - 1] + income))

    return JsonResponse(data)
Ejemplo n.º 23
0
 def deuda_acumulada(self):
     """Return the accumulated debt: the float sum of ``deuda_ocumulada``
     over every row in this queryset (``None`` when there are no rows)."""
     total_expr = Sum(F("deuda_ocumulada"), output_field=FloatField())
     return self.all().aggregate(total=total_expr)["total"]
Ejemplo n.º 24
0
def calculate_distinct_sorted_leaderboard_data(user, challenge_obj,
                                               challenge_phase_split,
                                               only_public_entries):
    """
    Function to calculate and return the sorted leaderboard data

    Arguments:
        user {[Class object]} -- User model object
        challenge_obj {[Class object]} -- Challenge model object
        challenge_phase_split {[Class object]} -- Challenge phase split model object
        only_public_entries {[Boolean]} -- Boolean value to determine if the user wants to include private entries or not

    Returns:
        [list] -- Ranked list of participant teams to be shown on leaderboard
        [status] -- HTTP status code (200/400)
    """
    # Get the leaderboard associated with the Challenge Phase Split
    leaderboard = challenge_phase_split.leaderboard

    # Get the default order by key to rank the entries on the leaderboard
    try:
        default_order_by = leaderboard.schema["default_order_by"]
    except KeyError:
        response_data = {
            "error":
            "Sorry, default_order_by key is missing in leaderboard schema!"
        }
        return response_data, status.HTTP_400_BAD_REQUEST

    # Exclude the submissions done by members of the host team
    # while populating leaderboard
    challenge_hosts_emails = (
        challenge_obj.creator.get_all_challenge_host_email())
    is_challenge_phase_public = challenge_phase_split.challenge_phase.is_public
    # Exclude the submissions from challenge host team to be displayed on the leaderboard of public phases
    challenge_hosts_emails = ([] if not is_challenge_phase_public else
                              challenge_hosts_emails)

    challenge_host_user = is_user_a_host_of_challenge(user, challenge_obj.pk)

    all_banned_email_ids = challenge_obj.banned_email_ids

    # Check if challenge phase leaderboard is public for participant user or not
    if (challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC
            and not challenge_host_user):
        response_data = {"error": "Sorry, the leaderboard is not public!"}
        return response_data, status.HTTP_400_BAD_REQUEST

    # Drop host-team submissions unless they are baselines: the exclude
    # matches (host email AND not baseline), so baseline entries survive.
    leaderboard_data = LeaderboardData.objects.exclude(
        Q(submission__created_by__email__in=challenge_hosts_emails)
        & Q(submission__is_baseline=False))

    # Get all the successful submissions related to the challenge phase split
    all_valid_submission_status = [Submission.FINISHED]

    # Handle the case for challenges with partial submission evaluation feature
    if (challenge_phase_split.challenge_phase.
            is_partial_submission_evaluation_enabled):
        all_valid_submission_status.append(Submission.PARTIALLY_EVALUATED)

    leaderboard_data = leaderboard_data.filter(
        challenge_phase_split=challenge_phase_split,
        submission__is_flagged=False,
        submission__status__in=all_valid_submission_status,
    ).order_by("-created_at")

    # Pull the ordering metric out of the JSON result/error columns
    # (Postgres ->> operator) so it can be sorted on as a float.
    leaderboard_data = leaderboard_data.annotate(
        filtering_score=RawSQL("result->>%s", (default_order_by, ),
                               output_field=FloatField()),
        filtering_error=RawSQL(
            "error->>%s",
            ("error_{0}".format(default_order_by), ),
            output_field=FloatField(),
        ),
    ).values(
        "id",
        "submission__participant_team",
        "submission__participant_team__team_name",
        "submission__participant_team__team_url",
        "submission__is_baseline",
        "submission__is_public",
        "challenge_phase_split",
        "result",
        "error",
        "filtering_score",
        "filtering_error",
        "leaderboard__schema",
        "submission__submitted_at",
        "submission__method_name",
        "submission__id",
        "submission__submission_metadata",
    )
    if only_public_entries:
        if challenge_phase_split.visibility == ChallengePhaseSplit.PUBLIC:
            leaderboard_data = leaderboard_data.filter(
                submission__is_public=True)

    all_banned_participant_team = []
    # NOTE: iterating the queryset here evaluates and caches it; the
    # update() calls below mutate the cached row dicts, so the zero-filled
    # scores are what the sort further down actually sees.
    for leaderboard_item in leaderboard_data:
        participant_team_id = leaderboard_item["submission__participant_team"]
        participant_team = ParticipantTeam.objects.get(id=participant_team_id)
        all_participants_email_ids = (
            participant_team.get_all_participants_email())
        # A team is banned when any of its members' emails is banned.
        for participant_email in all_participants_email_ids:
            if participant_email in all_banned_email_ids:
                all_banned_participant_team.append(participant_team_id)
                break
        # Normalize missing scores/errors to 0 so float() in the sort key
        # below never sees None.
        if leaderboard_item["error"] is None:
            leaderboard_item.update(filtering_error=0)
        if leaderboard_item["filtering_score"] is None:
            leaderboard_item.update(filtering_score=0)

    if challenge_phase_split.show_leaderboard_by_latest_submission:
        # Keep the "-created_at" queryset order: latest submission first.
        sorted_leaderboard_data = leaderboard_data
    else:
        # Rank by score, breaking ties by smaller error.
        sorted_leaderboard_data = sorted(
            leaderboard_data,
            key=lambda k: (
                float(k["filtering_score"]),
                float(-k["filtering_error"]),
            ),
            reverse=True if
            challenge_phase_split.is_leaderboard_order_descending else False,
        )

    # Keep only each team's best (first) entry, dropping banned teams.
    distinct_sorted_leaderboard_data = []
    team_list = []
    for data in sorted_leaderboard_data:
        if (data["submission__participant_team__team_name"] in team_list
                or data["submission__participant_team"]
                in all_banned_participant_team):
            continue
        elif data["submission__is_baseline"] is True:
            # Baseline entries are always shown and do not consume the
            # team's single leaderboard slot (team not added to team_list).
            distinct_sorted_leaderboard_data.append(data)
        else:
            distinct_sorted_leaderboard_data.append(data)
            team_list.append(data["submission__participant_team__team_name"])

    # Reshape each entry's result/error dicts into lists ordered by the
    # leaderboard's schema labels.
    leaderboard_labels = challenge_phase_split.leaderboard.schema["labels"]
    for item in distinct_sorted_leaderboard_data:
        item_result = []
        for index in leaderboard_labels:
            # Handle case for partially evaluated submissions
            if index in item["result"].keys():
                item_result.append(item["result"][index])
            else:
                item_result.append("#")
        item["result"] = item_result

        if item["error"] is not None:
            item["error"] = [
                item["error"]["error_{0}".format(index)]
                for index in leaderboard_labels
            ]

    return distinct_sorted_leaderboard_data, status.HTTP_200_OK
Ejemplo n.º 25
0
class ChimeSite(ShortID, Model):
    '''
    chime site model

    One saved CHIME scenario owned by a user.  The epidemiological fields
    map one-to-one onto the penn_chime ``Parameters`` object built by the
    :meth:`parameters` property; each field's meaning is in its help_text.
    '''

    # Bookkeeping fields.
    id = UUIDField(primary_key=True, default=uuid4, editable=False)
    created = DateTimeField(auto_now_add=True)
    updated = DateTimeField(null=True, default=None)

    user = ForeignKey(User, on_delete=CASCADE)
    name = CharField(max_length=255)

    # Model-input fields (rates are fractions in [0, 1], days are counts).
    population = BigIntegerField(
        default=3600000,
        validators=[MinValueValidator(1)],
        help_text='Regional population',
    )
    current_hospitalized = BigIntegerField(
        default=69,
        validators=[MinValueValidator(0)],
        help_text='Currently hospitalized COVID-19 patients',
    )
    date_first_hospitalized = DateField(
        null=True,
        blank=True,
        help_text='Date of first hospitalized COVID-19 case, if known',
    )
    doubling_time = FloatField(
        default=4.0,
        validators=[MinValueValidator(0.0)],
        help_text='Doubling time in days (up to today)',
    )
    hospitalized_days = IntegerField(
        default=7,
        validators=[MinValueValidator(0)],
        help_text='Average hospital length of stay (in days)',
    )
    hospitalized_rate = FloatField(
        default=0.025,
        validators=[MinValueValidator(0.00001), MaxValueValidator(1.0)],
        help_text='Hospitalization %(total infections)',
    )
    icu_days = IntegerField(
        default=9,
        validators=[MinValueValidator(0)],
        help_text='Average days in ICU',
    )
    icu_rate = FloatField(
        default=0.0075,
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text='ICU %(total infections)',
    )
    infectious_days = IntegerField(
        default=14,
        validators=[MinValueValidator(0)],
        help_text='Infectious days',
    )
    market_share = FloatField(
        default=0.15,
        validators=[MinValueValidator(0.00001), MaxValueValidator(1.0)],
        help_text='Hospital market share %',
    )
    n_days = IntegerField(
        default=100,
        validators=[MinValueValidator(0)],
        help_text='Number of days to project',
    )
    mitigation_date = DateField(
        blank=True,
        default=timezone.localdate,
        help_text='Date social distancing measures went into effect',
    )
    relative_contact_rate = FloatField(
        default=0.30,
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text='Social distancing (% reduction in social contact going forward)',
    )
    ventilated_days = IntegerField(
        default=10,
        validators=[MinValueValidator(0)],
        help_text='Average days on ventilator',
    )
    ventilated_rate = FloatField(
        default=0.005,
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text='Ventilated %(total infections)',
    )

    def __str__(self):
        # e.g. "My Hospital ab12cd 2020-04-01"
        return f'{self.name} {self.short_id} {self.created:%Y-%m-%d}'

    @property
    def parameters(self):
        '''
        return penn_chime Parameters object populated with instance values
        '''

        doubling_time = self.doubling_time

        # When the first-hospitalized date is known, pass doubling_time=None
        # (presumably so penn_chime infers it from that date — confirm).
        if self.date_first_hospitalized:
            doubling_time = None

        mitigation_date = self.mitigation_date
        # Clamp the contact rate to EPSILON to avoid a zero value; if the
        # clamp kicked in (original rate effectively zero), treat the
        # scenario as having no mitigation by dropping its date.
        relative_contact_rate = max(self.relative_contact_rate, EPSILON)

        if relative_contact_rate == EPSILON:
            mitigation_date = None

        return Parameters(
            current_date=timezone.localdate(),
            population=self.population,
            current_hospitalized=self.current_hospitalized,
            date_first_hospitalized=self.date_first_hospitalized,
            doubling_time=doubling_time,
            hospitalized=Disposition(
                days=self.hospitalized_days,
                rate=self.hospitalized_rate,
            ),
            icu=Disposition(
                days=self.icu_days,
                rate=self.icu_rate,
            ),
            infectious_days=self.infectious_days,
            market_share=self.market_share,
            n_days=self.n_days,
            mitigation_date=mitigation_date,
            relative_contact_rate=relative_contact_rate,
            ventilated=Disposition(
                days=self.ventilated_days,
                rate=self.ventilated_rate,
            ),
            recovered=0,  # not implemented
        )
Ejemplo n.º 26
0
class SQLQuery(models.Model):
    """A single SQL query captured while profiling a request.

    ``time_taken`` (milliseconds) is derived from ``start_time``/``end_time``
    on save, and the owning :class:`Request`'s ``num_sql_queries`` counter
    is kept in sync on save and delete.
    """
    query = TextField()
    start_time = DateTimeField(null=True, blank=True, default=timezone.now)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)  # duration in ms, set in save()
    request = ForeignKey(
        Request,
        related_name='queries',
        null=True,
        blank=True,
        db_index=True,
        on_delete=models.CASCADE,
    )
    traceback = TextField()
    objects = SQLQueryManager()

    @property
    def traceback_ln_only(self):
        """Every other line of the stored traceback (drops the
        interleaved source lines)."""
        return '\n'.join(self.traceback.split('\n')[::2])

    @property
    def formatted_query(self):
        """The query re-indented with upper-cased keywords, for display."""
        return sqlparse.format(self.query, reindent=True, keyword_case='upper')

    # TODO: Surely a better way to handle this? May return false positives
    @property
    def num_joins(self):
        """Rough JOIN count via substring matching (may over-count)."""
        return self.query.lower().count('join ')

    @property
    def tables_involved(self):
        """
        A really another rudimentary way to work out tables involved in a
        query.
        TODO: Can probably parse the SQL using sqlparse etc and pull out table
        info that way?
        """
        components = [x.strip() for x in self.query.split()]
        tables = []

        for idx, component in enumerate(components):
            # TODO: If django uses aliases on column names they will be falsely
            # identified as tables...
            if component.lower() == 'from' or component.lower(
            ) == 'join' or component.lower() == 'as':
                try:
                    _next = components[idx + 1]
                    if not _next.startswith('('):  # Subquery
                        stripped = _next.strip().strip(',')

                        if stripped:
                            tables.append(stripped)
                except IndexError:  # Reach the end
                    pass
        return tables

    @atomic()
    def save(self, *args, **kwargs):
        # Derive the duration in milliseconds from the recorded timestamps.
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000

        # Only bump the parent request's counter on first insert.
        if not self.pk:
            if self.request:
                self.request.num_sql_queries += 1
                self.request.save(update_fields=['num_sql_queries'])

        super(SQLQuery, self).save(*args, **kwargs)

    @atomic()
    def delete(self, *args, **kwargs):
        # The request FK is nullable -- mirror save()'s guard so deleting a
        # query with no request does not raise AttributeError on None.
        if self.request:
            self.request.num_sql_queries -= 1
            self.request.save()
        super(SQLQuery, self).delete(*args, **kwargs)
Ejemplo n.º 27
0
def find_car(request):
    """Render the car-search page.

    On POST, filter the cars by availability window, engine size (CC),
    year, fuel type, daily price and transmission; on GET every car is
    shown.  Results are ordered by how close each car's CC is to the
    current lessee's average CC, then by price, then by newest year.
    """
    searchResult = Car.objects.all()
    # Defaults so the date sanity check and the template context below also
    # work for GET requests -- previously these names were only bound inside
    # the POST branch, so a plain GET raised NameError.
    firstDay = ''
    lastDay = ''
    CC = ''
    year = ''
    fuelType = ''
    pricePerDay = ''
    Transmission = ''

    if request.method == 'POST':
        firstDay = request.POST.get('firstDay')
        lastDay = request.POST.get('lastDay')
        CC = request.POST.get('CC')
        year = request.POST.get('Year')
        fuelType = request.POST.get('fuelType')
        pricePerDay = request.POST.get('pricePerDay')
        Transmission = request.POST.get('Transmission')

        # The car must be available over the whole requested window.
        searchResult = Car.objects.filter(firstAvailableDay__lte=firstDay)
        searchResult = searchResult.filter(lastAvailableDay__gte=lastDay)

        # Engine-size bands (note: '2000+' is exclusive (__gt), matching the
        # historical behaviour, while the year/price '+' bands use __gte).
        if CC == '500-1000':
            searchResult = searchResult.filter(CC__gte=500).filter(CC__lte=1000)
        elif CC == '1000-1500':
            searchResult = searchResult.filter(CC__gte=1000).filter(CC__lte=1500)
        elif CC == '1500-2000':
            searchResult = searchResult.filter(CC__gte=1500).filter(CC__lte=2000)
        elif CC == '2000+':
            searchResult = searchResult.filter(CC__gt=2000)

        # Construction-year bands.
        if year == '1990-1999':
            searchResult = searchResult.filter(year__gte=1990).filter(year__lte=1999)
        elif year == '2000-2005':
            searchResult = searchResult.filter(year__gte=2000).filter(year__lte=2005)
        elif year == '2006-2010':
            searchResult = searchResult.filter(year__gte=2006).filter(year__lte=2010)
        elif year == '2011-2015':
            searchResult = searchResult.filter(year__gte=2011).filter(year__lte=2015)
        elif year == '2016+':
            searchResult = searchResult.filter(year__gte=2016)

        # Fuel type: map the form label to the stored constant.
        fuel_map = {'Gasoline': 'GASOLINE', 'Diesel': 'DIESEL',
                    'Gas': 'GAS', 'Electric': 'ELECTRIC'}
        if fuelType in fuel_map:
            searchResult = searchResult.filter(fuelType=fuel_map[fuelType])

        # Daily-price bands.
        if pricePerDay == '10-15':
            searchResult = searchResult.filter(pricePerDay__gte=10).filter(pricePerDay__lte=15)
        elif pricePerDay == '15-20':
            searchResult = searchResult.filter(pricePerDay__gte=15).filter(pricePerDay__lte=20)
        elif pricePerDay == '20-25':
            searchResult = searchResult.filter(pricePerDay__gte=20).filter(pricePerDay__lte=25)
        elif pricePerDay == '25+':
            searchResult = searchResult.filter(pricePerDay__gte=25)

        if Transmission == 'Auto':
            searchResult = searchResult.filter(transmission='AUTO')
        elif Transmission == 'Manual':
            searchResult = searchResult.filter(transmission='MANUAL')

    currentLessee = LesseeProfile.objects.filter(user=request.user).first()
    # Users without a lessee profile have no average CC; fall back to 0 so
    # the ordering below still works instead of raising AttributeError.
    currentAverageCc = currentLessee.average_cc if currentLessee else 0
    # Rank by |car CC - lessee's average CC| (database ABS of the difference).
    searchResult = searchResult.annotate(averagegap=Func(ExpressionWrapper(
        F('CC') - currentAverageCc, output_field=FloatField()),
                                                         function='ABS'))

    searchResult = searchResult.order_by('averagegap', 'pricePerDay', '-year')

    user_type = userType(request)

    # ISO YYYY-MM-DD strings compare lexicographically == chronologically.
    if firstDay > lastDay:
        searchResult = Car.objects.none()
    if not searchResult:
        message = "Your search returned no results"
    else:
        message = ""

    context = {
        'username': request.user.username,
        'type': user_type,
        'title': "Find Car",
        'searchResult': searchResult,
        'firstDay': firstDay,
        'lastDay': lastDay,
        'CC': CC,
        'year': year,
        'fuelType': fuelType,
        'pricePerDay': pricePerDay,
        'Transmission': Transmission,
        'Message': message
    }
    return render(request, 'myCarApp/find_car.html', context)
Ejemplo n.º 28
0
class Request(models.Model):
    """A profiled HTTP request/response recorded by silk.

    ``time_taken`` (milliseconds) is derived from ``start_time``/``end_time``
    on save; old rows are pruned probabilistically via ``garbage_collect``.
    """
    id = CharField(max_length=36, default=uuid4, primary_key=True)
    path = CharField(max_length=190, db_index=True)
    query_params = TextField(blank=True, default='')
    raw_body = TextField(blank=True, default='')
    body = TextField(blank=True, default='')
    method = CharField(max_length=10)
    start_time = DateTimeField(default=timezone.now, db_index=True)
    view_name = CharField(max_length=190,
                          db_index=True,
                          blank=True,
                          default='',
                          null=True)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)  # duration in ms, set in save()
    encoded_headers = TextField(blank=True, default='')  # stores json
    meta_time = FloatField(null=True, blank=True)
    meta_num_queries = IntegerField(null=True, blank=True)
    meta_time_spent_queries = FloatField(null=True, blank=True)
    pyprofile = TextField(blank=True, default='')
    prof_file = FileField(max_length=300, null=True, storage=silk_storage)

    @property
    def total_meta_time(self):
        """Profiling meta time plus time spent on SQL queries (0 for unset)."""
        return (self.meta_time or 0) + (self.meta_time_spent_queries or 0)

    @property
    def profile_table(self):
        """Yield rows of the parsed cProfile output, replacing 'file.py:NN'
        locations with HTML links; header and builtin rows pass through."""
        # Raw string: '\.' / '\:' in a plain literal are invalid escape
        # sequences on modern Python.  Compiled once, outside the loop.
        location_re = re.compile(r'(?P<src>.*\.py):(?P<num>[0-9]+).*')
        for n, columns in enumerate(parse_profile(self.pyprofile)):
            location = columns[-1]
            if n and '{' not in location and '<' not in location:
                m = location_re.search(location)
                if m is None:
                    # Location not in the expected 'file.py:line' form --
                    # emit the row unchanged instead of crashing on
                    # None.groupdict().
                    yield columns
                    continue
                group = m.groupdict()
                src = group['src']
                num = group['num']
                name = 'c%d' % n
                fmt = '<a name={name} href="?pos={n}&file_path={src}&line_num={num}#{name}">{location}</a>'
                rep = fmt.format(**dict(group, **locals()))
                yield columns[:-1] + [mark_safe(rep)]
            else:
                yield columns

    # defined in atomic transaction within SQLQuery save()/delete() as well
    # as in bulk_create of SQLQueryManager
    # TODO: This is probably a bad way to do this, .count() will prob do?
    num_sql_queries = IntegerField(default=0)  # TODO replace with count()

    @property
    def time_spent_on_sql_queries(self):
        """
        TODO: Perhaps there is a nicer way to do this with Django aggregates?
        My initial thought was to perform:
        SQLQuery.objects.filter.aggregate(Sum(F('end_time')) - Sum(F('start_time')))
        However this feature isnt available yet, however there has been talk
        for use of F objects within aggregates for four years
        here: https://code.djangoproject.com/ticket/14030. It looks
        like this will go in soon at which point this should be changed.
        """
        # NOTE(review): assumes every related query has time_taken set;
        # a row saved without end_time would make this raise -- confirm.
        return sum(x.time_taken for x in SQLQuery.objects.filter(request=self))

    @property
    def headers(self):
        """The request headers as a case-insensitive dict (decoded from JSON)."""
        if self.encoded_headers:
            raw = json.loads(self.encoded_headers)
        else:
            raw = {}

        return CaseInsensitiveDictionary(raw)

    @property
    def content_type(self):
        """The Content-Type header value, or None when absent."""
        return self.headers.get('content-type', None)

    @classmethod
    def garbage_collect(cls, force=False):
        """ Remove Request/Responses when we are at the SILKY_MAX_RECORDED_REQUESTS limit
        Note that multiple in-flight requests may call this at once causing a
        double collection """
        check_percent = SilkyConfig().SILKY_MAX_RECORDED_REQUESTS_CHECK_PERCENT
        check_percent /= 100.0
        # Probabilistic trigger: only run on ~check_percent of calls unless forced.
        if check_percent < random.random() and not force:
            return
        target_count = SilkyConfig().SILKY_MAX_RECORDED_REQUESTS
        # Since garbage collection is probabilistic, the target count should
        # be lowered to account for requests before the next garbage collection
        if check_percent != 0:
            target_count -= int(1 / check_percent)
        # Prune the oldest rows down to the target count.
        prune_count = max(cls.objects.count() - target_count, 0)
        prune_rows = cls.objects.order_by('start_time') \
            .values_list('id', flat=True)[:prune_count]
        cls.objects.filter(id__in=list(prune_rows)).delete()

    def save(self, *args, **kwargs):
        # sometimes django requests return the body as 'None'
        if self.raw_body is None:
            self.raw_body = ''

        if self.body is None:
            self.body = ''

        # Derive the duration in milliseconds from the recorded timestamps.
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000

        super(Request, self).save(*args, **kwargs)
        Request.garbage_collect(force=False)
Ejemplo n.º 29
0
	def get_initial_queryset(self):
		"""Build the base queryset of active loans (estado=1) for the client
		given in the ``id_cliente`` GET parameter, annotated with payment
		totals and the outstanding capital/interest balances."""
		client_id = self.request.GET.get('id_cliente', None)

		# Per-loan discount total, correlated on the outer loan's pk.
		descuentos_prestam = (
			Descuento.objects.filter(prestamo=OuterRef('pk'))
			.values('prestamo')
			.annotate(descuento_prestamo=Coalesce(Sum('valor_descuento'), V(0)))
			.values('descuento_prestamo')
		)

		# Outstanding capital = lent capital - capital payments - discounts.
		saldo_capital_expr = ExpressionWrapper(
			F('capital_prestado')
			- Coalesce(Sum('abono__valor_abono_capital'), V(0))
			- Coalesce(Subquery(descuentos_prestam.values('descuento_prestamo')), 0),
			output_field=FloatField(),
		)
		# Interest owed = outstanding capital * applied percentage.
		saldo_interes_expr = F('saldo_capital') * F('porcentaje_aplicado__porcentaje') / 100.0

		return (
			Prestamo.objects
			.values('id', 'capital_prestado', 'fecha_prestamo', 'porcentaje_aplicado__porcentaje')
			.annotate(
				abonos_capital=Coalesce(Sum('abono__valor_abono_capital'), V(0)),
				abonos_interes=Coalesce(Sum('abono__valor_abono_interes'), V(0)),
				saldo_capital=saldo_capital_expr,
				saldo_interes=saldo_interes_expr,
			)
			.filter(cliente__id=client_id, estado=1)
			.order_by('-fecha_prestamo')
		)
Ejemplo n.º 30
0
Archivo: models.py Proyecto: inoa/silk
class SQLQuery(models.Model):
    """A single SQL statement captured while profiling a :class:`Request`."""

    query = TextField()
    start_time = DateTimeField(null=True, blank=True, default=timezone.now)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)
    # Nullable: a query may be recorded outside any request context.
    request = ForeignKey('Request', related_name='queries', null=True, blank=True, db_index=True)
    traceback = TextField()
    objects = SQLQueryManager()

    # TODO: Document SILKY_PROJECT_ROOT_DIR and SILKY_PROJECT_EXCLUDE_DIRS
    project_dir = getattr(settings, 'SILKY_PROJECT_ROOT_DIR', None) or ''
    exclude_dirs = getattr(settings, 'SILKY_PROJECT_EXCLUDE_DIRS', None) or []
    if project_dir and project_dir[-1] != os.sep:
        project_dir += os.sep
    # Matches traceback lines whose file lives under the project root,
    # capturing the file path, line number and method name.
    traceback_pattern = re.compile(r'File "%s(?P<file>.*)", line (?P<line>\d+), in (?P<method>.*)'
                                   % re.escape(project_dir))
    # Frames from silk itself and from Django's ORM are never "project" code.
    exclude_patterns = [
        re.compile(r'File ".*silk%ssql.*", line \d+, in .*' % os.sep),
        re.compile(r'File ".*django%sdb%smodels.*", line \d+, in .*' % (os.sep, os.sep)),
    ]
    for exclude_dir in exclude_dirs:
        if exclude_dir[-1] != os.sep:
            exclude_dir += os.sep
        exclude_patterns.append(re.compile(r'File "%s.*", line \d+, in .*'
                                           % re.escape(exclude_dir)))

    def match_tb(self, tb):
        """Match *tb* against the project traceback pattern.

        Returns the regex match object for a project-owned frame, and a
        falsy value (``None``/``False``) when the line is outside the
        project root or hits an exclude pattern. Callers rely only on
        truthiness plus ``.group(...)`` on truthy results.
        """
        tb = tb.strip()
        match = self.traceback_pattern.match(tb)
        if match:
            for exclude_pattern in self.exclude_patterns:
                if exclude_pattern.match(tb):
                    return False
        return match

    @property
    def traceback_ln_only(self):
        # Every other line: drop the source-code line under each "File ..." line.
        return '\n'.join(self.traceback.split('\n')[::2])

    @property
    def traceback_ln_only_with_highlights(self):
        # Pair each retained traceback line with whether it is project code.
        return [(tb, bool(self.match_tb(tb))) for tb in self.traceback.split('\n')[::2]]

    @property
    def formatted_query(self):
        """Pretty-printed SQL for display."""
        return sqlparse.format(self.query, reindent=True, keyword_case='upper')

    # TODO: Surely a better way to handle this? May return false positives
    @property
    def num_joins(self):
        return self.query.lower().count('join ')

    @property
    def tables_involved(self):
        """A really rather rudimentary way to work out tables involved in a query.
        TODO: Can probably parse the SQL using sqlparse etc and pull out table info that way?"""
        components = [x.strip() for x in self.query.split()]
        tables = []
        for idx, c in enumerate(components):
            # TODO: If django uses aliases on column names they will be falsely identified as tables...
            if c.lower() in ['from', 'join', 'as', 'into', 'update']:
                try:
                    nxt = components[idx + 1]
                    if nxt.startswith('('):  # Subquery
                        continue
                    if nxt.startswith('@'):  # Temporary table
                        continue
                    stripped = nxt.strip().strip(',')
                    if stripped:
                        tables.append(stripped)
                except IndexError:  # Reach the end
                    pass
        return tables

    @property
    def last_project_method(self):
        """First project-owned frame, rendered as ``method in file:line``."""
        for tb in self.traceback.split('\n')[::2]:
            match = self.match_tb(tb)
            if match:
                return "%s in %s:%s" % (match.group('method'), match.group('file'), match.group('line'))
        return ""

    def calculate_time_taken(self):
        """Derive ``time_taken`` (ms) when both timestamps are present."""
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000

    @transaction.atomic()
    def save(self, *args, **kwargs):
        self.calculate_time_taken()
        super(SQLQuery, self).save(*args, **kwargs)

    @transaction.atomic()
    def delete(self, *args, **kwargs):
        # The request FK is nullable (see field declaration above); guard
        # before decrementing its counter to avoid an AttributeError when
        # deleting a query that was recorded outside any request.
        if self.request is not None:
            self.request.num_sql_queries -= 1
            self.request.save()
        super(SQLQuery, self).delete(*args, **kwargs)