Example #1
    def get_time_series(self, dynamic_criteria, all_criteria, request,
                        time_since, time_until, interval):
        """ Get the stats time series """
        model_name = apps.get_model(self.model_app_name, self.model_name)
        kwargs = {}
        dynamic_kwargs = []
        if request and not request.user.is_superuser and self.user_field_name:
            kwargs[self.user_field_name] = request.user
        for m2m in all_criteria:
            criteria = m2m.criteria
            # fixed mapping values are passed into kwargs
            if criteria.criteria_fix_mapping:
                for key in criteria.criteria_fix_mapping:
                    # value => criteria.criteria_fix_mapping[key]
                    kwargs[key] = criteria.criteria_fix_mapping[key]

            # dynamic mapping values are passed into kwargs
            dynamic_key = "select_box_dynamic_%i" % m2m.id
            if dynamic_key in dynamic_criteria:
                if dynamic_criteria[dynamic_key] != '':
                    dynamic_values = dynamic_criteria[dynamic_key]
                    dynamic_field_name = m2m.get_dynamic_criteria_field_name()
                    criteria_key = 'id' if dynamic_field_name == '' else dynamic_field_name
                    if isinstance(dynamic_values, (list, tuple)):
                        single_value = False
                    else:
                        dynamic_values = (dynamic_values, )
                        single_value = True

                    for dynamic_value in dynamic_values:
                        criteria_value = m2m.get_dynamic_choices(
                            time_since, time_until)[dynamic_value]
                        if isinstance(criteria_value, (list, tuple)):
                            criteria_value = criteria_value[0]
                        else:
                            criteria_value = dynamic_value
                        if single_value:
                            kwargs[criteria_key] = criteria_value
                        else:
                            dynamic_kwargs.append(
                                Q(**{criteria_key: criteria_value}))

        aggregate_dict = {}
        i = 0
        if not dynamic_kwargs:
            dynamic_kwargs = [None]

        for dkwargs in dynamic_kwargs:
            i += 1
            if not self.type_operation_field_name:
                self.type_operation_field_name = 'Count'
            if not self.operation_field_name:
                self.operation_field_name = 'id'

            operation = {
                'AvgCountPerInstance':
                lambda field_name, distinct, dkwargs: ExpressionWrapper(
                    1.0 * Count(field_name, distinct=distinct, filter=dkwargs)
                    / Count('id',
                            distinct=True,
                            filter=Q(**{field_name + "__isnull": False})),
                    output_field=models.FloatField()),
                'Count':
                lambda field_name, distinct, dkwargs: Count(
                    field_name, distinct=distinct, filter=dkwargs),
                'Sum':
                lambda field_name, distinct, dkwargs: Sum(
                    field_name, distinct=distinct, filter=dkwargs),
                'Avg':
                lambda field_name, distinct, dkwargs: Avg(
                    field_name, distinct=distinct, filter=dkwargs),
                'StdDev':
                lambda field_name, distinct, dkwargs: StdDev(field_name,
                                                             filter=dkwargs),
                'Max':
                lambda field_name, distinct, dkwargs: Max(field_name,
                                                          filter=dkwargs),
                'Min':
                lambda field_name, distinct, dkwargs: Min(field_name,
                                                          filter=dkwargs),
                'Variance':
                lambda field_name, distinct, dkwargs: Variance(field_name,
                                                               filter=dkwargs),
            }
            aggregate_dict['agg_%i' %
                           i] = operation[self.type_operation_field_name](
                               self.operation_field_name, self.distinct,
                               dkwargs)

        # TODO: maybe backport values_list support back to django-qsstats-magic and use it again for the query
        time_range = {
            '%s__range' % self.date_field_name: (time_since, time_until)
        }
        qs = model_name.objects
        qs = qs.filter(**time_range)
        qs = qs.filter(**kwargs)
        qs = qs.annotate(d=Trunc(self.date_field_name, interval))
        qs = qs.values_list('d')
        qs = qs.order_by('d')
        qs = qs.annotate(**aggregate_dict)
        return qs
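
The building block used throughout Example #1 is conditional aggregation: Count, Sum, Avg and the other aggregates accept a filter= argument (Django 2.0+). A minimal standalone sketch of that pattern, assuming a hypothetical Order model with status and total fields:

from django.db.models import Avg, Count, Q, Sum

# Aggregate over the whole table, but only count/sum the rows matching the Q filter.
stats = Order.objects.aggregate(
    paid_count=Count('id', filter=Q(status='paid')),
    paid_total=Sum('total', filter=Q(status='paid')),
    avg_total=Avg('total'),
)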
Example #2
def get_awcs_covered_sector_data(domain,
                                 config,
                                 loc_level,
                                 location_id,
                                 show_test=False):
    group_by = ['%s_name' % loc_level]

    config['month'] = datetime(*config['month'])

    level = config['aggregation_level']
    data = AggAwcMonthly.objects.filter(**config).values(*group_by).annotate(
        states=Sum('num_launched_states')
        if level <= 1 else Max('num_launched_states'),
        districts=Sum('num_launched_districts')
        if level <= 2 else Max('num_launched_districts'),
        blocks=Sum('num_launched_blocks')
        if level <= 3 else Max('num_launched_blocks'),
        supervisors=Sum('num_launched_supervisors')
        if level <= 4 else Max('num_launched_supervisors'),
        awcs=Sum('num_launched_awcs')
        if level <= 5 else Max('num_launched_awcs'),
    ).order_by('%s_name' % loc_level)

    if not show_test:
        data = apply_exclude(domain, data)

    chart_data = {
        'blue': [],
    }

    tooltips_data = defaultdict(lambda: {
        'districts': 0,
        'blocks': 0,
        'states': 0,
        'supervisors': 0,
        'awcs': 0
    })

    for row in data:
        name = row['%s_name' % loc_level]
        awcs = row['awcs'] or 0
        supervisors = row['supervisors'] or 0
        blocks = row['blocks'] or 0
        districts = row['districts'] or 0
        states = row['states'] or 0

        row_values = {
            'awcs': awcs,
            'supervisors': supervisors,
            'blocks': blocks,
            'districts': districts,
            'states': states,
        }
        for prop, value in row_values.items():
            tooltips_data[name][prop] += (value or 0)

    for name, value_dict in tooltips_data.items():
        chart_data['blue'].append([name, value_dict['awcs']])

    chart_data['blue'] = sorted(chart_data['blue'])

    if level == 1:
        prop = 'states'
    elif level == 2:
        prop = 'districts'
    elif level == 3:
        prop = 'blocks'
    elif level == 4:
        prop = 'supervisors'
    else:
        prop = 'awcs'

    total_awcs = sum([(x['awcs'] or 0) for x in tooltips_data.values()])
    total = sum([(x[prop] or 0) for x in tooltips_data.values()])

    info = _("{:s}<br /><br />"
             "Number of AWCs launched: {:d}".format(awcs_launched_help_text(),
                                                    total_awcs))
    if level != 5:
        info = _("{:s}<br /><br />"
                 "Number of AWCs launched: {:d} <br />"
                 "Number of {:s} launched: {:d}".format(
                     awcs_launched_help_text(), total_awcs, prop.title(),
                     total))

    return {
        "tooltips_data":
        dict(tooltips_data),
        "format":
        "number",
        "info":
        info,
        "chart_data": [{
            "values": chart_data['blue'],
            "key": "",
            "strokeWidth": 2,
            "classed": "dashed",
            "color": MapColors.BLUE
        }]
    }
Example #3
def leaderboard(request):

	teams = Submission.objects.exclude(auc_public__isnull=True).values('user').annotate(auc=Max('auc_public'), last_update=Max('created_at'), number=Count('submissionfile')).order_by('-auc')
	for team in teams:
		team['user'] = User.objects.get(pk=team['user'])
		team['user'].name

	return render_to_response(
		'submissions/leaderboard.html',
		{'teams': teams},
		context_instance=RequestContext(request)
	)
Example #4
 def get_next_invoice_id(self, owner):
     return (Invoice.objects.filter(owner=owner).aggregate(invoice_id=Max('invoice_id'))['invoice_id'] or 0) + 1
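
Example #4 computes the fallback and the increment in Python with "or 0"; the same calculation can be pushed into the database with Coalesce, as Example #10 below does. A minimal sketch of that variant, reusing the Invoice model and owner argument from above:

from django.db.models import Max
from django.db.models.functions import Coalesce

# Let the database supply the default (0) and the +1 in a single aggregate.
next_id = Invoice.objects.filter(owner=owner).aggregate(
    next_invoice_id=Coalesce(Max('invoice_id'), 0) + 1)['next_invoice_id']

Like the original, this read-then-use pattern is not safe against concurrent inserts without additional locking.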
Example #5
def dt_get_registrations_consultant_list(request):
    academic_period_id = request.POST.get('academic_period_id', None)
    status_id = request.POST.get('status_id', None)
    results = []
    role = get_role(request.user)

    columns = {
        'academic_period': 'academic_period',
        'status': 'status',
        'student_name': 'student_full_name',
        'consultant': 'consultant__common_name',
        'created': 'created',
        'registration_date': 'registration_date',
        'finance_date': 'finance_date',
        'customer_date': 'customer_date',
        'follow_up': 'costumer',
    }

    data_table = DataTable(request.POST, columns)

    market = get_user_market_or_main_market(request.user)

    query_set = Registration.objects.select_related(
        'costumer', 'consultant',
        'academic_period').active().annotate(student_full_name=Concat(
            'costumer__first_name', Value(' '),
            'costumer__last_name'), ).exclude(
                academic_period__start_date__year='2015')

    if role == 'corporate' or role == 'finance':
        pass
    elif role in ['coordinator', 'info']:
        query_set = query_set.by_consultantprofile_market(market)
    elif role in ['admin']:
        if 'market_id' in request.session:
            market_id = request.session['market_id']
            market = Market.objects.filter(pk=market_id).get()
            query_set = query_set.by_consultantprofile_market(market)
    else:
        query_set = query_set.filter(consultant=request.user)

    registration_list = []
    notes_dict = {}
    if role == "info" or role == "coordinator":
        # for info and coordinator roles we add the last note to each registration, which can cause a lot of extra queries
        notes_values = Note.objects.values('registration_id').annotate(
            max_id=Max('id'))
        recent_notes_ids = [k['max_id'] for k in notes_values]

        notes = Note.objects.filter(id__in=recent_notes_ids)

        for note in notes:
            notes_dict[note.registration_id] = note

    default_period = AcademicPeriod.objects.next_to_current_period()

    total = query_set.count()

    if data_table.search_value != "":
        try:
            query_set = query_set.filter(
                Q(student_full_name__icontains=data_table.search_value)
                | Q(consultant__common_name__icontains=data_table.search_value)
            )
        except ValueError:
            query_set = query_set.filter(
                Q(costumer__icontains=data_table.search_value))

    if academic_period_id:
        query_set = query_set.filter(academic_period_id=academic_period_id)

    if status_id:
        query_set = query_set.filter(status=status_id)
    else:
        query_set = query_set.filter(status__in=['1', '2', '3', '4'])

    records_filtered = query_set.count()

    query_set = query_set.order_by(*data_table.get_orderings())

    registrations = query_set[data_table.start:data_table.limit]

    for registration in registrations:
        # reset per registration so a previous note does not leak into rows without one
        extra_note = ''
        if registration.pk in notes_dict:
            extra_note = notes_dict[registration.pk].description

        result = {
            'id':
            registration.pk,
            'academic_period':
            registration.academic_period.get_year(),
            'status':
            registration.get_status_display(),
            'student_name':
            registration.costumer.get_full_name()
            if registration.costumer.get_full_name() else u'No Name',
            'consultant':
            registration.consultant.common_name,
            'created':
            registration.created.strftime("%Y-%m-%d")
            if registration.created else '',
            'registration_date':
            registration.registration_date.strftime("%Y-%m-%d")
            if registration.registration_date else '',
            'finance_date':
            registration.finance_date.strftime("%Y-%m-%d")
            if registration.finance_date else '',
            'customer_date':
            registration.customer_date.strftime("%Y-%m-%d")
            if registration.customer_date else '',
            'follow_up':
            extra_note if extra_note else ''
        }

        results.append(result)

    response = {
        'draw': data_table.draw,
        'recordsTotal': total,
        'recordsFiltered': records_filtered,
        'data': results
    }

    return JsonResponse(response, status=200)
Example #6
    def rows(self):
        rows = []
        products_ids = self.get_products_ids()

        if not self.config['location_id']:
            return rows

        location = SQLLocation.objects.get(location_id=self.config['location_id'])
        sql_locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
        is_mohsw = False
        stockouts_map = {}
        product_availabilities = {
            (pa['location_id'], pa['product']): (pa['without_stock'], pa['total'])
            for pa in ProductAvailabilityData.objects.filter(
                location_id__in=list(sql_locations.values_list('location_id', flat=True)),
                date__range=(self.config['startdate'], self.config['enddate'])
            ).values('location_id', 'product').annotate(without_stock=Avg('without_stock'), total=Max('total'))
        }
        if location.location_type.name == 'MOHSW':
            is_mohsw = True
            stockouts_map = self.get_stockouts_map(self.config['enddate'], location)

        for sql_location in sql_locations.exclude(is_archived=True):
            facilities = get_facilities(sql_location, self.config['domain'])
            facilities_count = facilities.count()

            soh_late, soh_not_responding, soh_on_time = self.get_soh_data(sql_location, facilities_count)
            if not is_mohsw:
                percent_stockouts = self.get_stockouts(facilities)
            else:
                if facilities_count > 0:
                    stockouts = stockouts_map.get(sql_location.location_id, 0)
                    percent_stockouts = stockouts * 100 / float(facilities_count)
                else:
                    percent_stockouts = 0

            row_data = self._format_row(
                percent_stockouts, soh_late, soh_not_responding, soh_on_time, sql_location
            )
            for product_id in products_ids:
                product_availability = product_availabilities.get((sql_location.location_id, product_id))
                if product_availability and product_availability[1] != 0:
                    row_data.append(
                        format_percent(
                            product_availability[0] * 100 / float(product_availability[1])
                        )
                    )
                else:
                    row_data.append("<span class='no_data'>No Data</span>")
            rows.append(row_data)
        return rows
Example #7
def user_statistic(request):
    current_user = User.objects.get(id=request.user.id)

    passed_questions_count = UserAnswer.objects.filter(user=current_user).count()
    passed_polls_count = Poll.objects.filter(useranswer__user=current_user).distinct().count()
    user_score = UserAnswer.objects.filter(user=current_user).aggregate(total_score=Sum("score"), min_score=Min("score"), max_score=Max("score"))

    import collections
    users_scores = collections.Counter()
    for answer in UserAnswer.objects.exclude(user__is_staff=True).exclude(user=current_user):
        users_scores[answer.user] += answer.score
    share = 100 / (len(users_scores) + 1)
    result = 0
    if not user_score["total_score"]:
        user_score["total_score"] = 0
    for _, score in users_scores.items():
        if score > user_score["total_score"]:
            result += share

    user_answers = {}
    for poll in Poll.objects.filter(useranswer__user=current_user).distinct().order_by("id"):
        user_questions = {}
        for question in UserAnswer.objects.filter(user=current_user).filter(poll=poll):
            answers_set = []
            for answer in question.answers.all():
                answers_set.append(answer.text)
            user_questions[question.question_in_poll.question.text] = answers_set
        user_answers[poll.title] = user_questions

    return render(request, "user_statistic.html", {
        "title":"Ваша статистика опросов",
        "answers_title":"Ваши ответы на вопросы",
        "passed_questions_count":passed_questions_count,
        "passed_polls_count":passed_polls_count,
        "score":user_score,
        "result":round(result),
        "user_answers":user_answers
        })
Example #8
def startRecording(source, recordingDir, recordingUrl, startTime,
                   maxFlightDuration, episode):
    if not source.videofeed_set.all():
        logging.info("video feeds set is empty")
        return
    videoFeed = source.videofeed_set.all()[0]

    # figure out next segment number for this source and episode
    try:
        maxSegmentNumber = SEGMENT_MODEL.get().objects.filter(
            episode=episode, source=source).aggregate(Max('segNumber'))
        segmentNumber = maxSegmentNumber['segNumber__max']
        recordedVideoDir = os.path.join(recordingDir,
                                        'Segment%03d' % segmentNumber)
        if not emptySegmentDir(recordedVideoDir):
            segmentNumber = segmentNumber + 1
            recordedVideoDir = os.path.join(recordingDir,
                                            'Segment%03d' % segmentNumber)

        # adjust start and end times for all prior segments
        existingSegments = SEGMENT_MODEL.get().objects.filter(source=source,
                                                              episode=episode)
        for segment in existingSegments:
            segment.adjustSegmentTimes()
    except:
        segmentNumber = 0
        recordedVideoDir = os.path.join(recordingDir,
                                        'Segment%03d' % segmentNumber)

    makedirsIfNeeded(recordedVideoDir)
    try:
        videoSettingses = SETTINGS_MODEL.get().objects.filter(
            width=videoFeed.settings.width, height=videoFeed.settings.height)
        videoSettings = videoSettingses.first()
    except:
        # make a new one
        videoSettings = SETTINGS_MODEL.get()()
        videoSettings.width = videoFeed.settings.width
        videoSettings.height = videoFeed.settings.height
        videoSettings.save()

    videoSegment, created = SEGMENT_MODEL.get().objects.get_or_create(
        directoryName="Segment",
        segNumber=segmentNumber,
        indexFileName="prog_index.m3u8",
        endTime=None,
        settings=videoSettings,
        source=source,
        episode=episode)
    videoSegment.startTime = startTime
    videoSegment.save()

    if settings.PYRAPTORD_SERVICE is True:
        pyraptord = getPyraptordClient()

    assetName = source.shortName
    vlcSvc = '%s_vlc' % assetName
    vlcCmd = (
        "%s %s --sout='#duplicate{dst=std{access=livehttp{seglen=6,splitanywhere=false,delsegs=false,numsegs=0,index=prog_index.m3u8,index-url=prog_index-#####.ts},mux=ts,dst=prog_index-#####.ts}}'"
        % (settings.XGDS_VIDEO_VLC_PATH, videoFeed.url))
    #     print vlcCmd
    if settings.PYRAPTORD_SERVICE is True:
        stopPyraptordServiceIfRunning(pyraptord, vlcSvc)
        pyraptord.updateServiceConfig(vlcSvc, {
            'command': vlcCmd,
            'cwd': recordedVideoDir
        })
        pyraptord.restart(vlcSvc)
        return vlcCmd
    return 'NO PYRAPTORD: ' + vlcCmd
Example #9
def get_level_info(student: Student) -> LevelInfoDict:
    """Uses a bunch of expensive database queries to compute a student's levels and data,
    returning the findings as a typed dictionary."""

    psets = PSet.objects.filter(student__user=student.user,
                                approved=True,
                                eligible=True)
    pset_data = psets.aggregate(
        clubs_any=Sum('clubs'),
        clubs_D=Sum('clubs', filter=Q(unit__code__startswith='D')),
        clubs_Z=Sum('clubs', filter=Q(unit__code__startswith='Z')),
        hearts=Sum('hours'),
    )
    total_clubs = ((pset_data['clubs_any'] or 0) +
                   (pset_data['clubs_D'] or 0) * BONUS_D_UNIT +
                   (pset_data['clubs_Z'] or 0) * BONUS_Z_UNIT)
    total_hearts = pset_data['hearts'] or 0

    total_diamonds = AchievementUnlock.objects.filter(
        user=student.user).aggregate(
            Sum('achievement__diamonds'))['achievement__diamonds__sum'] or 0

    quiz_attempts = ExamAttempt.objects.filter(student__user=student.user)
    quest_completes = QuestComplete.objects.filter(student__user=student.user)
    mock_completes = MockCompleted.objects.filter(student__user=student.user)
    mock_completes = mock_completes.select_related('exam')
    market_guesses = Guess.objects.filter(
        user=student.user,
        market__end_date__lt=timezone.now(),
    ).select_related('market')
    suggested_units_queryset = ProblemSuggestion.objects.filter(
        user=student.user,
        resolved=True,
        eligible=True,
    ).values_list(
        'unit__pk',
        'unit__group__name',
        'unit__code',
    )
    suggest_units_set: SuggestUnitSet = set(suggested_units_queryset)
    hints_written = Version.objects.get_for_model(Hint)  # type: ignore
    hints_written = hints_written.filter(revision__user_id=student.user.id)
    hints_written = hints_written.values_list('revision__date_created',
                                              flat=True)
    hint_spades = get_week_count(list(hints_written))

    total_spades = (quiz_attempts.aggregate(Sum('score'))['score__sum']
                    or 0) * 2
    total_spades += quest_completes.aggregate(
        Sum('spades'))['spades__sum'] or 0
    total_spades += market_guesses.aggregate(Sum('score'))['score__sum'] or 0
    total_spades += mock_completes.count() * 3
    total_spades += len(suggest_units_set)
    # TODO total_spades += hint_spades

    meters: FourMetersDict = {
        'clubs': Meter.ClubMeter(int(total_clubs)),
        'hearts': Meter.HeartMeter(int(total_hearts)),
        'diamonds': Meter.DiamondMeter(total_diamonds),
        'spades': Meter.SpadeMeter(int(total_spades)),
    }
    level_number = sum(meter.level
                       for meter in meters.values())  # type: ignore
    level = Level.objects.filter(
        threshold__lte=level_number).order_by('-threshold').first()
    level_name = level.name if level is not None else 'No Level'
    max_level = Level.objects.all().aggregate(max=Max('threshold'))['max'] or 0
    level_data: LevelInfoDict = {
        'psets': psets,
        'pset_data': pset_data,
        'meters': meters,
        'level_number': level_number,
        'level_name': level_name,
        'is_maxed': level_number >= max_level,
        # spade properties
        'quiz_attempts': quiz_attempts,
        'quest_completes': quest_completes,
        'market_guesses': market_guesses,
        'mock_completes': mock_completes,
        'suggest_unit_set': suggest_units_set,
        'hint_spades': hint_spades,
    }
    return level_data
Example #10
 def get_next_scoped_id(self):
     """Compute the next scoped_id."""
     query = (Solution.objects.filter(
         author=self.author, task=self.task).aggregate(
             next_scoped_id=(Coalesce(Max('scoped_id'), 0) + 1)))
     return query['next_scoped_id']
Example #11
 def get_queryset(self):
     qs = MasterDetailCrudPermission.ListView.get_queryset(self)
     qs = qs.annotate(pk_unico=Max('pk'))
     return qs
Example #12
 def data(self):
     return self.get_query(product=self.product).annotate(
         complete=Max('complete'))
Example #13
def get_awcs_covered_sector_data(domain,
                                 config,
                                 loc_level,
                                 location_id,
                                 show_test=False):
    group_by = ['%s_name' % loc_level]

    config['month'] = datetime(*config['month'])

    level = config['aggregation_level']
    data = AggAwcMonthly.objects.filter(**config).values(*group_by).annotate(
        states=Sum('num_launched_states')
        if level <= 1 else Max('num_launched_states'),
        districts=Sum('num_launched_districts')
        if level <= 2 else Max('num_launched_districts'),
        blocks=Sum('num_launched_blocks')
        if level <= 3 else Max('num_launched_blocks'),
        supervisors=Sum('num_launched_supervisors')
        if level <= 4 else Max('num_launched_supervisors'),
        awcs=Sum('num_launched_awcs')
        if level <= 5 else Max('num_launched_awcs'),
    ).order_by('%s_name' % loc_level)

    if not show_test:
        data = apply_exclude(domain, data)

    chart_data = {
        'blue': [],
    }

    tooltips_data = defaultdict(lambda: {
        'districts': 0,
        'blocks': 0,
        'states': 0,
        'supervisors': 0,
        'awcs': 0
    })

    loc_children = get_child_locations(domain, location_id, show_test)
    result_set = set()

    for row in data:
        name = row['%s_name' % loc_level]
        awcs = row['awcs'] or 0
        supervisors = row['supervisors'] or 0
        blocks = row['blocks'] or 0
        districts = row['districts'] or 0
        states = row['states'] or 0
        result_set.add(name)

        row_values = {
            'awcs': awcs,
            'supervisors': supervisors,
            'blocks': blocks,
            'districts': districts,
            'states': states,
        }
        for prop, value in six.iteritems(row_values):
            tooltips_data[name][prop] += (value or 0)

    for name, value_dict in six.iteritems(tooltips_data):
        chart_data['blue'].append([name, value_dict['awcs']])

    for sql_location in loc_children:
        if sql_location.name not in result_set:
            chart_data['blue'].append([sql_location.name, 0])

    chart_data['blue'] = sorted(chart_data['blue'])

    if level == 1:
        prop = 'states'
    elif level == 2:
        prop = 'districts'
    elif level == 3:
        prop = 'blocks'
    elif level == 4:
        prop = 'supervisors'
    else:
        prop = 'awcs'

    total_awcs = sum([(x['awcs'] or 0) for x in six.itervalues(tooltips_data)])
    total = sum([(x[prop] or 0) for x in six.itervalues(tooltips_data)])

    info = _("Total AWCs that have launched ICDS-CAS. "
             "AWCs are considered launched after submitting at least"
             " one Household Registration form. <br /><br />"
             "Number of AWCs launched: %d" % total_awcs)
    if level != 5:
        info = _("Total AWCs that have launched ICDS-CAS. "
                 "AWCs are considered launched after submitting at least"
                 " one Household Registration form. <br /><br />"
                 "Number of AWCs launched: %d <br />" % total_awcs +
                 "Number of %s launched: %d" % (prop.title(), total))

    return {
        "tooltips_data":
        dict(tooltips_data),
        "format":
        "number",
        "info":
        info,
        "chart_data": [{
            "values": chart_data['blue'],
            "key": "",
            "strokeWidth": 2,
            "classed": "dashed",
            "color": MapColors.BLUE
        }]
    }
Example #14
def move_navnode(request, tree):
    """move a node in the tree"""
    response = {}

    node_id = request.POST['node_id']
    ref_pos = request.POST['ref_pos']
    parent_id = request.POST.get('parent_id', 0)
    ref_id = request.POST.get('ref_id', 0)

    node = models.NavNode.objects.get(tree=tree, id=node_id)

    if parent_id:
        sibling_nodes = models.NavNode.objects.filter(tree=tree,
                                                      parent__id=parent_id)
        parent_node = models.NavNode.objects.get(tree=tree, id=parent_id)
    else:
        sibling_nodes = models.NavNode.objects.filter(tree=tree,
                                                      parent__isnull=True)
        parent_node = None

    if ref_id:
        ref_node = models.NavNode.objects.get(tree=tree, id=ref_id)
    else:
        ref_node = None

    # Update parent if changed
    if parent_node != node.parent:
        if node.parent:
            ex_siblings = models.NavNode.objects.filter(
                tree=tree, parent=node.parent).exclude(id=node.id)
        else:
            ex_siblings = models.NavNode.objects.filter(
                tree=tree, parent__isnull=True).exclude(id=node.id)

        node.parent = parent_node

        # restore ex-siblings
        for sib_node in ex_siblings.filter(ordering__gt=node.ordering):
            sib_node.ordering -= 1
            sib_node.save()

        # move siblings if inserted
        if ref_node:
            if ref_pos == "before":
                to_be_moved = sibling_nodes.filter(
                    ordering__gte=ref_node.ordering)
                node.ordering = ref_node.ordering
            elif ref_pos == "after":
                to_be_moved = sibling_nodes.filter(
                    ordering__gt=ref_node.ordering)
                node.ordering = ref_node.ordering + 1
            for ntbm in to_be_moved:
                ntbm.ordering += 1
                ntbm.save()

        else:
            # add at the end
            max_ordering = sibling_nodes.aggregate(
                max_ordering=Max('ordering'))['max_ordering'] or 0
            node.ordering = max_ordering + 1

    else:

        # Update pos if changed
        if ref_node:
            if ref_node.ordering > node.ordering:
                # move forward
                to_be_moved = sibling_nodes.filter(
                    ordering__lt=ref_node.ordering, ordering__gt=node.ordering)
                for next_sibling_node in to_be_moved:
                    next_sibling_node.ordering -= 1
                    next_sibling_node.save()

                if ref_pos == "before":
                    node.ordering = ref_node.ordering - 1

                elif ref_pos == "after":
                    node.ordering = ref_node.ordering - 1

            elif ref_node.ordering < node.ordering:
                # move backward
                to_be_moved = sibling_nodes.filter(
                    ordering__gt=ref_node.ordering, ordering__lt=node.ordering)
                for next_sibling_node in to_be_moved:
                    next_sibling_node.ordering += 1
                    next_sibling_node.save()

                if ref_pos == "before":
                    node.ordering = ref_node.ordering
                    ref_node.ordering += 1
                    ref_node.save()
                elif ref_pos == "after":
                    node.ordering = ref_node.ordering + 1

        else:
            max_ordering = sibling_nodes.aggregate(
                max_ordering=Max('ordering'))['max_ordering'] or 0
            node.ordering = max_ordering + 1

    node.save()
    response['message'] = _("The node '{0}' has been moved.").format(
        node.label)

    return response
Example #15
 def 揣上新貢獻(cls):
     return (cls.objects.filter(
         Q(翻譯文本__文本__平臺項目__推薦用字=True)
         | Q(翻譯文本__文本__文本校對__新文本__平臺項目__推薦用字=True)).distinct().annotate(
             上尾貢獻時間=Max('翻譯文本__文本__收錄時間')).order_by('-上尾貢獻時間'))
Example #16
 def get_newest_untiled_overlay_ids():
     # assuming newer overlays have higher primary keys. Seems reasonable.
     overlay_definitions = OverlayDefinition.objects.annotate(newest_overlay_id=Max('overlay__id'))
     newest_overlays = Overlay.objects.filter(id__in=[od.newest_overlay_id for od in overlay_definitions])
     return newest_overlays.filter(is_tiled=False).values_list('id', flat=True)
Example #17
 def save(self, *args, **kwargs):
     if self.order is None:
         max_order = self.get_ordering_queryset().aggregate(
             Max('order')).get('order__max')
         self.order = 0 if max_order is None else max_order + 1
     return super(OrderedModel, self).save(*args, **kwargs)
Example #18
 def get(self, request):
     if request.user.has_perm("mobile_scanner.view_staffonlinehistory"):
         histories = OnlineHistory.objects.filter()\
             .values('date', 'user__last_name', 'user__first_name').annotate(min_time=Min('time'), max_time=Max('time')).order_by("-date")
         return render(request, "staff.html", locals())
     else:
         msg = "无访问权限"
         return render(request, "msg.html", locals())
Example #19
 def last_revision(self):
     mx = PostRevision.objects.filter(post=self.post).aggregate(
         Max('revision'))['revision__max']
     if mx == -1:
         mx = 0
     return mx
Example #20
    def with_effective_valid_between(self):
        """
        There are five ways in which measures can get end dated:

        1. Where the measure is given an explicit end date on the measure record
           itself
        2. Where the measure's generating regulation is a base regulation, and
           the base regulation itself is end-dated
        3. Where the measure's generating regulation is a modification
           regulation, and the modification regulation itself is end-dated
        4. Where the measure's generating regulation is a base regulation,
           and any of the modification regulations that modify it are end-dated
        5. Where the measure's generating regulation is a modification
           regulation, and the base regulation that it modifies is end-dated

        Numbers 2–5 also have to take into account the "effective end date",
        which, if set, should be used instead of any explicit date. The effective end date is
        set when other types of regulations are used (abrogation, prorogation,
        etc).
        """

        # Computing the end date for case 4 is expensive because it involves
        # aggregating over all of the modifications to the base regulation,
        # where there is one. So we pull this out into a CTE to let Postgres
        # know that none of this calculation depends on the queryset filters.
        #
        # We also turn NULLs into "infinity" such that they sort to the top:
        # i.e. if any modification regulation is open-ended, so is the measure.
        # We then turn infinity back into NULL to be used in the date range.
        Regulation = self.model._meta.get_field(
            "generating_regulation", ).remote_field.model

        end_date_from_modifications = With(
            Regulation.objects.annotate(amended_end_date=NullIf(
                Max(
                    Coalesce(
                        F("amendments__enacting_regulation__effective_end_date"
                          ),
                        EndDate(
                            "amendments__enacting_regulation__valid_between"),
                        Cast(Value("infinity"), DateField()),
                    ), ),
                Cast(Value("infinity"), DateField()),
            ), ),
            "end_date_from_modifications",
        )

        return (end_date_from_modifications.join(
            self,
            generating_regulation_id=end_date_from_modifications.col.id,
        ).with_cte(end_date_from_modifications).annotate(
            db_effective_end_date=Coalesce(
                # Case 1 – explicit end date, which is always used if present
                EndDate("valid_between"),
                # Case 2 and 3 – end date of regulation
                F("generating_regulation__effective_end_date"),
                EndDate("generating_regulation__valid_between"),
                # Case 4 – generating regulation is a base regulation, and
                # the modification regulation is end-dated
                end_date_from_modifications.col.amended_end_date,
                # Case 5 – generating regulation is a modification regulation,
                # and the base it modifies is end-dated. Note that the above
                # means that this only applies if the modification has no end date.
                F("generating_regulation__amends__effective_end_date"),
                EndDate("generating_regulation__amends__valid_between"),
            ),
            db_effective_valid_between=Func(
                StartDate("valid_between"),
                F("db_effective_end_date"),
                Value("[]"),
                function="DATERANGE",
                output_field=TaricDateRangeField(),
            ),
        ))
Example #21
def hr_statistic(request, user_id):
    current_user = User.objects.get(id=user_id)
    passed_questions_count = UserAnswer.objects.filter(user=current_user).count()
    passed_polls_count = Poll.objects.filter(useranswer__user=current_user).distinct().count()
    user_score = UserAnswer.objects.filter(user=current_user).aggregate(total_score=Sum("score"), min_score=Min("score"), max_score=Max("score"))

    import collections
    users_scores = collections.Counter()
    for answer in UserAnswer.objects.exclude(user__is_staff=True).exclude(user=current_user):
        users_scores[answer.user] += answer.score
    share = 100 / (len(users_scores) + 1)
    result = 0
    if not user_score["total_score"]:
        user_score["total_score"] = 0
    for _, score in users_scores.items():
        if score > user_score["total_score"]:
            result += share

    user_answers = {}
    for poll in Poll.objects.filter(useranswer__user=current_user).distinct().order_by("id"):
        user_questions = {}
        for question in UserAnswer.objects.filter(user=current_user).filter(poll=poll):
            answers_set = []
            for answer in question.answers.all():
                answers_set.append(answer.text)
            user_questions[question.question_in_poll.question.text] = answers_set
        user_answers[poll.title] = user_questions

    if current_user.first_name:
        username = f"{current_user.first_name.capitalize()} {current_user.last_name.capitalize()}"
    else:
        username = current_user.username

    return render(request, "user_statistic.html", {
        "title":f"Cтатистика пользователя {username}",
        "answers_title":"Ответы пользователя",
        "passed_questions_count":passed_questions_count,
        "passed_polls_count":passed_polls_count,
        "score":user_score,
        "result":round(result),
        "user_answers":user_answers
        })
Example #22
    def implicit(cls, leagues=None,
                      locations=None,
                      dt_from=None,
                      dt_to=None,
                      duration_min=None,
                      duration_max=None,
                      month_days=None,
                      gap_days=1,
                      minimum_event_duration=2):
        '''
        Implicit events are those inferred from Session records, and not explicitly recorded as events.

        They are defined as all blocks of contiguous sessions that have at most gap_days (defaulting to 1) between
        them. The remaining arguments are filters.

        :param cls:
        :param leagues:      A list of League PKs to restrict the events to (a Session filter)
        :param locations:    A list of location PKs to restrict the events to (a Session filter)
        :param dt_from:      The start of a datetime window (a Session filter)
        :param dt_to:        The end of a datetime window (a Session filter)
        :param duration_min: The minimum duration (in days) of events (an Event filter)
        :param duration_max: The maximum duration (in days) of events (an Event filter)
        :param month_days:   A CSV string list of month-day identifiers. Entries are like Monday_N,
                             where N is the week of the month (1-5). Any week when N is missing;
                             any day in that week when the day is missing.
        :param gap_days:     The gap between sessions that marks a gap between implicit Events.
        :param minimum_event_duration: Sessions are recorded with a single time (nominally completion).
                                       Single-session events would therefore have a duration of 0.
                                       This value, in hours, expresses the nominal duration of a single
                                       game session. Ideally it would be the time that one session took
                                       to play through, which we can only estimate from the expected
                                       play time of the game; using that would require a more
                                       complicated query joining the Game model.
        :return: A QuerySet of events (lazy, i.e no database hits in preparing it)
        '''
        # Build an annotated session query that we can use (lazy),
        # starting with all sessions
        sessions = Session.objects.all()  # @UndefinedVariable

        # Then applying session filters
        if leagues:
            sessions = sessions.filter(league__pk__in=leagues)
        if locations:
            sessions = sessions.filter(location__pk__in=locations)
        if dt_from:
            sessions = sessions.filter(date_time__gte=dt_from)
        if dt_to:
            sessions = sessions.filter(date_time__lte=dt_to)

        # Then get the events (as all runs of sessions with at most gap_days
        # between them).

        # We need to annotate the sessions in two tiers, alas, because we need a Window
        # to get the previous session's time, and then a window to group the sessions, and
        # windows can't reference windows ... doh! The solution is to select
        # from a subquery. Alas, Django does not support selecting FROM a subquery (yet).
        # Enter the notion of a Common Table Expression (CTE), which is essentially
        # a way of naming a query to use as the FROM target of another query. There is
        # fortunately a package "django_cte" that adds CTE support to querysets. It's
        # a tad clunky but works.
        #
        # Step 1 is to do the first windowing annotation, adding the prev_date_time and
        # based on it flagging the first session in each event.
        sessions = sessions.order_by("date_time").annotate(
                    prev_date_time=Window(expression=Lag('date_time'), order_by=F('date_time').asc()),
                    dt_difference=ExpressionWrapper(F('date_time') - F('prev_date_time'), output_field=DurationField()),
                    event_start=Case(When(dt_difference__gt=timedelta(days=gap_days), then='date_time')),
                )

        # Step 2 we need to instantiate a CTE
        sessions = With(sessions, "inner_sessions")

        # Step 3 we build a new queryset (that selects from the CTE and annotate that
        # The oddity here is tha django_cte requires us to call with_cte() to include
        # the CTE's SQL in the new query's SQL. Go figure (I've checked the code, may
        # fork and patch some time).
        #
        # The grouping expression is SQL esoterica, that I pilfered from:
        #
        #    https://stackoverflow.com/a/56729571/4002633
        #    https://dbfiddle.uk/?rdbms=postgres_11&fiddle=0360fd313400e533cd76fbc39d0e22d3
        # It works because a Window that has no partition_by included makes a single partition
        # of all the rows, which is why we need to ensure an order_by
        # clause in the Window. Ordered by date_time, a count of the event_start values (nulls
        # are not counted) returns how many event_starts there are at or before this row, and
        # so identifies which event this row belongs to. A sneaky SQL trick. It relies on
        # event_start not having a default value (an ELSE clause) and hence defaulting to null.
        # Count() ignores the nulls.
        sessions_with_event = sessions.queryset().annotate(
                            event=Window(expression=Count(sessions.col.event_start), order_by=sessions.col.date_time),
                            # local_time=ExpressionWrapper(F('date_time__local'), output_field=DateTimeField())
                        )

        print_SQL(sessions_with_event)

        # Step 4: We have to bring players into the fold, and they are stored in Performance objects.
        # Now we want to select from the session_events queryset joined with Performance,
        # and group by event to collect session counts and player lists and player counts.
        #
        # WARNING: We need an explicit order_by('event') as the Performance object has a default
        # ordering, and if that is included, it forces one row per Performance object EVEN after
        # .values('event'); .distinct() doesn't even help in that instance (I tried). Short
        # story is, use explicit ordering on the group-by field (the .values() field).
        sessions_with_event = With(sessions_with_event, "outer_sessions")

        events = (sessions_with_event
                 .join(Performance, session_id=sessions_with_event.col.id)
                 .annotate(event=sessions_with_event.col.event + 1,  # Move from 0 based to 1 based
                           location_id=sessions_with_event.col.location_id,
                           game_id=sessions_with_event.col.game_id,
                           gap_time=sessions_with_event.col.dt_difference)
                 .order_by('event')
                 .values('event')
                 .annotate(start=ExpressionWrapper(Min('session__date_time__local') - timedelta(hours=minimum_event_duration), output_field=DateTimeField()),
                           end=Max('session__date_time__local'),
                           duration=F('end') - F('start'),
                           gap_time=Max('gap_time'),
                           locations=Count('location_id', distinct=True),
                           location_ids=ArrayAgg('location_id', distinct=True),
                           sessions=Count('session_id', distinct=True),
                           session_ids=ArrayAgg('session_id', distinct=True),
                           games=Count('game_id', distinct=True),
                           game_ids=ArrayAgg('game_id', distinct=True),
                           players=Count('player_id', distinct=True),
                           player_ids=ArrayAgg('player_id', distinct=True)
                          ))

        # PROBLEM: start and end are in UTC here. They do not use the recorded TZ of the session datetime.
        # Needs fixing!

        if month_days:
            daynum = {"sunday":1, "monday":2, "tuesday":3, "wednesday":4, "thursday":5, "friday":6, "saturday":7}

            # Build a canonical list of days (lower case, whitespace stripped)
            days = [d.strip().lower() for d in month_days.split(",")]

            efilter = Q()

            for day in days:
                try:
                    day_filter = None
                    week_filter = None

                    # Can be of form "day", "day_n" or "n"
                    parts = day.split("_")
                    if len(parts) == 1:
                        if parts[0] in daynum:
                            day_filter = daynum[parts[0]]
                        elif isInt(parts[0]):
                            week_filter = int(parts[0])
                    else:
                        day_filter = daynum.get(parts[0], None)
                        week_filter = int(parts[1])
                except:
                    raise ValueError(f"Bad month/day specifier: {day}")

                # A dw filter is the day filter AND the week filter
                if day_filter or week_filter:
                    dwfilter = Q()
                    if day_filter:
                        dwfilter &= Q(start__week_day=day_filter)
                    if week_filter:
                        dwfilter &= Q(start__month_week=week_filter)
                    # An event filter is one dw filter OR another.
                    efilter |= dwfilter

            # An empty Q() is falsey, which is good
            if efilter:
                events = events.filter(efilter)

        # Finally, apply the event filters
        if duration_min: events = events.filter(duration__gte=duration_min)
        if duration_max: events = events.filter(duration__lte=duration_max)

        # Return a QuerySet of events (still lazy)
        return events.order_by("-end")
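
The gap-detection step at the heart of Example #22 can be read in isolation: a Lag window fetches the previous row's timestamp, the difference is wrapped as a duration, and rows that start a new run are flagged. A minimal sketch of just that step, assuming a hypothetical Visit model with a timestamp field:

from datetime import timedelta

from django.db.models import Case, DurationField, ExpressionWrapper, F, When, Window
from django.db.models.functions import Lag

visits = Visit.objects.order_by('timestamp').annotate(
    # the previous visit's timestamp within the ordered set
    prev_timestamp=Window(expression=Lag('timestamp'), order_by=F('timestamp').asc()),
    # gap between this visit and the previous one
    gap=ExpressionWrapper(F('timestamp') - F('prev_timestamp'), output_field=DurationField()),
    # non-null only for the first visit after a gap of more than one day
    run_start=Case(When(gap__gt=timedelta(days=1), then='timestamp')),
)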
Example #23
    def save(self, **kwargs):
        if not self.pk:
            # We need to create the receipt number when the donation is first created
            if self.date_of_donation.month in [1, 2, 3]:
                financial_year_1 = self.date_of_donation.year - 1
                financial_year_2 = self.date_of_donation.year
            else:
                financial_year_1 = self.date_of_donation.year
                financial_year_2 = self.date_of_donation.year + 1
            date_1 = date(financial_year_1, 4, 1)
            date_2 = date(financial_year_2, 3, 31)
            self.receipt_number = (Donation.objects.filter(
                date_of_donation__gte=date_1,
                date_of_donation__lte=date_2).aggregate(
                    Max('receipt_number'))["receipt_number__max"] or 0) + 1
            super(Donation, self).save(**kwargs)
            return
Example #24
 def by_group(self):
     return self.values("group").annotate(
         time=Max("modified"),
         messages=GroupConcat("message"),
     ).order_by("-time")
Example #25
 def get_wiki_pages_latest(self, node):
     wiki_page_ids = node.wikis.filter(deleted__isnull=True).values_list('id', flat=True)
     return WikiVersion.objects.annotate(name=F('wiki_page__page_name'), newest_version=Max('wiki_page__versions__identifier')).filter(identifier=F('newest_version'), wiki_page__id__in=wiki_page_ids)
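
Example #25 is an instance of the "latest row per group" idiom: annotate every row with the maximum value for its group, then keep only the rows whose own value equals that maximum. A minimal sketch of the idiom, assuming a hypothetical Book model with an author foreign key (related_name 'books') and an integer edition field:

from django.db.models import F, Max

# For each book, compute the highest edition among that author's books,
# then keep only the books that carry that highest edition.
latest_editions = Book.objects.annotate(
    newest_edition=Max('author__books__edition')).filter(
        edition=F('newest_edition'))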
Example #26
    def test_invalid_flight_page(self):
        max_id = Flight.objects.all().aggregate(Max("id"))["id__max"]

        c = Client()
        response = c.get(f"/flights/{max_id + 1}")
        self.assertEqual(response.status_code, 404)
Example #27
def get_awcs_covered_data_chart(domain, config, loc_level, show_test=False):
    month = datetime(*config['month'])
    three_before = datetime(*config['month']) - relativedelta(months=3)

    config['month__range'] = (three_before, month)
    del config['month']

    level = config['aggregation_level']
    chart_data = AggAwcMonthly.objects.filter(**config).values(
        'month', '%s_name' % loc_level).annotate(
            awcs=Sum('num_launched_awcs')
            if level <= 5 else Max('num_launched_awcs'), ).order_by('month')

    if not show_test:
        chart_data = apply_exclude(domain, chart_data)

    data = {'pink': OrderedDict()}

    dates = [dt for dt in rrule(MONTHLY, dtstart=three_before, until=month)]

    for date in dates:
        miliseconds = int(date.strftime("%s")) * 1000
        data['pink'][miliseconds] = {'y': 0, 'all': 0}

    best_worst = {}
    for row in chart_data:
        date = row['month']
        awcs = (row['awcs'] or 0)
        location = row['%s_name' % loc_level]

        if date.month == month.month:
            if location in best_worst:
                best_worst[location].append(awcs)
            else:
                best_worst[location] = [awcs]

        date_in_miliseconds = int(date.strftime("%s")) * 1000

        data['pink'][date_in_miliseconds]['y'] += awcs

    all_locations = [{
        'loc_name': key,
        'value': sum(value) / len(value)
    } for key, value in best_worst.items()]
    all_locations_sorted_by_name = sorted(all_locations,
                                          key=lambda x: x['loc_name'])
    all_locations_sorted_by_value_and_name = sorted(
        all_locations_sorted_by_name, key=lambda x: x['value'], reverse=True)

    return {
        "chart_data": [{
            "values": [{
                'x': key,
                'y': value['y'] / float(value['all'] or 1),
                'all': value['all']
            } for key, value in data['pink'].items()],
            "key":
            "Number of AWCs Launched",
            "strokeWidth":
            2,
            "classed":
            "dashed",
            "color":
            ChartColors.PINK
        }],
        "all_locations":
        all_locations_sorted_by_value_and_name,
        "top_five":
        all_locations_sorted_by_value_and_name[:5],
        "bottom_five":
        all_locations_sorted_by_value_and_name[-5:],
        "location_type":
        loc_level.title()
        if loc_level != LocationTypes.SUPERVISOR else 'Sector'
    }
Example #28
def get_awcs_covered_sector_data(domain, config, loc_level, location_id, show_test=False):
    group_by = ['%s_name' % loc_level]

    config['month'] = datetime(*config['month'])

    level = config['aggregation_level']
    data = AggAwcMonthly.objects.filter(
        **config
    ).values(
        *group_by
    ).annotate(
        awcs=Sum('num_launched_awcs') if level <= 5 else Max('num_launched_awcs'),
    ).order_by('%s_name' % loc_level)

    if not show_test:
        data = apply_exclude(domain, data)

    chart_data = {
        'blue': [],
    }

    tooltips_data = defaultdict(lambda: {
        'districts': 0,
        'blocks': 0,
        'supervisors': 0,
        'awcs': 0
    })

    loc_children = SQLLocation.objects.get(location_id=location_id).get_children()
    result_set = set()

    for row in data:
        name = row['%s_name' % loc_level]
        awcs = row['awcs']
        result_set.add(name)

        row_values = {
            'awcs': awcs
        }
        for prop, value in row_values.iteritems():
            tooltips_data[name][prop] += (value or 0)

    for name, value_dict in tooltips_data.iteritems():
        chart_data['blue'].append([name, value_dict['awcs']])

    for sql_location in loc_children:
        if sql_location.name not in result_set:
            chart_data['blue'].append([sql_location.name, 0])

    chart_data['blue'] = sorted(chart_data['blue'])

    return {
        "tooltips_data": tooltips_data,
        "format": "number",
        "info": _((
            "Number of AWCs launched"
        )),
        "chart_data": [
            {
                "values": chart_data['blue'],
                "key": "",
                "strokeWidth": 2,
                "classed": "dashed",
                "color": BLUE
            }
        ]
    }
Example #29
 def minimum_grade(self, minimum_grade):
     query = self.annotate(minimum_grade=Max("inspection__grade__slug"))
     return query.filter(minimum_grade__lte=minimum_grade)
Example #30
def edit_menu(request, pk):
    if request.method == 'POST':
        menu_instance = get_object_or_404(Menu, pk=pk)
        current_parent = menu_instance.parent
        current_lvl = menu_instance.lvl
        validate_menu = MenuForm(request.POST, instance=menu_instance)

        if validate_menu.is_valid():
            updated_menu = validate_menu.save(commit=False)
            if updated_menu.parent != current_parent:
                try:
                    if updated_menu.parent.id == updated_menu.id:
                        data = {
                            'error': True,
                            'response': {
                                'parent':
                                'you can not choose the same as parent'
                            }
                        }
                        return HttpResponse(
                            json.dumps(data),
                            content_type='application/json; charset=utf-8')
                except Exception:
                    pass

                lnk_count = Menu.objects.filter(
                    parent=updated_menu.parent).count()
                updated_menu.lvl = lnk_count + 1
                lvlmax = Menu.objects.filter(parent=current_parent).aggregate(
                    Max('lvl'))['lvl__max']
                if lvlmax != 1:
                    for i in Menu.objects.filter(parent=current_parent,
                                                 lvl__gt=current_lvl,
                                                 lvl__lte=lvlmax):
                        i.lvl = i.lvl - 1
                        i.save()
            if updated_menu.url[-1] != '/':
                updated_menu.url = updated_menu.url + '/'

            if request.POST.get('status', ''):
                updated_menu.status = 'on'

            updated_menu.save()

            data = {'error': False, 'response': 'updated successfully'}
        else:
            data = {'error': True, 'response': validate_menu.errors}
        return HttpResponse(json.dumps(data),
                            content_type='application/json; charset=utf-8')

    if request.user.is_superuser:
        parent = Menu.objects.filter(parent=None).order_by('lvl')
        current_menu = get_object_or_404(Menu, pk=pk)
        c = {}
        c.update(csrf(request))
        return render(
            request, 'admin/content/menu/edit-menu-item.html', {
                'csrf_token': c['csrf_token'],
                'current_menu': current_menu,
                'parent': parent
            })
    else:
        return render_to_response('admin/accessdenied.html')