def get_context_data(self, **kwargs):
    """Build the speaker-detail template context.

    Adds the speech list, the count of distinct sections the speaker's
    visible speeches appear in, the single longest speech, and the page
    title on top of the parent context.
    """
    kwargs['speech_list'] = self.object_list
    context = super(SpeakerView, self).get_context_data(**kwargs)
    # Only speeches visible to the current request count toward sections.
    visible_speeches = self.object.speech_set.all().visible(self.request)
    context['section_count'] = visible_speeches.aggregate(
        Count('section', distinct=True))['section__count']
    # [:1] keeps this a queryset of at most one speech, ranked by text length.
    context['longest_speech'] = self.object.speech_set.annotate(
        length=Length('text')).order_by('-length')[:1]
    context['title'] = _('View Speaker: %(speaker_name)s') % {
        'speaker_name': self.object.name,
    }
    return context
def get_context_data(self, **kwargs):
    """Assemble the instance dashboard context: counts of speeches,
    sections and speakers, plus the average speech text length."""
    context = super(InstanceView, self).get_context_data(**kwargs)
    instance = self.request.instance
    context['count_speeches'] = (
        Speech.objects.for_instance(instance).visible(self.request).count())
    context['count_sections'] = Section.objects.for_instance(instance).count()
    context['count_speakers'] = Speaker.objects.for_instance(instance).count()
    context['average_length'] = (
        Speech.objects.for_instance(instance)
        .annotate(length=Length('text'))
        .aggregate(avg=Avg('length'))['avg'])
    return context
def workout_day(dayIntensity, date, user):
    """Select candidate recipes for each meal slot on a workout day and
    delegate the final selection to MealPlanning.pick_meals.

    Cooking-time limits per slot come from MealPlanning.busy_day for the
    given intensity.
    """
    limits = MealPlanning.busy_day(dayIntensity)
    # Breakfast: quick smoothies that are high-protein, high-carb or
    # high-calorie.
    breakfast = RecipeOverview.objects.filter(
        Q(title__icontains='smoothie'),
        Q(cookingMins__lte=limits['breakfast']),
        Q(highProtein=True) | Q(highCarbs=True) | Q(highCalories=True))
    # Snacks: light (<= 18% of calories), protein- or carb-focused.
    am_snack = RecipeOverview.objects.filter(
        Q(cookingMins__lte=limits['amSnack']),
        Q(percentCalories__lte=18),
        Q(highProtein=True) | Q(highCarbs=True))
    # Lunch: substantial (>= 20% of calories), protein- or carb-focused.
    lunch = RecipeOverview.objects.filter(
        Q(cookingMins__lte=limits['lunch']),
        Q(percentCalories__gte=20.00),
        Q(highProtein=True) | Q(highCarbs=True))
    pm_snack = RecipeOverview.objects.filter(
        Q(cookingMins__lte=limits['pmSnack']),
        Q(percentCalories__lte=18),
        Q(highProtein=True) | Q(highCarbs=True))
    # Dinner also caps the directions length.
    # NOTE(review): dinner uses cookingMins__gte while every other slot
    # uses __lte — looks inverted, but kept as-is; confirm intent.
    dinner = RecipeOverview.objects.annotate(
        text_len=Length('directions')).filter(
        Q(cookingMins__gte=limits['dinner']),
        Q(percentCalories__gte=20.00),
        Q(text_len__lt=limits['direction']),
        Q(highProtein=True) | Q(highCarbs=True) | Q(highCalories=True))
    return MealPlanning.pick_meals(user, date, breakfast, am_snack, lunch,
                                   pm_snack, dinner, dayIntensity)
def test_if_basic(self):
    """If(Length(d), ...) is truthy exactly when `d` is non-empty."""
    for value in ('String', '', 'String', ''):
        Alphabet.objects.create(d=value)
    annotated = Alphabet.objects.annotate(
        has_d=If(Length('d'), Value(True), Value(False)))
    results = list(annotated.order_by('id').values_list('has_d', flat=True))
    assert results == [True, False, True, False]
def test_if_basic(self): Alphabet.objects.create(d="String") Alphabet.objects.create(d="") Alphabet.objects.create(d="String") Alphabet.objects.create(d="") results = list( Alphabet.objects.annotate(has_d=If(Length("d"), Value( True), Value(False))).order_by("id").values_list("has_d", flat=True)) assert results == [True, False, True, False]
def handle(self, *args, **options):
    """Deactivate open reports whose usernames start with '2016'/'2017'
    and are 9 or 10 characters long."""
    suspect_reports = Report.objects.annotate(
        l=Length('user__username')
    ).filter(
        Q(user__username__startswith='2017')
        | Q(user__username__startswith='2016'),
        Q(l=10) | Q(l=9),
        status=True,
    )
    for report in suspect_reports:
        logger.info(f"正在关闭 {report.user.username}-{report.user.name} 的填报")
        report.status = False
        report.save()
def general_stat(self, date_start, date_end, interval='hour'):
    """Return a JSON table of call counts grouped by time interval.

    Only CDR rows in [date_start, date_end] with a short destination
    (<= 4 chars) and a long source (>= 5 chars) are counted.

    :param date_start: inclusive lower bound for calldate (falsy -> []).
    :param date_end: inclusive upper bound for calldate (falsy -> []).
    :param interval: grouping granularity passed to self.get_interval.
    :return: JSON string; header row plus [interval, count] rows, or '[]'.
    """
    result = []
    if not date_start or not date_end:
        return json.dumps(result)
    qs = Cdr.objects.annotate(
        cl_dst=Length('dst'), cl_src=Length('src')
    ).filter(
        calldate__gte=date_start,
        calldate__lte=date_end,
        cl_dst__lte=4,
        cl_src__gte=5,
    ).order_by('calldate')
    # exists() issues a cheap `LIMIT 1` query instead of COUNT(*); only
    # truthiness was needed here.
    if qs.exists():
        result = [['Дата', 'Кол-во']]
        # itertools.groupby needs input pre-sorted by the grouping key;
        # order_by('calldate') above guarantees that.
        groups = itertools.groupby(
            qs, lambda x: self.get_interval(x.calldate, interval))
        grouped = [[group, sum(1 for _ in matches)]
                   for group, matches in groups]
        result += grouped
    return json.dumps(result)
def check_field_max_length_lteq_190(apps, schema_editor):
    """
    Check to see if we've got any data in the table that prevents us from
    migrating the length of the column down to 190; if so, raise an
    exception.
    """
    LtiUserData = apps.get_model('django_lti_tool_provider', 'LtiUserData')
    max_custom_key_length = LtiUserData.objects.aggregate(
        length=Max(Length('custom_key')))['length']
    # Max() aggregates to None when the table is empty; treat that as
    # "nothing exceeds the limit" instead of letting `None > 190` raise
    # a TypeError under Python 3.
    if max_custom_key_length is not None and max_custom_key_length > 190:
        raise ValueError(
            'Cannot perform migration: values of \'custom_key\' with length '
            '{} exceed the expected length 190.'.format(max_custom_key_length))
def test_nested_function_ordering(self):
    """Ordering by Length(Coalesce(...)) works ascending and descending."""
    Author.objects.create(name='John Smith')
    Author.objects.create(name='Rhonda Simpson', alias='ronny')
    sort_key = Length(Coalesce('alias', 'name'))
    # 'ronny' (5) sorts before 'John Smith' (10).
    self.assertQuerysetEqual(
        Author.objects.order_by(sort_key),
        ['Rhonda Simpson', 'John Smith'],
        lambda a: a.name,
    )
    self.assertQuerysetEqual(
        Author.objects.order_by(sort_key.desc()),
        ['John Smith', 'Rhonda Simpson'],
        lambda a: a.name,
    )
def test_basic(self):
    """Repeat with int counts, expression counts, and Value strings."""
    Author.objects.create(name='John', alias='xyz')
    cases = (
        (Repeat('name', 0), ''),
        (Repeat('name', 2), 'JohnJohn'),
        (Repeat('name', Length('alias'), output_field=CharField()), 'JohnJohnJohn'),
        (Repeat(Value('x'), 3, output_field=CharField()), 'xxx'),
    )
    for function, repeated_text in cases:
        with self.subTest(function=function):
            qs = Author.objects.annotate(repeated_text=function)
            self.assertQuerysetEqual(
                qs, [repeated_text], lambda a: a.repeated_text, ordered=False)
def add_base64_padding(apps, schema_editor):
    """Append '=' padding so base64 columns have length divisible by 4.

    The same three-step pass is applied to both the `credential` and
    `public_key` fields, so the logic is factored into one helper.

    NOTE(review): the padding=3 ('===') branch is preserved from the
    original, although a valid base64 payload length is never ≡ 1 (mod 4);
    confirm whether such rows can exist before removing it.
    """
    U2fDevice = apps.get_model('otp_u2f', 'U2fDevice')

    def pad_field(field):
        # padding = number of '=' chars needed to reach a multiple of 4
        # (4 means already aligned, so no filter matches it).
        qs = U2fDevice.objects.annotate(
            padding=Value(4) - Mod(Length(field), Value(4)))
        for count in (1, 2, 3):
            qs.filter(padding=count).update(
                **{field: Concat(F(field), Value('=' * count))})

    pad_field('credential')
    pad_field('public_key')
def __init__(self, *args, **kwargs):
    """Populate the four optional theme fields with the same queryset,
    ordered by shortest theme name first (ties broken alphabetically)."""
    super(AddFormId, self).__init__(*args, **kwargs)
    self.list = Themes.objects.all().order_by(
        Length('theme').asc(), 'theme')
    # All four fields share the queryset and are optional; a loop replaces
    # eight duplicated assignments.
    for name in ('theme1', 'theme2', 'theme3', 'theme4'):
        self.fields[name].queryset = self.list
        self.fields[name].required = False
def handle(self, *args, **options):
    """Summarise curation progress per project into a CSV.

    For every project, counts dataset associations, curated datasets,
    'Yes'-curated datasets, and the distinct samples found in the curators'
    on-disk annotation files, then writes one row per project to
    `<options['output'][0]>.csv`.
    """
    projects = Project.objects.all()
    # Datasets with non-trivial processed data (length > 1) and Regular status.
    datasets = Dataset.objects.annotate(
        processed_len=Length('processed_data')).filter(
            status='Regular', processed_len__gt=1)
    print(datasets.count(), 'datasets have processed data')
    merge = []
    for project in projects:
        # Annotation files live under <data_path>/Curation/<project.ID>/.
        project_path = os.path.join(data_path, 'Curation', str(project.ID))
        associations = Association_Project_Dataset.objects.filter(
            project=project)
        # Active curations that have been categorised.
        curations = Curation.objects.filter(
            project=project, active=True).exclude(category='')
        curations_Yes = curations.filter(category='Yes')
        samples = set()
        for curation in curations_Yes:
            # Per-curator annotation file: <curator.id>/<dataset.ID>.meta
            annotation = os.path.join(project_path, str(curation.curator.id),
                                      curation.dataset.ID + '.meta')
            if os.path.exists(annotation):
                annotation = pandas.read_csv(annotation, sep='\t', index_col=0)
                # Skip files lacking the two required annotation columns.
                if 'Treatment' not in annotation.columns or 'Condition' not in annotation.columns:
                    continue
                annotation = annotation[['Treatment', 'Condition']].dropna()
                # Index holds sample identifiers; collect the distinct set.
                samples.update(annotation.index)
        arr = pandas.Series(
            [
                associations.count(),
                associations.filter(active=True).count(),
                len(set([c.dataset.ID for c in curations])),
                len(set([c.dataset.ID for c in curations_Yes])),
                len(samples)
            ],
            index=['Total', 'Keyword', 'Processed', 'Hit', 'Sample'],
            name=project.title,
        )
        merge.append(arr)
    # One column per project, transposed so projects become rows.
    result = pandas.concat(merge, axis=1, join='inner').transpose()
    result.to_csv(options['output'][0] + '.csv')
def _get_annotation_kwargs_for_node(self) -> Dict:
    """Extend the parent annotation kwargs with per-cohort zygosity counts.

    For sub-cohorts the counts are derived in SQL by stripping each
    zygosity character out of the concatenated per-sample zygosity string
    and measuring the length difference.
    """
    annotation_kwargs = super()._get_annotation_kwargs_for_node()
    if self.cohort:
        # We need to join to our cohort genotype before annotate, or the
        # counts etc will be for the whole table
        if self.cohort.is_sub_cohort():
            cgc = self.cohort.cohort_genotype_collection
            # Pull out this sub-cohort's one-character zygosity codes from
            # the packed samples_zygosity string, one Substr per sample.
            sample_substrings = []
            for sample in self.cohort.get_samples():
                i = cgc.get_sql_index_for_sample_id(sample.pk)
                sample_substrings.append(Substr(f"{cgc.cohortgenotype_alias}__samples_zygosity", i, length=1))
            sub_cohort_zygosity = Concat(*sample_substrings)
            # Count occurrences of each code as: len(s) - len(s without code).
            remove_hom = Replace(sub_cohort_zygosity, Value(Zygosity.HOM_ALT), Value(''))
            remove_het = Replace(sub_cohort_zygosity, Value(Zygosity.HET), Value(''))
            remove_ref = Replace(sub_cohort_zygosity, Value(Zygosity.HOM_REF), Value(''))
            hom_count = Length(sub_cohort_zygosity) - Length(remove_hom)
            het_count = Length(sub_cohort_zygosity) - Length(remove_het)
            ref_count = Length(sub_cohort_zygosity) - Length(remove_ref)
            annotation_kwargs[self.hom_count_column] = hom_count
            annotation_kwargs[self.het_count_column] = het_count
            annotation_kwargs[self.ref_count_column] = ref_count
        # Just add all annotations (only those used will be actually executed)
        hom_and_het = F(self.hom_count_column) + F(self.het_count_column)
        annotation_kwargs[self.any_germline_count_column] = hom_and_het
        annotation_kwargs[self.any_zygosity_count_column] = hom_and_het + F(self.ref_count_column)
    return annotation_kwargs
def get_context_data(self, *args, **kwargs):
    """Build the stats dashboard context: company/product/report counts
    and various top-10 leaderboards.

    The raw SQL below is static (no user input interpolated), so it is
    injection-safe.
    """
    c = super().get_context_data(**kwargs)
    c['most_popular_companies'] = Company.objects.filter(
        verified=False).order_by('-query_count')[:10]
    c['no_of_companies'] = Company.objects.count()
    c['no_of_not_verified_companies'] = Company.objects.filter(
        verified=False).count()
    c['no_of_verified_companies'] = Company.objects.filter(
        verified=True).count()
    # Correlated subquery counting unresolved reports per product.
    sq = 'select count(*) from report_report where product_id=product_product.id and resolved_at is NULL'
    c['products_with_most_open_reports'] = Product.objects.raw(
        'select '
        '*, '
        '(' + sq + ') as no_of_open_reports '
        'from '
        'product_product '
        'order by no_of_open_reports desc limit 10')
    c['most_popular_590_products'] = Product.objects.filter(
        company__isnull=True,
        code__startswith='590').order_by('-query_count')[:10]
    c['no_of_590_products'] = Product.objects.filter(
        company__isnull=True, code__startswith='590').count()
    c['most_popular_not_590_products'] = (Product.objects.filter(
        company__isnull=True).exclude(
        code__startswith='590').order_by('-query_count')[:10])
    c['no_of_not_590_products'] = (Product.objects.filter(
        company__isnull=True).exclude(code__startswith='590').count())
    c['companies_by_name_length'] = (Company.objects.annotate(
        name_length=Length('common_name')).order_by('-name_length'))[:10]
    c['most_popular_products_without_name'] = Product.objects.filter(
        name__isnull=True).order_by('-query_count')[:10]
    # BUGFIX: "most open reports" requires a descending sort; the original
    # ascending order returned the companies with the FEWEST open reports
    # (contrast the raw-SQL products query above, which orders desc).
    c['companies_with_most_open_reports'] = Company.objects.annotate(
        no_of_open_reports=Count('companies__report')).order_by(
        '-no_of_open_reports')[:10]
    # Reports
    c['newest_reports'] = Report.objects.only_open().order_by(
        '-created_at')[:10]
    c['no_of_open_reports'] = Report.objects.only_open().count()
    c['no_of_resolved_reports'] = Report.objects.only_resolved().count()
    c['no_of_reports'] = Report.objects.count()
    return c
def search(request):
    """Search films and reviewers by partial name, shortest names first.

    AJAX requests receive a rendered partial wrapped in JSON; regular
    requests get the full search page.
    """
    context_dict = {}
    url_parameter = request.GET.get("q")
    if url_parameter:
        # Restrict to matches and cap each list at five results.
        films = Film.objects.filter(
            title__icontains=url_parameter).order_by(Length('title'))[:5]
        reviewers = Reviewer.objects.filter(
            displayName__icontains=url_parameter).order_by(
            Length('displayName'))[:5]
    else:
        films = Film.objects.all().order_by(Length('title'))
        reviewers = Reviewer.objects.all().order_by(Length('displayName'))
    context_dict['films'] = films
    context_dict['reviewers'] = reviewers
    if request.is_ajax():
        html = render_to_string(
            template_name="search-results-partial.html",
            context={"films": films, "reviewers": reviewers})
        return JsonResponse(data={"html_from_view": html}, safe=False)
    return render(request, "search.html", context=context_dict)
def get_object(self, searchString):
    """Return up to 15 Codes whose keyword terms contain every word of
    searchString, shortest code first, then alphabetical by code.

    Queries under 3 characters return an empty queryset.
    """
    # Too-short queries would match far too broadly.
    if len(searchString) < 3:
        return Code.objects.none()
    words = searchString.lower().split(' ')
    # Seed with the first word, then AND in a filter per remaining word.
    queryset = Code.objects.filter(keyword_terms__icontains=words[0])
    for word in words[1:]:
        queryset = queryset.filter(keyword_terms__icontains=word)
    # Top 15: shortest codes first, secondary sort by the code itself.
    return queryset.order_by(Length('code').asc(), 'code')[:15]
def followup(request):
    """Shows students that didn't leave a lot of information to allow for
    quick follow-up emails."""
    student = None
    form = FollowUpForm()
    # "Low info" = under 80 chars of info text and no mentor assigned yet.
    low_info_students = (
        Student.objects
        .annotate(text_len=Length('info'))
        .filter(text_len__lt=80)
        .filter(current_mentor=None))
    if request.method == 'POST':
        form = FollowUpForm(request.POST)
        if form.is_valid():
            student = Student.objects.get(pk=form.cleaned_data['student'])
    context = {"students": low_info_students, "student": student,
               'form': form}
    return render(request, 'match/followup.html', context)
def view_previous_transcriptions(request: HttpRequest) -> HttpResponse:
    """Show the user their latest transcriptions so that they can edit them
    if needed."""
    # Newest 25 of the user's transcriptions with short original IDs
    # (< 14 chars) and a titled submission.
    recent = (
        Transcription.objects
        .annotate(original_id_len=Length("original_id"))
        .filter(
            author=request.user,
            original_id_len__lt=14,
            submission__title__isnull=False,
        )
        .order_by("-create_time")[:25]
    )
    context = get_additional_context(
        {"transcriptions": recent, "fullwidth_view": True}
    )
    return render(request, "app/view_transcriptions.html", context)
def main_data_leak():
    """
    Main function:
    - close_old_connections()
    - read in our list of keywords
    - check_keywords(keywords)
    """
    close_old_connections()
    print(str(timezone.now()) + " - CRON TASK : Fetch searx & pastebin")
    # Longest keyword names are checked first.
    keyword_qs = Keyword.objects.all().order_by(Length('name').desc())
    check_keywords(keyword_qs)
def desc_corta_resumida(self):
    """Annotate each row with `longitud` (length of desc_corta) and
    `desc_resumida` (the description truncated to 50 chars + '...'), for a
    fixed set of ISBNs, returning values dicts.

    NOTE(review): `default=('desc_corta')` is a plain string, which Django
    treats as a literal value — rows with longitud <= 50 get the literal
    text 'desc_corta', not the field's contents. If the field value was
    intended, this should be F('desc_corta'); confirm against callers.
    """
    consulta = self.annotate(longitud=Length('desc_corta')).annotate(
        desc_resumida=Case(
            # Truncate long descriptions and append an ellipsis.
            When(longitud__gt=50,
                 then=Concat(Left('desc_corta', 50), V('...'))),
            default=('desc_corta'),
            output_field=CharField(),
        )).filter(isbn__in=('1933988592', '1884777791', '1884777589',
                            '193239415X', '1933988495')).values(
        'isbn', 'desc_resumida', 'longitud')
    return consulta
def test_chaining_transforms(self):
    """Length composes with the Trim transform both via the expression API
    and via the double-underscore lookup path."""
    Company.objects.create(name=' Django Software Foundation ')
    Company.objects.create(name='Yahoo')
    expected = [
        {'name': ' Django Software Foundation ', 'length': 26},
        {'name': 'Yahoo', 'length': 5},
    ]
    with register_lookup(CharField, Trim), register_lookup(CharField, Length):
        for expr in (Length('name__trim'), F('name__trim__length')):
            with self.subTest(expr=expr):
                annotated = Company.objects.annotate(length=expr)
                self.assertCountEqual(
                    annotated.values('name', 'length'), expected)
def get_whois_servers_for_domain(domain):
    """Return whois servers for the most specific known suffix of *domain*.

    'a.b.c.d.e' is tried as ['a.b.c.d.e', 'b.c.d.e', 'c.d.e', 'd.e', 'e'];
    the longest suffix present in TopLevelDomain wins. Falls back to the
    parent's whois servers when the match has none, else returns [].

    Raises AssertionError when no suffix is a known TLD.
    """
    splitted = domain.split('.')
    # a.b.c.d.e -> ['a.b.c.d.e', 'b.c.d.e', 'c.d.e', 'd.e', 'e']
    variants = ['.'.join(splitted[i:]) for i in range(len(splitted))]
    q = TopLevelDomain.objects.filter(name__in=variants).order_by(
        Length('name').asc())
    # exists() does a LIMIT 1 probe instead of a full COUNT(*); only
    # truthiness is needed. NOTE: assert is stripped under `python -O` —
    # kept (rather than an explicit raise) so callers catching
    # AssertionError keep working.
    assert q.exists(), "Not valid domain name"
    sub = q.last()  # the longest matching suffix (ascending length order)
    if sub.whois.exists():
        return sub.whois.all()
    elif sub.parent:
        return sub.parent.whois.all()
    return []
def test_if_output_field(self):
    """If() with an explicit IntegerField output: 0 when condition field
    `a` is falsy, otherwise Length(d)."""
    for a, d in ((0, "Aaa"), (1, "Bb"), (2, "Ccc")):
        Alphabet.objects.create(a=a, d=d)
    qs = Alphabet.objects.annotate(d_length=If(
        "a",
        Length("d"),
        Value(0),
        output_field=IntegerField(),
    )).order_by("id")
    results = list(qs.values_list("d_length", flat=True))
    assert results == [0, 2, 3]
def migrate_language(apps, schema_editor):
    """Convert EventRequest.language string to key"""
    EventRequest = apps.get_model('workshops', 'EventRequest')
    Language = apps.get_model('workshops', 'Language')
    english = Language.objects.get(name='English')
    for request in EventRequest.objects.all():
        # Most precise match = shortest language name containing the
        # free-text value. (The original ordered by
        # Length('name') - len(request.language); subtracting a per-query
        # constant cannot change the ordering, so it is dropped.)
        language = Language.objects.filter(
            name__icontains=request.language
        ).order_by(Length('name')).first()
        if not language:
            language = english  # fall back when nothing matches
        request.language_new = language
        request.save()
def get_queryset(self, *args, **kwargs):
    """Annotate username length, prefetch thesis-related relations, and
    order users by username length (shortest first)."""
    base = super().get_queryset(*args, **kwargs)
    qs = base.annotate(username_length=Length('username'))
    related = (
        'groups',
        'thesis_supervisor',
        'thesis_supervisor__authors',
        'thesis_opponent',
        'thesis_opponent__authors',
        'review_user',
        'review_user__thesis',
        'thesis_author_author',
        'thesis_author_author__author',
    )
    return qs.prefetch_related(*related).order_by('username_length')
def get_research_terms_featured(self, obj):
    """Return up to 10 research term names (longer than one character),
    ranked by how many researchers use each term, most first."""
    terms = list(
        obj.research_terms.annotate(
            name_length=Length('term_name')).filter(name_length__gt=1))
    if not terms:
        return []
    # Rank by researcher count (descending) in Python; counts are not
    # available as a DB annotation here.
    ranked = sorted(terms, key=lambda t: t.researchers.count(), reverse=True)
    return [t.term_name for t in ranked[:10]]
def list_verification(request):
    """Render the admin verification list: E-series expenses serialized to
    JSON plus the selectable year range."""
    # Expenses whose verification contains 'E', shortest identifiers first.
    expense_qs = (
        Expense.objects
        .filter(verification__regex=r'E')
        .order_by(Length('verification').asc(), 'verification')
        .all())
    payload = [expense.to_dict() for expense in expense_qs]
    return render(
        request,
        'admin/list-verification.html',
        {
            'expenses': json.dumps(payload, default=json_serial),
            'years': range(2017, datetime.now().year + 1),
        })
def smart_score_copier(apps, schema_editor):
    """Backfill TestReport.max_score from test_max_score per contest type.

    Default contests copy test_max_score; Algorithmic Engagements (PA)
    contests get 1/0 scores; ACM contests keep max_score as None.
    """
    Contest = apps.get_model('contests', 'Contest')
    ProblemInstance = apps.get_model('contests', 'ProblemInstance')
    Submission = apps.get_model('contests', 'Submission')
    SubmissionReport = apps.get_model('contests', 'SubmissionReport')
    TestReport = apps.get_model('programs', 'TestReport')
    db_alias = schema_editor.connection.alias
    # Firstly, all max_scores will be set as equal to test_max_scores
    # provided that they are not None – this is the behaviour used
    # all contests except for the Algorithmic Engagements
    # and the ACM type contests.
    # This operates on raw, serialized data, which is a bit dirty but works.
    # The Concat/Substr dance left-pads test_max_score with zeros and keeps
    # the last 19 characters, producing the 'int:<zero-padded>' wire format.
    TestReport.objects.using(db_alias).filter(test_max_score__isnull=False) \
        .update(
            max_score=Concat(
                V('int:'),
                Substr(
                    Concat(V('0000000000000000000'), 'test_max_score'),
                    Length(Concat(V('0000000000000000000'),
                                  'test_max_score')) - 18,
                    19
                )
            )
        )
    # Secondly, all max_scores related to the Algorithmic Engagements
    # will be set to either 1 or 0, the same way they are defined
    # in pa_test_scorer from oioioi/pa/utils.py
    pa_test_reports = TestReport.objects.using(db_alias).filter(
        submission_report__submission__problem_instance__contest__controller_name
        ='oioioi.pa.controllers.PAContestController',
        test_max_score__isnull=False)
    pa_test_reports.update(max_score=IntegerScore(1))
    # Zero-point tests stay worth zero.
    pa_test_reports.filter(test_max_score=0).update(max_score=IntegerScore(0))
    # In the end, all max_scores related to the ACM type contests will be left
    # as none, which agrees with their behaviour defined in the ACM contest
    # controller.
    acm_test_reports = TestReport.objects.using(db_alias).filter(
        submission_report__submission__problem_instance__contest__controller_name
        ='oioioi.acm.controllers.ACMContestController')
    acm_test_reports.update(max_score=None)
def migrate_history_lines(apps, schema_editor):
    """Re-chunk HistoryLines rows whose line text exceeds 2048 characters:
    each oversized row is deleted and replaced by the split lines produced
    by __bulking_lines, created in one bulk insert."""
    HistoryLines = apps.get_registered_model('main', 'HistoryLines')
    oversized = HistoryLines.objects.annotate(
        line_len=Length('line')).filter(line_len__gt=2048)
    replacements = []
    for hist_line in oversized:
        history = hist_line.history
        value = hist_line.line
        number = hist_line.line_number
        hist_line.delete()
        replacements.extend(
            __bulking_lines(history, value, number, HistoryLines))
    HistoryLines.objects.bulk_create(replacements)