def get_objects_for_user(user, perms, klass=None, use_groups=True,
                         any_perm=False, with_superuser=True,
                         accept_global_perms=True):
    """
    Returns queryset of objects for which a given ``user`` has *all*
    permissions present at ``perms``.

    :param user: ``User`` or ``AnonymousUser`` instance for which objects
      would be returned.
    :param perms: single permission string, or sequence of permission strings
      which should be checked.
      If ``klass`` parameter is not given, those should be full permission
      names rather than only codenames (i.e. ``auth.change_user``). If more
      than one permission is present within sequence, their content type
      **must** be the same or ``MixedContentTypeError`` exception would be
      raised.
    :param klass: may be a Model, Manager or QuerySet object. If not given
      this parameter would be computed based on given ``params``.
    :param use_groups: if ``False``, wouldn't check user's groups object
      permissions. Default is ``True``.
    :param any_perm: if ``True``, any of permission in sequence is accepted.
      Default is ``False``.
    :param with_superuser: if ``True`` and if ``user.is_superuser`` is set,
      returns the entire queryset. Otherwise will only return objects the
      user has explicit permissions for. This must be ``True`` for the
      ``accept_global_perms`` parameter to have any effect. Default is
      ``True``.
    :param accept_global_perms: if ``True`` takes global permissions into
      account. Object based permissions are taken into account if more than
      one permission is handed in via ``perms`` and at least one of these
      perms is not globally set. If ``any_perm`` is set to ``False`` then the
      intersection of matching objects is returned. Note that if
      ``with_superuser`` is ``False``, ``accept_global_perms`` will be
      ignored, which means that only object permissions will be checked!
      Default is ``True``.

    :raises MixedContentTypeError: when computed content type for ``perms``
      and/or ``klass`` clashes.
    :raises WrongAppError: if cannot compute app label for given ``perms``/
      ``klass``.

    Example::

        >>> from django.contrib.auth.models import User
        >>> from guardian.shortcuts import get_objects_for_user
        >>> joe = User.objects.get(username='******')
        >>> get_objects_for_user(joe, 'auth.change_group')
        []
        >>> from guardian.shortcuts import assign_perm
        >>> group = Group.objects.create('some group')
        >>> assign_perm('auth.change_group', joe, group)
        >>> get_objects_for_user(joe, 'auth.change_group')
        [<Group some group>]

    The permission string can also be an iterable. Continuing with the
    previous example:

        >>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
        []
        >>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'], any_perm=True)
        [<Group some group>]
        >>> assign_perm('auth.delete_group', joe, group)
        >>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
        [<Group some group>]

    Take global permissions into account:

        >>> jack = User.objects.get(username='******')
        >>> assign_perm('auth.change_group', jack)  # this will set a global permission
        >>> get_objects_for_user(jack, 'auth.change_group')
        [<Group some group>]
        >>> group2 = Group.objects.create('other group')
        >>> assign_perm('auth.delete_group', jack, group2)
        >>> get_objects_for_user(jack, ['auth.change_group', 'auth.delete_group'])  # this retrieves intersection
        [<Group other group>]
        >>> get_objects_for_user(jack, ['auth.change_group', 'auth.delete_group'], any_perm=True)  # this retrieves union
        [<Group some group>, <Group other group>]

    If accept_global_perms is set to ``True``, then all assigned global
    permissions will also be taken into account.

    - Scenario 1: a user has view permissions generally defined on the model
      'books' but no object based permission on a single book instance:

        - If accept_global_perms is ``True``: List of all books will be
          returned.
        - If accept_global_perms is ``False``: list will be empty.

    - Scenario 2: a user has view permissions generally defined on the model
      'books' and also has an object based permission to view book 'Whatever':

        - If accept_global_perms is ``True``: List of all books will be
          returned.
        - If accept_global_perms is ``False``: list will only contain book
          'Whatever'.

    - Scenario 3: a user only has object based permission on book 'Whatever':

        - If accept_global_perms is ``True``: List will only contain book
          'Whatever'.
        - If accept_global_perms is ``False``: List will only contain book
          'Whatever'.

    - Scenario 4: a user does not have any permission:

        - If accept_global_perms is ``True``: Empty list.
        - If accept_global_perms is ``False``: Empty list.
    """
    if isinstance(perms, str):
        perms = [perms]
    ctype = None
    app_label = None
    codenames = set()

    # Compute codenames set and ctype if possible
    for perm in perms:
        if '.' in perm:
            new_app_label, codename = perm.split('.', 1)
            if app_label is not None and app_label != new_app_label:
                raise MixedContentTypeError(
                    "Given perms must have same app label "
                    "(%s != %s)" % (app_label, new_app_label))
            else:
                app_label = new_app_label
        else:
            codename = perm
        codenames.add(codename)
        if app_label is not None:
            new_ctype = ContentType.objects.get(
                app_label=app_label, permission__codename=codename)
            if ctype is not None and ctype != new_ctype:
                raise MixedContentTypeError(
                    "ContentType was once computed to be %s and another "
                    "one %s" % (ctype, new_ctype))
            else:
                ctype = new_ctype

    # Compute queryset and ctype if still missing
    if ctype is None and klass is not None:
        queryset = _get_queryset(klass)
        ctype = get_content_type(queryset.model)
    elif ctype is not None and klass is None:
        queryset = _get_queryset(ctype.model_class())
    elif klass is None:
        raise WrongAppError("Cannot determine content type")
    else:
        queryset = _get_queryset(klass)
        if ctype.model_class() != queryset.model:
            raise MixedContentTypeError(
                "Content type for given perms and klass differs")

    # At this point, we should have both ctype and queryset and they should
    # match which means: ctype.model_class() == queryset.model
    # we should also have ``codenames`` list

    # First check if user is superuser and if so, return queryset immediately
    if with_superuser and user.is_superuser:
        return queryset

    # Check if the user is anonymous. The
    # django.contrib.auth.models.AnonymousUser object doesn't work for queries
    # and it's nice to be able to pass in request.user blindly.
    if user.is_anonymous:
        user = get_anonymous_user()

    global_perms = set()
    has_global_perms = False
    # a superuser has by default assigned global perms for any
    if accept_global_perms and with_superuser:
        for code in codenames:
            if user.has_perm(ctype.app_label + '.' + code):
                global_perms.add(code)
        for code in global_perms:
            codenames.remove(code)
        # prerequisite: there must be elements in global_perms otherwise just
        # follow the procedure for object based permissions only AND
        # 1. codenames is empty, which means that permissions are ONLY set
        #    globally, therefore return the full queryset.
        # OR
        # 2. any_perm is True, then the global permission beats the object
        #    based permission anyway, therefore return full queryset
        if len(global_perms) > 0 and (len(codenames) == 0 or any_perm):
            return queryset
        # if we have global perms and still some object based perms differing
        # from global perms and any_perm is set to False, then we have to
        # flag that global perms exist in order to merge object based
        # permissions by user and by group correctly. Scenario: global perm
        # change_xx and object based perm delete_xx on object A for user, and
        # object based permission delete_xx on object B for group, to which
        # user is assigned.
        # get_objects_for_user(user, [change_xx, delete_xx], use_groups=True,
        # any_perm=False, accept_global_perms=True) must retrieve object A
        # and B.
        elif len(global_perms) > 0 and (len(codenames) > 0):
            has_global_perms = True

    # Now we should extract list of pk values for which we would filter
    # queryset
    user_model = get_user_obj_perms_model(queryset.model)
    user_obj_perms_queryset = (user_model.objects
                               .filter(user=user)
                               .filter(permission__content_type=ctype))
    if len(codenames):
        user_obj_perms_queryset = user_obj_perms_queryset.filter(
            permission__codename__in=codenames)
    direct_fields = ['content_object__pk', 'permission__codename']
    generic_fields = ['object_pk', 'permission__codename']
    if user_model.objects.is_generic():
        user_fields = generic_fields
    else:
        user_fields = direct_fields

    if use_groups:
        group_model = get_group_obj_perms_model(queryset.model)
        group_filters = {
            'permission__content_type': ctype,
            'group__%s' % get_user_model().groups.field.related_query_name(): user,
        }
        if len(codenames):
            group_filters.update({
                'permission__codename__in': codenames,
            })
        groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
        if group_model.objects.is_generic():
            group_fields = generic_fields
        else:
            group_fields = direct_fields
        if not any_perm and len(codenames) > 1 and not has_global_perms:
            user_obj_perms = user_obj_perms_queryset.values_list(*user_fields)
            groups_obj_perms = groups_obj_perms_queryset.values_list(*group_fields)
            data = list(user_obj_perms) + list(groups_obj_perms)
            # sorting/grouping by pk (first in result tuple)
            keyfunc = lambda t: t[0]
            data = sorted(data, key=keyfunc)
            pk_list = []
            for pk, group in groupby(data, keyfunc):
                obj_codenames = {e[1] for e in group}
                if codenames.issubset(obj_codenames):
                    pk_list.append(pk)
            objects = queryset.filter(pk__in=pk_list)
            return objects

    if not any_perm and len(codenames) > 1:
        counts = user_obj_perms_queryset.values(user_fields[0]).annotate(
            object_pk_count=Count(user_fields[0]))
        user_obj_perms_queryset = counts.filter(
            object_pk_count__gte=len(codenames))

    is_cast_integer = _is_cast_integer_pk(queryset)
    field_pk = user_fields[0]
    values = user_obj_perms_queryset
    if is_cast_integer:
        values = values.annotate(obj_pk=Cast(field_pk, BigIntegerField()))
        field_pk = 'obj_pk'
    values = values.values_list(field_pk, flat=True)
    q = Q(pk__in=values)
    if use_groups:
        field_pk = group_fields[0]
        values = groups_obj_perms_queryset
        if is_cast_integer:
            values = values.annotate(obj_pk=Cast(field_pk, BigIntegerField()))
            field_pk = 'obj_pk'
        values = values.values_list(field_pk, flat=True)
        q |= Q(pk__in=values)

    return queryset.filter(q)
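# Both guardian shortcuts in this section call a helper named
# _is_cast_integer_pk(queryset) whose definition is not included here. The
# sketch below is a hypothetical reconstruction, not the actual source:
# it assumes the helper's job is to decide whether the textual object_pk
# column of a generic object-permission model can safely be
# Cast(..., BigIntegerField()), i.e. whether the target model uses an
# integer-like primary key.
from django.db.models import AutoField, BigIntegerField, IntegerField


def _is_cast_integer_pk(queryset):
    """Return True when the queryset's model has an integer-like primary key."""
    pk_field = queryset.model._meta.pk
    # AutoField/BigAutoField cover auto pks; IntegerField covers the explicit
    # integer pk variants (BigIntegerField and friends subclass it).
    return isinstance(pk_field, (AutoField, IntegerField, BigIntegerField))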
def test_cast_from_field(self):
    numbers = Author.objects.annotate(
        cast_string=Cast('age', models.CharField(max_length=255)),
    )
    self.assertEqual(numbers.get().cast_string, '1')
def test_cast_to_char_field_with_max_length(self):
    names = Author.objects.annotate(
        cast_string=Cast('name', models.CharField(max_length=1)))
    self.assertEqual(names.get().cast_string, 'B')
def delete_notifications_for_participants(participants, type):
    Notification.objects.order_by().not_expired() \
        .filter(type=type) \
        .annotate(participant_id=Cast(
            KeyTextTransform('activity_participant', 'context'),
            IntegerField())) \
        .filter(participant_id__in=participants.values_list('id', flat=True)) \
        .delete()
def get_field(self):
    return (Cast('debateteam__teamscore__votes_given', FloatField()) /
            NullIf('debateteam__teamscore__votes_possible', 0,
                   output_field=FloatField()) *
            self.adjs_per_debate)
def revert_backup_hook(hook_cls):
    hook_cls.objects.update(
        object_id=Cast(F("object_id2"), output_field=PositiveIntegerField()))
    hook_cls.objects.update(object_id2=None)
def delete_notifications_for_collectors(collectors, type):
    Notification.objects.order_by().not_expired() \
        .filter(type=type) \
        .annotate(collector_id=Cast(
            KeyTextTransform('pickup_collector', 'context'),
            IntegerField())) \
        .filter(collector_id__in=collectors.values_list('id', flat=True)) \
        .delete()
def average_page_view(self, **kwargs):
    return self.filter(**kwargs).values('session_key').aggregate(
        views=Cast(Count('pk'), FloatField()) /
              Cast(Count('session_key', distinct=True), FloatField()))
def percent_authenticated(self, **kwargs):
    return self.filter(**kwargs) \
        .annotate(users=Cast(Count('pk', distinct=True), FloatField()),
                  authenticated=Cast(
                      Count('pk', filter=Q(is_authenticated=True)),
                      FloatField())) \
        .aggregate(percent=Sum('authenticated') / Sum('users'))
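# The two methods above cast Count results to FloatField before dividing.
# Without the casts both aggregates resolve to integer fields and most
# backends then perform integer division, collapsing the ratio to 0 or 1.
# A minimal sketch of the same pattern, assuming a hypothetical PageView
# model with a session_key column (the model is not part of this section):
from django.db.models import Count, FloatField
from django.db.models.functions import Cast

views_per_session = PageView.objects.aggregate(
    # Cast both counts so the backend divides floats, not integers.
    ratio=Cast(Count('pk'), FloatField()) /
          Cast(Count('session_key', distinct=True), FloatField()),
)['ratio']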
def get_products_data(
    queryset: "QuerySet",
    export_fields: Set[str],
    attribute_ids: Optional[List[int]],
    warehouse_ids: Optional[List[int]],
    channel_ids: Optional[List[int]],
) -> List[Dict[str, Union[str, bool]]]:
    """Create data list of products and their variants with fields values.

    It returns a list with product and variant data which can be used as
    input to a csv writer, and a list of attribute and warehouse headers.
    """
    products_with_variants_data = []

    export_variant_id = "variants__id" in export_fields
    product_fields = set(
        ProductExportFields.HEADERS_TO_FIELDS_MAPPING["fields"].values()
    )
    product_export_fields = export_fields & product_fields
    if not export_variant_id:
        product_export_fields.add("variants__id")

    products_data = (
        queryset.annotate(
            product_weight=Case(
                When(weight__isnull=False, then=Concat("weight", V(" g"))),
                default=V(""),
                output_field=CharField(),
            ),
            variant_weight=Case(
                When(
                    variants__weight__isnull=False,
                    then=Concat("variants__weight", V(" g")),
                ),
                default=V(""),
                output_field=CharField(),
            ),
            description_as_str=Cast("description", CharField()),
        )
        .order_by("pk", "variants__pk")
        .values(*product_export_fields)
        .distinct("pk", "variants__pk")
    )

    products_relations_data = get_products_relations_data(
        queryset, export_fields, attribute_ids, channel_ids
    )
    variants_relations_data = get_variants_relations_data(
        queryset, export_fields, attribute_ids, warehouse_ids, channel_ids
    )

    for product_data in products_data:
        pk = product_data["id"]
        if export_variant_id:
            variant_pk = product_data.get("variants__id")
        else:
            variant_pk = product_data.pop("variants__id")

        product_relations_data: Dict[str, str] = products_relations_data.get(
            pk, {}
        )
        variant_relations_data: Dict[str, str] = variants_relations_data.get(
            variant_pk, {}
        )

        product_data["id"] = graphene.Node.to_global_id("Product", pk)
        if export_variant_id:
            product_data["variants__id"] = graphene.Node.to_global_id(
                "ProductVariant", variant_pk
            )

        data = {**product_data, **product_relations_data, **variant_relations_data}
        products_with_variants_data.append(data)

    return products_with_variants_data
def download_sass_summary_data_task(filename, filters, path_file):
    from bims.utils.celery import memcache_lock
    import random

    lock_id = '{0}-lock-{1}'.format(filename, len(filters))
    oid = random.randint(1, 101)
    with memcache_lock(lock_id, oid) as acquired:
        if acquired:
            search = CollectionSearch(filters)
            context = {'filters': filters}
            collection_records = search.process_search()
            collection_ids = list(
                collection_records.values_list('id', flat=True))
            # Get SASS data
            site_visit_taxa = SiteVisitTaxon.objects.filter(
                id__in=collection_ids)
            summary = site_visit_taxa.annotate(
                date=F('collection_date'),
            ).values('date').annotate(
                sampling_date=F('site_visit__site_visit_date'),
                full_name=Concat('site_visit__owner__first_name',
                                 Value(' '),
                                 'site_visit__owner__last_name',
                                 output_field=CharField())
            ).values('sampling_date', 'full_name').annotate(
                count=Count('sass_taxon'),
                sass_score=Sum(Case(
                    When(condition=Q(
                        site_visit__sass_version=5,
                        sass_taxon__sass_5_score__isnull=False),
                        then='sass_taxon__sass_5_score'),
                    default='sass_taxon__score')),
                sass_id=F('site_visit__id'),
                FBIS_site_code=Case(
                    When(
                        site_visit__location_site__site_code__isnull=False,
                        then='site_visit__location_site__site_code'),
                    default='site_visit__location_site__name'),
                site_id=F('site_visit__location_site__id'),
                sass_version=F('site_visit__sass_version'),
                site_description=F(
                    'site_visit__location_site__site_description'),
                river_name=Case(
                    When(
                        site_visit__location_site__river__isnull=False,
                        then='site_visit__location_site__river__name'),
                    default=Value('-')),
                latitude=F('site_visit__location_site__latitude'),
                longitude=F('site_visit__location_site__longitude'),
                source_reference=F('source_reference'),
                ecological_category=F(
                    'site_visit__'
                    'sitevisitecologicalcondition__'
                    'ecological_condition__category')
            ).annotate(
                aspt=Cast(F('sass_score'), FloatField()) /
                Cast(F('count'), FloatField()),
            ).order_by('sampling_date')
            context['location_contexts'] = LocationContext.objects.filter(
                site__in=site_visit_taxa.values('site_visit__location_site'))
            serializer = SassSummaryDataSerializer(
                summary, many=True, context=context)
            headers = serializer.data[0].keys()
            rows = serializer.data
            formatted_headers = []
            # Rename headers
            for header in headers:
                header_split = [
                    word[0].upper() + word[1:] for word in header.split('_')
                ]
                header = ' '.join(header_split)
                formatted_headers.append(header)
            # csv in Python 3 expects text mode, not 'wb'
            with open(path_file, 'w') as csv_file:
                writer = csv.DictWriter(csv_file, fieldnames=formatted_headers)
                writer.writeheader()
                writer.fieldnames = headers
                for row in rows:
                    try:
                        writer.writerow(row)
                    except ValueError:
                        writer.fieldnames = row.keys()
                        writer.writerow(row)
            return
    logger.info(
        'Csv %s is already being processed by another worker', filename)
def get_order_by(self):
    """
    Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for
    the ORDER BY clause.

    The order_by clause can alter the select clause (for example it can
    add aliases to clauses that do not yet have one, or it can add totally
    new select clauses).
    """
    if self.query.extra_order_by:
        ordering = self.query.extra_order_by
    elif not self.query.default_ordering:
        ordering = self.query.order_by
    elif self.query.order_by:
        ordering = self.query.order_by
    elif self.query.get_meta().ordering:
        ordering = self.query.get_meta().ordering
        self._meta_ordering = ordering
    else:
        ordering = []
    if self.query.standard_ordering:
        asc, desc = ORDER_DIR['ASC']
    else:
        asc, desc = ORDER_DIR['DESC']

    order_by = []
    for field in ordering:
        if hasattr(field, 'resolve_expression'):
            if isinstance(field, Value):
                # output_field must be resolved for constants.
                field = Cast(field, field.output_field)
            if not isinstance(field, OrderBy):
                field = field.asc()
            if not self.query.standard_ordering:
                field = field.copy()
                field.reverse_ordering()
                order_by.append((field, True))
            else:
                order_by.append((field, False))
            continue
        if field == '?':  # random
            order_by.append((OrderBy(Random()), False))
            continue

        col, order = get_order_dir(field, asc)
        descending = order == 'DESC'

        if col in self.query.annotation_select:
            # Reference to expression in SELECT clause
            order_by.append((
                OrderBy(Ref(col, self.query.annotation_select[col]),
                        descending=descending),
                True))
            continue
        if col in self.query.annotations:
            # References to an expression which is masked out of the SELECT
            # clause.
            if self.query.combinator and self.select:
                # Don't use the resolved annotation because other
                # combined queries might define it differently.
                expr = F(col)
            else:
                expr = self.query.annotations[col]
                if isinstance(expr, Value):
                    # output_field must be resolved for constants.
                    expr = Cast(expr, expr.output_field)
            order_by.append((OrderBy(expr, descending=descending), False))
            continue

        if '.' in field:
            # This came in through an extra(order_by=...) addition. Pass it
            # on verbatim.
            table, col = col.split('.', 1)
            order_by.append((
                OrderBy(
                    RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
                    descending=descending,
                ), False))
            continue

        if not self.query.extra or col not in self.query.extra:
            if self.query.combinator and self.select:
                # Don't use the first model's field because other
                # combined queries might define it differently.
                order_by.append((OrderBy(F(col), descending=descending), False))
            else:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                order_by.extend(self.find_ordering_name(
                    field, self.query.get_meta(), default_order=asc))
        else:
            if col not in self.query.extra_select:
                order_by.append((
                    OrderBy(RawSQL(*self.query.extra[col]),
                            descending=descending),
                    False))
            else:
                order_by.append((
                    OrderBy(Ref(col, RawSQL(*self.query.extra[col])),
                            descending=descending),
                    True))
    result = []
    seen = set()

    for expr, is_ref in order_by:
        resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
        if self.query.combinator and self.select:
            src = resolved.get_source_expressions()[0]
            expr_src = expr.get_source_expressions()[0]
            # Relabel order by columns to raw numbers if this is a combined
            # query; necessary since the columns can't be referenced by the
            # fully qualified name and the simple column names may collide.
            for idx, (sel_expr, _, col_alias) in enumerate(self.select):
                if is_ref and col_alias == src.refs:
                    src = src.source
                elif col_alias and not (
                    isinstance(expr_src, F) and col_alias == expr_src.name
                ):
                    continue
                if src == sel_expr:
                    resolved.set_source_expressions(
                        [RawSQL('%d' % (idx + 1), ())])
                    break
            else:
                if col_alias:
                    raise DatabaseError(
                        'ORDER BY term does not match any column in the result set.'
                    )
                # Add column used in ORDER BY clause without an alias to
                # the selected columns.
                order_by_idx = len(self.query.select) + 1
                col_name = f'__orderbycol{order_by_idx}'
                for q in self.query.combined_queries:
                    q.add_annotation(expr_src, col_name)
                self.query.add_select_col(src, col_name)
                resolved.set_source_expressions(
                    [RawSQL(f'{order_by_idx}', ())])
        sql, params = self.compile(resolved)
        # Don't add the same column twice, but the order direction is
        # not taken into account so we strip it. When this entire method
        # is refactored into expressions, then we can check each part as we
        # generate it.
        without_ordering = self.ordering_parts.search(sql)[1]
        params_hash = make_hashable(params)
        if (without_ordering, params_hash) in seen:
            continue
        seen.add((without_ordering, params_hash))
        result.append((resolved, (sql, params, is_ref)))
    return result
from ui.models import *
from django.db.models.functions import Cast
from django.contrib.gis.db.models import GeometryField
import json

canton = Canton.objects.get(name='Genève')
output = []
city_count = canton.city_set.count()
for idx, city in enumerate(canton.city_set.all()):
    c_dict = {}
    c_dict['tree_density'] = city.tree_density
    c_dict['tree_sparsity'] = city.tree_sparsity
    c_dict['name'] = city.name
    c_dict['geometry'] = city.mpoly.json
    tiles = Tile.objects.annotate(geom=Cast('mpoly', GeometryField())).filter(
        geom__within=city.mpoly)
    c_dict['data'] = []
    for tile in tiles:
        centroid = tile.mpoly.centroid
        c_dict['data'].append({
            'x': centroid.x,
            'y': centroid.y,
            'value': tile.tree_density,
        })
    output.append(c_dict)
    print('Parsed city {}/{}'.format(idx + 1, city_count))

with open('geneva-cities.json', 'w') as f:
    json.dump(output, f)
def get_objects_for_group(group, perms, klass=None, any_perm=False,
                          accept_global_perms=True):
    """
    Returns queryset of objects for which a given ``group`` has *all*
    permissions present at ``perms``.

    :param group: ``Group`` instance for which objects would be returned.
    :param perms: single permission string, or sequence of permission strings
      which should be checked.
      If ``klass`` parameter is not given, those should be full permission
      names rather than only codenames (i.e. ``auth.change_user``). If more
      than one permission is present within sequence, their content type
      **must** be the same or ``MixedContentTypeError`` exception would be
      raised.
    :param klass: may be a Model, Manager or QuerySet object. If not given
      this parameter would be computed based on given ``params``.
    :param any_perm: if ``True``, any of permission in sequence is accepted.
    :param accept_global_perms: if ``True`` takes global permissions into
      account. If ``any_perm`` is set to ``False`` then the intersection of
      matching objects based on global and object based permissions is
      returned. Default is ``True``.

    :raises MixedContentTypeError: when computed content type for ``perms``
      and/or ``klass`` clashes.
    :raises WrongAppError: if cannot compute app label for given ``perms``/
      ``klass``.

    Example:

    Let's assume we have a ``Task`` model belonging to the ``tasker`` app
    with the default add_task, change_task and delete_task permissions
    provided by Django::

        >>> from guardian.shortcuts import get_objects_for_group
        >>> from tasker import Task
        >>> group = Group.objects.create('some group')
        >>> task = Task.objects.create('some task')
        >>> get_objects_for_group(group, 'tasker.add_task')
        []
        >>> from guardian.shortcuts import assign_perm
        >>> assign_perm('tasker.add_task', group, task)
        >>> get_objects_for_group(group, 'tasker.add_task')
        [<Task some task>]

    The permission string can also be an iterable. Continuing with the
    previous example:

        >>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
        []
        >>> assign_perm('tasker.delete_task', group, task)
        >>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
        [<Task some task>]

    Global permissions assigned to the group are also taken into account.
    Continuing with previous example:

        >>> task_other = Task.objects.create('other task')
        >>> assign_perm('tasker.change_task', group)
        >>> get_objects_for_group(group, ['tasker.change_task'])
        [<Task some task>, <Task other task>]
        >>> get_objects_for_group(group, ['tasker.change_task'], accept_global_perms=False)
        [<Task some task>]
    """
    if isinstance(perms, str):
        perms = [perms]
    ctype = None
    app_label = None
    codenames = set()

    # Compute codenames set and ctype if possible
    for perm in perms:
        if '.' in perm:
            new_app_label, codename = perm.split('.', 1)
            if app_label is not None and app_label != new_app_label:
                raise MixedContentTypeError(
                    "Given perms must have same app label "
                    "(%s != %s)" % (app_label, new_app_label))
            else:
                app_label = new_app_label
        else:
            codename = perm
        codenames.add(codename)
        if app_label is not None:
            new_ctype = ContentType.objects.get(
                app_label=app_label, permission__codename=codename)
            if ctype is not None and ctype != new_ctype:
                raise MixedContentTypeError(
                    "ContentType was once computed to be %s and another "
                    "one %s" % (ctype, new_ctype))
            else:
                ctype = new_ctype

    # Compute queryset and ctype if still missing
    if ctype is None and klass is not None:
        queryset = _get_queryset(klass)
        ctype = get_content_type(queryset.model)
    elif ctype is not None and klass is None:
        queryset = _get_queryset(ctype.model_class())
    elif klass is None:
        raise WrongAppError("Cannot determine content type")
    else:
        queryset = _get_queryset(klass)
        if ctype.model_class() != queryset.model:
            raise MixedContentTypeError(
                "Content type for given perms and klass differs")

    # At this point, we should have both ctype and queryset and they should
    # match which means: ctype.model_class() == queryset.model
    # we should also have ``codenames`` list

    global_perms = set()
    if accept_global_perms:
        global_perm_set = group.permissions.values_list('codename', flat=True)
        for code in codenames:
            if code in global_perm_set:
                global_perms.add(code)
        for code in global_perms:
            codenames.remove(code)
        if len(global_perms) > 0 and (len(codenames) == 0 or any_perm):
            return queryset

    # Now we should extract list of pk values for which we would filter
    # queryset
    group_model = get_group_obj_perms_model(queryset.model)
    groups_obj_perms_queryset = (group_model.objects
                                 .filter(group=group)
                                 .filter(permission__content_type=ctype))
    if len(codenames):
        groups_obj_perms_queryset = groups_obj_perms_queryset.filter(
            permission__codename__in=codenames)
    if group_model.objects.is_generic():
        fields = ['object_pk', 'permission__codename']
    else:
        fields = ['content_object__pk', 'permission__codename']

    if not any_perm and len(codenames):
        groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
        data = list(groups_obj_perms)
        # sorting/grouping by pk (first in result tuple)
        keyfunc = lambda t: t[0]
        data = sorted(data, key=keyfunc)
        pk_list = []
        for pk, group in groupby(data, keyfunc):
            obj_codenames = {e[1] for e in group}
            if any_perm or codenames.issubset(obj_codenames):
                pk_list.append(pk)
        objects = queryset.filter(pk__in=pk_list)
        return objects

    is_cast_integer = _is_cast_integer_pk(queryset)
    field_pk = fields[0]
    values = groups_obj_perms_queryset
    if is_cast_integer:
        values = values.annotate(obj_pk=Cast(field_pk, BigIntegerField()))
        field_pk = 'obj_pk'
    values = values.values_list(field_pk, flat=True)
    return queryset.filter(pk__in=values)
def space_used(self):
    """Return summed up size of all blobs in this volume."""
    return self.blob_set.all().annotate(
        size_value=Cast(
            KeyTextTransform(ATTR_BLOB_SIZE_BYTES, 'attributes'),
            BigIntegerField())
    ).aggregate(sum=Sum('size_value')).get('sum', 0)
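# Several snippets here (the notification deletes above, space_used, and the
# frames_count/processing_time/frame_rate properties further down) share one
# pattern: pull a key out of a JSON column with KeyTextTransform, Cast the
# resulting text to a numeric field, then filter or aggregate on it. A
# minimal sketch, assuming a hypothetical Task model whose JSONField `info`
# holds e.g. {"frames_count": 120}. The import path shown is the Django 3.1+
# location; older versions expose KeyTextTransform from
# django.contrib.postgres.fields.jsonb.
from django.db.models import IntegerField, Sum
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast

total_frames = Task.objects.annotate(
    # KeyTextTransform extracts the value as text; Cast makes it aggregatable.
    frames=Cast(KeyTextTransform('frames_count', 'info'), IntegerField()),
).aggregate(total=Sum('frames'))['total']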
def index(request):
    """Analytics homepage."""
    top_user = (
        Analytics.objects.filter(access_date__gte=(timezone.now() - timedelta(days=7)))
        .values("user___first_name")
        .annotate(
            count=Count("user"),
            average=Avg(Cast("load_time", PositiveIntegerField())),
        )
        .order_by("-count")
    )[:10]
    top_pages = (
        Analytics.objects.filter(access_date__gte=(timezone.now() - timedelta(days=7)))
        .values("pathname")
        .annotate(
            count=Count("user"),
            average=Avg(Cast("load_time", PositiveIntegerField())),
        )
        .order_by("-count")
    )[:10]
    access = (
        Analytics.objects.order_by("access_date")
        .filter(
            access_date__isnull=False,
            access_date__lte=(timezone.now().replace(day=1) - timedelta(days=1)),
        )
        .annotate(month=Trunc("access_date", "month"))
        .values("month")
        .annotate(count=Count("analytics_id"))
        .order_by("month")
    )
    search = (
        Analytics.objects.order_by("access_date")
        .filter(
            access_date__isnull=False,
            access_date__lte=timezone.now().replace(day=1) - timedelta(days=1),
            pathname="/search",
        )
        .annotate(month=Trunc("access_date", "month"))
        .values("month")
        .annotate(count=Count("analytics_id"))
        .order_by("month")
    )
    report = (
        Analytics.objects.order_by("access_date")
        .filter(
            access_date__isnull=False,
            access_date__lte=timezone.now().replace(day=1) - timedelta(days=1),
            pathname="/reports",
        )
        .annotate(month=Trunc("access_date", "month"))
        .values("month")
        .annotate(count=Count("analytics_id"))
        .order_by("month")
    )
    term = (
        Analytics.objects.order_by("access_date")
        .filter(
            access_date__isnull=False,
            access_date__lte=timezone.now().replace(day=1) - timedelta(days=1),
            pathname="/terms",
        )
        .annotate(month=Trunc("access_date", "month"))
        .values("month")
        .annotate(count=Count("analytics_id"))
        .order_by("month")
    )
    collection = (
        Analytics.objects.order_by("access_date")
        .filter(
            access_date__isnull=False,
            access_date__lte=timezone.now().replace(day=1) - timedelta(days=1),
            pathname="/collections",
        )
        .annotate(month=Trunc("access_date", "month"))
        .values("month")
        .annotate(count=Count("analytics_id"))
        .order_by("month")
    )
    context = {
        "top_users": top_user,
        "top_pages": top_pages,
        "access": access,
        "search": search,
        "report": report,
        "term": term,
        "collection": collection,
        "title": "Analytics",
    }
    return render(
        request,
        "analytics/index.html.dj",
        context,
    )
def filter_by_gift_card(qs, value, gift_card_type):
    gift_card_events = GiftCardEvent.objects.filter(
        type=gift_card_type
    ).values(order_id=Cast("parameters__order_id", IntegerField()))
    lookup = Exists(gift_card_events.filter(order_id=OuterRef("id")))
    return qs.filter(lookup) if value is True else qs.exclude(lookup)
def get_queryset(self):
    config = PartnershipConfiguration.get_configuration()
    academic_year = config.get_current_academic_year_for_api()
    self.academic_year = academic_year
    academic_year_repr = Concat(
        Cast(F('academic_year__year'), models.CharField()),
        Value('-'),
        Right(
            Cast(F('academic_year__year') + 1, output_field=models.CharField()),
            2),
    )
    return (PartnershipPartnerRelation.objects.filter_for_api(
        academic_year
    ).annotate_partner_address(
        'country__continent__name',
        'country__iso_code',
        'country__name',
        'country_id',
        'city',
        'location',
    ).select_related(
        'entity__partnerentity',
        'entity__organization',
    ).prefetch_related(
        Prefetch(
            'partnership',
            queryset=Partnership.objects.add_acronyms().select_related(
                'subtype',
                'supervisor',
            ).prefetch_related(
                'contacts',
                'missions',
                Prefetch(
                    'partner_entities',
                    queryset=EntityProxy.objects.with_partner_info(),
                ))),
        Prefetch(
            'partnership__medias',
            queryset=Media.objects.select_related('type').filter(
                is_visible_in_portal=True),
        ),
        Prefetch(
            'entity__organization__partner',
            queryset=(Partner.objects.annotate_address(
                'country__iso_code',
                'country__name',
                'city',
            ).annotate_website(
            ).select_related('organization').prefetch_related(
                Prefetch(
                    'medias',
                    queryset=Media.objects.filter(
                        is_visible_in_portal=True).select_related('type')),
            ).annotate_partnerships_count()),
            to_attr='partner_prefetched',
        ),
        Prefetch(
            'partnership__ucl_entity',
            queryset=EntityProxy.objects.select_related(
                'uclmanagement_entity__academic_responsible',
                'uclmanagement_entity__administrative_responsible',
                'uclmanagement_entity__contact_out_person',
                'uclmanagement_entity__contact_in_person',
            ).with_title().with_acronym()),
        Prefetch(
            'partnership__years',
            queryset=(PartnershipYear.objects.select_related(
                'academic_year',
                'funding_source',
                'funding_program',
                'funding_type',
            ).prefetch_related(
                Prefetch(
                    'entities',
                    queryset=EntityProxy.objects.with_title().with_acronym()),
                'education_fields',
                'education_levels',
                'offers',
            ).filter(academic_year=academic_year)),
            to_attr='current_year_for_api',
        ),
        Prefetch(
            'partnership__agreements',
            queryset=(PartnershipAgreement.objects.select_related(
                'media',
                'end_academic_year',
            ).filter(
                status=AgreementStatus.VALIDATED.name,
            ).filter(
                start_academic_year__year__lte=academic_year.year,
                end_academic_year__year__gte=academic_year.year,
            )),
            to_attr='valid_current_agreements',
        ),
    ).annotate(
        validity_end_year=Subquery(
            AcademicYear.objects.filter(
                partnership_agreements_end__partnership=OuterRef('partnership_id'),
                partnership_agreements_end__status=AgreementStatus.VALIDATED.name,
            ).order_by('-end_date').values('year')[:1]),
        agreement_start=Subquery(
            PartnershipAgreement.objects.filter(
                partnership=OuterRef('partnership_id'),
                start_date__lte=Now(),
                end_date__gte=Now(),
            ).order_by('-end_date').values('start_date')[:1]),
        start_year=Subquery(
            PartnershipYear.objects.filter(
                partnership=OuterRef('partnership_id'),
            ).annotate(name=academic_year_repr).order_by(
                'academic_year').values('name')[:1]),
        end_year=Subquery(
            PartnershipYear.objects.filter(
                partnership=OuterRef('partnership_id'),
            ).annotate(name=academic_year_repr).order_by(
                '-academic_year').values('name')[:1]),
        agreement_end=Subquery(
            PartnershipAgreement.objects.filter(
                partnership=OuterRef('partnership_id'),
                start_date__lte=Now(),
                end_date__gte=Now(),
            ).order_by('-end_date').values('end_date')[:1]),
    ).annotate(
        validity_years=Concat(
            Value(academic_year.year),
            Value('-'),
            F('validity_end_year') + 1,
            output_field=models.CharField()),
        agreement_status=Subquery(
            PartnershipAgreement.objects.filter(
                partnership=OuterRef('partnership_id'),
                start_academic_year__year__lte=academic_year.year,
                end_academic_year__year__gte=academic_year.year,
            ).order_by('-end_academic_year__year').values('status')[:1]),
        funding_name=Subquery(
            Financing.objects.filter(
                academic_year=academic_year,
                countries=OuterRef('country_id'),
            ).values('type__name')[:1]),
        funding_url=Subquery(
            Financing.objects.filter(
                academic_year=academic_year,
                countries=OuterRef('country_id'),
            ).values('type__url')[:1]),
    ).distinct('pk').order_by('pk'))
def device(request):
    daily_count = list()
    monthly_count = list()
    daily_datetime = list()
    monthly_datetime = list()
    monthly_sum = 0
    weekly_sum = 0
    daily_sum = 0
    today = timezone.localdate()
    year = Q(date_first_registered__year=today.year)
    month = Q(date_first_registered__month=today.month)
    day = Q(date_first_registered__day=today.day)
    today_count = Device.objects.filter(year & month & day).count()
    total = Device.objects.count()
    for dm in range(12):
        m = today - relativedelta(months=dm)
        monthly_datetime.append(m)
    for dd in range(30):
        d = today - timedelta(days=dd)
        daily_datetime.append(d)
    daily_datetime.reverse()
    monthly_datetime.reverse()
    num = 0
    thirty_day = today - timedelta(days=30)
    ninety_day = today - timedelta(days=90)
    thirty_total = Device.objects.filter(date_offline__lte=thirty_day).filter(
        date_offline__gt=ninety_day).count()
    ninety_total = Device.objects.filter(date_offline__lte=ninety_day).count()
    thirty_days = (thirty_total / total) * 100
    ninety_days = (ninety_total / total) * 100
    for d in daily_datetime:
        year_q = Q(date_first_registered__year=d.year)
        month_q = Q(date_first_registered__month=d.month)
        day_q = Q(date_first_registered__day=d.day)
        daily_count.append(
            Device.objects.filter(year_q & month_q & day_q).count())
        daily_sum += Device.objects.filter(year_q & month_q & day_q).count()
        if num < 7:
            weekly_sum += Device.objects.filter(year_q & month_q & day_q).count()
        num += 1
    weekly_average = weekly_sum / 7
    daily_average = daily_sum / 30
    for dt in monthly_datetime:
        year_q = Q(date_first_registered__year=dt.year)
        month_q = Q(date_first_registered__month=dt.month)
        monthly_count.append(Device.objects.filter(year_q & month_q).count())
        monthly_sum += Device.objects.filter(year_q & month_q).count()
    monthly_average = monthly_sum / 365
    windows = Device.objects.filter(
        Q(os_version__startswith='Microsoft') |
        Q(os_version__startswith='Win32NT'))
    mac = Device.objects.filter(os_version__startswith='macOS')
    linux = Device.objects.filter(
        Q(os_version__startswith='Red Hat') |
        Q(os_version__startswith='Amazon') |
        Q(os_version__startswith='CentOS') |
        Q(os_version__startswith='Ubuntu'))
    update = Device.objects.filter(agent_version='2.1.1550').count()
    unupdate = total - update
    online = (Device.objects.filter(state='Online').count() / total) * 100
    address = MACAddress.objects.values_list('mac_address', flat=True) \
        .annotate(count=Count('mac_address')) \
        .filter(count__gt=1)
    f = Q()
    for i in range(address.count()):
        f |= Q(macaddress__mac_address=address[i])
    invalid = (Device.objects.filter(f).annotate(count=Count('id')) |
               Device.objects.filter(date_offline__lte=ninety_day)).count()
    non_default = Device.objects.exclude(policy__policy_name='Default').count()
    over_ninety_days = Device.objects.filter(date_offline__lte=ninety_day)[:10]
    mac_duplicate = Device.objects.filter(f).annotate(count=Count('id'))[:10]
    os_count = Tenant.objects.annotate(
        windows_count=Count('device',
                            filter=Q(device__os_version__startswith='Microsoft') |
                                   Q(device__os_version__startswith='Win32NT'))) \
        .annotate(mac_count=Count('device',
                                  filter=Q(device__os_version__startswith='macOS'))) \
        .annotate(linux_count=Count('device',
                                    filter=Q(device__os_version__startswith='Red Hat') |
                                           Q(device__os_version__startswith='Amazon') |
                                           Q(device__os_version__startswith='CentOS') |
                                           Q(device__os_version__startswith='Ubuntu')))[:10]
    policy_per_tenant = Policy.objects.annotate(count=Count('tenant'))[:10]
    unupdate_count = Tenant.objects.annotate(unupdate_count=Count(
        'device', filter=~Q(device__agent_version='2.1.1550')))
    fourty_count = Tenant.objects.annotate(fourty_count=Count(
        'device', filter=Q(device__agent_version='2.0.1540')))
    thirtyfour_count = Tenant.objects.annotate(thirtyfour_count=Count(
        'device', filter=Q(device__agent_version='2.0.1534')))
    thirty_count = Tenant.objects.annotate(thirty_count=Count(
        'device', filter=Q(device__agent_version='2.0.1530')))
    twenty_count = Tenant.objects.annotate(twenty_count=Count(
        'device', filter=Q(device__agent_version='2.0.1520')))
    rate = Tenant.objects.annotate(
        update_count=Count('device', filter=Q(device__agent_version='2.1.1550'))) \
        .annotate(td_total=Count('device')) \
        .annotate(rate=Case(
            When(
                condition=Q(td_total__gt=0),
                then=Cast(
                    (Cast(F('update_count'), FloatField()) /
                     Cast(F('td_total'), FloatField()) * 100.0),
                    IntegerField()
                )
            ),
            default=0
        ))[:10]
    # rate = list()
    #
    # for d in Tenant.objects.all():
    #     di = dict()
    #     di['tenant_id'] = d.id
    #     try:
    #         di['rate'] = int((Device.objects.filter(Q(agent_version='2.1.1550') & Q(tenant_id=d.id)).count() /
    #                           Device.objects.filter(tenant_id=d.id).count()) * 100)
    #     except ZeroDivisionError:
    #         di['rate'] = 0
    #     rate.append(di)
    return render(
        request,
        'TIP/device.html',
        # context={
        #     "daily_count": daily_count,
        #     "today_count": today_count,
        #
        #     "daily_average": int(daily_average),
        #     "total": total,
        #     "weekly_average": int(weekly_average),
        #     "weekly_sum": weekly_sum,
        #     "monthly_average": int(monthly_average),
        #     "monthly_sum": monthly_sum,
        #
        #     "non_default": non_default,
        #     "non_default_rate": int(non_default / total * 100),
        #
        #     "monthly_count": monthly_count,
        #
        #     "windows": windows.count(),
        #     "mac": mac.count(),
        #     "linux": linux.count(),
        #     "unknown": total - windows.count() - mac.count() - linux.count(),
        #
        #     "invalid": invalid,
        #     "rest": total - invalid,
        #
        #     "update": update,
        #     "unupdate": unupdate,
        #
        #     "online": int(online),
        #     "thirty": int(thirty_days),
        #     "ninety": int(ninety_days),
        #
        #     "os_count": os_count,
        #
        #     "policy_per_tenant": policy_per_tenant,
        #
        #     "over_ninety_days": over_ninety_days,
        #     "mac_duplicate": mac_duplicate,
        #
        #     "rate": rate,
        #     "unupdate_count": unupdate_count,
        #     "fourty_count": fourty_count,
        #     "thirtyfour_count": thirtyfour_count,
        #     "thirty_count": thirty_count,
        #     "twenty_count": twenty_count
        # }
    )
def frames_count(self):
    result = self.tasks.annotate(
        val=Cast(KeyTextTransform('frames_count', 'info'),
                 models.IntegerField())
    ).aggregate(sum=Sum('val'))['sum']
    return result if result is not None else 0
class SearchContactExportAPIView(SearchContactAPIViewMixin, SearchExportAPIView):
    """Contact search export view."""

    def _is_valid_email(self, value):
        """Validate if emails are valid and return a boolean flag."""
        try:
            validate_email(value)
            return True
        except ValidationError:
            return False

    consent_page_size = 100
    db_sort_by_remappings = {
        'address_country.name': 'computed_country_name',
        'address_area.name': 'computed_area_name',
    }
    queryset = DBContact.objects.annotate(
        name=get_full_name_expression(),
        link=get_front_end_url_expression('contact', 'pk'),
        company_sector_name=get_sector_name_subquery('company__sector'),
        company_link=get_front_end_url_expression('company', 'company__pk'),
        computed_country_name=Case(
            When(address_same_as_company=True,
                 then='company__address_country__name'),
            default='address_country__name',
        ),
        computed_area_name=Case(
            When(address_same_as_company=True,
                 then='company__address_area__name'),
            default='address_area__name',
        ),
        computed_postcode=Case(
            When(address_same_as_company=True,
                 then='company__address_postcode'),
            default='address_postcode',
        ),
        date_of_latest_interaction=get_aggregate_subquery(
            DBContact,
            Max('interactions__date'),
        ),
        teams_of_latest_interaction=get_top_related_expression_subquery(
            DBInteraction.contacts.field,
            get_string_agg_subquery(
                DBInteraction,
                Cast('dit_participants__team__name', CharField()),
                distinct=True,
            ),
            ('-date',),
        ),
    )
    field_titles = {
        'name': 'Name',
        'job_title': 'Job title',
        'created_on': 'Date created',
        'archived': 'Archived',
        'link': 'Link',
        'company__name': 'Company',
        'company_sector_name': 'Company sector',
        'company_link': 'Company link',
        'company__uk_region__name': 'Company UK region',
        'computed_country_name': 'Country',
        'computed_area_name': 'Area',
        'computed_postcode': 'Postcode',
        'full_telephone_number': 'Phone number',
        'email': 'Email address',
        'accepts_dit_email_marketing': 'Accepts DIT email marketing',
        'date_of_latest_interaction': 'Date of latest interaction',
        'teams_of_latest_interaction': 'Teams of latest interaction',
        'created_by__dit_team__name': 'Created by team',
    }

    def _add_consent_response(self, rows):
        """
        Transform the iterable to add user consent from the consent service.

        The consent lookup makes an external API call to return consent. For
        performance reasons the consent amount is limited by
        consent_page_size. Due to this limitation the iterable is sliced into
        chunks, requesting consent for 100 rows at a time.
        """
        # Slice iterable into chunks
        row_chunks = slice_iterable_into_chunks(rows, self.consent_page_size)
        for chunk in row_chunks:
            # Loop over the chunks and extract the email and item. Save the
            # item because the iterator cannot be used twice.
            rows = list(chunk)
            # Perform consent lookup on emails (POST request)
            consent_lookups = consent.get_many([
                row['email']
                for row in rows
                if self._is_valid_email(row['email'])
            ])
            for row in rows:
                # Assign contact consent boolean to
                # accepts_dit_email_marketing and yield modified result.
                row['accepts_dit_email_marketing'] = consent_lookups.get(
                    row['email'], False)
                yield row

    def _get_rows(self, ids, search_ordering):
        """
        Get row queryset for the consent service.

        This populates the accepts_dit_email_marketing field from the consent
        service and removes accepts_dit_email_marketing from the field query
        because the field is not in the db.
        """
        db_ordering = self._translate_search_ordering_to_django_ordering(
            search_ordering)
        field_titles = self.field_titles.copy()
        del field_titles['accepts_dit_email_marketing']
        rows = self.queryset.filter(
            pk__in=ids,
        ).order_by(
            *db_ordering,
        ).values(
            *field_titles,
        ).iterator()
        return self._add_consent_response(rows)
def processing_time(self):
    result = self.tasks.annotate(
        val=Cast(KeyTextTransform('processing_time', 'info'),
                 models.FloatField())
    ).aggregate(sum=Sum('val'))['sum']
    return result if result is not None else 0
def get_queryset(self):
    """ called by the template system to get the queryset or list of objects for the template """
    functie_pk = self.kwargs['functie_pk']
    try:
        self._functie = Functie.objects.get(pk=functie_pk)
    except Functie.DoesNotExist:
        # invalid functie_pk
        raise Http404('Verkeerde functie')

    mag_beheerder_wijzigen_of_403(self.request, self._functie)

    self._form = ZoekBeheerdersForm(self.request.GET)
    self._form.full_clean()  # populates cleaned_data

    # we don't want to find the current managers again
    beheerder_accounts = self._functie.accounts.all()
    for account in beheerder_accounts:
        account.geo_beschrijving = ''
        if account.sporter_set.count() > 0:
            sporter = account.sporter_set.all()[0]
            if sporter.bij_vereniging:
                regio = sporter.bij_vereniging.regio
                if not regio.is_administratief:
                    account.geo_beschrijving = "regio %s / rayon %s" % (
                        regio.regio_nr, regio.rayon.rayon_nr)
            if not sporter.bij_vereniging:
                # this message appears after 15 January
                account.let_op = 'LET OP: geen lid meer bij een vereniging'
            elif self._functie.nhb_ver and sporter.bij_vereniging != self._functie.nhb_ver:
                # role for a manager of a club
                # member has switched clubs
                account.let_op = 'LET OP: geen lid bij deze vereniging'
    # for

    self._huidige_beheerders = beheerder_accounts

    zoekterm = self._form.cleaned_data['zoekterm']
    if len(zoekterm) >= 2:  # at least two characters of the name/number
        self._zoekterm = zoekterm

        # note: we link an account, but search for an NHB member,
        # so that we can filter on club
        # accounts that are not a member are not found here
        qset = (Sporter.objects
                .exclude(account=None)
                .exclude(is_actief_lid=False)
                .exclude(account__in=beheerder_accounts)
                .annotate(hele_reeks=Concat('voornaam', Value(' '), 'achternaam'))
                .annotate(lid_nr_str=Cast('lid_nr', CharField()))
                .filter(Q(lid_nr_str__contains=zoekterm) |
                        Q(voornaam__icontains=zoekterm) |
                        Q(achternaam__icontains=zoekterm) |
                        Q(hele_reeks__icontains=zoekterm))
                .order_by('lid_nr'))

        is_vereniging_rol = (self._functie.rol in ('SEC', 'HWL', 'WL'))

        if is_vereniging_rol:
            # only let members of the club be chosen
            qset = qset.filter(bij_vereniging=self._functie.nhb_ver)

        objs = list()
        for sporter in qset[:50]:
            account = sporter.account
            account.geo_beschrijving = ''
            account.lid_nr_str = str(sporter.lid_nr)

            if is_vereniging_rol:
                account.vereniging_naam = str(sporter.bij_vereniging)  # [1234] Name
            else:
                regio = sporter.bij_vereniging.regio
                if not regio.is_administratief:
                    account.geo_beschrijving = "regio %s / rayon %s" % (
                        regio.regio_nr, regio.rayon.rayon_nr)

            objs.append(account)
        # for

        return objs

    self._zoekterm = ""
    return None
def frame_rate(self):
    result = self.tasks.annotate(
        val=Cast(KeyTextTransform('frame_rate', 'info'),
                 models.FloatField())
    ).aggregate(avg=Avg('val'))['avg']
    return result if result is not None else 0
def test_cast_from_value(self):
    numbers = Author.objects.annotate(
        cast_integer=Cast(Value('0'), models.IntegerField()))
    self.assertEqual(numbers.get().cast_integer, 0)
class RackViewSet(viewsets.ModelViewSet):
    # View Housekeeping (permissions, serializers, filter fields, etc.)
    def get_permissions(self):
        if self.action in ADMIN_ACTIONS:
            try:
                user = User.objects.get(username=self.request.user.username)
                datacenter_url = self.request.data.get('datacenter')
                datacenter = Datacenter.objects.all().get(pk=datacenter_url[-2])
                print(user.is_staff)
                print(user.is_superuser)
                print(len(user.permission_set.all().filter(name='global_asset')))
                print(len(user.permission_set.all().filter(name='asset', datacenter=datacenter)))
                if (user.is_staff
                        or user.is_superuser
                        or len(user.permission_set.all().filter(name='global_asset')) > 0
                        or len(user.permission_set.all().filter(name='asset', datacenter=datacenter)) > 0):
                    permission_classes = [IsAuthenticated]
                else:
                    permission_classes = [IsAdmin]
            except Exception:
                print('exception')
                permission_classes = [IsAdmin]
        else:
            permission_classes = [IsAuthenticated]
        return [permission() for permission in permission_classes]

    queryset = Rack.objects.all() \
        .annotate(rack_letter=Substr('rack_number', 1, 1)) \
        .annotate(numstr_in_rack=Substr('rack_number', 2))
    queryset = queryset.annotate(
        number_in_rack=Cast('numstr_in_rack', IntegerField()))
    ordering = ['datacenter', 'rack_letter', 'number_in_rack']  # ['rack_letter', 'number_in_rack']
    ordering_fields = RACK_ORDERING_FILTERING_FIELDS
    filter_backends = [OrderingFilter, djfiltBackend.DjangoFilterBackend, RackFilter]
    filterset_fields = RACK_ORDERING_FILTERING_FIELDS

    def get_serializer_class(self):
        if self.request.method == GET:
            serializer_class = RackFetchSerializer
        else:
            serializer_class = RackSerializer
        return serializer_class

    # Overriding of super functions
    def destroy(self, request, *args, **kwargs):
        slots = ['u{}'.format(i) for i in range(1, 43)]
        offending_assets = []
        for slot in slots:
            match = getattr(self.get_object(), slot)
            if match:
                offending_assets.append(
                    match.hostname.__str__() + ' at '
                    + match.rack.rack_number.__str__() + ' ' + slot.__str__())
        if len(offending_assets) > 0:
            err_message = RACK_DESTROY_SINGLE_ERR_MSG + ', '.join(offending_assets)
            # dict, not set literal, so DRF can serialize the error payload
            return Response({
                'Error': err_message
            }, status=status.HTTP_400_BAD_REQUEST)
        return super().destroy(self, request, *args, **kwargs)

    # New Actions
    @action(detail=False, methods=[GET])
    def filter_fields(self, request, *args, **kwargs):
        fields = RACK_ORDERING_FILTERING_FIELDS.copy()
        fields.extend(['rack_num_start', 'rack_num_end'])
        return Response({
            'filter_fields': fields
        })

    @action(detail=False, methods=[GET])
    def sorting_fields(self, request, *args, **kwargs):
        return Response({
            'sorting_fields': self.ordering_fields
        })

    @action(detail=False, methods=[POST, DELETE])
    def many(self, request, *args, **kwargs):
        try:
            dc = request.data['datacenter']
            srn = request.data['rack_num_start']
            ern = request.data['rack_num_end']
        except KeyError:
            return Response({
                'Error': RACK_MANY_INCOMPLETE_QUERY_PARAMS_ERROR_MSG
            }, status=status.HTTP_400_BAD_REQUEST)
        try:
            s_letter = srn[0].upper()
            e_letter = ern[0].upper()
            s_number = int(srn[1:])
            e_number = int(ern[1:])
            try:
                assert (s_letter <= e_letter)
            except AssertionError:
                return Response({
                    'Error': RACK_MANY_BAD_LETTER_ERROR_MSG
                }, status=status.HTTP_400_BAD_REQUEST)
            try:
                assert (s_number <= e_number)
            except AssertionError:
                return Response({
                    'Error': RACK_MANY_BAD_NUMBER_ERROR_MSG
                }, status=status.HTTP_400_BAD_REQUEST)
            rack_numbers = [
                x + y
                for x in (chr(i) for i in range(ord(s_letter), ord(e_letter) + 1))
                for y in (str(j) for j in range(s_number, e_number + 1))
            ]
            create_success = []
            create_failure = []
            delete_success = []
            delete_nonexistent = []
            delete_failure = []
            for rn in rack_numbers:
                rn_request_data = {
                    "datacenter": dc,
                    "rack_number": rn
                }
                if request.method == POST:
                    try:
                        serializer = self.get_serializer(data=rn_request_data)
                        serializer.is_valid(raise_exception=True)
                        serializer.save()
                        create_success.append(rn)
                    except ValidationError:
                        create_failure.append(rn)
                elif request.method == DELETE:
                    try:
                        rack = self.queryset.filter(datacenter=dc).get(
                            rack_number__iexact=rn)
                    except self.queryset.model.DoesNotExist:
                        delete_nonexistent.append(rn)
                        continue
                    try:
                        rack.delete()
                    except ProtectedError:
                        delete_failure.append(rn)
                        continue
                    delete_success.append(rn)
        except (IndexError, ValueError) as e:
            # IndexError/ValueError have no .detail attribute; use str(e)
            return Response({
                'Error': str(e)
            }, status=status.HTTP_400_BAD_REQUEST)
        # return Response({
        #     'results': ', '.join(results)
        # }, status=status.HTTP_207_MULTI_STATUS)
        if request.method == POST:
            return Response({
                'results': {
                    'successfully_created': '{} racks'.format(len(create_success)),
                    'failed_to_create': '{} racks'.format(len(create_failure)),
                    'failed_racks': ', '.join(create_failure)
                }
            }, status=status.HTTP_207_MULTI_STATUS)
        if request.method == DELETE:
            return Response({
                'results': {
                    'successfully_deleted': '{} racks'.format(len(delete_success)),
                    'failed_to_delete_nonexistent': '{} racks'.format(len(delete_nonexistent)),
                    'failed_to_delete_occupied': '{} racks'.format(len(delete_failure)),
                    'failed_racks': ', '.join(delete_failure)
                }
            }, status=status.HTTP_207_MULTI_STATUS)

    @action(detail=True, methods=[GET])
    def assets(self, request, *args, **kwargs):
        matches = self.get_object().asset_set
        serializer = AssetShortSerializer(matches, many=True,
                                          context={'request': request})
        return Response(serializer.data)

    @action(detail=True, methods=[GET])
    def is_empty(self, request, *args, **kwargs):
        u_filled = 0
        slots = ['u{}'.format(i) for i in range(1, 43)]
        for field_name in slots:
            if getattr(self.get_object(), field_name):
                u_filled += 1
        if u_filled > 0:
            return Response({
                'is_empty': 'false'
            })
        return Response({
            'is_empty': 'true'
        })

    @action(detail=True, methods=[GET])
    def get_open_pdu_slots(self, request, *args, **kwargs):
        pdu_l = self.get_object().pdu_l
        pdu_r = self.get_object().pdu_r
        pp_l = pdu_l.power_port_set.all() if pdu_l else []
        pp_r = pdu_r.power_port_set.all() if pdu_r else []
        l_occ = [int(pp.port_number) for pp in pp_l]
        r_occ = [int(pp.port_number) for pp in pp_r]
        l_free = [x for x in range(1, 25) if x not in l_occ]
        r_free = [x for x in range(1, 25) if x not in r_occ]
        resp_list = {'left': l_free, 'right': r_free}
        # r_free = [True if x not in r_occ else False for x in range(0, 25)]
        # l_free = [True if x not in l_occ else False for x in range(0, 25)]
        # resp_list = []
        # for x in range(0, 25):
        #     resp_list.append({'pduSlot': x, 'left': l_free[x], 'right': r_free[x]})
        return Response({
            'pdu_slots': resp_list
        })
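# RackViewSet above gets natural ordering of rack numbers like 'A12' by
# splitting off the letter prefix and casting the numeric suffix, so 'A2'
# sorts before 'A10' instead of after it. The same trick in isolation, as a
# minimal sketch assuming a hypothetical Rack model whose rack_number is a
# CharField of the form letter-plus-digits:
from django.db.models import IntegerField
from django.db.models.functions import Cast, Substr

racks = (
    Rack.objects
    # first character is the row letter
    .annotate(rack_letter=Substr('rack_number', 1, 1))
    # everything after it is the rack number; cast text digits to an integer
    .annotate(number_in_rack=Cast(Substr('rack_number', 2), IntegerField()))
    .order_by('rack_letter', 'number_in_rack')
)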
def test_cast_to_char_field_without_max_length(self):
    numbers = Author.objects.annotate(
        cast_string=Cast('age', models.CharField()))
    self.assertEqual(numbers.get().cast_string, '1')
def generate_site_visit_ecological_condition(site_visits):
    """
    Generate site visit ecological condition from list of site visit
    :param site_visits: list of site visit query object
    """
    for site_visit in site_visits:
        log('Generate ecological condition for site visit : {}'.format(
            site_visit.id
        ))
        site_visit_taxa = SiteVisitTaxon.objects.filter(
            site_visit=site_visit
        )
        summary = site_visit_taxa.annotate(
            count=Count('sass_taxon'),
            sass_score=Coalesce(Sum(Case(
                When(
                    condition=Q(site_visit__sass_version=5,
                                sass_taxon__sass_5_score__isnull=False,
                                taxon_abundance__isnull=False),
                    then='sass_taxon__sass_5_score'),
                When(
                    condition=Q(site_visit__sass_version=4,
                                sass_taxon__score__isnull=False,
                                taxon_abundance__isnull=False),
                    then='sass_taxon__score'),
                default=0),
            ), 0),
            sass_id=F('site_visit__id')
        ).annotate(
            aspt=Cast(F('sass_score'), FloatField()) /
            Cast(F('count'), FloatField()),
        ).values('sass_score', 'aspt', 'count')
        if not summary:
            continue
        aspt_score = summary[0]['aspt']
        sass_score = summary[0]['sass_score']
        site_visit_ecological, created = (
            SiteVisitEcologicalCondition.objects.get_or_create(
                site_visit=site_visit,
                sass_score=sass_score,
                aspt_score=aspt_score
            )
        )
        try:
            location_context = json.loads(
                site_visit.location_site.location_context
            )
            eco_region = (
                location_context['context_group_values'][
                    'river_ecoregion_group'][
                    'service_registry_values']['eco_region_1'][
                    'value'].encode('utf-8')
            )
            geo_class = (
                location_context['context_group_values'][
                    'geomorphological_group'][
                    'service_registry_values']['geo_class'][
                    'value'].encode('utf-8')
            )
            # Fix eco_region name
            eco_region_splits = eco_region.split(' ')
            if eco_region_splits[0].isdigit():
                eco_region_splits.pop(0)
                eco_region = ' '.join(eco_region_splits)
        except (TypeError, ValueError, KeyError):
            continue
        sass_ecological_conditions = (
            SassEcologicalCondition.objects.filter(
                ecoregion_level_1__icontains=eco_region.strip(),
                geomorphological_zone__icontains=geo_class.strip()
            )
        )
        found_ecological_condition = False
        for sass_ecological_condition in sass_ecological_conditions:
            if (
                sass_score > sass_ecological_condition.sass_score_precentile or
                aspt_score > sass_ecological_condition.aspt_score_precentile
            ):
                site_visit_ecological.ecological_condition = (
                    sass_ecological_condition.ecological_category
                )
                site_visit_ecological.save()
                found_ecological_condition = True
                log(
                    'Found ecological condition : {}'.format(
                        sass_ecological_condition.ecological_category
                    ))
                break
        if found_ecological_condition:
            continue
        # Set to lowest category
        lowest_category = SassEcologicalCategory.objects.filter(
            Q(category__icontains='e') |
            Q(category__icontains='f')
        ).order_by('category')
        if not lowest_category:
            continue
        log(
            'Set to lowest ecological category : {}'.format(
                lowest_category[0].category
            ))
        site_visit_ecological.ecological_condition = lowest_category[0]
        site_visit_ecological.save()
def test_cast_from_python_to_date(self):
    today = datetime.date.today()
    dates = Author.objects.annotate(
        cast_date=Cast(today, models.DateField()))
    self.assertEqual(dates.get().cast_date, today)
def total(self, request, qs):
    return qs.annotate(
        total=Cast(F("price") * F("quantity"), output_field=FloatField()))