Example #1
 def paginate_queryset(self, queryset):
     """
     Paginate the queryset, or skip pagination entirely.

     Pagination is disabled when the object is not a QuerySet, or when
     the caller explicitly requested every row through the ``all`` URL
     parameter.  Database errors raised while paginating are converted
     into validation errors (one message per line).
     """
     # Not a QuerySet: nothing to paginate
     if not isinstance(queryset, QuerySet):
         return None
     # Explicit request for the full dataset
     if str_to_bool(self.request.query_params.get('all', None)):
         return None
     try:
         return super().paginate_queryset(queryset)
     except ProgrammingError as error:
         raise ValidationError(str(error).split('\n'))
Example #2
 def add_field_to_serializer(fields, field_name):
     """
     Add a read-only field for *field_name* to the serializer *fields*
     mapping; when the model field defines choices and display was
     requested in the URL, also add a ``<name>_display`` field.
     """
     # Guard against empty names (e.g. produced by splitting "a,,b"),
     # consistent with the sibling implementation of this helper
     if not field_name:
         return
     field_name = field_name.strip()
     source = field_name.replace('.', '__')
     # Specific field in case of an enumeration
     choices = getattr(get_field_by_path(queryset.model, field_name), 'flatchoices', None)
     if choices and str_to_bool(get_from_url_params('display')):
         fields[field_name + '_display'] = ChoiceDisplayField(choices=choices, source=source)
     # Specific field for displaying the value
     fields[field_name] = ReadOnlyObjectField(source=source if '.' in field_name else None)
Example #3
 def wrapper(item, *args, **kwargs):
     """
     Decorator body: extract the ``valid``/``valid_date`` parameters
     from the request, attach them (plus a ready-to-use filter dict)
     to the request, then delegate to the wrapped function.
     """
     # A ViewSet carries the request as an attribute; a plain api_view
     # receives the request directly as "item".
     request = getattr(item, 'request', item)
     valid = valid_date = None
     # Body data takes precedence over query-string parameters
     params = request.data if request.data else request.query_params
     if params:
         valid = str_to_bool(params.get('valid', None))
         valid_date = parsedate(params.get('valid_date', None))
     request.valid = valid
     request.valid_date = valid_date
     request.valid_filter = dict(valid=valid, date=valid_date)
     return func(item, *args, **kwargs)
Example #4
def url_value(filter, value):
    """
    Transform a raw URL value according to its filter lookup.

    :param filter: Filter/lookup name (e.g. ``name__in``); may be falsy
    :param value: Raw value taken from the URL
    :return: Value converted for the lookup (list, boolean or dict),
        or the value unchanged
    """
    # Non-string values are already typed: nothing to transform
    if not isinstance(value, str):
        return value
    if filter:
        # Multi-value lookups expect a list
        if filter.endswith(('__in', '__range', '__any', '__all')):
            return value.split(',')
        # Presence lookups expect a boolean
        if filter.endswith(('__isnull', '__isempty')):
            return str_to_bool(value)
        # Dict lookups expect a mapping: try JSON first, then fall back
        # to the "key:value,key:value" shorthand
        if filter.endswith(('__hasdict', '__indict')):
            try:
                return json_decode(value)
            except JSONDecodeError:
                data = {}
                for subvalue in value.split(','):
                    # maxsplit=1 so values may themselves contain ':'
                    key, val = subvalue.split(':', 1)
                    data[key] = val
                return data
    return value
Example #5
    def get_serializer_class(self):
        """
        Resolve the serializer class for the current request.

        Depending on URL parameters, a serializer class may be built on
        the fly to expose aggregation results, grouped data, or a
        restricted set of fields; otherwise the default (or "simple")
        serializer is used.
        """
        # The default serializer is used for any action other than
        # list/retrieve/update/partial_update (e.g. create/destroy)
        default_serializer = getattr(self, 'default_serializer', None)
        if default_serializer and self.action not in ('list', 'retrieve',
                                                      'update',
                                                      'partial_update'):
            return default_serializer

        # The serializer may be substituted depending on the API call parameters
        query_params = getattr(self.request, 'query_params', None)
        url_params = self.url_params or (query_params.dict()
                                         if query_params else {})
        if default_serializer:

            # Utility function to add a field to the serializer
            def add_field_to_serializer(fields, field_name):
                if not field_name:
                    return
                field_name = field_name.strip()
                source = field_name.replace('.', '__')
                # Specific field in case of an enumeration
                choices = getattr(
                    get_field_by_path(self.queryset.model, field_name),
                    'flatchoices', None)
                if choices and str_to_bool(url_params.get('display')):
                    fields[field_name + '_display'] = ChoiceDisplayField(
                        choices=choices, source=source)
                # Specific field for displaying the value
                fields[field_name] = ReadOnlyObjectField(
                    source=source if '.' in field_name else None)

            # Add the aggregation fields to the serializer
            aggregations = {}
            for aggregate in AGGREGATES.keys():
                for field in url_params.get(aggregate, '').split(','):
                    if not field:
                        continue
                    field_name = field.strip() + '_' + aggregate
                    source = field_name.replace('.',
                                                '__') if '.' in field else None
                    aggregations[field_name] = serializers.ReadOnlyField(
                        source=source)

            # Add the groupings to the serializer
            if 'group_by' in url_params or aggregations:
                fields = {}
                for field in url_params.get('group_by', '').split(','):
                    add_field_to_serializer(fields, field)
                fields.update(aggregations)
                # A serializer exposing the grouped data is created on the fly
                return type(default_serializer.__name__,
                            (serializers.Serializer, ), fields)

            # Add the field restriction to the serializer
            elif 'fields' in url_params:
                fields = {}
                for field in url_params.get('fields').split(','):
                    add_field_to_serializer(fields, field)
                # A serializer with restricted fields is created on the fly
                return type(default_serializer.__name__,
                            (serializers.Serializer, ), fields)

            # Use the simplified serializer when requested
            elif str_to_bool(url_params.get('simple')):
                return getattr(self, 'simple_serializer', default_serializer)

            # Use the default serializer for updates without data alteration
            elif self.action in ('update', 'partial_update'):
                return default_serializer

        return super().get_serializer_class()
Example #6
    def get_queryset(self):
        """
        Build the queryset for the current request, applying the generic
        URL options: cached criteria, field selection, filters,
        aggregations, ordering and distinct.
        """
        # Avoid re-evaluating the QuerySet when a previous error occurred
        if getattr(self, 'queryset_error', False):
            return

        try:
            # Bypass for aggregations without annotation or non-QuerySet results
            queryset = super().get_queryset()
            if not isinstance(queryset, QuerySet):
                return queryset

            options = dict(aggregates=None,
                           distinct=None,
                           filters=None,
                           order_by=None)
            self.url_params = url_params = self.request.query_params.dict()

            # Helper reading a URL parameter, normalizing "." to "__"
            def get_from_url_params(name):
                return url_params.get(name, '').replace('.', '__')

            # Keywords reserved in URLs (never treated as filters)
            default_reserved_query_params = ['format'] + ([
                self.paginator.page_query_param,
                self.paginator.page_size_query_param
            ] if self.paginator else [])
            reserved_query_params = default_reserved_query_params + RESERVED_QUERY_PARAMS

            # Search criteria stored in the cache
            cache_key = url_params.pop('cache', None)
            if cache_key:
                from django.core.cache import cache
                cache_params = cache.get(CACHE_PREFIX + cache_key, {})
                # Cached parameters are merged with (and overridden by)
                # the parameters of the current request
                new_url_params = {}
                new_url_params.update(**cache_params)
                new_url_params.update(**url_params)
                self.url_params = url_params = new_url_params
                new_cache_params = {
                    key: value
                    for key, value in url_params.items()
                    if key not in default_reserved_query_params
                }
                if new_cache_params:
                    from django.utils.timezone import now
                    from datetime import timedelta
                    cache_timeout = int(
                        url_params.pop('timeout', CACHE_TIMEOUT)) or None
                    cache.set(CACHE_PREFIX + cache_key,
                              new_cache_params,
                              timeout=cache_timeout)
                    options['cache_expires'] = now() + timedelta(
                        seconds=cache_timeout)
                # Build both the shareable cache URL and the plain URL
                cache_url = '{}?cache={}'.format(
                    self.request.build_absolute_uri(self.request.path),
                    cache_key)
                plain_url = cache_url
                for key, value in url_params.items():
                    url_param = '&{}={}'.format(key, value)
                    if key in default_reserved_query_params:
                        cache_url += url_param
                    plain_url += url_param
                options['cache_data'] = new_cache_params
                options['cache_url'] = cache_url
                options['raw_url'] = plain_url

            # Silent mode: failures are recorded in options instead of raised
            silent = str_to_bool(get_from_url_params('silent'))

            # Simplified query and/or extraction of specific fields
            fields = get_from_url_params('fields')
            if str_to_bool(get_from_url_params('simple')) or fields:
                # Drop relation fetching
                if queryset.query.select_related:
                    queryset = queryset.select_related(None).prefetch_related(
                        None)
                # Specific fields
                try:
                    relateds = set()
                    field_names = set()
                    for field in fields.split(','):
                        if not field:
                            continue
                        field_names.add(field)
                        # Re-select only the relations the fields traverse
                        *related, field_name = field.split('__')
                        if related:
                            relateds.add('__'.join(related))
                    if relateds:
                        queryset = queryset.select_related(*relateds)
                    if field_names:
                        queryset = queryset.values(*field_names)
                except Exception as error:
                    if not silent:
                        raise ValidationError("fields: {}".format(error))
            else:
                # Metadata retrieval
                metadata = str_to_bool(get_from_url_params('meta'))
                if metadata and hasattr(self, 'metadata'):
                    # Avoids conflicts between identical prefetch lookups
                    viewset_lookups = [
                        prefetch if isinstance(prefetch, str) else
                        prefetch.prefetch_through
                        for prefetch in queryset._prefetch_related_lookups
                    ]
                    lookups_metadata = []
                    for lookup in self.metadata or []:
                        if isinstance(lookup, str):
                            lookup = Prefetch(lookup)
                        if lookup.prefetch_through not in viewset_lookups:
                            lookups_metadata.append(lookup)
                        lookup.queryset = MetaData.objects.select_valid()
                    if lookups_metadata:
                        queryset = queryset.prefetch_related(*lookups_metadata)

            # Filters (as a function so aggregations without group_by can call it)
            def do_filter(queryset):
                try:
                    filters, excludes = {}, {}
                    for key, value in url_params.items():
                        key = key.replace('.', '__')
                        # "(expr)" values reference another field (F expression)
                        if value.startswith('(') and value.endswith(')'):
                            value = F(value[1:-1])
                        if key in reserved_query_params:
                            continue
                        # A leading "-" turns the filter into an exclusion
                        if key.startswith('-'):
                            key = key[1:].strip()
                            excludes[key] = url_value(key, value)
                        else:
                            key = key.strip()
                            filters[key] = url_value(key, value)
                    if filters:
                        queryset = queryset.filter(**filters)
                    if excludes:
                        queryset = queryset.exclude(**excludes)
                    # Generic filters
                    others = get_from_url_params('filters')
                    if others:
                        queryset = queryset.filter(parse_filters(others))
                    if filters or others:
                        options['filters'] = True
                except Exception as error:
                    if not silent:
                        raise ValidationError("filters: {}".format(error))
                    options['filters'] = False
                    if settings.DEBUG:
                        options['filters_error'] = str(error)
                return queryset

            # Aggregations (list action only)
            if self.action == 'list':
                try:
                    aggregations = {}
                    for aggregate, function in AGGREGATES.items():
                        for field in get_from_url_params(aggregate).split(','):
                            if not field:
                                continue
                            # A leading space on the field name requests DISTINCT
                            distinct = field.startswith(' ')
                            field = field.strip().replace('.', '__')
                            aggregations[field + '_' + aggregate] = function(
                                field, distinct=distinct)
                    group_by = get_from_url_params('group_by')
                    if group_by:
                        _queryset = queryset.values(*group_by.split(','))
                        if aggregations:
                            _queryset = _queryset.annotate(**aggregations)
                        else:
                            _queryset = _queryset.distinct()
                        queryset = _queryset
                        options['aggregates'] = True
                    elif aggregations:
                        queryset = do_filter(queryset)  # Optional filters
                        return queryset.aggregate(**aggregations)
                except Exception as error:
                    if not silent:
                        raise ValidationError("aggregates: {}".format(error))
                    options['aggregates'] = False
                    if settings.DEBUG:
                        options['aggregates_error'] = str(error)

            # Filters
            queryset = do_filter(queryset)

            # Ordering
            try:
                order_by = get_from_url_params('order_by')
                if order_by:
                    _queryset = queryset.order_by(*order_by.split(','))
                    str(_queryset.query
                        )  # Force SQL evaluation to retrieve exception
                    queryset = _queryset
                    options['order_by'] = True
            except EmptyResultSet:
                pass
            except Exception as error:
                if not silent:
                    raise ValidationError("order_by: {}".format(error))
                options['order_by'] = False
                if settings.DEBUG:
                    options['order_by_error'] = str(error)

            # Distinct
            distincts = []
            try:
                distinct = get_from_url_params('distinct')
                if distinct:
                    distincts = distinct.split(',')
                    # A plain boolean value means DISTINCT without fields
                    if str_to_bool(distinct) is not None:
                        distincts = []
                    queryset = queryset.distinct(*distincts)
                    options['distinct'] = True
            except EmptyResultSet:
                pass
            except Exception as error:
                if not silent:
                    raise ValidationError("distinct: {}".format(error))
                options['distinct'] = False
                if settings.DEBUG:
                    options['distinct_error'] = str(error)

            # Expose the filter/sort options through the pagination
            if self.paginator and hasattr(self.paginator, 'additional_data'):
                # Force an ordering (primary key fallback) when paginating
                if hasattr(queryset, 'ordered') and not queryset.ordered:
                    queryset = queryset.order_by(
                        *(getattr(queryset, '_fields', None) or distincts
                          or [queryset.model._meta.pk.name]))
                self.paginator.additional_data = dict(options=options)
            return queryset
        except ValidationError as error:
            # Remember the error so the QuerySet is not re-evaluated
            self.queryset_error = error
            raise error
Example #7
    def importer(self, file):
        """
        Import data from an Excel pricing workbook.

        :param file: Path to the Excel document
        :return: Cache of imported instances, keyed by model then by code
        """
        cache = {}
        metadata = {}

        workbook = load_workbook(filename=file, read_only=True, data_only=True)
        # Collect every worksheet by lowercased title
        worksheets = {}
        for worksheet in workbook.worksheets:
            worksheets[worksheet.title.lower()] = worksheet

        # If present, process the metadata sheet first
        metadata_sheet_name = str(METADATA_NAME)
        if metadata_sheet_name in worksheets:
            worksheet = worksheets.get(metadata_sheet_name)
            headers = {}
            title = True
            for row_number, row in enumerate(worksheet.iter_rows()):
                code_meta = ''
                line = []
                for col_number, cell in enumerate(row):
                    value = cell.value
                    if isinstance(value, str):
                        value = value.strip()
                    if value is None or not str(value).strip():
                        continue
                    # On the title row, only record the column headers
                    if title:
                        value = value.lower()
                        headers[col_number] = value
                        continue
                    field = headers[col_number]
                    # The "code" column groups the following values
                    if field == 'code':
                        if value not in metadata:
                            metadata[value] = []
                        code_meta = value
                        continue
                    line.append(value)
                # The title row itself stores no data
                if title:
                    title = False
                    continue
                metadata[code_meta].append(line)

        done = []
        for model in self.models:
            code_field = getattr(model, '_code_field', 'id')
            # Strip spaces and extraneous characters from the model name
            model_name = re.sub(r'[^\w]+', ' ',
                                str(model._meta.verbose_name).lower())
            # Find the worksheet matching the model
            if model_name not in worksheets:
                self.log.warning(
                    _("La feuille correspondant au modèle '{model_name}' "
                      "n'a pu être trouvée dans le fichier.").format(
                          model_name=model_name))
                continue
            worksheet = worksheets.get(model_name)
            # Collect the model fields, keyed by lowercased verbose name
            fields = {}
            for field in chain(model._meta.fields, model._meta.many_to_many):
                if field.name != code_field and (
                        field.auto_created
                        or not (field.editable or self.non_editables)):
                    continue
                field.m2m = field in model._meta.many_to_many
                fields[str(field.verbose_name).lower()] = field
            # Walk the rows of the worksheet
            self.delayed_models = []
            headers = {}
            title = True
            for row_number, row in enumerate(worksheet.iter_rows()):
                instance = model()
                current_metadata = {}
                delayed = False
                m2m = {}
                fks = {}
                # Walk the cells of the row
                has_data = False
                for col_number, cell in enumerate(row):
                    # Read the cell value; empty cells are ignored
                    value = cell.value
                    if isinstance(value, str):
                        value = value.strip()
                    if value is None or not str(value).strip():
                        continue
                    # On the title row, only record the known column headers
                    if title:
                        value = value.lower()
                        if value in fields:
                            headers[col_number] = fields[value]
                        continue
                    # Columns not mapped to a known field are ignored
                    if col_number not in headers:
                        continue
                    field = headers[col_number]
                    # Handle specific types poorly supported by Excel
                    type = field.get_internal_type()
                    if field.m2m:
                        # Self-referencing relations defer the save
                        if field.related_model == model:
                            delayed = True
                        value = [v.strip() for v in value.split(',')]
                        m2m[field.name] = (field.related_model, value)
                        has_data = True
                        continue
                    elif field.remote_field is not None and field.related_model is MetaData:
                        current_metadata = dict(metadata.get(value, []))
                        continue
                    elif field.remote_field:
                        if field.related_model == model:
                            delayed = True
                        fks[field.name] = (field.related_model, value)
                        has_data = True
                        continue
                    elif field.choices:
                        # Map displayed labels back to stored keys
                        choices = {
                            str(value): str(key)
                            for key, value in field.flatchoices
                        }
                        if hasattr(field, 'max_choices'):  # MultiSelectField
                            value = [
                                choices[val] for val in choices.keys()
                                if val in value
                            ]
                        else:
                            value = choices[value]
                    elif type in ['DateField', 'DateTimeField']:
                        value = parsedate(value, dayfirst=True)
                    elif type == 'DecimalField':
                        value = decimal(value, precision=20)
                    elif type == 'BooleanField':
                        value = str_to_bool(value)
                    has_data = True
                    # Reuse the existing instance matching the unique code
                    if field.name == code_field and field.unique:
                        existing = model.objects.filter(**{code_field: value})
                        if existing.count() == 1:
                            instance = existing.first()
                    # Set the attribute on the model instance
                    setattr(instance, field.name, value)
                # The title row itself stores no data
                if title:
                    title = False
                    continue
                # Empty rows are skipped
                if not has_data:
                    continue
                # Cache the current instance
                code = getattr(instance, code_field, id(instance))
                if model not in cache:
                    cache[model] = {}
                cache[model][code] = instance
                # Immediate save (when possible)
                if delayed:
                    self.delayed_models.append(
                        (instance, fks, m2m, current_metadata))
                    continue
                self._save_instance(instance,
                                    metadata=current_metadata,
                                    cache=cache,
                                    fks=fks,
                                    m2m=m2m)
            # Deferred save (self-referencing relations)
            for instance, fks, m2m, current_metadata in self.delayed_models:
                self._save_instance(instance,
                                    metadata=current_metadata,
                                    cache=cache,
                                    fks=fks,
                                    m2m=m2m)
            # Integration finished for this model
            done.append(model)
        return cache
Example #8
def api_paginate(request, queryset, serializer, pagination=None, enable_options=True,
                 context=None, query_func=None, func_args=None, func_kwargs=None):
    """
    Ajoute de la pagination aux résultats d'un QuerySet dans un serializer donné
    :param request: Requête HTTP
    :param queryset: QuerySet
    :param serializer: Serializer
    :param pagination: Classe de pagination
    :param enable_options: Active toutes les options de filtre/tri/aggregation/distinct
    :param context: Contexte du serializer
    :param query_func: Fonction spécifique à exécuter sur le QuerySet avant la pagination
    :param func_args: Arguments de la fonction
    :param func_kwargs: Arguments mots-clés de la fonction
    :return: Réponse HTTP des résultats avec pagination
    """
    from common.api.pagination import CustomPageNumberPagination
    pagination = pagination or CustomPageNumberPagination

    # Mots-clés réservés dans les URLs
    default_reserved_query_params = ['format', pagination.page_query_param, pagination.page_size_query_param]
    reserved_query_params = default_reserved_query_params + RESERVED_QUERY_PARAMS

    url_params = request.query_params.dict()
    context = dict(request=request, **(context or {}))
    options = dict(aggregates=None, distinct=None, filters=None, order_by=None)

    # Fonction de récupération des données depuis les paramètres
    def get_from_url_params(name):
        return url_params.get(name, '').replace('.', '__')

    # Activation des options
    if enable_options:

        # Critères de recherche dans le cache
        cache_key = url_params.pop('cache', None)
        if cache_key:
            from django.core.cache import cache
            cache_params = cache.get(CACHE_PREFIX + cache_key, {})
            new_url_params = {}
            new_url_params.update(**cache_params)
            new_url_params.update(**url_params)
            url_params = new_url_params
            new_cache_params = {
                key: value for key, value in url_params.items()
                if key not in default_reserved_query_params}
            if new_cache_params:
                from django.utils.timezone import now
                from datetime import timedelta
                cache_timeout = int(url_params.pop('timeout', CACHE_TIMEOUT)) or None
                cache.set(CACHE_PREFIX + cache_key, new_cache_params, timeout=cache_timeout)
                options['cache_expires'] = now() + timedelta(seconds=cache_timeout)
            cache_url = '{}?cache={}'.format(request.build_absolute_uri(request.path), cache_key)
            plain_url = cache_url
            for key, value in url_params.items():
                url_param = '&{}={}'.format(key, value)
                if key in default_reserved_query_params:
                    cache_url += url_param
                plain_url += url_param
            options['cache_data'] = new_cache_params
            options['cache_url'] = cache_url
            options['raw_url'] = plain_url

        # Erreurs silencieuses
        silent = str_to_bool(get_from_url_params('silent'))

        # Extraction de champs spécifiques
        fields = get_from_url_params('fields')
        if fields:
            # Supprime la récupération des relations
            queryset = queryset.select_related(None).prefetch_related(None)
            # Champs spécifiques
            try:
                relateds = set()
                field_names = set()
                for field in fields.split(','):
                    if not field:
                        continue
                    field_names.add(field)
                    *related, field_name = field.split('__')
                    if related:
                        relateds.add('__'.join(related))
                if relateds:
                    queryset = queryset.select_related(*relateds)
                if field_names:
                    queryset = queryset.values(*field_names)
            except Exception as error:
                if not silent:
                    raise ValidationError("fields: {}".format(error))

        # Filtres (dans une fonction pour être appelé par les aggregations sans group_by)
        def do_filter(queryset):
            try:
                filters, excludes = {}, {}
                for key, value in url_params.items():
                    key = key.replace('.', '__')
                    if value.startswith('(') and value.endswith(')'):
                        value = F(value[1:-1])
                    if key in reserved_query_params:
                        continue
                    if key.startswith('-'):
                        key = key[1:].strip()
                        excludes[key] = url_value(key, value)
                    else:
                        key = key.strip()
                        filters[key] = url_value(key, value)
                if filters:
                    queryset = queryset.filter(**filters)
                if excludes:
                    queryset = queryset.exclude(**excludes)
                # Filtres génériques
                others = get_from_url_params('filters')
                if others:
                    queryset = queryset.filter(parse_filters(others))
                if filters or excludes or others:
                    options['filters'] = True
            except Exception as error:
                if not silent:
                    raise ValidationError("filters: {}".format(error))
                options['filters'] = False
                if settings.DEBUG:
                    options['filters_error'] = str(error)
            return queryset

        # Aggregations: collect one aggregate function per (field, aggregate) pair
        # requested through URL parameters named after the AGGREGATES keys.
        try:
            aggregations = {}
            for aggregate, function in AGGREGATES.items():
                for field in get_from_url_params(aggregate).split(','):
                    if not field:
                        continue
                    # A leading space flags a DISTINCT aggregate
                    # (presumably a '+' in the URL decoded to a space — TODO confirm)
                    distinct = field.startswith(' ')
                    field = field.strip().replace('.', '__')
                    aggregations[field + '_' + aggregate] = function(field, distinct=distinct)
            group_by = get_from_url_params('group_by')
            if group_by:
                # Group rows on the requested fields; annotate with the aggregates
                # if any were requested, otherwise just deduplicate the groups.
                _queryset = queryset.values(*group_by.split(','))
                if aggregations:
                    _queryset = _queryset.annotate(**aggregations)
                else:
                    _queryset = _queryset.distinct()
                queryset = _queryset
                options['aggregates'] = True
            elif aggregations:
                # Aggregates without grouping: apply any filters first, then
                # return the aggregate dict directly (early exit, no pagination).
                queryset = do_filter(queryset)  # Apply any requested filters
                return queryset.aggregate(**aggregations)
        except Exception as error:
            # In silent mode, record the failure in the options instead of raising
            if not silent:
                raise ValidationError("aggregates: {}".format(error))
            options['aggregates'] = False
            if settings.DEBUG:
                options['aggregates_error'] = str(error)

        # Filters
        queryset = do_filter(queryset)

        # Ordering: comma-separated field list from the 'order_by' URL parameter
        try:
            order_by = get_from_url_params('order_by')
            if order_by:
                temp_queryset = queryset.order_by(*order_by.split(','))
                str(temp_queryset.query)  # Force SQL evaluation to retrieve exception
                queryset = temp_queryset
                options['order_by'] = True
        except EmptyResultSet:
            # An empty result set is not an ordering error; keep the queryset as-is
            pass
        except Exception as error:
            # In silent mode, record the failure in the options instead of raising
            if not silent:
                raise ValidationError("order_by: {}".format(error))
            options['order_by'] = False
            if settings.DEBUG:
                options['order_by_error'] = str(error)

        # Distinct: 'distinct' URL parameter is either a boolean (plain DISTINCT)
        # or a comma-separated field list (DISTINCT ON — PostgreSQL only, presumably).
        distincts = []
        try:
            distinct = get_from_url_params('distinct')
            if distinct:
                distincts = distinct.split(',')
                # A boolean-looking value means a plain DISTINCT with no field list
                if str_to_bool(distinct) is not None:
                    distincts = []
                queryset = queryset.distinct(*distincts)
                options['distinct'] = True
        except EmptyResultSet:
            # An empty result set is not a distinct error; keep the queryset as-is
            pass
        except Exception as error:
            # In silent mode, record the failure in the options instead of raising
            if not silent:
                raise ValidationError("distinct: {}".format(error))
            options['distinct'] = False
            if settings.DEBUG:
                options['distinct_error'] = str(error)

        # Utility: register read-only serializer fields for a model field path
        def add_field_to_serializer(fields, field_name):
            """Add serializer entries for ``field_name`` into ``fields``.

            Registers a ``<name>_display`` ChoiceDisplayField when the model
            field is an enumeration and the ``display`` URL parameter is
            truthy, then a ReadOnlyObjectField exposing the raw value.
            """
            field_name = field_name.strip()
            source = field_name.replace('.', '__')
            # Extra display field when the model field defines choices
            choices = getattr(get_field_by_path(queryset.model, field_name), 'flatchoices', None)
            if choices and str_to_bool(get_from_url_params('display')):
                fields[field_name + '_display'] = ChoiceDisplayField(choices=choices, source=source)
            # Raw value field: a source is only needed for nested lookups
            fields[field_name] = ReadOnlyObjectField(source=source if '.' in field_name else None)

        # On-the-fly serializer creation for aggregations or field restriction.
        # First, build a read-only field for each requested aggregate result.
        aggregations = {}
        for aggregate in AGGREGATES.keys():
            for field in get_from_url_params(aggregate).split(','):
                if not field:
                    continue
                field_name = field.strip() + '_' + aggregate
                # A source is only needed when the field path is nested (contains '.')
                source = field_name.replace('.', '__') if '.' in field else None
                aggregations[field_name] = serializers.ReadOnlyField(source=source)
        # Grouping & aggregations
        if 'group_by' in url_params or aggregations:
            fields = {}
            for field in get_from_url_params('group_by').split(','):
                add_field_to_serializer(fields, field)
            fields.update(aggregations)
            # A serializer for the grouped data is created on the fly
            serializer = type(serializer.__name__, (serializers.Serializer, ), fields)
        # Field restriction: only the fields listed in the 'fields' URL parameter
        elif 'fields' in url_params:
            fields = {}
            for field in get_from_url_params('fields').split(','):
                add_field_to_serializer(fields, field)
            # A serializer restricted to those fields is created on the fly
            serializer = type(serializer.__name__, (serializers.Serializer, ), fields)

    # Caller-supplied hook applied to the queryset last
    if query_func:
        func_args = func_args or []
        func_kwargs = func_kwargs or {}
        queryset = query_func(queryset, *func_args, **func_kwargs)

    # Only if all data are requested: skip pagination and return everything
    all_data = str_to_bool(get_from_url_params('all'))
    if all_data:
        return Response(serializer(queryset, context=context, many=True).data)

    # Pagination, with the filter/ordering options attached to the paginated payload
    paginator = pagination()
    if enable_options and hasattr(paginator, 'additional_data'):
        paginator.additional_data = dict(options=options)
    # Force an ordering on the primary key when paginating an unordered queryset
    # (falls back to values() fields or the requested distinct fields first;
    # 'enable_options and distincts' short-circuits so 'distincts' is only read
    # when it was actually defined above)
    if hasattr(queryset, 'ordered') and not queryset.ordered:
        queryset = queryset.order_by(*(
            getattr(queryset, '_fields', None) or (enable_options and distincts) or [queryset.model._meta.pk.name]))
    serializer = serializer(paginator.paginate_queryset(queryset, request), context=context, many=True)
    return paginator.get_paginated_response(serializer.data)