Ejemplo n.º 1
0
 def update_index(self, index, documents):
     """Rebuild the index rows for ``documents`` inside one transaction.

     Rows that populate fields the index no longer uses, or that are
     missing any ``_unique_together`` field, are deleted first so that
     the upserts below always match unique rows.
     """
     model = self.index_model_factory(index)
     with transaction.atomic():

         # Disallow records that utilize now-unused fields; delete them
         # so we are always getting unique query results below.
         if index._unused_fields:
             q = Q()
             for field_name in six.iterkeys(index._unused_fields):
                 q |= Q(**{'%s__isnull' % field_name: False})
             invalid = model.objects.filter(q, document__in=documents)
             invalid.delete()

         # Likewise drop records that don't have all of the
         # _unique_together fields populated.
         q = Q()
         for field_name in six.iterkeys(index._unique_together):
             q |= Q(**{'%s__isnull' % field_name: True})
         stale = model.objects.filter(q, document__in=documents)
         stale.delete()

         # Upsert one row per built record, keyed on _unique_together.
         for document in documents:
             records = index.build_records(document)
             for record in records:
                 model.objects.update_or_create(**dict(defaults=record, **{
                     field_name: record[field_name]
                     for field_name in six.iterkeys(index._unique_together)
                 }))
Ejemplo n.º 2
0
 def test_get_fields_with_include_all_and_exclude(self):
     """A '*' include keeps every field, even ones listed as excluded."""
     expected_names = list(six.iterkeys(UserSerializer().get_all_fields()))
     serializer = UserSerializer(include_fields='*', exclude_fields=['id'])
     self.assertEqual(list(six.iterkeys(serializer.fields)), expected_names)
Ejemplo n.º 3
0
 def __all__(self):
     """Return the combined attribute names from local and base attrs."""
     combined = set(six.iterkeys(self._attrs))
     combined.update(six.iterkeys(self._base_attrs))
     return list(combined)
Ejemplo n.º 4
0
    def sync_index(self, index, documents, full=False):
        """Synchronize index records for the given documents.

        Builds records for every document and update-or-creates the
        backing model rows (keyed on ``_unique_together``), pruning rows
        that no longer correspond to any built record. With ``full=True``,
        rows for documents not covered by this run are also deleted.
        """
        model = index._model
        related_models = index._related_models
        all_pks = set()

        # Sync documents
        for document in documents:
            records = index.build_records(document)
            pks = set()

            for record in records:

                # Filter out related fields, we'll handle
                # these later on
                defaults = {
                    field_name: record[field_name]
                    for field_name in six.iterkeys(index.fields)
                    if field_name not in related_models
                }

                # Construct the unique query kwargs
                unique_together = {
                    field_name: record[field_name] for field_name in six.iterkeys(index._unique_together)
                }

                # Check for existing records
                existing = model.objects.filter(**unique_together)
                if existing.count() > 1:
                    # Some records have become stale
                    # delete all but one
                    model.objects.filter(**unique_together).exclude(pk=existing.first().pk).delete()

                with transaction.atomic():
                    # Update or create index
                    obj, created = model.objects.update_or_create(**dict(defaults=defaults, **unique_together))
                    pks.add(obj.pk)

                    # Update or create related indexes
                    for field_name, related_model in six.iteritems(related_models):
                        values = record[field_name] or []
                        # Delete stale records
                        related_model.objects.filter(index=obj).exclude(value__in=values).delete()
                        # Ensure related records
                        for value in values:
                            related_model.objects.get_or_create(index=obj, value=value)

            # Check for stale records (rows of this document we didn't touch)
            stale = model.objects.filter(document=document).exclude(pk__in=pks)
            if stale.count() > 0:
                logger.info("Deleting stale records (%s) for document %s" % (stale.count(), document))
                stale.delete()

            all_pks |= pks

        if full:
            # Full sync: anything not touched in this entire run is stale.
            stale = model.objects.exclude(pk__in=all_pks)
            if stale.count() > 0:
                logger.info("Deleting stale records (%s)" % stale.count())
                stale.delete()
Ejemplo n.º 5
0
def merge_amount_dicts(a, b, merger):
    """Merge two amount mappings key-wise.

    Every key present in either ``a`` or ``b`` appears in the result;
    a missing amount defaults to 0 before being passed to ``merger``.
    """
    all_keys = set(six.iterkeys(a)) | set(six.iterkeys(b))
    merged = {}
    for key in all_keys:
        merged[key] = merger(a.get(key, 0), b.get(key, 0))
    return merged
Ejemplo n.º 6
0
    def test_dict_translation(self):
        """mvd.dict() has the same keys as mvd and agrees with item access."""
        mvd = MultiValueDict({"devs": ["Bob", "Joe"], "pm": ["Rory"]})
        plain = mvd.dict()
        self.assertEqual(sorted(six.iterkeys(plain)), sorted(six.iterkeys(mvd)))
        for key in six.iterkeys(mvd):
            self.assertEqual(plain[key], mvd[key])

        # An empty MultiValueDict translates to an empty dict.
        self.assertEqual({}, MultiValueDict().dict())
Ejemplo n.º 7
0
 def test_get_all_fields(self):
     """get_all_fields() keys stay stable across a `.fields` access."""
     serializer = GroupSerializer()
     # Capture one key iterator before and one after fields are bound.
     keys_before = six.iterkeys(serializer.get_all_fields())
     bound_fields = serializer.fields
     keys_after = six.iterkeys(serializer.get_all_fields())
     self.assertEqual(list(six.iterkeys(bound_fields)), ['id', 'name'])
     self.assertEqual(list(keys_before), list(keys_after))
Ejemplo n.º 8
0
 def get_etag(self, request, obj, *args, **kwargs):
     """Generate an ETag over the serializable fields, omitting hidden
     fields when the requesting user may not view the full profile.
     """
     if obj.is_profile_visible(request.user):
         etag_fields = six.iterkeys(self.fields)
     else:
         etag_fields = [
             field
             for field in six.iterkeys(self.fields)
             if field not in self.hidden_fields
         ]
     return self.generate_etag(obj, etag_fields, request)
Ejemplo n.º 9
0
 def test_get_fields_with_exclude_fields(self):
     """Excluded field names are dropped from the serializer's fields."""
     expected = set(six.iterkeys(UserSerializer().get_fields())) - {'id'}
     serializer = UserSerializer(exclude_fields=['id'])
     self.assertEqual(set(six.iterkeys(serializer.fields)), expected)
Ejemplo n.º 10
0
 def test_get_fields_with_include_fields(self):
     """Included field names are added to the serializer's fields."""
     expected = set(six.iterkeys(UserSerializer().get_fields())) | {'permissions'}
     serializer = UserSerializer(include_fields=['permissions'])
     self.assertEqual(set(six.iterkeys(serializer.fields)), expected)
Ejemplo n.º 11
0
 def test_delete_and_insert(self):
     """A key that is removed and then re-inserted moves to the end."""
     del self.d2[7]
     self.assertEqual([1, 9, 0], list(six.iterkeys(self.d2)))
     self.d2[7] = 'lucky number 7'
     self.assertEqual([1, 9, 0, 7], list(six.iterkeys(self.d2)))
Ejemplo n.º 12
0
    def test_dict_translation(self):
        """A translated MultiValueDict keeps its keys and item-access values."""
        mvd = MultiValueDict({
            'devs': ['Bob', 'Joe'],
            'pm': ['Rory'],
        })
        translated = mvd.dict()
        self.assertEqual(sorted(six.iterkeys(translated)),
                         sorted(six.iterkeys(mvd)))
        for key in six.iterkeys(mvd):
            self.assertEqual(translated[key], mvd[key])

        # Empty in, empty out.
        self.assertEqual({}, MultiValueDict().dict())
Ejemplo n.º 13
0
    def _get_queryset(self, request, is_list=False, *args, **kwargs):
        """Returns an optimized queryset.

        This calls out to the resource's get_queryset(), and then performs
        some optimizations to better fetch related objects, reducing future
        lookups in this request: single-valued relations are select_related
        and, for list requests, many-valued relations are prefetch_related.
        """
        queryset = self.get_queryset(request, is_list=is_list, *args, **kwargs)

        if not hasattr(self, '_select_related_fields'):
            # Cache the introspection result on the instance so the model
            # attributes are only scanned once per resource.
            self._select_related_fields = self._find_related_fields(
                ReverseSingleRelatedObjectDescriptor)

        if self._select_related_fields:
            queryset = \
                queryset.select_related(*self._select_related_fields)

        if is_list:
            if not hasattr(self, '_prefetch_related_fields'):
                self._prefetch_related_fields = self._find_related_fields(
                    (ReverseManyRelatedObjectsDescriptor,
                     ManyRelatedObjectsDescriptor))

            if self._prefetch_related_fields:
                queryset = \
                    queryset.prefetch_related(*self._prefetch_related_fields)

        return queryset

    def _find_related_fields(self, descriptor_types):
        """Return names of fields whose model attribute is one of the given
        related-object descriptor types.

        Fields with a custom serialize_<field>_field method are skipped,
        since their values are not fetched straight off the model.
        """
        related_fields = []

        for field in six.iterkeys(self.fields):
            if hasattr(self, 'serialize_%s_field' % field):
                continue

            field_type = getattr(self.model, field, None)

            if field_type and isinstance(field_type, descriptor_types):
                related_fields.append(field)

        return related_fields
Ejemplo n.º 14
0
    def get_models(self, app_mod=None,
                   include_auto_created=False, include_deferred=False,
                   only_installed=True, include_swapped=False):
        """
        Given a module containing models, returns a list of the models.
        Otherwise returns a list of all installed models.

        By default, auto-created models (i.e., m2m models without an
        explicit intermediate table) are not included. However, if you
        specify include_auto_created=True, they will be.

        By default, models created to satisfy deferred attribute
        queries are *not* included in the list of models. However, if
        you specify include_deferred, they will be.

        By default, models that aren't part of installed apps will *not*
        be included in the list of models. However, if you specify
        only_installed=False, they will be. If you're using a non-default
        AppCache, this argument does nothing - all models will be included.

        By default, models that have been swapped out will *not* be
        included in the list of models. However, if you specify
        include_swapped, they will be.
        """
        if not self.loads_installed:
            only_installed = False
        # The cache key must cover every argument that affects the result.
        cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped)
        model_list = None
        try:
            model_list = self._get_models_cache[cache_key]
            # The cached list is unfiltered; narrow it to the apps that are
            # currently available (these can change, e.g. during tests).
            if self.available_apps is not None and only_installed:
                model_list = [m for m in model_list if m._meta.app_label in self.available_apps]

            return model_list
        except KeyError:
            pass
        self._populate()
        if app_mod:
            if app_mod in self.app_store:
                app_list = [self.app_models.get(self._label_for(app_mod), ModelDict())]
            else:
                app_list = []
        else:
            if only_installed:
                app_list = [self.app_models.get(app_label, ModelDict())
                            for app_label in six.iterkeys(self.app_labels)]
            else:
                app_list = six.itervalues(self.app_models)
        model_list = []
        for app in app_list:
            model_list.extend(
                model for model in app.values()
                if ((not model._deferred or include_deferred) and
                    (not model._meta.auto_created or include_auto_created) and
                    (not model._meta.swapped or include_swapped))
            )
        # Cache the *unfiltered* list; availability filtering happens on read.
        self._get_models_cache[cache_key] = model_list
        if self.available_apps is not None and only_installed:
            model_list = [m for m in model_list if m._meta.app_label in self.available_apps]
        return model_list
Ejemplo n.º 15
0
    def get_indexes(self):
        """Return a mapping of each registered index name to its instance."""
        indexes = {}

        for name in six.iterkeys(self._registry):
            indexes[name] = self.get_index(name)

        return indexes
Ejemplo n.º 16
0
    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys."""

        q = QueryDict(str('vote=yes&vote=no'))

        # Plain item access returns the *last* value for a repeated key.
        self.assertEqual(q['vote'], 'no')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')

        self.assertEqual(q.get('vote', 'default'), 'no')
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.getlist('vote'), ['yes', 'no'])
        self.assertEqual(q.getlist('foo'), [])

        # Mutation of an immutable QueryDict must raise.
        # (A duplicated setlist assertion was removed here.)
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])

        if not six.PY3:
            self.assertEqual(q.has_key('vote'), True)
        self.assertEqual('vote' in q, True)
        if not six.PY3:
            self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
        self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
        self.assertEqual(list(six.iterkeys(q)), ['vote'])
        self.assertEqual(list(six.itervalues(q)), ['no'])
        self.assertEqual(len(q), 1)

        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
        self.assertRaises(AttributeError, q.__delitem__, 'vote')
Ejemplo n.º 17
0
    def __init__(self, connection):
        """Initialize SpatiaLite operations, probing the library version.

        Raises ImproperlyConfigured if SpatiaLite is older than 2.3.0 or
        its version cannot be determined at all.
        """
        super(DatabaseOperations, self).__init__(connection)

        # Determine the version of the SpatiaLite library.
        try:
            vtup = self.spatialite_version_tuple()
            version = vtup[1:]
            if version < (2, 3, 0):
                raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
                                           '2.3.0 and above')
            self.spatial_version = version
        except ImproperlyConfigured:
            # Re-raise our own configuration errors untouched.
            raise
        except Exception as msg:
            # Anything else (e.g. missing library) is reported as a
            # configuration problem with the original error attached.
            raise ImproperlyConfigured('Cannot determine the SpatiaLite version for the "%s" '
                                       'database (error was "%s").  Was the SpatiaLite initialization '
                                       'SQL loaded on this database?' %
                                       (self.connection.settings_dict['NAME'], msg))

        # Creating the GIS terms dictionary.
        gis_terms = ['isnull']
        gis_terms += list(six.iterkeys(self.geometry_functions))
        self.gis_terms = dict([(term, None) for term in gis_terms])

        if version >= (2, 4, 0):
            # Spatialite 2.4.0-RC4 added AsGML and AsKML, however both
            # RC2 (shipped in popular Debian/Ubuntu packages) and RC4
            # report version as '2.4.0', so we fall back to feature detection
            try:
                self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))")
                self.gml = 'AsGML'
                self.kml = 'AsKML'
            except DatabaseError:
                # we are using < 2.4.0-RC4
                pass
Ejemplo n.º 18
0
    def resolve_columns(self, row, fields=()):
        """
        This routine is necessary so that distances and geometries returned
        from extra selection SQL get resolved appropriately into Python
        objects.
        """
        aliases = list(six.iterkeys(self.query.extra_select))

        # Have to set a starting row number offset that is used for
        # determining the correct starting row index -- needed for
        # doing pagination with Oracle.
        rn_offset = 0
        if self.connection.ops.oracle:
            if self.query.high_mark is not None or self.query.low_mark:
                rn_offset = 1
        index_start = rn_offset + len(aliases)

        # Converting any extra selection values (e.g., geometries and
        # distance objects added by GeoQuerySet methods).
        values = [self.query.convert_values(v,
                               self.query.extra_select_fields.get(a, None),
                               self.connection)
                  for v, a in zip(row[rn_offset:index_start], aliases)]
        if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
            # We resolve the rest of the columns if we're on Oracle or if
            # the `geo_values` attribute is defined.
            #
            # zip_longest pads the shorter sequence with None, which matches
            # the Python 2-only map(None, ...) idiom it replaces; the old
            # form raised TypeError on Python 3.
            for value, field in six.moves.zip_longest(row[index_start:], fields):
                values.append(self.query.convert_values(value, field, self.connection))
        else:
            values.extend(row[index_start:])
        return tuple(values)
Ejemplo n.º 19
0
    def keys_with_prefix(self, prefix, version=None):
        """Return the set of (reversed) keys whose stored cache_key starts
        with ``prefix`` for the given version and has not yet expired.
        """
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too."
            )

        if version is None:
            version = self.version

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        like_pattern = self.make_key(prefix + '%', version=version)

        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(table=table),
                (like_pattern, self._now())
            )
            full_keys = {row[0] for row in cursor.fetchall()}

            matched = set()
            for full_key in full_keys:
                key, key_prefix, key_version = self.reverse_key_func(full_key)
                if key_version == version:
                    matched.add(key)
            return matched
Ejemplo n.º 20
0
    def similar_objects(self):
        """Return objects that share tags with this instance, ordered by
        the number of shared tags (most similar first).

        Each returned object carries a ``similar_tags`` attribute with the
        shared-tag count.
        """
        lookup_kwargs = self._lookup_kwargs()
        lookup_keys = sorted(lookup_kwargs)
        # Count, per tagged object, how many of our tags it also carries.
        qs = self.through.objects.values(*six.iterkeys(lookup_kwargs))
        qs = qs.annotate(n=models.Count("pk"))
        qs = qs.exclude(**lookup_kwargs)
        qs = qs.filter(tag__in=self.all())
        qs = qs.order_by("-n")

        # TODO: This all feels like a bit of a hack.
        items = {}
        if len(lookup_keys) == 1:
            # Can we do this without a second query by using a select_related()
            # somehow?
            f = _get_field(self.through, lookup_keys[0])
            objs = f.rel.to._default_manager.filter(**{"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]})
            for obj in objs:
                items[(getattr(obj, f.rel.field_name),)] = obj
        else:
            # Generic relation: batch-load the objects grouped by content type.
            preload = {}
            for result in qs:
                preload.setdefault(result["content_type"], set())
                preload[result["content_type"]].add(result["object_id"])

            for ct, obj_ids in preload.items():
                ct = ContentType.objects.get_for_id(ct)
                for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
                    items[(ct.pk, obj.pk)] = obj

        # Re-walk the aggregate rows so results keep the ranked order.
        results = []
        for result in qs:
            obj = items[tuple(result[k] for k in lookup_keys)]
            obj.similar_tags = result["n"]
            results.append(obj)
        return results
Ejemplo n.º 21
0
    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys."""

        q = QueryDict(str("vote=yes&vote=no"))

        # Plain item access returns the *last* value for a repeated key.
        self.assertEqual(q["vote"], "no")
        self.assertRaises(AttributeError, q.__setitem__, "something", "bar")

        self.assertEqual(q.get("vote", "default"), "no")
        self.assertEqual(q.get("foo", "default"), "default")
        self.assertEqual(q.getlist("vote"), ["yes", "no"])
        self.assertEqual(q.getlist("foo"), [])

        # Mutation of an immutable QueryDict must raise.
        # (A duplicated setlist assertion was removed here.)
        self.assertRaises(AttributeError, q.setlist, "foo", ["bar", "baz"])
        self.assertRaises(AttributeError, q.appendlist, "foo", ["bar"])

        if six.PY2:
            self.assertEqual(q.has_key("vote"), True)
        self.assertEqual("vote" in q, True)
        if six.PY2:
            self.assertEqual(q.has_key("foo"), False)
        self.assertEqual("foo" in q, False)
        self.assertEqual(list(six.iteritems(q)), [("vote", "no")])
        self.assertEqual(list(six.iterlists(q)), [("vote", ["yes", "no"])])
        self.assertEqual(list(six.iterkeys(q)), ["vote"])
        self.assertEqual(list(six.itervalues(q)), ["no"])
        self.assertEqual(len(q), 1)

        self.assertRaises(AttributeError, q.update, {"foo": "bar"})
        self.assertRaises(AttributeError, q.pop, "foo")
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, "foo", "bar")
        self.assertRaises(AttributeError, q.__delitem__, "vote")
Ejemplo n.º 22
0
    def serialize_change_entry(self, changedesc):
        """Serialize a change entry for public consumption.

        This produces a version of the change entry suitable for the API.
        It may match the content stored in the
        :py:class:`~reviewboard.changedescs.models.ChangeDescription`, but
        is not required to.

        Args:
            changedesc (reviewboard.changedescs.models.ChangeDescription):
                The change description whose field is to be serialized.

        Returns:
            list:
            An appropriate serialization for the field.
        """
        data = changedesc.fields_changed[self.field_id]

        entries = []

        for obj in self.model.objects.filter(pk__in=six.iterkeys(data)):
            # Stored change data is keyed by the stringified primary key.
            obj_data = data[six.text_type(obj.pk)]
            entries.append({
                'old': obj_data['old'][0],
                'new': obj_data['new'][0],
                self.caption_object_field: obj,
            })

        return entries
Ejemplo n.º 23
0
    def collect(self, request=None, files=None):
        """Collect assets from every non-pipeline staticfiles finder.

        Args:
            request: The request triggering collection; if it is the same
                object as the previously-seen request, collection is skipped.
            files: Optional list of prefixed paths to restrict collection
                to. An empty/omitted value collects everything.

        Returns:
            An iterator over the prefixed paths collected, or None when
            the request was already handled.
        """
        if files is None:
            # Use a fresh list per call rather than a shared mutable
            # default argument; behavior is unchanged ([] is also falsy).
            files = []

        if self.request and self.request is request:
            return
        self.request = request
        found_files = OrderedDict()
        for finder in finders.get_finders():
            # Ignore our finder to avoid looping
            if isinstance(finder, PipelineFinder):
                continue
            for path, storage in finder.list(['CVS', '.*', '*~']):
                # Prefix the relative path if the source storage contains it
                if getattr(storage, 'prefix', None):
                    prefixed_path = os.path.join(storage.prefix, path)
                else:
                    prefixed_path = path

                if (prefixed_path not in found_files and
                    (not files or prefixed_path in files)):
                    found_files[prefixed_path] = (storage, path)
                    self.copy_file(path, prefixed_path, storage)

                # Stop scanning this finder once every requested file is found.
                if files and len(files) == len(found_files):
                    break

        return six.iterkeys(found_files)
Ejemplo n.º 24
0
    def __new__(cls, name, bases, attrs):
        """Metaclass hook that collects declared SearchField attributes.

        SearchField instances are popped off the class namespace into a
        ``declared_fields`` dict (sorted by creation_counter, with fields
        from base classes prepended), honoring ``field = None`` overrides.
        An ``_meta`` options object is then built from every ``Meta`` class
        found in the new class's MRO.
        """
        fields = [(field_name, attrs.pop(field_name))
                  for field_name, obj in list(six.iteritems(attrs))
                  if isinstance(obj, SearchField)]
        fields.sort(key=lambda x: x[1].creation_counter)

        # Prepend base-class declared fields so the locally declared ones
        # win when dict(fields) resolves duplicate names below.
        for base in bases[::-1]:
            if hasattr(base, 'declared_fields'):
                fields = list(six.iteritems(base.declared_fields)) + fields

        field_dict = dict(fields)

        # Setting a field to None in a subclass removes the inherited field.
        for k in list(six.iterkeys(field_dict)):
            if k in attrs and attrs[k] is None:
                del field_dict[k]

        attrs['declared_fields'] = field_dict

        new_class = super(DeclarativeSearchFieldMetaclass, cls).__new__(cls, name, bases, attrs)

        # Build the options (_meta) object from every Meta in the MRO.
        OptionsClass = new_class._options_class
        if OptionsClass:
            options_list = [c.Meta for c in new_class.mro() if hasattr(c, 'Meta')]
            new_class._meta = OptionsClass(options_list)

        return new_class
Ejemplo n.º 25
0
 def formfield(self, form_class=forms.CharField, **kwargs):
     """
     Returns a django.forms.Field instance for this database Field.

     ``form_class`` is the form field class to instantiate; it is replaced
     by TypedChoiceField when the model field defines choices. Keyword
     arguments override the computed defaults.
     """
     defaults = {'required': not self.blank,
                 'label': capfirst(self.verbose_name),
                 'help_text': self.help_text}
     if self.has_default():
         # A callable default is passed through uncalled, and the hidden
         # initial value is enabled alongside it.
         if callable(self.default):
             defaults['initial'] = self.default
             defaults['show_hidden_initial'] = True
         else:
             defaults['initial'] = self.get_default()
     if self.choices:
         # Fields with choices get special treatment.
         include_blank = (self.blank or
                          not (self.has_default() or 'initial' in kwargs))
         defaults['choices'] = self.get_choices(include_blank=include_blank)
         defaults['coerce'] = self.to_python
         if self.null:
             defaults['empty_value'] = None
         form_class = forms.TypedChoiceField
         # Many of the subclass-specific formfield arguments (min_value,
         # max_value) don't apply for choice fields, so be sure to only pass
         # the values that TypedChoiceField will understand.
         for k in list(six.iterkeys(kwargs)):
             if k not in ('coerce', 'empty_value', 'choices', 'required',
                          'widget', 'label', 'initial', 'help_text',
                          'error_messages', 'show_hidden_initial'):
                 del kwargs[k]
     defaults.update(kwargs)
     return form_class(**defaults)
Ejemplo n.º 26
0
    def test_mergedict_merges_multivaluedict(self):
        """ MergeDict can merge MultiValueDicts """

        first = MultiValueDict({'key1': ['value1'],
                                'key2': ['value2', 'value3']})
        second = MultiValueDict({'key2': ['value4'],
                                 'key4': ['value5', 'value6']})
        merged = MergeDict(first, second)

        # Although 'key2' appears in both dictionaries,
        # only the first value is used.
        self.assertEqual(merged.getlist('key2'), ['value2', 'value3'])
        self.assertEqual(merged.getlist('key4'), ['value5', 'value6'])
        self.assertEqual(merged.getlist('undefined'), [])

        self.assertEqual(sorted(six.iterkeys(merged)), ['key1', 'key2', 'key4'])
        self.assertEqual(len(list(six.itervalues(merged))), 3)

        self.assertTrue('value1' in six.itervalues(merged))

        self.assertEqual(sorted(six.iteritems(merged), key=lambda item: item[0]),
                          [('key1', 'value1'), ('key2', 'value3'),
                           ('key4', 'value6')])

        self.assertEqual([(key, merged.getlist(key)) for key in sorted(merged)],
                          [('key1', ['value1']),
                           ('key2', ['value2', 'value3']),
                           ('key4', ['value5', 'value6'])])
Ejemplo n.º 27
0
    def _to_xml(self, xml, data, tag_name=None):
        """Recursively serialize ``data`` into the given XML generator.

        When ``tag_name`` is given, the serialized value is wrapped in that
        element. Booleans are checked with ``is`` before the container
        branches so True/False always render as 'true'/'false'.
        """
        if tag_name:
            xml.startElement(tag_name, {})

        if hasattr(data, 'write_xml'):
            # Support for the `IWriteXML` protocol.
            data.write_xml(xml)
        elif data is True:
            xml.characters('true')
        elif data is False:
            xml.characters('false')
        elif isinstance(data, (list, tuple)):
            # Each item gets the sequence's xml_tag (or the serializer-wide
            # item tag) as its element name, not tag_name itself.
            for item in data:
                self._to_xml(xml, item, tag_name=(getattr(data, "xml_tag", None) or self.item_tag_name))
        elif isinstance(data, dict):
            # Keys are sorted with a key function derived from the dict's
            # optional key_order attribute (see order_by_sort_order).
            key_order = getattr(data, "key_order", ())
            for key in sorted(six.iterkeys(data), key=order_by_sort_order(key_order)):
                self._to_xml(xml, data[key], key)
        elif data is None:  # Don't output any value
            pass
        else:
            xml.characters(smart_text(data))

        if tag_name:
            xml.endElement(tag_name)
Ejemplo n.º 28
0
def templatetag(parser, token):
    """
    Outputs one of the bits used to compose template tags.

    Since the template system has no concept of "escaping", to display one of
    the bits used in template tags, you must use the ``{% templatetag %}`` tag.

    The argument tells which template bit to output:

        ==================  =======
        Argument            Outputs
        ==================  =======
        ``openblock``       ``{%``
        ``closeblock``      ``%}``
        ``openvariable``    ``{{``
        ``closevariable``   ``}}``
        ``openbrace``       ``{``
        ``closebrace``      ``}``
        ``opencomment``     ``{#``
        ``closecomment``    ``#}``
        ==================  =======
    """
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'templatetag' statement takes one argument")
    tag = bits[1]
    if tag in TemplateTagNode.mapping:
        return TemplateTagNode(tag)
    raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
                              " Must be one of: %s" %
                              (tag, list(six.iterkeys(TemplateTagNode.mapping))))
Ejemplo n.º 29
0
    def test_mergedict_merges_multivaluedict(self):
        """ MergeDict can merge MultiValueDicts """

        first = MultiValueDict({"key1": ["value1"], "key2": ["value2", "value3"]})
        second = MultiValueDict({"key2": ["value4"], "key4": ["value5", "value6"]})
        merged = MergeDict(first, second)

        # 'key2' exists in both inputs; MergeDict reads it from the first.
        self.assertEqual(merged.getlist("key2"), ["value2", "value3"])
        self.assertEqual(merged.getlist("key4"), ["value5", "value6"])
        self.assertEqual(merged.getlist("undefined"), [])

        self.assertEqual(sorted(six.iterkeys(merged)), ["key1", "key2", "key4"])
        self.assertEqual(len(list(six.itervalues(merged))), 3)

        self.assertIn("value1", six.itervalues(merged))

        expected_items = [("key1", "value1"), ("key2", "value3"), ("key4", "value6")]
        self.assertEqual(sorted(six.iteritems(merged), key=lambda item: item[0]),
                         expected_items)

        expected_lists = [("key1", ["value1"]),
                          ("key2", ["value2", "value3"]),
                          ("key4", ["value5", "value6"])]
        self.assertEqual([(key, merged.getlist(key)) for key in sorted(merged)],
                         expected_lists)
Ejemplo n.º 30
0
    def _perform_delete_app_test(self, sql_name, database=None):
        """Shared driver for app-deletion tests.

        Builds the baseline signature, diffs it against a signature with
        the 'tests' app removed, simulates the DeleteApplication mutation,
        and compares the generated SQL against the ``sql_name`` entry of
        the expected SQL mapping.
        """
        # Simulate deletion of the app.
        self.set_base_model(
            self.default_base_model,
            extra_models=self.default_extra_models,
            db_name=database)

        end_sig = self.start_sig.clone()
        end_sig.remove_app_sig('tests')

        # The diff should report exactly one deleted app with these models.
        d = Diff(self.start_sig, end_sig)
        self.assertEqual(sorted(six.iterkeys(d.deleted)), ['tests'])
        self.assertEqual(d.deleted['tests'],
                         ['TestModel', 'AppDeleteAnchor1', 'AppDeleteAnchor2',
                          'CustomTestModel'])

        mutation = DeleteApplication()
        self.perform_simulations([mutation], end_sig, ignore_apps=True,
                                 db_name=database)

        test_database_state = self.database_state.clone()
        test_sig = self.start_sig.clone()

        # Run the mutation for real and check the SQL it produces.
        app_mutator = AppMutator(app_label='tests',
                                 project_sig=test_sig,
                                 database_state=test_database_state,
                                 database=database)
        app_mutator.run_mutation(mutation)
        sql = app_mutator.to_sql()

        self.assertEqual('\n'.join(sql),
                         self.get_sql_mapping(sql_name, database))
Ejemplo n.º 31
0
 def list(self):
     """Iterate over the keys of the user registry."""
     registered_users = self._users
     return six.iterkeys(registered_users)
Ejemplo n.º 32
0
 def list(self, service, user=None):
     """List the group keys for a service; when ``user`` is given, only
     groups that count the user as a member are returned.
     """
     groups = self._groups[self._service(service)]
     if user is None:
         return six.iterkeys(groups)
     return [name for name, group in six.iteritems(groups)
             if group.is_member(user)]
Ejemplo n.º 33
0
 def get_models(self):
     """Return the set of every model currently held in the registry."""
     return {model for model in iterkeys(self._models)}
Ejemplo n.º 34
0
# Register settings_changed_handler for Django's setting_changed signal.
setting_changed.connect(settings_changed_handler)


class CachedSettingsMixin(object):
    """Mixin that installs JS asset replacement patterns at init time.

    NOTE(review): the lower-cased setting properties read here (e.g.
    ``js_assets_repl_enabled``) are attached to this class dynamically
    elsewhere in the module.
    """

    def __init__(self, *args, **kwargs):
        self.update_patterns()
        super(CachedSettingsMixin, self).__init__(*args, **kwargs)

    def update_patterns(self):
        # No-op unless the JS asset replacement feature is enabled.
        if not self.js_assets_repl_enabled:
            return

        # Match /*! TAG(optional-args) */ '...' /*! endTAG */ spans in
        # *.js files; the replacement template is a single quoted value.
        esc_tag = re.escape(self.js_assets_repl_tag)
        self.patterns += (("*.js", ((
            r"""(/\*!\s*%s(?:\((.*?)\))?\s*\*/\s*['"](.*?)['"]\s*/\*!\s*end%s\s*\*/(\n)?)"""
            % (esc_tag, esc_tag), """'%s'"""), )), )


class SettingProxy(object):
    """Callable that resolves a cached setting by its key when invoked."""

    def __init__(self, key):
        self.key = key

    def __call__(self, instance):
        # The instance argument is required by the property protocol,
        # but the lookup only depends on the stored key.
        key = self.key
        return get_cached_setting_key(key)


# Dynamically attach a read-only property to CachedSettingsMixin for each
# key in settings_defaults, named as the lower-cased key. Each property
# resolves its value through a SettingProxy at access time.
for key in iterkeys(settings_defaults):
    setattr(CachedSettingsMixin, key.lower(), property(SettingProxy(key)))
Ejemplo n.º 35
0
    def serialize_object(self, obj, *args, **kwargs):
        """Serializes the object, transforming text fields.

        This is a specialization of serialize_object that transforms any
        text fields that support text types. It also handles attaching
        the raw text to the payload, on request.
        """
        data = super(MarkdownFieldsMixin,
                     self).serialize_object(obj, *args, **kwargs)

        request = kwargs.get('request')

        # NOTE: the GET parameter uses hyphens ('force-text-type') while
        # the POST field uses underscores ('force_text_type').
        if not request:
            force_text_type = None
        elif request.method == 'GET':
            force_text_type = request.GET.get('force-text-type')
        else:
            force_text_type = request.POST.get('force_text_type')

        # Only honor recognized text types; anything else is ignored.
        if force_text_type not in self.TEXT_TYPES:
            force_text_type = None

        extra_text_type_fields = dict(
            (extra_text_type, {})
            for extra_text_type in self._get_extra_text_types(obj, **kwargs))

        for field, field_info in six.iteritems(self.fields):
            if not field_info.get('supports_text_types'):
                continue

            get_func = getattr(self, 'get_is_%s_rich_text' % field, None)

            # The getter is consumed immediately below (same iteration),
            # so capturing get_func from the loop here is safe.
            if six.callable(get_func):
                getter = lambda obj, *args: get_func(obj)
            else:
                getter = lambda obj, data, rich_text_field, text_type_field: \
                    getattr(obj, rich_text_field, None)

            self._serialize_text_info(obj, data, extra_text_type_fields, field,
                                      force_text_type, getter)

        if 'extra_data' in data:
            extra_data = data['extra_data']
            all_text_types_extra_data = {}

            # Work on a copy of extra_data, in case we change it.
            for field, value in six.iteritems(obj.extra_data.copy()):
                if not self.get_extra_data_field_supports_markdown(obj, field):
                    continue

                # If all_text_types_extra_data is empty that implies we have
                # encountered the first field in extra_data which supports
                # markdown. In this case we must initialize the dictionary
                # with the extra text types that should be included in the
                # payload.
                if not all_text_types_extra_data:
                    all_text_types_extra_data = dict(
                        (k, {}) for k in six.iterkeys(extra_text_type_fields))

                # Note that we assume all custom fields are in Markdown by
                # default. This is to preserve compatibility with older
                # fields. New fields will always have the text_type flag
                # set to the proper value.
                self._serialize_text_info(obj, extra_data,
                                          all_text_types_extra_data, field,
                                          force_text_type,
                                          self._extra_data_rich_text_getter)

            for key, values in six.iteritems(all_text_types_extra_data):
                extra_text_type_fields[key]['extra_data'] = values

        # Attach one '<type>_text_fields' entry per extra text type.
        for key, values in six.iteritems(extra_text_type_fields):
            data[key + '_text_fields'] = values

        return data
Ejemplo n.º 36
0
    def get_models(self,
                   app_mod=None,
                   include_auto_created=False,
                   include_deferred=False,
                   only_installed=True,
                   include_swapped=False):
        """
        Given a module containing models, returns a list of the models.
        Otherwise returns a list of all installed models.

        By default, auto-created models (i.e., m2m models without an
        explicit intermediate table) are not included. However, if you
        specify include_auto_created=True, they will be.

        By default, models created to satisfy deferred attribute
        queries are *not* included in the list of models. However, if
        you specify include_deferred, they will be.

        By default, models that aren't part of installed apps will *not*
        be included in the list of models. However, if you specify
        only_installed=False, they will be.

        By default, models that have been swapped out will *not* be
        included in the list of models. However, if you specify
        include_swapped, they will be.
        """
        # Results are memoized per unique combination of arguments.
        cache_key = (app_mod, include_auto_created, include_deferred,
                     only_installed, include_swapped)
        model_list = None
        try:
            model_list = self._get_models_cache[cache_key]
            if self.available_apps is not None and only_installed:
                # Narrow to the currently available apps without mutating
                # the cached copy.
                model_list = [
                    m for m in model_list
                    if m._meta.app_label in self.available_apps
                ]
            return model_list
        except KeyError:
            # Cache miss; build the list below.
            pass
        self._populate()
        if app_mod:
            if app_mod in self.app_store:
                app_list = [
                    self.app_models.get(self._label_for(app_mod), SortedDict())
                ]
            else:
                app_list = []
        else:
            if only_installed:
                app_list = [
                    self.app_models.get(app_label, SortedDict())
                    for app_label in six.iterkeys(self.app_labels)
                ]
            else:
                app_list = six.itervalues(self.app_models)
        model_list = []
        for app in app_list:
            # Apply the deferred/auto-created/swapped filters.
            model_list.extend(
                model for model in app.values()
                if ((not model._deferred or include_deferred) and (
                    not model._meta.auto_created or include_auto_created) and (
                        not model._meta.swapped or include_swapped)))
        # Cache the unnarrowed list; the available_apps narrowing below
        # is re-applied on every call since available_apps may change.
        self._get_models_cache[cache_key] = model_list
        if self.available_apps is not None and only_installed:
            model_list = [
                m for m in model_list
                if m._meta.app_label in self.available_apps
            ]
        return model_list
Ejemplo n.º 37
0
 def changed_fields(self):
     """Return the names of all fields considered changed.

     Combines explicitly tracked changed fields with mutable fields
     whose current attribute value differs from the stored snapshot.
     """
     explicit = {name for name in six.iterkeys(self._changed_fields)}
     mutated = {
         name
         for name, value in six.iteritems(self._mutable_fields)
         if getattr(self, name, DoesNotExist) != value
     }
     return explicit | mutated
Ejemplo n.º 38
0
    def _get_real_instances(self, base_result_objects):
        """
        Polymorphic object loader

        Does the same as:

            return [ o.get_real_instance() for o in base_result_objects ]

        but more efficiently.

        The list base_result_objects contains the objects from the executed
        base class query. The class of all of them is self.model (our base model).

        Some, many or all of these objects were not created and stored as
        class self.model, but as a class derived from self.model. We want to re-fetch
        these objects from the db as their original class so we can return them
        just as they were created/saved.

        We identify these objects by looking at o.polymorphic_ctype, which specifies
        the real class of these objects (the class at the time they were saved).

        First, we sort the result objects in base_result_objects for their
        subclass (from o.polymorphic_ctype), and then we execute one db query per
        subclass of objects. Here, we handle any annotations from annotate().

        Finally we re-sort the resulting objects into the correct order and
        return them as a list.

        Args:
            base_result_objects: Objects from the executed base-class query.

        Returns:
            A list of objects upcast to their real classes, in the original
            result order. Objects whose content type is stale (no longer
            resolvable) are dropped from the result.
        """
        ordered_id_list = []    # list of ids of result-objects in correct order
        results = {}            # polymorphic dict of result-objects, keyed with their id (no order)

        # dict contains one entry per unique model type occurring in result,
        # in the format idlist_per_model[modelclass]=[list-of-object-ids]
        idlist_per_model = defaultdict(list)

        # django's automatic ".pk" field does not always work correctly for
        # custom fields in derived objects (unclear yet who to put the blame on).
        # We get different type(o.pk) in this case.
        # We work around this by using the real name of the field directly
        # for accessing the primary key of the the derived objects.
        # We might assume that self.model._meta.pk.name gives us the name of the primary key field,
        # but it doesn't. Therefore we use polymorphic_primary_key_name, which we set up in base.py.
        pk_name = self.model.polymorphic_primary_key_name

        # - sort base_result_object ids into idlist_per_model lists, depending on their real class;
        # - also record the correct result order in "ordered_id_list"
        # - store objects that already have the correct class into "results"
        base_result_objects_by_id = {}
        content_type_manager = ContentType.objects.db_manager(self.db)
        self_model_class_id = content_type_manager.get_for_model(self.model, for_concrete_model=False).pk
        self_concrete_model_class_id = content_type_manager.get_for_model(self.model, for_concrete_model=True).pk

        for base_object in base_result_objects:
            ordered_id_list.append(base_object.pk)

            # check if id of the result object occurres more than once - this can happen e.g. with base_objects.extra(tables=...)
            if base_object.pk not in base_result_objects_by_id:
                base_result_objects_by_id[base_object.pk] = base_object

                if base_object.polymorphic_ctype_id == self_model_class_id:
                    # Real class is exactly the same as base class, go straight to results
                    results[base_object.pk] = base_object

                else:
                    real_concrete_class = base_object.get_real_instance_class()
                    real_concrete_class_id = base_object.get_real_concrete_instance_class_id()

                    if real_concrete_class_id is None:
                        # Dealing with a stale content type
                        continue
                    elif real_concrete_class_id == self_concrete_model_class_id:
                        # Real and base classes share the same concrete ancestor,
                        # upcast it and put it in the results
                        results[base_object.pk] = transmogrify(real_concrete_class, base_object)
                    else:
                        real_concrete_class = content_type_manager.get_for_id(real_concrete_class_id).model_class()
                        idlist_per_model[real_concrete_class].append(getattr(base_object, pk_name))

        # For each model in "idlist_per_model" request its objects (the real model)
        # from the db and store them in results[].
        # Then we copy the annotate fields from the base objects to the real objects.
        # Then we copy the extra() select fields from the base objects to the real objects.
        # TODO: defer(), only(): support for these would be around here
        for real_concrete_class, idlist in idlist_per_model.items():
            real_objects = real_concrete_class.base_objects.db_manager(self.db).filter(**{
                ('%s__in' % pk_name): idlist,
            })
            real_objects.query.select_related = self.query.select_related  # copy select related configuration to new qs

            # Copy deferred fields configuration to the new queryset
            deferred_loading_fields = []
            existing_fields = self.polymorphic_deferred_loading[0]
            for field in existing_fields:
                try:
                    translated_field_name = translate_polymorphic_field_path(
                        real_concrete_class, field)
                except AssertionError:
                    if '___' in field:
                        # The originally passed argument to .defer() or .only()
                        # was in the form Model2B___field2, where Model2B is
                        # now a superclass of real_concrete_class. Thus it's
                        # sufficient to just use the field name.
                        translated_field_name = field.rpartition('___')[-1]
                    else:
                        raise

                deferred_loading_fields.append(translated_field_name)
            real_objects.query.deferred_loading = (set(deferred_loading_fields), self.query.deferred_loading[1])

            for real_object in real_objects:
                o_pk = getattr(real_object, pk_name)
                real_class = real_object.get_real_instance_class()

                # If the real class is a proxy, upcast it
                if real_class != real_concrete_class:
                    real_object = transmogrify(real_class, real_object)

                if _query_annotations(self.query):
                    for anno_field_name in six.iterkeys(_query_annotations(self.query)):
                        attr = getattr(base_result_objects_by_id[o_pk], anno_field_name)
                        setattr(real_object, anno_field_name, attr)

                if self.query.extra_select:
                    for select_field_name in six.iterkeys(self.query.extra_select):
                        attr = getattr(base_result_objects_by_id[o_pk], select_field_name)
                        setattr(real_object, select_field_name, attr)

                results[o_pk] = real_object

        # re-create correct order and return result list
        resultlist = [results[ordered_id] for ordered_id in ordered_id_list if ordered_id in results]

        # set polymorphic_annotate_names in all objects (currently just used for debugging/printing)
        if _query_annotations(self.query):
            annotate_names = list(six.iterkeys(_query_annotations(self.query)))  # get annotate field list
            for real_object in resultlist:
                real_object.polymorphic_annotate_names = annotate_names

        # set polymorphic_extra_select_names in all objects (currently just used for debugging/printing)
        if self.query.extra_select:
            extra_select_names = list(six.iterkeys(self.query.extra_select))  # get extra select field list
            for real_object in resultlist:
                real_object.polymorphic_extra_select_names = extra_select_names

        return resultlist
Ejemplo n.º 39
0
    def search(self, query, search_fields=None, order_results=True,
                     decorate_results=True):
        """
        Build a queryset matching words in the given search query, treating
        quoted terms as exact phrases and taking into account + and - symbols
        as modifiers controlling which terms to require and exclude.

        Args:
            query: Raw search string entered by the user.
            search_fields: Optional field spec overriding the queryset's
                configured ``_search_fields``.
            order_results: Stored on the clone as ``_search_ordered``;
                also forces ``_search_decorated`` on.
            decorate_results: Combined with ``order_results`` into
                ``_search_decorated`` on the clone.

        Returns:
            A filtered clone of this queryset, or an empty queryset when
            the query is unusable (no search fields, too short/long, or
            no positive terms survive filtering).
        """
        assert self.query.can_filter(), \
               'Cannot filter a query once a slice has been taken.'

        helper = import_string(self._search_helper)
        queryset = self._clone()
        queryset._search_ordered = order_results
        queryset._search_decorated = order_results or decorate_results

        #### DETERMINE FIELDS TO SEARCH ###

        # Use search_fields argument if given, otherwise use queryset._search_fields
        # property (which is initially configured by the manager class).
        if search_fields:
            queryset._search_fields = search_fields_to_dict(search_fields)
        if not queryset._search_fields:
            return queryset.none()

        #### BUILD LIST OF TERMS TO SEARCH FOR ###

        # Remove extra spaces, put modifiers inside quoted terms.
        terms = ' '.join(query.split()).replace('+"', '"+')    \
                                       .replace('-"', '"-')    \
                                       .split('"')

        # Strip punctuation other than modifiers from terms and create terms
        # list, first from quoted terms and then remaining words.
        # (Odd indices of the '"'-split are the quoted phrases; even
        # indices are the unquoted remainder.)
        terms = [
            (t[0] if t.startswith(('-', '+')) else '') + t.strip(punctuation)
            for t
            in terms[1::2] + ''.join(terms[::2]).split()
        ]

        # Remove stop words from terms that aren't quoted or use modifiers,
        # since words with these are an explicit part of the search query.
        # If doing so ends up with an empty term list, then keep the stop
        # words.
        terms_no_stopwords = [
            t
            for t in terms
            if t.startswith(('-', '+', '"'))
            or t.lower() not in registry['core:STOP_WORDS']
        ]
        positive_terms = [
            (t if not t.startswith('+') else t[1:]).lower()
            for t in terms_no_stopwords
            if not t.startswith('-')
        ]
        if not positive_terms:
            positive_terms = [
                (t if not t.startswith('+') else t[1:]).lower()
                for t in terms
                if not t.startswith('-')
            ]
        else:
            terms = terms_no_stopwords

        # Avoid too short or too long queries.
        query_len = len(''.join(terms))
        if (query_len < settings.SEARCH_MIN_QUERY_LEN
                or query_len > settings.SEARCH_MAX_QUERY_LEN):
            return queryset.none()

        # Remove too short words.
        positive_terms = [
            t
            for t in positive_terms
            if len(t) >= settings.SEARCH_MIN_WORD_LEN
        ]
        terms = [
            t
            for t in terms
            if len(t.strip('-+')) >= settings.SEARCH_MIN_WORD_LEN
        ]

        # Append positive terms (those without the negative modifier) to the
        # internal list for sorting when results are iterated.
        if not positive_terms:
            return queryset.none()
        else:
            queryset._search_terms.update(positive_terms)

        ### BUILD QUERYSET FILTER ###

        engine = connections[queryset.db].vendor

        # Filter the queryset combining each set of terms.
        field_lookups = [
            helper.prepare_field_lookup(f)
            for f
            in six.iterkeys(queryset._search_fields)
        ]
        # '-term' must miss every field (AND of negations); '+term' and
        # plain terms must hit at least one field (OR across fields).
        excluded = []
        required = []
        optional = []
        for t in terms:
            if t.startswith('-'):
                term = helper.prepare_term(t[1:], engine)
                excluded.append(reduce(
                    iand,
                    [~Q(**{lookup: term}) for lookup in field_lookups]
                ))
            elif t.startswith('+'):
                term = helper.prepare_term(t[1:], engine)
                required.append(reduce(
                    ior,
                    [Q(**{lookup: term}) for lookup in field_lookups]
                ))
            else:
                term = helper.prepare_term(t, engine)
                optional.append(reduce(
                    ior,
                    [Q(**{lookup: term}) for lookup in field_lookups]
                ))

        queryset.query.add_distinct_fields()
        queryset.query.clear_ordering(force_empty=True)
        if excluded:
            queryset = queryset.filter(reduce(iand, excluded))
        if required:
            queryset = queryset.filter(reduce(iand, required))
        elif optional:
            # Optional terms aren't relevant to the filter if there are terms
            # that are explicitly required.
            queryset = queryset.filter(reduce(ior, optional))

        return queryset
Ejemplo n.º 40
0
class KeyTransform(Transform):
    """Transform extracting a typed value from a dynamic column.

    Compiles to ``COLUMN_GET(col, %s AS TYPE)`` (MariaDB dynamic
    columns), with the key name passed as a query parameter.
    """

    # Maps Python types to dynamic-column type specifiers.
    SPEC_MAP = {
        date: 'DATE',
        datetime: 'DATETIME',
        float: 'DOUBLE',
        int: 'INTEGER',
        six.text_type: 'CHAR',
        time: 'TIME',
        dict: 'BINARY',
    }
    if six.PY2:
        from __builtin__ import long  # make source lintable on Python 3
        SPEC_MAP[long] = 'INTEGER'

    # Comma-separated, sorted names of the supported Python types.
    SPEC_MAP_NAMES = ', '.join(sorted(x.__name__ for x in
                                      six.iterkeys(SPEC_MAP)))

    # Maps type specifiers to Django output fields. 'BINARY' maps to the
    # DynamicField *class* (instantiated per-use with a subspec below),
    # while the rest are shared field instances.
    TYPE_MAP = {
        'BINARY': DynamicField,
        'CHAR': TextField(),
        'DATE': DateField(),
        'DATETIME': DateTimeField(),
        'DOUBLE': FloatField(),
        'INTEGER': IntegerField(),
        'TIME': TimeField(),
    }

    def __init__(self, key_name, data_type, *args, **kwargs):
        # subspec is only meaningful for nested ('BINARY') lookups.
        subspec = kwargs.pop('subspec', None)
        super(KeyTransform, self).__init__(*args, **kwargs)
        self.key_name = key_name
        self.data_type = data_type

        try:
            output_field = self.TYPE_MAP[data_type]
        except KeyError:  # pragma: no cover
            raise ValueError("Invalid data_type '{}'".format(data_type))

        if data_type == 'BINARY':
            self.output_field = output_field(spec=subspec)
        else:
            self.output_field = output_field

    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return (
            "COLUMN_GET({}, %s AS {})".format(lhs, self.data_type),
            params + [self.key_name],
        )

    if django.VERSION[:3] <= (1, 8, 2):  # pragma: no cover
        # Backport of bugfix for transforms with arguments, taken from:
        # https://code.djangoproject.com/ticket/24744
        def copy(self):
            # NOTE(review): 'copy' here resolves to a module-level name
            # (class-scope names aren't visible inside methods), so this
            # presumably calls copy.copy imported at module level rather
            # than recursing — confirm against the file's imports.
            return copy(self)

        def relabeled_clone(self, relabels):
            copy = self.copy()
            copy.lhs = self.lhs.relabeled_clone(relabels)
            return copy
Ejemplo n.º 41
0
 def copy(self):
     """Return a new instance of the same class holding the same
     key/value pairs as this one.
     """
     duplicate = self.__class__()
     for key in six.iterkeys(self):
         duplicate[key] = self[key]
     return duplicate
Ejemplo n.º 42
0
 def test_get_fields_with_only_fields(self):
     """only_fields should restrict the serializer to exactly those fields."""
     only = ['id', 'last_name']
     serializer = UserSerializer(only_fields=only)
     self.assertEqual(list(six.iterkeys(serializer.fields)), only)
Ejemplo n.º 43
0
 def test_copy(self):
     """copy.copy() should preserve a SortedDict's insertion order."""
     original = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
     duplicate = copy.copy(original)
     expected_order = [1, 0, 2]
     self.assertEqual(list(six.iterkeys(original)), expected_order)
     self.assertEqual(list(six.iterkeys(duplicate)), expected_order)
Ejemplo n.º 44
0
 def reverted(self):
     """Return a dict mapping enum values back to their keys."""
     return {value: key for key, value in six.iteritems(self.enum_dict)}
Ejemplo n.º 45
0
 def test_append_items(self):
     """ New items go to the end. """
     self.d1[0] = 'nil'
     expected_keys = [7, 1, 9, 0]
     self.assertEqual(list(six.iterkeys(self.d1)), expected_keys)
Ejemplo n.º 46
0
 def test_get_fields_with_include_all(self):
     """include_fields='*' should expose every field of the serializer."""
     all_fields = UserSerializer().get_all_fields()
     expected = list(six.iterkeys(all_fields))
     serializer = UserSerializer(include_fields='*')
     self.assertEqual(list(six.iterkeys(serializer.fields)), expected)
Ejemplo n.º 47
0
 def test_overwrite(self):
     """Overwriting a value updates it, and key order survives copying."""
     self.d1[1] = 'not one'
     self.assertEqual(self.d1[1], 'not one')
     original_keys = list(six.iterkeys(self.d1))
     copied_keys = list(six.iterkeys(self.d1.copy()))
     self.assertEqual(original_keys, copied_keys)
Ejemplo n.º 48
0
 def keys(self):
     """Return an iterator over the keys of the enum mapping."""
     enum_keys = six.iterkeys(self.enum_dict)
     return enum_keys
Ejemplo n.º 49
0
 def test_basic_methods(self):
     """Verify iterkeys/itervalues/iteritems yield insertion order."""
     self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
     self.assertEqual(list(six.itervalues(self.d1)),
                      ['seven', 'one', 'nine'])
     self.assertEqual(list(six.iteritems(self.d1)),
                      [(7, 'seven'), (1, 'one'), (9, 'nine')])