Example #1
    def setUp(self):
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'

        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'
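
The fixture above exercises SortedDict's defining property: iteration follows insertion order, not key order. A minimal standalone sketch of the same behavior, using the standard library's collections.OrderedDict as a stand-in for Django's SortedDict:

from collections import OrderedDict

# Like SortedDict, OrderedDict iterates in insertion order.
d = OrderedDict()
d[7] = 'seven'
d[1] = 'one'
d[9] = 'nine'

assert list(d.keys()) == [7, 1, 9]        # insertion order, not sorted [1, 7, 9]
assert list(d.values()) == ['seven', 'one', 'nine']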
Example #2
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute of the given storage class.
    """
    storage_class = AppStaticStorage

    def __init__(self, apps=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app module paths to storage instances
        self.storages = SortedDict()
        if apps is None:
            apps = settings.INSTALLED_APPS
        for app in apps:
            app_storage = self.storage_class(app)
            if os.path.isdir(app_storage.location):
                self.storages[app] = app_storage
                if app not in self.apps:
                    self.apps.append(app)
        super(AppDirectoriesFinder, self).__init__(*args, **kwargs)

    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        """
        for storage in six.itervalues(self.storages):
            if storage.exists(''):  # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage

    def find(self, path, all=False):
        """
        Looks for files in the app directories.
        """
        matches = []
        for app in self.apps:
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches

    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.
        """
        storage = self.storages.get(app, None)
        if storage:
            if storage.prefix:
                prefix = '%s%s' % (storage.prefix, os.sep)
                if not path.startswith(prefix):
                    return None
                path = path[len(prefix):]
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
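
A hypothetical usage sketch of the finder above, assuming a configured settings module with 'myapp' in INSTALLED_APPS and a file at myapp/static/css/base.css (both names are illustrative):

finder = AppDirectoriesFinder()

# find() returns the first matching absolute filesystem path,
# or a list of every match when all=True.
path = finder.find('css/base.css')
matches = finder.find('css/base.css', all=True)

# list() yields a (relative_path, storage) pair for each file
# in each app storage.
for relative_path, storage in finder.list(ignore_patterns=[]):
    print(relative_path, storage.location)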
Example #3
def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.
    """
    field_list = []
    ignored = []
    opts = model._meta
    for f in sorted(opts.fields + opts.many_to_many):
        if not f.editable:
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if widgets and f.name in widgets:
            kwargs = {'widget': widgets[f.name]}
        else:
            kwargs = {}

        if formfield_callback is None:
            formfield = f.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(f, **kwargs)

        if formfield:
            field_list.append((f.name, formfield))
        else:
            ignored.append(f.name)
    field_dict = SortedDict(field_list)
    if fields:
        field_dict = SortedDict(
            [(f, field_dict.get(f)) for f in fields
                if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
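
A hypothetical usage sketch, assuming a project that defines an Author model with name and email fields (both illustrative). Note that passing ``fields`` also fixes the key order of the returned SortedDict:

from django import forms

field_dict = fields_for_model(
    Author,
    fields=['email', 'name'],
    widgets={'name': forms.TextInput(attrs={'class': 'wide'})},
)
# Keys come back in the order of the ``fields`` argument:
# ['email', 'name'], each mapped to a ready-to-use form field.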
Example #4
def __init__(self, apps=None, *args, **kwargs):
    # The list of apps that are handled
    self.apps = []
    # Mapping of app module paths to storage instances
    self.storages = SortedDict()
    if apps is None:
        apps = settings.INSTALLED_APPS
    for app in apps:
        app_storage = self.storage_class(app)
        if os.path.isdir(app_storage.location):
            self.storages[app] = app_storage
            if app not in self.apps:
                self.apps.append(app)
    super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
Example #5
def _fill_related_objects_cache(self):
    cache = SortedDict()
    parent_list = self.get_parent_list()
    for parent in self.parents:
        for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
            if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
                continue
            if not model:
                cache[obj] = parent
            else:
                cache[obj] = model
    # Collect also objects which are in relation to some proxy child/parent of self.
    proxy_cache = cache.copy()
    for klass in get_models(include_auto_created=True, only_installed=False):
        for f in klass._meta.local_fields:
            if f.rel and not isinstance(f.rel.to, six.string_types):
                if self == f.rel.to._meta:
                    cache[RelatedObject(f.rel.to, klass, f)] = None
                    proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
                elif self.concrete_model == f.rel.to._meta.concrete_model:
                    proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
    self._related_objects_cache = cache
    self._related_objects_proxy_cache = proxy_cache
Example #6
def __init__(self, *args, **kwargs):
    super(CachedFilesMixin, self).__init__(*args, **kwargs)
    try:
        self.cache = get_cache("staticfiles")
    except InvalidCacheBackendError:
        # Use the default backend
        self.cache = default_cache
    self._patterns = SortedDict()
    for extension, patterns in self.patterns:
        for pattern in patterns:
            if isinstance(pattern, (tuple, list)):
                pattern, template = pattern
            else:
                template = self.default_template
            compiled = re.compile(pattern)
            self._patterns.setdefault(extension, []).append((compiled, template))
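
A standalone sketch of the normalization performed above, assuming each entry in ``patterns`` is either a bare regex string or a (regex, template) pair; a plain OrderedDict stands in for SortedDict:

import re
from collections import OrderedDict

default_template = """url("%s")"""
patterns = (
    ("*.css", (
        r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
        (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
    )),
)

compiled_patterns = OrderedDict()
for extension, pattern_list in patterns:
    for pattern in pattern_list:
        if isinstance(pattern, (tuple, list)):
            pattern, template = pattern          # explicit template given
        else:
            template = default_template          # fall back to the default
        compiled = re.compile(pattern)
        compiled_patterns.setdefault(extension, []).append((compiled, template))

assert len(compiled_patterns["*.css"]) == 2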
Example #7
def sort(self):
    sorted_models = []
    concrete_models = set()
    models = list(self.data)
    while len(sorted_models) < len(models):
        found = False
        for model in models:
            if model in sorted_models:
                continue
            dependencies = self.dependencies.get(model._meta.concrete_model)
            if not (dependencies and dependencies.difference(concrete_models)):
                sorted_models.append(model)
                concrete_models.add(model._meta.concrete_model)
                found = True
        if not found:
            return
    self.data = SortedDict([(model, self.data[model]) for model in sorted_models])
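
The loop above is a simple topological sort: a model is emitted only once all of its concrete dependencies have been emitted, and the method returns early (leaving self.data untouched) when no progress can be made, i.e. on a dependency cycle. A standalone sketch of the same idea with plain values (topo_sort is an illustrative name):

def topo_sort(items, dependencies):
    ordered = []
    while len(ordered) < len(items):
        progressed = False
        for item in items:
            if item in ordered:
                continue
            deps = dependencies.get(item, set())
            if not deps.difference(ordered):   # all dependencies emitted
                ordered.append(item)
                progressed = True
        if not progressed:
            return None                        # cycle detected, give up
    return ordered

assert topo_sort(['a', 'b', 'c'], {'a': {'b'}, 'b': {'c'}}) == ['c', 'b', 'a']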
Example #8
class Collector(object):
    def __init__(self, using):
        self.using = using
        # Initially, {model: set([instances])}, later values become lists.
        self.data = {}
        self.batches = {}  # {model: {field: set([instances])}}
        self.field_updates = {}  # {model: {(field, value): set([instances])}}

        # Tracks deletion-order dependency for databases without transactions
        # or ability to defer constraint checks. Only concrete model classes
        # should be included, as the dependencies exist only between actual
        # database tables; proxy models are represented here by their concrete
        # parent.
        self.dependencies = {}  # {model: set([models])}

    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted.  If the call is
        the result of a cascade, 'source' should be the model that caused it,
        and 'nullable' should be set to True if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, set())
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(source._meta.concrete_model, set()).add(model._meta.concrete_model)
        return new_objs

    def add_batch(self, model, field, objs):
        """
        Schedules a batch delete. Every instance of 'model' that is related to
        an instance in 'objs' through 'field' will be deleted.
        """
        self.batches.setdefault(model, {}).setdefault(field, set()).update(objs)

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogeneous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(model, {}).setdefault((field, value), set()).update(objs)

    def collect(
        self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False
    ):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogeneous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.

        If 'reverse_dependency' is True, 'source' will be deleted before the
        current model, rather than after. (Needed for cascading to parent
        models, the one case in which the cascade follows the forwards
        direction of an FK rather than the reverse direction.)
        """
        new_objs = self.add(objs, source, nullable, reverse_dependency=reverse_dependency)
        if not new_objs:
            return

        model = new_objs[0].__class__

        # Recursively collect concrete model's parent models, but not their
        # related objects. These will be found by meta.get_all_related_objects()
        concrete_model = model._meta.concrete_model
        for ptr in six.itervalues(concrete_model._meta.parents):
            if ptr:
                parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                self.collect(
                    parent_objs,
                    source=model,
                    source_attr=ptr.rel.related_name,
                    collect_related=False,
                    reverse_dependency=True,
                )

        if collect_related:
            for related in model._meta.get_all_related_objects(include_hidden=True, include_proxy_eq=True):
                field = related.field
                if related.model._meta.auto_created:
                    self.add_batch(related.model, field, new_objs)
                else:
                    sub_objs = self.related_objects(related, new_objs)
                    if not sub_objs:
                        continue
                    field.rel.on_delete(self, field, sub_objs, self.using)

            # TODO This entire block is only needed as a special case to
            # support cascade-deletes for GenericRelation. It should be
            # removed/fixed when the ORM gains a proper abstraction for virtual
            # or composite fields, and GFKs are reworked to fit into that.
            for relation in model._meta.many_to_many:
                if not relation.rel.through:
                    sub_objs = relation.bulk_related_objects(new_objs, self.using)
                    self.collect(sub_objs, source=model, source_attr=relation.rel.related_name, nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
        """
        return related.model._base_manager.using(self.using).filter(**{"%s__in" % related.field.name: objs})

    def instances_with_model(self):
        for model, instances in six.iteritems(self.data):
            for obj in instances:
                yield model, obj

    def sort(self):
        sorted_models = []
        concrete_models = set()
        models = list(self.data)
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model._meta.concrete_model)
                if not (dependencies and dependencies.difference(concrete_models)):
                    sorted_models.append(model)
                    concrete_models.add(model._meta.concrete_model)
                    found = True
            if not found:
                return
        self.data = SortedDict([(model, self.data[model]) for model in sorted_models])

    @force_managed
    def delete(self):
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(sender=model, instance=obj, using=self.using)

        # update fields
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            query = sql.UpdateQuery(model)
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                query.update_batch([obj.pk for obj in instances], {field.name: value}, self.using)

        # reverse instance collections
        for instances in six.itervalues(self.data):
            instances.reverse()

        # delete batches
        for model, batches in six.iteritems(self.batches):
            query = sql.DeleteQuery(model)
            for field, instances in six.iteritems(batches):
                query.delete_batch([obj.pk for obj in instances], self.using, field)

        # delete instances
        for model, instances in six.iteritems(self.data):
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)

        # send post_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.post_delete.send(sender=model, instance=obj, using=self.using)

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
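
A hypothetical usage sketch of the collector, assuming a configured project with an Author model (illustrative); this mirrors what QuerySet.delete() does internally:

collector = Collector(using='default')
collector.collect(Author.objects.filter(name='stale'))
collector.delete()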
Example #9
    def handle(self, *app_labels, **options):
        from djangocg.db.models import get_app, get_apps, get_model

        format = options.get('format')
        indent = options.get('indent')
        using = options.get('database')
        excludes = options.get('exclude')
        show_traceback = options.get('traceback')
        use_natural_keys = options.get('use_natural_keys')
        use_base_manager = options.get('use_base_manager')

        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                app_label, model_name = exclude.split('.', 1)
                model_obj = get_model(app_label, model_name)
                if not model_obj:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model_obj)
            else:
                try:
                    app_obj = get_app(exclude)
                    excluded_apps.add(app_obj)
                except ImproperlyConfigured:
                    raise CommandError('Unknown app in excludes: %s' % exclude)

        if len(app_labels) == 0:
            app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
        else:
            app_list = SortedDict()
            for label in app_labels:
                try:
                    app_label, model_label = label.split('.')
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    model = get_model(app_label, model_label)
                    if model is None:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))

                    if app in app_list:
                        if app_list[app] and model not in app_list[app]:
                            app_list[app].append(model)
                    else:
                        app_list[app] = [model]
                except ValueError:
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    app_list[app] = None

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            raise CommandError("Unknown serialization format: %s" % format)

        try:
            serializers.get_serializer(format)
        except KeyError:
            raise CommandError("Unknown serialization format: %s" % format)

        def get_objects():
            # Collate the objects to be serialized.
            for model in sort_dependencies(app_list.items()):
                if model in excluded_models:
                    continue
                if not model._meta.proxy and router.allow_syncdb(using, model):
                    if use_base_manager:
                        objects = model._base_manager
                    else:
                        objects = model._default_manager
                    for obj in objects.using(using).\
                            order_by(model._meta.pk.name).iterator():
                        yield obj

        try:
            self.stdout.ending = None
            serializers.serialize(format, get_objects(), indent=indent,
                    use_natural_keys=use_natural_keys, stream=self.stdout)
        except Exception as e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
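
A hypothetical programmatic invocation of the command above, equivalent to ``manage.py dumpdata myapp --exclude other.Secret`` (the app and model labels are illustrative, and the import assumes the stock django package rather than the djangocg fork used above):

from django.core.management import call_command

call_command('dumpdata', 'myapp',
             format='json', indent=2,
             exclude=['other.Secret'],
             use_natural_keys=True)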
Example #10
    def handle_noargs(self, **options):

        verbosity = int(options.get("verbosity"))
        interactive = options.get("interactive")
        show_traceback = options.get("traceback")
        load_initial_data = options.get("load_initial_data")

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module(".management", app_name)
            except ImportError as exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith("No module named") or "management" not in msg:
                    raise

        db = options.get("database")
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (
                app.__name__.split(".")[-2],
                [m for m in models.get_models(app, include_auto_created=True) if router.allow_syncdb(db, m)],
            )
            for app in models.get_apps()
        ]

        def model_installed(model):
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not (
                (converter(opts.db_table) in tables)
                or (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
            )

        manifest = SortedDict(
            (app_name, list(filter(model_installed, model_list))) for app_name, model_list in all_models
        )

        # Create the tables for each model
        if verbosity >= 1:
            self.stdout.write("Creating tables ...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 3:
                    self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
                sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(
                            connection.creation.sql_for_pending_references(refto, self.style, pending_references)
                        )
                sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    self.stdout.write("Creating table %s\n" % model._meta.db_table)
                for statement in sql:
                    cursor.execute(statement)
                tables.append(connection.introspection.table_name_converter(model._meta.db_table))

        transaction.commit_unless_managed(using=db)

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        if verbosity >= 1:
            self.stdout.write("Installing custom SQL ...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name)
                            )
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write(
                                "Failed to install custom SQL for %s.%s model: %s\n"
                                % (app_name, model._meta.object_name, e)
                            )
                            if show_traceback:
                                traceback.print_exc()
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 3:
                            self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))

        if verbosity >= 1:
            self.stdout.write("Installing indexes ...\n")
        # Install SQL indices for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing index for %s.%s model\n" % (app_name, model._meta.object_name)
                            )
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write(
                                "Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e)
                            )
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            call_command("loaddata", "initial_data", verbosity=verbosity, database=db, skip_validation=True)
Example #11
class SortedDictTests(SimpleTestCase):
    def setUp(self):
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'

        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'

    def test_basic_methods(self):
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'one', 'nine'])
        self.assertEqual(list(six.iteritems(self.d1)), [(7, 'seven'), (1, 'one'), (9, 'nine')])

    def test_overwrite_ordering(self):
        """ Overwriting an item keeps its place. """
        self.d1[1] = 'ONE'
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'ONE', 'nine'])

    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])

    if not six.PY3:
        def test_change_keys(self):
            """
            Changing the keys won't do anything, it's only a copy of the
            keys dict.

            This test doesn't make sense under Python 3 because keys is
            an iterator.
            """
            k = self.d2.keys()
            k.remove(9)
            self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.

        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)

        self.assertEqual(list(six.iterkeys(d)), [2, 1])

        real_dict = dict(tuples)
        self.assertEqual(sorted(six.itervalues(real_dict)), ['one', 'second-two'])

        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(list(six.itervalues(d)), ['second-two', 'one'])

    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        self.assertEqual(list(six.iterkeys(self.d1)), list(six.iterkeys(self.d1.copy())))

    def test_append(self):
        self.d1[13] = 'thirteen'
        self.assertEqual(
            repr(self.d1),
            "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
        )

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')

        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        d = SortedDict((i, i) for i in range(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        self.assertEqual(
            pickle.loads(pickle.dumps(self.d1, 2)),
            {7: 'seven', 1: 'one', 9: 'nine'}
        )

    def test_copy(self):
        orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        copied = copy.copy(orig)
        self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
        self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        self.assertEqual(self.d1.keyOrder, [])

    def test_insert(self):
        d = SortedDict()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            d.insert(0, "hello", "world")
        assert w[0].category is PendingDeprecationWarning

    def test_value_for_index(self):
        d = SortedDict({"a": 3})
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.assertEqual(d.value_for_index(0), 3)
        assert w[0].category is PendingDeprecationWarning
Example #12
def test_value_for_index(self):
    d = SortedDict({"a": 3})
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        self.assertEqual(d.value_for_index(0), 3)
    assert w[0].category is PendingDeprecationWarning
Example #13
def test_insert(self):
    d = SortedDict()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        d.insert(0, "hello", "world")
    assert w[0].category is PendingDeprecationWarning
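
Both deprecation tests rely on the same warning-capture pattern; a minimal standalone sketch, with a throwaway deprecated() helper (illustrative) standing in for the deprecated SortedDict methods:

import warnings

def deprecated():
    warnings.warn("use something else instead", PendingDeprecationWarning)

with warnings.catch_warnings(record=True) as caught:
    # "always" keeps earlier filters from swallowing the warning.
    warnings.simplefilter("always")
    deprecated()

assert caught[0].category is PendingDeprecationWarning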
Example #14
class CachedFilesMixin(object):
    default_template = """url("%s")"""
    patterns = (
        (
            "*.css",
            (
                r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
                (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
            ),
        ),
    )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache("staticfiles")
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append((compiled, template))

    def file_hash(self, name, content=None):
        """
        Returns a hash of the file with the given name and optional content.
        """
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None):
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        opened = False
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." % (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths and fragments
                return name
            opened = True
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        if file_hash is not None:
            file_hash = ".%s" % file_hash
        hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if "?#" in name and not unparsed_name[3]:
            unparsed_name[2] += "?"
        return urlunsplit(unparsed_name)

    def cache_key(self, name):
        return "staticfiles:%s" % hashlib.md5(force_bytes(name)).hexdigest()

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ""
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith("/"):  # don't hash paths
                hashed_name = name
            else:
                cache_key = self.cache_key(name)
                hashed_name = self.cache.get(cache_key)
                if hashed_name is None:
                    hashed_name = self.hashed_name(clean_name).replace("\\", "/")
                    # set the cache if there was a miss
                    # (e.g. if cache server goes down)
                    self.cache.set(cache_key, hashed_name)

        final_url = super(CachedFilesMixin, self).url(hashed_name)

        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = "?#" in name  # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += "?"
            final_url = urlunsplit(urlparts)

        return unquote(final_url)

    def url_converter(self, name, template=None):
        """
        Returns the custom URL converter for the given file name.
        """
        if template is None:
            template = self.default_template

        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(("#", "http:", "https:", "data:", "//")):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split("/")
            parent_level, sub_level = url.count(".."), url.count("/")
            if url.startswith("/"):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith("/"):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = "/".join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split("/")[-1:]
            relative_url = "/".join(url.split("/")[:-1] + file_name)

            # Return the hashed version to the file
            return template % unquote(relative_url)

        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # where to store the new paths
        hashed_paths = {}

        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, "seek"):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            content = pattern.sub(converter, content)
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(saved_name.replace("\\", "/"))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(saved_name.replace("\\", "/"))

                # and then set the cache accordingly
                hashed_paths[self.cache_key(name.replace("\\", "/"))] = hashed_name
                yield name, hashed_name, processed

        # Finally set the cache
        self.cache.set_many(hashed_paths)
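
A standalone sketch of the naming scheme implemented by file_hash() and hashed_name() above: the first 12 hex digits of the content's MD5 digest are spliced in before the file extension (hashed_filename is an illustrative helper, not part of the mixin):

import hashlib

def hashed_filename(name, content):
    digest = hashlib.md5(content).hexdigest()[:12]
    root, ext = name.rsplit('.', 1)
    return '%s.%s.%s' % (root, digest, ext)

print(hashed_filename('css/base.css', b'body { color: red }'))
# -> 'css/base.<12 hex digits>.css'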