Example #1
    def delete(self, *args, **kwargs):
        '''
        Overrides the standard model delete method; sets "effective_to" to the
        current date and time and then calls save() instead.
        '''
        # see django.db.models.deletion.Collector.delete
        using = kwargs.get('using', router.db_for_write(self.__class__,
                                                        instance=self))
        cannot_be_deleted_assert = (
            "%s object can't be deleted because its %s attribute is set "
            "to None." % (self._meta.object_name, self._meta.pk.attname))
        assert self._get_pk_val() is not None, cannot_be_deleted_assert
        collector = Collector(using=using)
        collector.collect([self])
        collector.sort()

        # send pre_delete signals
        def delete(collector):
            for model, obj in collector.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(
                        sender=model, instance=obj, using=using
                    )

            # be compatible with django 1.4.x
            if hasattr(collector, 'fast_deletes'):
                # fast deletes
                for qs in collector.fast_deletes:
                    for instance in qs:
                        self._delete(instance)

            # delete batches
            # be compatible with django>=1.6
            if hasattr(collector, 'batches'):
                for model, batches in six.iteritems(collector.batches):
                    for field, instances in six.iteritems(batches):
                        for instance in instances:
                            self._delete(instance)

            # "delete" instances
            for model, instances in six.iteritems(collector.data):
                for instance in instances:
                    self._delete(instance)

            # send post_delete signals
            for model, obj in collector.instances_with_model():
                if not model._meta.auto_created:
                    signals.post_delete.send(
                        sender=model, instance=obj, using=using
                    )

        # another django>=1.6 thing
        try:
            from django.db.transaction import commit_on_success_unless_managed
        except ImportError:
            delete(collector)
        else:
            commit_on_success_unless_managed(using=using)(delete)(collector)
Example #2
    def delete(self, *args, **kwargs):
        '''
        Overrides the standard model delete method; sets "effective_to" to the
        current date and time and then calls save() instead.
        '''
        # see django.db.models.deletion.Collector.delete
        using = kwargs.get('using',
                           router.db_for_write(self.__class__, instance=self))
        cannot_be_deleted_assert = (
            "%s object can't be deleted because its %s attribute is set "
            "to None." % (self._meta.object_name, self._meta.pk.attname))
        assert self._get_pk_val() is not None, cannot_be_deleted_assert
        collector = Collector(using=using)
        collector.collect([self])
        collector.sort()

        # send pre_delete signals
        def delete(collector):
            for model, obj in collector.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(sender=model,
                                            instance=obj,
                                            using=using)

            # be compatible with django 1.4.x
            if hasattr(collector, 'fast_deletes'):
                # fast deletes
                for qs in collector.fast_deletes:
                    for instance in qs:
                        self._delete(instance)

            # delete batches
            # be compatible with django>=1.6
            if hasattr(collector, 'batches'):
                for model, batches in six.iteritems(collector.batches):
                    for field, instances in six.iteritems(batches):
                        for instance in instances:
                            self._delete(instance)

            # "delete" instances
            for model, instances in six.iteritems(collector.data):
                for instance in instances:
                    self._delete(instance)

            # send post_delete signals
            for model, obj in collector.instances_with_model():
                if not model._meta.auto_created:
                    signals.post_delete.send(sender=model,
                                             instance=obj,
                                             using=using)

        # another django>=1.6 thing
        try:
            from django.db.transaction import commit_on_success_unless_managed
        except ImportError:
            delete(collector)
        else:
            commit_on_success_unless_managed(using=using)(delete)(collector)
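Examples #1 and #2 fall back to a plain call when `commit_on_success_unless_managed` cannot be imported, and otherwise apply it as a decorator around the nested `delete` helper. For reference, the same fallback can be written with the context-manager form; the sketch below is not taken from the project above, and `run_soft_delete` / `soft_delete_collected` are hypothetical names.

from django.db import router


def run_soft_delete(instance, soft_delete_collected, collector):
    # Hypothetical helper mirroring Examples #1/#2 with the "with" form.
    using = router.db_for_write(instance.__class__, instance=instance)
    try:
        # Only available on Django >= 1.6.
        from django.db.transaction import commit_on_success_unless_managed
    except ImportError:
        soft_delete_collected(collector)
    else:
        with commit_on_success_unless_managed(using=using):
            soft_delete_collected(collector)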
Example #3
    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handles the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.

        The 'raw' argument is telling save_base not to save any parent
        models and not to do any changes to the values before save. This
        is used by fixture loading.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        if not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)
        with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
            if not raw:
                self._save_parents(cls, using, update_fields)
            updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False

        # Signal that the save is complete
        if not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not updated),
                                   update_fields=update_fields, raw=raw, using=using)
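In Example #3, `pre_save` is sent before the `commit_on_success_unless_managed` block is entered and `post_save` only after it exits. A minimal sketch of observing those signals; the receiver below is illustrative, not part of Django.

from django.db.models import signals
from django.dispatch import receiver


@receiver(signals.post_save)  # in practice, pass sender=YourModel to limit it
def log_save(sender, instance, created, raw, using, update_fields, **kwargs):
    # Fires after save_base() has left the transaction block above.
    print("saved %r on %r (created=%r)" % (instance, using, created))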
Example #4
    def handle_noargs(self, **options):
        db = options.get('database')
        connection = connections[db]
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        # The following are stealth options used by Django's internals.
        reset_sequences = options.get('reset_sequences', True)
        allow_cascade = options.get('allow_cascade', False)
        inhibit_post_migrate = options.get('inhibit_post_migrate', False)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_config in apps.get_app_configs():
            try:
                import_module('.management', app_config.name)
            except ImportError:
                pass

        sql_list = sql_flush(self.style, connection, only_django=True,
                             reset_sequences=reset_sequences,
                             allow_cascade=allow_cascade)

        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to a fresh state.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                with transaction.commit_on_success_unless_managed():
                    with connection.cursor() as cursor:
                        for sql in sql_list:
                            cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])

            if not inhibit_post_migrate:
                self.emit_post_migrate(verbosity, interactive, db)

            # Reinstall the initial_data fixture.
            if options.get('load_initial_data'):
                # Reinstall the initial_data fixture.
                call_command('loaddata', 'initial_data', **options)

        else:
            self.stdout.write("Flush cancelled.\n")
Example #5
    def handle_noargs(self, **options):
        db = options.get('database')
        connection = connections[db]
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        # The following are stealth options used by Django's internals.
        reset_sequences = options.get('reset_sequences', True)
        allow_cascade = options.get('allow_cascade', False)
        inhibit_post_migrate = options.get('inhibit_post_migrate', False)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError:
                pass

        sql_list = sql_flush(self.style, connection, only_django=True,
                             reset_sequences=reset_sequences,
                             allow_cascade=allow_cascade)

        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to a fresh state.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                with transaction.commit_on_success_unless_managed():
                    cursor = connection.cursor()
                    for sql in sql_list:
                        cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])

            if not inhibit_post_migrate:
                self.emit_post_migrate(verbosity, interactive, db)

            # Reinstall the initial_data fixture.
            if options.get('load_initial_data'):
                # Reinstall the initial_data fixture.
                call_command('loaddata', 'initial_data', **options)

        else:
            self.stdout.write("Flush cancelled.\n")
Example #6
    def delete(self):
        """ deletion for versioned objects means setting the 'ends_at' field
        to the current datetime. Applied only for active versions, having
        ends_at=NULL """
        now = timezone.now()

        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        with transaction.commit_on_success_unless_managed(using=self.using):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(
                        sender=model, instance=obj, using=self.using
                    )

            # fast deletes - TODO check works correctly with versioned rels
            for qs in self.fast_deletes:
                query = sql.UpdateQuery(qs.model)
                pk_list = [obj.pk for obj in qs.all()]
                query.update_batch(pk_list, {'ends_at': now}, self.using)

            # update fields - TODO check works correctly with versioned rels
            for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
                for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                    for o in instances:  # update FK fields
                        setattr(o, field.name, value)
                    model.objects.bulk_create(instances)

            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()

            # delete instances by setting 'ends_at' to 'now'
            for model, instances in six.iteritems(self.data):
                query = sql.UpdateQuery(model)
                pk_list = [obj.pk for obj in instances]
                query.update_batch(pk_list, {'ends_at': now}, self.using)

                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(
                            sender=model, instance=obj, using=self.using
                        )

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
Example #7
def create_table(model, db='default'):
    '''Create a table in the database for the given model.

    This routine will raise ValueError if the table already exists.
    Other database exceptions may also be raised.
    '''
    model = model._meta.concrete_model
    style = no_style()
    connection = connections[db]
    cursor = connection.cursor()

    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    if model._meta.db_table in tables:
        raise ValueError('table already exists for given model')
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}

    # Create the tables for each model
    with transaction.commit_on_success_unless_managed(using=db):
        # Create the model's database table, if it doesn't already exist.
        sql, references = connection.creation.sql_create_model(
            model, style, seen_models)
        seen_models.add(model)
        created_models.add(model)
        for refto, refs in references.items():
            pending_references.setdefault(refto, []).extend(refs)
            if refto in seen_models:
                sql.extend(
                    connection.creation.sql_for_pending_references(
                        refto, style, pending_references))
        sql.extend(
            connection.creation.sql_for_pending_references(
                model, style, pending_references))
        for statement in sql:
            cursor.execute(statement)
        tables.append(
            connection.introspection.table_name_converter(
                model._meta.db_table))

    # Install SQL indices for all newly created models
    index_sql = connection.creation.sql_indexes_for_model(model, style)
    if index_sql:
        with transaction.commit_on_success_unless_managed(using=db):
            for sql in index_sql:
                cursor.execute(sql)
Example #8
    def delete(self):
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        with transaction.commit_on_success_unless_managed(
                using=self.using, connection=self.connection):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(sender=model,
                                            instance=obj,
                                            using=self.using)

            # fast deletes
            for qs in self.fast_deletes:
                qs._raw_delete(using=self.using)

            # update fields
            for model, instances_for_fieldvalues in six.iteritems(
                    self.field_updates):
                query = sql.UpdateQuery(model)
                for (field, value), instances in six.iteritems(
                        instances_for_fieldvalues):
                    query.update_batch([obj.pk for obj in instances],
                                       {field.name: value}, self.using)

            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()

            # delete instances
            for model, instances in six.iteritems(self.data):
                query = sql.DeleteQuery(model)
                pk_list = [obj.pk for obj in instances]
                query.delete_batch(pk_list, self.using)

                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(sender=model,
                                                 instance=obj,
                                                 using=self.using)

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(
                self.field_updates):
            for (field, value), instances in six.iteritems(
                    instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
Example #9
 def _reset_sequences(self, db_name):
     conn = connections[db_name]
     if conn.features.supports_sequence_reset:
         sql_list = conn.ops.sequence_reset_by_name_sql(no_style(), conn.introspection.sequence_list())
         if sql_list:
             with transaction.commit_on_success_unless_managed(using=db_name):
                 cursor = conn.cursor()
                 for sql in sql_list:
                     cursor.execute(sql)
Example #10
 def _clear(self, queryset, bulk):
     db = router.db_for_write(self.model, instance=self.instance)
     queryset = queryset.using(db)
     if bulk:
         queryset.delete()
     else:
         with transaction.commit_on_success_unless_managed(using=db, savepoint=False):
             for obj in queryset:
                 obj.delete()
Example #11
 def _clear(self, queryset, bulk):
     db = router.db_for_write(self.model, instance=self.instance)
     queryset = queryset.using(db)
     if bulk:
         queryset.delete()
     else:
         with transaction.commit_on_success_unless_managed(using=db, savepoint=False):
             for obj in queryset:
                 obj.delete()
Example #12
def create_table(model, db='default'):
    '''Create a table in the database for the given model.

    This routine will raise ValueError if the table already exists.
    Other database exceptions may also be raised.
    '''
    model = model._meta.concrete_model
    style = no_style()
    connection = connections[db]
    cursor = connection.cursor()

    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    if model._meta.db_table in tables:
        raise ValueError('table already exists for given model')
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}

    # Create the tables for each model
    with transaction.commit_on_success_unless_managed(using=db):
        # Create the model's database table, if it doesn't already exist.
        sql, references = connection.creation.sql_create_model(
                model, style, seen_models)
        seen_models.add(model)
        created_models.add(model)
        for refto, refs in references.items():
            pending_references.setdefault(refto, []).extend(refs)
            if refto in seen_models:
                sql.extend(connection.creation.sql_for_pending_references(
                        refto, style, pending_references))
        sql.extend(connection.creation.sql_for_pending_references(
                model, style, pending_references))
        for statement in sql:
            cursor.execute(statement)
        tables.append(connection.introspection.table_name_converter(
                model._meta.db_table))

    # Install SQL indices for all newly created models
    index_sql = connection.creation.sql_indexes_for_model(model, style)
    if index_sql:
        with transaction.commit_on_success_unless_managed(using=db):
            for sql in index_sql:
                cursor.execute(sql)
Example #13
 def _reset_sequences(self, db_name):
     conn = connections[db_name]
     if conn.features.supports_sequence_reset:
         sql_list = conn.ops.sequence_reset_by_name_sql(
             no_style(), conn.introspection.sequence_list())
         if sql_list:
             with transaction.commit_on_success_unless_managed(using=db_name):
                 cursor = conn.cursor()
                 for sql in sql_list:
                     cursor.execute(sql)
Example #14
def method_set_order(ordered_obj, self, id_list, using=None):
    if using is None:
        using = DEFAULT_DB_ALIAS
    rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
    order_name = ordered_obj._meta.order_with_respect_to.name
    # FIXME: It would be nice if there was an "update many" version of update
    # for situations like this.
    with transaction.commit_on_success_unless_managed(using=using):
        for i, j in enumerate(id_list):
            ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
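Example #14 is the function Django binds as `set_<related>_order()` on models that use `order_with_respect_to`; the transaction wrapper makes the per-row `_order` updates commit together. A short sketch with hypothetical Question/Answer models:

from django.db import models


class Question(models.Model):
    text = models.CharField(max_length=200)


class Answer(models.Model):
    # on_delete is not required on the Django versions these examples target.
    question = models.ForeignKey(Question)
    text = models.CharField(max_length=200)

    class Meta:
        order_with_respect_to = 'question'


# Django generates question.set_answer_order()/get_answer_order(); the setter
# resolves to method_set_order() above, so each per-row UPDATE of "_order"
# runs inside the same transaction.
question = Question.objects.get(pk=1)
question.set_answer_order([3, 1, 2])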
Example #15
    def create_table(self, database, tablename):
        cache = BaseDatabaseCache(tablename, {})
        if not router.allow_migrate(database, cache.cache_model_class):
            return
        connection = connections[database]

        if tablename in connection.introspection.table_names():
            if self.verbosity > 0:
                self.stdout.write("Cache table '%s' already exists." %
                                  tablename)
            return

        fields = (
            # "key" is a reserved word in MySQL, so use "cache_key" instead.
            models.CharField(name='cache_key',
                             max_length=255,
                             unique=True,
                             primary_key=True),
            models.TextField(name='value'),
            models.DateTimeField(name='expires', db_index=True),
        )
        table_output = []
        index_output = []
        qn = connection.ops.quote_name
        for f in fields:
            field_output = [qn(f.name), f.db_type(connection=connection)]
            field_output.append("%sNULL" % ("NOT " if not f.null else ""))
            if f.primary_key:
                field_output.append("PRIMARY KEY")
            elif f.unique:
                field_output.append("UNIQUE")
            if f.db_index:
                unique = "UNIQUE " if f.unique else ""
                index_output.append("CREATE %sINDEX %s ON %s (%s);" %
                                    (unique, qn('%s_%s' % (tablename, f.name)),
                                     qn(tablename), qn(f.name)))
            table_output.append(" ".join(field_output))
        full_statement = ["CREATE TABLE %s (" % qn(tablename)]
        for i, line in enumerate(table_output):
            full_statement.append(
                '    %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
        full_statement.append(');')
        with transaction.commit_on_success_unless_managed():
            curs = connection.cursor()
            try:
                curs.execute("\n".join(full_statement))
            except DatabaseError as e:
                raise CommandError(
                    "Cache table '%s' could not be created.\nThe error was: %s."
                    % (tablename, force_text(e)))
            for statement in index_output:
                curs.execute(statement)
        if self.verbosity > 1:
            self.stdout.write("Cache table '%s' created." % tablename)
Example #16
    def delete(self):
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        with transaction.commit_on_success_unless_managed(using=self.using, connection=self.connection):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(
                        sender=model, instance=obj, using=self.using
                    )

            # fast deletes
            for qs in self.fast_deletes:
                qs._raw_delete(using=self.using)

            # update fields
            for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
                query = sql.UpdateQuery(model)
                for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                    query.update_batch([obj.pk for obj in instances],
                                       {field.name: value}, self.using)

            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()

            # delete instances
            for model, instances in six.iteritems(self.data):
                query = sql.DeleteQuery(model)
                pk_list = [obj.pk for obj in instances]
                query.delete_batch(pk_list, self.using)

                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(
                            sender=model, instance=obj, using=self.using
                        )

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
Example #17
    def bulk_create(self, objs, batch_size=None):
        """ wrapping around a usual bulk_create to provide version-specific
        information for all objects. As with original bulk creation, reverse
        relationships and M2Ms are not supported. DOES set the PRIMARY KEY for
        new objects, updates PK and creation modification dates for existing
        objects. Works with BaseVersionedObject's only.

        WARNING: has side-effects """
        def close_records(ids_to_close):
            query = sql.UpdateQuery(self.model)

            pk_field = query.get_meta().pk
            query.add_update_values({'ends_at': now})
            query.where = query.where_class()
            constr = sql.where.Constraint(None, pk_field.column, pk_field)
            query.where.add((constr, 'in', ids_to_close), sql.where.AND)
            query.add_q(Q(ends_at__isnull=True))
            query.get_compiler(self.db).execute_sql(None)

        assert batch_size is None or batch_size > 0

        if self.model._meta.parents:
            raise ValueError("Can't bulk create an inherited model")

        if not objs:
            return objs

        now = timezone.now()
        ids_to_close = []
        for obj in objs:  # this loop modifies given objects

            if obj.pk:  # existing object, need to "close" old version later
                ids_to_close.append(obj.pk)

            else:  # new object
                obj.pk = get_new_local_id()
                obj.date_created = now

            obj.starts_at = now
            obj.guid = uuid.uuid1().hex

        self._for_write = True
        fields = self.model._meta.local_fields
        with transaction.commit_on_success_unless_managed(using=self.db):

            # close old records by setting 'ends_at' to 'now', must be first
            close_records(ids_to_close)

            # insert records with new / updated objects
            self._batched_insert(list(objs), fields, batch_size)

        return objs
Example #18
    def bulk_create(self, objs, batch_size=None):
        """ wrapping around a usual bulk_create to provide version-specific
        information for all objects. As with original bulk creation, reverse
        relationships and M2Ms are not supported. DOES set the PRIMARY KEY for
        new objects, updates PK and creation modification dates for existing
        objects. Works with BaseVersionedObject's only.

        WARNING: has side-effects """
        def close_records(ids_to_close):
            query = sql.UpdateQuery(self.model)

            pk_field = query.get_meta().pk
            query.add_update_values({'ends_at': now})
            query.where = query.where_class()
            constr = sql.where.Constraint(None, pk_field.column, pk_field)
            query.where.add((constr, 'in', ids_to_close), sql.where.AND)
            query.add_q(Q(ends_at__isnull=True))
            query.get_compiler(self.db).execute_sql(None)

        assert batch_size is None or batch_size > 0

        if self.model._meta.parents:
            raise ValueError("Can't bulk create an inherited model")

        if not objs:
            return objs

        now = timezone.now()
        ids_to_close = []
        for obj in objs:  # this loop modifies given objects

            if obj.pk:  # existing object, need to "close" old version later
                ids_to_close.append(obj.pk)

            else:  # new object
                obj.pk = get_new_local_id()
                obj.date_created = now

            obj.starts_at = now
            obj.guid = uuid.uuid1().hex

        self._for_write = True
        fields = self.model._meta.local_fields
        with transaction.commit_on_success_unless_managed(using=self.db):

            # close old records by setting 'ends_at' to 'now', must be first
            close_records(ids_to_close)

            # insert records with new / updated objects
            self._batched_insert(list(objs), fields, batch_size)

        return objs
Example #19
    def create_table(self, database, tablename):
        cache = BaseDatabaseCache(tablename, {})
        if not router.allow_migrate(database, cache.cache_model_class):
            return
        connection = connections[database]

        if tablename in connection.introspection.table_names():
            if self.verbosity > 0:
                self.stdout.write("Cache table '%s' already exists." % tablename)
            return

        fields = (
            # "key" is a reserved word in MySQL, so use "cache_key" instead.
            models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
            models.TextField(name='value'),
            models.DateTimeField(name='expires', db_index=True),
        )
        table_output = []
        index_output = []
        qn = connection.ops.quote_name
        for f in fields:
            field_output = [qn(f.name), f.db_type(connection=connection)]
            field_output.append("%sNULL" % ("NOT " if not f.null else ""))
            if f.primary_key:
                field_output.append("PRIMARY KEY")
            elif f.unique:
                field_output.append("UNIQUE")
            if f.db_index:
                unique = "UNIQUE " if f.unique else ""
                index_output.append("CREATE %sINDEX %s ON %s (%s);" %
                    (unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
                    qn(f.name)))
            table_output.append(" ".join(field_output))
        full_statement = ["CREATE TABLE %s (" % qn(tablename)]
        for i, line in enumerate(table_output):
            full_statement.append('    %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
        full_statement.append(');')
        with transaction.commit_on_success_unless_managed():
            curs = connection.cursor()
            try:
                curs.execute("\n".join(full_statement))
            except DatabaseError as e:
                raise CommandError(
                    "Cache table '%s' could not be created.\nThe error was: %s." %
                    (tablename, force_text(e)))
            for statement in index_output:
                curs.execute(statement)
        if self.verbosity > 1:
            self.stdout.write("Cache table '%s' created." % tablename)
Example #20
 def handle_label(self, tablename, **options):
     db = options.get('database')
     cache = BaseDatabaseCache(tablename, {})
     if not router.allow_syncdb(db, cache.cache_model_class):
         return
     connection = connections[db]
     fields = (
         # "key" is a reserved word in MySQL, so use "cache_key" instead.
         models.CharField(name='cache_key',
                          max_length=255,
                          unique=True,
                          primary_key=True),
         models.TextField(name='value'),
         models.DateTimeField(name='expires', db_index=True),
     )
     table_output = []
     index_output = []
     qn = connection.ops.quote_name
     for f in fields:
         field_output = [qn(f.name), f.db_type(connection=connection)]
         field_output.append("%sNULL" % (not f.null and "NOT " or ""))
         if f.primary_key:
             field_output.append("PRIMARY KEY")
         elif f.unique:
             field_output.append("UNIQUE")
         if f.db_index:
             unique = f.unique and "UNIQUE " or ""
             index_output.append("CREATE %sINDEX %s ON %s (%s);" % \
                 (unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
                 qn(f.name)))
         table_output.append(" ".join(field_output))
     full_statement = ["CREATE TABLE %s (" % qn(tablename)]
     for i, line in enumerate(table_output):
         full_statement.append(
             '    %s%s' % (line, i < len(table_output) - 1 and ',' or ''))
     full_statement.append(');')
     with transaction.commit_on_success_unless_managed():
         curs = connection.cursor()
         try:
             curs.execute("\n".join(full_statement))
         except DatabaseError as e:
             raise CommandError(
                 "Cache table '%s' could not be created.\nThe error was: %s."
                 % (tablename, force_text(e)))
         for statement in index_output:
             curs.execute(statement)
Example #21
 def test_commit_on_success_success(self):
     results = []
     try:
         with transaction.commit_on_success():
             results.append('begins')
             defer(lambda *a, **kw: results.append('DEFERRED A'))
             try:
                 with transaction.commit_on_success_unless_managed():
                     results.append('nested begins')
                     defer(lambda *a, **kw: results.append('DEFERRED NESTED'))
                     results.append('nested ends')
             except Failure:
                 pass
             defer(lambda *a, **kw: results.append('DEFERRED B'))
             results.append('ends')
     except Failure:
         pass
     self.failUnlessEqual(results, ['begins', 'nested begins', 'nested ends', 'ends', 'DEFERRED A', 'DEFERRED NESTED', 'DEFERRED B'], results)
Example #22
 def handle_label(self, tablename, **options):
     db = options.get('database')
     cache = BaseDatabaseCache(tablename, {})
     if not router.allow_syncdb(db, cache.cache_model_class):
         return
     connection = connections[db]
     fields = (
         # "key" is a reserved word in MySQL, so use "cache_key" instead.
         models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
         models.TextField(name='value'),
         models.DateTimeField(name='expires', db_index=True),
     )
     table_output = []
     index_output = []
     qn = connection.ops.quote_name
     for f in fields:
         field_output = [qn(f.name), f.db_type(connection=connection)]
         field_output.append("%sNULL" % (not f.null and "NOT " or ""))
         if f.primary_key:
             field_output.append("PRIMARY KEY")
         elif f.unique:
             field_output.append("UNIQUE")
         if f.db_index:
             unique = f.unique and "UNIQUE " or ""
             index_output.append("CREATE %sINDEX %s ON %s (%s);" % \
                 (unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
                 qn(f.name)))
         table_output.append(" ".join(field_output))
     full_statement = ["CREATE TABLE %s (" % qn(tablename)]
     for i, line in enumerate(table_output):
         full_statement.append('    %s%s' % (line, i < len(table_output)-1 and ',' or ''))
     full_statement.append(');')
     with transaction.commit_on_success_unless_managed():
         curs = connection.cursor()
         try:
             curs.execute("\n".join(full_statement))
         except DatabaseError as e:
             raise CommandError(
                 "Cache table '%s' could not be created.\nThe error was: %s." %
                     (tablename, force_text(e)))
         for statement in index_output:
             curs.execute(statement)
Example #23
 def test_commit_on_success_nested_failure(self):
     results = []
     try:
         with transaction.commit_on_success():
             results.append('begins')
             defer(lambda *a, **kw: results.append('DEFERRED A'))
             try:
                 with transaction.commit_on_success_unless_managed():
                     results.append('nested begins')
                     defer(lambda *a, **kw: results.append('DEFERRED NESTED'))
                     results.append('nested ends')
                     raise Failure
             except Failure:
                 pass
             defer(lambda *a, **kw: results.append('DEFERRED B'))
             results.append('ends')
     except Failure:
         pass
     # With commit_on_success_unless_managed, DEFERRED NESTED is still called even though the nested block fails.
     self.failUnlessEqual(results, ['begins', 'nested begins', 'nested ends', 'ends', 'DEFERRED A', 'DEFERRED NESTED', 'DEFERRED B'], results)
Example #24
    def handle(self, *fixture_labels, **options):

        self.ignore = options.get("ignore")
        self.using = options.get("database")

        if not len(fixture_labels):
            raise CommandError(
                "No database fixture specified. Please provide the path "
                "of at least one fixture in the command line."
            )

        self.verbosity = int(options.get("verbosity"))

        with transaction.commit_on_success_unless_managed(using=self.using):
            self.loaddata(fixture_labels)

        # Close the DB connection -- unless we're still in a transaction. This
        # is required as a workaround for an edge case in MySQL: if the same
        # connection is used to create tables, load data, and query, the query
        # can return incorrect results. See Django #7572, MySQL #37735.
        if transaction.get_autocommit(self.using):
            connections[self.using].close()
Example #25
    def handle(self, *fixture_labels, **options):

        self.ignore = options.get('ignore')
        self.using = options.get('database')

        if not len(fixture_labels):
            raise CommandError(
                "No database fixture specified. Please provide the path "
                "of at least one fixture in the command line.")

        self.verbosity = int(options.get('verbosity'))

        with transaction.commit_on_success_unless_managed(using=self.using):
            self.loaddata(fixture_labels)

        # Close the DB connection -- unless we're still in a transaction. This
        # is required as a workaround for an edge case in MySQL: if the same
        # connection is used to create tables, load data, and query, the query
        # can return incorrect results. See Django #7572, MySQL #37735.
        if transaction.get_autocommit(self.using):
            connections[self.using].close()
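Examples #24 and #25 wrap the whole `loaddata` run in a single `commit_on_success_unless_managed()` block, so a fixture that fails to load should leave the database as it was. A minimal sketch of invoking it programmatically, using the fixture name from the examples above:

from django.core.management import call_command

# Loads the "initial_data" fixture inside one transaction block, mirroring
# handle() above.
call_command('loaddata', 'initial_data', database='default', verbosity=0)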
Example #26
    def sync_apps(self, connection, app_labels):
        "Runs the old syncdb-style operation on a list of app_labels."
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (app_config.label,
                router.get_migratable_models(app_config.models_module, connection.alias, include_auto_created=True))
            for app_config in apps.get_app_configs(only_with_models_module=True)
            if app_config.label in app_labels
        ]

        def model_installed(model):
            opts = model._meta
            converter = connection.introspection.table_name_converter
            # Note that if a model is unmanaged we short-circuit and never try to install it
            return not ((converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))

        manifest = OrderedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models
        )

        create_models = set(itertools.chain(*manifest.values()))
        emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)

        # Create the tables for each model
        if self.verbosity >= 1:
            self.stdout.write("  Creating tables...\n")
        with transaction.atomic(using=connection.alias, savepoint=False):
            for app_name, model_list in manifest.items():
                for model in model_list:
                    # Create the model's database table, if it doesn't already exist.
                    if self.verbosity >= 3:
                        self.stdout.write("    Processing %s.%s model\n" % (app_name, model._meta.object_name))
                    sql, references = connection.creation.sql_create_model(model, no_style(), seen_models)
                    seen_models.add(model)
                    created_models.add(model)
                    for refto, refs in references.items():
                        pending_references.setdefault(refto, []).extend(refs)
                        if refto in seen_models:
                            sql.extend(connection.creation.sql_for_pending_references(refto, no_style(), pending_references))
                    sql.extend(connection.creation.sql_for_pending_references(model, no_style(), pending_references))
                    if self.verbosity >= 1 and sql:
                        self.stdout.write("    Creating table %s\n" % model._meta.db_table)
                    for statement in sql:
                        cursor.execute(statement)
                    tables.append(connection.introspection.table_name_converter(model._meta.db_table))

        # We force a commit here, as that was the previous behaviour.
        # If you can prove we don't need this, remove it.
        transaction.set_dirty(using=connection.alias)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        if self.verbosity >= 1:
            self.stdout.write("  Installing custom SQL...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, no_style(), connection)
                    if custom_sql:
                        if self.verbosity >= 2:
                            self.stdout.write("    Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
                        try:
                            with transaction.commit_on_success_unless_managed(using=connection.alias):
                                for sql in custom_sql:
                                    cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write("    Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
                            if self.show_traceback:
                                traceback.print_exc()
                    else:
                        if self.verbosity >= 3:
                            self.stdout.write("    No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))

        if self.verbosity >= 1:
            self.stdout.write("  Installing indexes...\n")

        # Install SQL indices for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, no_style())
                    if index_sql:
                        if self.verbosity >= 2:
                            self.stdout.write("    Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
                        try:
                            with transaction.commit_on_success_unless_managed(using=connection.alias):
                                for sql in index_sql:
                                    cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write("    Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))

        # Load initial_data fixtures (unless that has been disabled)
        if self.load_initial_data:
            call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=connection.alias, skip_validation=True)

        return created_models
Example #27
    def delete(self):
        """ deletion for versioned objects means setting the 'ends_at' field
        to the current datetime. Applied only for active versions, having
        ends_at=NULL """
        now = timezone.now()

        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        with transaction.commit_on_success_unless_managed(using=self.using):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(sender=model,
                                            instance=obj,
                                            using=self.using)

            # fast deletes - TODO check works correctly with versioned rels
            for qs in self.fast_deletes:
                query = sql.UpdateQuery(qs.model)
                pk_list = [obj.pk for obj in qs.all()]
                query.update_batch(pk_list, {'ends_at': now}, self.using)

            # update fields - TODO check works correctly with versioned rels
            for model, instances_for_fieldvalues in six.iteritems(
                    self.field_updates):
                for (field, value), instances in six.iteritems(
                        instances_for_fieldvalues):
                    for o in instances:  # update FK fields
                        setattr(o, field.name, value)
                    model.objects.bulk_create(instances)

            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()

            # delete instances by setting 'ends_at' to 'now'
            for model, instances in six.iteritems(self.data):
                query = sql.UpdateQuery(model)
                pk_list = [obj.pk for obj in instances]
                query.update_batch(pk_list, {'ends_at': now}, self.using)

                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(sender=model,
                                                 instance=obj,
                                                 using=self.using)

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(
                self.field_updates):
            for (field, value), instances in six.iteritems(
                    instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
Example #28
    def delete(self, schema):
        if schema is None:  # a schema must always be provided
            import inspect
            fx = inspect.stack()
            error_detail = ""
            for x in fx:
                error_detail += "\n\t {0}, line {1}".format(x[1], x[2])
            raise Exception(
                "cannot call '{1}' without schema in '{0}'.\nDetail:\n{2}"
                .format(__file__, "Collector.delete", error_detail))

        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        with transaction.commit_on_success_unless_managed(using=self.using):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(sender=model,
                                            instance=obj,
                                            using=self.using,
                                            schema=schema)

            # fast deletes
            for qs in self.fast_deletes:
                qs._raw_delete(using=self.using)

            # update fields
            for model, instances_for_fieldvalues in six.iteritems(
                    self.field_updates):
                query = sql.UpdateQuery(model)
                for (field, value), instances in six.iteritems(
                        instances_for_fieldvalues):
                    query.update_batch([obj.pk for obj in instances],
                                       {field.name: value}, self.using)

            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()

            # delete instances
            for model, instances in six.iteritems(self.data):
                query = sql.DeleteQuery(model)
                pk_list = [obj.pk for obj in instances]
                query.delete_batch(pk_list, self.using, schema=schema)

                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(sender=model,
                                                 instance=obj,
                                                 using=self.using,
                                                 schema=schema)

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(
                self.field_updates):
            for (field, value), instances in six.iteritems(
                    instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
Example #29
    def handle_noargs(self, **options):
        db = options.get("database")
        connection = connections[db]
        verbosity = int(options.get("verbosity"))
        interactive = options.get("interactive")
        # 'reset_sequences' is a stealth option
        reset_sequences = options.get("reset_sequences", True)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module(".management", app_name)
            except ImportError:
                pass

        sql_list = sql_flush(self.style, connection, only_django=True, reset_sequences=reset_sequences)

        if interactive:
            confirm = input(
                """You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """
                % connection.settings_dict["NAME"]
            )
        else:
            confirm = "yes"

        if confirm == "yes":
            try:
                with transaction.commit_on_success_unless_managed():
                    cursor = connection.cursor()
                    for sql in sql_list:
                        cursor.execute(sql)
            except Exception as e:
                raise CommandError(
                    """Database %s couldn't be flushed. Possible reasons:
  * The database isn't running or isn't configured correctly.
  * At least one of the expected database tables doesn't exist.
  * The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s"""
                    % (connection.settings_dict["NAME"], e)
                )

            # Emit the post sync signal. This allows individual
            # applications to respond as if the database had been
            # sync'd from scratch.
            all_models = []
            for app in models.get_apps():
                all_models.extend(
                    [m for m in models.get_models(app, include_auto_created=True) if router.allow_syncdb(db, m)]
                )
            emit_post_sync_signal(set(all_models), verbosity, interactive, db)

            # Reinstall the initial_data fixture.
            if options.get("load_initial_data"):
                call_command("loaddata", "initial_data", **options)

        else:
            self.stdout.write("Flush cancelled.\n")
Example #30
    def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Does the heavy-lifting involved in saving. Subclasses shouldn't need to
        override this method. It's separate from save() in order to hide the
        need for overrides of save() to pass around internal-only parameters
        ('raw', 'cls', and 'origin').
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        if cls is None:
            cls = self.__class__
            meta = cls._meta
            if not meta.proxy:
                origin = cls
        else:
            meta = cls._meta

        if origin and not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)

        # If we are in a raw save, save the object exactly as presented.
        # That means that we don't try to be smart about saving attributes
        # that might have come from the parent class - we just save the
        # attributes we have been given to the class we have been given.
        # We also go through this process to defer the save of proxy objects
        # to their actual underlying model.
        if not raw or meta.proxy:
            if meta.proxy:
                org = cls
            else:
                org = None
            for parent, field in meta.parents.items():
                # At this point, parent's primary key field may be unknown
                # (for example, from administration form which doesn't fill
                # this field). If so, fill it.
                if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                    setattr(self, parent._meta.pk.attname, getattr(self, field.attname))

                self.save_base(cls=parent, origin=org, using=using,
                               update_fields=update_fields)

                if field:
                    setattr(self, field.attname, self._get_pk_val(parent._meta))
                    # Since we didn't have an instance of the parent handy, we
                    # set attname directly, bypassing the descriptor.
                    # Invalidate the related object cache, in case it's been
                    # accidentally populated. A fresh instance will be
                    # re-built from the database if necessary.
                    cache_name = field.get_cache_name()
                    if hasattr(self, cache_name):
                        delattr(self, cache_name)

            if meta.proxy:
                return

        if not meta.proxy:
            non_pks = [f for f in meta.local_fields if not f.primary_key]

            if update_fields:
                non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields]

            with transaction.commit_on_success_unless_managed(using=using):
                # First, try an UPDATE. If that doesn't update anything, do an INSERT.
                pk_val = self._get_pk_val(meta)
                pk_set = pk_val is not None
                record_exists = True
                manager = cls._base_manager
                if pk_set:
                    # Determine if we should do an update (pk already exists, forced update,
                    # no force_insert)
                    if ((force_update or update_fields) or (not force_insert and
                            manager.using(using).filter(pk=pk_val).exists())):
                        if force_update or non_pks:
                            values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                            if values:
                                rows = manager.using(using).filter(pk=pk_val)._update(values)
                                if force_update and not rows:
                                    raise DatabaseError("Forced update did not affect any rows.")
                                if update_fields and not rows:
                                    raise DatabaseError("Save with update_fields did not affect any rows.")
                    else:
                        record_exists = False
                if not pk_set or not record_exists:
                    if meta.order_with_respect_to:
                        # If this is a model with an order_with_respect_to
                        # autopopulate the _order field
                        field = meta.order_with_respect_to
                        order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
                        self._order = order_value

                    fields = meta.local_fields
                    if not pk_set:
                        if force_update or update_fields:
                            raise ValueError("Cannot force an update in save() with no primary key.")
                        fields = [f for f in fields if not isinstance(f, AutoField)]

                    record_exists = False

                    update_pk = bool(meta.has_auto_field and not pk_set)
                    result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)

                    if update_pk:
                        setattr(self, meta.pk.attname, result)

        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False

        # Signal that the save is complete
        if origin and not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not record_exists),
                                   update_fields=update_fields, raw=raw, using=using)
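
The assertions at the top of save_base() mean force_insert cannot be combined with force_update or update_fields, and update_fields may not be an empty sequence. A minimal usage sketch of the UPDATE-vs-INSERT decision it makes; `Article` is a hypothetical model:

    from myapp.models import Article      # hypothetical model

    art = Article(title="draft")
    art.save()                             # no pk yet -> INSERT, post_save created=True
    art.title = "final"
    art.save(update_fields=['title'])      # pk set -> UPDATE restricted to 'title'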
Example #31
    def sync_apps(self, connection, app_labels):
        "Runs the old syncdb-style operation on a list of app_labels."
        cursor = connection.cursor()

        try:
            # Get a list of already installed *models* so that references work right.
            tables = connection.introspection.table_names(cursor)
            seen_models = connection.introspection.installed_models(tables)
            created_models = set()
            pending_references = {}

            # Build the manifest of apps and models that are to be synchronized
            all_models = [
                (app_config.label,
                 router.get_migratable_models(app_config,
                                              connection.alias,
                                              include_auto_created=True))
                for app_config in apps.get_app_configs()
                if app_config.models_module is not None
                and app_config.label in app_labels
            ]

            def model_installed(model):
                opts = model._meta
                converter = connection.introspection.table_name_converter
                # Note that if a model is unmanaged we short-circuit and never try to install it
                return not ((converter(opts.db_table) in tables) or
                            (opts.auto_created and converter(
                                opts.auto_created._meta.db_table) in tables))

            manifest = OrderedDict(
                (app_name, list(filter(model_installed, model_list)))
                for app_name, model_list in all_models)

            create_models = set(itertools.chain(*manifest.values()))
            emit_pre_migrate_signal(create_models, self.verbosity,
                                    self.interactive, connection.alias)

            # Create the tables for each model
            if self.verbosity >= 1:
                self.stdout.write("  Creating tables...\n")
            with transaction.atomic(using=connection.alias, savepoint=False):
                for app_name, model_list in manifest.items():
                    for model in model_list:
                        # Create the model's database table, if it doesn't already exist.
                        if self.verbosity >= 3:
                            self.stdout.write(
                                "    Processing %s.%s model\n" %
                                (app_name, model._meta.object_name))
                        sql, references = connection.creation.sql_create_model(
                            model, no_style(), seen_models)
                        seen_models.add(model)
                        created_models.add(model)
                        for refto, refs in references.items():
                            pending_references.setdefault(refto,
                                                          []).extend(refs)
                            if refto in seen_models:
                                sql.extend(
                                    connection.creation.
                                    sql_for_pending_references(
                                        refto, no_style(), pending_references))
                        sql.extend(
                            connection.creation.sql_for_pending_references(
                                model, no_style(), pending_references))
                        if self.verbosity >= 1 and sql:
                            self.stdout.write("    Creating table %s\n" %
                                              model._meta.db_table)
                        for statement in sql:
                            cursor.execute(statement)
                        tables.append(
                            connection.introspection.table_name_converter(
                                model._meta.db_table))

            # We force a commit here, as that was the previous behavior.
            # If you can prove we don't need this, remove it.
            transaction.set_dirty(using=connection.alias)
        finally:
            cursor.close()

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()
        try:
            # Install custom SQL for the app (but only if this
            # is a model we've just created)
            if self.verbosity >= 1:
                self.stdout.write("  Installing custom SQL...\n")
            for app_name, model_list in manifest.items():
                for model in model_list:
                    if model in created_models:
                        custom_sql = custom_sql_for_model(
                            model, no_style(), connection)
                        if custom_sql:
                            if self.verbosity >= 2:
                                self.stdout.write(
                                    "    Installing custom SQL for %s.%s model\n"
                                    % (app_name, model._meta.object_name))
                            try:
                                with transaction.commit_on_success_unless_managed(
                                        using=connection.alias):
                                    for sql in custom_sql:
                                        cursor.execute(sql)
                            except Exception as e:
                                self.stderr.write(
                                    "    Failed to install custom SQL for %s.%s model: %s\n"
                                    % (app_name, model._meta.object_name, e))
                                if self.show_traceback:
                                    traceback.print_exc()
                        else:
                            if self.verbosity >= 3:
                                self.stdout.write(
                                    "    No custom SQL for %s.%s model\n" %
                                    (app_name, model._meta.object_name))

            if self.verbosity >= 1:
                self.stdout.write("  Installing indexes...\n")

            # Install SQL indices for all newly created models
            for app_name, model_list in manifest.items():
                for model in model_list:
                    if model in created_models:
                        index_sql = connection.creation.sql_indexes_for_model(
                            model, no_style())
                        if index_sql:
                            if self.verbosity >= 2:
                                self.stdout.write(
                                    "    Installing index for %s.%s model\n" %
                                    (app_name, model._meta.object_name))
                            try:
                                with transaction.commit_on_success_unless_managed(
                                        using=connection.alias):
                                    for sql in index_sql:
                                        cursor.execute(sql)
                            except Exception as e:
                                self.stderr.write(
                                    "    Failed to install index for %s.%s model: %s\n"
                                    % (app_name, model._meta.object_name, e))
        finally:
            cursor.close()

        # Load initial_data fixtures (unless that has been disabled)
        if self.load_initial_data:
            for app_label in app_labels:
                call_command('loaddata',
                             'initial_data',
                             verbosity=self.verbosity,
                             database=connection.alias,
                             skip_validation=True,
                             app_label=app_label,
                             hide_empty=True)

        return created_models
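
router.get_migratable_models() in the manifest-building step above consults the project's database routers. A minimal router sketch in the Django 1.7-style signature this example targets; the 'replica' alias and 'analytics' app label are placeholders:

    class ReplicaRouter(object):
        """Keep a hypothetical 'analytics' app off the 'replica' alias."""

        def allow_migrate(self, db, model):      # Django 1.7-style signature
            if db == 'replica':
                return model._meta.app_label != 'analytics'
            return None  # no opinion; defer to other routers / the default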
Example #32
    def handle_noargs(self, **options):

        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback')
        load_initial_data = options.get('load_initial_data')

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError as exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith(
                        'No module named') or 'management' not in msg:
                    raise

        db = options.get('database')
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized
        all_models = [(app.__name__.split('.')[-2], [
            m for m in models.get_models(app, include_auto_created=True)
            if router.allow_syncdb(db, m)
        ]) for app in models.get_apps()]

        def model_installed(model):
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                        (opts.auto_created and converter(
                            opts.auto_created._meta.db_table) in tables))

        manifest = SortedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models)

        # Create the tables for each model
        if verbosity >= 1:
            self.stdout.write("Creating tables ...\n")
        with transaction.commit_on_success_unless_managed(using=db):
            for app_name, model_list in manifest.items():
                for model in model_list:
                    # Create the model's database table, if it doesn't already exist.
                    if verbosity >= 3:
                        self.stdout.write("Processing %s.%s model\n" %
                                          (app_name, model._meta.object_name))
                    sql, references = connection.creation.sql_create_model(
                        model, self.style, seen_models)
                    seen_models.add(model)
                    created_models.add(model)
                    for refto, refs in references.items():
                        pending_references.setdefault(refto, []).extend(refs)
                        if refto in seen_models:
                            sql.extend(
                                connection.creation.sql_for_pending_references(
                                    refto, self.style, pending_references))
                    sql.extend(
                        connection.creation.sql_for_pending_references(
                            model, self.style, pending_references))
                    if verbosity >= 1 and sql:
                        self.stdout.write("Creating table %s\n" %
                                          model._meta.db_table)
                    for statement in sql:
                        cursor.execute(statement)
                    tables.append(
                        connection.introspection.table_name_converter(
                            model._meta.db_table))

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        if verbosity >= 1:
            self.stdout.write("Installing custom SQL ...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style,
                                                      connection)
                    if custom_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing custom SQL for %s.%s model\n" %
                                (app_name, model._meta.object_name))
                        try:
                            with transaction.commit_on_success_unless_managed(
                                    using=db):
                                for sql in custom_sql:
                                    cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            if show_traceback:
                                traceback.print_exc()
                    else:
                        if verbosity >= 3:
                            self.stdout.write(
                                "No custom SQL for %s.%s model\n" %
                                (app_name, model._meta.object_name))

        if verbosity >= 1:
            self.stdout.write("Installing indexes ...\n")
        # Install SQL indices for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(
                        model, self.style)
                    if index_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing index for %s.%s model\n" %
                                (app_name, model._meta.object_name))
                        try:
                            with transaction.commit_on_success_unless_managed(
                                    using=db):
                                for sql in index_sql:
                                    cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            call_command('loaddata',
                         'initial_data',
                         verbosity=verbosity,
                         database=db,
                         skip_validation=True)
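
emit_post_sync_signal() above fires django.db.models.signals.post_syncdb once per app. The classic place to hook it was an app's management/__init__.py, which the import_module('.management', app_name) loop at the top imports; a sketch, with `myapp` and `Category` as placeholders:

    # myapp/management/__init__.py  (hypothetical app)
    from django.db.models.signals import post_syncdb
    import myapp.models

    def create_defaults(sender, created_models, db, **kwargs):
        # Seed a default row once the table actually exists.
        if myapp.models.Category in created_models:
            myapp.models.Category.objects.using(db).get_or_create(name="General")

    post_syncdb.connect(create_defaults, sender=myapp.models)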
Example #33
    def handle_noargs(self, **options):
        db = options.get('database')
        connection = connections[db]
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        # 'reset_sequences' and 'allow_cascade' are stealth options
        reset_sequences = options.get('reset_sequences', True)
        allow_cascade = options.get('allow_cascade', False)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError:
                pass

        sql_list = sql_flush(self.style, connection, only_django=True,
                             reset_sequences=reset_sequences,
                             allow_cascade=allow_cascade)

        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                with transaction.commit_on_success_unless_managed():
                    cursor = connection.cursor()
                    for sql in sql_list:
                        cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])
            # Emit the post sync signal. This allows individual
            # applications to respond as if the database had been
            # sync'd from scratch.
            all_models = []
            for app in models.get_apps():
                all_models.extend([
                    m for m in models.get_models(app, include_auto_created=True)
                    if router.allow_syncdb(db, m)
                ])
            emit_post_sync_signal(set(all_models), verbosity, interactive, db)

            # Reinstall the initial_data fixture.
            if options.get('load_initial_data'):
                call_command('loaddata', 'initial_data', **options)

        else:
            self.stdout.write("Flush cancelled.\n")
Example #34
    def handle_noargs(self, **options):

        verbosity = int(options.get("verbosity"))
        interactive = options.get("interactive")
        show_traceback = options.get("traceback")
        load_initial_data = options.get("load_initial_data")

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module(".management", app_name)
            except ImportError as exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith("No module named") or "management" not in msg:
                    raise

        db = options.get("database")
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (
                app.__name__.split(".")[-2],
                [m for m in models.get_models(app, include_auto_created=True) if router.allow_syncdb(db, m)],
            )
            for app in models.get_apps()
        ]

        def model_installed(model):
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not (
                (converter(opts.db_table) in tables)
                or (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
            )

        manifest = SortedDict(
            (app_name, list(filter(model_installed, model_list))) for app_name, model_list in all_models
        )

        create_models = set([x for x in itertools.chain(*manifest.values())])
        emit_pre_sync_signal(create_models, verbosity, interactive, db)

        # Create the tables for each model
        if verbosity >= 1:
            self.stdout.write("Creating tables ...\n")
        with transaction.commit_on_success_unless_managed(using=db):
            for app_name, model_list in manifest.items():
                for model in model_list:
                    # Create the model's database table, if it doesn't already exist.
                    if verbosity >= 3:
                        self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
                    sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                    seen_models.add(model)
                    created_models.add(model)
                    for refto, refs in references.items():
                        pending_references.setdefault(refto, []).extend(refs)
                        if refto in seen_models:
                            sql.extend(
                                connection.creation.sql_for_pending_references(refto, self.style, pending_references)
                            )
                    sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                    if verbosity >= 1 and sql:
                        self.stdout.write("Creating table %s\n" % model._meta.db_table)
                    for statement in sql:
                        cursor.execute(statement)
                    tables.append(connection.introspection.table_name_converter(model._meta.db_table))

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        if verbosity >= 1:
            self.stdout.write("Installing custom SQL ...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name)
                            )
                        try:
                            with transaction.commit_on_success_unless_managed(using=db):
                                for sql in custom_sql:
                                    cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write(
                                "Failed to install custom SQL for %s.%s model: %s\n"
                                % (app_name, model._meta.object_name, e)
                            )
                            if show_traceback:
                                traceback.print_exc()
                    else:
                        if verbosity >= 3:
                            self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))

        if verbosity >= 1:
            self.stdout.write("Installing indexes ...\n")
        # Install SQL indices for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing index for %s.%s model\n" % (app_name, model._meta.object_name)
                            )
                        try:
                            with transaction.commit_on_success_unless_managed(using=db):
                                for sql in index_sql:
                                    cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write(
                                "Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e)
                            )

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            call_command("loaddata", "initial_data", verbosity=verbosity, database=db, skip_validation=True)