Пример #1
0
    def test_partial_backup_restore_hits_klass_limit(self):
        """Restoring a backup that would exceed the klass limit must fail."""
        with transaction.atomic():
            source_instance = largish_test_data()
        owner = source_instance.owner

        with instance_context(source_instance):
            klass_names = list(Klass.objects.values_list('name', flat=True))

        backup = G(Backup, instance=source_instance, query_args={'class': klass_names})
        backup.refresh_from_db()

        target_instance = G(Instance, admin=owner)
        with instance_context(target_instance):
            existing_klasses = list(Klass.objects.all())

        limit_mock = mock.Mock(return_value=1)
        with mock.patch('apps.billing.models.AdminLimit.get_classes_count', limit_mock):
            with transaction.atomic():
                restore = Restore.objects.create(target_instance=target_instance,
                                                 archive=backup.archive,
                                                 owner=owner)
        restore.refresh_from_db()
        self.assertEqual(restore.status, Restore.STATUSES.ERROR)

        target_instance.refresh_from_db()
        self.assertNotEqual(*compare_instances(source_instance, target_instance))

        # The failed restore must not have created any new klasses.
        with instance_context(target_instance):
            self.assertEqual(existing_klasses, list(Klass.objects.all()))
Пример #2
0
    def backup_instance(self, storage, instance, query_args=None):
        """Serialize the instance's migrations and model data into ``storage``.

        query_args is a dict in a form of
        {
            'model_backup_name': [id_list],
            'model_backup_name3': [], #no data
        }
        If there is no key in query_args, queryset is not filtered
        """
        with instance_context(instance):
            db = get_instance_db(instance)

            # Record the instance's current migration targets so a restore
            # can rebuild the matching schema first.
            targets = self.get_instance_migrations(instance)
            storage.start_model(self.MIGRATIONS_STORAGE)
            for target in targets:
                storage.append(target)
            storage.end_model()

            with transaction.atomic(using=db):
                # REPEATABLE READ gives one consistent snapshot for every
                # model backed up inside this transaction. Use the cursor as
                # a context manager so it is always closed (the original
                # leaked it).
                with transaction.get_connection(db).cursor() as cursor:
                    cursor.execute(
                        'SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;')
                for model in self.default_sorted:
                    options = self.get_options_for_model(model)
                    storage.start_model(options.get_name())
                    try:
                        options.backup(storage, query_args)
                    except Exception:
                        # Log which model failed, then let the error abort
                        # (and roll back) the whole backup transaction.
                        logger.warning('Exception for model %s',
                                       model,
                                       exc_info=True)
                        raise
                    storage.end_model()
Пример #3
0
    def handle(self, *args, **options):
        """Purge dead objects: global models first, then each instance's tenant models."""
        self.verbosity = int(options.get('verbosity'))

        tenant_models = []
        self._notice('* Processing global models.')

        for model in LiveAbstractModel.__subclasses__():
            if is_model_in_tenant_apps(model):
                # Defer tenant models; they are cleaned per instance below.
                tenant_models.append(model)
                continue
            with transaction.atomic():
                self._notice('- Deleting objects from model %s.' %
                             model._meta.object_name)
                model.all_objects.dead().delete()

        # Process instanced data
        for instance in Instance.objects.iterator():
            self._notice('* Processing models from instance: %s.' %
                         instance.name)

            with instance_context(instance):
                db = get_instance_db(instance)
                for model in tenant_models:
                    with transaction.atomic(db):
                        self._notice('- Deleting objects from model %s.' %
                                     model._meta.object_name)
                        model.all_objects.dead().delete()
Пример #4
0
 def get_instance_migrations(self, instance):
     """Return leaf migration targets (for apps in ``self.apps``) of the instance DB."""
     from django.db.migrations.loader import MigrationLoader

     db = get_instance_db(instance)
     with instance_context(instance):
         leaves = MigrationLoader(connections[db]).graph.leaf_nodes()
     # Keep only targets belonging to the apps this site backs up.
     return [leaf for leaf in leaves if leaf[0] in self.apps]
Пример #5
0
 def test_partial_backup_size_exceeded(self):
     """A partial backup that exceeds the size limit must end in ERROR."""
     with instance_context(self.instance):
         first_klass = Klass.objects.first()

     backup = G(Backup, instance=self.instance,
                query_args={'class': [first_klass.name]})
     backup.run()
     backup.refresh_from_db()
     self.assertEqual(backup.status, Backup.STATUSES.ERROR)
Пример #6
0
 def test_old_data_restore(self):
     """Data dumped from an old schema restores into a freshly created instance."""
     from .old_instance import data

     storage = DictStorage('DUMMY')
     storage.update(data)

     owner = G(Admin, is_active=True)
     instance = Instance.objects.create(name="restore_old_data", owner=owner)
     default_site.restore_to_new_schema(storage, instance)

     instance.refresh_from_db()
     with instance_context(instance):
         self.assertTrue(Klass.objects.exists())
         self.assertTrue(Channel.objects.exists())
Пример #7
0
    def restore_to_instance(self, storage, instance, models_sorted, apps=None, partial=False):
        """Load backed-up data from ``storage`` into ``instance``.

        For a full restore, existing rows are truncated first; sequences are
        reset afterwards in either case. Everything runs in one transaction.
        """
        db = get_instance_db(instance)
        connection = connections[db]

        with instance_context(instance), transaction.atomic(using=db):
            if not partial:
                self.truncate_models(connection, models_sorted)

            for restored_model in models_sorted:
                model_options = self.get_options_for_model(restored_model)
                model_options.restore(storage, partial)

            self.reset_sequences(connection, models_sorted)
Пример #8
0
    def test_partial_backup_restore(self):
        """A single-klass backup restores that klass and its data intact."""
        with transaction.atomic():
            source_instance = largish_test_data()
        owner = source_instance.owner

        with instance_context(source_instance):
            source_klass = Klass.objects.last()
            source_objects_count = source_klass.objects_count

        backup = G(Backup, instance=source_instance,
                   query_args={'class': [source_klass.name]})
        target_instance = G(Instance, admin=owner)

        with transaction.atomic():
            Restore.objects.create(target_instance=target_instance, backup=backup, owner=owner)
        target_instance.refresh_from_db()

        with instance_context(target_instance):
            # Expect exactly two klasses in the target after the restore.
            self.assertEqual(Klass.objects.count(), 2)
            restored = Klass.objects.last()
            self.assertEqual(restored.name, source_klass.name)
            self.assertEqual(restored.objects_count, source_objects_count)
            self.assertEqual(restored.existing_indexes, source_klass.existing_indexes)
Пример #9
0
    def test_tenant_model_separation(self):
        """Objects with the same pk in two instances must stay fully separate."""
        self.assertRaises(AttributeError, TenantModel().save)

        first_instance = G(Instance, name='test')
        second_instance = G(Instance, name='test2')
        expected_pk = 1

        with instance_context(first_instance):
            model1 = TenantModel.create(char='A')
        with instance_context(second_instance):
            model2 = TenantModel.create(char='B')

        # Each instance has its own pk sequence, so both objects share a pk.
        self.assertEqual(model1.pk, model2.pk)

        # Now refresh from redis just to be sure we are not dealing with bogus data
        with instance_context(first_instance):
            model1 = TenantModel.get(pk=expected_pk)
            object_key1 = model1.get_object_key(pk=expected_pk)
        with instance_context(second_instance):
            model2 = TenantModel.get(pk=model2.pk)
            object_key2 = model2.get_object_key(pk=expected_pk)

        # Same pk, but distinct data and distinct storage keys per instance.
        self.assertNotEqual(model1.char, model2.char)
        self.assertNotEqual(object_key1, object_key2)
Пример #10
0
    def test_restore(self):
        """A full restore reproduces the source instance, indexes included."""
        with transaction.atomic():
            source_instance = largish_test_data()
        owner = source_instance.owner

        backup = G(Backup, instance=source_instance)
        target_instance = G(Instance, admin=owner)

        # Snapshot the indexes present on the source before restoring.
        with instance_context(source_instance):
            indexes_by_klass = {
                klass.id: klass.existing_indexes
                for klass in Klass.objects.all()
                if klass.existing_indexes
            }
        self.assertTrue(len(indexes_by_klass) > 0)

        with transaction.atomic():
            G(Restore, target_instance=target_instance, backup=backup)

        target_instance.refresh_from_db()
        self.assertEqual(*compare_instances(source_instance, target_instance))
        with instance_context(target_instance):
            for klass in Klass.objects.all():
                if klass.existing_indexes:
                    self.assertEqual(klass.existing_indexes, indexes_by_klass[klass.id])
Пример #11
0
    def refresh(cls, instance, storage_size=None):
        """Recompute usage indicators (schedules, APNS devices, storage) for ``instance``."""
        with instance_context(instance):
            from apps.codeboxes.models import CodeBoxSchedule
            from apps.push_notifications.models import APNSDevice

            # Schedules counter.
            schedules_qs = cls.objects.filter(
                type=InstanceIndicator.TYPES.SCHEDULES_COUNT, instance=instance,
            )
            schedules_qs.update(value=CodeBoxSchedule.objects.count())

            # Active APNS devices counter.
            devices_qs = InstanceIndicator.objects.filter(
                type=InstanceIndicator.TYPES.APNS_DEVICES_COUNT, instance=instance,
            )
            devices_qs.update(value=APNSDevice.objects.filter(is_active=True).count())

            # NOTE(review): a storage_size of 0 is skipped by this truthy
            # check — confirm that is intended before tightening to
            # `is not None`. Also note the mixed cls/InstanceIndicator usage.
            if storage_size:
                InstanceIndicator.objects.filter(
                    type=InstanceIndicator.TYPES.STORAGE_SIZE, instance=instance,
                ).update(value=storage_size)
Пример #12
0
 def test_partial_backup(self):
     """A backup limited to one klass stores only that klass and its objects."""
     with instance_context(self.instance):
         target_klass = Klass.objects.first()

     backup = G(Backup, instance=self.instance,
                query_args={'class': [target_klass.name]})
     backup.run()
     backup.refresh_from_db()

     self.assertEqual(backup.status, Backup.STATUSES.SUCCESS)
     self.assertEqual(backup.archive.size, backup.size)

     archive = ZipFile(backup.archive.file, 'r')
     stored_klasses = json.load(archive.open('class/00000000.json'))
     self.assertEqual(stored_klasses[0]['id'], target_klass.id)

     stored_objects = json.load(archive.open('data_object/00000000.json'))
     self.assertTrue(
         all(obj['_klass_id'] == target_klass.id for obj in stored_objects))

     migrations = json.load(
         archive.open('%s/00000000.json' % default_site.MIGRATIONS_STORAGE))
     self.assertTrue(len(migrations) > 0)
Пример #13
0
def reasonably_large_instance(admin):  # noqa
    """Build a fixture Instance owned by ``admin`` holding a bit of everything.

    Populates api keys, klasses with data objects, codeboxes (with webhooks,
    triggers, schedules), channels, push devices and configs, response
    templates, a hosting with files, sockets, socket environments, users and
    memberships — enough variety to exercise backup/restore end to end.
    """
    instance = G(Instance, name='backup-test', owner=admin)
    with instance_context(instance):
        for _ in range(3):
            G(ApiKey, instance=instance)
        # Klasses covering the main field types, each with generated data objects.
        klasses = []
        for idx in range(10):
            G(Group)
            klass = G(Klass, name='Klass%d' % idx,
                      schema=[{'name': 'string%d' % idx, 'type': 'string', 'filter_index': True},
                              {'name': 'int%d' % idx, 'type': 'integer'},
                              {'name': 'file%d' % idx, 'type': 'file'},
                              {'name': 'geo%d' % idx, 'type': 'geopoint'}])
            create_data_objects(klass)
            klasses.append(klass)

        # Each codebox gets a webhook, a trigger on a random klass, and a schedule.
        for i in range(10):
            codebox = G(CodeBox, label='test-%d' % (i,), source='test source')
            G(Webhook, codebox=codebox, public=True, name='data-endpoint-%s' % (i, ))
            G(Trigger, codebox=codebox, klass=random.choice(klasses))
            G(CodeBoxSchedule, codebox=codebox, crontab='1 * * * *')
        for _ in range(10):
            G(Channel)
        for _ in range(10):
            G(GCMDevice)
            G(APNSDevice)
        for i in range(10):
            G(ResponseTemplate, name='response-template-%d' % (i, ))

        # A hosting with a handful of small HTML files written via tempfiles.
        hosting = G(Hosting, label='test_hosting', description='test description', instance=instance)
        for i in range(10):
            with tempfile.NamedTemporaryFile(suffix='.html') as tmp_file:
                tmp_file.write(b'File %d' % i)
                tmp_file.seek(0)
                file_object = File(tmp_file)
                HostingFile(
                    path='example/path/name{}.html'.format(i),
                    level=2,
                    size=file_object.size,
                    file_object=file_object,
                    hosting=hosting,
                ).save()

        # Sockets built from a minimal YAML spec plus one bundled script.
        socket_zip = pack_test_data_into_zip_file("""
endpoints:
  custom_endpoint:
    file: scripts/custom_script_1.py
""", [CUSTOM_SCRIPT_1])

        for i in range(2):
            socket = Socket(name='name-{}'.format(i))
            socket.zip_file.save('zip_file', ContentFile(socket_zip))

        # Socket environments with small filesystem images.
        for i in range(2):
            with tempfile.NamedTemporaryFile(suffix='.fs') as tmp_file:
                tmp_file.write(b'File %d' % i)
                tmp_file.seek(0)
                file_object = File(tmp_file)
                SocketEnvironment(
                    name='Env %d' % i,
                    status=SocketEnvironment.STATUSES.OK,
                    fs_file=file_object,
                ).save()

        GCMConfig.objects.create(production_api_key='production', development_api_key='development')
        APNSConfig.objects.create(production_certificate=urandom(4), development_certificate=urandom(4))

        for _ in range(10):
            G(User)
            G(Membership)

    return instance
Пример #14
0
    def restore_to_new_schema(self, storage, instance, partial=False):
        """Restore backed-up data into a brand-new schema, then swap it in.

        A throwaway Instance is created, migrated to the migration state
        stored in the backup, filled from ``storage``, migrated forward to
        the current project state, and finally its schema name and storage
        prefix are swapped with ``instance``'s. Deleting the throwaway
        instance in ``finally`` then disposes of the *old* schema and files
        on success — or of the half-built new ones if anything raised.
        """
        from apps.instances.models import Instance
        from .executor import BackupMigrationExecutor

        db = get_instance_db(instance)
        con = connections[db]

        stored_targets = self.get_stored_migration_targets(storage)

        # NOTE(review): "{self.id}" is literal text here (this is
        # %-formatting, not an f-string), so the schema name will contain
        # the characters "{self.id}". Looks like a leftover from an f-string
        # conversion — confirm whether an id was meant to be interpolated.
        new_instance = Instance(owner=instance.owner,
                                name="_%s" % generate_key(),
                                schema_name="%s_{self.id}_%s" %
                                (instance.id, instance.name),
                                database=instance.database)

        # If there are no stored migrations sync_schema on create
        try:
            new_instance.save(sync_schema=not stored_targets)
            # Prefix combines both pks so the new instance's files never
            # collide with the old instance's.
            new_instance.storage_prefix = "%s_%s" % (instance.pk,
                                                     new_instance.pk)
            new_instance.save()

            apps = None
            if stored_targets:
                with instance_context(new_instance):
                    # Bring the fresh schema to the exact migration state
                    # recorded in the backup before loading its data.
                    executor = BackupMigrationExecutor(con)
                    state = executor.migrate(stored_targets)
                    apps = state.apps

                    if partial:
                        signals.post_tenant_migrate.send(
                            sender=new_instance,
                            tenant=new_instance,
                            using=con.alias,
                            created=True,
                            partial=True,
                        )

            models_sorted = self.calculate_sorted(apps)
            self.restore_to_instance(storage,
                                     new_instance,
                                     models_sorted,
                                     apps,
                                     partial=partial)

            # Upgrade schema to current version
            # migrate will detect that this instance is already created
            # and will forward to current project state
            # it will not fire post_migrate signals
            # and it will fire post_tenant_migrate with schema_created=False
            # Initial data will be fed from migrations (a common way how we do it in project)
            if stored_targets:
                call_command('migrate',
                             shared=False,
                             schema_name=new_instance.schema_name,
                             interactive=False,
                             verbosity=settings.SCHEMA_MIGRATIONS_VERBOSITY)

            # swap prefixes. When new_instance is deleted, old instance files will also be deleted.
            instance.storage_prefix, new_instance.storage_prefix = (
                new_instance.get_storage_prefix(),
                instance.get_storage_prefix())
            # swap schemas
            instance.schema_name, new_instance.schema_name = (
                new_instance.schema_name, instance.schema_name)
            instance.save()
            new_instance.save()
        finally:
            new_instance.delete()
Пример #15
0
    def test_non_empty_backup(self):
        """Creating a backup of an instance that already holds data must work."""
        inst = G(Instance, name='testtest')

        with instance_context(inst):
            G(CodeBox, label='test', source="test source")

        G(Backup, instance=inst)