def handle(self, *args, **options):
    self.verbosity = int(options.get('verbosity'))
    tenant_models = []

    self._notice('* Processing global models.')
    for subclass in LiveAbstractModel.__subclasses__():
        if is_model_in_tenant_apps(subclass):
            tenant_models.append(subclass)
        else:
            # Process common subclasses
            with transaction.atomic():
                self._notice('- Deleting objects from model %s.' % subclass._meta.object_name)
                subclass.all_objects.dead().delete()

    # Process instanced data
    for instance in Instance.objects.iterator():
        self._notice('* Processing models from instance: %s.' % instance.name)
        with instance_context(instance):
            db = get_instance_db(instance)
            for tenant_model in tenant_models:
                with transaction.atomic(db):
                    self._notice('- Deleting objects from model %s.' % tenant_model._meta.object_name)
                    tenant_model.all_objects.dead().delete()
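# Invocation sketch: handle() above is a standard Django management command
# entry point. The command name 'cleanup_dead_objects' is an illustrative
# assumption, not confirmed by the source.
# from django.core.management import call_command
# call_command('cleanup_dead_objects', verbosity=2)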
def _migrate_tenants(self, tenants, apps):
    for tenant in tenants:
        db = get_instance_db(tenant)
        connection = connections[db]
        set_current_instance(tenant)

        self._notice("=== Running migrate for schema: %s" % tenant.schema_name)
        signals.pre_tenant_migrate.send(sender=tenant, tenant=tenant, verbosity=self.verbosity,
                                        using=connection.alias)
        schema_created = self._migrate_schema(connection, tenant)
        with ignore_signal(post_migrate):
            self.run_migrations(connection, apps, schema_created=schema_created, skip_checks=True)

        signals.post_tenant_migrate.send(
            sender=tenant,
            tenant=tenant,
            verbosity=self.verbosity,
            using=connection.alias,
            created=schema_created,
            partial=False,
        )
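# Hedged sketch of the ignore_signal() helper used above: an assumed
# implementation that temporarily detaches all receivers from a Django signal
# so run_migrations() cannot trigger them. Illustrative only; the project's
# actual helper may differ.
from contextlib import contextmanager

@contextmanager
def ignore_signal(*signals):
    saved = [(signal, signal.receivers) for signal in signals]
    try:
        for signal in signals:
            signal.receivers = []
            signal.sender_receivers_cache.clear()
        yield
    finally:
        for signal, receivers in saved:
            signal.receivers = receivers
            signal.sender_receivers_cache.clear()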
def backup_instance(self, storage, instance, query_args=None):
    """
    query_args is a dict of the form:
    {
        'model_backup_name': [id_list],
        'model_backup_name3': [],  # no data
    }
    If a model's key is missing from query_args, its queryset is not filtered.
    """
    with instance_context(instance):
        db = get_instance_db(instance)

        # Get migrations
        targets = self.get_instance_migrations(instance)
        storage.start_model(self.MIGRATIONS_STORAGE)
        for target in targets:
            storage.append(target)
        storage.end_model()

        with transaction.atomic(using=db):
            cursor = transaction.get_connection(db).cursor()
            cursor.execute('SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;')
            for model in self.default_sorted:
                options = self.get_options_for_model(model)
                storage.start_model(options.get_name())
                try:
                    options.backup(storage, query_args)
                except Exception:
                    logger.warning('Exception for model %s', model, exc_info=1)
                    raise
                storage.end_model()
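# Usage sketch for backup_instance(). The backup names 'class' and
# 'data_object' are illustrative assumptions; real names come from
# get_options_for_model(model).get_name().
# query_args = {
#     'class': [1, 2],     # back up only the objects with these ids
#     'data_object': [],   # store the model section with no rows
# }
# site.backup_instance(storage, instance, query_args=query_args)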
def _check_indexes(self, index_changes):
    db = get_instance_db(self.instance)
    cursor = connections[db].cursor()

    for index_type, index_op in index_changes.items():
        for index_op_type, field_names in index_op.items():
            should_exist = index_op_type == '+'

            for field_name in field_names:
                if isinstance(field_name, tuple):
                    field_name = field_name[0]

                index_name = 'data_klass_{}_{}_{}'.format(self.klass.id, index_type, field_name)
                cursor.execute(CHECK_INDEX_SQL, (index_name, self.instance.schema_name,))
                row = cursor.fetchone()
                exists = bool(row[0]) if row else False
                self.assertEqual(exists, should_exist)
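# Shape of index_changes accepted by _check_indexes(), inferred from the loops
# above (the index type and field names are illustrative):
# index_changes = {
#     'filter': {
#         '+': ['title', ('author', 'some_flag')],  # indexes that must exist
#         '-': ['old_field'],                       # indexes that must be gone
#     },
# }
# self._check_indexes(index_changes)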
def get_instance_migrations(self, instance):
    from django.db.migrations.loader import MigrationLoader

    db = get_instance_db(instance)
    with instance_context(instance):
        targets = [target for target in MigrationLoader(connections[db]).graph.leaf_nodes()
                   if target[0] in self.apps]
        return targets
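# Return shape: MigrationLoader leaf nodes are (app_label, migration_name)
# tuples, e.g. [('data', '0002_auto'), ('instances', '0001_initial')]
# (example values illustrative).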
def _get_current_db(self):
    instance = get_current_instance()
    if instance:
        db = get_instance_db(instance)
        # Check if we are in an atomic block for that connection
        if connections[db].in_atomic_block:
            return db
    return DEFAULT_DB_ALIAS
def run(self, klass_pk, **kwargs):
    db = get_instance_db(self.instance)
    cursor = connections[db].cursor()
    cursor.execute(SELECT_INDEX_SQL.format(klass_pk=klass_pk,
                                           schema_name=self.instance.schema_name))

    for row in cursor.fetchall():
        cursor.execute(DROP_INDEX_SQL.format(index_name=row[0], concurrently=''))
def save_object(self, obj):
    # Install socket(s) in database
    db = get_instance_db(get_current_instance())
    with transaction.atomic(db):
        # Process in reverse order so that sockets are installed FIFO
        for socket, dependencies, is_partial in self.socket_install['data'][::-1]:
            socket.zip_file = None
            if socket.id is None:
                socket.save()
            self.install_socket(socket, dependencies, partial=is_partial)
        super().save_object(obj)
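# Shape of self.socket_install['data'] inferred from the unpacking above
# (illustrative, not a documented contract): a list of
# (socket, dependencies, is_partial) tuples, appended in processing order
# and consumed here in reverse.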
def cleanup(cls, socket, installed_objects):
    # Skip cleanup for new sockets.
    if socket.pk is None:
        return

    installed_klasses = {klass.name for klass in installed_objects[Klass.__name__]}

    # Clean up classes that are no longer referenced.
    for class_name, field_dict in socket.old_value('installed').get(cls.yaml_type, {}).items():
        if class_name not in installed_klasses:
            unref_data_klass(socket.pk, class_name, field_dict,
                             using=get_instance_db(get_current_instance()))
def restore_to_instance(self, storage, instance, models_sorted, apps=None, partial=False):
    db = get_instance_db(instance)
    connection = connections[db]

    with instance_context(instance), transaction.atomic(using=db):
        if not partial:
            self.truncate_models(connection, models_sorted)
        for model in models_sorted:
            self.get_options_for_model(model).restore(storage, partial)
        self.reset_sequences(connection, models_sorted)
def dispatch(self, request, *args, **kwargs):
    """
    `.dispatch()` is pretty much the same as Django's regular dispatch,
    but with extra hooks for startup, finalize, and exception handling.
    """
    self.args = args
    self.kwargs = kwargs
    request = self.initialize_request(request, *args, **kwargs)
    self.request = request
    self.headers = self.default_response_headers  # deprecate?
    self.is_atomic = False

    try:
        self.initial(request, *args, **kwargs)

        # Get the appropriate handler method
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed

        force_atomic = getattr(handler, 'force_atomic', None)
        if force_atomic is None:
            force_atomic = request.method not in permissions.SAFE_METHODS

        if force_atomic:
            db = None
            # Model class router takes precedence
            model_class = getattr(self, 'model', None)
            if model_class:
                db = router.db_for_write(model_class)
            else:
                # Fall back to the instance db
                instance = getattr(request, 'instance', None)
                if instance:
                    db = get_instance_db(instance)

            with transaction.atomic(db):
                self.is_atomic = True
                response = handler(request, *args, **kwargs)
        else:
            response = handler(request, *args, **kwargs)
    except Exception as exc:
        response = self.handle_exception(exc)

    self.response = self.finalize_response(request, response, *args, **kwargs)
    return self.response
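# Hedged sketch: dispatch() reads a 'force_atomic' attribute off the bound
# handler to override the per-method default. The decorator below is one
# illustrative way to set it, not a helper confirmed by the source.
def force_atomic(value):
    def decorator(func):
        func.force_atomic = value  # picked up by getattr() in dispatch()
        return func
    return decorator

# Example (hypothetical view): run a write method without the atomic wrapper.
# class MyEndpoint(GenericAPIView):
#     @force_atomic(False)
#     def post(self, request, *args, **kwargs):
#         ...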
def update_class(self, klass):
    schema = copy.deepcopy(klass.schema)
    refs = klass.refs
    socket_pk = self.socket.pk

    # Validate schema first
    KlassSerializer(instance=klass, data={
        'schema': self.dependency['schema'],
        'ignored_target_classes': self.ignored_class_names()
    }, partial=True).is_valid(raise_exception=True)

    # Add field references.
    ref_fields = refs.get('fields', {})
    ref_props = refs.get('props', {})
    refs['fields'] = ref_fields
    refs['props'] = ref_props
    if 'managed_by' in refs and socket_pk not in refs['managed_by']:
        refs['managed_by'].append(socket_pk)

    # Check if klass is compatible
    dep_fields = {f['name']: f for f in self.dependency['schema']}
    self.merge_class_schema(schema, dep_fields, ref_fields, ref_props)

    # Clean up klass references.
    klass.schema = schema
    cleanup_data_klass_ref(klass, using=get_instance_db(get_current_instance()))

    metadata = klass.metadata
    metadata.update(self.dependency['metadata'])

    # Run final validation
    serializer = KlassSerializer(instance=klass, data={
        'schema': klass.schema,
        'ignored_target_classes': self.ignored_class_names(),
        'metadata': metadata,
    }, partial=True)
    serializer.is_valid(raise_exception=True)
    return serializer.save(refs=refs)
def db_for_model(self, model, for_read=False, instance=None, context=None, **hints):
    if model is self.TENANT_MODEL:
        if context == 'new':
            return INSTANCES_NEW_DB_ALIAS
        if context == 'contents':
            # As instance objects are cached, temporarily use getattr so it works
            # with objects that miss that field
            return getattr(instance, 'database', None) or INSTANCES_DB_ALIAS
    elif is_model_in_tenant_apps(model):
        return get_instance_db(get_current_instance(), for_read=for_read)
    return DEFAULT_DB_ALIAS
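# Routers like the one above are activated through Django's DATABASE_ROUTERS
# setting; the dotted path below is an assumption about where the class lives.
# settings.py:
# DATABASE_ROUTERS = ['apps.instances.routers.InstanceRouter']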
def run(self, klass_pk, op, op_args=None, **kwargs):
    db = get_instance_db(self.instance)
    with transaction.atomic(db):
        try:
            klass = Klass.objects.select_for_update().get(pk=klass_pk)
        except Klass.DoesNotExist:
            self.get_logger().warning(
                'Cannot process Klass[pk=%s] in %s as it no longer exists.',
                klass_pk, self.instance)
            return

        if klass.is_locked:
            raise self.retry()

        if op == 'delete':
            klass.delete()
        elif op == 'cleanup_refs':
            klass.cleanup_refs(save=True)
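# Scheduling sketch: tasks in this codebase are queued with .delay() (see
# DeleteFilesTask.delay below); the task class name 'KlassOperationTask' and
# the kwargs are illustrative assumptions.
# KlassOperationTask.delay(klass.pk, 'cleanup_refs', instance_pk=instance.pk)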
def setUp(self):
    self.instance = G(Instance, name='testtest')
    set_current_instance(self.instance)
    self.db = get_instance_db(self.instance)

    self.social_logger = self.logger_class()
    self.social_logger.backend = self.social_profile_class.BACKENDS.FACEBOOK
def process_dependencies(self, dependencies):
    db = get_instance_db(get_current_instance())
    # Open atomic blocks on both the instance db and the default db
    with transaction.atomic(db), transaction.atomic():
        SocketProcessorTask.install_socket(Socket(), dependencies)
def instance_post_delete_handler(sender, instance, **kwargs):
    db = get_instance_db(instance)
    drop_schema(connections[db], schema_name=instance.schema_name)
    DeleteFilesTask.delay(instance.get_storage_prefix(), all_buckets=True)
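# Wiring sketch: this handler reads as a post_delete receiver for Instance;
# the dispatch_uid value below is an illustrative assumption.
# from django.db.models.signals import post_delete
# post_delete.connect(instance_post_delete_handler, sender=Instance,
#                     dispatch_uid='instance_post_delete_handler')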
def process_data_object_index(instance, klass_pk, index_type, index_data, concurrently=True, create=True):
    field_column = None
    index_flags = None
    if create:
        field_name, field_column, field_type, index_flags = index_data
    else:
        field_name, field_type = index_data

    db = get_instance_db(instance)
    cursor = connections[db].cursor()

    index_flags = index_flags or {}
    unique = index_flags.get('unique', False)
    unique_keyword = 'UNIQUE' if unique else ''
    concurrently_keyword = CONCURRENTLY_KEYWORD if concurrently else ''

    index_data = INDEX_DATA[index_type]
    index_data = index_data.get(field_type) or index_data['default']

    for idx, index_info in enumerate(index_data):
        index_name = index_info['name'].format(klass_pk=klass_pk, field_name=field_name)
        if create:
            if field_column is None:
                # This should never happen
                raise RuntimeError(
                    'Called process_data_object_index with create=True yet field_column is None.'
                )  # pragma: no cover

            cursor.execute(CHECK_INDEX_SQL, (index_name, instance.schema_name))
            row = cursor.fetchone()
            if row:
                # Selects true when index is valid and ready
                if row[0]:
                    return
                # Otherwise drop it
                cursor.execute(DROP_INDEX_SQL.format(index_name=index_name,
                                                     concurrently=concurrently_keyword))

            index_using = index_info['using'].format(db_type=field_column)
            # Only the first index should be unique (and created non-concurrently)
            sql = CREATE_INDEX_SQL.format(
                index_name=index_name,
                concurrently='' if idx == 0 and unique else concurrently_keyword,
                index_using=index_using,
                unique=unique_keyword if idx == 0 else '')
            cursor.execute(sql, (klass_pk,))
        else:
            cursor.execute(DROP_INDEX_SQL.format(index_name=index_name,
                                                 concurrently=concurrently_keyword))
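# Call-shape sketch inferred from the unpacking above (values illustrative):
# with create=True, index_data is (field_name, field_column, field_type, index_flags);
# with create=False, it is (field_name, field_type).
# process_data_object_index(instance, klass.pk, 'filter',
#                           ('title', 'data001', 'string', {'unique': False}))
# process_data_object_index(instance, klass.pk, 'filter', ('title', 'string'),
#                           create=False)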
def restore_to_new_schema(self, storage, instance, partial=False):
    from apps.instances.models import Instance

    from .executor import BackupMigrationExecutor

    db = get_instance_db(instance)
    con = connections[db]
    stored_targets = self.get_stored_migration_targets(storage)
    new_instance = Instance(owner=instance.owner,
                            name="_%s" % generate_key(),
                            schema_name="%s_%s_%s" % (instance.id, self.id, instance.name),
                            database=instance.database)

    # If there are no stored migrations, sync_schema on create
    try:
        new_instance.save(sync_schema=not stored_targets)
        new_instance.storage_prefix = "%s_%s" % (instance.pk, new_instance.pk)
        new_instance.save()

        apps = None
        if stored_targets:
            with instance_context(new_instance):
                executor = BackupMigrationExecutor(con)
                state = executor.migrate(stored_targets)
                apps = state.apps

        if partial:
            signals.post_tenant_migrate.send(
                sender=new_instance,
                tenant=new_instance,
                using=con.alias,
                created=True,
                partial=True,
            )

        models_sorted = self.calculate_sorted(apps)
        self.restore_to_instance(storage, new_instance, models_sorted, apps, partial=partial)

        # Upgrade schema to the current version. migrate will detect that this
        # instance is already created and fast-forward it to the current project
        # state. It will not fire post_migrate signals, but it will fire
        # post_tenant_migrate with schema_created=False. Initial data will be
        # fed from migrations (the common way we do it in this project).
        if stored_targets:
            call_command('migrate',
                         shared=False,
                         schema_name=new_instance.schema_name,
                         interactive=False,
                         verbosity=settings.SCHEMA_MIGRATIONS_VERBOSITY)

        # Swap prefixes. When new_instance is deleted, old instance files will also be deleted.
        instance.storage_prefix, new_instance.storage_prefix = (
            new_instance.get_storage_prefix(), instance.get_storage_prefix())
        # Swap schemas
        instance.schema_name, new_instance.schema_name = (
            new_instance.schema_name, instance.schema_name)
        instance.save()
        new_instance.save()
    finally:
        new_instance.delete()