def test_save_timed_schedule_instance(self):
    self.assertEqual(TimedScheduleInstance.objects.using(partition_config.get_proxy_db()).count(), 0)
    self.assertEqual(TimedScheduleInstance.objects.using(self.db1).count(), 0)
    self.assertEqual(TimedScheduleInstance.objects.using(self.db2).count(), 0)

    instance = self.make_timed_schedule_instance(self.p2_uuid)
    save_timed_schedule_instance(instance)

    self.assertEqual(TimedScheduleInstance.objects.using(partition_config.get_proxy_db()).count(), 0)
    self.assertEqual(TimedScheduleInstance.objects.using(self.db1).count(), 0)
    self.assertEqual(TimedScheduleInstance.objects.using(self.db2).count(), 1)
def _update_pl_proxy_cluster(existing_config, verbose):
    existing_shards = _get_current_shards(existing_config)
    new_shard_configs = partition_config.get_shards()

    if verbose:
        print('{0} Existing config {0}'.format('-' * 42))
        print(existing_config)
        print('-' * 100)

    shards_to_update = get_shards_to_update(existing_shards, new_shard_configs)

    if not shards_to_update:
        print('No changes. Exiting.')
    else:
        print("Shards to update:")
        existing_shards_by_id = {shard.id: shard for shard in existing_shards}
        for new in shards_to_update:
            print(" {} -> {}".format(
                existing_shards_by_id[new.id].get_server_option_string(),
                new.get_server_option_string()
            ))
        if _confirm("Update these shards?"):
            alter_sql = _get_alter_server_sql(shards_to_update)
            if verbose:
                print(alter_sql)

            with connections[partition_config.get_proxy_db()].cursor() as cursor:
                cursor.execute(alter_sql)
        else:
            print('Abort')
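# Hypothetical sketch (not from the source) of the contract get_shards_to_update
# is assumed to fulfil above: return the new shard configs whose server option
# string differs from the existing shard registered under the same id. The real
# helper lives elsewhere in the codebase; this only illustrates how the
# existing_shards_by_id comparison printed above could be produced.
def get_shards_to_update_sketch(existing_shards, new_shard_configs):
    existing_by_id = {shard.id: shard for shard in existing_shards}
    return [
        new for new in new_shard_configs
        if new.get_server_option_string() != existing_by_id[new.id].get_server_option_string()
    ]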
def get_objects_to_dump(domain, excludes):
    """
    :param domain: domain name to filter with
    :param excludes: list of app labels or model labels to exclude from the dump
    :return: generator yielding model objects
    """
    excluded_apps, excluded_models = get_excluded_apps_and_models(excludes)
    app_config_models = _get_app_list(excluded_apps)

    # Collate the objects to be serialized.
    for model in serializers.sort_dependencies(app_config_models.items()):
        if model in excluded_models:
            continue

        using = router.db_for_read(model)
        if settings.USE_PARTITIONED_DATABASE and using == partition_config.get_proxy_db():
            using = partition_config.get_form_processing_dbs()
        else:
            using = [using]

        for db_alias in using:
            if not model._meta.proxy and router.allow_migrate_model(db_alias, model):
                objects = model._default_manager
                queryset = objects.using(db_alias).order_by(model._meta.pk.name)

                filters = get_model_domain_filters(model, domain)
                for filter in filters:
                    for obj in queryset.filter(filter).iterator():
                        yield obj
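# Illustrative usage sketch, not part of the source: stream every object that
# get_objects_to_dump yields for a domain to a file-like stream as JSON.
# The excludes values and the function name here are made-up examples.
from django.core import serializers

def dump_domain_to_stream(domain, stream):
    objects = get_objects_to_dump(domain, excludes=['sessions', 'admin.LogEntry'])
    serializers.serialize('json', objects, stream=stream)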
def _get_existing_cluster_config(cluster_name):
    proxy_db = partition_config.get_proxy_db()
    with connections[proxy_db].cursor() as cursor:
        cursor.execute('SELECT * from pg_foreign_server where srvname = %s', [cluster_name])
        results = list(fetchall_as_namedtuple(cursor))
        if results:
            return results[0]
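# Hypothetical orchestration sketch tying the helpers in this section together:
# look up the existing pl_proxy foreign server and either update its shard
# options or create the cluster from scratch. sync_pl_proxy_cluster is an
# invented name; the helpers it calls are the ones defined here.
from django.conf import settings

def sync_pl_proxy_cluster(verbose=False):
    existing_config = _get_existing_cluster_config(settings.PL_PROXY_CLUSTER_NAME)
    if existing_config:
        _update_pl_proxy_cluster(existing_config, verbose)
    else:
        create_pl_proxy_cluster(verbose=verbose)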
def get_all_model_querysets_for_domain(model_class, domain):
    using = router.db_for_read(model_class)
    if settings.USE_PARTITIONED_DATABASE and using == partition_config.get_proxy_db():
        using = partition_config.get_form_processing_dbs()
    else:
        using = [using]

    for db_alias in using:
        if not model_class._meta.proxy and router.allow_migrate_model(db_alias, model_class):
            objects = model_class._default_manager
            queryset = objects.using(db_alias).order_by(model_class._meta.pk.name)

            filters = get_model_domain_filters(model_class, domain)
            for filter in filters:
                yield model_class, queryset.filter(filter)
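# Illustrative usage sketch (not from the source): count rows per model for a
# domain across whichever databases the generator above resolves to.
from collections import Counter

def count_domain_objects(model_class, domain):
    counts = Counter()
    for model, queryset in get_all_model_querysets_for_domain(model_class, domain):
        counts[model.__name__] += queryset.count()
    return counts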
def _group_objects_by_db(objects):
    """
    :param objects: Deserialized object dictionaries
    :return: List of tuples of (db_alias, [object,...])
    """
    objects_by_db = defaultdict(list)
    for obj in objects:
        app_label = obj['model']
        model = apps.get_model(app_label)
        db_alias = router.db_for_write(model)
        if settings.USE_PARTITIONED_DATABASE and db_alias == partition_config.get_proxy_db():
            doc_id = _get_doc_id(app_label, obj)
            db_alias = ShardAccessor.get_database_for_doc(doc_id)
        objects_by_db[db_alias].append(obj)
    return list(objects_by_db.items())
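# Hypothetical sketch of the loading side: deserialize each per-database batch
# returned by _group_objects_by_db and save it into the database it was routed
# to. Assumes the dictionaries use Django's "python" serializer format, which
# is consistent with the 'model' key read above.
from django.core import serializers

def load_grouped_objects(objects):
    for db_alias, group in _group_objects_by_db(objects):
        for deserialized in serializers.deserialize('python', group, using=db_alias):
            deserialized.save(using=db_alias)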
def get_all_model_iterators_builders_for_domain(model_class, domain, limit_to_db=None):
    using = router.db_for_read(model_class)
    if settings.USE_PARTITIONED_DATABASE and using == partition_config.get_proxy_db():
        using = partition_config.get_form_processing_dbs()
    else:
        using = [using]

    if limit_to_db:
        if limit_to_db not in using:
            raise DomainDumpError('DB specified is not valid for '
                                  'model class: {} not in {}'.format(limit_to_db, using))
        using = [limit_to_db]

    for db_alias in using:
        if not model_class._meta.proxy and router.allow_migrate_model(db_alias, model_class):
            iterator_builder = APP_LABELS_WITH_FILTER_KWARGS_TO_DUMP[get_model_label(model_class)]
            yield model_class, iterator_builder.build(domain, model_class, db_alias)
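# Illustrative usage sketch (not from the source): dump a single shard's worth
# of a partitioned model, re-raising the DomainDumpError above with a clearer
# message when the alias does not belong to the model.
def iterator_builders_for_shard(model_class, domain, shard_db):
    try:
        return list(get_all_model_iterators_builders_for_domain(
            model_class, domain, limit_to_db=shard_db))
    except DomainDumpError:
        raise DomainDumpError('{} cannot be dumped from {}'.format(
            get_model_label(model_class), shard_db))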
def create_pl_proxy_cluster(verbose=False, drop_existing=False):
    proxy_db = partition_config.get_proxy_db()

    if drop_existing:
        with connections[proxy_db].cursor() as cursor:
            cursor.execute(get_drop_server_sql())

    config_sql = get_pl_proxy_server_config_sql(partition_config.get_shards())
    user_mapping_sql = get_user_mapping_sql()

    if verbose:
        print('Running SQL')
        print(config_sql)
        print(user_mapping_sql)

    with connections[proxy_db].cursor() as cursor:
        cursor.execute(config_sql)
        cursor.execute(user_mapping_sql)
def test_models_are_located_in_correct_dbs(self, app_label, is_partitioned):
    main_db = partition_config.get_main_db()
    proxy_db = partition_config.get_proxy_db()
    partitioned_dbs = partition_config.get_form_processing_dbs()

    for model_class in self.get_models(app_label):
        if is_partitioned:
            # models do not exist in main db
            self.assertModelDoesNotExist(model_class, main_db)
            # models exist in partitioned dbs
            for db in ([proxy_db] + partitioned_dbs):
                self.assertModelExists(model_class, db)
        else:
            # models exist in main db
            self.assertModelExists(model_class, main_db)
            # models do not exist in partitioned dbs
            for db in ([proxy_db] + partitioned_dbs):
                self.assertModelDoesNotExist(model_class, db)
def test_models_are_located_in_correct_dbs(self):
    main_db = partition_config.get_main_db()
    proxy_db = partition_config.get_proxy_db()
    partitioned_dbs = partition_config.get_form_processing_dbs()

    for model_class in self.get_scheduling_models():
        # scheduling models exist in main db
        self.assertModelExists(model_class, main_db)
        # scheduling models do not exist in partitioned dbs
        for db in ([proxy_db] + partitioned_dbs):
            self.assertModelDoesNotExist(model_class, db)

    for model_class in self.get_scheduling_partitioned_models():
        # scheduling partitioned models do not exist in main db
        self.assertModelDoesNotExist(model_class, main_db)
        # scheduling partitioned models exist in partitioned dbs
        for db in ([proxy_db] + partitioned_dbs):
            self.assertModelExists(model_class, db)
def get_user_mapping_sql():
    proxy_db = partition_config.get_proxy_db()
    proxy_db_config = settings.DATABASES[proxy_db].copy()
    proxy_db_config['server_name'] = settings.PL_PROXY_CLUSTER_NAME
    return USER_MAPPING_TEMPLATE.format(**proxy_db_config)
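# Assumption, not the actual template from the source: USER_MAPPING_TEMPLATE is
# presumably a PostgreSQL statement formatted with the proxy database's Django
# settings (USER, PASSWORD, ...) plus the injected server_name, along these lines.
USER_MAPPING_TEMPLATE_SKETCH = """
    CREATE USER MAPPING FOR {USER} SERVER {server_name}
    OPTIONS (user '{USER}', password '{PASSWORD}');
"""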