def _get_db_aliases_to_query():
    """Return the DB aliases that form queries should run against.

    When partitioning is disabled this is ``[None]`` (Django's default
    database); otherwise it is the configured form-processing shard DBs.
    """
    if not settings.USE_PARTITIONED_DATABASE:
        # use the default database
        return [None]

    from corehq.sql_db.config import PartitionConfig
    return PartitionConfig().get_form_processing_dbs()
def setUpClass(cls):
    """Skip the sharding test suite unless a partitioned DB is configured.

    Raises SkipTest (rather than relying on class-level skip decorators)
    because of https://github.com/nose-devs/nose/issues/946.
    """
    super(ShardingTests, cls).setUpClass()
    if not settings.USE_PARTITIONED_DATABASE:
        raise SkipTest('Only applicable if sharding is setup')
    # NOTE(review): 'partion_config' is a typo ('partition'), but it is the
    # attribute name other tests read, so it is preserved for compatibility.
    config = PartitionConfig()
    cls.partion_config = config
    # The suite only makes sense when there is more than one shard DB.
    assert len(config.get_form_processing_dbs()) > 1
def _get_existing_cluster_config(cluster_name):
    """Look up the pg_foreign_server row for *cluster_name*.

    Returns the first matching row as a namedtuple, or ``None`` (implicitly)
    when no foreign server with that name exists on the proxy DB.
    """
    proxy_alias = PartitionConfig().get_proxy_db()
    with connections[proxy_alias].cursor() as cursor:
        # Parameterized query: srvname comes from caller input.
        cursor.execute(
            'SELECT * from pg_foreign_server where srvname = %s',
            [cluster_name],
        )
        rows = list(fetchall_as_namedtuple(cursor))
    if rows:
        return rows[0]
def _update_pl_proxy_cluster(existing_config, verbose):
    """Diff the live pl_proxy shard config against settings and apply updates.

    Compares the shards currently defined on the foreign server
    (*existing_config*) with the shards in ``PartitionConfig`` and, after
    interactive confirmation, issues ALTER SERVER statements for any that
    differ.  Prints progress; ``verbose`` additionally echoes the SQL.
    """
    existing_shards = _get_current_shards(existing_config)
    config = PartitionConfig()
    new_shard_configs = config.get_shards()

    shards_to_update = get_shards_to_update(existing_shards, new_shard_configs)

    # NOTE: single-argument print() calls behave identically under Python 2
    # (where the original used print statements) and are Python 3 compatible.
    if not shards_to_update:
        print('No changes. Exiting.')
        return

    print("Shards to update:")
    existing_shards_by_id = {shard.id: shard for shard in existing_shards}
    for new in shards_to_update:
        print("  {} -> {}".format(
            existing_shards_by_id[new.id].get_server_option_string(),
            new.get_server_option_string()
        ))

    if _confirm("Update these shards?"):
        alter_sql = _get_alter_server_sql(shards_to_update)
        if verbose:
            print(alter_sql)
        with connections[config.get_proxy_db()].cursor() as cursor:
            cursor.execute(alter_sql)
    else:
        print('Abort')
def __init__(self, model_filter_attribute):
    """Record the filter attribute and resolve the list of DBs to operate on.

    ``self.db_list`` is ``[None]`` (Django default DB) when partitioning is
    off, otherwise the configured form-processing shard databases.
    """
    self.model_filter_attribute = model_filter_attribute
    if settings.USE_PARTITIONED_DATABASE:
        from corehq.sql_db.config import PartitionConfig
        self.db_list = PartitionConfig().get_form_processing_dbs()
    else:
        # use the default database
        self.db_list = [None]
def create_update_pl_proxy_config():
    """Build the migration that (re)creates the pl_proxy server config.

    Only active for unit tests on a partitioned database; otherwise returns
    a no-op migration.  The reverse SQL drops the foreign server again.
    """
    if not (settings.UNIT_TESTING and settings.USE_PARTITIONED_DATABASE):
        return noop_migration()

    forward_sql = '\n'.join([
        get_pl_proxy_server_config_sql(PartitionConfig().get_shards()),
        get_user_mapping_sql(),
    ])
    reverse_sql = get_drop_server_sql()
    return HqRunSQL(forward_sql, reverse_sql)
def create_pl_proxy_cluster(verbose=False, drop_existing=False):
    """Create the pl_proxy foreign server and user mapping on the proxy DB.

    :param verbose: echo the SQL being executed
    :param drop_existing: drop any existing foreign server first
    """
    config = PartitionConfig()
    proxy_db = config.get_proxy_db()

    if drop_existing:
        with connections[proxy_db].cursor() as cursor:
            cursor.execute(get_drop_server_sql())

    config_sql = get_pl_proxy_server_config_sql(config.get_shards())
    user_mapping_sql = get_user_mapping_sql()

    # NOTE: single-argument print() calls behave identically under Python 2
    # (where the original used print statements) and are Python 3 compatible.
    if verbose:
        print('Running SQL')
        print(config_sql)
        print(user_mapping_sql)

    with connections[proxy_db].cursor() as cursor:
        cursor.execute(config_sql)
        cursor.execute(user_mapping_sql)
def get_user_mapping_sql():
    """Render the CREATE USER MAPPING SQL for the pl_proxy cluster.

    Fills ``USER_MAPPING_TEMPLATE`` with the proxy database's connection
    settings plus the configured cluster name as ``server_name``.
    """
    proxy_alias = PartitionConfig().get_proxy_db()
    # Copy so the cluster name isn't written back into settings.DATABASES.
    template_params = settings.DATABASES[proxy_alias].copy()
    template_params['server_name'] = settings.PL_PROXY_CLUSTER_NAME
    return USER_MAPPING_TEMPLATE.format(**template_params)
def _get_db_list_to_query():
    """Return the DB aliases to query: shard DBs when partitioned, else the
    default database (``[None]``)."""
    if not settings.USE_PARTITIONED_DATABASE:
        return [None]
    return PartitionConfig().get_form_processing_dbs()