# Imports reconstructed so the fixtures below are self-contained; the PGHoard
# module path is assumed from the pghoard source tree layout.
import json
import lzma
import os
import random
import time

import pytest

from pghoard.pghoard import PGHoard

# python-snappy is an optional dependency; fall back to lzma when it is missing
try:
    import snappy
except ImportError:
    snappy = None


# The @pytest.fixture decorator is assumed here: the function yields the
# running PGHoard instance and cleans it up afterwards, the standard pytest
# generator-fixture pattern.
@pytest.fixture
def pghoard(db, tmpdir, request):  # pylint: disable=redefined-outer-name
    test_site = request.function.__name__
    if os.environ.get("pghoard_test_walreceiver"):
        active_backup_mode = "walreceiver"
    else:
        active_backup_mode = "pg_receivexlog"
    config = {
        "alert_file_dir": os.path.join(str(tmpdir), "alerts"),
        "backup_location": os.path.join(str(tmpdir), "backupspool"),
        "backup_sites": {
            test_site: {
                "active_backup_mode": active_backup_mode,
                "basebackup_count": 2,
                "basebackup_interval_hours": 24,
                "pg_bin_directory": db.pgbin,
                "pg_data_directory": db.pgdata,
                "nodes": [db.user],
                "object_storage": {
                    "storage_type": "local",
                    "directory": os.path.join(str(tmpdir), "backups"),
                },
            },
        },
        "compression": {
            "algorithm": "snappy" if snappy else "lzma",
        },
        "http_address": "127.0.0.1",
        "http_port": random.randint(1024, 32000),
        "json_state_file_path": tmpdir.join("pghoard_state.json").strpath,
        "maintenance_mode_file": tmpdir.join("maintenance_mode_file").strpath,
    }
    confpath = os.path.join(str(tmpdir), "config.json")
    with open(confpath, "w") as fp:
        json.dump(config, fp)

    backup_site_path = os.path.join(config["backup_location"], test_site)
    basebackup_path = os.path.join(backup_site_path, "basebackup")
    backup_xlog_path = os.path.join(backup_site_path, "xlog")
    backup_timeline_path = os.path.join(backup_site_path, "timeline")
    os.makedirs(config["alert_file_dir"])
    os.makedirs(basebackup_path)
    os.makedirs(backup_xlog_path)
    os.makedirs(backup_timeline_path)

    pgh = PGHoard(confpath)
    pgh.test_site = test_site
    pgh.start_threads_on_startup()
    if snappy:
        pgh.Compressor = snappy.StreamCompressor
    else:
        pgh.Compressor = lambda: lzma.LZMACompressor(preset=0)  # pylint: disable=redefined-variable-type
    time.sleep(0.05)  # Hack to give the server time to start up
    yield pgh
    pgh.quit()
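# A minimal sketch of how a test consumes the fixture above: pytest injects
# the running PGHoard instance, and the test reaches its per-test site via
# pgh.test_site. This assumes PGHoard exposes its loaded configuration as a
# .config dict; the test name itself is illustrative, not part of the
# original suite.
def test_pghoard_sets_up_site(pghoard):  # pylint: disable=redefined-outer-name
    site_config = pghoard.config["backup_sites"][pghoard.test_site]
    assert site_config["object_storage"]["storage_type"] == "local"
    # The fixture pre-created the spool directories for this site
    backup_xlog_path = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "xlog")
    assert os.path.exists(backup_xlog_path)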
def pghoard_base(db, tmpdir, request, compression="snappy",  # pylint: disable=redefined-outer-name
                 transfer_count=None, metrics_cfg=None):
    test_site = request.function.__name__
    if os.environ.get("pghoard_test_walreceiver"):
        active_backup_mode = "walreceiver"
    else:
        active_backup_mode = "pg_receivexlog"
    if compression == "snappy" and not snappy:
        compression = "lzma"
    config = {
        "alert_file_dir": os.path.join(str(tmpdir), "alerts"),
        "backup_location": os.path.join(str(tmpdir), "backupspool"),
        "backup_sites": {
            test_site: {
                "active_backup_mode": active_backup_mode,
                "basebackup_count": 2,
                "basebackup_interval_hours": 24,
                "pg_bin_directory": db.pgbin,
                "pg_data_directory": db.pgdata,
                "nodes": [db.user],
                "object_storage": {
                    "storage_type": "local",
                    "directory": os.path.join(str(tmpdir), "backups"),
                },
            },
        },
        "compression": {
            "algorithm": compression,
        },
        "http_address": "127.0.0.1",
        "http_port": random.randint(1024, 32000),
        "json_state_file_path": tmpdir.join("pghoard_state.json").strpath,
        "maintenance_mode_file": tmpdir.join("maintenance_mode_file").strpath,
        # Set process count to 1 to avoid launching subprocesses during basebackup tests.
        # The new processes would be created with fork, which doesn't work properly due to
        # all the fds and other things that are created during typical test setup. There
        # is a separate test case that executes the multiprocess version.
        "restore_process_count": 1,
        "tar_executable": "tar",
    }
    if metrics_cfg is not None:
        config.update(metrics_cfg)
    if transfer_count is not None:
        config["transfer"] = {"thread_count": transfer_count}

    confpath = os.path.join(str(tmpdir), "config.json")
    with open(confpath, "w") as fp:
        json.dump(config, fp)

    backup_site_path = os.path.join(config["backup_location"], test_site)
    basebackup_path = os.path.join(backup_site_path, "basebackup")
    backup_xlog_path = os.path.join(backup_site_path, "xlog")
    backup_timeline_path = os.path.join(backup_site_path, "timeline")
    os.makedirs(config["alert_file_dir"])
    os.makedirs(basebackup_path)
    os.makedirs(backup_xlog_path)
    os.makedirs(backup_timeline_path)

    pgh = PGHoard(confpath)
    pgh.test_site = test_site
    pgh.start_threads_on_startup()
    if compression == "snappy":
        pgh.Compressor = snappy.StreamCompressor
    else:
        pgh.Compressor = lambda: lzma.LZMACompressor(preset=0)  # pylint: disable=redefined-variable-type
    time.sleep(0.05)  # Hack to give the server time to start up
    yield pgh
    pgh.quit()
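# pghoard_base is a plain generator helper rather than a fixture, so variants
# can be derived from it with "yield from": the derived fixture forwards its
# own fixture arguments plus whatever compression / transfer_count /
# metrics_cfg overrides it needs, and teardown (pgh.quit) still runs when the
# fixture is finalized. A minimal sketch of such a derived fixture; the
# "pghoard_lzma" name is an illustrative assumption, not necessarily a fixture
# from the original suite.
@pytest.fixture(name="pghoard_lzma")
def fixture_pghoard_lzma(db, tmpdir, request):  # pylint: disable=redefined-outer-name
    yield from pghoard_base(db, tmpdir, request, compression="lzma")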