Example #1
    def setup_method(self, method):
        super().setup_method(method)
        self.config = self.config_template({
            "backup_sites": {
                self.test_site: {
                    "basebackup_count": 1,
                    "basebackup_interval_hours": 1,
                    "nodes": [
                        {
                            "host": "127.0.0.4",
                        },
                    ],
                },
            },
        })
        config_path = os.path.join(self.temp_dir, "pghoard.json")
        write_json_file(config_path, self.config)

        self.pghoard = PGHoard(config_path)
        # This is the "final storage location" when using "local" storage type
        self.local_storage_dir = os.path.join(
            self.config["backup_sites"][self.test_site]["object_storage"]["directory"],
            self.test_site,
        )

        self.real_check_pg_server_version = self.pghoard.check_pg_server_version
        self.pghoard.check_pg_server_version = Mock(return_value=90404)
        self.real_check_pg_versions_ok = self.pghoard.check_pg_versions_ok
        self.pghoard.check_pg_versions_ok = Mock(return_value=True)
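
A matching teardown is not part of this excerpt; a minimal sketch of one, assuming the base class's teardown_method removes temp_dir (the attributes are the ones saved above, and pghoard.quit() appears in Examples #3 and #4):

    def teardown_method(self, method):
        # Restore the real version checks that setup_method replaced with Mocks
        self.pghoard.check_pg_server_version = self.real_check_pg_server_version
        self.pghoard.check_pg_versions_ok = self.real_check_pg_versions_ok
        # Stop PGHoard's threads before the base class removes the temp directory
        self.pghoard.quit()
        super().teardown_method(method)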
Example #2
    def setup_method(self, method):
        super().setup_method(method)
        self.config = self.config_template()
        self.config["backup_sites"][self.test_site].update({
            "basebackup_count":
            1,
            "basebackup_interval_hours":
            1,
            "nodes": [{
                "host": "127.0.0.4"
            }],
        })
        config_path = os.path.join(self.temp_dir, "pghoard.json")
        write_json_file(config_path, self.config)
        os.makedirs(self.config["alert_file_dir"], exist_ok=True)

        backup_site_path = os.path.join(self.config["backup_location"],
                                        self.test_site)
        self.compressed_xlog_path = os.path.join(backup_site_path, "xlog")
        os.makedirs(self.compressed_xlog_path)
        self.basebackup_path = os.path.join(backup_site_path, "basebackup")
        os.makedirs(self.basebackup_path)
        self.pghoard = PGHoard(config_path)
        self.real_check_pg_server_version = self.pghoard.check_pg_server_version
        self.pghoard.check_pg_server_version = Mock(return_value=90404)
        self.real_check_pg_versions_ok = self.pghoard.check_pg_versions_ok
        self.pghoard.check_pg_versions_ok = Mock(return_value=True)
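
With this setup the archive directories already exist on disk, so a test can assert on or populate them directly; a minimal, hypothetical sketch (the test name and assertions are illustrative, not from the real suite):

    def test_archive_dirs_created(self):
        # setup_method pre-created both local archive directories
        assert os.path.isdir(self.compressed_xlog_path)
        assert os.path.isdir(self.basebackup_path)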
Example #3
@pytest.fixture
def pghoard(db, tmpdir, request):  # pylint: disable=redefined-outer-name
    test_site = request.function.__name__

    if os.environ.get("pghoard_test_walreceiver"):
        active_backup_mode = "walreceiver"
    else:
        active_backup_mode = "pg_receivexlog"

    config = {
        "alert_file_dir": os.path.join(str(tmpdir), "alerts"),
        "backup_location": os.path.join(str(tmpdir), "backupspool"),
        "backup_sites": {
            test_site: {
                "active_backup_mode": active_backup_mode,
                "basebackup_count": 2,
                "basebackup_interval_hours": 24,
                "pg_bin_directory": db.pgbin,
                "pg_data_directory": db.pgdata,
                "nodes": [db.user],
                "object_storage": {
                    "storage_type": "local",
                    "directory": os.path.join(str(tmpdir), "backups"),
                },
            },
        },
        "compression": {
            "algorithm": "snappy" if snappy else "lzma",
        },
        "http_address": "127.0.0.1",
        "http_port": random.randint(1024, 32000),
        "json_state_file_path": tmpdir.join("pghoard_state.json").strpath,
        "maintenance_mode_file": tmpdir.join("maintenance_mode_file").strpath,
    }

    confpath = os.path.join(str(tmpdir), "config.json")
    with open(confpath, "w") as fp:
        json.dump(config, fp)

    backup_site_path = os.path.join(config["backup_location"], test_site)
    basebackup_path = os.path.join(backup_site_path, "basebackup")
    backup_xlog_path = os.path.join(backup_site_path, "xlog")
    backup_timeline_path = os.path.join(backup_site_path, "timeline")

    os.makedirs(config["alert_file_dir"])
    os.makedirs(basebackup_path)
    os.makedirs(backup_xlog_path)
    os.makedirs(backup_timeline_path)

    pgh = PGHoard(confpath)
    pgh.test_site = test_site
    pgh.start_threads_on_startup()
    if snappy:
        pgh.Compressor = snappy.StreamCompressor
    else:
        pgh.Compressor = lambda: lzma.LZMACompressor(preset=0)  # pylint: disable=redefined-variable-type

    time.sleep(0.05)  # Hack to give the server time to start up
    yield pgh
    pgh.quit()
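
Because the fixture yields a started PGHoard instance and quits it afterwards, a test receives a live daemon simply by naming the fixture as a parameter; a hypothetical sketch (the assertion is illustrative; PGHoard keeps its parsed configuration on self.config):

def test_pghoard_site_configured(pghoard):  # pylint: disable=redefined-outer-name
    # test_site was attached to the instance by the fixture above
    assert pghoard.test_site in pghoard.config["backup_sites"]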
Example #4
def pghoard_base(
        db,
        tmpdir,
        request,
        compression="snappy",  # pylint: disable=redefined-outer-name
        transfer_count=None,
        metrics_cfg=None):
    test_site = request.function.__name__

    if os.environ.get("pghoard_test_walreceiver"):
        active_backup_mode = "walreceiver"
    else:
        active_backup_mode = "pg_receivexlog"

    if compression == "snappy" and not snappy:
        compression = "lzma"

    config = {
        "alert_file_dir": os.path.join(str(tmpdir), "alerts"),
        "backup_location": os.path.join(str(tmpdir), "backupspool"),
        "backup_sites": {
            test_site: {
                "active_backup_mode": active_backup_mode,
                "basebackup_count": 2,
                "basebackup_interval_hours": 24,
                "pg_bin_directory": db.pgbin,
                "pg_data_directory": db.pgdata,
                "nodes": [db.user],
                "object_storage": {
                    "storage_type": "local",
                    "directory": os.path.join(str(tmpdir), "backups"),
                },
            },
        },
        "compression": {
            "algorithm": compression,
        },
        "http_address": "127.0.0.1",
        "http_port": random.randint(1024, 32000),
        "json_state_file_path": tmpdir.join("pghoard_state.json").strpath,
        "maintenance_mode_file": tmpdir.join("maintenance_mode_file").strpath,
        # Set the process count to 1 to avoid launching subprocesses during basebackup
        # tests. The new processes would be created with fork, which doesn't work
        # properly because of all the fds and other state created during typical test
        # setup. A separate test case exercises the multiprocess version.
        "restore_process_count": 1,
        "tar_executable": "tar",
    }

    if metrics_cfg is not None:
        config.update(metrics_cfg)

    if transfer_count is not None:
        config["transfer"] = {"thread_count": transfer_count}

    confpath = os.path.join(str(tmpdir), "config.json")
    with open(confpath, "w") as fp:
        json.dump(config, fp)

    backup_site_path = os.path.join(config["backup_location"], test_site)
    basebackup_path = os.path.join(backup_site_path, "basebackup")
    backup_xlog_path = os.path.join(backup_site_path, "xlog")
    backup_timeline_path = os.path.join(backup_site_path, "timeline")

    os.makedirs(config["alert_file_dir"])
    os.makedirs(basebackup_path)
    os.makedirs(backup_xlog_path)
    os.makedirs(backup_timeline_path)

    pgh = PGHoard(confpath)
    pgh.test_site = test_site
    pgh.start_threads_on_startup()
    if compression == "snappy":
        pgh.Compressor = snappy.StreamCompressor
    else:
        pgh.Compressor = lambda: lzma.LZMACompressor(preset=0)  # pylint: disable=redefined-variable-type

    time.sleep(0.05)  # Hack to give the server time to start up
    yield pgh
    pgh.quit()
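
Unlike Example #3, pghoard_base is a plain generator rather than a fixture, so concrete fixtures can wrap it with different parameters via yield from; a sketch of that pattern (the fixture name is illustrative):

@pytest.fixture
def pghoard_lzma(db, tmpdir, request):  # pylint: disable=redefined-outer-name
    # Same environment as the defaults above, but with lzma compression forced
    yield from pghoard_base(db, tmpdir, request, compression="lzma")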