Example #1
0
def create_pgpass_file(connection_string_or_info):
    """Look up password from the given object which can be a dict or a
    string and write a possible password in a pgpass file;
    returns a connection_string without a password in it"""
    info = pgutil.get_connection_info(connection_string_or_info)
    if "password" not in info:
        # Nothing to store; just return a normalized connection string
        return pgutil.create_connection_string(info)
    # pgpass line format is host:port:dbname:user:password
    linekey = "{host}:{port}:{dbname}:{user}:".format(
        host=info.get("host", "localhost"),
        port=info.get("port", 5432),
        user=info.get("user", ""),
        dbname=info.get("dbname", "*"))
    # pop() removes the password so the returned connection string won't contain it
    pwline = "{linekey}{password}".format(linekey=linekey, password=info.pop("password"))
    # expanduser() is robust when HOME is unset; os.environ.get("HOME") could
    # return None and make os.path.join raise TypeError
    pgpass_path = os.path.join(os.path.expanduser("~"), ".pgpass")
    if os.path.exists(pgpass_path):
        with open(pgpass_path, "r") as fp:
            pgpass_lines = fp.read().splitlines()
    else:
        pgpass_lines = []
    if pwline in pgpass_lines:
        LOG.debug("Not adding authentication data to: %s since it's already there", pgpass_path)
    else:
        # filter out any existing lines with our linekey and add the new line
        pgpass_lines = [line for line in pgpass_lines if not line.startswith(linekey)] + [pwline]
        content = "\n".join(pgpass_lines) + "\n"
        with open(pgpass_path, "w") as fp:
            # Tighten permissions before writing: libpq ignores a pgpass file
            # that is readable by group/other
            os.fchmod(fp.fileno(), 0o600)
            fp.write(content)
        # NOTE(review): this logs the password-bearing line at debug level --
        # confirm debug logs are not shipped anywhere sensitive
        LOG.debug("Wrote %r to %r", pwline, pgpass_path)
    return pgutil.create_connection_string(info)
Example #2
0
def create_pgpass_file(connection_string_or_info):
    """Look up password from the given object which can be a dict or a
    string and write a possible password in a pgpass file;
    returns a connection_string without a password in it"""
    info = pgutil.get_connection_info(connection_string_or_info)
    if "password" not in info:
        # Nothing to store; just return a normalized connection string
        return pgutil.create_connection_string(info)
    # pgpass line format is host:port:dbname:user:password
    linekey = "{host}:{port}:{dbname}:{user}:".format(
        host=info.get("host", "localhost"),
        port=info.get("port", 5432),
        user=info.get("user", ""),
        dbname=info.get("dbname", "*"))
    # pop() removes the password so the returned connection string won't contain it
    pwline = "{linekey}{password}".format(linekey=linekey,
                                          password=info.pop("password"))
    # expanduser() is robust when HOME is unset; os.environ.get("HOME") could
    # return None and make os.path.join raise TypeError
    pgpass_path = os.path.join(os.path.expanduser("~"), ".pgpass")
    if os.path.exists(pgpass_path):
        with open(pgpass_path, "r") as fp:
            pgpass_lines = fp.read().splitlines()
    else:
        pgpass_lines = []
    if pwline in pgpass_lines:
        LOG.debug(
            "Not adding authentication data to: %s since it's already there",
            pgpass_path)
    else:
        # filter out any existing lines with our linekey and add the new line
        pgpass_lines = [
            line for line in pgpass_lines if not line.startswith(linekey)
        ] + [pwline]
        content = "\n".join(pgpass_lines) + "\n"
        with open(pgpass_path, "w") as fp:
            # Tighten permissions before writing: libpq ignores a pgpass file
            # that is readable by group/other
            os.fchmod(fp.fileno(), 0o600)
            fp.write(content)
        # NOTE(review): this logs the password-bearing line at debug level --
        # confirm debug logs are not shipped anywhere sensitive
        LOG.debug("Wrote %r to %r", pwline, pgpass_path)
    return pgutil.create_connection_string(info)
Example #3
0
    def test_auth_alert_files(self, db, pghoard):
        """A good connection leaves no alert files; bad ones drop the matching alert."""
        alert_dir = pghoard.config["alert_file_dir"]

        def clean_alert_files():
            for name in os.listdir(alert_dir):
                os.unlink(os.path.join(alert_dir, name))

        # connecting using the proper user should work and not yield any alerts
        clean_alert_files()
        conn_str = create_connection_string(db.user)
        assert pghoard.check_pg_server_version(conn_str) is not None
        assert os.listdir(alert_dir) == []

        failure_cases = [
            ("******", "configuration_error"),   # nonexistent user
            ("******", "configuration_error"),   # disabled user
            ("******", "authentication_error"),  # existing user, invalid password
        ]
        for bad_user, expected_alert in failure_cases:
            clean_alert_files()
            conn_str = create_connection_string(dict(db.user, user=bad_user))
            assert pghoard.check_pg_server_version(conn_str) is None
            assert os.listdir(alert_dir) == [expected_alert]
Example #4
0
    def test_auth_alert_files(self, db, pghoard):
        """Connection failures must produce the matching alert file; success produces none."""
        alert_dir = pghoard.config["alert_file_dir"]

        def reset_alerts():
            for entry in os.listdir(alert_dir):
                os.unlink(os.path.join(alert_dir, entry))

        # connecting using the proper user should work and not yield any alerts
        reset_alerts()
        conn_str = create_connection_string(db.user)
        assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is not None
        assert os.listdir(alert_dir) == []

        # nonexistent user should yield a configuration error
        # Make sure we're not caching the pg_version
        del pghoard.config["backup_sites"][pghoard.test_site]["pg_version"]
        reset_alerts()
        conn_str = create_connection_string(dict(db.user, user="******"))
        assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None
        assert os.listdir(alert_dir) == ["configuration_error"]

        # a disabled user and an invalid password should yield alerts as well
        for bad_user, expected_alert in [("******", "configuration_error"), ("******", "authentication_error")]:
            reset_alerts()
            conn_str = create_connection_string(dict(db.user, user=bad_user))
            assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None
            assert os.listdir(alert_dir) == [expected_alert]
Example #5
0
def test_connection_info():
    """Dict, URL and libpq-string inputs must all parse to the same connection info."""
    url = "postgres://*****:*****@dbhost.local:5555/abc?replication=true&sslmode=foobar&sslmode=require"
    cs = ("host=dbhost.local user='******'   dbname='abc'\n"
          "replication=true   password=secret sslmode=require port=5555")
    expected = {
        "dbname": "abc",
        "host": "dbhost.local",
        "password": "******",
        "port": "5555",
        "replication": "true",
        "sslmode": "require",
        "user": "******",
    }
    for source in (cs, url):
        assert get_connection_info(expected) == get_connection_info(source)

    # round-tripping through create_connection_string quotes the values
    basic_cstr = "host=localhost user=os"
    assert create_connection_string(get_connection_info(basic_cstr)) == "host='localhost' user='******'"

    # escaped single quote inside a quoted value
    assert get_connection_info("foo=bar bar='\\'x'") == {"bar": "'x", "foo": "bar"}

    # malformed connection strings must raise
    for bad in ("foo=bar x", "foo=bar bar='x"):
        with raises(ValueError):
            get_connection_info(bad)
Example #6
0
def recovery_db():
    """Yield a PostgreSQL instance restarted as a standby (in recovery).

    Installs the pgespresso extension first when a usable version is
    available, then shuts the server down cleanly and restarts it with a
    recovery configuration appropriate for its major version.
    """
    with setup_pg() as pg:
        # Make sure pgespresso extension is installed before we turn this into a standby
        conn_str = pgutil.create_connection_string(pg.user)
        conn = psycopg2.connect(conn_str)
        cursor = conn.cursor()
        cursor.execute("SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'")
        if cursor.fetchone():
            cursor.execute("CREATE EXTENSION pgespresso")
        conn.commit()
        conn.close()
        # Now perform a clean shutdown and restart in recovery
        pg.kill(force=False, immediate=False)

        recovery_conf = [
            "recovery_target_timeline = 'latest'",
            "restore_command = 'false'",
        ]
        if LooseVersion(pg.ver) >= "12":
            # PG 12+: an empty standby.signal file marks the server as a standby;
            # just touch it (no need to bind the file object)
            with open(os.path.join(pg.pgdata, "standby.signal"), "w"):
                pass

            recovery_conf_path = "postgresql.auto.conf"
            open_mode = "a"  # As it might exist already in some cases
        else:
            # Pre-12: standby mode is declared inside recovery.conf
            recovery_conf.append("standby_mode = 'on'")
            recovery_conf_path = "recovery.conf"
            open_mode = "w"

        with open(os.path.join(pg.pgdata, recovery_conf_path), open_mode) as fp:
            fp.write("\n".join(recovery_conf) + "\n")
        pg.run_pg()
        yield pg
Example #7
0
 def _switch_wal(self, db, count):
     """Insert *count* rows, forcing a WAL switch after each insert.

     Returns a (start_wal, end_wal) tuple of WAL file names observed before
     and after the inserts.
     """
     conn = psycopg2.connect(create_connection_string(db.user))
     conn.autocommit = True
     try:
         cursor = conn.cursor()
         # pg_switch_xlog & friends were renamed to *_wal in PostgreSQL 10
         # (server_version >= 100000); pick the right statements once instead
         # of re-checking the version at every call site
         if conn.server_version >= 100000:
             walfile_query = "SELECT pg_walfile_name(pg_current_wal_lsn())"
             switch_query = "SELECT pg_switch_wal()"
         else:
             walfile_query = "SELECT pg_xlogfile_name(pg_current_xlog_location())"
             switch_query = "SELECT pg_switch_xlog()"
         cursor.execute(walfile_query)
         start_wal = cursor.fetchone()[0]
         cursor.execute("CREATE TABLE IF NOT EXISTS testint (i INT)")
         for n in range(count):
             cursor.execute("INSERT INTO testint (i) VALUES (%s)", [n])
             cursor.execute(switch_query)
         cursor.execute(walfile_query)
         end_wal = cursor.fetchone()[0]
     finally:
         # close the connection even if a statement fails
         conn.close()
     return start_wal, end_wal
Example #8
0
    def test_pause_on_disk_full(self, db, pghoard_separate_volume):
        """Verify WAL receiving is auto-suspended when the incoming-WAL volume fills up.

        Bursts WAL switches against a pghoard instance whose incoming-WAL
        volume is small, then waits for the transfer agent to report the
        uploads — proving the files kept flowing instead of filling the disk.
        """
        pghoard = pghoard_separate_volume
        conn_str = create_connection_string(db.user)
        conn = psycopg2.connect(conn_str)
        conn.autocommit = True
        cursor = conn.cursor()

        wal_directory = os.path.join(pghoard.config["backup_location"],
                                     pghoard.test_site, "xlog_incoming")
        os.makedirs(wal_directory, exist_ok=True)

        pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory)
        # Create 16 new WAL segments in very quick succession. Our volume for incoming WALs is only 100
        # MiB so if logic for automatically suspending pg_receive(xlog|wal) wasn't working the volume
        # would certainly fill up and the files couldn't be processed. Now this should work fine.
        for _ in range(16):
            if conn.server_version >= 100000:
                cursor.execute("SELECT txid_current(), pg_switch_wal()")
            else:
                cursor.execute("SELECT txid_current(), pg_switch_xlog()")

        start = time.monotonic()
        # NOTE(review): hard-coded site name -- presumably matches the site set up
        # by the pghoard_separate_volume fixture; confirm against the fixture.
        site = "test_pause_on_disk_full"
        while True:
            # Poll the transfer agent's state until at least 15 WAL uploads are
            # seen, failing after a 15 second timeout.
            xlogs = pghoard.transfer_agents[0].state[site]["upload"]["xlog"][
                "xlogs_since_basebackup"]
            if xlogs >= 15:
                break
            elif time.monotonic() - start > 15:
                assert False, "Expected at least 15 xlog uploads, got {}".format(
                    xlogs)
            time.sleep(0.1)
Example #9
0
def test_connection_info():
    """get_connection_info must normalize dict, URL and libpq-string inputs identically."""
    url = "postgres://*****:*****@dbhost.local:5555/abc?replication=true&sslmode=foobar&sslmode=require"
    cs = ("host=dbhost.local user='******'   dbname='abc'\n"
          "replication=true   password=secret sslmode=require port=5555")
    conn_info = {
        "host": "dbhost.local",
        "port": "5555",
        "user": "******",
        "password": "******",
        "dbname": "abc",
        "replication": "true",
        "sslmode": "require",
    }
    parsed = get_connection_info(conn_info)
    assert parsed == get_connection_info(cs)
    assert parsed == get_connection_info(url)

    # round-trip through create_connection_string quotes the values
    assert create_connection_string(get_connection_info("host=localhost user=os")) == "host='localhost' user='******'"

    # escaped single quote inside a quoted value
    assert get_connection_info("foo=bar bar='\\'x'") == {"foo": "bar", "bar": "'x"}

    # malformed connection strings must raise
    with raises(ValueError):
        get_connection_info("foo=bar x")
    with raises(ValueError):
        get_connection_info("foo=bar bar='x")
Example #10
0
 def test_basebackups_replica_local_tar_pgespresso(self, capsys, recovery_db, pghoard, tmpdir):
     """Replica local-tar basebackup; skipped unless pgespresso >= 1.2 is available."""
     with psycopg2.connect(pgutil.create_connection_string(recovery_db.user)) as conn:
         conn.autocommit = True
         cursor = conn.cursor()
         cursor.execute("SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'")
         if cursor.fetchone() is None:
             pytest.skip("pgespresso not available")
     self._test_basebackups(capsys, recovery_db, pghoard, tmpdir, "local-tar", replica=True)
Example #11
0
 def test_basebackups_replica_local_tar_pgespresso(self, capsys, recovery_db, pghoard, tmpdir):
     """Run the replica basebackup flow in local-tar mode when pgespresso is usable."""
     conn_str = pgutil.create_connection_string(recovery_db.user)
     with psycopg2.connect(conn_str) as conn:
         conn.autocommit = True
         cur = conn.cursor()
         cur.execute("SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'")
         espresso_available = cur.fetchone() is not None
     if not espresso_available:
         pytest.skip("pgespresso not available")
     self._test_basebackups(capsys, recovery_db, pghoard, tmpdir, "local-tar", replica=True)
Example #12
0
 def test_basebackups_local_tar_pgespresso(self, capsys, db, pghoard, tmpdir):
     """Local-tar basebackup with the pgespresso extension created and dropped around the test."""
     with psycopg2.connect(pgutil.create_connection_string(db.user)) as conn:
         conn.autocommit = True
         cur = conn.cursor()
         cur.execute("SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'")
         if cur.fetchone() is None:
             pytest.skip("pgespresso not available")
         try:
             cur.execute("CREATE EXTENSION pgespresso")
             self._test_basebackups(capsys, db, pghoard, tmpdir, BaseBackupMode.local_tar)
         finally:
             # always drop the extension: the database is shared across tests
             cur.execute("DROP EXTENSION pgespresso")
Example #13
0
 def _switch_xlog(self, db, count):
     """Insert *count* rows, switching xlog after each; return (start, end) xlog file names."""
     connection = psycopg2.connect(create_connection_string(db.user))
     connection.autocommit = True
     cur = connection.cursor()
     name_query = "SELECT pg_xlogfile_name(pg_current_xlog_location())"
     cur.execute(name_query)
     start_xlog = cur.fetchone()[0]
     cur.execute("CREATE TABLE IF NOT EXISTS testint (i INT)")
     for value in range(count):
         cur.execute("INSERT INTO testint (i) VALUES (%s)", [value])
         cur.execute("SELECT pg_switch_xlog()")
     cur.execute(name_query)
     end_xlog = cur.fetchone()[0]
     connection.close()
     return start_xlog, end_xlog
Example #14
0
 def test_basebackups_local_tar_exclusive_conflict(self, capsys, db, pghoard, tmpdir):
     """Local-tar basebackup must succeed while an exclusive backup is already in progress.

     Only meaningful on PostgreSQL < 9.6 (exclusive backup API); skipped otherwise.
     """
     # Compare version components numerically: the previous lexical string
     # comparison (db.pgver >= "9.6") treats "10" as smaller than "9.6" and
     # would fail to skip this test on PostgreSQL 10+.
     # NOTE(review): assumes db.pgver looks like "9.5[.x]" / "10[.x]" -- confirm
     if tuple(int(part) for part in db.pgver.split(".")[:2]) >= (9, 6):
         pytest.skip("PostgreSQL < 9.6 required for exclusive backup tests")
     conn_str = pgutil.create_connection_string(db.user)
     need_stop = False
     try:
         with psycopg2.connect(conn_str) as conn:
             conn.autocommit = True
             cursor = conn.cursor()
             # Start a conflicting exclusive backup before running ours
             cursor.execute("SELECT pg_start_backup('conflicting')")  # pylint: disable=used-before-assignment
             need_stop = True
         self._test_basebackups(capsys, db, pghoard, tmpdir, "local-tar")
         need_stop = False
     finally:
         # Only stop the exclusive backup if we actually started it and the
         # basebackup run didn't complete (which would have ended it)
         if need_stop:
             with psycopg2.connect(conn_str) as conn:
                 conn.autocommit = True
                 cursor = conn.cursor()
                 cursor.execute("SELECT pg_stop_backup()")
Example #15
0
 def test_basebackups_local_tar_exclusive_conflict(self, capsys, db, pghoard, tmpdir):
     """Local-tar basebackup must succeed while an exclusive backup is already in progress.

     Only meaningful on PostgreSQL < 9.6 (exclusive backup API); skipped otherwise.
     """
     # Compare version components numerically: the previous lexical string
     # comparison (db.pgver >= "9.6") treats "10" as smaller than "9.6" and
     # would fail to skip this test on PostgreSQL 10+.
     # NOTE(review): assumes db.pgver looks like "9.5[.x]" / "10[.x]" -- confirm
     if tuple(int(part) for part in db.pgver.split(".")[:2]) >= (9, 6):
         pytest.skip("PostgreSQL < 9.6 required for exclusive backup tests")
     conn_str = pgutil.create_connection_string(db.user)
     need_stop = False
     try:
         with psycopg2.connect(conn_str) as conn:
             conn.autocommit = True
             cursor = conn.cursor()
             # Start a conflicting exclusive backup before running ours
             cursor.execute("SELECT pg_start_backup('conflicting')")  # pylint: disable=used-before-assignment
             need_stop = True
         self._test_basebackups(capsys, db, pghoard, tmpdir, "local-tar")
         need_stop = False
     finally:
         # Only stop the exclusive backup if we actually started it and the
         # basebackup run didn't complete (which would have ended it)
         if need_stop:
             with psycopg2.connect(conn_str) as conn:
                 conn.autocommit = True
                 cursor = conn.cursor()
                 cursor.execute("SELECT pg_stop_backup()")
Example #16
0
def recovery_db():
    """Yield a PG instance restarted as a standby; pgespresso is installed first when available."""
    with setup_pg() as pg:
        # Install pgespresso (if a usable version exists) before turning the
        # server into a standby
        conn = psycopg2.connect(pgutil.create_connection_string(pg.user))
        cursor = conn.cursor()
        cursor.execute("SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'")
        if cursor.fetchone() is not None:
            cursor.execute("CREATE EXTENSION pgespresso")
        conn.commit()
        conn.close()
        # Clean shutdown, then bring the server back up in recovery mode
        pg.kill(force=False, immediate=False)
        recovery_lines = [
            "standby_mode = 'on'",
            "recovery_target_timeline = 'latest'",
            "restore_command = 'false'",
        ]
        with open(os.path.join(pg.pgdata, "recovery.conf"), "w") as conf_file:
            conf_file.write("\n".join(recovery_lines) + "\n")
        pg.run_pg()
        yield pg
Example #17
0
def recovery_db():
    """Yield a standby PG instance (clean shutdown + restart in recovery), with pgespresso if available."""
    with setup_pg() as pg:
        # Make sure pgespresso extension is installed before we turn this into a standby
        conn_str = pgutil.create_connection_string(pg.user)
        conn = psycopg2.connect(conn_str)
        cursor = conn.cursor()
        espresso_check = "SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'"
        cursor.execute(espresso_check)
        if cursor.fetchone():
            cursor.execute("CREATE EXTENSION pgespresso")
        conn.commit()
        conn.close()
        # Now perform a clean shutdown and restart in recovery
        pg.kill(force=False, immediate=False)
        recovery_conf = ("standby_mode = 'on'\n"
                         "recovery_target_timeline = 'latest'\n"
                         "restore_command = 'false'\n")
        with open(os.path.join(pg.pgdata, "recovery.conf"), "w") as fp:
            fp.write(recovery_conf)
        pg.run_pg()
        yield pg
Example #18
0
    def test_basebackups_tablespaces(self, capsys, db, pghoard, tmpdir):
        """End-to-end check that tablespaces survive a local-tar basebackup and restore.

        Creates a tablespace with data, takes a basebackup, then exercises the
        restore error paths (non-empty target dir, missing target dir,
        unwritable target dir, mapping for an unknown tablespace) before doing
        a real restore into fresh directories and comparing table contents
        between the original and restored servers.
        """
        # Create a test tablespace for this instance, but make sure we drop it at the end of the test as the
        # database we use is shared by all test cases, and tablespaces are a global concept so the test
        # tablespace could interfere with other tests
        tspath = tmpdir.join("extra-ts").strpath
        os.makedirs(tspath)
        conn_str = pgutil.create_connection_string(db.user)
        conn = psycopg2.connect(conn_str)
        conn.autocommit = True
        cursor = conn.cursor()
        cursor.execute("CREATE TABLESPACE tstest LOCATION %s", [tspath])
        r_db, r_conn = None, None
        try:
            cursor.execute(
                "CREATE TABLE tstest (id BIGSERIAL PRIMARY KEY, value BIGINT) TABLESPACE tstest"
            )
            cursor.execute(
                "INSERT INTO tstest (value) SELECT * FROM generate_series(1, 1000)"
            )
            cursor.execute("CHECKPOINT")
            cursor.execute(
                "SELECT oid, pg_tablespace_location(oid) FROM pg_tablespace WHERE spcname = 'tstest'"
            )
            res = cursor.fetchone()
            assert res[1] == tspath

            # Start receivexlog since we want the WALs to be able to restore later on
            wal_directory = os.path.join(pghoard.config["backup_location"],
                                         pghoard.test_site, "xlog_incoming")
            makedirs(wal_directory, exist_ok=True)
            pghoard.receivexlog_listener(pghoard.test_site, db.user,
                                         wal_directory)
            # pg_switch_xlog was renamed to pg_switch_wal in PostgreSQL 10
            if conn.server_version >= 100000:
                cursor.execute("SELECT txid_current(), pg_switch_wal()")
            else:
                cursor.execute("SELECT txid_current(), pg_switch_xlog()")

            self._test_create_basebackup(capsys, db, pghoard, "local-tar")

            # Switch WAL a couple more times so the segments covering the
            # backup get archived
            if conn.server_version >= 100000:
                cursor.execute("SELECT txid_current(), pg_switch_wal()")
                cursor.execute("SELECT txid_current(), pg_switch_wal()")
            else:
                cursor.execute("SELECT txid_current(), pg_switch_xlog()")
                cursor.execute("SELECT txid_current(), pg_switch_xlog()")

            backup_out = tmpdir.join("test-restore").strpath
            backup_ts_out = tmpdir.join("test-restore-tstest").strpath

            # Tablespaces are extracted to their previous absolute paths by default, but the path must be empty
            # and it isn't as it's still used by the running PG
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config",
                    pghoard.config_path,
                    "--site",
                    pghoard.test_site,
                    "--target-dir",
                    backup_out,
                ])
            assert "Tablespace 'tstest' target directory" in str(excinfo.value)
            assert "not empty" in str(excinfo.value)
            # We can't restore tablespaces to non-existent directories either
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config",
                    pghoard.config_path,
                    "--site",
                    pghoard.test_site,
                    "--target-dir",
                    backup_out,
                    "--tablespace-dir",
                    "tstest={}".format(backup_ts_out),
                ])
            assert "Tablespace 'tstest' target directory" in str(excinfo.value)
            assert "does not exist" in str(excinfo.value)
            os.makedirs(backup_ts_out)
            # We can't restore if the directory isn't writable
            os.chmod(backup_ts_out, 0o500)
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config",
                    pghoard.config_path,
                    "--site",
                    pghoard.test_site,
                    "--target-dir",
                    backup_out,
                    "--tablespace-dir",
                    "tstest={}".format(backup_ts_out),
                ])
            assert "Tablespace 'tstest' target directory" in str(excinfo.value)
            assert "empty, but not writable" in str(excinfo.value)
            os.chmod(backup_ts_out, 0o700)
            # We can't proceed if we request mappings for non-existent tablespaces
            backup_other_out = tmpdir.join("test-restore-other").strpath
            os.makedirs(backup_other_out)
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config",
                    pghoard.config_path,
                    "--site",
                    pghoard.test_site,
                    "--target-dir",
                    backup_out,
                    "--tablespace-dir",
                    "tstest={}".format(backup_ts_out),
                    "--tablespace-dir",
                    "other={}".format(backup_other_out),
                ])
            assert "Tablespace mapping for ['other'] was requested, but" in str(
                excinfo.value)

            # Now, finally, everything should be valid and we can proceed with restore
            Restore().run([
                "get-basebackup",
                "--config",
                pghoard.config_path,
                "--site",
                pghoard.test_site,
                "--restore-to-master",
                "--target-dir",
                backup_out,
                "--tablespace-dir",
                "tstest={}".format(backup_ts_out),
            ])

            # Adjust the generated recovery.conf to point pghoard_postgres_command to our instance
            new_py_restore_cmd = "PYTHONPATH={} python3 -m pghoard.postgres_command --mode restore".format(
                os.path.dirname(os.path.dirname(__file__)))
            new_go_restore_cmd = "{}/pghoard_postgres_command_go --mode restore".format(
                os.path.dirname(os.path.dirname(__file__)))
            with open(os.path.join(backup_out, "recovery.conf"), "r+") as fp:
                rconf = fp.read()
                # Replace the Go command first: its string contains the Python
                # command's string as a substring
                rconf = rconf.replace(
                    "pghoard_postgres_command_go --mode restore",
                    new_go_restore_cmd)
                rconf = rconf.replace(
                    "pghoard_postgres_command --mode restore",
                    new_py_restore_cmd)
                fp.seek(0)
                fp.write(rconf)

            r_db = PGTester(backup_out)
            r_db.user = dict(db.user, host=backup_out)
            r_db.run_pg()
            r_conn_str = pgutil.create_connection_string(r_db.user)

            # Wait for PG to start up
            start_time = time.monotonic()
            while True:
                try:
                    r_conn = psycopg2.connect(r_conn_str)
                    break
                except psycopg2.OperationalError as ex:
                    if "starting up" in str(ex):
                        assert time.monotonic() - start_time <= 10
                        time.sleep(1)
                    else:
                        raise

            r_cursor = r_conn.cursor()
            # Make sure the tablespace is defined and points to the right (new) path
            r_cursor.execute(
                "SELECT oid, pg_tablespace_location(oid) FROM pg_tablespace WHERE spcname = 'tstest'"
            )
            r_res = r_cursor.fetchone()
            assert r_res[1] == backup_ts_out

            # We should be able to read from the table in the tablespace and the values should match what we stored before
            r_cursor.execute("SELECT id FROM tstest")
            r_res = r_cursor.fetchall()
            cursor.execute("SELECT id FROM tstest")
            orig_res = cursor.fetchall()
            assert r_res == orig_res

        finally:
            # Clean up the restored server and the global tablespace so other
            # tests sharing this database are not affected
            if r_conn:
                r_conn.close()
            if r_db:
                r_db.kill(force=True)
            cursor.execute("DROP TABLE IF EXISTS tstest")
            cursor.execute("DROP TABLESPACE tstest")
            conn.close()
Example #19
0
    def test_basebackups_tablespaces(self, capsys, db, pghoard, tmpdir):
        """End-to-end check that tablespaces survive a local-tar basebackup and restore.

        Creates a tablespace with a table, takes a basebackup, then exercises
        the restore error paths (non-empty target dir, missing target dir,
        unwritable target dir, mapping for an unknown tablespace) before doing
        a real restore into fresh directories and comparing table contents
        between the original and restored servers.
        """
        # Create a test tablespace for this instance, but make sure we drop it at the end of the test as the
        # database we use is shared by all test cases, and tablespaces are a global concept so the test
        # tablespace could interfere with other tests
        tspath = tmpdir.join("extra-ts").strpath
        os.makedirs(tspath)
        conn_str = pgutil.create_connection_string(db.user)
        conn = psycopg2.connect(conn_str)
        conn.autocommit = True
        cursor = conn.cursor()
        cursor.execute("CREATE TABLESPACE tstest LOCATION %s", [tspath])
        r_db, r_conn = None, None
        try:
            cursor.execute("CREATE TABLE tstest (id BIGSERIAL PRIMARY KEY) TABLESPACE tstest")
            cursor.execute("INSERT INTO tstest (id) VALUES (default)")
            cursor.execute("SELECT oid, pg_tablespace_location(oid) FROM pg_tablespace WHERE spcname = 'tstest'")
            res = cursor.fetchone()
            assert res[1] == tspath

            # Start receivexlog since we want the WALs to be able to restore later on
            xlog_directory = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "xlog_incoming")
            makedirs(xlog_directory, exist_ok=True)
            pghoard.receivexlog_listener(pghoard.test_site, db.user, xlog_directory)
            cursor.execute("SELECT txid_current(), pg_switch_xlog()")
            self._test_create_basebackup(capsys, db, pghoard, "local-tar")
            # Switch xlog a couple more times so the segments covering the backup get archived
            cursor.execute("SELECT txid_current(), pg_switch_xlog()")
            cursor.execute("SELECT txid_current(), pg_switch_xlog()")

            backup_out = tmpdir.join("test-restore").strpath
            backup_ts_out = tmpdir.join("test-restore-tstest").strpath

            # Tablespaces are extracted to their previous absolute paths by default, but the path must be empty
            # and it isn't as it's still used by the running PG
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config", pghoard.config_path,
                    "--site", pghoard.test_site,
                    "--target-dir", backup_out,
                ])
            assert "Tablespace 'tstest' target directory" in str(excinfo.value)
            assert "not empty" in str(excinfo.value)
            # We can't restore tablespaces to non-existent directories either
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config", pghoard.config_path,
                    "--site", pghoard.test_site,
                    "--target-dir", backup_out,
                    "--tablespace-dir", "tstest={}".format(backup_ts_out),
                ])
            assert "Tablespace 'tstest' target directory" in str(excinfo.value)
            assert "does not exist" in str(excinfo.value)
            os.makedirs(backup_ts_out)
            # We can't restore if the directory isn't writable
            os.chmod(backup_ts_out, 0o500)
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config", pghoard.config_path,
                    "--site", pghoard.test_site,
                    "--target-dir", backup_out,
                    "--tablespace-dir", "tstest={}".format(backup_ts_out),
                ])
            assert "Tablespace 'tstest' target directory" in str(excinfo.value)
            assert "empty, but not writable" in str(excinfo.value)
            os.chmod(backup_ts_out, 0o700)
            # We can't proceed if we request mappings for non-existent tablespaces
            backup_other_out = tmpdir.join("test-restore-other").strpath
            os.makedirs(backup_other_out)
            with pytest.raises(RestoreError) as excinfo:
                Restore().run([
                    "get-basebackup",
                    "--config", pghoard.config_path,
                    "--site", pghoard.test_site,
                    "--target-dir", backup_out,
                    "--tablespace-dir", "tstest={}".format(backup_ts_out),
                    "--tablespace-dir", "other={}".format(backup_other_out),
                ])
            assert "Tablespace mapping for ['other'] was requested, but" in str(excinfo.value)

            # Now, finally, everything should be valid and we can proceed with restore
            Restore().run([
                "get-basebackup",
                "--config", pghoard.config_path,
                "--site", pghoard.test_site,
                "--restore-to-master",
                "--target-dir", backup_out,
                "--tablespace-dir", "tstest={}".format(backup_ts_out),
            ])

            # Adjust the generated recovery.conf to point pghoard_postgres_command to our instance
            new_cmd = "PYTHONPATH={} python3 -m pghoard.postgres_command".format(os.path.dirname(os.path.dirname(__file__)))
            with open(os.path.join(backup_out, "recovery.conf"), "r+") as fp:
                rconf = fp.read()
                rconf = rconf.replace("pghoard_postgres_command", new_cmd)
                fp.seek(0)
                fp.write(rconf)

            r_db = TestPG(backup_out)
            r_db.user = dict(db.user, host=backup_out)
            r_db.run_pg()
            r_conn_str = pgutil.create_connection_string(r_db.user)

            # Wait for PG to start up
            start_time = time.monotonic()
            while True:
                try:
                    r_conn = psycopg2.connect(r_conn_str)
                    break
                except psycopg2.OperationalError as ex:
                    if "starting up" in str(ex):
                        assert time.monotonic() - start_time <= 10
                        time.sleep(1)
                    else:
                        raise

            r_cursor = r_conn.cursor()
            # Make sure the tablespace is defined and points to the right (new) path
            r_cursor.execute("SELECT oid, pg_tablespace_location(oid) FROM pg_tablespace WHERE spcname = 'tstest'")
            r_res = r_cursor.fetchone()
            assert r_res[1] == backup_ts_out

            # We should be able to read from the table in the tablespace and the values should match what we stored before
            r_cursor.execute("SELECT id FROM tstest")
            r_res = r_cursor.fetchall()
            cursor.execute("SELECT id FROM tstest")
            orig_res = cursor.fetchall()
            assert r_res == orig_res

        finally:
            # Clean up the restored server and the global tablespace so other
            # tests sharing this database are not affected
            if r_conn:
                r_conn.close()
            if r_db:
                r_db.kill(force=True)
            cursor.execute("DROP TABLE IF EXISTS tstest")
            cursor.execute("DROP TABLESPACE tstest")
            conn.close()