예제 #1
0
def test_pageserver_http_api_client_auth_enabled(zenith_env_builder: ZenithEnvBuilder):
    """Verify the pageserver HTTP API works when auth is enabled.

    Generates a management-scope JWT and runs the shared client checks
    against the initial tenant.
    """
    zenith_env_builder.pageserver_auth_enabled = True
    env = zenith_env_builder.init_start()

    token = env.auth_keys.generate_management_token()
    http_client = env.pageserver.http_client(auth_token=token)
    check_client(http_client, env.initial_tenant)
예제 #2
0
def test_compute_auth_to_pageserver(zenith_env_builder: ZenithEnvBuilder, with_safekeepers: bool):
    """Check that a compute node can talk to the pageserver with auth enabled.

    Runs a small write-then-read workload on a fresh branch, optionally
    routing WAL through a 3-node safekeeper cluster.
    """
    zenith_env_builder.pageserver_auth_enabled = True
    if with_safekeepers:
        zenith_env_builder.num_safekeepers = 3
    env = zenith_env_builder.init_start()

    branch = f'test_compute_auth_to_pageserver{with_safekeepers}'
    env.zenith_cli.create_branch(branch)
    pg = env.postgres.create_start(branch)

    # No explicit transaction management: autocommit after each statement
    # is what triggers the wait for the acceptors.
    with closing(pg.connect()) as conn, conn.cursor() as cur:
        cur.execute('CREATE TABLE t(key int primary key, value text)')
        cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
        cur.execute('SELECT sum(key) FROM t')
        assert cur.fetchone() == (5000050000, )
예제 #3
0
def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):
    """Exercise pageserver JWT scoping: tenant tokens vs. management tokens.

    A tenant-scoped token may only operate on its own tenant; a
    management-scoped token may operate on any tenant and on the
    management API.
    """
    zenith_env_builder.pageserver_auth_enabled = True
    env = zenith_env_builder.init_start()

    pageserver = env.pageserver

    # One HTTP client per credential: the real tenant, a token for a
    # tenant id that does not exist, and a management token.
    tenant_token = env.auth_keys.generate_tenant_token(env.initial_tenant.hex)
    tenant_client = env.pageserver.http_client(tenant_token)
    bogus_tenant_token = env.auth_keys.generate_tenant_token(uuid4().hex)
    bogus_tenant_client = env.pageserver.http_client(bogus_tenant_token)

    management_token = env.auth_keys.generate_management_token()
    management_client = env.pageserver.http_client(management_token)

    # "set FOO" does not reach the real auth check; it only decodes the JWT
    # and validates it, so both token kinds should be accepted here.
    pageserver.safe_psql("set FOO", password=tenant_token)
    pageserver.safe_psql("set FOO", password=management_token)

    new_timeline_id = env.zenith_cli.create_branch('test_pageserver_auth',
                                                   tenant_id=env.initial_tenant)

    # A tenant-scoped token can branch within its own tenant...
    tenant_client.timeline_create(tenant_id=env.initial_tenant,
                                  ancestor_timeline_id=new_timeline_id)
    # ...and so can the console via a management token.
    management_client.timeline_create(tenant_id=env.initial_tenant,
                                      ancestor_timeline_id=new_timeline_id)

    # A token scoped to a different tenant id must be rejected.
    with pytest.raises(ZenithPageserverApiException,
                       match='Forbidden: Tenant id mismatch. Permission denied'):
        bogus_tenant_client.timeline_create(tenant_id=env.initial_tenant,
                                            ancestor_timeline_id=new_timeline_id)

    # Tenant creation is a management-only operation...
    management_client.tenant_create()

    # ...so a tenant-scoped token must not be able to do it.
    with pytest.raises(
            ZenithPageserverApiException,
            match='Forbidden: Attempt to access management api with tenant scope. Permission denied'
    ):
        tenant_client.tenant_create()
예제 #4
0
def _assert_table_count(pg, expected_rows: int):
    # Open a fresh connection and assert that table t holds exactly
    # expected_rows rows; logs the observed result for debugging.
    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            cur.execute('SELECT count(*) FROM t')
            r = cur.fetchone()
            assert r == (expected_rows, )
            log.info(f"res = {r}")


def test_restart_compute(zenith_env_builder: ZenithEnvBuilder,
                         with_safekeepers: bool):
    """Repeatedly destroy and recreate the compute node, verifying that
    the data survives each restart.

    Runs with pageserver auth enabled and, when `with_safekeepers` is
    set, with a 3-node safekeeper cluster.
    """
    zenith_env_builder.pageserver_auth_enabled = True
    if with_safekeepers:
        zenith_env_builder.num_safekeepers = 3
    env = zenith_env_builder.init_start()

    env.zenith_cli.create_branch('test_restart_compute')
    pg = env.postgres.create_start('test_restart_compute')
    log.info("postgres is running on 'test_restart_compute' branch")

    # Seed the table with 100k rows and sanity-check the sum.
    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            cur.execute('CREATE TABLE t(key int primary key, value text)')
            cur.execute(
                "INSERT INTO t SELECT generate_series(1,100000), 'payload'")
            cur.execute('SELECT sum(key) FROM t')
            r = cur.fetchone()
            assert r == (5000050000, )
            log.info(f"res = {r}")

    # Remove data directory and restart
    pg.stop_and_destroy().create_start('test_restart_compute')

    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            # We can still see the rows after the restart
            cur.execute('SELECT sum(key) FROM t')
            r = cur.fetchone()
            assert r == (5000050000, )
            log.info(f"res = {r}")

            # Insert another row
            cur.execute("INSERT INTO t VALUES (100001, 'payload2')")
            cur.execute('SELECT count(*) FROM t')

            r = cur.fetchone()
            assert r == (100001, )
            log.info(f"res = {r}")

    # Again remove data directory and restart
    pg.stop_and_destroy().create_start('test_restart_compute')

    # That select causes lots of FPIs and increases the probability of
    # safekeepers lagging behind after query completion
    _assert_table_count(pg, 100001)

    # And again remove data directory and restart
    pg.stop_and_destroy().create_start('test_restart_compute')

    _assert_table_count(pg, 100001)