Exemplo n.º 1
0
def test_concurrent_access(config):
    """test when two backends compete with each other to extract state from third running backend"""

    conns = common.n_async_connect(config, 3)
    conn1, conn2, conn3 = conns
    curs1, curs2, curs3 = (c.cursor() for c in conns)
    query = 'select count(*) from foo join bar on foo.c1=bar.c1'

    # force a serial plan on the observed backend, then start the query on it
    common.set_guc(conn3, 'max_parallel_workers_per_gather', 0)
    curs3.execute(query)
    time.sleep(0.1)

    # both observers request the third backend's state concurrently
    target_pid = conn3.get_backend_pid()
    curs1.callproc('pg_query_state', (target_pid, ))
    curs2.callproc('pg_query_state', (target_pid, ))
    for conn in conns:
        common.wait(conn)

    # both observers must see the same pid, frame number, query text,
    # a non-empty plan and a NULL leader pid
    qs1, qs2 = curs1.fetchall(), curs2.fetchall()
    assert (len(qs1) == len(qs2) == 1
            and qs1[0][0] == qs2[0][0] == target_pid
            and qs1[0][1] == qs2[0][1] == 0
            and qs1[0][2] == qs2[0][2] == query
            and len(qs1[0][3]) > 0 and len(qs2[0][3]) > 0
            and qs1[0][4] == qs2[0][4] == None)

    common.n_close((conn1, conn2, conn3))
Exemplo n.º 2
0
def test_buffers(config):
    """test buffer statistics"""

    conn1, conn2 = common.n_async_connect(config, 2)
    query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1'
    expected = r"""Aggregate \(Current loop: actual rows=0, loop number=1\)
  ->  Hash Join \(Current loop: actual rows=\d+, loop number=1\)
        Hash Cond: \(foo.c1 = bar.c1\)
        Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\)
        Buffers: shared hit=\d+, temp read=\d+ written=\d+
        ->  Seq Scan on foo \(Current loop: actual rows=1000000, loop number=1\)
              Buffers: [^\n]*
        ->  Hash \(Current loop: actual rows=500000, loop number=1\)
              Buckets: \d+  Batches: \d+  Memory Usage: \d+kB
              Buffers: shared hit=\d+, temp written=\d+
              ->  Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)
                    Buffers: .*"""

    common.set_guc(conn1, 'pg_query_state.enable_buffers', 'on')

    qs, notices = common.onetime_query_state_locks(
        config, conn1, conn2, query, {'buffers': True})

    # exactly two state rows, plan text carrying buffer stats, no warnings
    assert len(qs) == 2
    assert re.match(expected, qs[0][3]) is not None
    assert len(notices) == 0

    common.n_close((conn1, conn2))
Exemplo n.º 3
0
def test_timing(config):
    """test timing statistics"""

    acon1, acon2 = common.n_async_connect(config, 2)
    query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1'

    # NOTE: the decimal points and the '..' range separators in timing output
    # are literal characters, so they are escaped here; the previous unescaped
    # '.' matched any character and could hide malformed output.
    expected = r"""Aggregate \(Current loop: running time=\d+\.\d+ actual rows=0, loop number=1\)
  ->  Hash Join \(Current loop: actual time=\d+\.\d+\.\.\d+\.\d+ rows=\d+, loop number=1\)
        Hash Cond: \(foo.c1 = bar.c1\)
        Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\)
        ->  Seq Scan on foo \(Current loop: actual time=\d+\.\d+\.\.\d+\.\d+ rows=1000000, loop number=1\)
        ->  Hash \(Current loop: actual time=\d+\.\d+\.\.\d+\.\d+ rows=500000, loop number=1\)
              Buckets: \d+  Batches: \d+  Memory Usage: \d+kB
              ->  Seq Scan on bar \(Current loop: actual time=\d+\.\d+\.\.\d+\.\d+ rows=\d+, loop number=1\)"""

    common.set_guc(acon1, 'pg_query_state.enable_timing', 'on')

    qs, notices = common.onetime_query_state_locks(config, acon1, acon2, query,
                                                   {'timing': True})

    # two state rows, plan text with timing stats, no warnings emitted
    assert len(qs) == 2
    assert re.match(expected, qs[0][3])
    assert len(notices) == 0

    common.n_close((acon1, acon2))
Exemplo n.º 4
0
def test_buffers(config):
	"""test buffer statistics"""

	conn, = common.n_async_connect(config)
	query = 'select count(*) from foo join bar on foo.c1=bar.c1'
	expected = r"""Aggregate \(Current loop: actual rows=0, loop number=1\)
  ->  Hash Join \(Current loop: actual rows=0, loop number=1\)
        Hash Cond: \(foo.c1 = bar.c1\)
        ->  Seq Scan on foo \(Current loop: actual rows=1, loop number=1\)
              Buffers: [^\n]*
        ->  Hash \(Current loop: actual rows=0, loop number=1\)
              Buckets: \d+  Batches: \d+  Memory Usage: \d+kB
              ->  Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)
                    Buffers: .*"""

	common.set_guc(conn, 'pg_query_state.enable_buffers', 'on')

	# a single state row whose plan text carries buffer statistics, no warnings
	qs, notices = common.onetime_query_state(config, conn, query, {'buffers': True})
	assert len(qs) == 1 and re.match(expected, qs[0][3]) is not None
	assert len(notices) == 0

	common.n_close((conn,))
Exemplo n.º 5
0
def run_tpcds(config):
    """TPC-DS stress test.

    Runs every TPC-DS qualification query on a single async backend while
    repeatedly calling pg_query_state on it, trying to provoke a crash of
    PostgreSQL.  Queries cancelled by statement_timeout are reported at the
    end.
    """

    TPC_DS_EXCLUDE_LIST = []  # actual numbers of TPC-DS tests to exclude
    TPC_DS_STATEMENT_TIMEOUT = 20000  # statement_timeout in ms

    print('Preparing TPC-DS queries...')
    queries_dir = 'tmp_stress/tpcds-result-reproduction/query_qualification/'
    queries = []
    for query_file in sorted(os.listdir(queries_dir)):
        with open(os.path.join(queries_dir, query_file), 'r') as f:
            queries.append(f.read())

    acon, = common.n_async_connect(config)
    pid = acon.get_backend_pid()

    # the timeout is identical for every query, so set the GUC once up front
    # instead of re-issuing it on each iteration
    common.set_guc(acon, 'statement_timeout', TPC_DS_STATEMENT_TIMEOUT)

    print('Starting TPC-DS queries...')
    timeout_list = []  # 1-based numbers of queries that hit statement_timeout
    bar = progressbar.ProgressBar(max_value=len(queries))
    for i, query in enumerate(queries):
        bar.update(i + 1)
        if i + 1 in TPC_DS_EXCLUDE_LIST:
            continue
        try:
            # run query
            acurs = acon.cursor()
            acurs.execute(query)

            # periodically run pg_query_state on running backend trying to get
            # crash of PostgreSQL
            MAX_FIRST_GETTING_QS_RETRIES = 10
            PG_QS_DELAY, BEFORE_GETTING_QS_DELAY = 0.1, 0.1
            BEFORE_GETTING_QS, GETTING_QS = range(2)
            state, n_first_getting_qs_retries = BEFORE_GETTING_QS, 0
            while True:
                result, notices = common.pg_query_state(config, pid)
                # run state machine to determine the first getting of query
                # state and query finishing
                if state == BEFORE_GETTING_QS:
                    if len(result) > 0 or common.BACKEND_IS_ACTIVE_INFO in notices:
                        state = GETTING_QS
                        continue
                    n_first_getting_qs_retries += 1
                    if n_first_getting_qs_retries >= MAX_FIRST_GETTING_QS_RETRIES:
                        # pg_query_state callings don't return any result, more
                        # likely run query has completed
                        break
                    time.sleep(BEFORE_GETTING_QS_DELAY)
                elif state == GETTING_QS:
                    if common.BACKEND_IS_IDLE_INFO in notices:
                        break
                    time.sleep(PG_QS_DELAY)

            # wait for real query completion
            common.wait(acon)

        except psycopg2.extensions.QueryCanceledError:
            timeout_list.append(i + 1)

    common.n_close((acon, ))

    if timeout_list:
        # TPC_DS_STATEMENT_TIMEOUT is in milliseconds; the message reports
        # seconds, so convert (the old code printed the raw ms value)
        print(
            '\nThere were pg_query_state timeouts (%s s) on queries:' %
            (TPC_DS_STATEMENT_TIMEOUT // 1000), timeout_list)